Diffstat
-rw-r--r--.autom4te.cfg10
-rw-r--r--.gitattributes3
-rw-r--r--.gitignore1
-rw-r--r--.h5chkright.ini10
-rw-r--r--CMakeFilters.cmake65
-rw-r--r--CMakeInstallation.cmake181
-rw-r--r--CMakeLists.txt347
-rw-r--r--COPYING15
-rw-r--r--COPYING_LBNL_HDF549
-rw-r--r--CTestConfig.cmake15
-rw-r--r--MANIFEST131
-rw-r--r--Makefile.am10
-rw-r--r--Makefile.dist10
-rw-r--r--README.txt2
-rw-r--r--UserMacros.cmake18
-rw-r--r--acsite.m411
-rwxr-xr-xautogen.sh73
-rwxr-xr-xbin/COPYING11
-rwxr-xr-xbin/bbrelease10
-rwxr-xr-xbin/buildhdf510
-rwxr-xr-xbin/checkapi10
-rwxr-xr-xbin/checkposix10
-rwxr-xr-xbin/chkconfigure12
-rwxr-xr-xbin/chkcopyright10
-rwxr-xr-xbin/chkmanifest10
-rwxr-xr-xbin/debug-ohdr10
-rwxr-xr-xbin/dependencies10
-rwxr-xr-xbin/deploy10
-rwxr-xr-xbin/distdep10
-rwxr-xr-xbin/errors10
-rwxr-xr-xbin/gcov_script10
-rwxr-xr-xbin/genparser10
-rwxr-xr-xbin/h5vers114
-rwxr-xr-xbin/iostats10
-rwxr-xr-xbin/locate_sw10
-rwxr-xr-xbin/make_err20
-rwxr-xr-xbin/make_overflow20
-rwxr-xr-xbin/make_vers20
-rwxr-xr-xbin/mkdirs10
-rwxr-xr-xbin/newer10
-rw-r--r--bin/output_filter.sh25
-rwxr-xr-xbin/pkgscrpts/h5rmflags10
-rwxr-xr-xbin/pkgscrpts/makeHDF5BinaryTarfiles.pl10
-rwxr-xr-xbin/release249
-rwxr-xr-xbin/restore.sh10
-rwxr-xr-xbin/runtest10
-rwxr-xr-xbin/snapshot564
-rw-r--r--bin/snapshot_version10
-rwxr-xr-xbin/switch_maint_mode10
-rwxr-xr-xbin/timekeeper10
-rwxr-xr-xbin/trace131
-rwxr-xr-xbin/yodconfigure11
-rw-r--r--c++/CMakeLists.txt8
-rw-r--r--c++/COPYING11
-rw-r--r--c++/Makefile.am10
-rw-r--r--c++/examples/CMakeLists.txt12
-rw-r--r--c++/examples/CMakeTests.cmake60
-rw-r--r--c++/examples/Makefile.am10
-rw-r--r--c++/examples/chunks.cpp10
-rw-r--r--c++/examples/compound.cpp10
-rw-r--r--c++/examples/create.cpp10
-rw-r--r--c++/examples/extend_ds.cpp10
-rw-r--r--c++/examples/h5group.cpp10
-rw-r--r--c++/examples/h5tutr_cmprss.cpp10
-rw-r--r--c++/examples/h5tutr_crtatt.cpp10
-rw-r--r--c++/examples/h5tutr_crtdat.cpp10
-rw-r--r--c++/examples/h5tutr_crtgrp.cpp10
-rw-r--r--c++/examples/h5tutr_crtgrpar.cpp10
-rw-r--r--c++/examples/h5tutr_crtgrpd.cpp10
-rw-r--r--c++/examples/h5tutr_extend.cpp10
-rw-r--r--c++/examples/h5tutr_rdwt.cpp10
-rw-r--r--c++/examples/h5tutr_subset.cpp10
-rw-r--r--c++/examples/readdata.cpp10
-rw-r--r--c++/examples/run-c++-ex.sh.in10
-rw-r--r--c++/examples/testh5c++.sh.in10
-rw-r--r--c++/examples/writedata.cpp10
-rw-r--r--c++/src/CMakeLists.txt11
-rw-r--r--c++/src/H5AbstractDs.cpp447
-rw-r--r--c++/src/H5AbstractDs.h84
-rw-r--r--c++/src/H5Alltypes.h10
-rw-r--r--c++/src/H5ArrayType.cpp147
-rw-r--r--c++/src/H5ArrayType.h62
-rw-r--r--c++/src/H5AtomType.cpp371
-rw-r--r--c++/src/H5AtomType.h76
-rw-r--r--c++/src/H5Attribute.cpp511
-rw-r--r--c++/src/H5Attribute.h109
-rw-r--r--c++/src/H5Classes.h61
-rw-r--r--c++/src/H5CommonFG.cpp358
-rw-r--r--c++/src/H5CommonFG.h77
-rw-r--r--c++/src/H5CompType.cpp588
-rw-r--r--c++/src/H5CompType.h140
-rw-r--r--c++/src/H5Cpp.h12
-rw-r--r--c++/src/H5CppDoc.h28
-rw-r--r--c++/src/H5DataSet.cpp833
-rw-r--r--c++/src/H5DataSet.h135
-rw-r--r--c++/src/H5DataSpace.cpp803
-rw-r--r--c++/src/H5DataSpace.h168
-rw-r--r--c++/src/H5DataType.cpp1002
-rw-r--r--c++/src/H5DataType.h180
-rw-r--r--c++/src/H5DcreatProp.cpp963
-rw-r--r--c++/src/H5DcreatProp.h162
-rw-r--r--c++/src/H5DxferProp.cpp588
-rw-r--r--c++/src/H5DxferProp.h145
-rw-r--r--c++/src/H5EnumType.cpp359
-rw-r--r--c++/src/H5EnumType.h86
-rw-r--r--c++/src/H5Exception.cpp661
-rw-r--r--c++/src/H5Exception.h169
-rw-r--r--c++/src/H5FaccProp.cpp872
-rw-r--r--c++/src/H5FaccProp.h188
-rw-r--r--c++/src/H5FcreatProp.cpp389
-rw-r--r--c++/src/H5FcreatProp.h107
-rw-r--r--c++/src/H5File.cpp742
-rw-r--r--c++/src/H5File.h141
-rw-r--r--c++/src/H5FloatType.cpp409
-rw-r--r--c++/src/H5FloatType.h80
-rw-r--r--c++/src/H5Group.cpp248
-rw-r--r--c++/src/H5Group.h85
-rw-r--r--c++/src/H5IdComponent.cpp425
-rw-r--r--c++/src/H5IdComponent.h126
-rw-r--r--c++/src/H5Include.h14
-rw-r--r--c++/src/H5IntType.cpp173
-rw-r--r--c++/src/H5IntType.h56
-rw-r--r--c++/src/H5LaccProp.cpp149
-rw-r--r--c++/src/H5LaccProp.h73
-rw-r--r--c++/src/H5Library.cpp225
-rw-r--r--c++/src/H5Library.h51
-rw-r--r--c++/src/H5Location.cpp2271
-rw-r--r--c++/src/H5Location.h340
-rw-r--r--c++/src/H5Object.cpp484
-rw-r--r--c++/src/H5Object.h131
-rw-r--r--c++/src/H5OcreatProp.cpp134
-rw-r--r--c++/src/H5OcreatProp.h57
-rw-r--r--c++/src/H5PredType.cpp464
-rw-r--r--c++/src/H5PredType.h343
-rw-r--r--c++/src/H5PropList.cpp838
-rw-r--r--c++/src/H5PropList.h162
-rw-r--r--c++/src/H5StrType.cpp337
-rw-r--r--c++/src/H5StrType.h70
-rw-r--r--c++/src/H5VarLenType.cpp101
-rw-r--r--c++/src/H5VarLenType.h46
-rw-r--r--c++/src/Makefile.am37
-rw-r--r--c++/src/cpp_doc_config2
-rw-r--r--c++/src/h5c++.in10
-rw-r--r--c++/src/header.html10
-rw-r--r--c++/test/CMakeLists.txt2
-rw-r--r--c++/test/CMakeTests.cmake38
-rw-r--r--c++/test/H5srcdir_str.h.in10
-rw-r--r--c++/test/Makefile.am10
-rw-r--r--c++/test/dsets.cpp1760
-rw-r--r--c++/test/h5cpputil.cpp152
-rw-r--r--c++/test/h5cpputil.h80
-rw-r--r--c++/test/tarray.cpp492
-rw-r--r--c++/test/tattr.cpp2072
-rw-r--r--c++/test/tcompound.cpp968
-rw-r--r--c++/test/tdspl.cpp134
-rw-r--r--c++/test/testhdf5.cpp98
-rw-r--r--c++/test/tfile.cpp882
-rw-r--r--c++/test/tfilter.cpp100
-rw-r--r--c++/test/th5s.cpp585
-rw-r--r--c++/test/titerate.cpp380
-rw-r--r--c++/test/tlinks.cpp374
-rw-r--r--c++/test/tobject.cpp606
-rw-r--r--c++/test/trefer.cpp1072
-rw-r--r--c++/test/ttypes.cpp640
-rw-r--r--c++/test/tvlstr.cpp1033
-rw-r--r--config/BlankForm10
-rw-r--r--config/COPYING11
-rw-r--r--config/Makefile.am.blank10
-rw-r--r--config/apple10
-rw-r--r--config/cce-fflags10
-rw-r--r--config/cce-flags10
-rw-r--r--config/cmake/CMakeFindJavaCommon.cmake14
-rw-r--r--config/cmake/CTestCustom.cmake16
-rwxr-xr-xconfig/cmake/CTestScript.cmake163
-rw-r--r--config/cmake/ConfigureChecks.cmake113
-rw-r--r--config/cmake/ConversionTests.c11
-rw-r--r--config/cmake/FindHDFJAVA.cmake.in23
-rw-r--r--config/cmake/FindJNI.cmake11
-rw-r--r--config/cmake/H5cxx_config.h.in11
-rw-r--r--config/cmake/H5pubconf.h.in106
-rw-r--r--config/cmake/HDF518_Examples.cmake.in91
-rw-r--r--config/cmake/HDF5Macros.cmake35
-rw-r--r--config/cmake/HDF5UseFortran.cmake296
-rw-r--r--config/cmake/HDF5_Examples.cmake.in268
-rwxr-xr-xconfig/cmake/HDF5_Examples_options.cmake59
-rw-r--r--config/cmake/HDF5_Process_Flex_Files.cmake15
-rw-r--r--config/cmake/HDFCompilerFlags.cmake211
-rw-r--r--config/cmake/README.txt.cmake.in6
-rw-r--r--config/cmake/UseJava.cmake2
-rw-r--r--config/cmake/UserMacros/Windows_MT.cmake30
-rw-r--r--config/cmake/cacheinit.cmake15
-rw-r--r--config/cmake/hdf5-config-version.cmake.in41
-rw-r--r--config/cmake/hdf5-config.cmake.in13
-rw-r--r--config/cmake/jrunTest.cmake190
-rw-r--r--config/cmake/mccacheinit.cmake13
-rwxr-xr-xconfig/cmake/scripts/CTestScript.cmake399
-rwxr-xr-xconfig/cmake/scripts/HDF5config.cmake410
-rwxr-xr-xconfig/cmake/scripts/HDF5options.cmake52
-rw-r--r--config/cmake/userblockTest.cmake45
-rw-r--r--config/cmake/vfdTest.cmake56
-rw-r--r--config/cmake_ext_mod/CheckTypeSize.cmake35
-rw-r--r--config/cmake_ext_mod/ConfigureChecks.cmake235
-rw-r--r--config/cmake_ext_mod/FindMPI.cmake37
-rw-r--r--config/cmake_ext_mod/FindSZIP.cmake55
-rw-r--r--config/cmake_ext_mod/GetTimeOfDayTest.cpp11
-rw-r--r--config/cmake_ext_mod/HDFCXXTests.cpp11
-rw-r--r--config/cmake_ext_mod/HDFLibMacros.cmake54
-rw-r--r--config/cmake_ext_mod/HDFMacros.cmake183
-rw-r--r--config/cmake_ext_mod/HDFTests.c11
-rw-r--r--config/cmake_ext_mod/HDFUseFortran.cmake34
-rw-r--r--config/cmake_ext_mod/grepTest.cmake31
-rw-r--r--config/cmake_ext_mod/prunTest.cmake145
-rw-r--r--config/cmake_ext_mod/runTest.cmake91
-rw-r--r--config/commence.am10
-rw-r--r--config/conclude.am19
-rw-r--r--config/conclude_fc.am10
-rw-r--r--config/cygwin10
-rw-r--r--config/examples.am10
-rw-r--r--config/freebsd10
-rw-r--r--config/gnu-cxxflags10
-rw-r--r--config/gnu-fflags10
-rw-r--r--config/gnu-flags134
-rw-r--r--config/ibm-aix10
-rw-r--r--config/ibm-flags10
-rw-r--r--config/intel-fflags10
-rw-r--r--config/intel-flags10
-rw-r--r--config/linux-gnu10
-rw-r--r--config/linux-gnuaout10
-rw-r--r--config/linux-gnulibc110
-rw-r--r--config/linux-gnulibc210
-rw-r--r--config/lt_vers.am26
-rw-r--r--config/pgi-fflags10
-rw-r--r--config/pgi-flags10
-rw-r--r--config/site-specific/BlankForm10
-rw-r--r--config/solaris10
-rw-r--r--configure.ac87
-rw-r--r--examples/CMakeLists.txt12
-rw-r--r--examples/CMakeTests.cmake103
-rw-r--r--examples/Makefile.am10
-rw-r--r--examples/h5_attribute.c10
-rw-r--r--examples/h5_chunk_read.c10
-rw-r--r--examples/h5_cmprss.c10
-rw-r--r--examples/h5_compound.c10
-rw-r--r--examples/h5_crtatt.c10
-rw-r--r--examples/h5_crtdat.c10
-rw-r--r--examples/h5_crtgrp.c10
-rw-r--r--examples/h5_crtgrpar.c10
-rw-r--r--examples/h5_crtgrpd.c10
-rw-r--r--examples/h5_drivers.c10
-rw-r--r--examples/h5_dtransform.c10
-rw-r--r--examples/h5_elink_unix2win.c10
-rw-r--r--examples/h5_extend.c10
-rw-r--r--examples/h5_extend_write.c10
-rw-r--r--examples/h5_extlink.c10
-rw-r--r--examples/h5_group.c10
-rw-r--r--examples/h5_interm_group.c10
-rw-r--r--examples/h5_mount.c10
-rw-r--r--examples/h5_rdwt.c10
-rw-r--r--examples/h5_read.c10
-rw-r--r--examples/h5_ref2reg.c10
-rw-r--r--examples/h5_reference.c10
-rw-r--r--examples/h5_select.c10
-rw-r--r--examples/h5_shared_mesg.c10
-rw-r--r--examples/h5_subset.c10
-rw-r--r--examples/h5_vds-eiger.c11
-rw-r--r--examples/h5_vds-exc.c11
-rw-r--r--examples/h5_vds-exclim.c11
-rw-r--r--examples/h5_vds-percival-unlim-maxmin.c11
-rw-r--r--examples/h5_vds-percival-unlim.c11
-rw-r--r--examples/h5_vds-percival.c11
-rw-r--r--examples/h5_vds-simpleIO.c11
-rw-r--r--examples/h5_vds.c10
-rw-r--r--examples/h5_write.c10
-rw-r--r--examples/ph5example.c10
-rwxr-xr-xexamples/run-all-ex.sh10
-rw-r--r--examples/run-c-ex.sh.in10
-rw-r--r--examples/testh5cc.sh.in10
-rw-r--r--fortran/CMakeLists.txt12
-rw-r--r--fortran/COPYING11
-rw-r--r--fortran/Makefile.am10
-rw-r--r--fortran/examples/CMakeLists.txt16
-rw-r--r--fortran/examples/CMakeTests.cmake99
-rw-r--r--fortran/examples/Makefile.am10
-rw-r--r--fortran/examples/compound.f9010
-rw-r--r--fortran/examples/compound_complex_fortran2003.f9010
-rw-r--r--fortran/examples/compound_fortran2003.f9010
-rw-r--r--fortran/examples/h5_cmprss.f9010
-rw-r--r--fortran/examples/h5_crtatt.f9010
-rw-r--r--fortran/examples/h5_crtdat.f9010
-rw-r--r--fortran/examples/h5_crtgrp.f9010
-rw-r--r--fortran/examples/h5_crtgrpar.f9010
-rw-r--r--fortran/examples/h5_crtgrpd.f9010
-rw-r--r--fortran/examples/h5_extend.f9010
-rw-r--r--fortran/examples/h5_rdwt.f9010
-rw-r--r--fortran/examples/h5_subset.f9010
-rw-r--r--fortran/examples/hyperslab.f9010
-rw-r--r--fortran/examples/mountexample.f9010
-rw-r--r--fortran/examples/nested_derived_type.f9010
-rw-r--r--fortran/examples/ph5example.f9010
-rw-r--r--fortran/examples/refobjexample.f9010
-rw-r--r--fortran/examples/refregexample.f9010
-rw-r--r--fortran/examples/run-fortran-ex.sh.in10
-rw-r--r--fortran/examples/rwdset_fortran2003.f9010
-rw-r--r--fortran/examples/selectele.f9010
-rw-r--r--fortran/examples/testh5fc.sh.in10
-rw-r--r--fortran/src/CMakeLists.txt58
-rw-r--r--fortran/src/H5Af.c10
-rw-r--r--fortran/src/H5Aff.F9010
-rw-r--r--fortran/src/H5Df.c10
-rw-r--r--fortran/src/H5Dff.F9010
-rw-r--r--fortran/src/H5Ef.c10
-rw-r--r--fortran/src/H5Eff.F9010
-rw-r--r--fortran/src/H5Ff.c10
-rw-r--r--fortran/src/H5Fff.F9010
-rw-r--r--fortran/src/H5Gf.c10
-rw-r--r--fortran/src/H5Gff.F9010
-rw-r--r--fortran/src/H5If.c10
-rw-r--r--fortran/src/H5Iff.F9010
-rw-r--r--fortran/src/H5Lf.c10
-rw-r--r--fortran/src/H5Lff.F9010
-rw-r--r--fortran/src/H5Of.c10
-rw-r--r--fortran/src/H5Off.F9010
-rw-r--r--fortran/src/H5Pf.c10
-rw-r--r--fortran/src/H5Pff.F9010
-rw-r--r--fortran/src/H5Rf.c10
-rw-r--r--fortran/src/H5Rff.F9010
-rw-r--r--fortran/src/H5Sf.c10
-rw-r--r--fortran/src/H5Sff.F9010
-rw-r--r--fortran/src/H5Tf.c10
-rw-r--r--fortran/src/H5Tff.F9010
-rw-r--r--fortran/src/H5Zf.c10
-rw-r--r--fortran/src/H5Zff.F9010
-rw-r--r--fortran/src/H5_buildiface.F9020
-rw-r--r--fortran/src/H5_f.c10
-rw-r--r--fortran/src/H5_ff.F9010
-rw-r--r--fortran/src/H5config_f.inc.cmake11
-rw-r--r--fortran/src/H5config_f.inc.in11
-rw-r--r--fortran/src/H5f90.h10
-rw-r--r--fortran/src/H5f90global.F9010
-rw-r--r--fortran/src/H5f90i.h10
-rw-r--r--fortran/src/H5f90kit.c10
-rw-r--r--fortran/src/H5f90proto.h10
-rw-r--r--fortran/src/H5fort_type_defines.h.in11
-rw-r--r--fortran/src/H5fortkit.F9010
-rw-r--r--fortran/src/H5match_types.c30
-rw-r--r--fortran/src/HDF5.F9010
-rw-r--r--fortran/src/Makefile.am14
-rw-r--r--fortran/src/README260
-rw-r--r--fortran/src/README_DEVELOPEMENT38
-rw-r--r--fortran/src/h5fc.in10
-rw-r--r--fortran/test/CMakeLists.txt60
-rw-r--r--fortran/test/CMakeTests.cmake135
-rw-r--r--fortran/test/H5_test_buildiface.F9020
-rw-r--r--fortran/test/Makefile.am10
-rw-r--r--fortran/test/fflush1.F9010
-rw-r--r--fortran/test/fflush2.F9010
-rw-r--r--fortran/test/fortranlib_test.F90103
-rw-r--r--fortran/test/fortranlib_test_1_8.F9016
-rw-r--r--fortran/test/fortranlib_test_F03.F9016
-rw-r--r--fortran/test/t.c10
-rw-r--r--fortran/test/t.h10
-rw-r--r--fortran/test/tH5A.F9010
-rw-r--r--fortran/test/tH5A_1_8.F9010
-rw-r--r--fortran/test/tH5D.F9010
-rw-r--r--fortran/test/tH5E.F9010
-rw-r--r--fortran/test/tH5E_F03.F9010
-rw-r--r--fortran/test/tH5F.F9016
-rw-r--r--fortran/test/tH5F_F03.F9010
-rw-r--r--fortran/test/tH5G.F9010
-rw-r--r--fortran/test/tH5G_1_8.F9010
-rw-r--r--fortran/test/tH5I.F9010
-rw-r--r--fortran/test/tH5L_F03.F9010
-rw-r--r--fortran/test/tH5MISC_1_8.F9010
-rw-r--r--fortran/test/tH5O.F9010
-rw-r--r--fortran/test/tH5O_F03.F9010
-rw-r--r--fortran/test/tH5P.F9010
-rw-r--r--fortran/test/tH5P_F03.F9010
-rw-r--r--fortran/test/tH5R.F9010
-rw-r--r--fortran/test/tH5S.F9010
-rw-r--r--fortran/test/tH5Sselect.F9010
-rw-r--r--fortran/test/tH5T.F9010
-rw-r--r--fortran/test/tH5T_F03.F9010
-rw-r--r--fortran/test/tH5VL.F9010
-rw-r--r--fortran/test/tH5Z.F9010
-rw-r--r--fortran/test/tHDF5.F9010
-rw-r--r--fortran/test/tHDF5_1_8.F9010
-rw-r--r--fortran/test/tHDF5_F03.F9010
-rw-r--r--fortran/test/tf.F9010
-rw-r--r--fortran/testpar/CMakeLists.txt10
-rw-r--r--fortran/testpar/CMakeTests.cmake11
-rw-r--r--fortran/testpar/Makefile.am10
-rw-r--r--fortran/testpar/hyper.f9010
-rw-r--r--fortran/testpar/mdset.f9010
-rw-r--r--fortran/testpar/ptest.f9010
-rw-r--r--hl/CMakeLists.txt10
-rw-r--r--hl/COPYING11
-rw-r--r--hl/Makefile.am10
-rw-r--r--hl/c++/CMakeLists.txt6
-rw-r--r--hl/c++/COPYING11
-rw-r--r--hl/c++/Makefile.am10
-rw-r--r--hl/c++/examples/CMakeLists.txt4
-rw-r--r--hl/c++/examples/CMakeTests.cmake28
-rw-r--r--hl/c++/examples/Makefile.am10
-rw-r--r--hl/c++/examples/ptExampleFL.cpp10
-rw-r--r--hl/c++/examples/run-hlc++-ex.sh.in10
-rw-r--r--hl/c++/src/CMakeLists.txt9
-rw-r--r--hl/c++/src/H5PacketTable.cpp10
-rw-r--r--hl/c++/src/H5PacketTable.h10
-rw-r--r--hl/c++/src/Makefile.am10
-rw-r--r--hl/c++/test/CMakeLists.txt2
-rw-r--r--hl/c++/test/CMakeTests.cmake26
-rw-r--r--hl/c++/test/Makefile.am10
-rw-r--r--hl/c++/test/ptableTest.cpp10
-rw-r--r--hl/c++/test/ptableTest.h10
-rw-r--r--hl/examples/CMakeLists.txt6
-rw-r--r--hl/examples/CMakeTests.cmake38
-rw-r--r--hl/examples/Makefile.am10
-rw-r--r--hl/examples/ex_ds1.c10
-rw-r--r--hl/examples/ex_image1.c10
-rw-r--r--hl/examples/ex_image2.c10
-rw-r--r--hl/examples/ex_lite1.c10
-rw-r--r--hl/examples/ex_lite2.c10
-rw-r--r--hl/examples/ex_lite3.c10
-rw-r--r--hl/examples/ex_table_01.c10
-rw-r--r--hl/examples/ex_table_02.c10
-rw-r--r--hl/examples/ex_table_03.c10
-rw-r--r--hl/examples/ex_table_04.c10
-rw-r--r--hl/examples/ex_table_05.c10
-rw-r--r--hl/examples/ex_table_06.c10
-rw-r--r--hl/examples/ex_table_07.c10
-rw-r--r--hl/examples/ex_table_08.c10
-rw-r--r--hl/examples/ex_table_09.c10
-rw-r--r--hl/examples/ex_table_10.c10
-rw-r--r--hl/examples/ex_table_11.c10
-rw-r--r--hl/examples/ex_table_12.c10
-rw-r--r--hl/examples/pal_rgb.h10
-rw-r--r--hl/examples/ptExampleFL.c10
-rwxr-xr-xhl/examples/run-hl-ex.sh10
-rw-r--r--hl/examples/run-hlc-ex.sh.in10
-rw-r--r--hl/fortran/CMakeLists.txt6
-rw-r--r--hl/fortran/COPYING11
-rw-r--r--hl/fortran/Makefile.am10
-rw-r--r--hl/fortran/examples/CMakeLists.txt8
-rw-r--r--hl/fortran/examples/CMakeTests.cmake28
-rw-r--r--hl/fortran/examples/Makefile.am10
-rw-r--r--hl/fortran/examples/ex_ds1.f9010
-rw-r--r--hl/fortran/examples/exlite.f9010
-rw-r--r--hl/fortran/examples/run-hlfortran-ex.sh.in10
-rw-r--r--hl/fortran/src/CMakeLists.txt44
-rw-r--r--hl/fortran/src/H5DSfc.c10
-rw-r--r--hl/fortran/src/H5DSff.F9010
-rw-r--r--hl/fortran/src/H5HL_buildiface.F9030
-rw-r--r--hl/fortran/src/H5IMcc.c13
-rw-r--r--hl/fortran/src/H5IMcc.h10
-rw-r--r--hl/fortran/src/H5IMfc.c10
-rw-r--r--hl/fortran/src/H5IMff.F9010
-rw-r--r--hl/fortran/src/H5LTf90proto.h10
-rw-r--r--hl/fortran/src/H5LTfc.c10
-rw-r--r--hl/fortran/src/H5LTff.F9010
-rw-r--r--hl/fortran/src/H5TBfc.c10
-rw-r--r--hl/fortran/src/H5TBff.F9010
-rw-r--r--hl/fortran/src/Makefile.am10
-rw-r--r--hl/fortran/test/CMakeLists.txt10
-rw-r--r--hl/fortran/test/CMakeTests.cmake133
-rw-r--r--hl/fortran/test/Makefile.am10
-rw-r--r--hl/fortran/test/tstds.F9010
-rw-r--r--hl/fortran/test/tstimage.F9010
-rw-r--r--hl/fortran/test/tstlite.F9010
-rw-r--r--hl/fortran/test/tsttable.F9010
-rw-r--r--hl/src/CMakeLists.txt9
-rw-r--r--hl/src/COPYING11
-rw-r--r--hl/src/H5DO.c93
-rw-r--r--hl/src/H5DOpublic.h16
-rw-r--r--hl/src/H5DS.c10
-rw-r--r--hl/src/H5DSprivate.h10
-rw-r--r--hl/src/H5DSpublic.h10
-rw-r--r--hl/src/H5HLprivate2.h10
-rw-r--r--hl/src/H5IM.c21
-rw-r--r--hl/src/H5IMprivate.h10
-rw-r--r--hl/src/H5IMpublic.h10
-rw-r--r--hl/src/H5LD.c10
-rw-r--r--hl/src/H5LDprivate.h10
-rw-r--r--hl/src/H5LDpublic.h10
-rw-r--r--hl/src/H5LT.c10
-rw-r--r--hl/src/H5LTanalyze.c10
-rw-r--r--hl/src/H5LTanalyze.l10
-rw-r--r--hl/src/H5LTparse.y10
-rw-r--r--hl/src/H5LTprivate.h10
-rw-r--r--hl/src/H5LTpublic.h10
-rw-r--r--hl/src/H5PT.c10
-rw-r--r--hl/src/H5PTprivate.h10
-rw-r--r--hl/src/H5PTpublic.h10
-rw-r--r--hl/src/H5TB.c10
-rw-r--r--hl/src/H5TBprivate.h10
-rw-r--r--hl/src/H5TBpublic.h10
-rw-r--r--hl/src/Makefile.am10
-rw-r--r--hl/src/hdf5_hl.h10
-rw-r--r--hl/test/CMakeLists.txt12
-rw-r--r--hl/test/CMakeTests.cmake34
-rw-r--r--hl/test/COPYING11
-rw-r--r--hl/test/H5srcdir_str.h.in10
-rw-r--r--hl/test/Makefile.am13
-rw-r--r--hl/test/dectris_hl_perf.c10
-rw-r--r--hl/test/gen_test_ds.c10
-rw-r--r--hl/test/gen_test_ld.c11
-rw-r--r--hl/test/h5hltest.h10
-rw-r--r--hl/test/pal_rgb.h10
-rw-r--r--hl/test/test_ds.c10
-rw-r--r--hl/test/test_dset_append.c10
-rw-r--r--hl/test/test_dset_opt.c1127
-rw-r--r--hl/test/test_file_image.c10
-rw-r--r--hl/test/test_image.c10
-rw-r--r--hl/test/test_ld.c10
-rw-r--r--hl/test/test_lite.c10
-rw-r--r--hl/test/test_packet.c10
-rw-r--r--hl/test/test_packet_vlen.c10
-rw-r--r--hl/test/test_table.c10
-rw-r--r--hl/tools/CMakeLists.txt2
-rw-r--r--hl/tools/COPYING11
-rw-r--r--hl/tools/Makefile.am10
-rw-r--r--hl/tools/gif2h5/CMakeLists.txt24
-rw-r--r--hl/tools/gif2h5/CMakeTests.cmake11
-rw-r--r--hl/tools/gif2h5/Makefile.am10
-rw-r--r--hl/tools/gif2h5/decompress.c10
-rw-r--r--hl/tools/gif2h5/gif.h10
-rw-r--r--hl/tools/gif2h5/gif2hdf.c10
-rw-r--r--hl/tools/gif2h5/gif2mem.c20
-rw-r--r--hl/tools/gif2h5/gifread.c10
-rw-r--r--hl/tools/gif2h5/h52gifgentst.c10
-rw-r--r--hl/tools/gif2h5/h52giftest.sh.in10
-rw-r--r--hl/tools/gif2h5/hdf2gif.c10
-rw-r--r--hl/tools/gif2h5/hdfgifwr.c10
-rw-r--r--hl/tools/gif2h5/writehdf.c10
-rw-r--r--hl/tools/h5watch/CMakeLists.txt28
-rw-r--r--hl/tools/h5watch/CMakeTests.cmake77
-rw-r--r--hl/tools/h5watch/Makefile.am20
-rw-r--r--hl/tools/h5watch/extend_dset.c380
-rw-r--r--hl/tools/h5watch/h5watch.c513
-rw-r--r--hl/tools/h5watch/h5watchgentest.c10
-rw-r--r--hl/tools/h5watch/swmr_check_compat_vfd.c10
-rw-r--r--hl/tools/h5watch/testh5watch.sh.in177
-rw-r--r--hl/tools/testfiles/w-ext-cmpd-esc-f1.ddl5
-rw-r--r--hl/tools/testfiles/w-ext-cmpd-esc-f3.ddl5
-rw-r--r--hl/tools/testfiles/w-ext-cmpd-esc-ff2.ddl10
-rw-r--r--hl/tools/testfiles/w-ext-cmpd-esc.ddl12
-rw-r--r--hl/tools/testfiles/w-ext-cmpd-f1.ddl10
-rw-r--r--hl/tools/testfiles/w-ext-cmpd-f2.ddl5
-rw-r--r--hl/tools/testfiles/w-ext-cmpd-ff3.ddl8
-rw-r--r--hl/tools/testfiles/w-ext-cmpd-label.ddl8
-rw-r--r--hl/tools/testfiles/w-ext-cmpd-two-f1.ddl56
-rw-r--r--hl/tools/testfiles/w-ext-cmpd-two-f3.ddl41
-rw-r--r--hl/tools/testfiles/w-ext-cmpd-two-ff2.ddl47
-rw-r--r--hl/tools/testfiles/w-ext-cmpd-two.ddl66
-rw-r--r--hl/tools/testfiles/w-ext-cmpd.ddl6
-rw-r--r--hl/tools/testfiles/w-ext-early.ddl9
-rw-r--r--hl/tools/testfiles/w-ext-late.ddl8
-rw-r--r--hl/tools/testfiles/w-ext-one-d.ddl3
-rw-r--r--hl/tools/testfiles/w-ext-one-simple.ddl9
-rw-r--r--hl/tools/testfiles/w-ext-one.ddl5
-rw-r--r--hl/tools/testfiles/w-ext-two-d.ddl18
-rw-r--r--hl/tools/testfiles/w-ext-two-width.ddl42
-rw-r--r--hl/tools/testfiles/w-ext-two.ddl38
-rw-r--r--java/CMakeLists.txt12
-rw-r--r--java/COPYING11
-rw-r--r--java/Makefile.am10
-rw-r--r--java/examples/CMakeLists.txt2
-rw-r--r--java/examples/Makefile.am10
-rw-r--r--java/examples/datasets/CMakeLists.txt40
-rw-r--r--java/examples/datasets/H5Ex_D_Alloc.java10
-rw-r--r--java/examples/datasets/H5Ex_D_Checksum.java10
-rw-r--r--java/examples/datasets/H5Ex_D_Chunk.java15
-rw-r--r--java/examples/datasets/H5Ex_D_Compact.java15
-rw-r--r--java/examples/datasets/H5Ex_D_External.java10
-rw-r--r--java/examples/datasets/H5Ex_D_FillValue.java10
-rw-r--r--java/examples/datasets/H5Ex_D_Gzip.java10
-rw-r--r--java/examples/datasets/H5Ex_D_Hyperslab.java10
-rw-r--r--java/examples/datasets/H5Ex_D_Nbit.java10
-rw-r--r--java/examples/datasets/H5Ex_D_ReadWrite.java10
-rw-r--r--java/examples/datasets/H5Ex_D_Shuffle.java10
-rw-r--r--java/examples/datasets/H5Ex_D_Sofloat.java10
-rw-r--r--java/examples/datasets/H5Ex_D_Soint.java10
-rw-r--r--java/examples/datasets/H5Ex_D_Szip.java10
-rw-r--r--java/examples/datasets/H5Ex_D_Transform.java10
-rw-r--r--java/examples/datasets/H5Ex_D_UnlimitedAdd.java10
-rw-r--r--java/examples/datasets/H5Ex_D_UnlimitedGzip.java10
-rw-r--r--java/examples/datasets/H5Ex_D_UnlimitedMod.java10
-rw-r--r--java/examples/datasets/Makefile.am10
-rw-r--r--java/examples/datasets/runExample.sh.in12
-rw-r--r--java/examples/datatypes/CMakeLists.txt28
-rw-r--r--java/examples/datatypes/H5Ex_T_Array.java10
-rw-r--r--java/examples/datatypes/H5Ex_T_ArrayAttribute.java10
-rw-r--r--java/examples/datatypes/H5Ex_T_Bit.java10
-rw-r--r--java/examples/datatypes/H5Ex_T_BitAttribute.java10
-rw-r--r--java/examples/datatypes/H5Ex_T_Commit.java10
-rw-r--r--java/examples/datatypes/H5Ex_T_Compound.java10
-rw-r--r--java/examples/datatypes/H5Ex_T_CompoundAttribute.java10
-rw-r--r--java/examples/datatypes/H5Ex_T_Float.java10
-rw-r--r--java/examples/datatypes/H5Ex_T_FloatAttribute.java10
-rw-r--r--java/examples/datatypes/H5Ex_T_Integer.java10
-rw-r--r--java/examples/datatypes/H5Ex_T_IntegerAttribute.java10
-rw-r--r--java/examples/datatypes/H5Ex_T_ObjectReference.java10
-rw-r--r--java/examples/datatypes/H5Ex_T_ObjectReferenceAttribute.java10
-rw-r--r--java/examples/datatypes/H5Ex_T_Opaque.java10
-rw-r--r--java/examples/datatypes/H5Ex_T_OpaqueAttribute.java10
-rw-r--r--java/examples/datatypes/H5Ex_T_String.java10
-rw-r--r--java/examples/datatypes/H5Ex_T_StringAttribute.java10
-rw-r--r--java/examples/datatypes/H5Ex_T_VLString.java10
-rw-r--r--java/examples/datatypes/Makefile.am10
-rw-r--r--java/examples/datatypes/runExample.sh.in11
-rw-r--r--java/examples/groups/CMakeLists.txt38
-rw-r--r--java/examples/groups/H5Ex_G_Compact.java10
-rw-r--r--java/examples/groups/H5Ex_G_Corder.java10
-rw-r--r--java/examples/groups/H5Ex_G_Create.java10
-rw-r--r--java/examples/groups/H5Ex_G_Intermediate.java10
-rw-r--r--java/examples/groups/H5Ex_G_Iterate.java10
-rw-r--r--java/examples/groups/H5Ex_G_Phase.java10
-rw-r--r--java/examples/groups/H5Ex_G_Traverse.java10
-rw-r--r--java/examples/groups/H5Ex_G_Visit.java10
-rw-r--r--java/examples/groups/Makefile.am10
-rw-r--r--java/examples/groups/runExample.sh.in18
-rw-r--r--java/examples/intro/CMakeLists.txt30
-rw-r--r--java/examples/intro/H5_CreateAttribute.java10
-rw-r--r--java/examples/intro/H5_CreateDataset.java10
-rw-r--r--java/examples/intro/H5_CreateFile.java10
-rw-r--r--java/examples/intro/H5_CreateGroup.java10
-rw-r--r--java/examples/intro/H5_CreateGroupAbsoluteRelative.java10
-rw-r--r--java/examples/intro/H5_CreateGroupDataset.java10
-rw-r--r--java/examples/intro/H5_ReadWrite.java10
-rw-r--r--java/examples/intro/Makefile.am10
-rw-r--r--java/examples/intro/runExample.sh.in11
-rw-r--r--java/src/CMakeLists.txt2
-rw-r--r--java/src/Makefile.am11
-rw-r--r--java/src/hdf/CMakeLists.txt2
-rw-r--r--java/src/hdf/hdf5lib/CMakeLists.txt4
-rw-r--r--java/src/hdf/hdf5lib/H5.java232
-rw-r--r--java/src/hdf/hdf5lib/HDF5Constants.java34
-rw-r--r--java/src/hdf/hdf5lib/HDF5GroupInfo.java10
-rw-r--r--java/src/hdf/hdf5lib/HDFArray.java10
-rw-r--r--java/src/hdf/hdf5lib/HDFNativeData.java10
-rw-r--r--java/src/hdf/hdf5lib/callbacks/Callbacks.java10
-rw-r--r--java/src/hdf/hdf5lib/callbacks/H5A_iterate_cb.java10
-rw-r--r--java/src/hdf/hdf5lib/callbacks/H5A_iterate_t.java10
-rw-r--r--java/src/hdf/hdf5lib/callbacks/H5D_append_cb.java10
-rw-r--r--java/src/hdf/hdf5lib/callbacks/H5D_append_t.java10
-rw-r--r--java/src/hdf/hdf5lib/callbacks/H5D_iterate_cb.java10
-rw-r--r--java/src/hdf/hdf5lib/callbacks/H5D_iterate_t.java10
-rw-r--r--java/src/hdf/hdf5lib/callbacks/H5E_walk_cb.java10
-rw-r--r--java/src/hdf/hdf5lib/callbacks/H5E_walk_t.java10
-rw-r--r--java/src/hdf/hdf5lib/callbacks/H5L_iterate_cb.java10
-rw-r--r--java/src/hdf/hdf5lib/callbacks/H5L_iterate_t.java10
-rw-r--r--java/src/hdf/hdf5lib/callbacks/H5O_iterate_cb.java10
-rw-r--r--java/src/hdf/hdf5lib/callbacks/H5O_iterate_t.java10
-rw-r--r--java/src/hdf/hdf5lib/callbacks/H5P_cls_close_func_cb.java10
-rw-r--r--java/src/hdf/hdf5lib/callbacks/H5P_cls_close_func_t.java10
-rw-r--r--java/src/hdf/hdf5lib/callbacks/H5P_cls_copy_func_cb.java10
-rw-r--r--java/src/hdf/hdf5lib/callbacks/H5P_cls_copy_func_t.java10
-rw-r--r--java/src/hdf/hdf5lib/callbacks/H5P_cls_create_func_cb.java10
-rw-r--r--java/src/hdf/hdf5lib/callbacks/H5P_cls_create_func_t.java10
-rw-r--r--java/src/hdf/hdf5lib/callbacks/H5P_iterate_cb.java10
-rw-r--r--java/src/hdf/hdf5lib/callbacks/H5P_iterate_t.java10
-rw-r--r--java/src/hdf/hdf5lib/callbacks/H5P_prp_close_func_cb.java10
-rw-r--r--java/src/hdf/hdf5lib/callbacks/H5P_prp_compare_func_cb.java10
-rw-r--r--java/src/hdf/hdf5lib/callbacks/H5P_prp_copy_func_cb.java10
-rw-r--r--java/src/hdf/hdf5lib/callbacks/H5P_prp_create_func_cb.java10
-rw-r--r--java/src/hdf/hdf5lib/callbacks/H5P_prp_delete_func_cb.java10
-rw-r--r--java/src/hdf/hdf5lib/callbacks/H5P_prp_get_func_cb.java10
-rw-r--r--java/src/hdf/hdf5lib/callbacks/H5P_prp_set_func_cb.java10
-rw-r--r--java/src/hdf/hdf5lib/exceptions/HDF5AtomException.java10
-rw-r--r--java/src/hdf/hdf5lib/exceptions/HDF5AttributeException.java10
-rw-r--r--java/src/hdf/hdf5lib/exceptions/HDF5BtreeException.java10
-rw-r--r--java/src/hdf/hdf5lib/exceptions/HDF5DataFiltersException.java10
-rw-r--r--java/src/hdf/hdf5lib/exceptions/HDF5DataStorageException.java10
-rw-r--r--java/src/hdf/hdf5lib/exceptions/HDF5DatasetInterfaceException.java10
-rw-r--r--java/src/hdf/hdf5lib/exceptions/HDF5DataspaceInterfaceException.java10
-rw-r--r--java/src/hdf/hdf5lib/exceptions/HDF5DatatypeInterfaceException.java10
-rw-r--r--java/src/hdf/hdf5lib/exceptions/HDF5Exception.java10
-rw-r--r--java/src/hdf/hdf5lib/exceptions/HDF5ExternalFileListException.java10
-rw-r--r--java/src/hdf/hdf5lib/exceptions/HDF5FileInterfaceException.java10
-rw-r--r--java/src/hdf/hdf5lib/exceptions/HDF5FunctionArgumentException.java10
-rw-r--r--java/src/hdf/hdf5lib/exceptions/HDF5FunctionEntryExitException.java10
-rw-r--r--java/src/hdf/hdf5lib/exceptions/HDF5HeapException.java10
-rw-r--r--java/src/hdf/hdf5lib/exceptions/HDF5InternalErrorException.java10
-rw-r--r--java/src/hdf/hdf5lib/exceptions/HDF5JavaException.java10
-rw-r--r--java/src/hdf/hdf5lib/exceptions/HDF5LibraryException.java10
-rw-r--r--java/src/hdf/hdf5lib/exceptions/HDF5LowLevelIOException.java10
-rw-r--r--java/src/hdf/hdf5lib/exceptions/HDF5MetaDataCacheException.java10
-rw-r--r--java/src/hdf/hdf5lib/exceptions/HDF5ObjectHeaderException.java10
-rw-r--r--java/src/hdf/hdf5lib/exceptions/HDF5PropertyListInterfaceException.java10
-rw-r--r--java/src/hdf/hdf5lib/exceptions/HDF5ReferenceException.java10
-rw-r--r--java/src/hdf/hdf5lib/exceptions/HDF5ResourceUnavailableException.java10
-rw-r--r--java/src/hdf/hdf5lib/exceptions/HDF5SymbolTableException.java10
-rw-r--r--java/src/hdf/hdf5lib/structs/H5AC_cache_config_t.java10
-rw-r--r--java/src/hdf/hdf5lib/structs/H5A_info_t.java10
-rw-r--r--java/src/hdf/hdf5lib/structs/H5E_error2_t.java10
-rw-r--r--java/src/hdf/hdf5lib/structs/H5F_info2_t.java10
-rw-r--r--java/src/hdf/hdf5lib/structs/H5G_info_t.java10
-rw-r--r--java/src/hdf/hdf5lib/structs/H5L_info_t.java10
-rw-r--r--java/src/hdf/hdf5lib/structs/H5O_hdr_info_t.java10
-rw-r--r--java/src/hdf/hdf5lib/structs/H5O_info_t.java10
-rw-r--r--java/src/hdf/hdf5lib/structs/H5_ih_info_t.java10
-rw-r--r--java/src/jni/CMakeLists.txt10
-rw-r--r--java/src/jni/Makefile.am12
-rw-r--r--java/src/jni/exceptionImp.c12
-rw-r--r--java/src/jni/exceptionImp.h10
-rw-r--r--java/src/jni/h5Constants.c1244
-rw-r--r--java/src/jni/h5Imp.c12
-rw-r--r--java/src/jni/h5Imp.h10
-rw-r--r--java/src/jni/h5aImp.c12
-rw-r--r--java/src/jni/h5aImp.h10
-rw-r--r--java/src/jni/h5dImp.c103
-rw-r--r--java/src/jni/h5dImp.h23
-rw-r--r--java/src/jni/h5eImp.c12
-rw-r--r--java/src/jni/h5eImp.h10
-rw-r--r--java/src/jni/h5fImp.c12
-rw-r--r--java/src/jni/h5fImp.h18
-rw-r--r--java/src/jni/h5gImp.c12
-rw-r--r--java/src/jni/h5gImp.h14
-rw-r--r--java/src/jni/h5iImp.c12
-rw-r--r--java/src/jni/h5iImp.h10
-rw-r--r--java/src/jni/h5jni.h12
-rw-r--r--java/src/jni/h5lImp.c12
-rw-r--r--java/src/jni/h5lImp.h10
-rw-r--r--java/src/jni/h5oImp.c12
-rw-r--r--java/src/jni/h5oImp.h14
-rw-r--r--java/src/jni/h5pImp.c127
-rw-r--r--java/src/jni/h5pImp.h61
-rw-r--r--java/src/jni/h5plImp.c171
-rw-r--r--java/src/jni/h5plImp.h73
-rw-r--r--java/src/jni/h5rImp.c12
-rw-r--r--java/src/jni/h5rImp.h10
-rw-r--r--java/src/jni/h5sImp.c12
-rw-r--r--java/src/jni/h5sImp.h10
-rw-r--r--java/src/jni/h5tImp.c12
-rw-r--r--java/src/jni/h5tImp.h14
-rw-r--r--java/src/jni/h5util.c44
-rw-r--r--java/src/jni/h5util.h12
-rw-r--r--java/src/jni/h5zImp.c12
-rw-r--r--java/src/jni/h5zImp.h10
-rw-r--r--java/src/jni/nativeData.c12
-rw-r--r--java/src/jni/nativeData.h10
-rw-r--r--java/test/CMakeLists.txt16
-rw-r--r--java/test/JUnit-interface.txt7
-rw-r--r--java/test/Makefile.am10
-rw-r--r--java/test/TestAll.java10
-rw-r--r--java/test/TestH5.java14
-rw-r--r--java/test/TestH5A.java10
-rw-r--r--java/test/TestH5D.java10
-rw-r--r--java/test/TestH5Dparams.java10
-rw-r--r--java/test/TestH5Dplist.java10
-rw-r--r--java/test/TestH5E.java14
-rw-r--r--java/test/TestH5Edefault.java10
-rw-r--r--java/test/TestH5Eregister.java10
-rw-r--r--java/test/TestH5F.java10
-rw-r--r--java/test/TestH5Fbasic.java10
-rw-r--r--java/test/TestH5Fparams.java10
-rw-r--r--java/test/TestH5Fswmr.java10
-rw-r--r--java/test/TestH5G.java10
-rw-r--r--java/test/TestH5Gbasic.java10
-rw-r--r--java/test/TestH5Giterate.java10
-rw-r--r--java/test/TestH5Lbasic.java10
-rw-r--r--java/test/TestH5Lcreate.java10
-rw-r--r--java/test/TestH5Lparams.java10
-rw-r--r--java/test/TestH5Obasic.java10
-rw-r--r--java/test/TestH5Ocopy.java11
-rw-r--r--java/test/TestH5Ocreate.java10
-rw-r--r--java/test/TestH5Oparams.java10
-rw-r--r--java/test/TestH5P.java51
-rw-r--r--java/test/TestH5PData.java10
-rw-r--r--java/test/TestH5PL.java40
-rw-r--r--java/test/TestH5Pfapl.java10
-rw-r--r--java/test/TestH5Plist.java10
-rw-r--r--java/test/TestH5Pvirtual.java15
-rw-r--r--java/test/TestH5R.java10
-rw-r--r--java/test/TestH5S.java10
-rw-r--r--java/test/TestH5Sbasic.java10
-rw-r--r--java/test/TestH5T.java10
-rw-r--r--java/test/TestH5Tbasic.java10
-rw-r--r--java/test/TestH5Tparams.java10
-rw-r--r--java/test/TestH5Z.java10
-rw-r--r--java/test/junit.sh.in19
-rw-r--r--m4/aclocal_cxx.m410
-rw-r--r--m4/aclocal_fc.f9013
-rw-r--r--m4/aclocal_fc.m410
-rw-r--r--release_docs/COPYING11
-rw-r--r--release_docs/HISTORY-1_10.txt2101
-rw-r--r--release_docs/HISTORY-1_8.txt12344
-rw-r--r--release_docs/HISTORY-1_8_0-1_10_0.txt1742
-rw-r--r--release_docs/HISTORY-1_9.txt6
-rw-r--r--release_docs/INSTALL_CMake.txt477
-rw-r--r--release_docs/INSTALL_parallel12
-rw-r--r--release_docs/RELEASE.txt1751
-rw-r--r--release_docs/USING_CMake_Examples.txt4
-rw-r--r--release_docs/USING_HDF5_CMake.txt236
-rw-r--r--src/CMakeLists.txt56
-rw-r--r--src/COPYING11
-rw-r--r--src/H5.c16
-rw-r--r--src/H5A.c10
-rw-r--r--src/H5AC.c702
-rw-r--r--src/H5ACdbg.c253
-rw-r--r--src/H5AClog.c105
-rw-r--r--src/H5ACmodule.h10
-rw-r--r--src/H5ACmpio.c92
-rw-r--r--src/H5ACpkg.h27
-rw-r--r--src/H5ACprivate.h178
-rw-r--r--src/H5ACproxy_entry.c39
-rw-r--r--src/H5ACpublic.h251
-rw-r--r--src/H5Abtree2.c10
-rw-r--r--src/H5Adense.c10
-rw-r--r--src/H5Adeprec.c10
-rw-r--r--src/H5Aint.c10
-rw-r--r--src/H5Amodule.h10
-rw-r--r--src/H5Apkg.h10
-rw-r--r--src/H5Aprivate.h10
-rw-r--r--src/H5Apublic.h10
-rw-r--r--src/H5Atest.c10
-rw-r--r--src/H5B.c10
-rw-r--r--src/H5B2.c10
-rw-r--r--src/H5B2cache.c16
-rw-r--r--src/H5B2dbg.c10
-rw-r--r--src/H5B2hdr.c10
-rw-r--r--src/H5B2int.c10
-rw-r--r--src/H5B2internal.c10
-rw-r--r--src/H5B2leaf.c10
-rw-r--r--src/H5B2module.h10
-rw-r--r--src/H5B2pkg.h19
-rw-r--r--src/H5B2private.h10
-rw-r--r--src/H5B2public.h10
-rw-r--r--src/H5B2stat.c10
-rw-r--r--src/H5B2test.c10
-rw-r--r--src/H5Bcache.c10
-rw-r--r--src/H5Bdbg.c10
-rw-r--r--src/H5Bmodule.h10
-rw-r--r--src/H5Bpkg.h13
-rw-r--r--src/H5Bprivate.h10
-rw-r--r--src/H5Bpublic.h10
-rw-r--r--src/H5C.c2903
-rw-r--r--src/H5CS.c10
-rw-r--r--src/H5CSprivate.h10
-rw-r--r--src/H5Cdbg.c780
-rw-r--r--src/H5Cepoch.c32
-rw-r--r--src/H5Cimage.c3569
-rw-r--r--src/H5Clog.c10
-rw-r--r--src/H5Cmodule.h10
-rw-r--r--src/H5Cmpio.c1426
-rw-r--r--src/H5Cpkg.h1123
-rw-r--r--src/H5Cprefetched.c350
-rw-r--r--src/H5Cprivate.h666
-rw-r--r--src/H5Cpublic.h10
-rw-r--r--src/H5Cquery.c50
-rw-r--r--src/H5Ctag.c59
-rw-r--r--src/H5Ctest.c10
-rw-r--r--src/H5D.c50
-rw-r--r--src/H5Dbtree.c10
-rw-r--r--src/H5Dbtree2.c10
-rw-r--r--src/H5Dchunk.c324
-rw-r--r--src/H5Dcompact.c16
-rw-r--r--src/H5Dcontig.c10
-rw-r--r--src/H5Ddbg.c10
-rw-r--r--src/H5Ddeprec.c10
-rw-r--r--src/H5Dearray.c25
-rw-r--r--src/H5Defl.c10
-rw-r--r--src/H5Dfarray.c10
-rw-r--r--src/H5Dfill.c10
-rw-r--r--src/H5Dint.c15
-rw-r--r--src/H5Dio.c159
-rw-r--r--src/H5Dlayout.c22
-rw-r--r--src/H5Dmodule.h10
-rw-r--r--src/H5Dmpio.c10
-rw-r--r--src/H5Dnone.c10
-rw-r--r--src/H5Doh.c10
-rw-r--r--src/H5Dpkg.h14
-rw-r--r--src/H5Dprivate.h10
-rw-r--r--src/H5Dpublic.h16
-rw-r--r--src/H5Dscatgath.c10
-rw-r--r--src/H5Dselect.c10
-rw-r--r--src/H5Dsingle.c10
-rw-r--r--src/H5Dtest.c51
-rw-r--r--src/H5Dvirtual.c10
-rw-r--r--src/H5E.c10
-rw-r--r--src/H5EA.c10
-rw-r--r--src/H5EAcache.c20
-rw-r--r--src/H5EAdbg.c10
-rw-r--r--src/H5EAdblkpage.c10
-rw-r--r--src/H5EAdblock.c10
-rw-r--r--src/H5EAhdr.c10
-rw-r--r--src/H5EAiblock.c10
-rw-r--r--src/H5EAint.c10
-rw-r--r--src/H5EAmodule.h10
-rw-r--r--src/H5EApkg.h25
-rw-r--r--src/H5EAprivate.h10
-rw-r--r--src/H5EAsblock.c10
-rw-r--r--src/H5EAstat.c10
-rw-r--r--src/H5EAtest.c10
-rw-r--r--src/H5Edeprec.c10
-rw-r--r--src/H5Eint.c10
-rw-r--r--src/H5Emodule.h10
-rw-r--r--src/H5Epkg.h10
-rw-r--r--src/H5Eprivate.h10
-rw-r--r--src/H5Epublic.h10
-rw-r--r--src/H5F.c228
-rw-r--r--src/H5FA.c10
-rw-r--r--src/H5FAcache.c16
-rw-r--r--src/H5FAdbg.c10
-rw-r--r--src/H5FAdblkpage.c10
-rw-r--r--src/H5FAdblock.c10
-rw-r--r--src/H5FAhdr.c10
-rw-r--r--src/H5FAint.c10
-rw-r--r--src/H5FAmodule.h10
-rw-r--r--src/H5FApkg.h19
-rw-r--r--src/H5FAprivate.h10
-rw-r--r--src/H5FAstat.c10
-rw-r--r--src/H5FAtest.c10
-rw-r--r--src/H5FD.c79
-rw-r--r--src/H5FDcore.c10
-rw-r--r--src/H5FDcore.h10
-rw-r--r--src/H5FDdirect.c10
-rw-r--r--src/H5FDdirect.h10
-rw-r--r--src/H5FDdrvr_module.h10
-rw-r--r--src/H5FDfamily.c12
-rw-r--r--src/H5FDfamily.h10
-rw-r--r--src/H5FDint.c69
-rw-r--r--src/H5FDlog.c18
-rw-r--r--src/H5FDlog.h10
-rw-r--r--src/H5FDmodule.h10
-rw-r--r--src/H5FDmpi.c49
-rw-r--r--src/H5FDmpi.h10
-rw-r--r--src/H5FDmpio.c47
-rw-r--r--src/H5FDmpio.h10
-rw-r--r--src/H5FDmulti.c80
-rw-r--r--src/H5FDmulti.h10
-rw-r--r--src/H5FDpkg.h10
-rw-r--r--src/H5FDprivate.h51
-rw-r--r--src/H5FDpublic.h24
-rw-r--r--src/H5FDsec2.c10
-rw-r--r--src/H5FDsec2.h10
-rw-r--r--src/H5FDspace.c119
-rw-r--r--src/H5FDstdio.c17
-rw-r--r--src/H5FDstdio.h10
-rw-r--r--src/H5FDtest.c10
-rw-r--r--src/H5FDwindows.c10
-rw-r--r--src/H5FDwindows.h10
-rw-r--r--src/H5FL.c10
-rw-r--r--src/H5FLmodule.h10
-rw-r--r--src/H5FLprivate.h10
-rw-r--r--src/H5FO.c10
-rw-r--r--src/H5FOprivate.h10
-rw-r--r--src/H5FS.c52
-rw-r--r--src/H5FScache.c111
-rw-r--r--src/H5FSdbg.c10
-rw-r--r--src/H5FSint.c10
-rw-r--r--src/H5FSmodule.h10
-rw-r--r--src/H5FSpkg.h18
-rw-r--r--src/H5FSprivate.h36
-rw-r--r--src/H5FSpublic.h10
-rw-r--r--src/H5FSsection.c329
-rw-r--r--src/H5FSstat.c10
-rw-r--r--src/H5FStest.c10
-rw-r--r--src/H5Faccum.c98
-rw-r--r--src/H5Fcwfs.c10
-rw-r--r--src/H5Fdbg.c10
-rw-r--r--src/H5Fdeprec.c10
-rw-r--r--src/H5Fefc.c10
-rw-r--r--src/H5Ffake.c10
-rw-r--r--src/H5Fint.c462
-rw-r--r--src/H5Fio.c89
-rw-r--r--src/H5Fmodule.h10
-rw-r--r--src/H5Fmount.c20
-rw-r--r--src/H5Fmpi.c36
-rw-r--r--src/H5Fpkg.h109
-rw-r--r--src/H5Fprivate.h110
-rw-r--r--src/H5Fpublic.h27
-rw-r--r--src/H5Fquery.c157
-rw-r--r--src/H5Fsfile.c10
-rw-r--r--src/H5Fspace.c224
-rw-r--r--src/H5Fsuper.c424
-rw-r--r--src/H5Fsuper_cache.c501
-rw-r--r--src/H5Ftest.c10
-rw-r--r--src/H5G.c10
-rw-r--r--src/H5Gbtree2.c10
-rw-r--r--src/H5Gcache.c10
-rw-r--r--src/H5Gcompact.c10
-rw-r--r--src/H5Gdense.c10
-rw-r--r--src/H5Gdeprec.c10
-rw-r--r--src/H5Gent.c10
-rw-r--r--src/H5Gint.c10
-rw-r--r--src/H5Glink.c10
-rw-r--r--src/H5Gloc.c10
-rw-r--r--src/H5Gmodule.h10
-rw-r--r--src/H5Gname.c10
-rw-r--r--src/H5Gnode.c10
-rw-r--r--src/H5Gobj.c10
-rw-r--r--src/H5Goh.c10
-rw-r--r--src/H5Gpkg.h13
-rw-r--r--src/H5Gprivate.h10
-rw-r--r--src/H5Gpublic.h10
-rw-r--r--src/H5Groot.c10
-rw-r--r--src/H5Gstab.c10
-rw-r--r--src/H5Gtest.c10
-rw-r--r--src/H5Gtraverse.c10
-rw-r--r--src/H5HF.c10
-rw-r--r--src/H5HFbtree2.c10
-rw-r--r--src/H5HFcache.c819
-rw-r--r--src/H5HFdbg.c10
-rw-r--r--src/H5HFdblock.c14
-rw-r--r--src/H5HFdtable.c10
-rw-r--r--src/H5HFhdr.c10
-rw-r--r--src/H5HFhuge.c10
-rw-r--r--src/H5HFiblock.c10
-rw-r--r--src/H5HFiter.c10
-rw-r--r--src/H5HFman.c10
-rw-r--r--src/H5HFmodule.h10
-rw-r--r--src/H5HFpkg.h19
-rw-r--r--src/H5HFprivate.h10
-rw-r--r--src/H5HFpublic.h10
-rw-r--r--src/H5HFsection.c52
-rw-r--r--src/H5HFspace.c10
-rw-r--r--src/H5HFstat.c10
-rw-r--r--src/H5HFtest.c10
-rw-r--r--src/H5HFtiny.c10
-rw-r--r--src/H5HG.c10
-rw-r--r--src/H5HGcache.c118
-rw-r--r--src/H5HGdbg.c10
-rw-r--r--src/H5HGmodule.h10
-rw-r--r--src/H5HGpkg.h13
-rw-r--r--src/H5HGprivate.h10
-rw-r--r--src/H5HGpublic.h10
-rw-r--r--src/H5HGquery.c10
-rw-r--r--src/H5HL.c10
-rw-r--r--src/H5HLcache.c136
-rw-r--r--src/H5HLdbg.c10
-rw-r--r--src/H5HLdblk.c10
-rw-r--r--src/H5HLint.c10
-rw-r--r--src/H5HLmodule.h10
-rw-r--r--src/H5HLpkg.h16
-rw-r--r--src/H5HLprfx.c10
-rw-r--r--src/H5HLprivate.h10
-rw-r--r--src/H5HLpublic.h10
-rw-r--r--src/H5HP.c10
-rw-r--r--src/H5HPprivate.h10
-rw-r--r--src/H5I.c10
-rw-r--r--src/H5Imodule.h10
-rw-r--r--src/H5Ipkg.h10
-rw-r--r--src/H5Iprivate.h10
-rw-r--r--src/H5Ipublic.h10
-rw-r--r--src/H5Itest.c10
-rw-r--r--src/H5L.c16
-rw-r--r--src/H5Lexternal.c10
-rw-r--r--src/H5Lmodule.h10
-rw-r--r--src/H5Lpkg.h10
-rw-r--r--src/H5Lprivate.h10
-rw-r--r--src/H5Lpublic.h10
-rw-r--r--src/H5MF.c3575
-rw-r--r--src/H5MFaggr.c147
-rw-r--r--src/H5MFdbg.c124
-rw-r--r--src/H5MFmodule.h10
-rw-r--r--src/H5MFpkg.h79
-rw-r--r--src/H5MFprivate.h29
-rw-r--r--src/H5MFsection.c724
-rw-r--r--src/H5MM.c10
-rw-r--r--src/H5MMprivate.h10
-rw-r--r--src/H5MMpublic.h10
-rw-r--r--src/H5MP.c10
-rw-r--r--src/H5MPmodule.h10
-rw-r--r--src/H5MPpkg.h10
-rw-r--r--src/H5MPprivate.h10
-rw-r--r--src/H5MPtest.c10
-rw-r--r--src/H5O.c19
-rw-r--r--src/H5Oainfo.c10
-rw-r--r--src/H5Oalloc.c18
-rw-r--r--src/H5Oattr.c10
-rw-r--r--src/H5Oattribute.c10
-rw-r--r--src/H5Obogus.c10
-rw-r--r--src/H5Obtreek.c10
-rw-r--r--src/H5Ocache.c338
-rw-r--r--src/H5Ocache_image.c377
-rw-r--r--src/H5Ochunk.c10
-rw-r--r--src/H5Ocont.c10
-rw-r--r--src/H5Ocopy.c10
-rw-r--r--src/H5Odbg.c10
-rw-r--r--src/H5Odrvinfo.c10
-rw-r--r--src/H5Odtype.c297
-rw-r--r--src/H5Oefl.c10
-rw-r--r--src/H5Ofill.c10
-rw-r--r--src/H5Oflush.c10
-rw-r--r--src/H5Ofsinfo.c202
-rw-r--r--src/H5Oginfo.c10
-rw-r--r--src/H5Olayout.c10
-rw-r--r--src/H5Olinfo.c10
-rw-r--r--src/H5Olink.c10
-rw-r--r--src/H5Omessage.c65
-rw-r--r--src/H5Omodule.h10
-rw-r--r--src/H5Omtime.c10
-rw-r--r--src/H5Oname.c10
-rw-r--r--src/H5Onull.c10
-rw-r--r--src/H5Opkg.h23
-rw-r--r--src/H5Opline.c10
-rw-r--r--src/H5Oprivate.h55
-rw-r--r--src/H5Opublic.h10
-rw-r--r--src/H5Orefcount.c10
-rw-r--r--src/H5Osdspace.c10
-rw-r--r--src/H5Oshared.c10
-rw-r--r--src/H5Oshared.h10
-rw-r--r--src/H5Oshmesg.c10
-rw-r--r--src/H5Ostab.c10
-rw-r--r--src/H5Otest.c10
-rw-r--r--src/H5Ounknown.c10
-rw-r--r--src/H5P.c10
-rw-r--r--src/H5PB.c1537
-rw-r--r--src/H5PBmodule.h32
-rw-r--r--src/H5PBpkg.h58
-rw-r--r--src/H5PBprivate.h106
-rw-r--r--src/H5PL.c308
-rw-r--r--src/H5PLextern.h10
-rw-r--r--src/H5PLmodule.h10
-rw-r--r--src/H5PLpkg.h48
-rw-r--r--src/H5PLprivate.h10
-rw-r--r--src/H5PLpublic.h19
-rw-r--r--src/H5Pacpl.c10
-rw-r--r--src/H5Pdapl.c10
-rw-r--r--src/H5Pdcpl.c10
-rw-r--r--src/H5Pdeprec.c140
-rw-r--r--src/H5Pdxpl.c40
-rw-r--r--src/H5Pencdec.c10
-rw-r--r--src/H5Pfapl.c403
-rw-r--r--src/H5Pfcpl.c260
-rw-r--r--src/H5Pfmpl.c10
-rw-r--r--src/H5Pgcpl.c10
-rw-r--r--src/H5Pint.c10
-rw-r--r--src/H5Plapl.c10
-rw-r--r--src/H5Plcpl.c10
-rw-r--r--src/H5Pmodule.h10
-rw-r--r--src/H5Pocpl.c10
-rw-r--r--src/H5Pocpypl.c10
-rw-r--r--src/H5Ppkg.h10
-rw-r--r--src/H5Pprivate.h10
-rw-r--r--src/H5Ppublic.h22
-rw-r--r--src/H5Pstrcpl.c10
-rw-r--r--src/H5Ptest.c10
-rw-r--r--src/H5R.c10
-rw-r--r--src/H5RS.c10
-rw-r--r--src/H5RSprivate.h10
-rw-r--r--src/H5Rdeprec.c10
-rw-r--r--src/H5Rmodule.h10
-rw-r--r--src/H5Rpkg.h10
-rw-r--r--src/H5Rprivate.h10
-rw-r--r--src/H5Rpublic.h10
-rw-r--r--src/H5S.c12
-rw-r--r--src/H5SL.c10
-rw-r--r--src/H5SLmodule.h10
-rw-r--r--src/H5SLprivate.h10
-rw-r--r--src/H5SM.c14
-rw-r--r--src/H5SMbtree2.c10
-rw-r--r--src/H5SMcache.c10
-rw-r--r--src/H5SMmessage.c10
-rw-r--r--src/H5SMmodule.h10
-rw-r--r--src/H5SMpkg.h12
-rw-r--r--src/H5SMprivate.h10
-rw-r--r--src/H5SMtest.c10
-rw-r--r--src/H5ST.c10
-rw-r--r--src/H5STprivate.h10
-rw-r--r--src/H5Sall.c10
-rw-r--r--src/H5Sdbg.c10
-rw-r--r--src/H5Shyper.c22
-rw-r--r--src/H5Smodule.h10
-rw-r--r--src/H5Smpio.c10
-rw-r--r--src/H5Snone.c10
-rw-r--r--src/H5Spkg.h10
-rw-r--r--src/H5Spoint.c10
-rw-r--r--src/H5Sprivate.h10
-rw-r--r--src/H5Spublic.h10
-rw-r--r--src/H5Sselect.c10
-rw-r--r--src/H5Stest.c10
-rw-r--r--src/H5T.c2869
-rw-r--r--src/H5TS.c10
-rw-r--r--src/H5TSprivate.h10
-rw-r--r--src/H5Tarray.c10
-rw-r--r--src/H5Tbit.c10
-rw-r--r--src/H5Tcommit.c10
-rw-r--r--src/H5Tcompound.c10
-rw-r--r--src/H5Tconv.c10
-rw-r--r--src/H5Tcset.c10
-rw-r--r--src/H5Tdbg.c10
-rw-r--r--src/H5Tdeprec.c10
-rw-r--r--src/H5Tenum.c10
-rw-r--r--src/H5Tfields.c10
-rw-r--r--src/H5Tfixed.c10
-rw-r--r--src/H5Tfloat.c10
-rw-r--r--src/H5Tmodule.h10
-rw-r--r--src/H5Tnative.c10
-rw-r--r--src/H5Toffset.c10
-rw-r--r--src/H5Toh.c10
-rw-r--r--src/H5Topaque.c10
-rw-r--r--src/H5Torder.c10
-rw-r--r--src/H5Tpad.c10
-rw-r--r--src/H5Tpkg.h10
-rw-r--r--src/H5Tprecis.c10
-rw-r--r--src/H5Tprivate.h10
-rw-r--r--src/H5Tpublic.h10
-rw-r--r--src/H5Tstrpad.c10
-rw-r--r--src/H5Tvisit.c10
-rw-r--r--src/H5Tvlen.c10
-rw-r--r--src/H5UC.c10
-rw-r--r--src/H5UCprivate.h10
-rw-r--r--src/H5VM.c10
-rw-r--r--src/H5VMprivate.h10
-rw-r--r--src/H5WB.c10
-rw-r--r--src/H5WBprivate.h10
-rw-r--r--src/H5Z.c12
-rw-r--r--src/H5Zdeflate.c10
-rw-r--r--src/H5Zfletcher32.c10
-rw-r--r--src/H5Zmodule.h10
-rw-r--r--src/H5Znbit.c10
-rw-r--r--src/H5Zpkg.h10
-rw-r--r--src/H5Zprivate.h10
-rw-r--r--src/H5Zpublic.h10
-rw-r--r--src/H5Zscaleoffset.c10
-rw-r--r--src/H5Zshuffle.c10
-rw-r--r--src/H5Zszip.c14
-rw-r--r--src/H5Ztrans.c10
-rw-r--r--src/H5api_adpt.h10
-rw-r--r--src/H5checksum.c10
-rw-r--r--src/H5dbg.c10
-rw-r--r--src/H5detect.c20
-rw-r--r--src/H5err.txt14
-rw-r--r--src/H5make_libsettings.c20
-rw-r--r--src/H5overflow.txt10
-rw-r--r--src/H5private.h20
-rw-r--r--src/H5public.h16
-rw-r--r--src/H5system.c43
-rw-r--r--src/H5timer.c10
-rw-r--r--src/H5trace.c47
-rw-r--r--src/H5vers.txt10
-rw-r--r--src/H5win32defs.h30
-rw-r--r--src/Makefile.am19
-rw-r--r--src/hdf5.h10
-rw-r--r--test/AtomicWriterReader.txt48
-rw-r--r--test/CMakeLists.txt145
-rw-r--r--test/CMakeTests.cmake342
-rw-r--r--test/COPYING11
-rw-r--r--test/H5srcdir.h10
-rw-r--r--test/H5srcdir_str.h.in10
-rw-r--r--test/Makefile.am100
-rw-r--r--test/POSIX_Order_Write_Test_Report.docx  bin 0 -> 145445 bytes
-rw-r--r--test/POSIX_Order_Write_Test_Report.pdf  bin 0 -> 84166 bytes
-rw-r--r--test/SWMR_POSIX_Order_UG.txt94
-rw-r--r--test/SWMR_UseCase_UG.txt223
-rw-r--r--test/accum.c69
-rw-r--r--test/accum_swmr_reader.c10
-rw-r--r--test/aggr.h5  bin 0 -> 2448 bytes
-rw-r--r--test/app_ref.c10
-rw-r--r--test/atomic_reader.c361
-rw-r--r--test/atomic_writer.c243
-rw-r--r--test/big.c10
-rw-r--r--test/bittests.c10
-rw-r--r--test/btree2.c10
-rw-r--r--test/cache.c1739
-rw-r--r--test/cache_api.c129
-rw-r--r--test/cache_common.c252
-rw-r--r--test/cache_common.h51
-rw-r--r--test/cache_image.c8081
-rw-r--r--test/cache_logging.c10
-rw-r--r--test/cache_tagging.c128
-rw-r--r--test/chunk_info.c10
-rw-r--r--test/cmpd_dset.c10
-rw-r--r--test/cork.c11
-rw-r--r--test/cross_read.c10
-rw-r--r--test/dangle.c10
-rw-r--r--test/dsets.c360
-rw-r--r--test/dt_arith.c10
-rw-r--r--test/dtransform.c10
-rw-r--r--test/dtypes.c10
-rw-r--r--test/dynlib1.c10
-rw-r--r--test/dynlib2.c10
-rw-r--r--test/dynlib3.c10
-rw-r--r--test/dynlib4.c10
-rw-r--r--test/earray.c256
-rw-r--r--test/efc.c10
-rw-r--r--test/enc_dec_plist.c17
-rw-r--r--test/enc_dec_plist_cross_platform.c10
-rw-r--r--test/enum.c10
-rw-r--r--test/err_compat.c10
-rw-r--r--test/error_test.c10
-rw-r--r--test/evict_on_close.c126
-rw-r--r--test/extend.c10
-rw-r--r--test/external.c10
-rw-r--r--test/farray.c10
-rw-r--r--test/fheap.c922
-rw-r--r--test/file_image.c10
-rw-r--r--test/filespace_1_8.h5  bin 2448 -> 2544 bytes
-rw-r--r--test/fillval.c12
-rw-r--r--test/filter_fail.c10
-rw-r--r--test/flush1.c345
-rw-r--r--test/flush2.c466
-rw-r--r--test/flushrefresh.c1149
-rw-r--r--test/freespace.c73
-rw-r--r--test/fsm_aggr_nopersist.h5  bin 0 -> 2448 bytes
-rw-r--r--test/fsm_aggr_persist.h5  bin 0 -> 2565 bytes
-rw-r--r--test/gen_bad_compound.c10
-rw-r--r--test/gen_bad_ohdr.c10
-rw-r--r--test/gen_bogus.c10
-rw-r--r--test/gen_cross.c10
-rw-r--r--test/gen_deflate.c10
-rw-r--r--test/gen_file_image.c10
-rw-r--r--test/gen_filespace.c146
-rw-r--r--test/gen_filters.c10
-rw-r--r--test/gen_mergemsg.c10
-rw-r--r--test/gen_new_array.c10
-rw-r--r--test/gen_new_fill.c10
-rw-r--r--test/gen_new_group.c10
-rw-r--r--test/gen_new_mtime.c10
-rw-r--r--test/gen_new_super.c10
-rw-r--r--test/gen_noencoder.c10
-rw-r--r--test/gen_nullspace.c10
-rw-r--r--test/gen_old_array.c10
-rw-r--r--test/gen_old_group.c10
-rw-r--r--test/gen_old_layout.c10
-rw-r--r--test/gen_old_mtime.c10
-rw-r--r--test/gen_plist.c25
-rw-r--r--test/gen_sizes_lheap.c10
-rw-r--r--test/gen_specmetaread.c10
-rw-r--r--test/gen_udlinks.c10
-rw-r--r--test/genall5.c3886
-rw-r--r--test/genall5.h51
-rw-r--r--test/getname.c10
-rw-r--r--test/gheap.c10
-rw-r--r--test/h5fc_ext1_f.h5  bin 0 -> 6760 bytes
-rw-r--r--test/h5fc_ext1_i.h5  bin 0 -> 6526 bytes
-rw-r--r--test/h5fc_ext2_if.h5  bin 0 -> 6526 bytes
-rw-r--r--test/h5fc_ext2_sf.h5  bin 0 -> 5076 bytes
-rw-r--r--test/h5fc_ext3_isf.h5  bin 0 -> 6679 bytes
-rw-r--r--test/h5fc_ext_none.h5  bin 0 -> 6474 bytes
-rw-r--r--test/h5test.c129
-rw-r--r--test/h5test.h13
-rw-r--r--test/hyperslab.c10
-rw-r--r--test/istore.c10
-rw-r--r--test/lheap.c10
-rw-r--r--test/links.c27
-rw-r--r--test/links_env.c10
-rw-r--r--test/mf.c5063
-rw-r--r--test/mount.c10
-rw-r--r--test/mtime.c10
-rw-r--r--test/none.h5  bin 0 -> 1808 bytes
-rw-r--r--test/ntypes.c10
-rw-r--r--test/objcopy.c12
-rw-r--r--test/ohdr.c10
-rw-r--r--test/page_buffer.c2206
-rw-r--r--test/paged_nopersist.h5 (renamed from tools/test/h5diff/testfiles/h5diff_dset_idx1.h5)  bin 5974 -> 8192 bytes
-rw-r--r--test/paged_persist.h5  bin 0 -> 16384 bytes
-rw-r--r--test/plugin.c963
-rw-r--r--test/pool.c10
-rw-r--r--test/reserved.c10
-rw-r--r--test/set_extent.c41
-rw-r--r--test/space_overflow.c10
-rw-r--r--test/stab.c91
-rw-r--r--test/swmr.c10
-rw-r--r--test/swmr_addrem_writer.c441
-rw-r--r--test/swmr_check_compat_vfd.c54
-rw-r--r--test/swmr_common.c288
-rw-r--r--test/swmr_common.h78
-rw-r--r--test/swmr_generator.c384
-rw-r--r--test/swmr_reader.c546
-rw-r--r--test/swmr_remove_reader.c517
-rw-r--r--test/swmr_remove_writer.c379
-rw-r--r--test/swmr_sparse_reader.c447
-rw-r--r--test/swmr_sparse_writer.c452
-rw-r--r--test/swmr_start_write.c713
-rw-r--r--test/swmr_writer.c449
-rw-r--r--test/tarray.c10
-rw-r--r--test/tattr.c10
-rw-r--r--test/tcheck_version.c10
-rw-r--r--test/tchecksum.c10
-rw-r--r--test/tconfig.c10
-rw-r--r--test/tcoords.c10
-rw-r--r--test/test_plugin.sh.in25
-rw-r--r--test/test_usecases.sh.in199
-rw-r--r--test/testcheck_version.sh.in10
-rw-r--r--test/testerror.sh.in10
-rw-r--r--test/testfiles/plist_files/dapl_32be  bin 136 -> 186 bytes
-rw-r--r--test/testfiles/plist_files/dapl_32le  bin 136 -> 186 bytes
-rw-r--r--test/testfiles/plist_files/dapl_64be  bin 136 -> 186 bytes
-rw-r--r--test/testfiles/plist_files/dapl_64le  bin 136 -> 186 bytes
-rw-r--r--test/testfiles/plist_files/def_dapl_32be  bin 131 -> 181 bytes
-rw-r--r--test/testfiles/plist_files/def_dapl_32le  bin 131 -> 181 bytes
-rw-r--r--test/testfiles/plist_files/def_dapl_64be  bin 131 -> 181 bytes
-rw-r--r--test/testfiles/plist_files/def_dapl_64le  bin 131 -> 181 bytes
-rw-r--r--test/testfiles/plist_files/def_dxpl_32be  bin 225 -> 245 bytes
-rw-r--r--test/testfiles/plist_files/def_dxpl_32le  bin 225 -> 245 bytes
-rw-r--r--test/testfiles/plist_files/def_dxpl_64be  bin 225 -> 245 bytes
-rw-r--r--test/testfiles/plist_files/def_dxpl_64le  bin 225 -> 245 bytes
-rw-r--r--test/testfiles/plist_files/def_fapl_32be  bin 1460 -> 1520 bytes
-rw-r--r--test/testfiles/plist_files/def_fapl_32le  bin 1460 -> 1520 bytes
-rw-r--r--test/testfiles/plist_files/def_fapl_64be  bin 1460 -> 1520 bytes
-rw-r--r--test/testfiles/plist_files/def_fapl_64le  bin 1460 -> 1520 bytes
-rw-r--r--test/testfiles/plist_files/def_fcpl_32be  bin 412 -> 452 bytes
-rw-r--r--test/testfiles/plist_files/def_fcpl_32le  bin 412 -> 452 bytes
-rw-r--r--test/testfiles/plist_files/def_fcpl_64be  bin 412 -> 452 bytes
-rw-r--r--test/testfiles/plist_files/def_fcpl_64le  bin 412 -> 452 bytes
-rw-r--r--test/testfiles/plist_files/dxpl_32be  bin 229 -> 249 bytes
-rw-r--r--test/testfiles/plist_files/dxpl_32le  bin 229 -> 249 bytes
-rw-r--r--test/testfiles/plist_files/dxpl_64be  bin 229 -> 249 bytes
-rw-r--r--test/testfiles/plist_files/dxpl_64le  bin 229 -> 249 bytes
-rw-r--r--test/testfiles/plist_files/fapl_32be  bin 1462 -> 1522 bytes
-rw-r--r--test/testfiles/plist_files/fapl_32le  bin 1462 -> 1522 bytes
-rw-r--r--test/testfiles/plist_files/fapl_64be  bin 1462 -> 1522 bytes
-rw-r--r--test/testfiles/plist_files/fapl_64le  bin 1462 -> 1522 bytes
-rw-r--r--test/testfiles/plist_files/fcpl_32be  bin 413 -> 453 bytes
-rw-r--r--test/testfiles/plist_files/fcpl_32le  bin 413 -> 453 bytes
-rw-r--r--test/testfiles/plist_files/fcpl_64be  bin 413 -> 453 bytes
-rw-r--r--test/testfiles/plist_files/fcpl_64le  bin 413 -> 453 bytes
-rw-r--r--test/testfiles/plist_files/lapl_32be  bin 1565 -> 1625 bytes
-rw-r--r--test/testfiles/plist_files/lapl_32le  bin 1565 -> 1625 bytes
-rw-r--r--test/testfiles/plist_files/lapl_64be  bin 1565 -> 1625 bytes
-rw-r--r--test/testfiles/plist_files/lapl_64le  bin 1565 -> 1625 bytes
-rw-r--r--test/testflushrefresh.sh.in220
-rw-r--r--test/testframe.c10
-rw-r--r--test/testhdf5.c10
-rw-r--r--test/testhdf5.h10
-rw-r--r--test/testlibinfo.sh.in10
-rw-r--r--test/testlinks_env.sh.in10
-rw-r--r--test/testmeta.c10
-rw-r--r--test/testswmr.sh.in562
-rw-r--r--test/testvdsswmr.sh.in226
-rw-r--r--test/tfile.c2203
-rw-r--r--test/tgenprop.c10
-rw-r--r--test/th5o.c10
-rw-r--r--test/th5s.c10
-rw-r--r--test/theap.c10
-rw-r--r--test/tid.c10
-rw-r--r--test/titerate.c10
-rw-r--r--test/tmeta.c10
-rw-r--r--test/tmisc.c26
-rw-r--r--test/trefer.c10
-rw-r--r--test/trefstr.c10
-rw-r--r--test/tselect.c10
-rw-r--r--test/tskiplist.c10
-rw-r--r--test/tsohm.c10
-rw-r--r--test/ttime.c10
-rw-r--r--test/ttsafe.c10
-rw-r--r--test/ttsafe.h10
-rw-r--r--test/ttsafe_acreate.c10
-rw-r--r--test/ttsafe_cancel.c10
-rw-r--r--test/ttsafe_dcreate.c10
-rw-r--r--test/ttsafe_error.c10
-rw-r--r--test/ttst.c10
-rw-r--r--test/tunicode.c10
-rw-r--r--test/tvlstr.c10
-rw-r--r--test/tvltypes.c10
-rw-r--r--test/twriteorder.c461
-rw-r--r--test/unlink.c14
-rw-r--r--test/unregister.c10
-rw-r--r--test/use.h63
-rw-r--r--test/use_append_chunk.c231
-rw-r--r--test/use_append_mchunks.c224
-rw-r--r--test/use_common.c651
-rw-r--r--test/use_disable_mdc_flushes.c547
-rw-r--r--test/vds.c65
-rw-r--r--test/vds_swmr.h163
-rw-r--r--test/vds_swmr_gen.c176
-rw-r--r--test/vds_swmr_reader.c140
-rw-r--r--test/vds_swmr_writer.c165
-rw-r--r--test/vfd.c10
-rw-r--r--testpar/CMakeLists.txt4
-rw-r--r--testpar/CMakeTests.cmake33
-rw-r--r--testpar/COPYING11
-rw-r--r--testpar/Makefile.am12
-rw-r--r--testpar/t_cache.c80
-rw-r--r--testpar/t_cache_image.c4275
-rw-r--r--testpar/t_chunk_alloc.c10
-rw-r--r--testpar/t_coll_chunk.c10
-rw-r--r--testpar/t_dset.c10
-rw-r--r--testpar/t_file.c625
-rw-r--r--testpar/t_file_image.c10
-rw-r--r--testpar/t_filter_read.c10
-rw-r--r--testpar/t_init_term.c10
-rw-r--r--testpar/t_mdset.c16
-rw-r--r--testpar/t_mpi.c10
-rw-r--r--testpar/t_pflush1.c10
-rw-r--r--testpar/t_pflush2.c10
-rw-r--r--testpar/t_ph5basic.c10
-rw-r--r--testpar/t_prestart.c10
-rw-r--r--testpar/t_prop.c10
-rw-r--r--testpar/t_pshutdown.c10
-rw-r--r--testpar/t_shapesame.c10
-rw-r--r--testpar/t_span_tree.c10
-rw-r--r--testpar/testpar.h10
-rw-r--r--testpar/testphdf5.c15
-rw-r--r--testpar/testphdf5.h11
-rw-r--r--tools/CMakeLists.txt6
-rw-r--r--tools/COPYING11
-rw-r--r--tools/Makefile.am10
-rw-r--r--tools/lib/CMakeLists.txt9
-rw-r--r--tools/lib/Makefile.am10
-rw-r--r--tools/lib/h5diff.c178
-rw-r--r--tools/lib/h5diff.h10
-rw-r--r--tools/lib/h5diff_array.c143
-rw-r--r--tools/lib/h5diff_attr.c35
-rw-r--r--tools/lib/h5diff_dset.c441
-rw-r--r--tools/lib/h5diff_util.c148
-rw-r--r--tools/lib/h5tools.c90
-rw-r--r--tools/lib/h5tools.h15
-rw-r--r--tools/lib/h5tools_dump.c152
-rw-r--r--tools/lib/h5tools_dump.h10
-rw-r--r--tools/lib/h5tools_error.h10
-rw-r--r--tools/lib/h5tools_filters.c10
-rw-r--r--tools/lib/h5tools_ref.c10
-rw-r--r--tools/lib/h5tools_ref.h10
-rw-r--r--tools/lib/h5tools_str.c464
-rw-r--r--tools/lib/h5tools_str.h10
-rw-r--r--tools/lib/h5tools_type.c67
-rw-r--r--tools/lib/h5tools_utils.c10
-rw-r--r--tools/lib/h5tools_utils.h10
-rw-r--r--tools/lib/h5trav.c10
-rw-r--r--tools/lib/h5trav.h10
-rw-r--r--tools/lib/io_timer.c10
-rw-r--r--tools/lib/io_timer.h10
-rw-r--r--tools/lib/ph5diff.h10
-rw-r--r--tools/src/CMakeLists.txt2
-rw-r--r--tools/src/Makefile.am13
-rw-r--r--tools/src/h5copy/CMakeLists.txt35
-rw-r--r--tools/src/h5copy/Makefile.am10
-rw-r--r--tools/src/h5copy/h5copy.c10
-rw-r--r--tools/src/h5diff/CMakeLists.txt53
-rw-r--r--tools/src/h5diff/Makefile.am10
-rw-r--r--tools/src/h5diff/h5diff_common.c55
-rw-r--r--tools/src/h5diff/h5diff_common.h10
-rw-r--r--tools/src/h5diff/h5diff_main.c10
-rw-r--r--tools/src/h5diff/ph5diff_main.c10
-rw-r--r--tools/src/h5dump/CMakeLists.txt39
-rw-r--r--tools/src/h5dump/Makefile.am10
-rw-r--r--tools/src/h5dump/h5dump.c10
-rw-r--r--tools/src/h5dump/h5dump.h14
-rw-r--r--tools/src/h5dump/h5dump_ddl.c43
-rw-r--r--tools/src/h5dump/h5dump_ddl.h10
-rw-r--r--tools/src/h5dump/h5dump_defines.h10
-rw-r--r--tools/src/h5dump/h5dump_extern.h14
-rw-r--r--tools/src/h5dump/h5dump_xml.c462
-rw-r--r--tools/src/h5dump/h5dump_xml.h10
-rw-r--r--tools/src/h5format_convert/CMakeLists.txt21
-rw-r--r--tools/src/h5format_convert/Makefile.am13
-rw-r--r--tools/src/h5format_convert/h5format_convert.c10
-rw-r--r--tools/src/h5import/CMakeLists.txt21
-rw-r--r--tools/src/h5import/Makefile.am10
-rw-r--r--tools/src/h5import/h5import.c10
-rw-r--r--tools/src/h5import/h5import.h10
-rw-r--r--tools/src/h5jam/CMakeLists.txt23
-rw-r--r--tools/src/h5jam/Makefile.am10
-rw-r--r--tools/src/h5jam/h5jam.c10
-rw-r--r--tools/src/h5jam/h5unjam.c10
-rw-r--r--tools/src/h5ls/CMakeLists.txt39
-rw-r--r--tools/src/h5ls/Makefile.am10
-rw-r--r--tools/src/h5ls/h5ls.c12
-rw-r--r--tools/src/h5repack/CMakeLists.txt35
-rw-r--r--tools/src/h5repack/Makefile.am33
-rw-r--r--tools/src/h5repack/h5repack.c935
-rw-r--r--tools/src/h5repack/h5repack.h34
-rw-r--r--tools/src/h5repack/h5repack_copy.c282
-rw-r--r--tools/src/h5repack/h5repack_filters.c10
-rw-r--r--tools/src/h5repack/h5repack_main.c567
-rw-r--r--tools/src/h5repack/h5repack_opttable.c10
-rw-r--r--tools/src/h5repack/h5repack_parse.c17
-rw-r--r--tools/src/h5repack/h5repack_refs.c56
-rw-r--r--tools/src/h5repack/h5repack_verify.c140
-rw-r--r--tools/src/h5stat/CMakeLists.txt35
-rw-r--r--tools/src/h5stat/Makefile.am10
-rw-r--r--tools/src/h5stat/h5stat.c35
-rw-r--r--tools/src/misc/CMakeLists.txt27
-rw-r--r--tools/src/misc/Makefile.am10
-rw-r--r--tools/src/misc/h5cc.in10
-rw-r--r--tools/src/misc/h5clear.c259
-rw-r--r--tools/src/misc/h5debug.c10
-rw-r--r--tools/src/misc/h5mkgrp.c13
-rw-r--r--tools/src/misc/h5redeploy.in10
-rw-r--r--tools/src/misc/h5repart.c10
-rw-r--r--tools/test/CMakeLists.txt2
-rw-r--r--tools/test/Makefile.am14
-rw-r--r--tools/test/h5copy/CMakeLists.txt4
-rw-r--r--tools/test/h5copy/CMakeTests.cmake163
-rw-r--r--tools/test/h5copy/Makefile.am10
-rw-r--r--tools/test/h5copy/h5copygentest.c22
-rw-r--r--tools/test/h5copy/testfiles/h5copy_extlinks_src.out.ls14
-rw-r--r--tools/test/h5copy/testfiles/h5copy_ref.out.ls10
-rw-r--r--tools/test/h5copy/testfiles/h5copytst.h5bin15900 -> 15944 bytes
-rw-r--r--tools/test/h5copy/testfiles/h5copytst.out.ls182
-rw-r--r--tools/test/h5copy/testh5copy.sh.in107
-rw-r--r--tools/test/h5diff/CMakeLists.txt32
-rw-r--r--tools/test/h5diff/CMakeTests.cmake170
-rw-r--r--tools/test/h5diff/Makefile.am30
-rw-r--r--tools/test/h5diff/dynlib_diff.c89
-rw-r--r--tools/test/h5diff/h5diff_plugin.sh.in315
-rw-r--r--tools/test/h5diff/h5diffgentest.c318
-rw-r--r--tools/test/h5diff/testfiles/diff_strings1.h5bin0 -> 4640 bytes
-rw-r--r--tools/test/h5diff/testfiles/diff_strings2.h5bin0 -> 4640 bytes
-rw-r--r--tools/test/h5diff/testfiles/h5diff_10.txt21
-rw-r--r--tools/test/h5diff/testfiles/h5diff_58.txt2
-rw-r--r--tools/test/h5diff/testfiles/h5diff_60.txt10
-rw-r--r--tools/test/h5diff/testfiles/h5diff_600.txt21
-rw-r--r--tools/test/h5diff/testfiles/h5diff_603.txt21
-rw-r--r--tools/test/h5diff/testfiles/h5diff_606.txt21
-rw-r--r--tools/test/h5diff/testfiles/h5diff_61.txt30
-rw-r--r--tools/test/h5diff/testfiles/h5diff_612.txt21
-rw-r--r--tools/test/h5diff/testfiles/h5diff_615.txt21
-rw-r--r--tools/test/h5diff/testfiles/h5diff_62.txt37
-rw-r--r--tools/test/h5diff/testfiles/h5diff_621.txt21
-rw-r--r--tools/test/h5diff/testfiles/h5diff_622.txt21
-rw-r--r--tools/test/h5diff/testfiles/h5diff_623.txt21
-rw-r--r--tools/test/h5diff/testfiles/h5diff_624.txt21
-rw-r--r--tools/test/h5diff/testfiles/h5diff_63.txt10
-rw-r--r--tools/test/h5diff/testfiles/h5diff_80.txt2
-rw-r--r--tools/test/h5diff/testfiles/h5diff_800.txt12
-rw-r--r--tools/test/h5diff/testfiles/h5diff_801.txt13
-rw-r--r--tools/test/h5diff/testfiles/h5diff_dset1.h5bin23624 -> 23416 bytes
-rw-r--r--tools/test/h5diff/testfiles/h5diff_dset2.h5bin23624 -> 23416 bytes
-rw-r--r--tools/test/h5diff/testfiles/h5diff_dset3.h5bin0 -> 23416 bytes
-rw-r--r--tools/test/h5diff/testfiles/h5diff_ud.txt (renamed from tools/test/h5diff/testfiles/h5diff_idx.txt)7
-rw-r--r--tools/test/h5diff/testfiles/h5diff_udfail.txt12
-rw-r--r--tools/test/h5diff/testfiles/tudfilter.h5bin0 -> 4816 bytes
-rw-r--r--tools/test/h5diff/testfiles/tudfilter2.h5bin0 -> 4816 bytes
-rw-r--r--tools/test/h5diff/testh5diff.sh.in125
-rw-r--r--tools/test/h5diff/testph5diff.sh.in10
-rw-r--r--tools/test/h5dump/CMakeLists.txt32
-rw-r--r--tools/test/h5dump/CMakeTests.cmake176
-rw-r--r--tools/test/h5dump/CMakeTestsPBITS.cmake29
-rw-r--r--tools/test/h5dump/CMakeTestsVDS.cmake45
-rw-r--r--tools/test/h5dump/CMakeTestsXML.cmake52
-rw-r--r--tools/test/h5dump/Makefile.am30
-rw-r--r--tools/test/h5dump/binread.c10
-rw-r--r--tools/test/h5dump/dynlib_dump.c89
-rw-r--r--tools/test/h5dump/h5dump_plugin.sh.in241
-rw-r--r--tools/test/h5dump/h5dumpgentest.c212
-rw-r--r--tools/test/h5dump/testh5dump.sh.in69
-rw-r--r--tools/test/h5dump/testh5dumppbits.sh.in32
-rw-r--r--tools/test/h5dump/testh5dumpvds.sh.in18
-rw-r--r--tools/test/h5dump/testh5dumpxml.sh.in35
-rw-r--r--tools/test/h5format_convert/CMakeLists.txt4
-rw-r--r--tools/test/h5format_convert/CMakeTests.cmake61
-rw-r--r--tools/test/h5format_convert/Makefile.am12
-rw-r--r--tools/test/h5format_convert/h5fc_chk_idx.c10
-rw-r--r--tools/test/h5format_convert/h5fc_gentest.c22
-rw-r--r--tools/test/h5format_convert/testfiles/h5fc_edge_v3.h5bin2526 -> 2478 bytes
-rw-r--r--tools/test/h5format_convert/testfiles/h5fc_err_level.h5bin7294 -> 6236774 bytes
-rw-r--r--tools/test/h5format_convert/testfiles/h5fc_ext1_f.ddl6
-rw-r--r--tools/test/h5format_convert/testfiles/h5fc_ext1_f.h5bin8296 -> 12288 bytes
-rw-r--r--tools/test/h5format_convert/testfiles/h5fc_ext1_i.ddl6
-rw-r--r--tools/test/h5format_convert/testfiles/h5fc_ext1_i.h5bin8062 -> 7990 bytes
-rw-r--r--tools/test/h5format_convert/testfiles/h5fc_ext1_s.ddl6
-rw-r--r--tools/test/h5format_convert/testfiles/h5fc_ext1_s.h5bin8128 -> 8167 bytes
-rw-r--r--tools/test/h5format_convert/testfiles/h5fc_ext2_if.ddl6
-rw-r--r--tools/test/h5format_convert/testfiles/h5fc_ext2_if.h5bin8062 -> 8373 bytes
-rw-r--r--tools/test/h5format_convert/testfiles/h5fc_ext2_is.ddl6
-rw-r--r--tools/test/h5format_convert/testfiles/h5fc_ext2_is.h5bin8178 -> 8186 bytes
-rw-r--r--tools/test/h5format_convert/testfiles/h5fc_ext2_sf.ddl6
-rw-r--r--tools/test/h5format_convert/testfiles/h5fc_ext2_sf.h5bin6612 -> 8246 bytes
-rw-r--r--tools/test/h5format_convert/testfiles/h5fc_ext3_isf.ddl6
-rw-r--r--tools/test/h5format_convert/testfiles/h5fc_ext3_isf.h5bin8215 -> 6642 bytes
-rw-r--r--tools/test/h5format_convert/testfiles/h5fc_ext_none.h5bin8010 -> 8014 bytes
-rw-r--r--tools/test/h5format_convert/testfiles/h5fc_non_v3.h5bin6818 -> 7010 bytes
-rw-r--r--tools/test/h5format_convert/testfiles/old_h5fc_ext1_f.ddl6
-rw-r--r--tools/test/h5format_convert/testfiles/old_h5fc_ext1_f.h5bin19987 -> 28672 bytes
-rw-r--r--tools/test/h5format_convert/testfiles/old_h5fc_ext1_i.ddl6
-rw-r--r--tools/test/h5format_convert/testfiles/old_h5fc_ext1_i.h5bin32716 -> 32716 bytes
-rw-r--r--tools/test/h5format_convert/testfiles/old_h5fc_ext1_s.ddl6
-rw-r--r--tools/test/h5format_convert/testfiles/old_h5fc_ext1_s.h5bin20032 -> 20032 bytes
-rw-r--r--tools/test/h5format_convert/testfiles/old_h5fc_ext2_if.ddl6
-rw-r--r--tools/test/h5format_convert/testfiles/old_h5fc_ext2_if.h5bin32720 -> 32984 bytes
-rw-r--r--tools/test/h5format_convert/testfiles/old_h5fc_ext2_is.ddl6
-rw-r--r--tools/test/h5format_convert/testfiles/old_h5fc_ext2_is.h5bin32872 -> 32872 bytes
-rw-r--r--tools/test/h5format_convert/testfiles/old_h5fc_ext2_sf.ddl6
-rw-r--r--tools/test/h5format_convert/testfiles/old_h5fc_ext2_sf.h5bin18512 -> 20096 bytes
-rw-r--r--tools/test/h5format_convert/testfiles/old_h5fc_ext3_isf.ddl6
-rw-r--r--tools/test/h5format_convert/testfiles/old_h5fc_ext3_isf.h5bin32896 -> 31344 bytes
-rw-r--r--tools/test/h5format_convert/testfiles/old_h5fc_ext_none.h5bin19912 -> 19912 bytes
-rw-r--r--tools/test/h5format_convert/testh5fc.sh.in13
-rw-r--r--tools/test/h5import/CMakeLists.txt2
-rw-r--r--tools/test/h5import/CMakeTests.cmake55
-rw-r--r--tools/test/h5import/Makefile.am10
-rw-r--r--tools/test/h5import/h5importtest.c10
-rw-r--r--tools/test/h5import/h5importtestutil.sh.in10
-rw-r--r--tools/test/h5jam/CMakeLists.txt4
-rw-r--r--tools/test/h5jam/CMakeTests.cmake79
-rw-r--r--tools/test/h5jam/Makefile.am10
-rw-r--r--tools/test/h5jam/getub.c10
-rw-r--r--tools/test/h5jam/h5jamgentest.c10
-rw-r--r--tools/test/h5jam/tellub.c10
-rw-r--r--tools/test/h5jam/testh5jam.sh.in448
-rw-r--r--tools/test/h5ls/CMakeLists.txt30
-rw-r--r--tools/test/h5ls/CMakeTests.cmake92
-rw-r--r--tools/test/h5ls/CMakeTestsVDS.cmake25
-rw-r--r--tools/test/h5ls/Makefile.am31
-rw-r--r--tools/test/h5ls/dynlib_ls.c89
-rw-r--r--tools/test/h5ls/h5ls_plugin.sh.in253
-rw-r--r--tools/test/h5ls/testh5ls.sh.in75
-rw-r--r--tools/test/h5ls/testh5lsvds.sh.in44
-rw-r--r--tools/test/h5repack/CMakeLists.txt2
-rw-r--r--tools/test/h5repack/CMakeTests.cmake221
-rw-r--r--tools/test/h5repack/Makefile.am34
-rw-r--r--tools/test/h5repack/dynlib_rpk.c10
-rw-r--r--tools/test/h5repack/dynlib_vrpk.c10
-rw-r--r--tools/test/h5repack/h5repack.sh.in84
-rw-r--r--tools/test/h5repack/h5repack_plugin.sh.in13
-rw-r--r--tools/test/h5repack/h5repacktst.c479
-rw-r--r--tools/test/h5repack/testfiles/4_vds.h5-vds_conti-v.ddl2
-rw-r--r--tools/test/h5repack/testfiles/crtorder.tordergr.h5.ddl36
-rw-r--r--tools/test/h5repack/testfiles/h5repack-help.txt43
-rw-r--r--tools/test/h5repack/testfiles/h5repack_layout.h5-plugin_version_test.ddl14
-rw-r--r--tools/test/h5repack/testh5repack_detect_szip.c10
-rw-r--r--tools/test/h5stat/CMakeLists.txt4
-rw-r--r--tools/test/h5stat/CMakeTests.cmake33
-rw-r--r--tools/test/h5stat/Makefile.am10
-rw-r--r--tools/test/h5stat/h5stat_gentest.c12
-rw-r--r--tools/test/h5stat/testfiles/h5stat_filters.ddl4
-rw-r--r--tools/test/h5stat/testfiles/h5stat_idx.ddl8
-rw-r--r--tools/test/h5stat/testfiles/h5stat_idx.h5bin2206 -> 2158 bytes
-rw-r--r--tools/test/h5stat/testfiles/h5stat_links2.ddl4
-rw-r--r--tools/test/h5stat/testfiles/h5stat_newgrat.ddl10
-rw-r--r--tools/test/h5stat/testfiles/h5stat_newgrat.h5bin6362168 -> 6362235 bytes
-rw-r--r--tools/test/h5stat/testfiles/h5stat_numattrs1.ddl3
-rw-r--r--tools/test/h5stat/testfiles/h5stat_numattrs2.ddl4
-rw-r--r--tools/test/h5stat/testfiles/h5stat_tsohm.ddl4
-rw-r--r--tools/test/h5stat/testh5stat.sh.in70
-rw-r--r--tools/test/misc/CMakeLists.txt21
-rw-r--r--tools/test/misc/CMakeTests.cmake340
-rw-r--r--tools/test/misc/CMakeTestsClear.cmake264
-rw-r--r--tools/test/misc/CMakeTestsMkgrp.cmake199
-rw-r--r--tools/test/misc/CMakeTestsRepart.cmake96
-rw-r--r--tools/test/misc/Makefile.am12
-rw-r--r--tools/test/misc/clear_open_chk.c43
-rw-r--r--tools/test/misc/h5clear_gentest.c233
-rw-r--r--tools/test/misc/h5perf_gentest.c11
-rw-r--r--tools/test/misc/h5repart_gentest.c10
-rw-r--r--tools/test/misc/repart_test.c10
-rw-r--r--tools/test/misc/talign.c49
-rw-r--r--tools/test/misc/testfiles/h5clear_log_v3.h5 (renamed from tools/test/h5diff/testfiles/h5diff_dset_idx2.h5)bin2206 -> 2048 bytes
-rw-r--r--tools/test/misc/testfiles/h5clear_mdc_image.h5bin0 -> 23467 bytes
-rw-r--r--tools/test/misc/testfiles/h5clear_missing_file.ddl15
-rw-r--r--tools/test/misc/testfiles/h5clear_no_mdc_image.ddl1
-rw-r--r--tools/test/misc/testfiles/h5clear_open_fail.ddl1
-rw-r--r--tools/test/misc/testfiles/h5clear_sec2_v0.h5bin0 -> 800 bytes
-rw-r--r--tools/test/misc/testfiles/h5clear_sec2_v2.h5bin0 -> 830 bytes
-rw-r--r--tools/test/misc/testfiles/h5clear_sec2_v3.h5bin0 -> 195 bytes
-rw-r--r--tools/test/misc/testfiles/h5clear_usage.ddl14
-rw-r--r--tools/test/misc/testfiles/latest_h5clear_log_v3.h5bin0 -> 2048 bytes
-rw-r--r--tools/test/misc/testfiles/latest_h5clear_sec2_v3.h5bin0 -> 195 bytes
-rw-r--r--tools/test/misc/testfiles/mod_h5clear_mdc_image.h5bin0 -> 22048 bytes
-rw-r--r--tools/test/misc/testh5clear.sh.in310
-rw-r--r--tools/test/misc/testh5mkgrp.sh.in42
-rw-r--r--tools/test/misc/testh5repart.sh.in12
-rw-r--r--tools/test/misc/vds/CMakeLists.txt6
-rw-r--r--tools/test/misc/vds/Makefile.am10
-rw-r--r--tools/test/misc/vds/UC_1.h10
-rw-r--r--tools/test/misc/vds/UC_1_one_dim_gen.c10
-rw-r--r--tools/test/misc/vds/UC_2.h10
-rw-r--r--tools/test/misc/vds/UC_2_two_dims_gen.c10
-rw-r--r--tools/test/misc/vds/UC_3.h10
-rw-r--r--tools/test/misc/vds/UC_3_gaps_gen.c10
-rw-r--r--tools/test/misc/vds/UC_4.h10
-rw-r--r--tools/test/misc/vds/UC_4_printf_gen.c10
-rw-r--r--tools/test/misc/vds/UC_5.h10
-rw-r--r--tools/test/misc/vds/UC_5_stride_gen.c10
-rw-r--r--tools/test/misc/vds/UC_common.h10
-rw-r--r--tools/test/perform/CMakeLists.txt8
-rw-r--r--tools/test/perform/CMakeTests.cmake122
-rw-r--r--tools/test/perform/COPYING11
-rw-r--r--tools/test/perform/Makefile.am10
-rw-r--r--tools/test/perform/build_h5perf_alone.sh10
-rw-r--r--tools/test/perform/build_h5perf_serial_alone.sh10
-rw-r--r--tools/test/perform/chunk.c10
-rw-r--r--tools/test/perform/gen_report.pl10
-rw-r--r--tools/test/perform/iopipe.c10
-rw-r--r--tools/test/perform/overhead.c10
-rw-r--r--tools/test/perform/perf.c10
-rw-r--r--tools/test/perform/perf_meta.c10
-rw-r--r--tools/test/perform/pio_engine.c10
-rw-r--r--tools/test/perform/pio_perf.c10
-rw-r--r--tools/test/perform/pio_perf.h10
-rw-r--r--tools/test/perform/pio_standalone.c10
-rw-r--r--tools/test/perform/pio_standalone.h19
-rw-r--r--tools/test/perform/sio_engine.c21
-rw-r--r--tools/test/perform/sio_perf.c40
-rw-r--r--tools/test/perform/sio_perf.h12
-rw-r--r--tools/test/perform/sio_standalone.c10
-rw-r--r--tools/test/perform/sio_standalone.h19
-rw-r--r--tools/test/perform/zip_perf.c10
-rw-r--r--tools/testfiles/file_space.ddl6
-rw-r--r--tools/testfiles/file_space.h5bin792 -> 808 bytes
-rw-r--r--tools/testfiles/tbitfields_be.h5.xml103
-rw-r--r--tools/testfiles/tbitfields_le.h5.xml (renamed from tools/testfiles/tbitfields.h5.xml)0
-rw-r--r--tools/testfiles/tbitnopaque.h5bin8240 -> 8240 bytes
-rw-r--r--tools/testfiles/tbitnopaque_be.ddl293
-rw-r--r--tools/testfiles/tbitnopaque_le.ddl (renamed from tools/testfiles/tbitnopaque.ddl)32
-rw-r--r--tools/testfiles/tboot1.ddl6
-rw-r--r--tools/testfiles/tboot2.ddl6
-rw-r--r--tools/testfiles/tboot2A.ddl6
-rw-r--r--tools/testfiles/tboot2B.ddl6
-rw-r--r--tools/testfiles/tdset_idx.ddl61
-rw-r--r--tools/testfiles/tudfilter.ddl30
-rw-r--r--tools/testfiles/tudfilter.h5bin0 -> 4816 bytes
-rw-r--r--tools/testfiles/tudfilter.ls16
1797 files changed, 116856 insertions, 44817 deletions
diff --git a/.autom4te.cfg b/.autom4te.cfg
index 7b4dd6b..e7c2ec1 100644
--- a/.autom4te.cfg
+++ b/.autom4te.cfg
@@ -4,12 +4,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
## ------------------ ##
## User Preferences. ##
diff --git a/.gitattributes b/.gitattributes
index d18f9b9..3c7dae1 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -726,7 +726,8 @@ tools/testfiles/tattrreg.h5 -text
tools/testfiles/tbigdims.h5 -text
tools/testfiles/tbinary.h5 -text
tools/testfiles/tbitfields.h5 -text
-tools/testfiles/tbitnopaque.ddl -text
+tools/testfiles/tbitnopaque_be.ddl -text
+tools/testfiles/tbitnopaque_le.ddl -text
tools/testfiles/tbitnopaque.h5 -text svneol=unset#application/x-hdf
tools/testfiles/tchar.h5 -text
tools/testfiles/tcmpdattrintsize.h5 -text
diff --git a/.gitignore b/.gitignore
index 7424325..3caf16a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -40,3 +40,4 @@ src/H5config.h.in
src/H5overflow.h
src/H5version.h
+/.classpath
diff --git a/.h5chkright.ini b/.h5chkright.ini
index 10daeaf..1010dce 100644
--- a/.h5chkright.ini
+++ b/.h5chkright.ini
@@ -4,12 +4,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
# Initialization files for the Copyright Checker, chkcopyright.
# Each line is a keyword for action and the rest are values.
diff --git a/CMakeFilters.cmake b/CMakeFilters.cmake
index b7aa77d..9b66d95 100644
--- a/CMakeFilters.cmake
+++ b/CMakeFilters.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
include (ExternalProject)
#option (HDF5_ALLOW_EXTERNAL_SUPPORT "Allow External Library Building (NO GIT SVN TGZ)" "NO")
@@ -17,14 +28,14 @@ if (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MAT
elseif (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ")
if (NOT TGZPATH)
set (TGZPATH ${HDF5_SOURCE_DIR})
- endif (NOT TGZPATH)
+ endif ()
set (ZLIB_URL ${TGZPATH}/${ZLIB_TGZ_NAME})
set (SZIP_URL ${TGZPATH}/${SZIP_TGZ_NAME})
- else (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT")
+ else ()
set (ZLIB_USE_EXTERNAL 0)
set (SZIP_USE_EXTERNAL 0)
- endif (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT")
-endif (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "SVN" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ")
+ endif ()
+endif ()
#-----------------------------------------------------------------------------
# Option for ZLib support
@@ -39,9 +50,9 @@ if (HDF5_ENABLE_Z_LIB_SUPPORT)
if (ZLIB_FOUND)
set (LINK_LIBS ${LINK_LIBS} ${ZLIB_LIBRARIES})
set (LINK_SHARED_LIBS ${LINK_SHARED_LIBS} ${ZLIB_LIBRARIES})
- endif (ZLIB_FOUND)
- endif (NOT ZLIB_FOUND)
- endif (NOT ZLIB_USE_EXTERNAL)
+ endif ()
+ endif ()
+ endif ()
if (ZLIB_FOUND)
set (H5_HAVE_FILTER_DEFLATE 1)
set (H5_HAVE_ZLIB_H 1)
@@ -49,33 +60,33 @@ if (HDF5_ENABLE_Z_LIB_SUPPORT)
set (H5_ZLIB_HEADER "zlib.h")
set (ZLIB_INCLUDE_DIR_GEN ${ZLIB_INCLUDE_DIR})
set (ZLIB_INCLUDE_DIRS ${ZLIB_INCLUDE_DIRS} ${ZLIB_INCLUDE_DIR})
- else (ZLIB_FOUND)
+ else ()
if (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "SVN" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ")
EXTERNAL_ZLIB_LIBRARY (${HDF5_ALLOW_EXTERNAL_SUPPORT})
set (H5_HAVE_FILTER_DEFLATE 1)
set (H5_HAVE_ZLIB_H 1)
set (H5_HAVE_LIBZ 1)
message (STATUS "Filter ZLIB is built")
- else (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "SVN" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ")
+ else ()
message (FATAL_ERROR " ZLib is Required for ZLib support in HDF5")
- endif (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "SVN" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ")
- endif (ZLIB_FOUND)
- else (NOT H5_ZLIB_HEADER)
+ endif ()
+ endif ()
+ else ()
# This project is being called from within another and ZLib is already configured
set (H5_HAVE_FILTER_DEFLATE 1)
set (H5_HAVE_ZLIB_H 1)
set (H5_HAVE_LIBZ 1)
- endif (NOT H5_ZLIB_HEADER)
+ endif ()
if (H5_HAVE_FILTER_DEFLATE)
set (EXTERNAL_FILTERS "${EXTERNAL_FILTERS} DEFLATE")
- endif (H5_HAVE_FILTER_DEFLATE)
+ endif ()
if (BUILD_SHARED_LIBS)
set (LINK_SHARED_LIBS ${LINK_SHARED_LIBS} ${ZLIB_SHARED_LIBRARY})
- endif (BUILD_SHARED_LIBS)
+ endif ()
set (LINK_LIBS ${LINK_LIBS} ${ZLIB_STATIC_LIBRARY})
INCLUDE_DIRECTORIES (${ZLIB_INCLUDE_DIRS})
message (STATUS "Filter ZLIB is ON")
-endif (HDF5_ENABLE_Z_LIB_SUPPORT)
+endif ()
#-----------------------------------------------------------------------------
# Option for SzLib support
@@ -90,37 +101,37 @@ if (HDF5_ENABLE_SZIP_SUPPORT)
if (SZIP_FOUND)
set (LINK_LIBS ${LINK_LIBS} ${SZIP_LIBRARIES})
set (LINK_SHARED_LIBS ${LINK_SHARED_LIBS} ${SZIP_LIBRARIES})
- endif (SZIP_FOUND)
- endif (NOT SZIP_FOUND)
- endif (NOT SZIP_USE_EXTERNAL)
+ endif ()
+ endif ()
+ endif ()
if (SZIP_FOUND)
set (H5_HAVE_FILTER_SZIP 1)
set (H5_HAVE_SZLIB_H 1)
set (H5_HAVE_LIBSZ 1)
set (SZIP_INCLUDE_DIR_GEN ${SZIP_INCLUDE_DIR})
set (SZIP_INCLUDE_DIRS ${SZIP_INCLUDE_DIRS} ${SZIP_INCLUDE_DIR})
- else (SZIP_FOUND)
+ else ()
if (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "SVN" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ")
EXTERNAL_SZIP_LIBRARY (${HDF5_ALLOW_EXTERNAL_SUPPORT} ${HDF5_ENABLE_SZIP_ENCODING})
set (H5_HAVE_FILTER_SZIP 1)
set (H5_HAVE_SZLIB_H 1)
set (H5_HAVE_LIBSZ 1)
message (STATUS "Filter SZIP is built")
- else (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "SVN" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ")
+ else ()
message (FATAL_ERROR "SZIP is Required for SZIP support in HDF5")
- endif (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "SVN" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ")
- endif (SZIP_FOUND)
+ endif ()
+ endif ()
if (BUILD_SHARED_LIBS)
set (LINK_SHARED_LIBS ${LINK_SHARED_LIBS} ${SZIP_SHARED_LIBRARY})
- endif (BUILD_SHARED_LIBS)
+ endif ()
set (LINK_LIBS ${LINK_LIBS} ${SZIP_STATIC_LIBRARY})
INCLUDE_DIRECTORIES (${SZIP_INCLUDE_DIRS})
message (STATUS "Filter SZIP is ON")
if (H5_HAVE_FILTER_SZIP)
set (EXTERNAL_FILTERS "${EXTERNAL_FILTERS} DECODE")
- endif (H5_HAVE_FILTER_SZIP)
+ endif ()
if (HDF5_ENABLE_SZIP_ENCODING)
set (H5_HAVE_SZIP_ENCODER 1)
set (EXTERNAL_FILTERS "${EXTERNAL_FILTERS} ENCODE")
- endif (HDF5_ENABLE_SZIP_ENCODING)
-endif (HDF5_ENABLE_SZIP_SUPPORT)
+ endif ()
+endif ()
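
Note: the CMakeFilters.cmake hunks above (and most of the CMake changes in this patch) apply one mechanical cleanup: the legacy style that repeats the controlling condition inside else(), endif(), endforeach() and similar closers is replaced with empty parentheses. The repeated arguments are optional in any CMake new enough to build this tree, and dropping them means the closers no longer have to be kept in sync with their if() lines by hand. A minimal before/after sketch of the pattern (placeholder names, not lines taken from the patch):

    # legacy style: the condition is repeated and must track the if() manually
    if (MYLIB_FOUND)
      set (LINK_LIBS ${LINK_LIBS} ${MYLIB_LIBRARIES})
    else (MYLIB_FOUND)
      message (FATAL_ERROR "MYLIB is required")
    endif (MYLIB_FOUND)

    # style used throughout this patch: empty parentheses on the closers
    if (MYLIB_FOUND)
      set (LINK_LIBS ${LINK_LIBS} ${MYLIB_LIBRARIES})
    else ()
      message (FATAL_ERROR "MYLIB is required")
    endif ()
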
diff --git a/CMakeInstallation.cmake b/CMakeInstallation.cmake
index 65d2222..70638d6 100644
--- a/CMakeInstallation.cmake
+++ b/CMakeInstallation.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
include (CMakePackageConfigHelpers)
#-----------------------------------------------------------------------------
@@ -8,9 +19,9 @@ if (WIN32)
find_program (NSIS_EXECUTABLE NSIS.exe PATHS "$ENV{ProgramFiles}\\NSIS" "$ENV{ProgramFiles${PF_ENV_EXT}}\\NSIS")
if(NOT CPACK_WIX_ROOT)
file(TO_CMAKE_PATH "$ENV{WIX}" CPACK_WIX_ROOT)
- endif()
+ endif ()
find_program (WIX_EXECUTABLE candle PATHS "${CPACK_WIX_ROOT}/bin")
-endif (WIN32)
+endif ()
#-----------------------------------------------------------------------------
# Add file(s) to CMake Install
@@ -21,31 +32,33 @@ if (NOT HDF5_INSTALL_NO_DEVELOPMENT)
DESTINATION ${HDF5_INSTALL_INCLUDE_DIR}
COMPONENT headers
)
-endif (NOT HDF5_INSTALL_NO_DEVELOPMENT)
+endif ()
#-----------------------------------------------------------------------------
# Add Target(s) to CMake Install for import into other projects
#-----------------------------------------------------------------------------
if (NOT HDF5_EXTERNALLY_CONFIGURED)
- install (
- EXPORT ${HDF5_EXPORTED_TARGETS}
- DESTINATION ${HDF5_INSTALL_CMAKE_DIR}
- FILE ${HDF5_PACKAGE}${HDF_PACKAGE_EXT}-targets.cmake
- NAMESPACE ${HDF5_PACKAGE}::
- COMPONENT configinstall
- )
-endif (NOT HDF5_EXTERNALLY_CONFIGURED)
-
-#-----------------------------------------------------------------------------
-# Export all exported targets to the build tree for use by parent project
-#-----------------------------------------------------------------------------
-if (NOT HDF5_EXTERNALLY_CONFIGURED)
- export (
- TARGETS ${HDF5_LIBRARIES_TO_EXPORT} ${HDF5_LIB_DEPENDENCIES} ${HDF5_UTILS_TO_EXPORT}
- FILE ${HDF5_PACKAGE}${HDF_PACKAGE_EXT}-targets.cmake
- NAMESPACE ${HDF5_PACKAGE}::
- )
-endif (NOT HDF5_EXTERNALLY_CONFIGURED)
+ if (HDF5_EXPORTED_TARGETS)
+ install (
+ EXPORT ${HDF5_EXPORTED_TARGETS}
+ DESTINATION ${HDF5_INSTALL_CMAKE_DIR}
+ FILE ${HDF5_PACKAGE}${HDF_PACKAGE_EXT}-targets.cmake
+ NAMESPACE ${HDF5_PACKAGE}::
+ COMPONENT configinstall
+ )
+ endif ()
+
+ #-----------------------------------------------------------------------------
+ # Export all exported targets to the build tree for use by parent project
+ #-----------------------------------------------------------------------------
+ if (NOT HDF5_EXTERNALLY_CONFIGURED)
+ export (
+ TARGETS ${HDF5_LIBRARIES_TO_EXPORT} ${HDF5_LIB_DEPENDENCIES} ${HDF5_UTILS_TO_EXPORT}
+ FILE ${HDF5_PACKAGE}${HDF_PACKAGE_EXT}-targets.cmake
+ NAMESPACE ${HDF5_PACKAGE}::
+ )
+ endif ()
+endif ()
#-----------------------------------------------------------------------------
# Set includes needed for build
@@ -95,7 +108,7 @@ if (NOT HDF5_EXTERNALLY_CONFIGURED)
DESTINATION ${HDF5_INSTALL_CMAKE_DIR}
COMPONENT configinstall
)
-endif (NOT HDF5_EXTERNALLY_CONFIGURED)
+endif ()
#-----------------------------------------------------------------------------
# Configure the hdf5-config-version .cmake file for the install directory
@@ -110,16 +123,16 @@ if (NOT HDF5_EXTERNALLY_CONFIGURED)
DESTINATION ${HDF5_INSTALL_CMAKE_DIR}
COMPONENT configinstall
)
-endif (NOT HDF5_EXTERNALLY_CONFIGURED)
+endif ()
#-----------------------------------------------------------------------------
# Configure the libhdf5.settings file for the lib info
#-----------------------------------------------------------------------------
if (H5_WORDS_BIGENDIAN)
set (BYTESEX big-endian)
-else (H5_WORDS_BIGENDIAN)
+else ()
set (BYTESEX little-endian)
-endif (H5_WORDS_BIGENDIAN)
+endif ()
configure_file (
${HDF_RESOURCES_DIR}/libhdf5.settings.cmake.in
${HDF5_BINARY_DIR}/libhdf5.settings @ONLY
@@ -135,10 +148,10 @@ install (
#-----------------------------------------------------------------------------
#foreach (libs ${LINK_LIBS})
# set (LIBS "${LIBS} -l${libs}")
-#endforeach (libs ${LINK_LIBS})
+#endforeach ()
#foreach (libs ${HDF5_LIBRARIES_TO_EXPORT})
# set (HDF5LIBS "${HDF5LIBS} -l${libs}")
-#endforeach (libs ${HDF5_LIBRARIES_TO_EXPORT})
+#endforeach ()
#configure_file (
# ${HDF_RESOURCES_DIR}/libhdf5.pc.in
# ${HDF5_BINARY_DIR}/CMakeFiles/libhdf5.pc @ONLY
@@ -172,14 +185,26 @@ if (HDF5_PACK_EXAMPLES)
USE_SOURCE_PERMISSIONS
COMPONENT hdfdocuments
)
- install (
- FILES
- ${HDF5_SOURCE_DIR}/release_docs/USING_CMake_Examples.txt
- DESTINATION ${HDF5_INSTALL_DATA_DIR}
- COMPONENT hdfdocuments
- )
- endif (EXISTS "${HDF5_EXAMPLES_COMPRESSED_DIR}/${HDF5_EXAMPLES_COMPRESSED}")
-endif (HDF5_PACK_EXAMPLES)
+ endif ()
+ install (
+ FILES
+ ${HDF5_SOURCE_DIR}/release_docs/USING_CMake_Examples.txt
+ DESTINATION ${HDF5_INSTALL_DATA_DIR}
+ COMPONENT hdfdocuments
+ )
+ install (
+ FILES
+ ${HDF_RESOURCES_DIR}/CTestScript.cmake
+ DESTINATION ${HDF5_INSTALL_DATA_DIR}
+ COMPONENT hdfdocuments
+ )
+ install (
+ FILES
+ ${HDF_RESOURCES_DIR}/HDF5_Examples_options.cmake
+ DESTINATION ${HDF5_INSTALL_DATA_DIR}
+ COMPONENT hdfdocuments
+ )
+endif ()
#-----------------------------------------------------------------------------
# Configure the README.txt file for the binary package
@@ -191,7 +216,7 @@ HDF_README_PROPERTIES(HDF5_BUILD_FORTRAN)
#-----------------------------------------------------------------------------
if (WIN32)
configure_file (${HDF5_SOURCE_DIR}/COPYING ${HDF5_BINARY_DIR}/COPYING.txt @ONLY)
-endif (WIN32)
+endif ()
#-----------------------------------------------------------------------------
# Add Document File(s) to CMake Install
@@ -214,7 +239,7 @@ if (NOT HDF5_EXTERNALLY_CONFIGURED)
${release_files}
${HDF5_SOURCE_DIR}/release_docs/USING_HDF5_VS.txt
)
- endif (WIN32)
+ endif ()
if (HDF5_PACK_INSTALL_DOCS)
set (release_files
${release_files}
@@ -227,40 +252,40 @@ if (NOT HDF5_EXTERNALLY_CONFIGURED)
${release_files}
${HDF5_SOURCE_DIR}/release_docs/INSTALL_Windows.txt
)
- endif (WIN32)
+ endif ()
if (CYGWIN)
set (release_files
${release_files}
${HDF5_SOURCE_DIR}/release_docs/INSTALL_Cygwin.txt
)
- endif (CYGWIN)
+ endif ()
if (HDF5_ENABLE_PARALLEL)
set (release_files
${release_files}
${HDF5_SOURCE_DIR}/release_docs/INSTALL_parallel
)
- endif (HDF5_ENABLE_PARALLEL)
- endif (HDF5_PACK_INSTALL_DOCS)
+ endif ()
+ endif ()
install (
FILES ${release_files}
DESTINATION ${HDF5_INSTALL_DATA_DIR}
COMPONENT hdfdocuments
)
- endif (EXISTS "${HDF5_SOURCE_DIR}/release_docs" AND IS_DIRECTORY "${HDF5_SOURCE_DIR}/release_docs")
-endif (NOT HDF5_EXTERNALLY_CONFIGURED)
+ endif ()
+endif ()
if (CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
if (CMAKE_HOST_UNIX)
set (CMAKE_INSTALL_PREFIX "${CMAKE_INSTALL_PREFIX}/HDF_Group/${HDF5_PACKAGE_NAME}/${HDF5_PACKAGE_VERSION}"
CACHE PATH "Install path prefix, prepended onto install directories." FORCE)
- else (CMAKE_HOST_UNIX)
+ else ()
GetDefaultWindowsPrefixBase(CMAKE_GENERIC_PROGRAM_FILES)
set (CMAKE_INSTALL_PREFIX
"${CMAKE_GENERIC_PROGRAM_FILES}/HDF_Group/${HDF5_PACKAGE_NAME}/${HDF5_PACKAGE_VERSION}"
CACHE PATH "Install path prefix, prepended onto install directories." FORCE)
set (CMAKE_GENERIC_PROGRAM_FILES)
- endif (CMAKE_HOST_UNIX)
-endif (CMAKE_INSTALL_PREFIX_INITIALIZED_TO_DEFAULT)
+ endif ()
+endif ()
#-----------------------------------------------------------------------------
# Set the cpack variables
@@ -270,9 +295,9 @@ if (NOT HDF5_EXTERNALLY_CONFIGURED AND NOT HDF5_NO_PACKAGES)
set (CPACK_PACKAGE_NAME "${HDF5_PACKAGE_NAME}")
if (CDASH_LOCAL)
set (CPACK_PACKAGE_VERSION "${HDF5_PACKAGE_VERSION}")
- else (CDASH_LOCAL)
+ else ()
set (CPACK_PACKAGE_VERSION "${HDF5_PACKAGE_VERSION_STRING}")
- endif (CDASH_LOCAL)
+ endif ()
set (CPACK_PACKAGE_VERSION_MAJOR "${HDF5_PACKAGE_VERSION_MAJOR}")
set (CPACK_PACKAGE_VERSION_MINOR "${HDF5_PACKAGE_VERSION_MINOR}")
set (CPACK_PACKAGE_VERSION_PATCH "")
@@ -280,13 +305,13 @@ if (NOT HDF5_EXTERNALLY_CONFIGURED AND NOT HDF5_NO_PACKAGES)
set (CPACK_PACKAGE_DESCRIPTION_FILE "${CMAKE_CURRENT_SOURCE_DIR}/release_docs/RELEASE.txt")
set (CPACK_RESOURCE_FILE_LICENSE "${CMAKE_CURRENT_SOURCE_DIR}/release_docs/COPYING")
set (CPACK_RESOURCE_FILE_README "${CMAKE_CURRENT_SOURCE_DIR}/release_docs/RELEASE.txt")
- endif (EXISTS "${HDF5_SOURCE_DIR}/release_docs")
+ endif ()
set (CPACK_PACKAGE_RELOCATABLE TRUE)
if (OVERRIDE_INSTALL_VERSION)
set (CPACK_PACKAGE_INSTALL_DIRECTORY "${CPACK_PACKAGE_VENDOR}/${CPACK_PACKAGE_NAME}/${OVERRIDE_INSTALL_VERSION}")
- else (OVERRIDE_INSTALL_VERSION)
+ else ()
set (CPACK_PACKAGE_INSTALL_DIRECTORY "${CPACK_PACKAGE_VENDOR}/${CPACK_PACKAGE_NAME}/${CPACK_PACKAGE_VERSION}")
- endif (OVERRIDE_INSTALL_VERSION)
+ endif ()
set (CPACK_PACKAGE_ICON "${HDF_RESOURCES_EXT_DIR}/hdf.bmp")
set (CPACK_GENERATOR "TGZ")
@@ -295,7 +320,7 @@ if (NOT HDF5_EXTERNALLY_CONFIGURED AND NOT HDF5_NO_PACKAGES)
if (NSIS_EXECUTABLE)
list (APPEND CPACK_GENERATOR "NSIS")
- endif (NSIS_EXECUTABLE)
+ endif ()
# Installers for 32- vs. 64-bit CMake:
# - Root install directory (displayed to end user at installer-run time)
# - "NSIS package/display name" (text used in the installer GUI)
@@ -304,10 +329,10 @@ if (NOT HDF5_EXTERNALLY_CONFIGURED AND NOT HDF5_NO_PACKAGES)
if (CMAKE_CL_64)
set (CPACK_NSIS_INSTALL_ROOT "$PROGRAMFILES64")
set (CPACK_PACKAGE_INSTALL_REGISTRY_KEY "${CPACK_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION} (Win64)")
- else (CMAKE_CL_64)
+ else ()
set (CPACK_NSIS_INSTALL_ROOT "$PROGRAMFILES")
set (CPACK_PACKAGE_INSTALL_REGISTRY_KEY "${CPACK_PACKAGE_NAME}-${CPACK_PACKAGE_VERSION}")
- endif (CMAKE_CL_64)
+ endif ()
# set the install/unistall icon used for the installer itself
# There is a bug in NSI that does not handle full unix paths properly.
set (CPACK_NSIS_MUI_ICON "${HDF_RESOURCES_EXT_DIR}\\\\hdf.ico")
@@ -317,15 +342,15 @@ if (NOT HDF5_EXTERNALLY_CONFIGURED AND NOT HDF5_NO_PACKAGES)
set (CPACK_NSIS_DISPLAY_NAME "${CPACK_NSIS_PACKAGE_NAME}")
if (OVERRIDE_INSTALL_VERSION)
set (CPACK_PACKAGE_INSTALL_DIRECTORY "${CPACK_PACKAGE_VENDOR}\\\\${CPACK_PACKAGE_NAME}\\\\${OVERRIDE_INSTALL_VERSION}")
- else (OVERRIDE_INSTALL_VERSION)
+ else ()
set (CPACK_PACKAGE_INSTALL_DIRECTORY "${CPACK_PACKAGE_VENDOR}\\\\${CPACK_PACKAGE_NAME}\\\\${CPACK_PACKAGE_VERSION}")
- endif (OVERRIDE_INSTALL_VERSION)
+ endif ()
set (CPACK_NSIS_CONTACT "${HDF5_PACKAGE_BUGREPORT}")
set (CPACK_NSIS_MODIFY_PATH ON)
if (WIX_EXECUTABLE)
list (APPEND CPACK_GENERATOR "WIX")
- endif (WIX_EXECUTABLE)
+ endif ()
#WiX variables
set (CPACK_WIX_UNINSTALL "1")
# .. variable:: CPACK_WIX_LICENSE_RTF
@@ -368,7 +393,7 @@ if (NOT HDF5_EXTERNALLY_CONFIGURED AND NOT HDF5_NO_PACKAGES)
set(CPACK_WIX_PROPERTY_ARPHELPLINK "${HDF5_PACKAGE_BUGREPORT}")
if (BUILD_SHARED_LIBS)
set(CPACK_WIX_PATCH_FILE "${HDF_RESOURCES_DIR}/patch.xml")
- endif (BUILD_SHARED_LIBS)
+ endif ()
elseif (APPLE)
list (APPEND CPACK_GENERATOR "STGZ")
list (APPEND CPACK_GENERATOR "DragNDrop")
@@ -403,8 +428,8 @@ if (NOT HDF5_EXTERNALLY_CONFIGURED AND NOT HDF5_NO_PACKAGES)
FILES ${HDF5_BINARY_DIR}/CMakeFiles/PkgInfo
DESTINATION ..
)
- endif (HDF5_PACK_MACOSX_FRAMEWORK AND HDF5_BUILD_FRAMEWORKS)
- else (WIN32)
+ endif ()
+ else ()
list (APPEND CPACK_GENERATOR "STGZ")
set (CPACK_PACKAGING_INSTALL_PREFIX "/${CPACK_PACKAGE_INSTALL_DIRECTORY}")
set (CPACK_COMPONENTS_ALL_IN_ONE_PACKAGE ON)
@@ -442,12 +467,12 @@ The HDF5 data model, file format, API, library, and tools are open and distribut
#-----------------------------------------------------------------------------
# configure_file ("${HDF5_RESOURCES_DIR}/hdf5.spec.in" "${CMAKE_CURRENT_BINARY_DIR}/${HDF5_PACKAGE_NAME}.spec" @ONLY IMMEDIATE)
# set (CPACK_RPM_USER_BINARY_SPECFILE "${CMAKE_CURRENT_BINARY_DIR}/${HDF5_PACKAGE_NAME}.spec")
- endif (WIN32)
+ endif ()
# By default, do not warn when built on machines using only VS Express:
if (NOT DEFINED CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS_NO_WARNINGS)
set (CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS_NO_WARNINGS ON)
- endif (NOT DEFINED CMAKE_INSTALL_SYSTEM_RUNTIME_LIBS_NO_WARNINGS)
+ endif ()
include (InstallRequiredSystemLibraries)
set (CPACK_INSTALL_CMAKE_PROJECTS "${HDF5_BINARY_DIR};HDF5;ALL;/")
@@ -457,23 +482,23 @@ The HDF5 data model, file format, API, library, and tools are open and distribut
if (ZLIB_FOUND AND ZLIB_USE_EXTERNAL)
if (WIN32)
set (CPACK_INSTALL_CMAKE_PROJECTS "${CPACK_INSTALL_CMAKE_PROJECTS};${ZLIB_INCLUDE_DIR_GEN};ZLIB;ALL;/")
- else (WIN32)
+ else ()
set (CPACK_INSTALL_CMAKE_PROJECTS "${CPACK_INSTALL_CMAKE_PROJECTS};${ZLIB_INCLUDE_DIR_GEN};ZLIB;libraries;/")
set (CPACK_INSTALL_CMAKE_PROJECTS "${CPACK_INSTALL_CMAKE_PROJECTS};${ZLIB_INCLUDE_DIR_GEN};ZLIB;headers;/")
set (CPACK_INSTALL_CMAKE_PROJECTS "${CPACK_INSTALL_CMAKE_PROJECTS};${ZLIB_INCLUDE_DIR_GEN};ZLIB;configinstall;/")
- endif (WIN32)
- endif (ZLIB_FOUND AND ZLIB_USE_EXTERNAL)
+ endif ()
+ endif ()
if (SZIP_FOUND AND SZIP_USE_EXTERNAL)
if (WIN32)
set (CPACK_INSTALL_CMAKE_PROJECTS "${CPACK_INSTALL_CMAKE_PROJECTS};${SZIP_INCLUDE_DIR_GEN};SZIP;ALL;/")
- else (WIN32)
+ else ()
set (CPACK_INSTALL_CMAKE_PROJECTS "${CPACK_INSTALL_CMAKE_PROJECTS};${SZIP_INCLUDE_DIR_GEN};SZIP;libraries;/")
set (CPACK_INSTALL_CMAKE_PROJECTS "${CPACK_INSTALL_CMAKE_PROJECTS};${SZIP_INCLUDE_DIR_GEN};SZIP;headers;/")
set (CPACK_INSTALL_CMAKE_PROJECTS "${CPACK_INSTALL_CMAKE_PROJECTS};${SZIP_INCLUDE_DIR_GEN};SZIP;configinstall;/")
- endif (WIN32)
- endif (SZIP_FOUND AND SZIP_USE_EXTERNAL)
- endif (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "SVN" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ")
- endif (HDF5_PACKAGE_EXTLIBS)
+ endif ()
+ endif ()
+ endif ()
+ endif ()
include (CPack)
@@ -537,7 +562,7 @@ The HDF5 data model, file format, API, library, and tools are open and distribut
GROUP Development
INSTALL_TYPES Full Developer
)
- endif (HDF5_BUILD_FORTRAN)
+ endif ()
if (HDF5_BUILD_CPP_LIB)
cpack_add_component (cpplibraries
@@ -552,7 +577,7 @@ The HDF5 data model, file format, API, library, and tools are open and distribut
GROUP Development
INSTALL_TYPES Full Developer
)
- endif (HDF5_BUILD_CPP_LIB)
+ endif ()
if (HDF5_BUILD_TOOLS)
cpack_add_component (toolsapplications
@@ -573,7 +598,7 @@ The HDF5 data model, file format, API, library, and tools are open and distribut
GROUP Development
INSTALL_TYPES Full Developer
)
- endif (HDF5_BUILD_TOOLS)
+ endif ()
if (HDF5_BUILD_HL_LIB)
cpack_add_component (hllibraries
@@ -607,7 +632,7 @@ The HDF5 data model, file format, API, library, and tools are open and distribut
GROUP Development
INSTALL_TYPES Full Developer
)
- endif (HDF5_BUILD_CPP_LIB)
+ endif ()
if (HDF5_BUILD_FORTRAN)
cpack_add_component (hlfortlibraries
DISPLAY_NAME "HDF5 HL Fortran Libraries"
@@ -615,7 +640,7 @@ The HDF5 data model, file format, API, library, and tools are open and distribut
GROUP Runtime
INSTALL_TYPES Full Developer User
)
- endif (HDF5_BUILD_FORTRAN)
- endif (HDF5_BUILD_HL_LIB)
+ endif ()
+ endif ()
-endif (NOT HDF5_EXTERNALLY_CONFIGURED AND NOT HDF5_NO_PACKAGES)
+endif ()
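
Note: besides the same else()/endif() cleanup, the first CMakeInstallation.cmake hunk restructures target export: install(EXPORT ...) is now guarded by if (HDF5_EXPORTED_TARGETS), so the call is simply skipped when no export-set name has been provided, and the build-tree export() is folded into the same NOT HDF5_EXTERNALLY_CONFIGURED block instead of a separate one. For readers less familiar with the two-sided export idiom, a minimal sketch under assumed names (mylib and MyLib:: are placeholders, not HDF5's actual targets):

    # install-tree side: writes <prefix>/share/cmake/mylib-targets.cmake
    install (TARGETS mylib EXPORT mylib-targets DESTINATION lib)
    install (
        EXPORT mylib-targets
        DESTINATION share/cmake
        NAMESPACE MyLib::
    )

    # build-tree side: lets a parent project import the targets without installing
    export (
        TARGETS mylib
        FILE ${CMAKE_BINARY_DIR}/mylib-targets.cmake
        NAMESPACE MyLib::
    )
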
diff --git a/CMakeLists.txt b/CMakeLists.txt
index b59943e..b84cc7c 100644
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5 C CXX)
#-----------------------------------------------------------------------------
@@ -76,13 +76,13 @@ endif ()
# # Override the zlib header file
# if (VTK_USE_SYSTEM_ZLIB)
# set (H5_ZLIB_HEADER "zlib.h")
-# else (VTK_USE_SYSTEM_ZLIB)
+# else ()
# set (H5_ZLIB_HEADER "vtk_zlib.h")
# # Set vars that FindZlib would have set if used in sub project
# set (ZLIB_INCLUDE_DIRS "${VTK_ZLIB_INCLUDE_DIRS}")
# set (ZLIB_LIBRARIES vtkzlib)
-# endif (VTK_USE_SYSTEM_ZLIB)
-# endif (HDF5_ENABLE_Z_LIB_SUPPORT)
+# endif ()
+# endif ()
#
# # Add the sub project
# add_subdirectory (Utilities/hdf5-1.8)
@@ -100,7 +100,7 @@ option (HDF5_USE_FOLDERS "Enable folder grouping of projects in IDEs." ON)
mark_as_advanced (HDF5_USE_FOLDERS)
if (HDF5_USE_FOLDERS)
set_property (GLOBAL PROPERTY USE_FOLDERS ON)
-endif (HDF5_USE_FOLDERS)
+endif ()
option (HDF5_NO_PACKAGES "CPACK - Disable packaging" OFF)
mark_as_advanced (HDF5_NO_PACKAGES)
option (ALLOW_UNSUPPORTED "Allow unsupported combinations of configure options" OFF)
@@ -201,48 +201,48 @@ set (HDF5_JAVA_LOGGING_SIMPLE_JAR ${HDF5_SOURCE_DIR}/java/lib/ext/slf4j-simple-
if (APPLE)
option (HDF5_BUILD_FRAMEWORKS "TRUE to build as frameworks libraries, FALSE to build according to BUILD_SHARED_LIBS" FALSE)
-endif (APPLE)
+endif ()
if (NOT HDF5_INSTALL_BIN_DIR)
set (HDF5_INSTALL_BIN_DIR bin)
-endif (NOT HDF5_INSTALL_BIN_DIR)
+endif ()
if (NOT HDF5_INSTALL_LIB_DIR)
if (APPLE)
if (HDF5_BUILD_FRAMEWORKS)
set (HDF5_INSTALL_JAR_DIR ../Java)
- else (HDF5_BUILD_FRAMEWORKS)
+ else ()
set (HDF5_INSTALL_JAR_DIR lib)
- endif (HDF5_BUILD_FRAMEWORKS)
+ endif ()
set (HDF5_INSTALL_FMWK_DIR ${CMAKE_INSTALL_FRAMEWORK_PREFIX})
- else (APPLE)
+ else ()
set (HDF5_INSTALL_JAR_DIR lib)
- endif (APPLE)
+ endif ()
set (HDF5_INSTALL_LIB_DIR lib)
-endif (NOT HDF5_INSTALL_LIB_DIR)
+endif ()
if (NOT HDF5_INSTALL_INCLUDE_DIR)
set (HDF5_INSTALL_INCLUDE_DIR include)
-endif (NOT HDF5_INSTALL_INCLUDE_DIR)
+endif ()
if (NOT HDF5_INSTALL_DATA_DIR)
if (NOT WIN32)
if (APPLE)
if (HDF5_BUILD_FRAMEWORKS)
set (HDF5_INSTALL_EXTRA_DIR ../SharedSupport)
- else (HDF5_BUILD_FRAMEWORKS)
+ else ()
set (HDF5_INSTALL_EXTRA_DIR share)
- endif (HDF5_BUILD_FRAMEWORKS)
+ endif ()
set (HDF5_INSTALL_FWRK_DIR ${CMAKE_INSTALL_FRAMEWORK_PREFIX})
- endif (APPLE)
+ endif ()
set (HDF5_INSTALL_DATA_DIR share)
set (HDF5_INSTALL_CMAKE_DIR share/cmake)
- else (NOT WIN32)
+ else ()
set (HDF5_INSTALL_DATA_DIR ".")
set (HDF5_INSTALL_CMAKE_DIR cmake)
- endif (NOT WIN32)
-endif (NOT HDF5_INSTALL_DATA_DIR)
+ endif ()
+endif ()
-if(DEFINED ADDITIONAL_CMAKE_PREFIX_PATH AND EXISTS "${ADDITIONAL_CMAKE_PREFIX_PATH}")
+if (DEFINED ADDITIONAL_CMAKE_PREFIX_PATH AND EXISTS "${ADDITIONAL_CMAKE_PREFIX_PATH}")
set (CMAKE_PREFIX_PATH ${ADDITIONAL_CMAKE_PREFIX_PATH} ${CMAKE_PREFIX_PATH})
-endif(DEFINED ADDITIONAL_CMAKE_PREFIX_PATH AND EXISTS "${ADDITIONAL_CMAKE_PREFIX_PATH}")
+endif ()
#-----------------------------------------------------------------------------
# parse the full version number from H5public.h and include in H5_VERS_INFO
@@ -268,75 +268,75 @@ string (REGEX REPLACE ".*LT_VERS_REVISION[ \t]+=[ \t]+([0-9]*).*$"
"\\1" H5_SOVERS_MINOR ${_lt_vers_am_contents})
string (REGEX REPLACE ".*LT_VERS_AGE[ \t]+=[ \t]+([0-9]*).*$"
"\\1" H5_SOVERS_RELEASE ${_lt_vers_am_contents})
-MATH (EXPR H5_SOVERS_MAJOR ${H5_SOVERS_INTERFACE}-${H5_SOVERS_RELEASE})
+math (EXPR H5_SOVERS_MAJOR ${H5_SOVERS_INTERFACE}-${H5_SOVERS_RELEASE})
message (STATUS "SOVERSION: ${H5_SOVERS_MAJOR}.${H5_SOVERS_RELEASE}.${H5_SOVERS_MINOR}")
string (REGEX MATCH ".*LT_TOOLS_VERS_INTERFACE[ \t]+=[ \t]+([0-9]*).*$" H5_TOOLS_SOVERS_EXISTS ${_lt_vers_am_contents})
-if(H5_TOOLS_SOVERS_EXISTS)
+if (H5_TOOLS_SOVERS_EXISTS)
string (REGEX REPLACE ".*LT_TOOLS_VERS_INTERFACE[ \t]+=[ \t]+([0-9]*).*$"
"\\1" H5_TOOLS_SOVERS_INTERFACE ${_lt_vers_am_contents})
string (REGEX REPLACE ".*LT_TOOLS_VERS_REVISION[ \t]+=[ \t]+([0-9]*).*$"
"\\1" H5_TOOLS_SOVERS_MINOR ${_lt_vers_am_contents})
string (REGEX REPLACE ".*LT_TOOLS_VERS_AGE[ \t]+=[ \t]+([0-9]*).*$"
"\\1" H5_TOOLS_SOVERS_RELEASE ${_lt_vers_am_contents})
- MATH (EXPR H5_TOOLS_SOVERS_MAJOR ${H5_TOOLS_SOVERS_INTERFACE}-${H5_TOOLS_SOVERS_RELEASE})
+ math (EXPR H5_TOOLS_SOVERS_MAJOR ${H5_TOOLS_SOVERS_INTERFACE}-${H5_TOOLS_SOVERS_RELEASE})
message (STATUS "SOVERSION_TOOLS: ${H5_TOOLS_SOVERS_MAJOR}.${H5_TOOLS_SOVERS_RELEASE}.${H5_TOOLS_SOVERS_MINOR}")
-endif()
-string (REGEX REPLACE ".*LT_CXX_VERS_INTERFACE[ \t]+=[ \t]+([0-9]*).*$" H5_CXX_SOVERS_EXISTS ${_lt_vers_am_contents})
-if(H5_CXX_SOVERS_EXISTS)
+endif ()
+string (REGEX MATCH ".*LT_CXX_VERS_INTERFACE[ \t]+=[ \t]+([0-9]*).*$" H5_CXX_SOVERS_EXISTS ${_lt_vers_am_contents})
+if (H5_CXX_SOVERS_EXISTS)
string (REGEX REPLACE ".*LT_CXX_VERS_INTERFACE[ \t]+=[ \t]+([0-9]*).*$"
"\\1" H5_CXX_SOVERS_INTERFACE ${_lt_vers_am_contents})
string (REGEX REPLACE ".*LT_CXX_VERS_REVISION[ \t]+=[ \t]+([0-9]*).*$"
"\\1" H5_CXX_SOVERS_MINOR ${_lt_vers_am_contents})
string (REGEX REPLACE ".*LT_CXX_VERS_AGE[ \t]+=[ \t]+([0-9]*).*$"
"\\1" H5_CXX_SOVERS_RELEASE ${_lt_vers_am_contents})
- MATH (EXPR H5_CXX_SOVERS_MAJOR ${H5_CXX_SOVERS_INTERFACE}-${H5_CXX_SOVERS_RELEASE})
+ math (EXPR H5_CXX_SOVERS_MAJOR ${H5_CXX_SOVERS_INTERFACE}-${H5_CXX_SOVERS_RELEASE})
message (STATUS "SOVERSION_CXX: ${H5_CXX_SOVERS_MAJOR}.${H5_CXX_SOVERS_RELEASE}.${H5_CXX_SOVERS_MINOR}")
-endif()
-string (REGEX REPLACE ".*LT_F_VERS_INTERFACE[ \t]+=[ \t]+([0-9]*).*$" H5_F_SOVERS_EXISTS ${_lt_vers_am_contents})
-if(H5_F_SOVERS_EXISTS)
+endif ()
+string (REGEX MATCH ".*LT_F_VERS_INTERFACE[ \t]+=[ \t]+([0-9]*).*$" H5_F_SOVERS_EXISTS ${_lt_vers_am_contents})
+if (H5_F_SOVERS_EXISTS)
string (REGEX REPLACE ".*LT_F_VERS_INTERFACE[ \t]+=[ \t]+([0-9]*).*$"
"\\1" H5_F_SOVERS_INTERFACE ${_lt_vers_am_contents})
string (REGEX REPLACE ".*LT_F_VERS_REVISION[ \t]+=[ \t]+([0-9]*).*$"
"\\1" H5_F_SOVERS_MINOR ${_lt_vers_am_contents})
string (REGEX REPLACE ".*LT_F_VERS_AGE[ \t]+=[ \t]+([0-9]*).*$"
"\\1" H5_F_SOVERS_RELEASE ${_lt_vers_am_contents})
- MATH (EXPR H5_F_SOVERS_MAJOR ${H5_F_SOVERS_INTERFACE}-${H5_F_SOVERS_RELEASE})
+ math (EXPR H5_F_SOVERS_MAJOR ${H5_F_SOVERS_INTERFACE}-${H5_F_SOVERS_RELEASE})
message (STATUS "SOVERSION_F: ${H5_F_SOVERS_MAJOR}.${H5_F_SOVERS_RELEASE}.${H5_F_SOVERS_MINOR}")
-endif()
-string (REGEX REPLACE ".*LT_HL_VERS_INTERFACE[ \t]+=[ \t]+([0-9]*).*$" H5_HL_SOVERS_EXISTS ${_lt_vers_am_contents})
-if(H5_HL_SOVERS_EXISTS)
+endif ()
+string (REGEX MATCH ".*LT_HL_VERS_INTERFACE[ \t]+=[ \t]+([0-9]*).*$" H5_HL_SOVERS_EXISTS ${_lt_vers_am_contents})
+if (H5_HL_SOVERS_EXISTS)
string (REGEX REPLACE ".*LT_HL_VERS_INTERFACE[ \t]+=[ \t]+([0-9]*).*$"
"\\1" H5_HL_SOVERS_INTERFACE ${_lt_vers_am_contents})
string (REGEX REPLACE ".*LT_HL_VERS_REVISION[ \t]+=[ \t]+([0-9]*).*$"
"\\1" H5_HL_SOVERS_MINOR ${_lt_vers_am_contents})
string (REGEX REPLACE ".*LT_HL_VERS_AGE[ \t]+=[ \t]+([0-9]*).*$"
"\\1" H5_HL_SOVERS_RELEASE ${_lt_vers_am_contents})
- MATH (EXPR H5_HL_SOVERS_MAJOR ${H5_HL_SOVERS_INTERFACE}-${H5_HL_SOVERS_RELEASE})
+ math (EXPR H5_HL_SOVERS_MAJOR ${H5_HL_SOVERS_INTERFACE}-${H5_HL_SOVERS_RELEASE})
message (STATUS "SOVERSION_HL: ${H5_HL_SOVERS_MAJOR}.${H5_HL_SOVERS_RELEASE}.${H5_HL_SOVERS_MINOR}")
-endif()
-string (REGEX REPLACE ".*LT_HL_CXX_VERS_INTERFACE[ \t]+=[ \t]+([0-9]*).*$" H5_HL_CXX_SOVERS_EXISTS ${_lt_vers_am_contents})
-if(H5_HL_CXX_SOVERS_EXISTS)
+endif ()
+string (REGEX MATCH ".*LT_HL_CXX_VERS_INTERFACE[ \t]+=[ \t]+([0-9]*).*$" H5_HL_CXX_SOVERS_EXISTS ${_lt_vers_am_contents})
+if (H5_HL_CXX_SOVERS_EXISTS)
string (REGEX REPLACE ".*LT_HL_CXX_VERS_INTERFACE[ \t]+=[ \t]+([0-9]*).*$"
"\\1" H5_HL_CXX_SOVERS_INTERFACE ${_lt_vers_am_contents})
string (REGEX REPLACE ".*LT_HL_CXX_VERS_REVISION[ \t]+=[ \t]+([0-9]*).*$"
"\\1" H5_HL_CXX_SOVERS_MINOR ${_lt_vers_am_contents})
string (REGEX REPLACE ".*LT_HL_CXX_VERS_AGE[ \t]+=[ \t]+([0-9]*).*$"
"\\1" H5_HL_CXX_SOVERS_RELEASE ${_lt_vers_am_contents})
- MATH (EXPR H5_HL_CXX_SOVERS_MAJOR ${H5_HL_CXX_SOVERS_INTERFACE}-${H5_HL_CXX_SOVERS_RELEASE})
+ math (EXPR H5_HL_CXX_SOVERS_MAJOR ${H5_HL_CXX_SOVERS_INTERFACE}-${H5_HL_CXX_SOVERS_RELEASE})
message (STATUS "SOVERSION_HL_CXX: ${H5_HL_CXX_SOVERS_MAJOR}.${H5_HL_CXX_SOVERS_RELEASE}.${H5_HL_CXX_SOVERS_MINOR}")
-endif()
-string (REGEX REPLACE ".*LT_HL_F_VERS_INTERFACE[ \t]+=[ \t]+([0-9]*).*$" H5_HL_F_SOVERS_EXISTS ${_lt_vers_am_contents})
-if(H5_HL_F_SOVERS_EXISTS)
+endif ()
+string (REGEX MATCH ".*LT_HL_F_VERS_INTERFACE[ \t]+=[ \t]+([0-9]*).*$" H5_HL_F_SOVERS_EXISTS ${_lt_vers_am_contents})
+if (H5_HL_F_SOVERS_EXISTS)
string (REGEX REPLACE ".*LT_HL_F_VERS_INTERFACE[ \t]+=[ \t]+([0-9]*).*$"
"\\1" H5_HL_F_SOVERS_INTERFACE ${_lt_vers_am_contents})
string (REGEX REPLACE ".*LT_HL_F_VERS_REVISION[ \t]+=[ \t]+([0-9]*).*$"
"\\1" H5_HL_F_SOVERS_MINOR ${_lt_vers_am_contents})
string (REGEX REPLACE ".*LT_HL_F_VERS_AGE[ \t]+=[ \t]+([0-9]*).*$"
"\\1" H5_HL_F_SOVERS_RELEASE ${_lt_vers_am_contents})
- MATH (EXPR H5_HL_F_SOVERS_MAJOR ${H5_HL_F_SOVERS_INTERFACE}-${H5_HL_F_SOVERS_RELEASE})
+ math (EXPR H5_HL_F_SOVERS_MAJOR ${H5_HL_F_SOVERS_INTERFACE}-${H5_HL_F_SOVERS_RELEASE})
message (STATUS "SOVERSION_HL_F: ${H5_HL_F_SOVERS_MAJOR}.${H5_HL_F_SOVERS_RELEASE}.${H5_HL_F_SOVERS_MINOR}")
-endif()
-string (REGEX REPLACE ".*LT_JAVA_VERS_INTERFACE[ \t]+=[ \t]+([0-9]*).*$" H5_JAVA_SOVERS_EXISTS ${_lt_vers_am_contents})
+endif ()
+string (REGEX MATCH ".*LT_JAVA_VERS_INTERFACE[ \t]+=[ \t]+([0-9]*).*$" H5_JAVA_SOVERS_EXISTS ${_lt_vers_am_contents})
if(H5_JAVA_SOVERS_EXISTS)
string (REGEX REPLACE ".*LT_JAVA_VERS_INTERFACE[ \t]+=[ \t]+([0-9]*).*$"
"\\1" H5_JAVA_SOVERS_INTERFACE ${_lt_vers_am_contents})
@@ -344,9 +344,9 @@ if(H5_JAVA_SOVERS_EXISTS)
"\\1" H5_JAVA_SOVERS_MINOR ${_lt_vers_am_contents})
string (REGEX REPLACE ".*LT_JAVA_VERS_AGE[ \t]+=[ \t]+([0-9]*).*$"
"\\1" H5_JAVA_SOVERS_RELEASE ${_lt_vers_am_contents})
- MATH (EXPR H5_JAVA_SOVERS_MAJOR ${H5_JAVA_SOVERS_INTERFACE}-${H5_JAVA_SOVERS_RELEASE})
+ math (EXPR H5_JAVA_SOVERS_MAJOR ${H5_JAVA_SOVERS_INTERFACE}-${H5_JAVA_SOVERS_RELEASE})
message (STATUS "SOVERSION_JAVA: ${H5_JAVA_SOVERS_MAJOR}.${H5_JAVA_SOVERS_RELEASE}.${H5_JAVA_SOVERS_MINOR}")
-endif()
+endif ()
#-----------------------------------------------------------------------------
# Basic HDF5 stuff here
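
Note: the version-parsing hunks above also switch the *_SOVERS_EXISTS probes from string(REGEX REPLACE ...) to string(REGEX MATCH ...), matching the form already used for the tools version. MATCH takes three arguments after the subcommand — regex, output variable, input — and leaves the output variable empty when the pattern is absent, which is exactly what the following if() tests need; REPLACE expects an additional replacement expression, and in the removed lines the output-variable name sat in that replacement slot. A standalone sketch of the corrected idiom with placeholder names (independent of HDF5's real lt_vers.am parsing):

    set (_contents "LT_VERS_INTERFACE = 10\nLT_CXX_VERS_INTERFACE = 12")

    # MATCH leaves _cxx_exists empty when the key is missing, so if() works as an existence test
    string (REGEX MATCH "LT_CXX_VERS_INTERFACE[ \t]+=[ \t]+([0-9]*)" _cxx_exists "${_contents}")
    if (_cxx_exists)
      string (REGEX REPLACE ".*LT_CXX_VERS_INTERFACE[ \t]+=[ \t]+([0-9]*).*$" "\\1" _cxx_iface "${_contents}")
      message (STATUS "C++ interface version: ${_cxx_iface}")   # prints 12 here
    endif ()
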
@@ -358,45 +358,45 @@ set (HDF5_PACKAGE_VERSION_MAJOR "${H5_VERS_MAJOR}.${H5_VERS_MINOR}")
set (HDF5_PACKAGE_VERSION_MINOR "${H5_VERS_RELEASE}")
if (NOT "${H5_VERS_SUBRELEASE}" STREQUAL "")
set (HDF5_PACKAGE_VERSION_STRING "${HDF5_PACKAGE_VERSION}-${H5_VERS_SUBRELEASE}")
-else (NOT "${H5_VERS_SUBRELEASE}" STREQUAL "")
+else ()
set (HDF5_PACKAGE_VERSION_STRING "${HDF5_PACKAGE_VERSION}")
-endif (NOT "${H5_VERS_SUBRELEASE}" STREQUAL "")
+endif ()
set (HDF5_PACKAGE_SOVERSION "${H5_SOVERS_MAJOR}.${H5_SOVERS_RELEASE}.${H5_SOVERS_MINOR}")
-if(H5_TOOLS_SOVERS_EXISTS)
+if (H5_TOOLS_SOVERS_EXISTS)
set (HDF5_TOOLS_PACKAGE_SOVERSION "${H5_TOOLS_SOVERS_MAJOR}.${H5_TOOLS_SOVERS_RELEASE}.${H5_TOOLS_SOVERS_MINOR}")
-else()
+else ()
set (HDF5_TOOLS_PACKAGE_SOVERSION "${H5_SOVERS_MAJOR}.${H5_SOVERS_RELEASE}.${H5_SOVERS_MINOR}")
-endif()
-if(H5_CXX_SOVERS_EXISTS)
+endif ()
+if (H5_CXX_SOVERS_EXISTS)
set (HDF5_CXX_PACKAGE_SOVERSION "${H5_CXX_SOVERS_MAJOR}.${H5_CXX_SOVERS_RELEASE}.${H5_CXX_SOVERS_MINOR}")
-else()
+else ()
set (HDF5_CXX_PACKAGE_SOVERSION "${H5_SOVERS_MAJOR}.${H5_SOVERS_RELEASE}.${H5_SOVERS_MINOR}")
-endif()
-if(H5_F_SOVERS_EXISTS)
+endif ()
+if (H5_F_SOVERS_EXISTS)
set (HDF5_F_PACKAGE_SOVERSION "${H5_F_SOVERS_MAJOR}.${H5_F_SOVERS_RELEASE}.${H5_F_SOVERS_MINOR}")
-else()
+else ()
set (HDF5_F_PACKAGE_SOVERSION "${H5_SOVERS_MAJOR}.${H5_SOVERS_RELEASE}.${H5_SOVERS_MINOR}")
-endif()
-if(H5_HL_SOVERS_EXISTS)
+endif ()
+if (H5_HL_SOVERS_EXISTS)
set (HDF5_HL_PACKAGE_SOVERSION "${H5_HL_SOVERS_MAJOR}.${H5_HL_SOVERS_RELEASE}.${H5_HL_SOVERS_MINOR}")
-else()
+else ()
set (HDF5_HL_PACKAGE_SOVERSION "${H5_SOVERS_MAJOR}.${H5_SOVERS_RELEASE}.${H5_SOVERS_MINOR}")
-endif()
-if(H5_HL_F_SOVERS_EXISTS)
+endif ()
+if (H5_HL_F_SOVERS_EXISTS)
set (HDF5_HL_CXX_PACKAGE_SOVERSION "${H5_HL_CXX_SOVERS_MAJOR}.${H5_HL_CXX_SOVERS_RELEASE}.${H5_HL_CXX_SOVERS_MINOR}")
-else()
+else ()
set (HDF5_HL_CXX_PACKAGE_SOVERSION "${H5_SOVERS_MAJOR}.${H5_SOVERS_RELEASE}.${H5_SOVERS_MINOR}")
-endif()
-if(H5_HL_F_SOVERS_EXISTS)
+endif ()
+if (H5_HL_F_SOVERS_EXISTS)
set (HDF5_HL_F_PACKAGE_SOVERSION "${H5_HL_F_SOVERS_MAJOR}.${H5_HL_F_SOVERS_RELEASE}.${H5_HL_F_SOVERS_MINOR}")
-else()
+else ()
set (HDF5_HL_F_PACKAGE_SOVERSION "${H5_SOVERS_MAJOR}.${H5_SOVERS_RELEASE}.${H5_SOVERS_MINOR}")
-endif()
-if(H5_JAVA_SOVERS_EXISTS)
+endif ()
+if (H5_JAVA_SOVERS_EXISTS)
set (HDF5_PACKAGE_SOVERSION "${H5_JAVA_SOVERS_MAJOR}.${H5_JAVA_SOVERS_RELEASE}.${H5_JAVA_SOVERS_MINOR}")
-else()
+else ()
set (HDF5_JAVA_PACKAGE_SOVERSION "${H5_SOVERS_MAJOR}.${H5_SOVERS_RELEASE}.${H5_SOVERS_MINOR}")
-endif()
+endif ()
set (HDF5_PACKAGE_STRING "${HDF5_PACKAGE_NAME} ${HDF5_PACKAGE_VERSION_STRING}")
set (HDF5_PACKAGE_TARNAME "${HDF5_PACKAGE}${HDF_PACKAGE_EXT}")
set (HDF5_PACKAGE_URL "http://www.hdfgroup.org")
@@ -427,16 +427,19 @@ if (NOT HDF5_EXTERNALLY_CONFIGURED)
)
if (WIN32)
set (CMAKE_TEST_OUTPUT_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${CMAKE_BUILD_TYPE})
- else (WIN32)
+ set (CMAKE_PDB_OUTPUT_DIRECTORY
+ ${PROJECT_BINARY_DIR}/bin CACHE PATH "Single Directory for all pdb files."
+ )
+ else ()
set (CMAKE_TEST_OUTPUT_DIRECTORY ${CMAKE_RUNTIME_OUTPUT_DIRECTORY})
- endif (WIN32)
-else (NOT HDF5_EXTERNALLY_CONFIGURED)
+ endif ()
+else ()
# if we are externally configured, but the project uses old cmake scripts
# this may not be set and utilities like H5detect will fail
if (NOT CMAKE_RUNTIME_OUTPUT_DIRECTORY)
set (CMAKE_RUNTIME_OUTPUT_DIRECTORY ${EXECUTABLE_OUTPUT_PATH})
- endif (NOT CMAKE_RUNTIME_OUTPUT_DIRECTORY)
-endif (NOT HDF5_EXTERNALLY_CONFIGURED)
+ endif ()
+endif ()
#-----------------------------------------------------------------------------
# Targets built within this project are exported at Install time for use
@@ -444,7 +447,7 @@ endif (NOT HDF5_EXTERNALLY_CONFIGURED)
#-----------------------------------------------------------------------------
if (NOT HDF5_EXPORTED_TARGETS)
set (HDF5_EXPORTED_TARGETS "hdf5-targets")
-endif (NOT HDF5_EXPORTED_TARGETS)
+endif ()
#-----------------------------------------------------------------------------
# To include a library in the list exported by the project AT BUILD TIME,
@@ -470,17 +473,17 @@ set (CMAKE_INCLUDE_CURRENT_DIR_IN_INTERFACE ON)
#-----------------------------------------------------------------------------
if (HDF5_BUILD_FRAMEWORKS AND NOT BUILD_SHARED_LIBS)
set (BUILD_SHARED_LIBS ON CACHE BOOL "Build Shared Libraries")
-endif (HDF5_BUILD_FRAMEWORKS AND NOT BUILD_SHARED_LIBS)
+endif ()
#-----------------------------------------------------------------------------
-# Option to Build Shared and Static libs, default is static
+# Option to Build Shared and Static libs, default is both
#-----------------------------------------------------------------------------
option (BUILD_SHARED_LIBS "Build Shared Libraries" ON)
set (H5_ENABLE_SHARED_LIB NO)
if (BUILD_SHARED_LIBS)
set (H5_ENABLE_SHARED_LIB YES)
set (LINK_SHARED_LIBS ${LINK_LIBS})
-endif (BUILD_SHARED_LIBS)
+endif ()
set (H5_ENABLE_STATIC_LIB YES)
set (CMAKE_POSITION_INDEPENDENT_CODE ON)
@@ -492,8 +495,8 @@ if (BUILD_STATIC_EXECS)
if (NOT WIN32)
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -static")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -static")
- endif (NOT WIN32)
-endif (BUILD_STATIC_EXECS)
+ endif ()
+endif ()
#-----------------------------------------------------------------------------
# Option to use code coverage
@@ -503,7 +506,7 @@ if (HDF5_ENABLE_COVERAGE)
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -g -O0 -fprofile-arcs -ftest-coverage")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -g -O0 -fprofile-arcs -ftest-coverage")
set (LDFLAGS "${LDFLAGS} -fprofile-arcs -ftest-coverage")
-endif (HDF5_ENABLE_COVERAGE)
+endif ()
#-----------------------------------------------------------------------------
# Option to indicate using dmalloc
@@ -512,7 +515,7 @@ endif (HDF5_ENABLE_COVERAGE)
# if (HDF5_ENABLE_USING_DMALLOC)
# find_package (DMALLOC)
# set (H5_HAVE_DMALLOC DMALLOC_FOUND)
-# endif (HDF5_ENABLE_USING_DMALLOC)
+# endif ()
#-----------------------------------------------------------------------------
# Option to indicate using a memory checker
@@ -520,7 +523,7 @@ endif (HDF5_ENABLE_COVERAGE)
option (HDF5_ENABLE_USING_MEMCHECKER "Indicate that a memory checker is used" OFF)
if (HDF5_ENABLE_USING_MEMCHECKER)
set (H5_USING_MEMCHECKER 1)
-endif (HDF5_ENABLE_USING_MEMCHECKER)
+endif ()
#-----------------------------------------------------------------------------
# Option to indicate internal memory allocation sanity checks are enabled
@@ -528,7 +531,7 @@ endif (HDF5_ENABLE_USING_MEMCHECKER)
option (HDF5_MEMORY_ALLOC_SANITY_CHECK "Indicate that internal memory allocation sanity checks are enabled" OFF)
if (HDF5_MEMORY_ALLOC_SANITY_CHECK)
set (H5_MEMORY_ALLOC_SANITY_CHECK 1)
-endif (HDF5_MEMORY_ALLOC_SANITY_CHECK)
+endif ()
#-----------------------------------------------------------------------------
# Option to use deprecated public API symbols
@@ -536,9 +539,9 @@ endif (HDF5_MEMORY_ALLOC_SANITY_CHECK)
option (HDF5_ENABLE_DEPRECATED_SYMBOLS "Enable deprecated public API symbols" ON)
if (HDF5_ENABLE_DEPRECATED_SYMBOLS)
set (H5_NO_DEPRECATED_SYMBOLS 0)
-else (HDF5_ENABLE_DEPRECATED_SYMBOLS)
+else ()
set (H5_NO_DEPRECATED_SYMBOLS 1)
-endif (HDF5_ENABLE_DEPRECATED_SYMBOLS)
+endif ()
#-----------------------------------------------------------------------------
# When building utility executables that generate other (source) files :
@@ -553,24 +556,24 @@ if (WIN32)
add_definitions (-D_BIND_TO_CURRENT_VCLIBS_VERSION=1)
add_definitions (-D_CRT_SECURE_NO_WARNINGS)
add_definitions (-D_CONSOLE)
- endif (NOT CYGWIN)
-endif (WIN32)
+ endif ()
+endif ()
if (MSVC)
set (CMAKE_MFC_FLAG 0)
set (WIN_COMPILE_FLAGS "")
set (WIN_LINK_FLAGS "")
-endif (MSVC)
+endif ()
set (MAKE_SYSTEM)
if (CMAKE_BUILD_TOOL MATCHES "make")
set (MAKE_SYSTEM 1)
-endif (CMAKE_BUILD_TOOL MATCHES "make")
+endif ()
set (CFG_INIT "/${CMAKE_CFG_INTDIR}")
if (MAKE_SYSTEM)
set (CFG_INIT "")
-endif (MAKE_SYSTEM)
+endif ()
#-----------------------------------------------------------------------------
# Add some definitions for Debug Builds
@@ -582,19 +585,19 @@ if (CMAKE_BUILD_TYPE MATCHES Debug)
# Enable tracing of the API
if (HDF5_ENABLE_TRACE)
add_definitions (-DH5_DEBUG_API )
- endif (HDF5_ENABLE_TRACE)
+ endif ()
# Enable instrumenting of the library's internal operations
option (HDF5_ENABLE_INSTRUMENT "Instrument The library" OFF)
if (HDF5_ENABLE_INSTRUMENT)
set (H5_HAVE_INSTRUMENTED_LIBRARY 1)
- endif (HDF5_ENABLE_INSTRUMENT)
+ endif ()
mark_as_advanced (HDF5_ENABLE_INSTRUMENT)
-else (CMAKE_BUILD_TYPE MATCHES Debug)
+else ()
add_definitions (-DNDEBUG)
if (HDF5_ENABLE_TRACE)
add_definitions (-DH5_DEBUG_API )
- endif (HDF5_ENABLE_TRACE)
-endif (CMAKE_BUILD_TYPE MATCHES Debug)
+ endif ()
+endif ()
#-----------------------------------------------------------------------------
# Option to embed library info into executables
@@ -602,7 +605,7 @@ endif (CMAKE_BUILD_TYPE MATCHES Debug)
option (HDF5_ENABLE_EMBEDDED_LIBINFO "embed library info into executables" ON)
if (HDF5_ENABLE_EMBEDDED_LIBINFO)
set (H5_HAVE_EMBEDDED_LIBINFO 1)
-endif (HDF5_ENABLE_EMBEDDED_LIBINFO)
+endif ()
include (${HDF_RESOURCES_DIR}/HDFCompilerFlags.cmake)
@@ -627,10 +630,10 @@ if (HDF5_ENABLE_PARALLEL)
# Used by Fortran + MPI
CHECK_SYMBOL_EXISTS (MPI_Comm_c2f "${MPI_C_INCLUDE_PATH}/mpi.h" H5_HAVE_MPI_MULTI_LANG_Comm)
CHECK_SYMBOL_EXISTS (MPI_Info_c2f "${MPI_C_INCLUDE_PATH}/mpi.h" H5_HAVE_MPI_MULTI_LANG_Info)
- else (MPI_C_FOUND)
+ else ()
message (STATUS "Parallel libraries not found")
- endif (MPI_C_FOUND)
-endif (HDF5_ENABLE_PARALLEL)
+ endif ()
+endif ()
# Parallel IO usage requires MPI to be Linked and Included
if (H5_HAVE_PARALLEL)
@@ -638,8 +641,8 @@ if (H5_HAVE_PARALLEL)
set (LINK_SHARED_LIBS ${LINK_SHARED_LIBS} ${MPI_C_LIBRARIES})
if (MPI_C_LINK_FLAGS)
set (CMAKE_EXE_LINKER_FLAGS "${MPI_C_LINK_FLAGS} ${CMAKE_EXE_LINKER_FLAGS}")
- endif (MPI_C_LINK_FLAGS)
-endif (H5_HAVE_PARALLEL)
+ endif ()
+endif ()
set (DEFAULT_API_VERSION "v110")
#-----------------------------------------------------------------------------
@@ -650,7 +653,7 @@ set (H5_USE_16_API_DEFAULT 0)
if (HDF5_USE_16_API_DEFAULT)
set (H5_USE_16_API_DEFAULT 1)
set (DEFAULT_API_VERSION "v16")
-endif (HDF5_USE_16_API_DEFAULT)
+endif ()
#-----------------------------------------------------------------------------
# Option to use 1.8.x API
@@ -660,7 +663,7 @@ set (H5_USE_18_API_DEFAULT 0)
if (HDF5_USE_18_API_DEFAULT)
set (H5_USE_18_API_DEFAULT 1)
set (DEFAULT_API_VERSION "v18")
-endif (HDF5_USE_18_API_DEFAULT)
+endif ()
#-----------------------------------------------------------------------------
# Include user macros
@@ -681,13 +684,13 @@ if (NOT HDF5_EXTERNALLY_CONFIGURED)
set (HDF5_NO_PACKAGES OFF CACHE BOOL "CPACK - Disable packaging" FORCE)
if (HDF5_ENABLE_Z_LIB_SUPPORT AND ZLIB_FOUND)
PACKAGE_ZLIB_LIBRARY (${HDF5_ALLOW_EXTERNAL_SUPPORT})
- endif (HDF5_ENABLE_Z_LIB_SUPPORT AND ZLIB_FOUND)
+ endif ()
if (HDF5_ENABLE_SZIP_SUPPORT AND SZIP_FOUND)
PACKAGE_SZIP_LIBRARY (${HDF5_ALLOW_EXTERNAL_SUPPORT})
- endif (HDF5_ENABLE_SZIP_SUPPORT AND SZIP_FOUND)
- endif (HDF5_PACKAGE_EXTLIBS)
-endif (NOT HDF5_EXTERNALLY_CONFIGURED)
+ endif ()
+ endif ()
+endif ()
#-----------------------------------------------------------------------------
# Option to use threadsafe
#-----------------------------------------------------------------------------
@@ -699,45 +702,45 @@ if (HDF5_ENABLE_THREADSAFE)
if (HDF5_ENABLE_PARALLEL)
if (NOT ALLOW_UNSUPPORTED)
message (FATAL_ERROR " **** parallel and thread-safety options are not supported **** ")
- else (NOT ALLOW_UNSUPPORTED)
+ else ()
message (STATUS " **** Allowing unsupported parallel and thread-safety options **** ")
- endif (NOT ALLOW_UNSUPPORTED)
- endif (HDF5_ENABLE_PARALLEL)
+ endif ()
+ endif ()
if (HDF5_BUILD_FORTRAN)
if (NOT ALLOW_UNSUPPORTED)
message (FATAL_ERROR " **** Fortran and thread-safety options are not supported **** ")
- else (NOT ALLOW_UNSUPPORTED)
+ else ()
message (STATUS " **** Allowing unsupported Fortran and thread-safety options **** ")
- endif (NOT ALLOW_UNSUPPORTED)
- endif (HDF5_BUILD_FORTRAN)
+ endif ()
+ endif ()
if (HDF5_BUILD_CPP_LIB)
if (NOT ALLOW_UNSUPPORTED)
message (FATAL_ERROR " **** C++ and thread-safety options are not supported **** ")
- else (NOT ALLOW_UNSUPPORTED)
+ else ()
message (STATUS " **** Allowing unsupported C++ and thread-safety options **** ")
- endif (NOT ALLOW_UNSUPPORTED)
- endif (HDF5_BUILD_CPP_LIB)
+ endif ()
+ endif ()
if (HDF5_BUILD_HL_LIB)
if (NOT ALLOW_UNSUPPORTED)
message (FATAL_ERROR " **** HL and thread-safety options are not supported **** ")
- else (NOT ALLOW_UNSUPPORTED)
+ else ()
message (STATUS " **** Allowing unsupported HL and thread-safety options **** ")
- endif (NOT ALLOW_UNSUPPORTED)
- endif (HDF5_BUILD_HL_LIB)
+ endif ()
+ endif ()
if (H5_HAVE_IOEO)
message (STATUS " **** Win32 threads requires WINVER>=0x600 (Windows Vista/7/8) **** ")
set (H5_HAVE_WIN_THREADS 1)
- else (H5_HAVE_IOEO)
+ else ()
if (NOT H5_HAVE_PTHREAD_H)
message (FATAL_ERROR " **** thread-safe option requires Win32 threads or Pthreads **** ")
- endif (NOT H5_HAVE_PTHREAD_H)
- endif (H5_HAVE_IOEO)
+ endif ()
+ endif ()
set(THREADS_PREFER_PTHREAD_FLAG ON)
find_package(Threads)
if (NOT Threads_FOUND)
message (STATUS " **** thread-safe package not found - threads still might work **** ")
- endif (NOT Threads_FOUND)
-endif (HDF5_ENABLE_THREADSAFE)
+ endif ()
+endif ()
# -----------------------------------------------------------------------
# wrapper script variables
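For reference, the thread-safety hunk above relies on CMake's stock FindThreads module; a minimal sketch of the usual consumption pattern follows (again, LINK_LIBS stands in for the build's own link list):

    set (THREADS_PREFER_PTHREAD_FLAG ON)   # prefer -pthread over -lpthread where supported
    find_package (Threads)
    if (Threads_FOUND)
      # Classic variable form; CMake 3.1+ also provides the Threads::Threads imported target.
      set (LINK_LIBS ${LINK_LIBS} ${CMAKE_THREAD_LIBS_INIT})
    endif ()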
@@ -755,15 +758,15 @@ if (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "SVN" OR HDF5_ALLOW_EXTERNAL_SUPPORT MAT
ADD_DEPENDENCIES (${HDF5_LIB_TARGET} ZLIB)
if (BUILD_SHARED_LIBS)
add_dependencies (${HDF5_LIBSH_TARGET} ZLIB)
- endif (BUILD_SHARED_LIBS)
- endif (ZLIB_FOUND AND ZLIB_USE_EXTERNAL)
+ endif ()
+ endif ()
if (SZIP_FOUND AND SZIP_USE_EXTERNAL)
ADD_DEPENDENCIES (${HDF5_LIB_TARGET} SZIP)
if (BUILD_SHARED_LIBS)
add_dependencies (${HDF5_LIBSH_TARGET} SZIP)
- endif (BUILD_SHARED_LIBS)
- endif (SZIP_FOUND AND SZIP_USE_EXTERNAL)
-endif (HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "SVN" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "GIT" OR HDF5_ALLOW_EXTERNAL_SUPPORT MATCHES "TGZ")
+ endif ()
+ endif ()
+endif ()
#-----------------------------------------------------------------------------
# Dashboard and Testing Settings
@@ -780,7 +783,7 @@ if (BUILD_TESTING)
if (HDF5_TEST_VFD)
option (HDF5_TEST_FHEAP_VFD "Execute tests with different VFDs" ON)
mark_as_advanced (HDF5_TEST_FHEAP_VFD)
- endif (HDF5_TEST_VFD)
+ endif ()
option (HDF_TEST_EXPRESS "Control testing framework (0-3)" "0")
mark_as_advanced (HDF_TEST_EXPRESS)
@@ -795,14 +798,14 @@ if (BUILD_TESTING)
if (EXISTS "${HDF5_SOURCE_DIR}/test" AND IS_DIRECTORY "${HDF5_SOURCE_DIR}/test")
add_subdirectory (${HDF5_SOURCE_DIR}/tools/lib ${PROJECT_BINARY_DIR}/tools/lib)
add_subdirectory (${HDF5_SOURCE_DIR}/test ${PROJECT_BINARY_DIR}/test)
- endif (EXISTS "${HDF5_SOURCE_DIR}/test" AND IS_DIRECTORY "${HDF5_SOURCE_DIR}/test")
+ endif ()
if (H5_HAVE_PARALLEL)
if (EXISTS "${HDF5_SOURCE_DIR}/testpar" AND IS_DIRECTORY "${HDF5_SOURCE_DIR}/testpar")
add_subdirectory (${HDF5_SOURCE_DIR}/testpar ${PROJECT_BINARY_DIR}/testpar)
- endif (EXISTS "${HDF5_SOURCE_DIR}/testpar" AND IS_DIRECTORY "${HDF5_SOURCE_DIR}/testpar")
- endif (H5_HAVE_PARALLEL)
- endif (NOT HDF5_EXTERNALLY_CONFIGURED)
-endif (BUILD_TESTING)
+ endif ()
+ endif ()
+ endif ()
+endif ()
#-----------------------------------------------------------------------------
# Option to build HDF5 Tools
@@ -811,8 +814,8 @@ if (EXISTS "${HDF5_SOURCE_DIR}/tools" AND IS_DIRECTORY "${HDF5_SOURCE_DIR}/tools
option (HDF5_BUILD_TOOLS "Build HDF5 Tools" ON)
if (HDF5_BUILD_TOOLS)
add_subdirectory (${HDF5_SOURCE_DIR}/tools ${PROJECT_BINARY_DIR}/tools)
- endif (HDF5_BUILD_TOOLS)
-endif (EXISTS "${HDF5_SOURCE_DIR}/tools" AND IS_DIRECTORY "${HDF5_SOURCE_DIR}/tools")
+ endif ()
+endif ()
#-----------------------------------------------------------------------------
# Option to build examples
@@ -821,8 +824,8 @@ if (EXISTS "${HDF5_SOURCE_DIR}/examples" AND IS_DIRECTORY "${HDF5_SOURCE_DIR}/ex
option (HDF5_BUILD_EXAMPLES "Build HDF5 Library Examples" ON)
if (HDF5_BUILD_EXAMPLES)
add_subdirectory (${HDF5_SOURCE_DIR}/examples ${PROJECT_BINARY_DIR}/examples)
- endif (HDF5_BUILD_EXAMPLES)
-endif (EXISTS "${HDF5_SOURCE_DIR}/examples" AND IS_DIRECTORY "${HDF5_SOURCE_DIR}/examples")
+ endif ()
+endif ()
#-----------------------------------------------------------------------------
# Option to build High Level API's
@@ -832,8 +835,8 @@ if (EXISTS "${HDF5_SOURCE_DIR}/hl" AND IS_DIRECTORY "${HDF5_SOURCE_DIR}/hl")
if (HDF5_BUILD_HL_LIB)
set (H5_INCLUDE_HL 1)
add_subdirectory (${HDF5_SOURCE_DIR}/hl ${PROJECT_BINARY_DIR}/hl)
- endif (HDF5_BUILD_HL_LIB)
-endif (EXISTS "${HDF5_SOURCE_DIR}/hl" AND IS_DIRECTORY "${HDF5_SOURCE_DIR}/hl")
+ endif ()
+endif ()
#-----------------------------------------------------------------------------
# Option to build Fortran bindings/tests/examples
@@ -848,13 +851,18 @@ if (EXISTS "${HDF5_SOURCE_DIR}/fortran" AND IS_DIRECTORY "${HDF5_SOURCE_DIR}/for
option (SKIP_HDF5_FORTRAN_SHARED "Do not build the fortran shared libraries" OFF)
if (HDF5_BUILD_FORTRAN)
if (BUILD_SHARED_LIBS AND APPLE)
+      # Tell CMake to do the right thing with COMMON symbols; this fixes
+      # corrupt values with COMMON and EQUIVALENCE when building shared
+      # Fortran libraries on OS X with the GNU and Intel compilers (HDFFV-2772).
+      # *** NOTE: This fix does not work with CMake. ***
+ # set (HDF5_LINKER_FLAGS "-Wl,-commons,use_dylibs")
if (NOT ALLOW_UNSUPPORTED)
message (STATUS " **** Shared FORTRAN libraries are unsupported **** ")
set (SKIP_HDF5_FORTRAN_SHARED ON)
- else (NOT ALLOW_UNSUPPORTED)
+ else ()
message (STATUS " **** Allowing unsupported Fortran shared libraries **** ")
- endif (NOT ALLOW_UNSUPPORTED)
- endif (BUILD_SHARED_LIBS AND APPLE)
+ endif ()
+ endif ()
option (HDF5_ENABLE_F2003 "Enable FORTRAN 2003 Standard" ON)
include (${HDF_RESOURCES_EXT_DIR}/HDFUseFortran.cmake)
@@ -871,8 +879,17 @@ if (EXISTS "${HDF5_SOURCE_DIR}/fortran" AND IS_DIRECTORY "${HDF5_SOURCE_DIR}/for
if (HDF5_ENABLE_F2003)
if (NOT FORTRAN_HAVE_ISO_C_BINDING)
set (HDF5_ENABLE_F2003 OFF)
- endif (NOT FORTRAN_HAVE_ISO_C_BINDING)
- endif (HDF5_ENABLE_F2003)
+ endif ()
+ endif ()
+
+ # Parallel IO usage requires MPI to be Linked and Included
+ if (H5_HAVE_PARALLEL)
+ set (LINK_Fortran_LIBS ${LINK_LIBS} ${MPI_Fortran_LIBRARIES})
+ set (LINK_Fortran_SHARED_LIBS ${LINK_SHARED_LIBS} ${MPI_Fortran_LIBRARIES})
+ if (MPI_Fortran_LINK_FLAGS)
+ set (CMAKE_Fortran_EXE_LINKER_FLAGS "${MPI_Fortran_LINK_FLAGS} ${CMAKE_EXE_LINKER_FLAGS}")
+ endif ()
+ endif ()
# -----------------------------------------------------------------------
# wrapper script variables
@@ -884,10 +901,10 @@ if (EXISTS "${HDF5_SOURCE_DIR}/fortran" AND IS_DIRECTORY "${HDF5_SOURCE_DIR}/for
if (EXISTS "${HDF5_SOURCE_DIR}/hl/fortran" AND IS_DIRECTORY "${HDF5_SOURCE_DIR}/hl/fortran")
#-- Build the High Level Fortran source codes
add_subdirectory (${HDF5_SOURCE_DIR}/hl/fortran ${PROJECT_BINARY_DIR}/hl/fortran)
- endif (EXISTS "${HDF5_SOURCE_DIR}/hl/fortran" AND IS_DIRECTORY "${HDF5_SOURCE_DIR}/hl/fortran")
- endif (HDF5_BUILD_HL_LIB)
- endif (HDF5_BUILD_FORTRAN)
-endif (EXISTS "${HDF5_SOURCE_DIR}/fortran" AND IS_DIRECTORY "${HDF5_SOURCE_DIR}/fortran")
+ endif ()
+ endif ()
+ endif ()
+endif ()
#-----------------------------------------------------------------------------
# Option to build HDF5 C++ Library
@@ -899,22 +916,22 @@ if (EXISTS "${HDF5_SOURCE_DIR}/c++" AND IS_DIRECTORY "${HDF5_SOURCE_DIR}/c++")
if (HDF5_ENABLE_PARALLEL)
if (NOT ALLOW_UNSUPPORTED)
message (FATAL_ERROR " **** Parallel and C++ options are mutually exclusive **** ")
- else (NOT ALLOW_UNSUPPORTED)
+ else ()
message (STATUS " **** Allowing unsupported Parallel and C++ options **** ")
- endif (NOT ALLOW_UNSUPPORTED)
- endif (HDF5_ENABLE_PARALLEL)
+ endif ()
+ endif ()
if (CMAKE_NO_STD_NAMESPACE)
set (H5_NO_STD 1)
- endif (CMAKE_NO_STD_NAMESPACE)
+ endif ()
add_subdirectory (${HDF5_SOURCE_DIR}/c++ ${PROJECT_BINARY_DIR}/c++)
if (HDF5_BUILD_HL_LIB)
if (EXISTS "${HDF5_SOURCE_DIR}/hl/c++" AND IS_DIRECTORY "${HDF5_SOURCE_DIR}/hl/c++")
#-- Build the High Level Fortran source codes
add_subdirectory (${HDF5_SOURCE_DIR}/hl/c++ ${PROJECT_BINARY_DIR}/hl/c++)
- endif (EXISTS "${HDF5_SOURCE_DIR}/hl/c++" AND IS_DIRECTORY "${HDF5_SOURCE_DIR}/hl/c++")
- endif (HDF5_BUILD_HL_LIB)
- endif (HDF5_BUILD_CPP_LIB)
-endif (EXISTS "${HDF5_SOURCE_DIR}/c++" AND IS_DIRECTORY "${HDF5_SOURCE_DIR}/c++")
+ endif ()
+ endif ()
+ endif ()
+endif ()
#-----------------------------------------------------------------------------
# Check if Fortran's default real is double precision. If it is and HL is
@@ -923,8 +940,8 @@ endif (EXISTS "${HDF5_SOURCE_DIR}/c++" AND IS_DIRECTORY "${HDF5_SOURCE_DIR}/c++"
if (HDF5_BUILD_FORTRAN AND HDF5_BUILD_HL_LIB)
if (NOT FORTRAN_DEFAULT_REAL_NOT_DOUBLE)
message (FATAL_ERROR " **** Fortran high-level routines are not supported when the default REAL is DOUBLE PRECISION, use HDF5_BUILD_HL_LIB:BOOL=OFF **** ")
- endif (NOT FORTRAN_DEFAULT_REAL_NOT_DOUBLE)
-endif (HDF5_BUILD_FORTRAN AND HDF5_BUILD_HL_LIB)
+ endif ()
+endif ()
#-----------------------------------------------------------------------------
# Option to build HDF5 Java Library
@@ -933,8 +950,8 @@ if (EXISTS "${HDF5_SOURCE_DIR}/java" AND IS_DIRECTORY "${HDF5_SOURCE_DIR}/java")
option (HDF5_BUILD_JAVA "Build Java HDF5 Library" OFF)
if (HDF5_BUILD_JAVA)
add_subdirectory (${HDF5_SOURCE_DIR}/java ${PROJECT_BINARY_DIR}/java)
- endif (HDF5_BUILD_JAVA)
-endif (EXISTS "${HDF5_SOURCE_DIR}/java" AND IS_DIRECTORY "${HDF5_SOURCE_DIR}/java")
+ endif ()
+endif ()
#-----------------------------------------------------------------------------
# Generate the H5pubconf.h file containing user settings needed by compilation
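The bulk of the CMakeLists.txt changes above are one mechanical cleanup: the controlling condition is no longer repeated inside else()/endif(), a form modern CMake accepts and which avoids keeping two copies of every condition in sync. A condensed before/after illustration of the pattern, taken from the tools hunk above:

    # Old style: the condition is duplicated and must be updated by hand in both places.
    if (HDF5_BUILD_TOOLS)
      add_subdirectory (${HDF5_SOURCE_DIR}/tools ${PROJECT_BINARY_DIR}/tools)
    endif (HDF5_BUILD_TOOLS)

    # New style applied throughout this patch: empty else () / endif ().
    if (HDF5_BUILD_TOOLS)
      add_subdirectory (${HDF5_SOURCE_DIR}/tools ${PROJECT_BINARY_DIR}/tools)
    endif ()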
diff --git a/COPYING b/COPYING
index 2166ced..51cb4e6 100644
--- a/COPYING
+++ b/COPYING
@@ -1,13 +1,12 @@
-
Copyright Notice and License Terms for
HDF5 (Hierarchical Data Format 5) Software Library and Utilities
-----------------------------------------------------------------------------
HDF5 (Hierarchical Data Format 5) Software Library and Utilities
-Copyright 2006-2016 by The HDF Group.
+Copyright (c) 2006, The HDF Group.
NCSA HDF5 (Hierarchical Data Format 5) Software Library and Utilities
-Copyright 1998-2006 by the Board of Trustees of the University of Illinois.
+Copyright (c) 1998-2006, The Board of Trustees of the University of Illinois.
All rights reserved.
@@ -47,13 +46,21 @@ advised of the possibility of such damage.
-----------------------------------------------------------------------------
-----------------------------------------------------------------------------
+Limited portions of HDF5 were developed by Lawrence Berkeley National
+Laboratory (LBNL). LBNL's Copyright Notice and Licensing Terms can be
+found here: COPYING_LBNL_HDF5 file in this directory or at
+http://support.hdfgroup.org/ftp/HDF5/releases/COPYING_LBNL_HDF5.
+
+-----------------------------------------------------------------------------
+-----------------------------------------------------------------------------
+
Contributors: National Center for Supercomputing Applications (NCSA) at
the University of Illinois, Fortner Software, Unidata Program Center (netCDF),
The Independent JPEG Group (JPEG), Jean-loup Gailly and Mark Adler (gzip),
and Digital Equipment Corporation (DEC).
-----------------------------------------------------------------------------
-
+
Portions of HDF5 were developed with support from the Lawrence Berkeley
National Laboratory (LBNL) and the United States Department of Energy
under Prime Contract No. DE-AC02-05CH11231.
diff --git a/COPYING_LBNL_HDF5 b/COPYING_LBNL_HDF5
new file mode 100644
index 0000000..16fba5d
--- /dev/null
+++ b/COPYING_LBNL_HDF5
@@ -0,0 +1,49 @@
+Copyright Notice and License Terms for
+HDF5 (Hierarchical Data Format 5) Software Library and Utilities
+-----------------------------------------------------------------------------
+
+HDF5 (Hierarchical Data Format 5)
+Copyright (c) 2016, The Regents of the University of California, through
+Lawrence Berkeley National Laboratory (subject to receipt of any required
+approvals from the U.S. Dept. of Energy).
+
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions, and the following disclaimer.
+
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions, and the following disclaimer in the documentation
+ and/or materials provided with the distribution.
+
+3. Neither the name of the University of California, Lawrence Berkeley
+National Laboratory, U.S. Dept. of Energy nor the names of its contributors
+may be used to endorse or promote products derived from this software without
+specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
+THE POSSIBILITY OF SUCH DAMAGE.
+
+You are under no obligation whatsoever to provide any bug fixes, patches,
+or upgrades to the features, functionality or performance of the source
+code ("Enhancements") to anyone; however, if you choose to make your
+Enhancements available either publicly, or directly to Lawrence Berkeley
+National Laboratory, without imposing a separate written license agreement
+for such Enhancements, then you hereby grant the following license:
+a non-exclusive, royalty-free perpetual license to install, use, modify,
+prepare derivative works, incorporate into other computer software,
+distribute, and sublicense such enhancements or derivative works thereof,
+in binary and source code form.
+
diff --git a/CTestConfig.cmake b/CTestConfig.cmake
index 849d218..8a20eb4 100644
--- a/CTestConfig.cmake
+++ b/CTestConfig.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
## This file should be placed in the root directory of your project.
## Then modify the CMakeLists.txt file in the root directory of your
## project to incorporate the testing dashboard.
@@ -11,10 +22,10 @@ set (CTEST_DROP_METHOD "http")
if (CDASH_LOCAL)
set (CTEST_DROP_SITE "cdash-internal.hdfgroup.org")
set (CTEST_DROP_LOCATION "/submit.php?project=HDF5Trunk")
-else (CDASH_LOCAL)
+else ()
set (CTEST_DROP_SITE "cdash.hdfgroup.org")
set (CTEST_DROP_LOCATION "/submit.php?project=HDF5+Trunk")
-endif (CDASH_LOCAL)
+endif ()
set (CTEST_DROP_SITE_CDASH TRUE)
set (UPDATE_TYPE git)
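CTestConfig.cmake only selects the CDash drop site; it is read by a CTest dashboard client. A hedged sketch of such a client script, runnable as ctest -S <script> (the directory names, site name, build name and the Experimental model are illustrative, not part of this patch):

    set (CTEST_SOURCE_DIRECTORY "${CTEST_SCRIPT_DIRECTORY}/hdf5")
    set (CTEST_BINARY_DIRECTORY "${CTEST_SOURCE_DIRECTORY}/build")
    set (CTEST_CMAKE_GENERATOR "Unix Makefiles")
    set (CTEST_SITE "example-builder")
    set (CTEST_BUILD_NAME "Linux-gcc")
    ctest_start ("Experimental")   # reads CTestConfig.cmake from CTEST_SOURCE_DIRECTORY
    ctest_configure ()
    ctest_build ()
    ctest_test ()
    ctest_submit ()                # posts to the CTEST_DROP_SITE/CTEST_DROP_LOCATION above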
diff --git a/MANIFEST b/MANIFEST
index 3141519..c34e614 100644
--- a/MANIFEST
+++ b/MANIFEST
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
#------------------------------------------------------------------------------
# This is the list of files that are part of HDF5 source distribution.
@@ -26,6 +24,7 @@
./.h5chkright.ini _DO_NOT_DISTRIBUTE_
./ACKNOWLEDGMENTS
./COPYING
+./COPYING_LBNL_HDF5
./MANIFEST
./Makefile.dist
./Makefile.am
@@ -246,7 +245,6 @@
./fortran/src/HDF5.F90
./fortran/src/Makefile.am
./fortran/src/README
-./fortran/src/README_DEVELOPEMENT _DO_NOT_DISTRIBUTE_
./fortran/src/h5fc.in
./fortran/src/hdf5_fortrandll.def.in
@@ -374,6 +372,8 @@
./c++/src/H5Include.h
./c++/src/H5IntType.cpp
./c++/src/H5IntType.h
+./c++/src/H5LaccProp.cpp
+./c++/src/H5LaccProp.h
./c++/src/H5Library.cpp
./c++/src/H5Library.h
./c++/src/H5Location.cpp
@@ -431,7 +431,9 @@
./release_docs/COPYING
./release_docs/HISTORY-1_0-1_8_0_rc3.txt
-./release_docs/HISTORY-1_9.txt
+./release_docs/HISTORY-1_8.txt
+./release_docs/HISTORY-1_10.txt
+./release_docs/HISTORY-1_8_0-1_10_0.txt
./release_docs/INSTALL
./release_docs/INSTALL_CMake.txt
./release_docs/INSTALL_Cygwin.txt
@@ -502,10 +504,12 @@
./src/H5C.c
./src/H5Cdbg.c
./src/H5Cepoch.c
+./src/H5Cimage.c
./src/H5Clog.c
./src/H5Cmodule.h
./src/H5Cmpio.c
./src/H5Cpkg.h
+./src/H5Cprefetched.c
./src/H5Cprivate.h
./src/H5Cpublic.h
./src/H5Cquery.c
@@ -575,6 +579,7 @@
./src/H5Fmpi.c
./src/H5Fquery.c
./src/H5Fsfile.c
+./src/H5Fspace.c
./src/H5Fsuper.c
./src/H5Fsuper_cache.c
./src/H5Fpkg.h
@@ -735,6 +740,7 @@
./src/H5Obogus.c
./src/H5Obtreek.c
./src/H5Ocache.c
+./src/H5Ocache_image.c
./src/H5Ochunk.c
./src/H5Ocont.c
./src/H5Ocopy.c
@@ -788,8 +794,13 @@
./src/H5Ppublic.h
./src/H5Pstrcpl.c
./src/H5Ptest.c
+./src/H5PB.c
+./src/H5PBmodule.h
+./src/H5PBpkg.h
+./src/H5PBprivate.h
./src/H5PL.c
./src/H5PLmodule.h
+./src/H5PLpkg.h
./src/H5PLprivate.h
./src/H5PLpublic.h
./src/H5PLextern.h
@@ -879,13 +890,21 @@
./src/libhdf5.settings.in
./src/H5win32defs.h
+./test/AtomicWriterReader.txt
./test/COPYING
./test/H5srcdir.h
./test/H5srcdir_str.h.in
./test/Makefile.am
+./test/POSIX_Order_Write_Test_Report.docx
+./test/POSIX_Order_Write_Test_Report.pdf
+./test/SWMR_POSIX_Order_UG.txt
+./test/SWMR_UseCase_UG.txt
./test/accum.c
./test/accum_swmr_reader.c
+./test/aggr.h5
./test/app_ref.c
+./test/atomic_reader.c
+./test/atomic_writer.c
./test/bad_compound.h5
./test/be_data.h5
./test/be_extlink1.h5
@@ -899,6 +918,7 @@
./test/cache_api.c
./test/cache_common.c
./test/cache_common.h
+./test/cache_image.c
./test/cache_logging.c
./test/cache_tagging.c
./test/cmpd_dset.c
@@ -943,6 +963,11 @@
./test/filter_fail.c
./test/flush1.c
./test/flush2.c
+./test/flushrefresh.c
+./test/fsm_aggr_nopersist.h5
+./test/fsm_aggr_persist.h5
+./test/genall5.c
+./test/genall5.h
./test/gen_bad_ohdr.c
./test/gen_bad_compound.c
./test/gen_bogus.c
@@ -969,6 +994,12 @@
./test/getname.c
./test/gheap.c
./test/group_old.h5
+./test/h5fc_ext1_f.h5
+./test/h5fc_ext1_i.h5
+./test/h5fc_ext2_if.h5
+./test/h5fc_ext2_sf.h5
+./test/h5fc_ext3_isf.h5
+./test/h5fc_ext_none.h5
./test/h5test.c
./test/h5test.h
./test/hyperslab.c
@@ -985,9 +1016,13 @@
./test/multi_file_v16-r.h5
./test/multi_file_v16-s.h5
./test/noencoder.h5
+./test/none.h5
./test/ntypes.c
./test/ohdr.c
./test/objcopy.c
+./test/page_buffer.c
+./test/paged_nopersist.h5
+./test/paged_persist.h5
./test/plugin.c
./test/reserved.c
./test/pool.c
@@ -998,6 +1033,18 @@
./test/specmetaread.h5
./test/stab.c
./test/swmr.c
+./test/swmr_addrem_writer.c
+./test/swmr_check_compat_vfd.c
+./test/swmr_common.c
+./test/swmr_common.h
+./test/swmr_generator.c
+./test/swmr_reader.c
+./test/swmr_remove_reader.c
+./test/swmr_remove_writer.c
+./test/swmr_sparse_reader.c
+./test/swmr_sparse_writer.c
+./test/swmr_start_write.c
+./test/swmr_writer.c
./test/tarray.c
./test/tarrold.h5
./test/tattr.c
@@ -1010,12 +1057,16 @@
./test/testcheck_version.sh.in
./test/testerror.sh.in
./test/testlinks_env.sh.in
+./test/testflushrefresh.sh.in
./test/testframe.c
./test/testhdf5.c
./test/testhdf5.h
./test/testlibinfo.sh.in
./test/test_plugin.sh.in
+./test/test_usecases.sh.in
./test/testmeta.c
+./test/testswmr.sh.in
+./test/testvdsswmr.sh.in
./test/tfile.c
./test/tgenprop.c
./test/th5o.c
@@ -1046,14 +1097,24 @@
./test/tunicode.c
./test/tvlstr.c
./test/tvltypes.c
+./test/twriteorder.c
./test/unlink.c
./test/unregister.c
+./test/use_append_chunk.c
+./test/use_append_mchunks.c
+./test/use_common.c
+./test/use_disable_mdc_flushes.c
+./test/use.h
./test/vfd.c
./test/test_filters_le.h5
./test/test_filters_be.h5
./test/gen_filters.c
./test/chunk_info.c
./test/vds.c
+./test/vds_swmr.h
+./test/vds_swmr_gen.c
+./test/vds_swmr_reader.c
+./test/vds_swmr_writer.c
./test/testfiles/err_compat_1
./test/testfiles/err_compat_2
@@ -1160,6 +1221,7 @@
./testpar/COPYING
./testpar/Makefile.am
./testpar/t_cache.c
+./testpar/t_cache_image.c
./testpar/t_chunk_alloc.c
./testpar/t_coll_chunk.c
./testpar/t_dset.c
@@ -1196,7 +1258,9 @@
./tools/src/h5dump/h5dump_xml.c
./tools/src/h5dump/h5dump_xml.h
./tools/test/h5dump/Makefile.am
+./tools/test/h5dump/dynlib_dump.c
./tools/test/h5dump/h5dumpgentest.c
+./tools/test/h5dump/h5dump_plugin.sh.in
./tools/test/h5dump/testh5dump.sh.in
./tools/test/h5dump/testh5dumppbits.sh.in
./tools/test/h5dump/testh5dumpxml.sh.in
@@ -1268,7 +1332,9 @@
./tools/src/h5diff/h5diff_main.c
./tools/src/h5diff/ph5diff_main.c
./tools/test/h5diff/Makefile.am
+./tools/test/h5diff/dynlib_diff.c
./tools/test/h5diff/h5diffgentest.c
+./tools/test/h5diff/h5diff_plugin.sh.in
./tools/test/h5diff/testh5diff.sh.in
./tools/test/h5diff/testph5diff.sh.in
@@ -1349,6 +1415,8 @@
./tools/src/h5ls/Makefile.am
./tools/src/h5ls/h5ls.c
./tools/test/h5ls/Makefile.am
+./tools/test/h5ls/dynlib_ls.c
+./tools/test/h5ls/h5ls_plugin.sh.in
./tools/test/h5ls/testh5ls.sh.in
./tools/test/h5ls/testh5lsvds.sh.in
@@ -1399,6 +1467,18 @@
./tools/test/misc/testh5mkgrp.sh.in
./tools/test/misc/testh5repart.sh.in
./tools/test/misc/talign.c
+./tools/test/misc/testfiles/h5clear_missing_file.ddl
+./tools/test/misc/testfiles/h5clear_no_mdc_image.ddl
+./tools/test/misc/testfiles/h5clear_open_fail.ddl
+./tools/test/misc/testfiles/h5clear_usage.ddl
+./tools/test/misc/testfiles/h5clear_log_v3.h5
+./tools/test/misc/testfiles/h5clear_mdc_image.h5
+./tools/test/misc/testfiles/h5clear_sec2_v0.h5
+./tools/test/misc/testfiles/h5clear_sec2_v2.h5
+./tools/test/misc/testfiles/h5clear_sec2_v3.h5
+./tools/test/misc/testfiles/latest_h5clear_log_v3.h5
+./tools/test/misc/testfiles/latest_h5clear_sec2_v3.h5
+./tools/test/misc/testfiles/mod_h5clear_mdc_image.h5
./tools/test/misc/testfiles/h5mkgrp_help.txt
./tools/test/misc/testfiles/h5mkgrp_version.txt.in
./tools/test/misc/h5perf_gentest.c
@@ -1549,7 +1629,8 @@
./tools/testfiles/tbinregR.exp
./tools/testfiles/tbinregR.ddl
./tools/testfiles/tbitfields.h5
-./tools/testfiles/tbitnopaque.ddl
+./tools/testfiles/tbitnopaque_be.ddl
+./tools/testfiles/tbitnopaque_le.ddl
./tools/testfiles/tbitnopaque.h5
./tools/testfiles/tboot1.ddl
./tools/testfiles/tboot2.ddl
@@ -1588,7 +1669,6 @@
./tools/testfiles/tdset.h5
./tools/testfiles/tdset2.h5
./tools/testfiles/tdset_idx.ls
-./tools/testfiles/tdset_idx.ddl
./tools/testfiles/tdset_idx.h5
./tools/testfiles/tempty.ddl
./tools/testfiles/tempty.h5
@@ -1733,6 +1813,9 @@
./tools/testfiles/tstring2.ddl
./tools/testfiles/tstringe.ddl
./tools/testfiles/tszip.ddl
+./tools/testfiles/tudfilter.ddl
+./tools/testfiles/tudfilter.h5
+./tools/testfiles/tudfilter.ls
./tools/testfiles/tudlink.h5
./tools/testfiles/tudlink.h5.xml
./tools/testfiles/tudlink-1.ddl
@@ -1997,7 +2080,8 @@
./tools/testfiles/tarray6.h5.xml
./tools/testfiles/tarray7.h5.xml
./tools/testfiles/tattr.h5.xml
-./tools/testfiles/tbitfields.h5.xml
+./tools/testfiles/tbitfields_be.h5.xml
+./tools/testfiles/tbitfields_le.h5.xml
./tools/testfiles/tcompound.h5.xml
./tools/testfiles/tcompound2.h5.xml
./tools/testfiles/tcompound_complex.h5.xml
@@ -2103,6 +2187,10 @@
./tools/test/h5diff/testfiles/h5diff_57.txt
./tools/test/h5diff/testfiles/h5diff_58.txt
./tools/test/h5diff/testfiles/h5diff_59.txt
+./tools/test/h5diff/testfiles/h5diff_60.txt
+./tools/test/h5diff/testfiles/h5diff_61.txt
+./tools/test/h5diff/testfiles/h5diff_62.txt
+./tools/test/h5diff/testfiles/h5diff_63.txt
./tools/test/h5diff/testfiles/h5diff_600.txt
./tools/test/h5diff/testfiles/h5diff_601.txt
./tools/test/h5diff/testfiles/h5diff_603.txt
@@ -2152,6 +2240,8 @@
./tools/test/h5diff/testfiles/h5diff_709.txt
./tools/test/h5diff/testfiles/h5diff_710.txt
./tools/test/h5diff/testfiles/h5diff_80.txt
+./tools/test/h5diff/testfiles/h5diff_800.txt
+./tools/test/h5diff/testfiles/h5diff_801.txt
./tools/test/h5diff/testfiles/h5diff_90.txt
./tools/test/h5diff/testfiles/h5diff_100.txt
./tools/test/h5diff/testfiles/h5diff_101.txt
@@ -2214,7 +2304,6 @@
./tools/test/h5diff/testfiles/h5diff_457.txt
./tools/test/h5diff/testfiles/h5diff_458.txt
./tools/test/h5diff/testfiles/h5diff_459.txt
-./tools/test/h5diff/testfiles/h5diff_idx.txt
./tools/test/h5diff/testfiles/h5diff_465.txt
./tools/test/h5diff/testfiles/h5diff_466.txt
./tools/test/h5diff/testfiles/h5diff_467.txt
@@ -2263,8 +2352,7 @@
./tools/test/h5diff/testfiles/h5diff_basic2.h5
./tools/test/h5diff/testfiles/h5diff_dset1.h5
./tools/test/h5diff/testfiles/h5diff_dset2.h5
-./tools/test/h5diff/testfiles/h5diff_dset_idx1.h5
-./tools/test/h5diff/testfiles/h5diff_dset_idx2.h5
+./tools/test/h5diff/testfiles/h5diff_dset3.h5
./tools/test/h5diff/testfiles/h5diff_dtypes.h5
./tools/test/h5diff/testfiles/h5diff_empty.h5
./tools/test/h5diff/testfiles/h5diff_hyper1.h5
@@ -2305,6 +2393,12 @@
./tools/test/h5diff/testfiles/tmptest.he5
./tools/test/h5diff/testfiles/h5diff_tmp2.txt
./tools/test/h5diff/testfiles/tmpSingleSiteBethe.output.h5
+./tools/test/h5diff/testfiles/tudfilter.h5
+./tools/test/h5diff/testfiles/tudfilter2.h5
+./tools/test/h5diff/testfiles/h5diff_ud.txt
+./tools/test/h5diff/testfiles/h5diff_udfail.txt
+./tools/test/h5diff/testfiles/diff_strings1.h5
+./tools/test/h5diff/testfiles/diff_strings2.h5
#vds
./tools/test/h5diff/testfiles/h5diff_v1.txt
./tools/test/h5diff/testfiles/h5diff_v2.txt
@@ -2338,6 +2432,7 @@
./tools/test/h5repack/testfiles/h5repack_soffset.h5
./tools/test/h5repack/testfiles/h5repack_szip.h5
./tools/test/h5repack/testfiles/ublock.bin
+./tools/test/h5repack/testfiles/crtorder.tordergr.h5.ddl
./tools/test/h5repack/testfiles/deflate_limit.h5repack_layout.h5.ddl
./tools/test/h5repack/testfiles/plugin_none.h5repack_layout.UD.h5.tst
./tools/test/h5repack/testfiles/plugin_test.h5repack_layout.h5.tst
@@ -2962,7 +3057,6 @@
./config/cmake_ext_mod/HDFUseFortran.cmake
./config/cmake_ext_mod/NSIS.InstallOptions.ini.in
./config/cmake_ext_mod/NSIS.template.in
-./config/cmake_ext_mod/prunTest.cmake
./config/cmake_ext_mod/runTest.cmake
./config/cmake_ext_mod/version.plist.in
@@ -3062,14 +3156,19 @@
./tools/test/h5stat/CMakeTests.cmake
./tools/src/misc/CMakeLists.txt
./tools/test/misc/CMakeLists.txt
-./tools/test/misc/CMakeTests.cmake
+./tools/test/misc/CMakeTestsClear.cmake
+./tools/test/misc/CMakeTestsMkgrp.cmake
+./tools/test/misc/CMakeTestsRepart.cmake
./tools/test/misc/vds/CMakeLists.txt
./tools/test/perform/CMakeLists.txt
./tools/test/perform/CMakeTests.cmake
# CMake-specific User Scripts
+./config/cmake/CTestScript.cmake
+./config/cmake/HDF5_Examples_options.cmake
./config/cmake/scripts/CTestScript.cmake
./config/cmake/scripts/HDF5config.cmake
+./config/cmake/scripts/HDF5options.cmake
# Files generated by autogen
./aclocal.m4
diff --git a/Makefile.am b/Makefile.am
index bbab346..e3e5e3d 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
diff --git a/Makefile.dist b/Makefile.dist
index e38af21..9edb476 100644
--- a/Makefile.dist
+++ b/Makefile.dist
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
# This Makefile is a stub (copied from Makefile.dist) which will run
# configure and then invoke the same target in the new Makefile created
diff --git a/README.txt b/README.txt
index e51a486..6820267 100644
--- a/README.txt
+++ b/README.txt
@@ -1,4 +1,4 @@
-HDF5 version 1.9.236 currently under development
+HDF5 version 1.11.0 currently under development
Please refer to the release_docs/INSTALL file for installation instructions.
------------------------------------------------------------------------------
diff --git a/UserMacros.cmake b/UserMacros.cmake
index 65ea5d4..c578c91 100644
--- a/UserMacros.cmake
+++ b/UserMacros.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
########################################################
# Include file for user options
########################################################
@@ -7,16 +18,15 @@
#-----------------------------------------------------------------------------
# Option to Build with User Defined Values
#-----------------------------------------------------------------------------
-MACRO (MACRO_USER_DEFINED_LIBS)
+macro (MACRO_USER_DEFINED_LIBS)
set (USER_DEFINED_VALUE "FALSE")
-ENDMACRO (MACRO_USER_DEFINED_LIBS)
+endmacro ()
#-------------------------------------------------------------------------------
option (BUILD_USER_DEFINED_LIBS "Build With User Defined Values" OFF)
if (BUILD_USER_DEFINED_LIBS)
MACRO_USER_DEFINED_LIBS ()
-endif (BUILD_USER_DEFINED_LIBS)
+endif ()
#-----------------------------------------------------------------------------
#------------------- E X A M P L E E N D -----------------------------------
#-----------------------------------------------------------------------------
- \ No newline at end of file
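Besides the copyright header, the UserMacros.cmake change lowercases macro()/endmacro() and drops the repeated name from endmacro()/endif(). A hedged sketch of how a site-local customization of this hook typically looks in the new style (the zlib path is purely illustrative):

    macro (MACRO_USER_DEFINED_LIBS)
      set (USER_DEFINED_VALUE "TRUE")
      # Example only: point the build at a site-specific zlib installation.
      # set (ZLIB_LIBRARY "/opt/local/lib/libz.a" CACHE FILEPATH "user zlib" FORCE)
    endmacro ()

    option (BUILD_USER_DEFINED_LIBS "Build With User Defined Values" OFF)
    if (BUILD_USER_DEFINED_LIBS)
      MACRO_USER_DEFINED_LIBS ()
    endif ()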
diff --git a/acsite.m4 b/acsite.m4
index b144962..9d04d1a 100644
--- a/acsite.m4
+++ b/acsite.m4
@@ -1,17 +1,16 @@
dnl -------------------------------------------------------------------------
dnl -------------------------------------------------------------------------
dnl
+dnl Copyright by The HDF Group.
dnl Copyright by the Board of Trustees of the University of Illinois.
dnl All rights reserved.
dnl
dnl This file is part of HDF5. The full HDF5 copyright notice, including
dnl terms governing use, modification, and redistribution, is contained in
-dnl the files COPYING and Copyright.html. COPYING can be found at the root
-dnl of the source code distribution tree; Copyright.html can be found at the
-dnl root level of an installed copy of the electronic HDF5 document set and
-dnl is linked from the top-level documents page. It can also be found at
-dnl http://hdf.ncsa.uiuc.edu/HDF5/doc/Copyright.html. If you do not have
-dnl access to either file, you may request a copy from hdfhelp@ncsa.uiuc.edu.
+dnl the COPYING file, which can be found at the root of the source code
+dnl distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+dnl If you do not have access to either file, you may request a copy from
+dnl help@hdfgroup.org.
dnl
dnl Macros for HDF5 Fortran
dnl
diff --git a/autogen.sh b/autogen.sh
index f3bd774..3d33c06 100755
--- a/autogen.sh
+++ b/autogen.sh
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic document set and is
-# linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have access
-# to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# A script to reconfigure autotools for HDF5, and to recreate other
@@ -205,6 +203,39 @@ Darwin*)
;;
esac
+# Run scripts that process source.
+#
+# These should be run before the autotools so that failures here block
+# compilation.
+
+# Run trace script
+# The trace script adds H5TRACE macros to library source files. It should
+# have no effect on files that don't have HDF5 API macros in them.
+echo "Running trace script:"
+bin/trace src/H5*.c || exit 1
+echo
+
+# Run make_err
+# make_err automatically generates the H5E headers that create error message
+# types for HDF5.
+echo "Running error generation script:"
+bin/make_err src/H5err.txt || exit 1
+echo
+
+# Run make_vers
+# make_vers automatically generates the public headers that define the API version
+# macros for HDF5.
+echo "Running API version generation script:"
+bin/make_vers src/H5vers.txt || exit 1
+echo
+
+# Run make_overflow
+# make_overflow automatically generates macros for detecting overflows for type
+# conversion.
+echo "Running overflow macro generation script:"
+bin/make_overflow src/H5overflow.txt || exit 1
+echo
+
# Run autotools in order
#
# When available, we use the --force option to ensure all files are
@@ -267,35 +298,7 @@ fi
${autoconf_cmd} || exit 1
echo
-# Run scripts that process source.
-
-# Run trace script
-# The trace script adds H5TRACE macros to library source files. It should
-# have no effect on files that don't have HDF5 API macros in them.
-echo
-echo "Running trace script:"
-bin/trace src/H5*.c || exit 1
-
-# Run make_err
-# make_err automatically generates the H5E headers that create error message
-# types for HDF5.
-echo
-echo "Running error generation script:"
-bin/make_err src/H5err.txt || exit 1
-
-# Run make_vers
-# make_vers automatically generates the public headers that define the API version
-# macros for HDF5.
-echo
-echo "Running API version generation script:"
-bin/make_vers src/H5vers.txt || exit 1
-
-# Run make_overflow
-# make_overflow automatically generates macros for detecting overflows for type
-# conversion.
-echo
-echo "Running overflow macro generation script:"
-bin/make_overflow src/H5overflow.txt || exit 1
+echo "*** SUCCESS ***"
echo
exit 0
diff --git a/bin/COPYING b/bin/COPYING
index 6903daf..6497ace 100755
--- a/bin/COPYING
+++ b/bin/COPYING
@@ -5,12 +5,9 @@
The files and subdirectories in this directory are part of HDF5.
The full HDF5 copyright notice, including terms governing use,
- modification, and redistribution, is contained in the files COPYING
- and Copyright.html. COPYING can be found at the root of the source
- code distribution tree; Copyright.html can be found at the root
- level of an installed copy of the electronic HDF5 document set and
- is linked from the top-level documents page. It can also be found
- at http://www.hdfgroup.org/HDF5/doc/Copyright.html. If you do not
- have access to either file, you may request a copy from
+ modification, and redistribution, is contained in the COPYING file
+ which can be found at the root of the source code distribution tree
+ or in https://support.hdfgroup.org/ftp/HDF5/releases. If you do
+ not have access to either file, you may request a copy from
help@hdfgroup.org.
diff --git a/bin/bbrelease b/bin/bbrelease
index b06b243..395b23c 100755
--- a/bin/bbrelease
+++ b/bin/bbrelease
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Make a release of hdf5.
diff --git a/bin/buildhdf5 b/bin/buildhdf5
index 3a4b0d3..064000a 100755
--- a/bin/buildhdf5
+++ b/bin/buildhdf5
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Build HDF5 library by doing configure, make, and tests.
# Usage: See USAGE()
diff --git a/bin/checkapi b/bin/checkapi
index b4a08e8..6882dea 100755
--- a/bin/checkapi
+++ b/bin/checkapi
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
require 5.003;
diff --git a/bin/checkposix b/bin/checkposix
index db47ff3..7ab741c 100755
--- a/bin/checkposix
+++ b/bin/checkposix
@@ -8,12 +8,10 @@ require 5.003;
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Robb Matzke, matzke@llnl.gov
# 30 Aug 1997
diff --git a/bin/chkconfigure b/bin/chkconfigure
index abafae8..d03f421 100755
--- a/bin/chkconfigure
+++ b/bin/chkconfigure
@@ -5,14 +5,12 @@
##
## This file is part of HDF5. The full HDF5 copyright notice, including
## terms governing use, modification, and redistribution, is contained in
-## the files COPYING and Copyright.html. COPYING can be found at the root
-## of the source code distribution tree; Copyright.html can be found at the
-## root level of an installed copy of the electronic HDF5 document set and
-## is linked from the top-level documents page. It can also be found at
-## http://hdf.ncsa.uiuc.edu/HDF5/doc/Copyright.html. If you do not have
-## access to either file, you may request a copy from hdfhelp@ncsa.uiuc.edu.
+## the COPYING file, which can be found at the root of the source code
+## distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+## If you do not have access to either file, you may request a copy from
+## help@hdfgroup.org.
##
-# Check that all the configure files are properly genernated.
+# Check that all the configure files are properly generated.
#
# Programmer: Albert Cheng
# Created Date: 2004/12/07
diff --git a/bin/chkcopyright b/bin/chkcopyright
index f6e910d..d67e030 100755
--- a/bin/chkcopyright
+++ b/bin/chkcopyright
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Check Copyright notice.
diff --git a/bin/chkmanifest b/bin/chkmanifest
index a01217d..95eb8f0 100755
--- a/bin/chkmanifest
+++ b/bin/chkmanifest
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Check that all the files in MANIFEST exist and (if this is a
diff --git a/bin/debug-ohdr b/bin/debug-ohdr
index 170919a..5b0a4b3 100755
--- a/bin/debug-ohdr
+++ b/bin/debug-ohdr
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
require 5.003;
diff --git a/bin/dependencies b/bin/dependencies
index a82a3ee..82247da 100755
--- a/bin/dependencies
+++ b/bin/dependencies
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
my $depend_file;
my $new_depend_file;
diff --git a/bin/deploy b/bin/deploy
index ef30d21..73f4b25 100755
--- a/bin/deploy
+++ b/bin/deploy
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Deploy the HDF5 binary.
#
diff --git a/bin/distdep b/bin/distdep
index 0a22bd1..4643700 100755
--- a/bin/distdep
+++ b/bin/distdep
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Usage: $0 [<].depend
diff --git a/bin/errors b/bin/errors
index 6ce997b..3c99fdc 100755
--- a/bin/errors
+++ b/bin/errors
@@ -11,12 +11,10 @@ use Text::Tabs;
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Robb Matzke, matzke@llnl.gov
# 30 Aug 1997
diff --git a/bin/gcov_script b/bin/gcov_script
index ac82189..9a6512d 100755
--- a/bin/gcov_script
+++ b/bin/gcov_script
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
# A script to generate coverage files for HDF5 using gcov.
# Configures, builds, and runs tests in-place; the output files will be placed
diff --git a/bin/genparser b/bin/genparser
index 71a8ae2..8cf6ec2 100755
--- a/bin/genparser
+++ b/bin/genparser
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic document set and is
-# linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have access
-# to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# This script runs flex/lex and bison/yacc to generate parser code for
diff --git a/bin/h5vers b/bin/h5vers
index 9babf97..5228903 100755
--- a/bin/h5vers
+++ b/bin/h5vers
@@ -12,12 +12,10 @@ use strict;
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Robb Matzke <matzke@llnl.gov>
# 17 July 1998
@@ -127,7 +125,7 @@ EOF
exit 1;
}
-# Parse arguments
+
my ($verbose, $set, $inc, $file, $rc);
my (@files) = ("H5public.h", "src/H5public.h", "../src/H5public.h");
while ($_ = shift) {
@@ -160,9 +158,10 @@ die "mutually exclusive options given\n" if $set && $inc;
# Determine file to use as H5public.h, README.txt,
# release_docs/RELEASE.txt, configure.ac, windows/src/H5pubconf.h
-# and config/lt_vers.am.
+# config/lt_vers.am and config/cmake/scripts/HDF5config.cmake.
# The README.txt, release_docs/RELEASE.txt, configure.ac,
-# windows/src/H5pubconf.h, and config/lt_vers.am
+# windows/src/H5pubconf.h, config/lt_vers.am and
+# config/cmake/scripts/HDF5config.cmake
# files are always in the directory above H5public.h
unless ($file) {
for (@files) {
@@ -175,6 +174,10 @@ die "unable to read file: $file\n" unless -r $file;
my $LT_VERS = $file;
$LT_VERS =~ s/[^\/]*$/..\/config\/lt_vers.am/;
die "unable to read file: $LT_VERS\n" unless -r $file;
+# config/cmake/scripts/HDF5config.cmake
+my $HDF5CONFIGCMAKE = $file;
+$HDF5CONFIGCMAKE =~ s/[^\/]*$/..\/config\/cmake\/scripts\/HDF5config.cmake/;
+die "unable to read file: $HDF5CONFIGCMAKE\n" unless -r $file;
# README.txt
my $README = $file;
$README =~ s/[^\/]*$/..\/README.txt/;
@@ -191,6 +194,15 @@ die "unable to read file: $CONFIGURE\n" unless -r $file;
my $CPP_DOC_CONFIG = $file;
$CPP_DOC_CONFIG =~ s/[^\/]*$/..\/c++\/src\/cpp_doc_config/;
die "unable to read file: $CPP_DOC_CONFIG\n" unless -r $file;
+my $H5_JAVA = $file;
+$H5_JAVA =~ s/[^\/]*$/..\/java\/src\/hdf\/hdf5lib\/H5.java/;
+die "unable to read file: $H5_JAVA\n" unless -r $file;
+my $TESTH5_JAVA = $file;
+$TESTH5_JAVA =~ s/[^\/]*$/..\/java\/test\/TestH5.java/;
+die "unable to read file: $TESTH5_JAVA\n" unless -r $file;
+my $REPACK_LAYOUT_PLUGIN_VERSION = $file;
+$REPACK_LAYOUT_PLUGIN_VERSION =~ s/[^\/]*$/..\/tools\/test\/h5repack\/testfiles\/h5repack_layout.h5-plugin_version_test.ddl/;
+die "unable to read file: $REPACK_LAYOUT_PLUGIN_VERSION\n" unless -r $file;
# Get the current version number.
open FILE, $file or die "unable to open $file: $!\n";
@@ -239,7 +251,8 @@ if ($set) {
$RELEASE = "";
$CONFIGURE = "";
$CPP_DOC_CONFIG = "";
- $LT_VERS = "";
+ $LT_VERS = "";
+ $HDF5CONFIGCMAKE = "";
@newver = @curver;
}
@@ -321,20 +334,88 @@ if ($RELEASE) {
# Update the c++/src/cpp_doc_config file
if ($CPP_DOC_CONFIG) {
my $data = read_file($CPP_DOC_CONFIG);
- my $version_string = sprintf("\"%d.%d.%d%s %s\"",
+ my $sub_rel_ver_str = (
+ $newver[3] eq ""
+ ? sprintf("%s", "")
+ : sprintf("%s", "-".$newver[3].", currently under development")
+ );
+ my $version_string = sprintf("\"%d.%d.%d%s%s\"",
@newver[0,1,2],
- $newver[3] eq "" ? "" : "-".$newver[3],
- "currently under development");
-
+ $sub_rel_ver_str);
$data =~ s/PROJECT_NUMBER\s*=.*/PROJECT_NUMBER = $version_string/;
write_file($CPP_DOC_CONFIG, $data);
}
-# helper function to read the file for updating c++/src/cpp_doc_config file.
+# Update the config/cmake/scripts/HDF5config.cmake file
+if ($HDF5CONFIGCMAKE) {
+ my $data = read_file($HDF5CONFIGCMAKE);
+# my $sub_rel_ver_str = "";
+ my $sub_rel_ver_str = (
+ $newver[3] eq ""
+ ? sprintf("\"%s\"", "")
+ : sprintf("\"%s\"", "-".$newver[3].", currently under development")
+ );
+ my $version_string = sprintf("\"%d.%d.%d\"", @newver[0,1,2]);
+
+ $data =~ s/set \(CTEST_SOURCE_VERSION .*\)/set \(CTEST_SOURCE_VERSION $version_string\)/;
+ $data =~ s/set \(CTEST_SOURCE_VERSEXT .*\)/set \(CTEST_SOURCE_VERSEXT $sub_rel_ver_str\)/;
+
+ write_file($HDF5CONFIGCMAKE, $data);
+}
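For reference, the two lines these substitutions rewrite in config/cmake/scripts/HDF5config.cmake are not shown in this diff; judging from the patterns above, they presumably have the shape below (the version values are illustrative only):

    set (CTEST_SOURCE_VERSION "1.10.1")
    set (CTEST_SOURCE_VERSEXT "-snap1, currently under development")

For an official release, where the sub-release field is empty, CTEST_SOURCE_VERSEXT is set to an empty quoted string instead.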
+
+# Update the java/src/hdf/hdf5lib/H5.java file
+if ($H5_JAVA) {
+ my $data = read_file($H5_JAVA);
+# my $sub_rel_ver_str = "";
+ my $sub_rel_ver_str = (
+ $newver[3] eq ""
+ ? sprintf("\"%s\"", "")
+ : sprintf("\"%s\"", "-".$newver[3].", currently under development")
+ );
+ my $version_string1 = sprintf("%d.%d.%d", @newver[0,1,2]);
+ my $version_string2 = sprintf("%d, %d, %d", @newver[0,1,2]);
+
+ $data =~ s/\@version HDF5 .* <BR>/\@version HDF5 $version_string1 <BR>/;
+ $data =~ s/ public final static int LIB_VERSION\[\] = { \d*, \d*, \d* };/ public final static int LIB_VERSION[] = { $version_string2 };/;
+
+ write_file($H5_JAVA, $data);
+}
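The H5.java substitutions likewise target the javadoc @version tag and the LIB_VERSION array; judging from the regular expressions above, the affected lines presumably look like this (version values are illustrative only):

    @version HDF5 1.10.1 <BR>
    public final static int LIB_VERSION[] = { 1, 10, 1 };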
+
+# Update the java/test/TestH5.java file
+if ($TESTH5_JAVA) {
+ my $data = read_file($TESTH5_JAVA);
+# my $sub_rel_ver_str = "";
+ my $sub_rel_ver_str = (
+ $newver[3] eq ""
+ ? sprintf("\"%s\"", "")
+ : sprintf("\"%s\"", "-".$newver[3].", currently under development")
+ );
+ my $version_string1 = sprintf("%d, %d, %d", @newver[0,1,2]);
+ my $version_string2 = sprintf("int majnum = %d, minnum = %d, relnum = %d", @newver[0,1,2]);
+
+ $data =~ s/ int libversion\[\] = { .* };/ int libversion\[\] = { $version_string1 };/;
+ $data =~ s/ int majnum = \d*, minnum = \d*, relnum = \d*;/ $version_string2;/;
+
+ write_file($TESTH5_JAVA, $data);
+}
+
+# Update the tools/test/h5repack/testfiles/h5repack_layout.h5-plugin_version_test.ddl file
+if ($REPACK_LAYOUT_PLUGIN_VERSION) {
+ my $data = read_file($REPACK_LAYOUT_PLUGIN_VERSION);
+ my $version_string = sprintf("%d %d %d", @newver[0,1,2]);
+
+ $data =~ s/ PARAMS { 9 \d* \d* \d* }/ PARAMS { 9 $version_string }/g;
+
+ write_file($REPACK_LAYOUT_PLUGIN_VERSION, $data);
+}
+
+# helper function to read the file for updating c++/src/cpp_doc_config,
+# config/cmake/scripts/HDF5config.cmake, and java files.
# The version string in that file is not at the top, so the string replacement
# is not for the first line, and reading/writing the entire file as one string
# facilitates the substring replacement.
+# Presumably these will also work for resetting the version in HDF5config.cmake.
sub read_file {
my ($filename) = @_;
@@ -346,7 +427,8 @@ sub read_file {
return $all;
}
-# helper function to write the file for updating c++/src/cpp_doc_config file.
+# helper function to write the file for updating c++/src/cpp_doc_config,
+# config/cmake/scripts/HDF5config.cmake and java files.
sub write_file {
my ($filename, $content) = @_;
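Taken together, these h5vers changes mean a single run now propagates the version string into the CMake script, the Java bindings and the h5repack plugin-version test file as well as the files handled previously. A typical sequence, matching the calls made elsewhere in this patch by bin/release and bin/snapshot (the version string itself is only an example), would be:

    # report the current version, set a new one, then bump the sub-release field
    perl bin/h5vers -v
    bin/h5vers -s 1.10.1-snap1
    perl bin/h5vers -i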
diff --git a/bin/iostats b/bin/iostats
index f57c0d0..f054b9c 100755
--- a/bin/iostats
+++ b/bin/iostats
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Usage: pipe the output of Linux's `strace' program into the stdin of
diff --git a/bin/locate_sw b/bin/locate_sw
index c9a7924..bab7bd2 100755
--- a/bin/locate_sw
+++ b/bin/locate_sw
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Try to locate the software as named in argument.
diff --git a/bin/make_err b/bin/make_err
index 1b39d53fb..bfe8861 100755
--- a/bin/make_err
+++ b/bin/make_err
@@ -9,12 +9,10 @@ $indent=4;
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Create error headers
@@ -38,12 +36,10 @@ sub print_copyright ($) {
print $fh " * *\n";
print $fh " * This file is part of HDF5. The full HDF5 copyright notice, including *\n";
print $fh " * terms governing use, modification, and redistribution, is contained in *\n";
- print $fh " * the files COPYING and Copyright.html. COPYING can be found at the root *\n";
- print $fh " * of the source code distribution tree; Copyright.html can be found at the *\n";
- print $fh " * root level of an installed copy of the electronic HDF5 document set and *\n";
- print $fh " * is linked from the top-level documents page. It can also be found at *\n";
- print $fh " * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *\n";
- print $fh " * access to either file, you may request a copy from help\@hdfgroup.org. *\n";
+ print $fh " * the COPYING file, which can be found at the root of the source code *\n";
+ print $fh " * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *\n";
+ print $fh " * If you do not have access to either file, you may request a copy from *\n";
+ print $fh " * help\@hdfgroup.org. *\n";
print $fh " * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */\n";
}
diff --git a/bin/make_overflow b/bin/make_overflow
index ced486a..ccd640e 100755
--- a/bin/make_overflow
+++ b/bin/make_overflow
@@ -14,12 +14,10 @@ my @ctypes = ( () );
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Create assignment overflow #ifdefs
@@ -67,12 +65,10 @@ sub print_copyright ($) {
print $fh " * *\n";
print $fh " * This file is part of HDF5. The full HDF5 copyright notice, including *\n";
print $fh " * terms governing use, modification, and redistribution, is contained in *\n";
- print $fh " * the files COPYING and Copyright.html. COPYING can be found at the root *\n";
- print $fh " * of the source code distribution tree; Copyright.html can be found at the *\n";
- print $fh " * root level of an installed copy of the electronic HDF5 document set and *\n";
- print $fh " * is linked from the top-level documents page. It can also be found at *\n";
- print $fh " * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *\n";
- print $fh " * access to either file, you may request a copy from help\@hdfgroup.org. *\n";
+ print $fh " * the COPYING file, which can be found at the root of the source code *\n";
+ print $fh " * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *\n";
+ print $fh " * If you do not have access to either file, you may request a copy from *\n";
+ print $fh " * help\@hdfgroup.org. *\n";
print $fh " * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */\n";
}
diff --git a/bin/make_vers b/bin/make_vers
index 0bc3b62..7e7fba1 100755
--- a/bin/make_vers
+++ b/bin/make_vers
@@ -23,12 +23,10 @@ $indent = 2;
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Create public symbol version headers
@@ -52,12 +50,10 @@ sub print_copyright ($) {
print $fh " * *\n";
print $fh " * This file is part of HDF5. The full HDF5 copyright notice, including *\n";
print $fh " * terms governing use, modification, and redistribution, is contained in *\n";
- print $fh " * the files COPYING and Copyright.html. COPYING can be found at the root *\n";
- print $fh " * of the source code distribution tree; Copyright.html can be found at the *\n";
- print $fh " * root level of an installed copy of the electronic HDF5 document set and *\n";
- print $fh " * is linked from the top-level documents page. It can also be found at *\n";
- print $fh " * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *\n";
- print $fh " * access to either file, you may request a copy from help\@hdfgroup.org. *\n";
+ print $fh " * the COPYING file, which can be found at the root of the source code *\n";
+ print $fh " * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *\n";
+ print $fh " * If you do not have access to either file, you may request a copy from *\n";
+ print $fh " * help\@hdfgroup.org. *\n";
print $fh " * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */\n";
}
diff --git a/bin/mkdirs b/bin/mkdirs
index 694a754..43972b6 100755
--- a/bin/mkdirs
+++ b/bin/mkdirs
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# This is a small program which will create directories n-levels deep.
# You just call it with something like:
diff --git a/bin/newer b/bin/newer
index 8ed9f87..8083cae 100755
--- a/bin/newer
+++ b/bin/newer
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Compare the modification time of file argument 1 against other file arguments.
# Return true (0) if argument 1 is newer than all others, otherwise return
diff --git a/bin/output_filter.sh b/bin/output_filter.sh
index 58f1c48..fb59dfd 100644
--- a/bin/output_filter.sh
+++ b/bin/output_filter.sh
@@ -3,12 +3,10 @@
##
## This file is part of HDF5. The full HDF5 copyright notice, including
## terms governing use, modification, and redistribution, is contained in
-## the files COPYING and Copyright.html. COPYING can be found at the root
-## of the source code distribution tree; Copyright.html can be found at the
-## root level of an installed copy of the electronic HDF5 document set and
-## is linked from the top-level documents page. It can also be found at
-## http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-## access to either file, you may request a copy from help@hdfgroup.org.
+## the COPYING file, which can be found at the root of the source code
+## distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+## If you do not have access to either file, you may request a copy from
+## help@hdfgroup.org.
# This contains function definitions of output filtering.
# This file should only be sourced in by another shell script.
@@ -17,6 +15,21 @@
# Created Date: 2011/5/3
+# Comment added to address HDFFV-8270:
+# The purpose of this file is to filter extraneous messages that appear in
+# stdout and stderr on some machines that have been tested outside of the
+# HDF Group realm, so that when the output files are compared to the expected
+# output, the extra messages will not cause failures in the tests. The system
+# messages quoted in the comments below are out of date: the filtering code
+# was presumably correct when last used, but the sample output in the comments
+# no longer matches the script code that follows. I don't currently have
+# access to any of these systems to see the current output and the effect of
+# the script code. If using this file in the future, please update the
+# comments to match the scripts in use.
+# Larry Knox 2017/3/15
+
+
# Some systems will dump some messages to stdout for various reasons.
# Remove them from the stdout result file.
# $1 is the file name of the file to be filtered.
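As the comment added above explains, each function in this file strips a known extraneous system message from a captured output file before that file is compared with the expected output. A minimal sketch of one such filter, assuming a hypothetical "Warning: license server" message (the function name and the message are illustrative, not taken from this file), could look like:

    # Filter a hypothetical system warning out of the stdout file named in $1.
    STDOUT_FILTER()
    {
        result_file=$1
        tmp_file=/tmp/h5test_stdout_$$
        # Drop any line containing the extraneous message; keep everything else.
        sed -e '/Warning: license server/d' < $result_file > $tmp_file
        # Only replace the original file when sed succeeded.
        if [ $? -eq 0 ]; then
            cp $tmp_file $result_file
        fi
        rm -f $tmp_file
    }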
diff --git a/bin/pkgscrpts/h5rmflags b/bin/pkgscrpts/h5rmflags
index 32b8090..099956c 100755
--- a/bin/pkgscrpts/h5rmflags
+++ b/bin/pkgscrpts/h5rmflags
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
## Remove paths to libraries used to build HDF5 when packaging HDF5
diff --git a/bin/pkgscrpts/makeHDF5BinaryTarfiles.pl b/bin/pkgscrpts/makeHDF5BinaryTarfiles.pl
index 32b3049..f4a9ebd 100755
--- a/bin/pkgscrpts/makeHDF5BinaryTarfiles.pl
+++ b/bin/pkgscrpts/makeHDF5BinaryTarfiles.pl
@@ -7,12 +7,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
#
#
diff --git a/bin/release b/bin/release
index 08779cb..09c033d 100755
--- a/bin/release
+++ b/bin/release
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Make a release of hdf5.
@@ -40,9 +38,12 @@
USAGE()
{
cat << EOF
-Usage: $0 -d <dir> [-h] [--nocheck] [--private] <methods> ...
+Usage: $0 -d <dir> [--docver BRANCHNAME] [-h] [--nocheck] [--private] <methods> ...
-d DIR The name of the directory where the releas(es) should be
placed.
+ --docver BRANCHNAME This is added for 1.8 and beyond to get the correct
+ version of documentation files from the hdf5docs
+ repository. BRANCHNAME for v1.8 should be hdf5_1_8.
-h print the help page.
--nocheck Ignore errors in MANIFEST file.
--private Make a private release with today's date in version information.
@@ -54,8 +55,13 @@ for compressing the resulting tar archive (if none are given then
tar -- use tar and don't do any compressing.
gzip -- use gzip with "-9" and append ".gz" to the output name.
+ cmake-tgz -- create a tar file using the gzip default level with a build-unix.sh
+ command file and all other CMake files needed to build HDF5 source
+ using CMake on unix machines.
bzip2 -- use bzip2 with "-9" and append ".bz2" to the output name.
zip -- convert all text files to DOS style and form a zip file for Windows use.
+    cmake-zip   -- convert all text files to DOS style and create a zip file including cmake
+ scripts and .bat files to build HDF5 source using CMake on Windows.
doc -- produce the latest doc tree in addition to the archive.
An md5 checksum is produced for each archive created and stored in the md5 file.
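With the methods added above, a release run that produces both the classic archives and the CMake-ready bundles might be invoked like this (the target directory and branch name are examples only):

    # build gzip, CMake tar.gz, CMake zip and doc archives into ../releases
    bin/release -d ../releases --docver hdf5_1_8 gzip cmake-tgz cmake-zip doc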
@@ -92,9 +98,9 @@ EOF
# Modifications
#
# Steps:
-# 1. untar the tarball in a temporay directory;
+# 1. untar the tarball in a temporary directory;
# Note: do this in a temporary directory to avoid changing
-# the original source directory which maybe around.
+# the original source directory which may be around.
# 2. convert all its text files to DOS (LF-CR) style;
# 3. form a zip file which is usable by Windows users.
#
@@ -111,7 +117,7 @@ tar2zip()
echo "usage: tar2zip <tarfilename> <zipfilename>"
return 1
fi
- ztmpdir=/tmp/tmpdir$$
+ ztmpdir=/tmp/ztmpdir$$
mkdir -p $ztmpdir
version=$1
tarfile=$2
@@ -150,6 +156,175 @@ tar2zip()
rm -rf $ztmpdir
}
+# Function name: tar2cmakezip
+# Convert the release tarball to a Windows zipball with files to run CMake build.
+#
+# Programmer: Larry Knox
+# Creation date: 2017-02-20
+#
+# Modifications
+#
+# Steps:
+# 1. untar the tarball in a temporary directory;
+#    Note: do this in a temporary directory to avoid changing
+#    the original source directory which may be around.
+# 2. add the build-VS*.bat batch files for building with CMake on Windows.
+# 3. add SZip.tar.gz, ZLib.tar.gz and cmake files to top level directory.
+# 4. convert all text files to DOS style.
+# 5. create a zip file with these contents:
+#        build-VS*.bat       batch files copied from /mnt/scr1/pre-release/hdf5/CMake
+#        hdf5-<version>      source code directory extracted from tar file
+#        CTestScript.cmake   cmake file copied from <hdf5 source code>/config/cmake/scripts
+#        HDF5config.cmake    cmake file copied from <hdf5 source code>/config/cmake/scripts
+#        HDF5options.cmake   cmake file copied from <hdf5 source code>/config/cmake/scripts
+#        SZip.tar.gz         copied from /mnt/scr1/pre-release/hdf5/CMake
+#        ZLib.tar.gz         copied from /mnt/scr1/pre-release/hdf5/CMake
+
+
+# Parameters:
+# $1 version
+# $2 release tarball
+# $3 output zipball file name
+#
+# Returns 0 if successful; 1 otherwise
+#
+    # need function to create another temporary directory, extract the
+    # $tmpdir/$HDF5_VERS.tar into it, add the build-VS*.bat files,
+    # CTestScript.cmake, HDF5config.cmake, HDF5options.cmake, SZip.tar.gz
+    # and ZLib.tar.gz, and then zip it.
+tar2cmakezip()
+{
+ if [ $# -ne 3 ]; then
+          echo "usage: tar2cmakezip <version> <tarfilename> <zipfilename>"
+ return 1
+ fi
+ cmziptmpdir=/tmp/cmziptmpdir$$
+ mkdir -p $cmziptmpdir
+ version=$1
+ tarfile=$2
+ zipfile=$3
+
+    # step 1: untar tarball in cmziptmpdir
+ (cd $cmziptmpdir; tar xf -) < $tarfile
+ # sanity check
+ if [ ! -d $cmziptmpdir/$version ]; then
+ echo "untar did not create $cmziptmpdir/$version source dir"
+ # cleanup
+ rm -rf $cmziptmpdir
+ return 1
+ fi
+
+    # step 2: add batch files for building with CMake on Windows
+ cp /mnt/scr1/pre-release/hdf5/CMake/build-VS2012-32.bat $cmziptmpdir
+ cp /mnt/scr1/pre-release/hdf5/CMake/build-VS2012-64.bat $cmziptmpdir
+ cp /mnt/scr1/pre-release/hdf5/CMake/build-VS2013-32.bat $cmziptmpdir
+ cp /mnt/scr1/pre-release/hdf5/CMake/build-VS2013-64.bat $cmziptmpdir
+ cp /mnt/scr1/pre-release/hdf5/CMake/build-VS2015-32.bat $cmziptmpdir
+ cp /mnt/scr1/pre-release/hdf5/CMake/build-VS2015-64.bat $cmziptmpdir
+
+ # step 3: add SZIP.tar.gz, ZLib.tar.gz and cmake files
+ cp /mnt/scr1/pre-release/hdf5/CMake/SZip.tar.gz $cmziptmpdir
+ cp /mnt/scr1/pre-release/hdf5/CMake/ZLib.tar.gz $cmziptmpdir
+ cp $cmziptmpdir/$version/config/cmake/scripts/CTestScript.cmake $cmziptmpdir
+ cp $cmziptmpdir/$version/config/cmake/scripts/HDF5config.cmake $cmziptmpdir
+ cp $cmziptmpdir/$version/config/cmake/scripts/HDF5options.cmake $cmziptmpdir
+
+ # step 4: convert text files
+    # There may be a simpler way to do this.
+ # options used in unix2dos:
+ # -k Keep the date stamp
+ # -q quiet mode
+ # grep redirect output to /dev/null because -q or -s are not portable.
+ find $cmziptmpdir/$version | \
+ while read inf; do \
+ if file $inf | grep "$inf\: .*text" > /dev/null 2>&1 ; then \
+ unix2dos -q -k $inf; \
+ fi\
+ done
+    # step 5: make zipball
+ # -9 maximum compression
+ # -y Store symbolic links as such in the zip archive
+ # -r recursive
+ # -q quiet
+ (cd $cmziptmpdir; zip -9 -y -r -q CMake-$version.zip *)
+ mv $cmziptmpdir/CMake-$version.zip $zipfile
+
+ # cleanup
+ rm -rf $cmziptmpdir
+}
+
+# Function name: tar2cmaketgz
+# Convert the release tarball to a gzipped tar file with files to run CMake build.
+#
+# Programmer: Larry Knox
+# Creation date: 2017-02-20
+#
+# Modifications
+#
+# Steps:
+# 1. untar the tarball in a temporary directory;
+# Note: do this in a temporary directory to avoid changing
+# the original source directory which may be around.
+# 2. add build-unix.sh script.
+# 3. add SZIP.tar.gz, ZLib.tar.gz and cmake files to top level directory.
+# 4. create gzipped tar file with these contents:
+# build-unix.sh script
+# hdf5-<version> source code directory extracted from tar file
+# CTestScript.cmake cmake file copied from <hdf5 source code>/config/cmake/scripts
+# HDF5config.cmake cmake file copied from <hdf5 source code>/config/cmake/scripts
+# HDF5options.cmake cmake file copied from <hdf5 source code>/config/cmake/scripts
+# SZip.tar.gz copied from /mnt/scr1/pre-release/hdf5/CMake
+# ZLib.tar.gz copied from /mnt/scr1/pre-release/hdf5/CMake
+
+
+# Parameters:
+# $1 version
+# $2 release tarball
+#   $3  output gzipped tar file name
+#
+# Returns 0 if successful; 1 otherwise
+#
+ # need function to create another temporary directory, extract the
+ # $tmpdir/$HDF5_VERS.tar into it, add (create) build-unix.sh,
+ # CTestScript.cmake, HDF5config.cmake, SZIP.tar.gz and ZLib.tar.gz,
+ # and then tar.gz it.
+tar2cmaketgz()
+{
+ if [ $# -ne 3 ]; then
+          echo "usage: tar2cmaketgz <version> <tarfilename> <tgzfilename>"
+ return 1
+ fi
+ cmgztmpdir=/tmp/cmgztmpdir$$
+ mkdir -p $cmgztmpdir
+ version=$1
+ tarfile=$2
+ tgzfile=$3
+
+ # step 1: untar tarball in cmgztmpdir
+ (cd $cmgztmpdir; tar xf -) < $tarfile
+ # sanity check
+ if [ ! -d $cmgztmpdir/$version ]; then
+ echo "untar did not create $cmgztmpdir/$version source dir"
+ # cleanup
+ rm -rf $cmgztmpdir
+ return 1
+ fi
+
+
+ # step 2: add build-unix.sh script
+ (cd $cmgztmpdir; echo "ctest -S HDF5config.cmake,BUILD_GENERATOR=Unix -C Release -V -O hdf5.log" > build-unix.sh; chmod 755 build-unix.sh)
+
+ # step 3: add SZIP.tar.gz, ZLib.tar.gz and cmake files
+ cp /mnt/scr1/pre-release/hdf5/CMake/SZip.tar.gz $cmgztmpdir
+ cp /mnt/scr1/pre-release/hdf5/CMake/ZLib.tar.gz $cmgztmpdir
+ cp $cmgztmpdir/$version/config/cmake/scripts/CTestScript.cmake $cmgztmpdir
+ cp $cmgztmpdir/$version/config/cmake/scripts/HDF5config.cmake $cmgztmpdir
+ cp $cmgztmpdir/$version/config/cmake/scripts/HDF5options.cmake $cmgztmpdir
+    tar czf $tgzfile -C $cmgztmpdir . || exit 1
+
+ # cleanup
+ rm -rf $cmgztmpdir
+}
+
# This command must be run at the top level of the hdf5 source directory.
# Verify this requirement.
if [ ! \( -f configure.ac -a -f bin/release \) ]; then
@@ -170,6 +345,15 @@ pmode='no'
tmpdir="../#release_tmp.$$" # tmp work directory
DOC_URL=https://git@bitbucket.hdfgroup.org/scm/hdffv/hdf5doc.git
CPPLUS_RM_NAME=cpplus_RM
+MAINT_MODE_ENABLED=""
+
+# If maintainer mode is enabled in configure, it should be disabled for release,
+# and enabled again after release files have been created. If already disabled
+# there's no need to do either.
+MAINT_MODE_ENABLED=`grep ^AM_MAINTAINER_MODE ./configure.ac | grep enable`
+if [ "${MAINT_MODE_ENABLED}" != "" ]; then
+ bin/switch_maint_mode -disable ./configure.ac
+fi
# Restore previous Version information
RESTORE_VERSION()
@@ -203,6 +387,10 @@ while [ -n "$1" ]; do
--private)
pmode=yes
;;
+ --docver)
+ DOCVERSION=$1
+ shift
+ ;;
-*)
echo "Unknown switch: $arg" 1>&2
USAGE
@@ -240,6 +428,7 @@ if [ X$pmode = Xyes ]; then
# (h5vers does not correctly handle just m.n.r-$today.)
VERS=`echo $VERS | sed -e s/-.*//`-of$today
echo Private release of $VERS
+else
bin/h5vers -s $VERS
fi
@@ -282,6 +471,11 @@ for f in README.txt release_docs/RELEASE.txt; do
chmod 644 $f
done
+# trunk is different than branches.
+if [ "${DOCVERSION}" ]; then
+ DOC_URL="$DOC_URL -b ${DOCVERSION}"
+fi
+
# Create the tar file
test "$verbose" && echo " Running tar..." 1>&2
( \
@@ -304,6 +498,11 @@ for comp in $methods; do
gzip -9 <$tmpdir/$HDF5_VERS.tar >$DEST/$HDF5_VERS.tar.gz
(cd $DEST; md5sum $HDF5_VERS.tar.gz >> $MD5file)
;;
+ cmake-tgz)
+ test "$verbose" && echo " Creating CMake tar.gz file..." 1>&2
+ tar2cmaketgz $HDF5_VERS $tmpdir/$HDF5_VERS.tar $DEST/CMake-$HDF5_VERS.tar.gz 1>&2
+ (cd $DEST; md5sum CMake-$HDF5_VERS.tar.gz >> $MD5file)
+ ;;
bzip2)
test "$verbose" && echo " Running bzip2..." 1>&2
bzip2 -9 <$tmpdir/$HDF5_VERS.tar >$DEST/$HDF5_VERS.tar.bz2
@@ -314,19 +513,27 @@ for comp in $methods; do
tar2zip $HDF5_VERS $tmpdir/$HDF5_VERS.tar $DEST/$HDF5_VERS.zip 1>&2
(cd $DEST; md5sum $HDF5_VERS.zip >> $MD5file)
;;
+ cmake-zip)
+ test "$verbose" && echo " Creating CMake-zip ball..." 1>&2
+ tar2cmakezip $HDF5_VERS $tmpdir/$HDF5_VERS.tar $DEST/CMake-$HDF5_VERS.zip 1>&2
+ (cd $DEST; md5sum CMake-$HDF5_VERS.zip >> $MD5file)
+ ;;
doc)
+ if [ "${DOCVERSION}" = "" ]; then
+ DOCVERSION=master
+ fi
test "$verbose" && echo " Creating docs..." 1>&2
# Check out docs from git repo
- (cd $tmpdir; git clone $DOC_URL > /dev/null) || exit 1
- # Create doxygen C++ RM
+ (cd $tmpdir; git clone -q $DOC_URL ${DOCVERSION} > /dev/null) || exit 1
+ # Create doxygen C++ RM
(cd c++/src && doxygen cpp_doc_config > /dev/null ) || exit 1
# Replace version of C++ RM with just-created version
- rm -rf $tmpdir/trunk/html/$CPPLUS_RM_NAME
- mv c++/src/$CPPLUS_RM_NAME $tmpdir/trunk/html/$CPPLUS_RM_NAME
- # Compress the docs and move them to the release area
- mv $tmpdir/trunk $tmpdir/${HDF5_VERS}_docs
- (cd $tmpdir && tar cf ${HDF5_VERS}_docs.tar ${HDF5_VERS}_docs)
- mv $tmpdir/${HDF5_VERS}_docs.tar $DEST
+ rm -rf $tmpdir/${DOCVERSION}/html/$CPPLUS_RM_NAME || exit 1
+ mv c++/src/$CPPLUS_RM_NAME $tmpdir/${DOCVERSION}/html/$CPPLUS_RM_NAME || exit 1
+ # Compress the docs and move them to the release area
+ mv $tmpdir/${DOCVERSION} $tmpdir/${HDF5_VERS}_docs || exit 1
+ (cd $tmpdir && tar cf ${HDF5_VERS}_docs.tar ${HDF5_VERS}_docs) || exit 1
+ mv $tmpdir/${HDF5_VERS}_docs.tar $DEST || exit 1
;;
*)
echo "***Error*** Unknown method $comp"
@@ -335,6 +542,12 @@ for comp in $methods; do
esac
done
+# If AM_MAINTAINER_MODE was enabled before running this script
+# restore it to "enabled".
+if [ "${MAINT_MODE_ENABLED}" != "" ]; then
+ bin/switch_maint_mode -enable ./configure.ac
+fi
+
# Copy the RELEASE.txt to the release area.
cp release_docs/RELEASE.txt $DEST/$HDF5_VERS-RELEASE.txt
diff --git a/bin/restore.sh b/bin/restore.sh
index 7f1c7bb..60ac661 100755
--- a/bin/restore.sh
+++ b/bin/restore.sh
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic document set and is
-# linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have access
-# to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# A script to clean up the action of autogen.sh
diff --git a/bin/runtest b/bin/runtest
index 4356db3..2611f09 100755
--- a/bin/runtest
+++ b/bin/runtest
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# run the hdf5/bin/snapshot
diff --git a/bin/snapshot b/bin/snapshot
index a496edd..5c78fc8 100755
--- a/bin/snapshot
+++ b/bin/snapshot
@@ -6,20 +6,19 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
-
-# This script should be run nightly from cron. It checks out hdf5
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+
+# This script should be run nightly from cron. It checks out the source
# from the source repository and compares it against the previous
# snapshot. If anything significant changed then a new snapshot is
# created, the minor version number is incremented, and the change is
# checked back into the source repository.
#
+
# function definitions
TIMESTAMP()
{
@@ -28,7 +27,7 @@ TIMESTAMP()
EXIT_BANNER()
{
-TIMESTAMP "Exit $PROGNAME with status=$?"
+ TIMESTAMP "Exit $PROGNAME with status=$?"
}
# Show current total disk usage.
@@ -38,11 +37,178 @@ DISKUSAGE()
( read x y; echo "Disk Usage=$x KB" )
}
+# Function provided for testing software downloaded as tar files. A version of
+# this function that properly extracts the downloaded files can be provided in
+# the bin/snapshot-${HDFREPOS}-overrides file.
+EXTRACT()
+{
+ echo "Error: ${SWVERSION} is in source repository - does not need extraction."
+}
+
+# Standard procedure for checking out or updating source code from an hdfgroup
+# git repository. Override the function for other repositories or procedures.
+SOURCE_CHECKOUT()
+{
+    if test -n "$GIT_URL"; then
+ if [ -n "$AUTOGEN" ]; then
+ echo "Creating fresh clone of $GIT_URL in $BASEDIR/current_src"
+ # Check out the current version from source repository.
+ (cd $BASEDIR; rm -rf current_src
+            if test -z "$GIT_BRANCH"; then
+ echo "Testing empty branch $GIT_BRANCH."
+ git clone $GIT_URL current_src
+ else
+ echo "Testing branch $GIT_BRANCH."
+ git clone $GIT_URL -b $GIT_BRANCH current_src
+ fi
+ ) || exit 1
+ else
+ echo "Creating fresh clone of $GIT_URL in $BASEDIR/current"
+ # Check out the current version from source repository.
+ (cd $BASEDIR; rm -rf current
+            if test -n "$GIT_BRANCH"; then
+ git clone $GIT_URL -b $GIT_BRANCH current
+ else
+ git clone $GIT_URL current
+ fi ) || exit 1
+ fi
+ else
+        echo "Warning! Source directory (\"current\") is not checked out from git."
+ fi
+}
+
+# Standard procedure for running the configure command in a build (test)
+# directory
+RUNCONFIGURE()
+{
+ if [ "${CURRENT}" != "${TESTDIR}" -a "$CPSRC" = "yes" ]; then
+ echo "Copying source files to ${TESTDIR}."
+ cp -pr ${CURRENT}/* ${TESTDIR}
+ cd ${TESTDIR}
+ ./${CONFIGURE}
+ elif [ -n "${AUTOGEN}" ]; then
+ ${CURRENTSRC}/${CONFIGURE}
+ else
+ ${CURRENT}/${CONFIGURE}
+ fi
+}
+
+# Sometimes "make distclean" doesn't adequately remove files from the previous
+# build. If a build (test) directory was used, its contents can be entirely
+# deleted to provide a clean start. If the test is building in the source
+# directory, the contents can't be deleted, so run "make distclean".
+DISTCLEAN()
+{
+ if [ "${srcdir}" = "yes" -a -n "${SRCDIRNAME}" -a -d ${BASEDIR}/TestDir/${SRCDIRNAME} ]; then
+        echo "Removing contents of $SRCDIRNAME."
+ rm -rf ${BASEDIR}/TestDir/${SRCDIRNAME}/*
+ else
+ echo "$MAKE distclean"
+ (cd ${TESTDIR} && ${MAKE} distclean)
+ fi
+}
+
+# Several of the software packages tested do not support make check-install.
+# Those that support it should have a version of this function in their
+# override with the following lines:
+# TIMESTAMP "check-install $1"
+# ${MAKE} check-install $1
+CHECKINSTALL()
+{
+ echo "check-install is not supported for ${SWVERSION}"
+}
+
+# Function for hdf4 and hdf5 to override to check in changes after snapshot.
+# Safety measure to avoid unintended checkins to other repositories.
+COMMITSNAPSHOT()
+{
+ echo "original hdf5 script committed code changes back into git."
+}
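The EXTRACT, CHECKINSTALL and COMMITSNAPSHOT defaults above are meant to be replaced per repository via the bin/snapshot-${HDFREPOS}-overrides file sourced later in this script. A minimal overrides sketch for a package that supports check-install, using only the two lines suggested in the CHECKINSTALL comment above, might be:

    # bin/snapshot-hdf5-overrides (illustrative): enable make check-install
    CHECKINSTALL()
    {
        TIMESTAMP "check-install $1"
        ${MAKE} check-install $1
    }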
+
+DISPLAYUSAGE()
+{
+ set -
+ cat <<EOF
+Usage: $PROGNAME [all] [checkout] [ftp <URL>] [diff] [test] [srcdir] [release] [help]
+ [clean] [distclean] [echo] [deploy <dir>] [deploydir <dir>]
+ [zlib <zlib_path>] [releasedir <dir>] [srcdirname <dir>] [check-vfd]
+ [exec <command>] [module-load <module-list>] [op-configure <option>]
+ [--<option>]
+ all: Run all commands (checkout, test & release)
+ [Default is all]
+ checkout: Run source checkout
+ diff: Run diff on current and previous versions. Exit 0 if
+ no significant differences are found. Otherwise, non-zero.
+ deploy: deploy binary to directory <dir>
+ deploydir: use <dir> as the default directory for deployment
+ test: Run test
+ release: Run release
+ clean: Run make clean
+ distclean:Run make distclean
+ echo: Turn on echo mode (set -x)
+ setenv <name> <value>:
+ Set environment variable <name> to <value>.
+ setenvN <N> <name> <value> ...:
+ Set environment variable with <N> values.
+ E.g., setenvN 3 x a b c is same as setenv x="a b c".
+ srcdir: Use srcdir option (does not imply other commands)
+ "snapshot srcdir" is equivalent to "snapshot srcdir all"
+ "snapshot srcdir checkout" is equivalent to "snapshot checkout"
+ srcdirname <dir>:
+              Use <dir> as the srcdir testing directory if srcdir is chosen.
+              If <dir> starts with '-', it is appended to the default name
+ E.g., "snapshot srcdir srcdirname -xx" uses hostname-xx
+ [Default is hostname]
+ help: Print this message
+ echo: Turn on shell echo
+ zlib <zlib_path>:
+ Use <zlib_path> as the ZLIB locations
+ [Default is $ZLIB_default]
+ releasedir <dir>:
+ Use <dir> as the release directory
+ [Default is $ReleaseDir_default]
+ check-vfd:
+ Run make check-vfd instead of just make check.
+ exttest <testscript>;
+ Run testscript;
+ exec <command>:
+ Run <command>;
+ module-load <module-list>:
+ Load modules in comma-separated <module-list>;
+ op-configure <option>:
+ Pass <option> to the configure command
+ E.g., "snapshot op-configure --enable-parallel"
+ configures for parallel mode
+ --<option>:
+ Pass --<option> to the configure command
+ E.g., "snapshot --enable-parallel"
+ configures for parallel mode
+EOF
+ exit $errcode
+}
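Combined with the option parsing below, the new keywords allow invocations such as the following (the module names, script path and URL are placeholders, not values used by this patch; the exttest script is resolved relative to $BASEDIR):

    # load build modules, then run an external test script
    bin/snapshot module-load gcc/7.3.0,cmake/3.10 exttest scripts/nightly_test.sh

    # fetch a source tarball via wget instead of cloning, then run everything
    bin/snapshot ftp https://example.org/src/pkg-1.0.tar.gz all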
# MAIN
# SGI /bin/sh replaces $0 as function name if used in a function.
# Set the name here to avoid that ambiguity and better style too.
PROGNAME=$0
+SNAPSHOTNAME=
+HDFREPOS=
+DOCVERSION=""
+MODULELIST=""
+
+if [ -f bin/snapshot_params ]; then
+ . bin/snapshot_params
+ echo "Added snapshot_params."
+fi
+if [ -z "$SWVER" -a -f bin/snapshot_version ]
+then
+ . bin/snapshot_version
+ echo "Added snapshot_version."
+fi
+if [ -n "${HDFREPOS}" -a -f bin/snapshot-${HDFREPOS}-overrides ]; then
+ . bin/snapshot-${HDFREPOS}-overrides
+ echo "Added snapshot-${HDFREPOS}-overrides."
+fi
echo "====================================="
echo "$PROGNAME $*"
@@ -51,7 +217,7 @@ TIMESTAMP MAIN
uname -a
# setup exit banner message
-trap EXIT_BANNER 0
+trap EXIT_BANNER 0 1 2 9 15
# Dump environment variables before option parsing
echo ===Dumping environment variables before option parsing ===
@@ -67,8 +233,14 @@ ReleaseDir_default=release_dir
ZLIB_default=
ZLIB=$ZLIB_default
-# What compression methods to use?
-METHODS="gzip zip bzip2 doc"
+# What compression methods to use? (md5 produces a checksum rather than an
+# archive.) "doc" was apparently added as a method to create a separate
+# tarfile containing the documentation files for v1.8 and above.
+if [ "${SWVERSION}" = "hdf5_1_6" ]; then
+ METHODS="gzip bzip2 md5"
+else
+ METHODS="gzip bzip2 doc"
+fi
# Use User's MAKE if set. Else use generic make.
MAKE=${MAKE:-make}
@@ -81,15 +253,38 @@ CHECKVAL=check
cmd="all"
test_opt=""
errcode=0
+AUTOGEN=""
+EXTTEST=""
+EXEC_CMD_ARG=""
while [ $# -gt 0 ] ; do
case "$1" in
all)
cmd="all"
;;
+ checkout-autogen)
+ cmdcheckout="checkout"
+ AUTOGEN="autogen"
+ cmd=""
+ ;;
checkout)
cmdcheckout="checkout"
cmd=""
;;
+ ftp)
+ echo "Setting ftp flags in snapshot script"
+ cmdcheckout="checkout"
+ cmdftp="ftp"
+ cmd=""
+ shift
+ if [ $# -lt 1 ]; then
+ echo "URL missing"
+ errcode=1
+ cmd="help"
+ break
+ fi
+ ftp_url="$1"
+ echo "ftp_url is $ftp_url"
+ ;;
diff)
cmddiff="diff"
cmd=""
@@ -164,6 +359,10 @@ while [ $# -gt 0 ] ; do
cmdrel="release"
cmd=""
;;
+ autogen-release)
+ cmdrel="autogen-release"
+ cmd=""
+ ;;
clean | distclean)
cmdclean="$1"
cmd=""
@@ -196,12 +395,43 @@ while [ $# -gt 0 ] ; do
fi
ReleaseDir="$1"
;;
+ exttest)
+ shift
+ if [ $# -lt 1 ]; then
+ echo "exttest script name missing"
+ errcode=1
+ cmd="help"
+ break
+ fi
+ cmd=""
+ EXTTEST="$1"
+ ;;
+ exec)
+ shift
+ if [ $# -lt 1 ]; then
+ echo "exec command name missing"
+ errcode=1
+ cmd="help"
+ break
+ fi
+ cmd=""
+ EXEC_CMD_ARG="$@"
+        # exit the parsing while loop since all arguments have been consumed.
+ break
+ ;;
check-vfd)
CHECKVAL=check-vfd
;;
- yodconfigure)
- YODCONFIGURE=yes
- ;;
+ module-load)
+ shift
+ if [ $# -lt 1 ]; then
+ echo "missing module list to load"
+ errcode=1
+ cmd="help"
+ break
+ fi
+ MODULELIST="$1"
+ ;;
--*)
OP_CONFIGURE="$OP_CONFIGURE $1"
;;
@@ -225,87 +455,39 @@ while [ $# -gt 0 ] ; do
shift
done
+if [ -n "$MODULELIST" ]; then
+ . ~/.bashrc
+ module use /opt/pkgs/modules/all
+    # The module load command takes a space-separated list of modules.
+    # If we have a comma-separated list, convert ',' to ' '.
+ MODULELIST="$( echo -e "$MODULELIST" | tr ',' ' ' )"
+ module load $MODULELIST
+fi
+
# Dump environment variables after option parsing
echo ===Dumping environment variables after option parsing ===
printenv | sort
echo ===Done Dumping environment variables after option parsing ===
if [ "$cmd" = help ]; then
- set -
- cat <<EOF
-Usage: $PROGNAME [all] [checkout] [diff] [test] [srcdir] [release] [help]
- [clean] [distclean] [echo] [deploy <dir>] [deploydir <dir>]
- [zlib <zlib_path>] [releasedir <dir>] [srcdirname <dir>] [check-vfd]
- [op-configure <option>] [--<option>]
- all: Run all commands (checkout, test & release)
- [Default is all]
- checkout: Run source checkout
- diff: Run diff on current and previous versions. Exit 0 if
- no significant differences are found. Otherwise, non-zero.
- deploy: deploy binary to directory <dir>
- deploydir: use <dir> as the default directory for deployment
- test: Run test
- release: Run release
- clean: Run make clean
- distclean:Run make distclean
- echo: Turn on echo mode (set -x)
- setenv <name> <value>:
- Set environment variable <name> to <value>.
- setenvN <N> <name> <value> ...:
- Set environment variable with <N> values.
- E.g., setenvN 3 x a b c is same as setenv x="a b c".
- srcdir: Use srcdir option (does not imply other commands)
- "snapshot srcdir" is equivalent to "snapshot srcdir all"
- "snapshot srcdir checkout" is equivalent to "snapshot checkout"
- srcdirname <dir>:
- Use <dir> as the srcdir testing directory if srcdir is choosen.
- If <dir> starts with '-', it is append to the default name
- E.g., "snapshot srcdir srcdirname -xx" uses hostname-xx
- [Default is hostname]
- help: Print this message
- echo: Turn on shell echo
- zlib <zlib_path>:
- Use <zlib_path> as the ZLIB locations
- [Default is $ZLIB_default]
- releasedir <dir>:
- Use <dir> as the release directory
- [Default is $ReleaseDir_default]
- check-vfd:
- Run make check-vfd instead of just make check.
- op-configure <option>:
- Pass <option> to the configure command
- E.g., "snapshot op-configure --enable-parallel"
- configures for parallel mode
- --<option>:
- Pass --<option> to the configure command
- E.g., "snapshot --enable-parallel"
- configures for parallel mode
-EOF
- exit $errcode
+ DISPLAYUSAGE
fi
# Setup the proper configure option (--with-zlib) to use zlib library
# provide ZLIB is non-empty.
ZLIB=${ZLIB:+"--with-zlib="$ZLIB}
-if [ -n "$YODCONFIGURE" ]; then
- cp configure configure.yod
- bin/yodconfigure configure.yod
- CONFIGURE="./configure.yod"
-else
- CONFIGURE="./configure"
+# Adding --prefix as a configure option will put the path to the deploy
+# directory in the initial libhdf5*.la files
+if [ -n "$DEPLOYDIRNAME" ]; then
+ OP_CONFIGURE="$OP_CONFIGURE --prefix=${deploydir}/${DEPLOYDIRNAME}"
fi
-CONFIGURE="$CONFIGURE $ZLIB $OP_CONFIGURE"
+CONFIGURE="configure $OP_CONFIGURE"
+# echo "Configure command is $CONFIGURE"
# Execute the requests
snapshot=yes
-if [ -f bin/snapshot_version ]; then
- . bin/snapshot_version
-else
- H5VERSION=hdf5
-fi
-
-BASEDIR=${HOME}/snapshots-${H5VERSION}
+BASEDIR=${HOME}/snapshots-${SNAPSHOTNAME}
if [ ! -d ${BASEDIR} ]; then
echo "BASEDIR ($BASEDIR) does not exist"
exit 1
@@ -315,11 +497,6 @@ CURRENT=${BASEDIR}/current
PREVIOUS=${BASEDIR}/previous
ReleaseDir=${ReleaseDir:=${BASEDIR}/${ReleaseDir_default}}
HOSTNAME=`hostname | cut -f1 -d.` # no domain part
-if [ $H5VERSION != hdf5 ]; then
- SVNVERSION="hdf5/branches/$H5VERSION"
-else
- SVNVERSION=hdf5/trunk # use the default (trunk) version
-fi
# Try finding a version of diff that supports the -I option too.
DIFF=diff
@@ -333,19 +510,48 @@ done
#=============================
if [ "$cmd" = "all" -o -n "$cmdcheckout" ]; then
TIMESTAMP "checkout"
- # If there is a Makefile in ${CURRENT}, the last test done in it
- # has not been distclean'ed. They would interfere with other
- # --srcdir build since make considers the files in ${CURRENT}
- # take precedence over files in its own build-directory. Run
- # a "make distclean" to clean them all out. This is not really
- # part of the "checkout" functions but this is the most convenient
- # spot to do the distclean. We will also continue the checkout process
- # regardless of the return code of distclean.
- ( cd ${CURRENT}; test -f Makefile && ${MAKE} distclean)
-
- SVNROOT=http://svn.hdfgroup.uiuc.edu
- # Check out the current version from source repository.
- (cd $BASEDIR; svn -q co ${SVNROOT}/${SVNVERSION} current ) || exit 1
+ # ${BASEDIR}/bin is now updated from git by EveningMaint or DailyMaint
+ # to avoid updating the scripts in ${BASEDIR}/bin while they are running.
+
+ if [ -z "$AUTOGEN" ]; then
+ # If there is a Makefile in ${CURRENT}, the last test done in it
+ # has not been distclean'ed. They would interfere with other
+ # --srcdir build since make considers the files in ${CURRENT}
+ # take precedence over files in its own build-directory. Run
+ # a "make distclean" to clean them all out. This is not really
+ # part of the "checkout" functions but this is the most convenient
+ # spot to do the distclean. We will also continue the checkout process
+ # regardless of the return code of distclean.
+ ( cd ${CURRENT}; test -f Makefile && ${MAKE} distclean)
+ fi
+ # echo "cmdftp is $cmdftp; ftp_url is $ftp_url"
+ if [ -n "$cmdftp" ]; then
+ echo "Get the NetCDF4 source from their ftp server."
+ echo "Command executed is: 2>&1 wget -N $ftp_url"
+ cd ${BASEDIR};
+ WGET_OUTPUT="`2>&1 wget -N $ftp_url`"
+ errcode=$?
+        if [ $errcode -ne 0 ]; then
+            echo "$0: wget failed: $WGET_OUTPUT  Exiting."
+            exit $errcode
+        fi
+
+ # echo "Wget output was $WGET_OUTPUT"
+
+        if echo "$WGET_OUTPUT" | fgrep 'not retrieving' > /dev/null 2>&1
+ then
+ echo "Snapshot unchanged"
+ else
+ echo "New snapshot downloaded"
+ EXTRACT
+ fi
+ else
+ SOURCE_CHECKOUT
+ fi
fi # Do source checkout
@@ -353,7 +559,7 @@ fi # Do source checkout
# Run Test the HDF5 library
#=============================
if [ "$cmd" = "all" -o -n "$cmdtest" -o -n "$cmddiff" ]; then
- TIMESTAMP "diff"
+ TIMESTAMP "Run Tests"
# setup if srcdir is used.
if [ -z "$srcdir" ]; then
TESTDIR=${CURRENT}
@@ -369,10 +575,15 @@ if [ "$cmd" = "all" -o -n "$cmdtest" -o -n "$cmddiff" ]; then
esac
TESTDIR=${BASEDIR}/TestDir/${SRCDIRNAME}
test -d ${TESTDIR} || mkdir ${TESTDIR}
+ # set TESTDIR to use the direct path to the local test directory
+ # rather than the path through ${BASEDIR}.
+ cd ${TESTDIR}
+ TESTDIR=`pwd -P`
+ cd ${CURRENT}
fi
# Make sure current version exists and is clean
if [ -d ${TESTDIR} ]; then
- (cd ${TESTDIR} && ${MAKE} distclean)
+ DISTCLEAN
else
errcode=$?
snapshot=no
@@ -382,12 +593,17 @@ if [ "$cmd" = "all" -o -n "$cmdtest" -o -n "$cmddiff" ]; then
# Compare it with the previous version. Compare only files listed in
# the MANIFEST plus the MANIFEST itself.
if [ -d ${PREVIOUS} ]; then
- if (${DIFF} -c ${PREVIOUS}/MANIFEST ${CURRENT}/MANIFEST); then
+ if [ -z "${AUTOGEN}" ]; then
+ CURRENTSRC=${CURRENT}
+ else
+ CURRENTSRC=${BASEDIR}/current_src
+ fi
+ if (${DIFF} -c ${PREVIOUS}/MANIFEST ${CURRENTSRC}/MANIFEST); then
snapshot=no
- for src in `grep '^\.' ${CURRENT}/MANIFEST|expand|cut -f1 -d' '`; do
+ for src in `grep '^\.' ${CURRENTSRC}/MANIFEST|expand|cut -f1 -d' '`; do
if ${DIFF} -I H5_VERS_RELEASE -I " released on " \
-I " currently under development" \
- ${PREVIOUS}/$src ${CURRENT}/$src
+ ${PREVIOUS}/$src ${CURRENTSRC}/$src
then
: #continue
else
@@ -408,44 +624,123 @@ if [ "$cmd" = "all" -o -n "$cmdtest" -o -n "$cmddiff" ]; then
fi
fi
+ #=============================
+ # Execute command if defined
+ #=============================
+ #echo BEFORE EXEC command
+ #echo EXEC_CMD_ARG=${EXEC_CMD_ARG}
+
+ if [ -n "$EXEC_CMD_ARG" ]; then
+ TIMESTAMP ${EXEC_CMD_ARG}
+ TESTDIR=${BASEDIR}/TestDir/${SRCDIRNAME}
+ test -d ${TESTDIR} || mkdir ${TESTDIR}
+ if cd ${TESTDIR}; then
+ # clean up the directory before executing the command
+ # Do we need to clean first?
+ # rm -rf *
+ #
+ # If EXEC_CMD_ARG starts with a '/', it has an absolute path, else it is
+ # relative to the BASEDIR.
+ case "$EXEC_CMD_ARG" in
+ /*)
+ ${EXEC_CMD_ARG}
+ ;;
+ *)
+ ${BASEDIR}/${EXEC_CMD_ARG}
+ ;;
+ esac
+ errcode=$?
+ else
+ echo "${TESTDIR} not accessible"
+ errcode=1
+ fi
+ # exit snapshot since nothing else to do, for now.
+ exit $errcode
+ fi
+
# Build, run tests and install procedures
- if [ "$snapshot" = "yes" ]; then
+ if [ "$snapshot" = "yes" ] && [ "$NOMAKE" != "yes" ]; then
+ FAIL_SECTION=""
+ if [ -f ${TESTDIR}/failsection ]; then
+ rm ${TESTDIR}/failsection
+ fi
if (cd ${TESTDIR} && \
- TIMESTAMP "configure" && \
- ${srcdir:+${CURRENT}/}${CONFIGURE} && \
- TIMESTAMP "make" && \
+ TIMESTAMP "configure" && echo "configure" > ${TESTDIR}/failsection && \
+ RUNCONFIGURE && \
+ sleep 2 && \
+ TIMESTAMP "make" && echo "make" > ${TESTDIR}/failsection && \
${MAKE} && DISKUSAGE \
- TIMESTAMP ${CHECKVAL} && \
+ TIMESTAMP ${CHECKVAL} && echo "make check" > ${TESTDIR}/failsection && \
${MAKE} ${CHECKVAL} && DISKUSAGE \
- TIMESTAMP "install" && \
- ${MAKE} install-all && DISKUSAGE \
- TIMESTAMP "check-install" && \
- ${MAKE} check-install && DISKUSAGE \
- TIMESTAMP "uninstall" && \
- ${MAKE} uninstall-all && DISKUSAGE); then
+ TIMESTAMP "install" && echo "make install" > ${TESTDIR}/failsection && \
+ ${MAKE} install && DISKUSAGE \
+ TIMESTAMP "check-install" && echo "make check-install" > ${TESTDIR}/failsection && \
+ CHECKINSTALL && DISKUSAGE \
+ TIMESTAMP "uninstall" && echo "make uninstall" > ${TESTDIR}/failsection && \
+ ${MAKE} uninstall && DISKUSAGE); then
:
else
errcode=$?
+ FAIL_SECTION=`cat ${TESTDIR}/failsection`
+ echo "Failed running ${FAIL_SECTION}"
snapshot=no
exit $errcode
fi
+    elif [ "$CPSRC" = "yes" ]; then
+ cp -pr ${CURRENT}/* ${TESTDIR}
+ else
+ cmdclean=""
fi
fi # Test the HDF5 library
+# Run external test if configured
#=============================
+#=============================
+#if [ -d "$CURRENT" ]; then
+if [ "$EXTTEST" != "" ]; then
+ TIMESTAMP ${EXTTEST}
+ TESTDIR=${BASEDIR}/TestDir/${SRCDIRNAME}
+ test -d ${TESTDIR} || mkdir ${TESTDIR}
+ cd ${TESTDIR}
+ sleep 1
+    TIMESTAMP "`pwd`"
+ ls
+ ${BASEDIR}/${EXTTEST}
+ errcode=$?
+ exit $errcode
+fi
+
+#=============================
# Run deployment if requested.
#=============================
if [ -n "$DEPLOYDIRNAME" ]; then
+    # The daily tests deploy to .../hdf5/... or .../hdf4/..., except on cobalt where
+    # the deploy directory is in .../HDF5/...; the lc() conversion below takes care
+    # of that.  If neither hdf4 nor hdf5 (in upper or lower case) is in the path,
+    # RELEASE.txt won't be found unless it is in $CURRENT.
+ POS4=`perl -e "print index(lc(\"${deploydir}/${DEPLOYDIRNAME}\"), 'hdf4')"`
+ POS5=`perl -e "print index(lc(\"${deploydir}/${DEPLOYDIRNAME}\"), 'hdf5')"`
+ if [ "${POS4}" -ge "0" ]; then
+ RELEASE_TXT_LOC="release_notes"
+ elif [ "${POS5}" -ge "0" ]; then
+ RELEASE_TXT_LOC="release_docs"
+ else
+ RELEASE_TXT_LOC=""
+ fi
+
if [ "$snapshot" = "yes" ]; then
TIMESTAMP "deploy"
if (cd ${TESTDIR} &&
- ${CURRENT}/bin/deploy ${deploydir}/${DEPLOYDIRNAME} && \
+ ${BASEDIR}/bin/deploy ${deploydir}/${DEPLOYDIRNAME} && \
TIMESTAMP "clean" && \
${MAKE} clean && \
TIMESTAMP "check-install prefix=${deploydir}/${DEPLOYDIRNAME}" && \
- ${MAKE} check-install prefix=${deploydir}/${DEPLOYDIRNAME}); then
- : #continue
+ CHECKINSTALL prefix=${deploydir}/${DEPLOYDIRNAME}); then
+ cd ${CURRENT}
+ cp ${RELEASE_TXT_LOC}/RELEASE.txt ${deploydir}/${DEPLOYDIRNAME}
+ cp COPYING ${deploydir}/${DEPLOYDIRNAME}
+ #: #continue
else
errcode=$?
exit $errcode
@@ -460,24 +755,41 @@ fi # Deploy
if [ "$cmd" = "all" -o -n "$cmdrel" ]; then
if [ "$snapshot" = "yes" ]; then
TIMESTAMP "release"
- (cd ${CURRENT} && ${MAKE} distclean)
+ DISTCLEAN
(
# Turn on exit on error in the sub-shell so that it does not
# commit source if errors encounter here.
set -e
- cd ${CURRENT}
- RELEASE_VERSION="`perl bin/h5vers -v`"
- echo "Making snapshot release ($RELEASE_VERSION) to ${ReleaseDir}..."
- bin/release -d $ReleaseDir $METHODS
- perl bin/h5vers -i
- svn -q commit -m "Snapshot $RELEASE_VERSION"
+ if [ "$cmdrel" = "autogen-release" ]; then
+ cd ${BASEDIR}/current_src
+ else
+ cd ${CURRENT}
+ fi
+ if [ "$HDFREPOS" = "hdf4" ]; then
+ RELEASE_VERSION="`perl bin/h4vers -v`"
+ echo "Making snapshot release ($RELEASE_VERSION) to ${ReleaseDir}..."
+ bin/release -d $ReleaseDir $METHODS
+ perl bin/h4vers -i
+ elif [ "$HDFREPOS" = "hdf5" ]; then
+ RELEASE_VERSION="`perl bin/h5vers -v`"
+ echo "Making snapshot release ($RELEASE_VERSION) to ${ReleaseDir}..."
+ if [ "${DOCVERSION}" ]; then
+ bin/release -d $ReleaseDir --docver ${DOCVERSION} $METHODS
+ else
+ bin/release -d $ReleaseDir $METHODS
+ fi
+ perl bin/h5vers -i
+ else
+ echo "need real release steps. For now, only move current version to previous"
+ fi
+ COMMITSNAPSHOT
)
errcode=$?
fi
# Replace the previous version with the current version.
# Should check if the errcode of the release process but there
- # are other failures after release was done (e.g. h5vers or svn failures)
+ # are other failures after release was done (e.g. h5vers or git failures)
# that should allow the replacement to occure.
rm -rf ${PREVIOUS}
mv ${CURRENT} ${PREVIOUS}
@@ -487,7 +799,7 @@ fi #Release snapshot
#=============================
# Clean the test area. Default is no clean.
#=============================
-if [ -n "$cmdclean" ]; then
+if [ -n "$cmdclean" ] && [ "$NOMAKE" != "yes" ]; then
TIMESTAMP "clean"
# setup if srcdir is used.
if [ -z "$srcdir" ]; then
diff --git a/bin/snapshot_version b/bin/snapshot_version
index 86f7142..8aad05d 100644
--- a/bin/snapshot_version
+++ b/bin/snapshot_version
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# default version for snapshot test
diff --git a/bin/switch_maint_mode b/bin/switch_maint_mode
index 2b62545..fb1568b 100755
--- a/bin/switch_maint_mode
+++ b/bin/switch_maint_mode
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Switch AM_MAINTAINER_MODE value in configure.ac
# Usage: See USAGE()
diff --git a/bin/timekeeper b/bin/timekeeper
index 7c50759..14adf2c 100755
--- a/bin/timekeeper
+++ b/bin/timekeeper
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
# A time keeper for the remote daily test process launched by runtest.
# It sleeps for a certain time and then wakes up to hang up those processes
diff --git a/bin/trace b/bin/trace
index 05d3186..3f532ab 100755
--- a/bin/trace
+++ b/bin/trace
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
require 5.003;
$Source = "";
@@ -32,10 +30,10 @@ $Source = "";
"hbool_t" => "b",
"double" => "d",
"H5D_alloc_time_t" => "Da",
- "H5FD_mpio_collective_opt_t" => "Dc",
+ "H5FD_mpio_collective_opt_t" => "Dc",
"H5D_fill_time_t" => "Df",
"H5D_fill_value_t" => "DF",
- "H5FD_mpio_chunk_opt_t" => "Dh",
+ "H5FD_mpio_chunk_opt_t" => "Dh",
"H5D_mpio_actual_io_mode_t" => "Di",
"H5D_chunk_index_t" => "Dk",
"H5D_layout_t" => "Dl",
@@ -47,17 +45,19 @@ $Source = "";
"herr_t" => "e",
"H5E_direction_t" => "Ed",
"H5E_error_t" => "Ee",
- "H5E_type_t" => "Et",
- "H5F_close_degree_t" => "Fd",
- "H5F_file_space_type_t" => "Ff",
- "H5F_mem_t" => "Fm",
+ "H5E_type_t" => "Et",
+ "H5F_close_degree_t" => "Fd",
+ "H5F_fspace_strategy_t" => "Ff",
+ "H5F_file_space_type_t" => "Ff",
+ "H5F_mem_t" => "Fm",
"H5F_scope_t" => "Fs",
- "H5F_libver_t" => "Fv",
+ "H5F_fspace_type_t" => "Ft",
+ "H5F_libver_t" => "Fv",
"H5G_obj_t" => "Go",
"H5G_stat_t" => "Gs",
- "hsize_t" => "h",
+ "hsize_t" => "h",
"hssize_t" => "Hs",
- "H5E_major_t" => "i",
+ "H5E_major_t" => "i",
"H5E_minor_t" => "i",
"H5_iter_order_t" => "Io",
"H5_index_t" => "Ii",
@@ -68,15 +68,15 @@ $Source = "";
"unsigned int" => "Iu",
"uint32_t" => "Iu",
"H5I_type_t" => "It",
- "H5G_link_t" => "Ll", #Same as H5L_type_t now
- "H5L_type_t" => "Ll",
+ "H5G_link_t" => "Ll", #Same as H5L_type_t now
+ "H5L_type_t" => "Ll",
"MPI_Comm" => "Mc",
"MPI_Info" => "Mi",
"H5FD_mem_t" => "Mt",
"off_t" => "o",
"H5O_type_t" => "Ot",
"H5P_class_t" => "p",
- "hobj_ref_t" => "r",
+ "hobj_ref_t" => "r",
"H5R_type_t" => "Rt",
"char" => "s",
"unsigned char" => "s",
@@ -98,10 +98,11 @@ $Source = "";
"void" => "x",
"FILE" => "x",
"H5A_operator_t" => "x",
- "H5A_operator1_t" => "x",
- "H5A_operator2_t" => "x",
- "H5A_info_t" => "x",
+ "H5A_operator1_t" => "x",
+ "H5A_operator2_t" => "x",
+ "H5A_info_t" => "x",
"H5AC_cache_config_t" => "x",
+ "H5AC_cache_image_config_t" => "x",
"H5D_append_cb_t" => "x",
"H5D_gather_func_t" => "x",
"H5D_operator_t" => "x",
@@ -113,23 +114,23 @@ $Source = "";
"H5E_walk1_t" => "x",
"H5E_walk2_t" => "x",
"H5F_flush_cb_t" => "x",
- "H5F_info1_t" => "x",
- "H5F_info2_t" => "x",
+ "H5F_info1_t" => "x",
+ "H5F_info2_t" => "x",
"H5F_retry_info_t" => "x",
"H5FD_t" => "x",
"H5FD_class_t" => "x",
"H5FD_stream_fapl_t" => "x",
"H5FD_file_image_callbacks_t" => "x",
"H5G_iterate_t" => "x",
- "H5G_info_t" => "x",
- "H5I_free_t" => "x",
- "H5I_search_func_t" => "x",
- "H5L_class_t" => "x",
+ "H5G_info_t" => "x",
+ "H5I_free_t" => "x",
+ "H5I_search_func_t" => "x",
+ "H5L_class_t" => "x",
"H5L_elink_traverse_t" => "x",
"H5L_iterate_t" => "x",
"H5MM_allocate_t" => "x",
"H5MM_free_t" => "x",
- "H5O_info_t" => "x",
+ "H5O_info_t" => "x",
"H5O_iterate_t" => "x",
"H5O_mcdt_search_cb_t" => "x",
"H5P_cls_create_func_t" => "x",
@@ -160,6 +161,8 @@ $Source = "";
##############################################################################
# Print an error message.
#
+my $found_errors = 0;
+
sub errmesg ($$@) {
my ($file, $func, @mesg) = @_;
my ($mesg) = join "", @mesg;
@@ -169,6 +172,8 @@ sub errmesg ($$@) {
$lineno = tr/\n/\n/;
}
+ $found_errors = 1;
+
print "$file: in function \`$func\':\n";
print "$file:$lineno: $mesg\n";
}
@@ -239,34 +244,34 @@ sub rewrite_func ($$$$$) {
next;
}
unless ($arg=~/^(([a-z_A-Z]\w*\s+)+\**)
- ([a-z_A-Z]\w*)(\[.*?\])?
- (\s*\/\*\s*(in|out|in_out)\s*\*\/)?\s*$/x) {
- errmesg $file, $name, "unable to parse \`$arg\'";
- goto error;
+ ([a-z_A-Z]\w*)(\[.*?\])?
+ (\s*\/\*\s*(in|out|in_out)\s*\*\/)?\s*$/x) {
+ errmesg $file, $name, "unable to parse \`$arg\'";
+ goto error;
} else {
- my ($atype, $aname, $array, $adir) = ($1, $3, $4, $6);
- $names{$aname} = $argno++;
- $adir ||= "in";
- $atype =~ s/\s+$//;
- push @arg_name, $aname;
+ my ($atype, $aname, $array, $adir) = ($1, $3, $4, $6);
+ $names{$aname} = $argno++;
+ $adir ||= "in";
+ $atype =~ s/\s+$//;
+ push @arg_name, $aname;
- if ($adir eq "out") {
- push @arg_str, "x";
- } else {
- if (defined $array) {
- $atype .= "*";
- if ($array =~ /^\[\/\*([a-z_A-Z]\w*)\*\/\]$/) {
- my $asize = $1;
- if (exists $names{$asize}) {
- $atype .= '[a' . $names{$asize} . ']';
- } else {
- warn "bad array size: $asize";
- $atype .= "*";
- }
- }
- }
- push @arg_str, argstring $file, $name, $atype;
- }
+ if ($adir eq "out") {
+ push @arg_str, "x";
+ } else {
+ if (defined $array) {
+ $atype .= "*";
+ if ($array =~ /^\[\/\*([a-z_A-Z]\w*)\*\/\]$/) {
+ my $asize = $1;
+ if (exists $names{$asize}) {
+ $atype .= '[a' . $names{$asize} . ']';
+ } else {
+ warn "bad array size: $asize";
+ $atype .= "*";
+ }
+ }
+ }
+ push @arg_str, argstring $file, $name, $atype;
+ }
}
}
$trace = "H5TRACE" . scalar(@arg_str) . "(\"$rettype\", \"";
@@ -274,11 +279,11 @@ sub rewrite_func ($$$$$) {
my $len = 4 + length $trace;
for (@arg_name) {
if ($len + length >= 77) {
- $trace .= ",\n $_";
- $len = 13 + length;
+ $trace .= ",\n $_";
+ $len = 13 + length;
} else {
- $trace .= ", $_";
- $len += 1 + length;
+ $trace .= ", $_";
+ $len += 1 + length;
}
}
$trace .= ");\n";
@@ -338,5 +343,13 @@ for $file (@ARGV) {
}
}
-printf "Finished processing HDF5 API calls\n"
+if ($found_errors eq 1) {
+ printf "\n";
+ printf "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n";
+ printf "*** ERRORS FOUND *** ERRORS FOUND *** ERRORS FOUND ****\n";
+ printf "!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n";
+ exit 1;
+} else {
+ printf "Finished processing HDF5 API calls\n";
+}
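
The bin/trace diff above shows two things: the table that maps C argument types to short codes used to build H5TRACEn("<return type>", "<arg codes>", ...) strings, and a new $found_errors flag so the script exits non-zero when it cannot parse an argument instead of always reporting success. A rough C++ sketch of that pattern follows, using only a handful of the mappings visible in the diff; the names kArgCodes and arg_code are hypothetical.

    #include <cstdlib>
    #include <iostream>
    #include <map>
    #include <string>

    // A few of the type-to-code mappings shown in the bin/trace table above.
    static const std::map<std::string, std::string> kArgCodes = {
        {"herr_t", "e"}, {"hbool_t", "b"}, {"double", "d"},
        {"hsize_t", "h"}, {"hssize_t", "Hs"}, {"H5D_layout_t", "Dl"},
    };

    // Mirrors the new error handling: remember that something failed and
    // report it through the exit status at the end of the run.
    static bool found_errors = false;

    static std::string arg_code(const std::string& ctype)
    {
        auto it = kArgCodes.find(ctype);
        if (it == kArgCodes.end()) {
            std::cerr << "unknown argument type `" << ctype << "'\n";
            found_errors = true;
            return "?";
        }
        return it->second;
    }

    int main()
    {
        // e.g. an API call returning herr_t with (hsize_t, H5D_layout_t)
        // arguments would be annotated roughly as H5TRACE2("e", "hDl", ...).
        std::cout << "H5TRACE2(\"" << arg_code("herr_t") << "\", \""
                  << arg_code("hsize_t") << arg_code("H5D_layout_t") << "\", ...)\n";
        return found_errors ? EXIT_FAILURE : EXIT_SUCCESS;
    }
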
diff --git a/bin/yodconfigure b/bin/yodconfigure
index 2bc67e2..44d7d99 100755
--- a/bin/yodconfigure
+++ b/bin/yodconfigure
@@ -1,16 +1,15 @@
#!/bin/sh
#
+# Copyright by The HDF Group.
# Copyright by the Board of Trustees of the University of Illinois.
# All rights reserved.
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdf.ncsa.uiuc.edu/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from hdfhelp@ncsa.uiuc.edu.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Fix configure file so that it can launch configure testing executable
# via the proper launching command, e.g., yod. (Thus the name yodconfigure
diff --git a/c++/CMakeLists.txt b/c++/CMakeLists.txt
index 6f288a0..c9c4815 100644
--- a/c++/CMakeLists.txt
+++ b/c++/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_CPP)
#-----------------------------------------------------------------------------
@@ -25,7 +25,7 @@ INCLUDE_DIRECTORIES (${HDF5_BINARY_DIR})
if (H5_HAVE_PARALLEL)
add_definitions ("-DMPICH_SKIP_MPICXX")
add_definitions ("-DMPICH_IGNORE_CXX_SEEK")
-endif (H5_HAVE_PARALLEL)
+endif ()
add_subdirectory (${HDF5_CPP_SOURCE_DIR}/src ${HDF5_CPP_BINARY_DIR}/src)
@@ -34,11 +34,11 @@ add_subdirectory (${HDF5_CPP_SOURCE_DIR}/src ${HDF5_CPP_BINARY_DIR}/src)
#-----------------------------------------------------------------------------
if (HDF5_BUILD_EXAMPLES)
add_subdirectory (${HDF5_CPP_SOURCE_DIR}/examples ${HDF5_CPP_BINARY_DIR}/examples)
-endif (HDF5_BUILD_EXAMPLES)
+endif ()
#-----------------------------------------------------------------------------
# Build the CPP unit tests
#-----------------------------------------------------------------------------
if (BUILD_TESTING)
add_subdirectory (${HDF5_CPP_SOURCE_DIR}/test ${HDF5_CPP_BINARY_DIR}/test)
-endif (BUILD_TESTING)
+endif ()
diff --git a/c++/COPYING b/c++/COPYING
index 6903daf..6497ace 100644
--- a/c++/COPYING
+++ b/c++/COPYING
@@ -5,12 +5,9 @@
The files and subdirectories in this directory are part of HDF5.
The full HDF5 copyright notice, including terms governing use,
- modification, and redistribution, is contained in the files COPYING
- and Copyright.html. COPYING can be found at the root of the source
- code distribution tree; Copyright.html can be found at the root
- level of an installed copy of the electronic HDF5 document set and
- is linked from the top-level documents page. It can also be found
- at http://www.hdfgroup.org/HDF5/doc/Copyright.html. If you do not
- have access to either file, you may request a copy from
+ modification, and redistribution, is contained in the COPYING file
+ which can be found at the root of the source code distribution tree
+ or in https://support.hdfgroup.org/ftp/HDF5/releases. If you do
+ not have access to either file, you may request a copy from
help@hdfgroup.org.
diff --git a/c++/Makefile.am b/c++/Makefile.am
index 416060a..94fbefc 100644
--- a/c++/Makefile.am
+++ b/c++/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
diff --git a/c++/examples/CMakeLists.txt b/c++/examples/CMakeLists.txt
index cfcdd8d..e5b0b4a 100644
--- a/c++/examples/CMakeLists.txt
+++ b/c++/examples/CMakeLists.txt
@@ -1,11 +1,11 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
+PROJECT (HDF5_CPP_EXAMPLES)
+
# --------------------------------------------------------------------
# Notes: When creating examples they should be prefixed
# with "cpp_ex_". This allows for easier filtering of the examples.
# --------------------------------------------------------------------
-PROJECT (HDF5_CPP_EXAMPLES)
-
#-----------------------------------------------------------------------------
# Define examples
#-----------------------------------------------------------------------------
@@ -38,7 +38,7 @@ foreach (example ${examples})
TARGET_C_PROPERTIES (cpp_ex_${example} STATIC " " " ")
target_link_libraries (cpp_ex_${example} ${HDF5_CPP_LIB_TARGET} ${HDF5_LIB_TARGET})
set_target_properties (cpp_ex_${example} PROPERTIES FOLDER examples/cpp)
-endforeach (example ${examples})
+endforeach ()
foreach (example ${tutr_examples})
add_executable (cpp_ex_${example} ${HDF5_CPP_EXAMPLES_SOURCE_DIR}/${example}.cpp)
@@ -46,8 +46,8 @@ foreach (example ${tutr_examples})
TARGET_C_PROPERTIES (cpp_ex_${example} STATIC " " " ")
target_link_libraries (cpp_ex_${example} ${HDF5_CPP_LIB_TARGET} ${HDF5_LIB_TARGET})
set_target_properties (cpp_ex_${example} PROPERTIES FOLDER examples/cpp)
-endforeach (example ${tutr_examples})
+endforeach ()
if (BUILD_TESTING)
include (CMakeTests.cmake)
-endif (BUILD_TESTING)
+endif ()
diff --git a/c++/examples/CMakeTests.cmake b/c++/examples/CMakeTests.cmake
index 4db8c68..264a7e7 100644
--- a/c++/examples/CMakeTests.cmake
+++ b/c++/examples/CMakeTests.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
##############################################################################
##############################################################################
@@ -8,7 +19,7 @@
add_test (
NAME CPP_ex-clear-objects
COMMAND ${CMAKE_COMMAND}
- -E remove
+ -E remove
Group.h5
SDS.h5
SDScompound.h5
@@ -17,16 +28,29 @@
)
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (CPP_ex-clear-objects PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
set (last_test "CPP_ex-clear-objects")
foreach (example ${examples})
- add_test (NAME CPP_ex_${example} COMMAND $<TARGET_FILE:cpp_ex_${example}>)
+ if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME CPP_ex_${example} COMMAND $<TARGET_FILE:cpp_ex_${example}>)
+ else ()
+ add_test (NAME CPP_ex_${example} COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:cpp_ex_${example}>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=cpp_ex_${example}.txt"
+ #-D "TEST_REFERENCE=cpp_ex_${example}.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+ endif ()
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (CPP_ex_${example} PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
set (last_test "CPP_ex_${example}")
- endforeach (example ${examples})
+ endforeach ()
  #the following dependencies are handled by the order of the files
# SET_TESTS_PROPERTIES(CPP_ex_readdata PROPERTIES DEPENDS CPP_ex_create)
# SET_TESTS_PROPERTIES(CPP_ex_chunks PROPERTIES DEPENDS CPP_ex_extend_ds)
@@ -34,7 +58,7 @@
add_test (
NAME CPP_ex_tutr-clear-objects
COMMAND ${CMAKE_COMMAND}
- -E remove
+ -E remove
h5tutr_cmprss.h5
h5tutr_dset.h5
h5tutr_extend.h5
@@ -44,18 +68,30 @@
)
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (CPP_ex_tutr-clear-objects PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
set (last_test "CPP_ex_tutr-clear-objects")
-
+
foreach (example ${tutr_examples})
- add_test (NAME CPP_ex_${example} COMMAND $<TARGET_FILE:cpp_ex_${example}>)
+ if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME CPP_ex_${example} COMMAND $<TARGET_FILE:cpp_ex_${example}>)
+ else ()
+ add_test (NAME CPP_ex_${example} COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:cpp_ex_${example}>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=cpp_ex_${example}.txt"
+ #-D "TEST_REFERENCE=cpp_ex_${example}.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+ endif ()
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (CPP_ex_${example} PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
set (last_test "CPP_ex_${example}")
- endforeach (example ${tutr_examples})
+ endforeach ()
  #the following dependencies are handled by the order of the files
# SET_TESTS_PROPERTIES(CPP_ex_h5tutr_crtatt PROPERTIES DEPENDS CPP_ex_h5tutr_crtdat)
# SET_TESTS_PROPERTIES(CPP_ex_h5tutr_rdwt PROPERTIES DEPENDS CPP_ex_h5tutr_crtdat)
# SET_TESTS_PROPERTIES(CPP_ex_h5tutr_crtgrpd PROPERTIES DEPENDS CPP_ex_h5tutr_crtgrpar)
- \ No newline at end of file
diff --git a/c++/examples/Makefile.am b/c++/examples/Makefile.am
index 2408217..51ab8e3 100644
--- a/c++/examples/Makefile.am
+++ b/c++/examples/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
diff --git a/c++/examples/chunks.cpp b/c++/examples/chunks.cpp
index 57b23fd..836c86d 100644
--- a/c++/examples/chunks.cpp
+++ b/c++/examples/chunks.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/c++/examples/compound.cpp b/c++/examples/compound.cpp
index 7e14244..2636d8e 100644
--- a/c++/examples/compound.cpp
+++ b/c++/examples/compound.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/c++/examples/create.cpp b/c++/examples/create.cpp
index 1930987..d4500f9 100644
--- a/c++/examples/create.cpp
+++ b/c++/examples/create.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/c++/examples/extend_ds.cpp b/c++/examples/extend_ds.cpp
index f3f9a90..0ecad1f 100644
--- a/c++/examples/extend_ds.cpp
+++ b/c++/examples/extend_ds.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/c++/examples/h5group.cpp b/c++/examples/h5group.cpp
index 58ee44d..3e3ab4d 100644
--- a/c++/examples/h5group.cpp
+++ b/c++/examples/h5group.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/c++/examples/h5tutr_cmprss.cpp b/c++/examples/h5tutr_cmprss.cpp
index 5460191..ee2c544 100644
--- a/c++/examples/h5tutr_cmprss.cpp
+++ b/c++/examples/h5tutr_cmprss.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/c++/examples/h5tutr_crtatt.cpp b/c++/examples/h5tutr_crtatt.cpp
index a53bb1c..db34353 100644
--- a/c++/examples/h5tutr_crtatt.cpp
+++ b/c++/examples/h5tutr_crtatt.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/c++/examples/h5tutr_crtdat.cpp b/c++/examples/h5tutr_crtdat.cpp
index b50b659..e36a215 100644
--- a/c++/examples/h5tutr_crtdat.cpp
+++ b/c++/examples/h5tutr_crtdat.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/c++/examples/h5tutr_crtgrp.cpp b/c++/examples/h5tutr_crtgrp.cpp
index 41bbf1b..5462cbf 100644
--- a/c++/examples/h5tutr_crtgrp.cpp
+++ b/c++/examples/h5tutr_crtgrp.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/c++/examples/h5tutr_crtgrpar.cpp b/c++/examples/h5tutr_crtgrpar.cpp
index a050e3c..93bd08d 100644
--- a/c++/examples/h5tutr_crtgrpar.cpp
+++ b/c++/examples/h5tutr_crtgrpar.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/c++/examples/h5tutr_crtgrpd.cpp b/c++/examples/h5tutr_crtgrpd.cpp
index 12de485..96805a7 100644
--- a/c++/examples/h5tutr_crtgrpd.cpp
+++ b/c++/examples/h5tutr_crtgrpd.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/c++/examples/h5tutr_extend.cpp b/c++/examples/h5tutr_extend.cpp
index 8f80266..703068a 100644
--- a/c++/examples/h5tutr_extend.cpp
+++ b/c++/examples/h5tutr_extend.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/c++/examples/h5tutr_rdwt.cpp b/c++/examples/h5tutr_rdwt.cpp
index 5c83dcb..5ab7ccc 100644
--- a/c++/examples/h5tutr_rdwt.cpp
+++ b/c++/examples/h5tutr_rdwt.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/c++/examples/h5tutr_subset.cpp b/c++/examples/h5tutr_subset.cpp
index bf57713..9ba2104 100644
--- a/c++/examples/h5tutr_subset.cpp
+++ b/c++/examples/h5tutr_subset.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/c++/examples/readdata.cpp b/c++/examples/readdata.cpp
index 74b02f9..fd88776 100644
--- a/c++/examples/readdata.cpp
+++ b/c++/examples/readdata.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
//
diff --git a/c++/examples/run-c++-ex.sh.in b/c++/examples/run-c++-ex.sh.in
index eae2782..f0d3e93 100644
--- a/c++/examples/run-c++-ex.sh.in
+++ b/c++/examples/run-c++-ex.sh.in
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# This file: run-c++-ex.sh
diff --git a/c++/examples/testh5c++.sh.in b/c++/examples/testh5c++.sh.in
index 40d503f..42cb0df 100644
--- a/c++/examples/testh5c++.sh.in
+++ b/c++/examples/testh5c++.sh.in
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Tests for the h5c++ compiler tool
# Created: Albert Cheng, 2007/3/14
diff --git a/c++/examples/writedata.cpp b/c++/examples/writedata.cpp
index c3a5f48..e14e578 100644
--- a/c++/examples/writedata.cpp
+++ b/c++/examples/writedata.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/c++/src/CMakeLists.txt b/c++/src/CMakeLists.txt
index 2db0a9f..b147cfa 100644
--- a/c++/src/CMakeLists.txt
+++ b/c++/src/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_CPP_SRC)
#-----------------------------------------------------------------------------
@@ -32,6 +32,7 @@ set (CPP_SOURCES
${HDF5_CPP_SRC_SOURCE_DIR}/H5Group.cpp
${HDF5_CPP_SRC_SOURCE_DIR}/H5IdComponent.cpp
${HDF5_CPP_SRC_SOURCE_DIR}/H5IntType.cpp
+ ${HDF5_CPP_SRC_SOURCE_DIR}/H5LaccProp.cpp
${HDF5_CPP_SRC_SOURCE_DIR}/H5Library.cpp
${HDF5_CPP_SRC_SOURCE_DIR}/H5Location.cpp
${HDF5_CPP_SRC_SOURCE_DIR}/H5Object.cpp
@@ -68,6 +69,7 @@ set (CPP_HDRS
${HDF5_CPP_SRC_SOURCE_DIR}/H5IdComponent.h
${HDF5_CPP_SRC_SOURCE_DIR}/H5Include.h
${HDF5_CPP_SRC_SOURCE_DIR}/H5IntType.h
+ ${HDF5_CPP_SRC_SOURCE_DIR}/H5LaccProp.h
${HDF5_CPP_SRC_SOURCE_DIR}/H5Library.h
${HDF5_CPP_SRC_SOURCE_DIR}/H5Location.h
${HDF5_CPP_SRC_SOURCE_DIR}/H5Object.h
@@ -102,7 +104,7 @@ if (BUILD_SHARED_LIBS)
INTERFACE_COMPILE_DEFINITIONS H5_BUILT_AS_DYNAMIC_LIB=1
)
set (install_targets ${install_targets} ${HDF5_CPP_LIBSH_TARGET})
-endif (BUILD_SHARED_LIBS)
+endif ()
#-----------------------------------------------------------------------------
# Add file(s) to CMake Install
@@ -122,7 +124,8 @@ install (
if (HDF5_EXPORTED_TARGETS)
if (BUILD_SHARED_LIBS)
INSTALL_TARGET_PDB (${HDF5_CPP_LIBSH_TARGET} ${HDF5_INSTALL_BIN_DIR} cpplibraries)
- endif (BUILD_SHARED_LIBS)
+ endif ()
+ INSTALL_TARGET_PDB (${HDF5_CPP_LIB_TARGET} ${HDF5_INSTALL_BIN_DIR} cpplibraries)
install (
TARGETS
@@ -135,4 +138,4 @@ if (HDF5_EXPORTED_TARGETS)
FRAMEWORK DESTINATION ${HDF5_INSTALL_FWRK_DIR} COMPONENT cpplibraries
INCLUDES DESTINATION include
)
-endif (HDF5_EXPORTED_TARGETS)
+endif ()
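
The c++/src/CMakeLists.txt diff above adds the new H5LaccProp (link access property list) source and header to the C++ library build. The class itself is not shown here, so as a rough idea of what a link access property list controls, the sketch below uses the underlying C API (valid in C++); the 16-link cap is an arbitrary example value.

    #include "hdf5.h"

    int main()
    {
        // Create a link access property list; the new H5LaccProp class added
        // to the build above wraps this kind of handle on the C++ side.
        hid_t lapl = H5Pcreate(H5P_LINK_ACCESS);
        if (lapl < 0)
            return 1;

        // Cap how many soft/external links the library will traverse when
        // resolving a path through this property list.
        H5Pset_nlinks(lapl, 16);

        /* ... pass lapl to calls that accept a link access property list ... */

        H5Pclose(lapl);
        return 0;
    }
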
diff --git a/c++/src/H5AbstractDs.cpp b/c++/src/H5AbstractDs.cpp
index 49de4bb..b900b90 100644
--- a/c++/src/H5AbstractDs.cpp
+++ b/c++/src/H5AbstractDs.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <string>
@@ -24,6 +22,7 @@
#include "H5OcreatProp.h"
#include "H5DcreatProp.h"
#include "H5DxferProp.h"
+#include "H5LaccProp.h"
#include "H5Location.h"
#include "H5Object.h"
#include "H5DataSpace.h"
@@ -33,16 +32,16 @@
namespace H5 {
//--------------------------------------------------------------------------
-// Function: AbstractDs default constructor
-///\brief Default constructor
-// Programmer Binh-Minh Ribler - 2000
+// Function: AbstractDs default constructor
+///\brief Default constructor
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
AbstractDs::AbstractDs(){}
//--------------------------------------------------------------------------
-// Function: AbstractDs default constructor
-///\brief Creates an AbstractDs instance using an existing id.
-// Programmer Binh-Minh Ribler - 2000
+// Function: AbstractDs default constructor
+///\brief Creates an AbstractDs instance using an existing id.
+// Programmer Binh-Minh Ribler - 2000
//
// *** Deprecation warning ***
// This constructor is no longer appropriate because the data member "id" had
@@ -53,275 +52,275 @@ AbstractDs::AbstractDs(){}
// Mar 2016 -BMR, AbstractDs::AbstractDs(const hid_t ds_id){}
//--------------------------------------------------------------------------
-// Function: AbstractDs::getTypeClass
-///\brief Returns the class of the datatype that is used by this
-/// object, which can be a dataset or an attribute.
-///\return Datatype class identifier
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: AbstractDs::getTypeClass
+///\brief Returns the class of the datatype that is used by this
+/// object, which can be a dataset or an attribute.
+///\return Datatype class identifier
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
H5T_class_t AbstractDs::getTypeClass() const
{
- // Gets the datatype used by this dataset or attribute.
- // p_get_type calls either H5Dget_type or H5Aget_type depending on
- // which object invokes getTypeClass
- hid_t datatype_id;
- try {
- datatype_id = p_get_type(); // returned value is already validated
- }
- catch (DataSetIException& E) {
- throw DataTypeIException("DataSet::getTypeClass", E.getDetailMsg());
- }
- catch (AttributeIException& E) {
- throw DataTypeIException("Attribute::getTypeClass", E.getDetailMsg());
- }
+ // Gets the datatype used by this dataset or attribute.
+ // p_get_type calls either H5Dget_type or H5Aget_type depending on
+ // which object invokes getTypeClass
+ hid_t datatype_id;
+ try {
+ datatype_id = p_get_type(); // returned value is already validated
+ }
+ catch (DataSetIException& E) {
+ throw DataTypeIException("DataSet::getTypeClass", E.getDetailMsg());
+ }
+ catch (AttributeIException& E) {
+ throw DataTypeIException("Attribute::getTypeClass", E.getDetailMsg());
+ }
- // Gets the class of the datatype and validate it before returning
- H5T_class_t type_class = H5Tget_class(datatype_id);
+ // Gets the class of the datatype and validate it before returning
+ H5T_class_t type_class = H5Tget_class(datatype_id);
- // Close temporary datatype_id
- herr_t ret_value = H5Tclose(datatype_id);
- if (ret_value < 0)
- {
- if (fromClass() == "DataSet")
- throw DataTypeIException("DataSet::getTypeClass", "H5Tclose failed");
- else if (fromClass() == "Attribute")
- throw DataTypeIException("Attribute::getTypeClass", "H5Tclose failed");
- }
+ // Close temporary datatype_id
+ herr_t ret_value = H5Tclose(datatype_id);
+ if (ret_value < 0)
+ {
+ if (fromClass() == "DataSet")
+ throw DataTypeIException("DataSet::getTypeClass", "H5Tclose failed");
+ else if (fromClass() == "Attribute")
+ throw DataTypeIException("Attribute::getTypeClass", "H5Tclose failed");
+ }
- // Check on the returned type_class
- if (type_class == H5T_NO_CLASS)
- {
- if (fromClass() == "DataSet")
- throw DataTypeIException("DataSet::getTypeClass", "H5Tget_class returns H5T_NO_CLASS");
- else if (fromClass() == "Attribute")
- throw DataTypeIException("Attribute::getTypeClass", "H5Tget_class returns H5T_NO_CLASS");
- }
- return(type_class);
+ // Check on the returned type_class
+ if (type_class == H5T_NO_CLASS)
+ {
+ if (fromClass() == "DataSet")
+ throw DataTypeIException("DataSet::getTypeClass", "H5Tget_class returns H5T_NO_CLASS");
+ else if (fromClass() == "Attribute")
+ throw DataTypeIException("Attribute::getTypeClass", "H5Tget_class returns H5T_NO_CLASS");
+ }
+ return(type_class);
}
//--------------------------------------------------------------------------
-// Function: AbstractDs::getDataType
-///\brief Returns the generic datatype of this abstract dataset, which
-/// can be a dataset or an attribute.
-///\return DataType instance
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: AbstractDs::getDataType
+///\brief Returns the generic datatype of this abstract dataset, which
+/// can be a dataset or an attribute.
+///\return DataType instance
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
DataType AbstractDs::getDataType() const
{
- // Gets the id of the datatype used by this dataset or attribute using
- // p_get_type. p_get_type calls either H5Dget_type or H5Aget_type
- // depending on which object invokes getDataType. Then, create and
- // return the DataType object
- try {
- DataType datatype;
- f_DataType_setId(&datatype, p_get_type());
- return(datatype);
- }
- catch (DataSetIException& E) {
- throw DataTypeIException("DataSet::getDataType", E.getDetailMsg());
- }
- catch (AttributeIException& E) {
- throw DataTypeIException("Attribute::getDataType", E.getDetailMsg());
- }
+ // Gets the id of the datatype used by this dataset or attribute using
+ // p_get_type. p_get_type calls either H5Dget_type or H5Aget_type
+ // depending on which object invokes getDataType. Then, create and
+ // return the DataType object
+ try {
+ DataType datatype;
+ f_DataType_setId(&datatype, p_get_type());
+ return(datatype);
+ }
+ catch (DataSetIException& E) {
+ throw DataTypeIException("DataSet::getDataType", E.getDetailMsg());
+ }
+ catch (AttributeIException& E) {
+ throw DataTypeIException("Attribute::getDataType", E.getDetailMsg());
+ }
}
//--------------------------------------------------------------------------
-// Function: AbstractDs::getArrayType
-///\brief Returns the array datatype of this abstract dataset which
-/// can be a dataset or an attribute.
-///\return ArrayType instance
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - Jul, 2005
+// Function: AbstractDs::getArrayType
+///\brief Returns the array datatype of this abstract dataset which
+/// can be a dataset or an attribute.
+///\return ArrayType instance
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - Jul, 2005
//--------------------------------------------------------------------------
ArrayType AbstractDs::getArrayType() const
{
- // Gets the id of the datatype used by this dataset or attribute using
- // p_get_type. p_get_type calls either H5Dget_type or H5Aget_type
- // depending on which object invokes getArrayType. Then, create and
- // return the ArrayType object
- try {
- // Create ArrayType and set values this way to work around the
- // problem described in the JIRA issue HDFFV-7947
- ArrayType arraytype;
- f_DataType_setId(&arraytype, p_get_type());
- return(arraytype);
- }
- catch (DataSetIException& E) {
- throw DataTypeIException("DataSet::getArrayType", E.getDetailMsg());
- }
- catch (AttributeIException& E) {
- throw DataTypeIException("Attribute::getArrayType", E.getDetailMsg());
- }
+ // Gets the id of the datatype used by this dataset or attribute using
+ // p_get_type. p_get_type calls either H5Dget_type or H5Aget_type
+ // depending on which object invokes getArrayType. Then, create and
+ // return the ArrayType object
+ try {
+ // Create ArrayType and set values this way to work around the
+ // problem described in the JIRA issue HDFFV-7947
+ ArrayType arraytype;
+ f_DataType_setId(&arraytype, p_get_type());
+ return(arraytype);
+ }
+ catch (DataSetIException& E) {
+ throw DataTypeIException("DataSet::getArrayType", E.getDetailMsg());
+ }
+ catch (AttributeIException& E) {
+ throw DataTypeIException("Attribute::getArrayType", E.getDetailMsg());
+ }
}
//--------------------------------------------------------------------------
-// Function: AbstractDs::getCompType
-///\brief Returns the compound datatype of this abstract dataset which
-/// can be a dataset or an attribute.
-///\return CompType instance
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: AbstractDs::getCompType
+///\brief Returns the compound datatype of this abstract dataset which
+/// can be a dataset or an attribute.
+///\return CompType instance
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
CompType AbstractDs::getCompType() const
{
- // Gets the id of the datatype used by this dataset or attribute using
- // p_get_type. p_get_type calls either H5Dget_type or H5Aget_type
- // depending on which object invokes getCompType. Then, create and
- // return the CompType object
- try {
- CompType comptype;
- f_DataType_setId(&comptype, p_get_type());
- return(comptype);
- }
- catch (DataSetIException& E) {
- throw DataTypeIException("DataSet::getCompType", E.getDetailMsg());
- }
- catch (AttributeIException& E) {
- throw DataTypeIException("Attribute::getCompType", E.getDetailMsg());
- }
+ // Gets the id of the datatype used by this dataset or attribute using
+ // p_get_type. p_get_type calls either H5Dget_type or H5Aget_type
+ // depending on which object invokes getCompType. Then, create and
+ // return the CompType object
+ try {
+ CompType comptype;
+ f_DataType_setId(&comptype, p_get_type());
+ return(comptype);
+ }
+ catch (DataSetIException& E) {
+ throw DataTypeIException("DataSet::getCompType", E.getDetailMsg());
+ }
+ catch (AttributeIException& E) {
+ throw DataTypeIException("Attribute::getCompType", E.getDetailMsg());
+ }
}
//--------------------------------------------------------------------------
-// Function: AbstractDs::getEnumType
-///\brief Returns the enumeration datatype of this abstract dataset which
-/// can be a dataset or an attribute.
-///\return EnumType instance
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: AbstractDs::getEnumType
+///\brief Returns the enumeration datatype of this abstract dataset which
+/// can be a dataset or an attribute.
+///\return EnumType instance
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
EnumType AbstractDs::getEnumType() const
{
- // Gets the id of the datatype used by this dataset or attribute using
- // p_get_type. p_get_type calls either H5Dget_type or H5Aget_type
- // depending on which object invokes getEnumType. Then, create and
- // return the EnumType object
- try {
- EnumType enumtype;
- f_DataType_setId(&enumtype, p_get_type());
- return(enumtype);
- }
- catch (DataSetIException& E) {
- throw DataTypeIException("DataSet::getEnumType", E.getDetailMsg());
- }
- catch (AttributeIException& E) {
- throw DataTypeIException("Attribute::getEnumType", E.getDetailMsg());
- }
+ // Gets the id of the datatype used by this dataset or attribute using
+ // p_get_type. p_get_type calls either H5Dget_type or H5Aget_type
+ // depending on which object invokes getEnumType. Then, create and
+ // return the EnumType object
+ try {
+ EnumType enumtype;
+ f_DataType_setId(&enumtype, p_get_type());
+ return(enumtype);
+ }
+ catch (DataSetIException& E) {
+ throw DataTypeIException("DataSet::getEnumType", E.getDetailMsg());
+ }
+ catch (AttributeIException& E) {
+ throw DataTypeIException("Attribute::getEnumType", E.getDetailMsg());
+ }
}
//--------------------------------------------------------------------------
-// Function: AbstractDs::getIntType
-///\brief Returns the integer datatype of this abstract dataset which
-/// can be a dataset or an attribute.
-///\return IntType instance
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: AbstractDs::getIntType
+///\brief Returns the integer datatype of this abstract dataset which
+/// can be a dataset or an attribute.
+///\return IntType instance
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
IntType AbstractDs::getIntType() const
{
- // Gets the id of the datatype used by this dataset or attribute using
- // p_get_type. p_get_type calls either H5Dget_type or H5Aget_type
- // depending on which object invokes getIntType. Then, create and
- // return the IntType object
- try {
- IntType inttype;
- f_DataType_setId(&inttype, p_get_type());
- return(inttype);
- }
- catch (DataSetIException& E) {
- throw DataTypeIException("DataSet::getIntType", E.getDetailMsg());
- }
- catch (AttributeIException& E) {
- throw DataTypeIException("Attribute::getIntType", E.getDetailMsg());
- }
+ // Gets the id of the datatype used by this dataset or attribute using
+ // p_get_type. p_get_type calls either H5Dget_type or H5Aget_type
+ // depending on which object invokes getIntType. Then, create and
+ // return the IntType object
+ try {
+ IntType inttype;
+ f_DataType_setId(&inttype, p_get_type());
+ return(inttype);
+ }
+ catch (DataSetIException& E) {
+ throw DataTypeIException("DataSet::getIntType", E.getDetailMsg());
+ }
+ catch (AttributeIException& E) {
+ throw DataTypeIException("Attribute::getIntType", E.getDetailMsg());
+ }
}
//--------------------------------------------------------------------------
-// Function: AbstractDs::getFloatType
-///\brief Returns the floating-point datatype of this abstract dataset,
-/// which can be a dataset or an attribute.
-///\return FloatType instance
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: AbstractDs::getFloatType
+///\brief Returns the floating-point datatype of this abstract dataset,
+/// which can be a dataset or an attribute.
+///\return FloatType instance
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
FloatType AbstractDs::getFloatType() const
{
- // Gets the id of the datatype used by this dataset or attribute using
- // p_get_type. p_get_type calls either H5Dget_type or H5Aget_type
- // depending on which object invokes getFloatType. Then, create and
- // return the FloatType object
- try {
- FloatType floatype;
- f_DataType_setId(&floatype, p_get_type());
- return(floatype);
- }
- catch (DataSetIException& E) {
- throw DataTypeIException("DataSet::getFloatType", E.getDetailMsg());
- }
- catch (AttributeIException& E) {
- throw DataTypeIException("Attribute::getFloatType", E.getDetailMsg());
- }
+ // Gets the id of the datatype used by this dataset or attribute using
+ // p_get_type. p_get_type calls either H5Dget_type or H5Aget_type
+ // depending on which object invokes getFloatType. Then, create and
+ // return the FloatType object
+ try {
+ FloatType floatype;
+ f_DataType_setId(&floatype, p_get_type());
+ return(floatype);
+ }
+ catch (DataSetIException& E) {
+ throw DataTypeIException("DataSet::getFloatType", E.getDetailMsg());
+ }
+ catch (AttributeIException& E) {
+ throw DataTypeIException("Attribute::getFloatType", E.getDetailMsg());
+ }
}
//--------------------------------------------------------------------------
-// Function: AbstractDs::getStrType
-///\brief Returns the string datatype of this abstract dataset which
-/// can be a dataset or an attribute.
-///\return StrType instance
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: AbstractDs::getStrType
+///\brief Returns the string datatype of this abstract dataset which
+/// can be a dataset or an attribute.
+///\return StrType instance
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
StrType AbstractDs::getStrType() const
{
- // Gets the id of the datatype used by this dataset or attribute using
- // p_get_type. p_get_type calls either H5Dget_type or H5Aget_type
- // depending on which object invokes getStrType. Then, create and
- // return the StrType object
- try {
- StrType strtype;
- f_DataType_setId(&strtype, p_get_type());
- return(strtype);
- }
- catch (DataSetIException& E) {
- throw DataTypeIException("DataSet::getStrType", E.getDetailMsg());
- }
- catch (AttributeIException& E) {
- throw DataTypeIException("Attribute::getStrType", E.getDetailMsg());
- }
+ // Gets the id of the datatype used by this dataset or attribute using
+ // p_get_type. p_get_type calls either H5Dget_type or H5Aget_type
+ // depending on which object invokes getStrType. Then, create and
+ // return the StrType object
+ try {
+ StrType strtype;
+ f_DataType_setId(&strtype, p_get_type());
+ return(strtype);
+ }
+ catch (DataSetIException& E) {
+ throw DataTypeIException("DataSet::getStrType", E.getDetailMsg());
+ }
+ catch (AttributeIException& E) {
+ throw DataTypeIException("Attribute::getStrType", E.getDetailMsg());
+ }
}
//--------------------------------------------------------------------------
-// Function: AbstractDs::getVarLenType
-///\brief Returns the floating-point datatype of this abstract dataset,
-/// which can be a dataset or an attribute.
-///\return VarLenType instance
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - Jul, 2005
+// Function: AbstractDs::getVarLenType
+///\brief Returns the variable-length datatype of this abstract dataset,
+/// which can be a dataset or an attribute.
+///\return VarLenType instance
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - Jul, 2005
//--------------------------------------------------------------------------
VarLenType AbstractDs::getVarLenType() const
{
- // Gets the id of the datatype used by this dataset or attribute using
- // p_get_type. p_get_type calls either H5Dget_type or H5Aget_type
- // depending on which object invokes getVarLenType. Then, create and
- // return the VarLenType object
- try {
- VarLenType varlentype;
- f_DataType_setId(&varlentype, p_get_type());
- return(varlentype);
- }
- catch (DataSetIException& E) {
- throw DataTypeIException("DataSet::getVarLenType", E.getDetailMsg());
- }
- catch (AttributeIException& E) {
- throw DataTypeIException("Attribute::getVarLenType", E.getDetailMsg());
- }
+ // Gets the id of the datatype used by this dataset or attribute using
+ // p_get_type. p_get_type calls either H5Dget_type or H5Aget_type
+ // depending on which object invokes getVarLenType. Then, create and
+ // return the VarLenType object
+ try {
+ VarLenType varlentype;
+ f_DataType_setId(&varlentype, p_get_type());
+ return(varlentype);
+ }
+ catch (DataSetIException& E) {
+ throw DataTypeIException("DataSet::getVarLenType", E.getDetailMsg());
+ }
+ catch (AttributeIException& E) {
+ throw DataTypeIException("Attribute::getVarLenType", E.getDetailMsg());
+ }
}
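// The getXxxType() members above all follow the same pattern; which one a caller
// uses depends on the datatype class of the dataset or attribute. A hedged sketch,
// assuming an already-open DataSet (and #include "H5Cpp.h", #include <iostream>,
// using namespace H5;):
void report_type(const DataSet& dset)
{
    switch (dset.getTypeClass()) {
        case H5T_INTEGER: {
            IntType itype = dset.getIntType();
            std::cout << "integer, " << itype.getSize() << " bytes\n";
            break;
        }
        case H5T_FLOAT: {
            FloatType ftype = dset.getFloatType();
            std::cout << "float, " << ftype.getSize() << " bytes\n";
            break;
        }
        case H5T_STRING: {
            StrType stype = dset.getStrType();
            std::cout << "string, " << stype.getSize() << " bytes\n";
            break;
        }
        default:
            std::cout << "other type class\n";
            break;
    }
}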
//--------------------------------------------------------------------------
-// Function: AbstractDs destructor
-///\brief Noop destructor.
-// Programmer Binh-Minh Ribler - 2000
+// Function: AbstractDs destructor
+///\brief Noop destructor.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
AbstractDs::~AbstractDs() {}
diff --git a/c++/src/H5AbstractDs.h b/c++/src/H5AbstractDs.h
index 1b4775c..73a18b8 100644
--- a/c++/src/H5AbstractDs.h
+++ b/c++/src/H5AbstractDs.h
@@ -6,12 +6,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef __AbstractDs_H
@@ -37,44 +35,44 @@ class DataSpace;
*/
class H5_DLLCPP AbstractDs {
public:
- // Gets a copy the datatype of that this abstract dataset uses.
- // Note that this datatype is a generic one and can only be accessed
- // via generic member functions, i.e., member functions belong
- // to DataType. To get specific datatype, i.e. EnumType, FloatType,
- // etc..., use the specific functions, that follow, instead.
- DataType getDataType() const;
+        // Gets a copy of the datatype that this abstract dataset uses.
+        // Note that this datatype is a generic one and can only be accessed
+        // via generic member functions, i.e., member functions belonging
+        // to DataType.  To get a specific datatype, e.g., EnumType, FloatType,
+        // etc., use the specific functions that follow instead.
+ DataType getDataType() const;
- // Gets a copy of the specific datatype of this abstract dataset.
- ArrayType getArrayType() const;
- CompType getCompType() const;
- EnumType getEnumType() const;
- IntType getIntType() const;
- FloatType getFloatType() const;
- StrType getStrType() const;
- VarLenType getVarLenType() const;
+ // Gets a copy of the specific datatype of this abstract dataset.
+ ArrayType getArrayType() const;
+ CompType getCompType() const;
+ EnumType getEnumType() const;
+ IntType getIntType() const;
+ FloatType getFloatType() const;
+ StrType getStrType() const;
+ VarLenType getVarLenType() const;
- ///\brief Gets the size in memory of this abstract dataset.
- virtual size_t getInMemDataSize() const = 0;
+ ///\brief Gets the size in memory of this abstract dataset.
+ virtual size_t getInMemDataSize() const = 0;
- ///\brief Gets the dataspace of this abstract dataset - pure virtual.
- virtual DataSpace getSpace() const = 0;
+ ///\brief Gets the dataspace of this abstract dataset - pure virtual.
+ virtual DataSpace getSpace() const = 0;
- // Gets the class of the datatype that is used by this abstract
- // dataset.
- H5T_class_t getTypeClass() const;
+ // Gets the class of the datatype that is used by this abstract
+ // dataset.
+ H5T_class_t getTypeClass() const;
- ///\brief Returns the amount of storage size required - pure virtual.
- virtual hsize_t getStorageSize() const = 0;
+ ///\brief Returns the amount of storage size required - pure virtual.
+ virtual hsize_t getStorageSize() const = 0;
- // Returns this class name - pure virtual.
- virtual H5std_string fromClass() const = 0;
+ // Returns this class name - pure virtual.
+ virtual H5std_string fromClass() const = 0;
- // Destructor
- virtual ~AbstractDs();
+ // Destructor
+ virtual ~AbstractDs();
protected:
- // Default constructor
- AbstractDs();
+ // Default constructor
+ AbstractDs();
// *** Deprecation warning ***
// The following two constructors are no longer appropriate after the
@@ -83,14 +81,16 @@ class H5_DLLCPP AbstractDs {
// other will be removed from 1.10 release, and then from 1.8 if its
// removal does not raise any problems in two 1.10 releases.
- // Mar 2016 -BMR, AbstractDs(const hid_t h5_id);
+ // Mar 2016 -BMR, AbstractDs(const hid_t h5_id);
- // Copy constructor
- // AbstractDs( const AbstractDs& original );
+ // Copy constructor
+ // AbstractDs( const AbstractDs& original );
private:
- // This member function is implemented by DataSet and Attribute - pure virtual.
- virtual hid_t p_get_type() const = 0;
-};
-}
+ // This member function is implemented by DataSet and Attribute - pure virtual.
+ virtual hid_t p_get_type() const = 0;
+
+}; // end of AbstractDs
+} // namespace H5
+
#endif // __AbstractDs_H
diff --git a/c++/src/H5Alltypes.h b/c++/src/H5Alltypes.h
index df7213a..d67f3fe 100644
--- a/c++/src/H5Alltypes.h
+++ b/c++/src/H5Alltypes.h
@@ -6,12 +6,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
// This header file simply serves as a container to hold the
diff --git a/c++/src/H5ArrayType.cpp b/c++/src/H5ArrayType.cpp
index 0bdd09c..9d4a973 100644
--- a/c++/src/H5ArrayType.cpp
+++ b/c++/src/H5ArrayType.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <string>
@@ -21,6 +19,7 @@
#include "H5PropList.h"
#include "H5OcreatProp.h"
#include "H5DcreatProp.h"
+#include "H5LaccProp.h"
#include "H5Location.h"
#include "H5Object.h"
#include "H5DataType.h"
@@ -29,59 +28,97 @@
namespace H5 {
//--------------------------------------------------------------------------
-// Function: ArrayType default constructor
-///\brief Default constructor: Creates a stub ArrayType
-// Programmer Binh-Minh Ribler - May 2004
+// Function: ArrayType default constructor
+///\brief Default constructor: Creates a stub ArrayType
+// Programmer Binh-Minh Ribler - May 2004
//--------------------------------------------------------------------------
ArrayType::ArrayType() : DataType() {}
//--------------------------------------------------------------------------
-// Function: ArrayType overloaded constructor
-///\brief Creates an ArrayType object using an existing id.
-///\param existing_id - IN: Id of an existing datatype
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - May 2004
+// Function: ArrayType overloaded constructor
+///\brief Creates an ArrayType object using an existing id.
+///\param existing_id - IN: Id of an existing datatype
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - May 2004
//--------------------------------------------------------------------------
-ArrayType::ArrayType( const hid_t existing_id ) : DataType( existing_id ) {}
+ArrayType::ArrayType(const hid_t existing_id) : DataType(existing_id) {}
//--------------------------------------------------------------------------
-// Function: ArrayType copy constructor
-///\brief Copy constructor: makes a copy of the original ArrayType object.
-// Programmer Binh-Minh Ribler - May 2004
+// Function: ArrayType copy constructor
+///\brief Copy constructor: makes a copy of the original ArrayType object.
+// Programmer Binh-Minh Ribler - May 2004
//--------------------------------------------------------------------------
ArrayType::ArrayType(const ArrayType& original) : DataType(original) {}
//--------------------------------------------------------------------------
-// Function: ArrayType overloaded constructor
-///\brief Creates a new array data type based on the specified
-/// \a base_type.
-///\param base_type - IN: Existing datatype
-///\param ndims - IN: Rank of the array, [0..H5S_MAX_RANK]
-///\param dims - IN: Size of each array dimension
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - May 2004
+// Function: ArrayType overloaded constructor
+///\brief Creates a new array data type based on the specified
+/// \a base_type.
+///\param base_type - IN: Existing datatype
+///\param ndims - IN: Rank of the array, [0..H5S_MAX_RANK]
+///\param dims - IN: Size of each array dimension
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - May 2004
//--------------------------------------------------------------------------
ArrayType::ArrayType(const DataType& base_type, int ndims, const hsize_t* dims) : DataType()
{
// Call C API to create an array data type
hid_t new_type_id = H5Tarray_create2(base_type.getId(), ndims, dims);
if (new_type_id < 0)
- throw DataTypeIException("ArrayType constructor", "H5Tarray_create2 failed");
+ throw DataTypeIException("ArrayType constructor", "H5Tarray_create2 failed");
// Set the id for this object
id = new_type_id;
}
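// A hedged sketch of this constructor: creating an array datatype (here a 3x4
// array of native ints) and using it as a dataset element type. File and dataset
// names are hypothetical; assumes #include "H5Cpp.h" and using namespace H5;.
void create_array_dataset()
{
    hsize_t adims[2] = {3, 4};
    ArrayType arr_type(PredType::NATIVE_INT, 2, adims);   // backed by H5Tarray_create2
    hsize_t dims[1] = {10};
    DataSpace space(1, dims);
    H5File file("arrays.h5", H5F_ACC_TRUNC);               // hypothetical file
    file.createDataSet("matrices", arr_type, space);       // each element is a 3x4 int array
}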
//--------------------------------------------------------------------------
-// Function: ArrayType::operator=
-///\brief Assignment operator
-///\param rhs - IN: Reference to the existing array datatype
-///\return Reference to ArrayType instance
-///\exception H5::DataTypeIException
+// Function: ArrayType overloaded constructor
+///\brief Creates an ArrayType instance by opening an HDF5 array datatype
+/// given its name, provided as a C character string.
+///\param loc - IN: Location of the type
+///\param dtype_name - IN: Array type name
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - Dec 2016
// Description
-// Closes the id on the lhs object first with setId, then copies
-// each data member from the rhs object. (Issue HDFFV-9562)
-// Programmer Binh-Minh Ribler - Mar 2016
+// In 1.10.1, this constructor was introduced and may replace the
+// existing function CommonFG::openArrayType(const char*) to
+// improve usability.
+// -BMR, Dec 2016
+//--------------------------------------------------------------------------
+ArrayType::ArrayType(const H5Location& loc, const char *dtype_name) : DataType()
+{
+ id = p_opentype(loc, dtype_name);
+}
+
+//--------------------------------------------------------------------------
+// Function: ArrayType overloaded constructor
+///\brief Creates an ArrayType instance by opening an HDF5 array datatype
+/// given its name, provided as an \c H5std_string.
+///\param loc - IN: Location of the type
+///\param dtype_name - IN: Array type name
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - Dec 2016
+// Description
+// In 1.10.1, this constructor was introduced and may replace the
+// existing function CommonFG::openArrayType(const H5std_string&)
+// to improve usability.
+// -BMR, Dec 2016
+//--------------------------------------------------------------------------
+ArrayType::ArrayType(const H5Location& loc, const H5std_string& dtype_name) : DataType()
+{
+ id = p_opentype(loc, dtype_name.c_str());
+}
+
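// A hedged sketch of the new open-by-name constructors above, assuming an array
// datatype committed to the file under a hypothetical name and the usual
// #include "H5Cpp.h" / using namespace H5;:
void commit_and_reopen(H5File& file)
{
    hsize_t adims[1] = {5};
    ArrayType def_type(PredType::NATIVE_DOUBLE, 1, adims);
    def_type.commit(file, "ArrayOfDoubles");       // commit (name) the datatype in the file
    ArrayType reopened(file, "ArrayOfDoubles");    // 1.10.1 constructor: open by name
}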
+//--------------------------------------------------------------------------
+// Function: ArrayType::operator=
+///\brief Assignment operator
+///\param rhs - IN: Reference to the existing array datatype
+///\return Reference to ArrayType instance
+///\exception H5::DataTypeIException
+// Description
+// Closes the id on the lhs object first with setId, then copies
+// each data member from the rhs object. (Issue HDFFV-9562)
+// Programmer Binh-Minh Ribler - Mar 2016
// Modification
//--------------------------------------------------------------------------
ArrayType& ArrayType::operator=(const ArrayType& rhs)
@@ -102,14 +139,14 @@ ArrayType& ArrayType::operator=(const ArrayType& rhs)
}
//--------------------------------------------------------------------------
-// Function: ArrayType::getArrayNDims
-///\brief Returns the number of dimensions for an array datatype.
-///\return Number of dimensions
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - May 2004
+// Function: ArrayType::getArrayNDims
+///\brief Returns the number of dimensions for an array datatype.
+///\return Number of dimensions
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - May 2004
// Modification
-// Apr, 2016
-// Became const.
+// Apr, 2016
+// Became const.
//--------------------------------------------------------------------------
int ArrayType::getArrayNDims() const
{
@@ -117,38 +154,38 @@ int ArrayType::getArrayNDims() const
int ndims = H5Tget_array_ndims(id);
if (ndims < 0)
{
- throw DataTypeIException("ArrayType::getArrayNDims", "H5Tget_array_ndims failed");
+ throw DataTypeIException("ArrayType::getArrayNDims", "H5Tget_array_ndims failed");
}
return(ndims);
}
//--------------------------------------------------------------------------
-// Function: ArrayType::getArrayDims
-///\brief Retrieves the size of all dimensions of an array datatype.
-///\param dims - OUT: Sizes of dimensions
-///\return Number of dimensions
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - May 2004
+// Function: ArrayType::getArrayDims
+///\brief Retrieves the size of all dimensions of an array datatype.
+///\param dims - OUT: Sizes of dimensions
+///\return Number of dimensions
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - May 2004
// Modification
-// Apr, 2016
-// Became const.
+// Apr, 2016
+// Became const.
//--------------------------------------------------------------------------
int ArrayType::getArrayDims(hsize_t* dims) const
{
// Get the dimensions
int ndims = H5Tget_array_dims2(id, dims);
if (ndims < 0)
- throw DataTypeIException("ArrayType::getArrayDims", "H5Tget_array_dims2 failed");
+ throw DataTypeIException("ArrayType::getArrayDims", "H5Tget_array_dims2 failed");
// Return the number of dimensions
return(ndims);
}
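// A hedged sketch of querying an array datatype's shape with the two functions
// above (assumes an already-open ArrayType, plus #include <vector> and
// #include <iostream>):
void print_array_shape(const ArrayType& arr_type)
{
    int ndims = arr_type.getArrayNDims();
    std::vector<hsize_t> dims(ndims);
    arr_type.getArrayDims(&dims[0]);               // fills dims[0..ndims-1]
    for (int i = 0; i < ndims; i++)
        std::cout << dims[i] << (i < ndims - 1 ? " x " : "\n");
}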
//--------------------------------------------------------------------------
-// Function: ArrayType destructor
-///\brief Properly terminates access to this array datatype.
-// Programmer Binh-Minh Ribler - May 2004
+// Function: ArrayType destructor
+///\brief Properly terminates access to this array datatype.
+// Programmer Binh-Minh Ribler - May 2004
//--------------------------------------------------------------------------
ArrayType::~ArrayType() {}
diff --git a/c++/src/H5ArrayType.h b/c++/src/H5ArrayType.h
index 0e9ef46..ffb8712 100644
--- a/c++/src/H5ArrayType.h
+++ b/c++/src/H5ArrayType.h
@@ -6,12 +6,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef __H5ArrayType_H
@@ -27,35 +25,41 @@ namespace H5 {
*/
class H5_DLLCPP ArrayType : public DataType {
public:
- // Constructor that creates a new array data type based on the
- // specified base type.
- ArrayType(const DataType& base_type, int ndims, const hsize_t* dims);
+ // Constructor that creates a new array data type based on the
+ // specified base type.
+ ArrayType(const DataType& base_type, int ndims, const hsize_t* dims);
- // Assignment operator
- ArrayType& operator=(const ArrayType& rhs);
+ // Assignment operator
+ ArrayType& operator=(const ArrayType& rhs);
- // Returns the number of dimensions of this array datatype.
- int getArrayNDims() const;
- //int getArrayNDims(); // removed 1.8.18 and 1.10.1
+ // Constructors that open an array datatype, given a location.
+ ArrayType(const H5Location& loc, const char* name);
+ ArrayType(const H5Location& loc, const H5std_string& name);
- // Returns the sizes of dimensions of this array datatype.
- int getArrayDims(hsize_t* dims) const;
- //int getArrayDims(hsize_t* dims); // removed 1.8.18 and 1.10.1
+ // Returns the number of dimensions of this array datatype.
+ int getArrayNDims() const;
+ //int getArrayNDims(); // removed 1.8.18 and 1.10.1
- ///\brief Returns this class name.
- virtual H5std_string fromClass () const { return("ArrayType"); }
+ // Returns the sizes of dimensions of this array datatype.
+ int getArrayDims(hsize_t* dims) const;
+ //int getArrayDims(hsize_t* dims); // removed 1.8.18 and 1.10.1
- // Copy constructor: makes copy of the original object.
- ArrayType( const ArrayType& original );
+ ///\brief Returns this class name.
+ virtual H5std_string fromClass () const { return("ArrayType"); }
- // Constructor that takes an existing id
- ArrayType( const hid_t existing_id );
+ // Copy constructor: makes copy of the original object.
+ ArrayType(const ArrayType& original);
- // Noop destructor
- virtual ~ArrayType();
+ // Constructor that takes an existing id
+ ArrayType(const hid_t existing_id);
+
+ // Noop destructor
+ virtual ~ArrayType();
+
+ // Default constructor
+ ArrayType();
+
+}; // end of ArrayType
+} // namespace H5
- // Default constructor
- ArrayType();
-};
-}
#endif // __H5ArrayType_H
diff --git a/c++/src/H5AtomType.cpp b/c++/src/H5AtomType.cpp
index fa47e0c..9fe7608 100644
--- a/c++/src/H5AtomType.cpp
+++ b/c++/src/H5AtomType.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <string>
@@ -21,6 +19,7 @@
#include "H5PropList.h"
#include "H5OcreatProp.h"
#include "H5DcreatProp.h"
+#include "H5LaccProp.h"
#include "H5Location.h"
#include "H5Object.h"
#include "H5DataType.h"
@@ -30,265 +29,265 @@ namespace H5 {
#ifndef DOXYGEN_SHOULD_SKIP_THIS
//--------------------------------------------------------------------------
-// Function: AtomType default constructor [protected]
-// Purpose Default constructor: creates a stub atomic datatype.
-// Programmer Binh-Minh Ribler - 2000
+// Function: AtomType default constructor [protected]
+// Purpose Default constructor: creates a stub atomic datatype.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
AtomType::AtomType() : DataType() {}
//--------------------------------------------------------------------------
-// Function: AtomType overloaded constructor [protected]
-// Purpose Creates an AtomType object using an existing id.
-// Parameter existing_id - IN: Id of an existing datatype
-// Exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: AtomType overloaded constructor [protected]
+// Purpose Creates an AtomType object using an existing id.
+// Parameter existing_id - IN: Id of an existing datatype
+// Exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-AtomType::AtomType( const hid_t existing_id ) : DataType( existing_id ) {}
+AtomType::AtomType(const hid_t existing_id) : DataType(existing_id) {}
//--------------------------------------------------------------------------
-// Function: AtomType copy constructor
-///\brief Copy constructor: makes a copy of the original AtomType object.
-// Programmer Binh-Minh Ribler - 2000
+// Function: AtomType copy constructor
+///\brief Copy constructor: makes a copy of the original AtomType object.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-AtomType::AtomType( const AtomType& original ) : DataType( original ) {}
+AtomType::AtomType(const AtomType& original) : DataType(original) {}
#endif // DOXYGEN_SHOULD_SKIP_THIS
//--------------------------------------------------------------------------
-// Function: AtomType::setSize
-///\brief Sets the total size for an atomic datatype.
-///\param size - IN: Size to set
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: AtomType::setSize
+///\brief Sets the total size for an atomic datatype.
+///\param size - IN: Size to set
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void AtomType::setSize( size_t size ) const
+void AtomType::setSize(size_t size) const
{
- // Call C routine H5Tset_size to set the total size
- herr_t ret_value = H5Tset_size( id, size );
- if( ret_value < 0 )
- {
- throw DataTypeIException(inMemFunc("setSize"), "H5Tset_size failed");
- }
+ // Call C routine H5Tset_size to set the total size
+ herr_t ret_value = H5Tset_size(id, size);
+ if (ret_value < 0)
+ {
+ throw DataTypeIException(inMemFunc("setSize"), "H5Tset_size failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: AtomType::getOrder
-///\brief Returns the byte order of an atomic datatype.
-///\return Byte order, which can be:
-/// \li \c H5T_ORDER_LE
-/// \li \c H5T_ORDER_BE
-/// \li \c H5T_ORDER_VAX
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - Mar, 2005
+// Function: AtomType::getOrder
+///\brief Returns the byte order of an atomic datatype.
+///\return Byte order, which can be:
+/// \li \c H5T_ORDER_LE
+/// \li \c H5T_ORDER_BE
+/// \li \c H5T_ORDER_VAX
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - Mar, 2005
//--------------------------------------------------------------------------
H5T_order_t AtomType::getOrder() const
{
- // Call C routine to get the byte ordering
- H5T_order_t type_order = H5Tget_order( id );
+ // Call C routine to get the byte ordering
+ H5T_order_t type_order = H5Tget_order(id);
- // return a byte order constant if successful
- if( type_order == H5T_ORDER_ERROR )
- {
- throw DataTypeIException(inMemFunc("getOrder"),
- "H5Tget_order returns H5T_ORDER_ERROR");
- }
- return( type_order );
+ // return a byte order constant if successful
+ if (type_order == H5T_ORDER_ERROR)
+ {
+ throw DataTypeIException(inMemFunc("getOrder"),
+ "H5Tget_order returns H5T_ORDER_ERROR");
+ }
+ return(type_order);
}
//--------------------------------------------------------------------------
-// Function: AtomType::getOrder
-///\brief This is an overloaded member function, provided for convenience.
-/// It takes a reference to a \c H5std_string for the buffer that
-/// provide the text description of the returned byte order.
-/// The text description can be either of the following:
-/// "Little endian byte ordering (0)";
-/// "Big endian byte ordering (1)";
-/// "VAX mixed byte ordering (2)";
-///\param order_string - OUT: Text description of the returned byte order
-///\return Byte order, which can be:
-/// \li \c H5T_ORDER_LE
-/// \li \c H5T_ORDER_BE
-/// \li \c H5T_ORDER_VAX
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: AtomType::getOrder
+///\brief This is an overloaded member function, provided for convenience.
+/// It takes a reference to a \c H5std_string for the buffer that
+///             provides the text description of the returned byte order.
+///             The text description can be one of the following:
+/// "Little endian byte ordering (0)";
+/// "Big endian byte ordering (1)";
+/// "VAX mixed byte ordering (2)";
+///\param order_string - OUT: Text description of the returned byte order
+///\return Byte order, which can be:
+/// \li \c H5T_ORDER_LE
+/// \li \c H5T_ORDER_BE
+/// \li \c H5T_ORDER_VAX
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-H5T_order_t AtomType::getOrder( H5std_string& order_string ) const
+H5T_order_t AtomType::getOrder(H5std_string& order_string) const
{
- // Call the overloaded to get the type order without text
- H5T_order_t type_order = getOrder();
+ // Call the overloaded to get the type order without text
+ H5T_order_t type_order = getOrder();
- // Then provide the text and return the type order
- if( type_order == H5T_ORDER_LE )
- order_string = "Little endian byte ordering (0)";
- else if( type_order == H5T_ORDER_BE )
- order_string = "Big endian byte ordering (1)";
- else if( type_order == H5T_ORDER_VAX )
- order_string = "VAX mixed byte ordering (2)";
- return( type_order );
+ // Then provide the text and return the type order
+ if (type_order == H5T_ORDER_LE)
+ order_string = "Little endian byte ordering (0)";
+ else if (type_order == H5T_ORDER_BE)
+ order_string = "Big endian byte ordering (1)";
+ else if (type_order == H5T_ORDER_VAX)
+ order_string = "VAX mixed byte ordering (2)";
+ return(type_order);
}
//--------------------------------------------------------------------------
-// Function: AtomType::setOrder
-///\brief Sets the byte ordering of an atomic datatype.
-///\param order - IN: Byte ordering constant, which can be:
-/// \li \c H5T_ORDER_LE
-/// \li \c H5T_ORDER_BE
-/// \li \c H5T_ORDER_VAX
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: AtomType::setOrder
+///\brief Sets the byte ordering of an atomic datatype.
+///\param order - IN: Byte ordering constant, which can be:
+/// \li \c H5T_ORDER_LE
+/// \li \c H5T_ORDER_BE
+/// \li \c H5T_ORDER_VAX
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void AtomType::setOrder( H5T_order_t order ) const
+void AtomType::setOrder(H5T_order_t order) const
{
- // Call C routine to set the byte ordering
- herr_t ret_value = H5Tset_order( id, order );
- if( ret_value < 0 )
- {
- throw DataTypeIException(inMemFunc("setOrder"), "H5Tset_order failed");
- }
+ // Call C routine to set the byte ordering
+ herr_t ret_value = H5Tset_order(id, order);
+ if (ret_value < 0)
+ {
+ throw DataTypeIException(inMemFunc("setOrder"), "H5Tset_order failed");
+ }
}
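// A hedged sketch pairing setOrder() with the string-returning getOrder()
// overload above, on a modifiable copy of a predefined type (assumes
// #include "H5Cpp.h", #include <iostream>, using namespace H5;):
void force_big_endian()
{
    IntType itype(PredType::NATIVE_INT);       // copy of a predefined type
    itype.setOrder(H5T_ORDER_BE);              // H5Tset_order
    H5std_string order_text;
    H5T_order_t order = itype.getOrder(order_text);
    std::cout << order << ": " << order_text << std::endl;   // "1: Big endian byte ordering (1)"
}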
//--------------------------------------------------------------------------
-// Function: AtomType::getPrecision
-///\brief Returns the precision of an atomic datatype.
-///\return Number of significant bits
-///\exception H5::DataTypeIException
+// Function: AtomType::getPrecision
+///\brief Returns the precision of an atomic datatype.
+///\return Number of significant bits
+///\exception H5::DataTypeIException
///\par Description
-/// The precision is the number of significant bits which,
-/// unless padding is present, is 8 times larger than the
-/// value returned by \c DataType::getSize().
-// Programmer Binh-Minh Ribler - 2000
+///             The precision is the number of significant bits which,
+///             unless padding is present, is 8 times the value
+///             returned by \c DataType::getSize().
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
size_t AtomType::getPrecision() const
{
- size_t num_signi_bits = H5Tget_precision( id ); // C routine
+ size_t num_signi_bits = H5Tget_precision(id); // C routine
- // returns number of significant bits if successful
- if( num_signi_bits == 0 )
- {
- throw DataTypeIException(inMemFunc("getPrecision"),
- "H5Tget_precision returns invalid number of significant bits");
- }
- return( num_signi_bits );
+ // returns number of significant bits if successful
+ if (num_signi_bits == 0)
+ {
+ throw DataTypeIException(inMemFunc("getPrecision"),
+ "H5Tget_precision returns invalid number of significant bits");
+ }
+ return(num_signi_bits);
}
//--------------------------------------------------------------------------
-// Function: AtomType::setPrecision
-///\brief Sets the precision of an atomic datatype.
-///\param precision - IN: Number of bits of precision
-///\exception H5::DataTypeIException
+// Function: AtomType::setPrecision
+///\brief Sets the precision of an atomic datatype.
+///\param precision - IN: Number of bits of precision
+///\exception H5::DataTypeIException
///\par Description
-/// For information, please see C layer Reference Manuat at:
+///             For information, please see the C layer Reference Manual at:
/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5T.html#Datatype-SetPrecision
-// Programmer Binh-Minh Ribler - 2000
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void AtomType::setPrecision( size_t precision ) const
+void AtomType::setPrecision(size_t precision) const
{
- // Call C routine to set the datatype precision
- herr_t ret_value = H5Tset_precision( id, precision );
- if( ret_value < 0 )
- {
- throw DataTypeIException(inMemFunc("setPrecision"), "H5Tset_precision failed");
- }
+ // Call C routine to set the datatype precision
+ herr_t ret_value = H5Tset_precision(id, precision);
+ if (ret_value < 0)
+ {
+ throw DataTypeIException(inMemFunc("setPrecision"), "H5Tset_precision failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: AtomType::getOffset
-///\brief Retrieves the bit offset of the first significant bit.
-///\return Offset value
-///\exception H5::DataTypeIException
+// Function: AtomType::getOffset
+///\brief Retrieves the bit offset of the first significant bit.
+///\return Offset value
+///\exception H5::DataTypeIException
///\par Description
-/// For information, please see C layer Reference Manuat at:
+///             For information, please see the C layer Reference Manual at:
/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5T.html#Datatype-GetOffset
-// Programmer Binh-Minh Ribler - 2000
+// Programmer Binh-Minh Ribler - 2000
// Modification
-// 12/05/00: due to C API change
-// - return type changed from size_t to int
-// - offset = -1 when failure occurs vs. 0
+// 12/05/00: due to C API change
+// - return type changed from size_t to int
+// - offset = -1 when failure occurs vs. 0
//--------------------------------------------------------------------------
int AtomType::getOffset() const
{
- int offset = H5Tget_offset( id ); // C routine
+ int offset = H5Tget_offset(id); // C routine
- // returns a non-negative offset value if successful
- if( offset == -1 )
- {
- throw DataTypeIException(inMemFunc("getOffset"),
- "H5Tget_offset returns a negative offset value");
- }
- return( offset );
+ // returns a non-negative offset value if successful
+ if (offset == -1)
+ {
+ throw DataTypeIException(inMemFunc("getOffset"),
+ "H5Tget_offset returns a negative offset value");
+ }
+ return(offset);
}
//--------------------------------------------------------------------------
-// Function: AtomType::setOffset
-///\brief Sets the bit offset of the first significant bit.
-///\param offset - IN: Offset of first significant bit
-///\exception H5::DataTypeIException
+// Function: AtomType::setOffset
+///\brief Sets the bit offset of the first significant bit.
+///\param offset - IN: Offset of first significant bit
+///\exception H5::DataTypeIException
///\par Description
-/// For information, please see C layer Reference Manuat at:
+///             For information, please see the C layer Reference Manual at:
/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5T.html#Datatype-SetOffset
-// Programmer Binh-Minh Ribler - 2000
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void AtomType::setOffset( size_t offset ) const
+void AtomType::setOffset(size_t offset) const
{
- // Call C routine to set the bit offset
- herr_t ret_value = H5Tset_offset( id, offset );
- if( ret_value < 0 )
- {
- throw DataTypeIException(inMemFunc("setOffset"), "H5Tset_offset failed");
- }
+ // Call C routine to set the bit offset
+ herr_t ret_value = H5Tset_offset(id, offset);
+ if (ret_value < 0)
+ {
+ throw DataTypeIException(inMemFunc("setOffset"), "H5Tset_offset failed");
+ }
}
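// A hedged sketch combining setPrecision() and setOffset(): a 32-bit native int
// restricted to 12 significant bits starting at bit 4 (values are illustrative
// only; assumes #include "H5Cpp.h", #include <iostream>, using namespace H5;):
void make_packed_int_type()
{
    IntType itype(PredType::NATIVE_INT);
    itype.setPrecision(12);    // 12 significant bits
    itype.setOffset(4);        // first significant bit is bit 4
    std::cout << "precision: " << itype.getPrecision()
              << ", offset: " << itype.getOffset() << std::endl;
}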
//--------------------------------------------------------------------------
-// Function: AtomType::getPad
-///\brief Retrieves the padding type of the least and most-significant
-/// bit padding.
-///\param lsb - OUT: Least-significant bit padding type
-///\param msb - OUT: Most-significant bit padding type
-///\exception H5::DataTypeIException
+// Function: AtomType::getPad
+///\brief Retrieves the padding type of the least and most-significant
+/// bit padding.
+///\param lsb - OUT: Least-significant bit padding type
+///\param msb - OUT: Most-significant bit padding type
+///\exception H5::DataTypeIException
///\par Description
-/// Possible values for \a lsb and \a msb include:
-/// \li \c H5T_PAD_ZERO (0) - Set background to zeros.
-/// \li \c H5T_PAD_ONE (1) - Set background to ones.
-/// \li \c H5T_PAD_BACKGROUND (2) - Leave background alone.
-// Programmer Binh-Minh Ribler - 2000
+/// Possible values for \a lsb and \a msb include:
+/// \li \c H5T_PAD_ZERO (0) - Set background to zeros.
+/// \li \c H5T_PAD_ONE (1) - Set background to ones.
+/// \li \c H5T_PAD_BACKGROUND (2) - Leave background alone.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void AtomType::getPad( H5T_pad_t& lsb, H5T_pad_t& msb ) const
+void AtomType::getPad(H5T_pad_t& lsb, H5T_pad_t& msb) const
{
- // Call C routine to get the padding type
- herr_t ret_value = H5Tget_pad( id, &lsb, &msb );
- if( ret_value < 0 )
- {
- throw DataTypeIException(inMemFunc("getPad"), "H5Tget_pad failed");
- }
+ // Call C routine to get the padding type
+ herr_t ret_value = H5Tget_pad(id, &lsb, &msb);
+ if (ret_value < 0)
+ {
+ throw DataTypeIException(inMemFunc("getPad"), "H5Tget_pad failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: AtomType::setPad
-///\brief Sets the least and most-significant bits padding types.
-///\param lsb - IN: Least-significant bit padding type
-///\param msb - IN: Most-significant bit padding type
-///\exception H5::DataTypeIException
+// Function: AtomType::setPad
+///\brief Sets the least and most-significant bits padding types.
+///\param lsb - IN: Least-significant bit padding type
+///\param msb - IN: Most-significant bit padding type
+///\exception H5::DataTypeIException
///\par Description
-/// Valid values for \a lsb and \a msb include:
-/// \li \c H5T_PAD_ZERO (0) - Set background to zeros.
-/// \li \c H5T_PAD_ONE (1) - Set background to ones.
-/// \li \c H5T_PAD_BACKGROUND (2) - Leave background alone.
-// Programmer Binh-Minh Ribler - 2000
+/// Valid values for \a lsb and \a msb include:
+/// \li \c H5T_PAD_ZERO (0) - Set background to zeros.
+/// \li \c H5T_PAD_ONE (1) - Set background to ones.
+/// \li \c H5T_PAD_BACKGROUND (2) - Leave background alone.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void AtomType::setPad( H5T_pad_t lsb, H5T_pad_t msb ) const
+void AtomType::setPad(H5T_pad_t lsb, H5T_pad_t msb) const
{
- // Call C routine to set the padding type
- herr_t ret_value = H5Tset_pad( id, lsb, msb );
- if( ret_value < 0 )
- {
- throw DataTypeIException(inMemFunc("setPad"), "H5Tset_pad failed");
- }
+ // Call C routine to set the padding type
+ herr_t ret_value = H5Tset_pad(id, lsb, msb);
+ if (ret_value < 0)
+ {
+ throw DataTypeIException(inMemFunc("setPad"), "H5Tset_pad failed");
+ }
}
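// A hedged sketch of the padding accessors above (same includes and namespace
// assumptions as the other examples):
void adjust_padding()
{
    IntType itype(PredType::NATIVE_INT);
    itype.setPad(H5T_PAD_ZERO, H5T_PAD_ZERO);   // lsb and msb padding set to zeros
    H5T_pad_t lsb, msb;
    itype.getPad(lsb, msb);                     // reads both back via H5Tget_pad
}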
#ifndef DOXYGEN_SHOULD_SKIP_THIS
//--------------------------------------------------------------------------
-// Function: AtomType destructor
-///\brief Noop destructor.
-// Programmer Binh-Minh Ribler - 2000
+// Function: AtomType destructor
+///\brief Noop destructor.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
AtomType::~AtomType() {}
#endif // DOXYGEN_SHOULD_SKIP_THIS
diff --git a/c++/src/H5AtomType.h b/c++/src/H5AtomType.h
index d84b53f..0be3cb5 100644
--- a/c++/src/H5AtomType.h
+++ b/c++/src/H5AtomType.h
@@ -6,12 +6,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef __H5AtomType_H
@@ -30,54 +28,56 @@ namespace H5 {
*/
class H5_DLLCPP AtomType : public DataType {
public:
- // Returns the byte order of an atomic datatype.
- H5T_order_t getOrder() const;
- H5T_order_t getOrder( H5std_string& order_string ) const;
+ // Returns the byte order of an atomic datatype.
+ H5T_order_t getOrder() const;
+ H5T_order_t getOrder(H5std_string& order_string) const;
- // Sets the byte ordering of an atomic datatype.
- void setOrder( H5T_order_t order ) const;
+ // Sets the byte ordering of an atomic datatype.
+ void setOrder(H5T_order_t order) const;
- // Retrieves the bit offset of the first significant bit.
- // 12/05/00 - changed return type to int from size_t - C API
- int getOffset() const;
+ // Retrieves the bit offset of the first significant bit.
+ // 12/05/00 - changed return type to int from size_t - C API
+ int getOffset() const;
- // Sets the bit offset of the first significant bit.
- void setOffset( size_t offset ) const;
+ // Sets the bit offset of the first significant bit.
+ void setOffset(size_t offset) const;
- // Retrieves the padding type of the least and most-significant bit padding.
- void getPad( H5T_pad_t& lsb, H5T_pad_t& msb ) const;
+ // Retrieves the padding type of the least and most-significant bit padding.
+ void getPad(H5T_pad_t& lsb, H5T_pad_t& msb) const;
- // Sets the least and most-significant bits padding types
- void setPad( H5T_pad_t lsb, H5T_pad_t msb ) const;
+ // Sets the least and most-significant bits padding types
+ void setPad(H5T_pad_t lsb, H5T_pad_t msb) const;
- // Returns the precision of an atomic datatype.
- size_t getPrecision() const;
+ // Returns the precision of an atomic datatype.
+ size_t getPrecision() const;
- // Sets the precision of an atomic datatype.
- void setPrecision( size_t precision ) const;
+ // Sets the precision of an atomic datatype.
+ void setPrecision(size_t precision) const;
- // Sets the total size for an atomic datatype.
- void setSize( size_t size ) const;
+ // Sets the total size for an atomic datatype.
+ void setSize(size_t size) const;
- ///\brief Returns this class name.
- virtual H5std_string fromClass () const { return("AtomType"); }
+ ///\brief Returns this class name.
+ virtual H5std_string fromClass () const { return("AtomType"); }
#ifndef DOXYGEN_SHOULD_SKIP_THIS
- // Copy constructor - makes copy of the original object
- AtomType( const AtomType& original );
+ // Copy constructor - makes copy of the original object
+ AtomType(const AtomType& original);
- // Noop destructor
- virtual ~AtomType();
+ // Noop destructor
+ virtual ~AtomType();
#endif // DOXYGEN_SHOULD_SKIP_THIS
protected:
#ifndef DOXYGEN_SHOULD_SKIP_THIS
- // Default constructor
- AtomType();
+ // Default constructor
+ AtomType();
- // Constructor that takes an existing id
- AtomType( const hid_t existing_id );
+ // Constructor that takes an existing id
+ AtomType(const hid_t existing_id);
#endif // DOXYGEN_SHOULD_SKIP_THIS
-};
-}
+
+}; // end of AtomType
+} // namespace H5
+
#endif // __H5AtomType_H
diff --git a/c++/src/H5Attribute.cpp b/c++/src/H5Attribute.cpp
index 4daf174..cbf3029 100644
--- a/c++/src/H5Attribute.cpp
+++ b/c++/src/H5Attribute.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifdef OLD_HEADER_FILENAME
@@ -20,6 +18,7 @@
#endif
#include <string>
+#include "H5private.h" // for HDfree
#include "H5Include.h"
#include "H5Exception.h"
#include "H5IdComponent.h"
@@ -28,15 +27,13 @@
#include "H5FcreatProp.h"
#include "H5OcreatProp.h"
#include "H5DcreatProp.h"
+#include "H5LaccProp.h"
#include "H5Location.h"
#include "H5Object.h"
#include "H5AbstractDs.h"
#include "H5DataType.h"
#include "H5DataSpace.h"
-#include "H5Group.h"
-#include "H5File.h"
#include "H5Attribute.h"
-#include "H5private.h" // for HDfree
namespace H5 {
using std::cerr;
@@ -45,17 +42,17 @@ using std::endl;
class H5_DLLCPP H5Object; // forward declaration for UserData4Aiterate
//--------------------------------------------------------------------------
-// Function: Attribute default constructor
-///\brief Default constructor: Creates a stub attribute
-// Programmer Binh-Minh Ribler - May, 2004
+// Function: Attribute default constructor
+///\brief Default constructor: Creates a stub attribute
+// Programmer Binh-Minh Ribler - May, 2004
//--------------------------------------------------------------------------
Attribute::Attribute() : AbstractDs(), H5Location(), id(H5I_INVALID_HID) {}
//--------------------------------------------------------------------------
-// Function: Attribute copy constructor
-///\brief Copy constructor: makes a copy of the original Attribute object.
-///\param original - IN: Original Attribute object to copy
-// Programmer Binh-Minh Ribler - 2000
+// Function: Attribute copy constructor
+///\brief Copy constructor: makes a copy of the original Attribute object.
+///\param original - IN: Original Attribute object to copy
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
Attribute::Attribute(const Attribute& original) : AbstractDs(), H5Location(), id(original.id)
{
@@ -63,12 +60,12 @@ Attribute::Attribute(const Attribute& original) : AbstractDs(), H5Location(), id
}
//--------------------------------------------------------------------------
-// Function: Attribute overloaded constructor
-///\brief Creates an Attribute object using the id of an existing
-/// attribute.
-///\param existing_id - IN: Id of an existing attribute
-///\exception H5::AttributeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: Attribute overloaded constructor
+///\brief Creates an Attribute object using the id of an existing
+/// attribute.
+///\param existing_id - IN: Id of an existing attribute
+///\exception H5::AttributeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
Attribute::Attribute(const hid_t existing_id) : AbstractDs(), H5Location(), id(existing_id)
{
@@ -76,30 +73,30 @@ Attribute::Attribute(const hid_t existing_id) : AbstractDs(), H5Location(), id(e
}
//--------------------------------------------------------------------------
-// Function: Attribute::write
-///\brief Writes data to this attribute.
-///\param mem_type - IN: Attribute datatype (in memory)
-///\param buf - IN: Data to be written
-///\exception H5::AttributeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: Attribute::write
+///\brief Writes data to this attribute.
+///\param mem_type - IN: Attribute datatype (in memory)
+///\param buf - IN: Data to be written
+///\exception H5::AttributeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void Attribute::write( const DataType& mem_type, const void *buf ) const
+void Attribute::write(const DataType& mem_type, const void *buf) const
{
- herr_t ret_value = H5Awrite( id, mem_type.getId(), buf );
- if( ret_value < 0 )
- {
- throw AttributeIException("Attribute::write", "H5Awrite failed");
- }
+ herr_t ret_value = H5Awrite(id, mem_type.getId(), buf);
+ if (ret_value < 0)
+ {
+ throw AttributeIException("Attribute::write", "H5Awrite failed");
+ }
}
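// A hedged sketch of the buffer overload above: writing a scalar integer
// attribute (object and attribute names are hypothetical; assumes
// #include "H5Cpp.h" and using namespace H5;):
void write_version_attribute(DataSet& dset)
{
    DataSpace scalar_space(H5S_SCALAR);
    Attribute attr = dset.createAttribute("version", PredType::STD_I32LE, scalar_space);
    int version = 3;
    attr.write(PredType::NATIVE_INT, &version);   // mem_type describes the buffer in memory
}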
//--------------------------------------------------------------------------
-// Function: Attribute::write
-///\brief This is an overloaded member function, provided for convenience.
-/// It writes a \a H5std_string to this attribute.
-///\param mem_type - IN: Attribute datatype (in memory)
-///\param strg - IN: Data to be written
-///\exception H5::AttributeIException
-// Programmer Binh-Minh Ribler - Apr, 2003
+// Function: Attribute::write
+///\brief This is an overloaded member function, provided for convenience.
+/// It writes a \a H5std_string to this attribute.
+///\param mem_type - IN: Attribute datatype (in memory)
+///\param strg - IN: Data to be written
+///\exception H5::AttributeIException
+// Programmer Binh-Minh Ribler - Apr, 2003
//--------------------------------------------------------------------------
void Attribute::write(const DataType& mem_type, const H5std_string& strg) const
{
@@ -108,7 +105,7 @@ void Attribute::write(const DataType& mem_type, const H5std_string& strg) const
htri_t is_variable_len = H5Tis_variable_str(mem_type.getId());
if (is_variable_len < 0)
{
- throw AttributeIException("Attribute::write", "H5Tis_variable_str failed");
+ throw AttributeIException("Attribute::write", "H5Tis_variable_str failed");
}
// Convert string to C-string
const char* strg_C;
@@ -118,55 +115,55 @@ void Attribute::write(const DataType& mem_type, const H5std_string& strg) const
// Pass string in differently depends on variable or fixed length
if (!is_variable_len)
{
- ret_value = H5Awrite(id, mem_type.getId(), strg_C);
+ ret_value = H5Awrite(id, mem_type.getId(), strg_C);
}
else
{
- // passing third argument by address
- ret_value = H5Awrite(id, mem_type.getId(), &strg_C);
+ // passing third argument by address
+ ret_value = H5Awrite(id, mem_type.getId(), &strg_C);
}
if (ret_value < 0)
{
- throw AttributeIException("Attribute::write", "H5Awrite failed");
+ throw AttributeIException("Attribute::write", "H5Awrite failed");
}
}
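// A hedged sketch of the H5std_string overload above, for both a fixed-length
// and a variable-length string attribute; this is the case split the code makes
// between passing the C string directly and passing its address. Names are
// hypothetical; assumes #include "H5Cpp.h" and using namespace H5;.
void write_string_attributes(DataSet& dset)
{
    DataSpace scalar_space(H5S_SCALAR);

    StrType fixed_type(PredType::C_S1, 7);              // fixed-length, 7 bytes ("meters" + NUL)
    Attribute a1 = dset.createAttribute("units", fixed_type, scalar_space);
    a1.write(fixed_type, H5std_string("meters"));       // passed as char*

    StrType vlen_type(PredType::C_S1, H5T_VARIABLE);    // variable-length string
    Attribute a2 = dset.createAttribute("comment", vlen_type, scalar_space);
    a2.write(vlen_type, H5std_string("raw data"));      // passed by address of char*
}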
//--------------------------------------------------------------------------
-// Function: Attribute::read
-///\brief Reads data from this attribute.
-///\param mem_type - IN: Attribute datatype (in memory)
-///\param buf - OUT: Buffer for read data
-///\exception H5::AttributeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: Attribute::read
+///\brief Reads data from this attribute.
+///\param mem_type - IN: Attribute datatype (in memory)
+///\param buf - OUT: Buffer for read data
+///\exception H5::AttributeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void Attribute::read( const DataType& mem_type, void *buf ) const
+void Attribute::read(const DataType& mem_type, void *buf) const
{
- herr_t ret_value = H5Aread( id, mem_type.getId(), buf );
- if( ret_value < 0 )
- {
- throw AttributeIException("Attribute::read", "H5Aread failed");
- }
+ herr_t ret_value = H5Aread(id, mem_type.getId(), buf);
+ if (ret_value < 0)
+ {
+ throw AttributeIException("Attribute::read", "H5Aread failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: Attribute::read
-///\brief This is an overloaded member function, provided for convenience.
-/// It reads a \a H5std_string from this attribute.
-///\param mem_type - IN: Attribute datatype (in memory)
-///\param strg - IN: Buffer for read string
-///\exception H5::AttributeIException
-// Programmer Binh-Minh Ribler - Apr, 2003
+// Function: Attribute::read
+///\brief This is an overloaded member function, provided for convenience.
+/// It reads a \a H5std_string from this attribute.
+///\param mem_type - IN: Attribute datatype (in memory)
+///\param strg - OUT: Buffer for read string
+///\exception H5::AttributeIException
+// Programmer Binh-Minh Ribler - Apr, 2003
// Modification
-// Mar 2008
-// Corrected a misunderstanding that H5Aread would allocate
-// space for the buffer. Obtained the attribute size and
-// allocated memory properly. -BMR
-// Apr 2009
-// Used getInMemDataSize to get attribute data size. -BMR
-// Jul 2009
-// Divided into specific private functions for fixed- and
-// variable-len string data: p_read_fixed_len and
-// p_read_variable_len. This should improve readability. -BMR
+// Mar 2008
+// Corrected a misunderstanding that H5Aread would allocate
+// space for the buffer. Obtained the attribute size and
+// allocated memory properly. -BMR
+// Apr 2009
+// Used getInMemDataSize to get attribute data size. -BMR
+// Jul 2009
+// Divided into specific private functions for fixed- and
+// variable-len string data: p_read_fixed_len and
+// p_read_variable_len. This should improve readability. -BMR
//--------------------------------------------------------------------------
void Attribute::read(const DataType& mem_type, H5std_string& strg) const
{
@@ -189,11 +186,11 @@ void Attribute::read(const DataType& mem_type, H5std_string& strg) const
}
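
A minimal sketch of the string read() overload above, assuming an open H5Object and a hypothetical attribute name:

    #include <string>
    #include "H5Cpp.h"
    using namespace H5;

    // Read a string-valued attribute back into an H5std_string; the overload
    // dispatches internally to the fixed- or variable-length private readers.
    H5std_string read_string_attr_sketch(const H5Object& obj)
    {
        Attribute    attr  = obj.openAttribute("description");   // hypothetical name
        DataType     dtype = attr.getDataType();
        H5std_string value;
        attr.read(dtype, value);
        return value;
    }
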
//--------------------------------------------------------------------------
-// Function: Attribute::getInMemDataSize
-///\brief Gets the size in memory of the attribute's data.
-///\return Size of data (in memory)
-///\exception H5::AttributeIException
-// Programmer Binh-Minh Ribler - Apr 2009
+// Function: Attribute::getInMemDataSize
+///\brief Gets the size in memory of the attribute's data.
+///\return Size of data (in memory)
+///\exception H5::AttributeIException
+// Programmer Binh-Minh Ribler - Apr 2009
//--------------------------------------------------------------------------
size_t Attribute::getInMemDataSize() const
{
@@ -201,9 +198,9 @@ size_t Attribute::getInMemDataSize() const
// Get the data type of this attribute
hid_t mem_type_id = H5Aget_type(id);
- if( mem_type_id < 0 )
+ if (mem_type_id < 0)
{
- throw AttributeIException(func, "H5Aget_type failed");
+ throw AttributeIException(func, "H5Aget_type failed");
}
// Get the data type's size by first getting its native type then getting
@@ -211,22 +208,22 @@ size_t Attribute::getInMemDataSize() const
hid_t native_type = H5Tget_native_type(mem_type_id, H5T_DIR_DEFAULT);
if (native_type < 0)
{
- throw AttributeIException(func, "H5Tget_native_type failed");
+ throw AttributeIException(func, "H5Tget_native_type failed");
}
size_t type_size = H5Tget_size(native_type);
if (type_size == 0)
{
- throw AttributeIException(func, "H5Tget_size failed");
+ throw AttributeIException(func, "H5Tget_size failed");
}
// Close the native type and the datatype of this attribute.
if (H5Tclose(native_type) < 0)
{
- throw DataSetIException(func, "H5Tclose(native_type) failed");
+ throw DataSetIException(func, "H5Tclose(native_type) failed");
}
if (H5Tclose(mem_type_id) < 0)
{
- throw DataSetIException(func, "H5Tclose(mem_type_id) failed");
+ throw DataSetIException(func, "H5Tclose(mem_type_id) failed");
}
// Get number of elements of the attribute by first getting its dataspace
@@ -234,18 +231,18 @@ size_t Attribute::getInMemDataSize() const
hid_t space_id = H5Aget_space(id);
if (space_id < 0)
{
- throw AttributeIException(func, "H5Aget_space failed");
+ throw AttributeIException(func, "H5Aget_space failed");
}
hssize_t num_elements = H5Sget_simple_extent_npoints(space_id);
if (num_elements < 0)
{
- throw AttributeIException(func, "H5Sget_simple_extent_npoints failed");
+ throw AttributeIException(func, "H5Sget_simple_extent_npoints failed");
}
// Close the dataspace
if (H5Sclose(space_id) < 0)
{
- throw DataSetIException(func, "H5Sclose failed");
+ throw DataSetIException(func, "H5Sclose failed");
}
// Calculate and return the size of the data
@@ -254,46 +251,46 @@ size_t Attribute::getInMemDataSize() const
}
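
A sketch of how getInMemDataSize() can size a read buffer instead of hard-coding element counts; the element type of the attribute is an assumption:

    #include <vector>
    #include "H5Cpp.h"
    using namespace H5;

    // "attr" is assumed to be an open Attribute holding native ints.
    void read_ints_sketch(const Attribute& attr)
    {
        size_t nbytes = attr.getInMemDataSize();       // type size * number of elements
        std::vector<int> buf(nbytes / sizeof(int));
        attr.read(PredType::NATIVE_INT, buf.data());
    }
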
//--------------------------------------------------------------------------
-// Function: Attribute::getSpace
-///\brief Gets a copy of the dataspace for this attribute.
-///\return Dataspace instance
-///\exception H5::AttributeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: Attribute::getSpace
+///\brief Gets a copy of the dataspace for this attribute.
+///\return Dataspace instance
+///\exception H5::AttributeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
DataSpace Attribute::getSpace() const
{
// Calls C function H5Aget_space to get the id of the dataspace
- hid_t dataspace_id = H5Aget_space( id );
+ hid_t dataspace_id = H5Aget_space(id);
// If the dataspace id is valid, create and return the DataSpace object
- if( dataspace_id > 0 )
- {
- DataSpace dataspace;
- f_DataSpace_setId(&dataspace, dataspace_id);
- return(dataspace);
- }
+ if (dataspace_id > 0)
+ {
+ DataSpace dataspace;
+ f_DataSpace_setId(&dataspace, dataspace_id);
+ return(dataspace);
+ }
else
- {
- throw AttributeIException("Attribute::getSpace", "H5Aget_space failed");
- }
+ {
+ throw AttributeIException("Attribute::getSpace", "H5Aget_space failed");
+ }
}
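
For illustration, getSpace() is usually paired with the DataSpace extent queries to discover an attribute's rank and dimensions:

    #include <iostream>
    #include <vector>
    #include "H5Cpp.h"
    using namespace H5;

    // Print the extent of an attribute's dataspace ("attr" is an open Attribute).
    void print_attr_extent(const Attribute& attr)
    {
        DataSpace space = attr.getSpace();
        int rank = space.getSimpleExtentNdims();
        std::vector<hsize_t> dims(rank > 0 ? rank : 1);
        space.getSimpleExtentDims(dims.data());
        for (int i = 0; i < rank; i++)
            std::cout << "dim[" << i << "] = " << dims[i] << std::endl;
    }
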
//--------------------------------------------------------------------------
-// Function: Attribute::getName
-///\brief Gets the name of this attribute, returning its length.
-///\param attr_name - OUT: Buffer for the name string as char*
-///\param buf_size - IN: Length of the buffer, default to 0
-///\return Actual length of the attribute name
-///\exception H5::AttributeIException
+// Function: Attribute::getName
+///\brief Gets the name of this attribute, returning its length.
+///\param attr_name - OUT: Buffer for the name string as char*
+///\param buf_size - IN: Length of the buffer, defaults to 0
+///\return Actual length of the attribute name
+///\exception H5::AttributeIException
///\par Description
-/// This function retrieves \a buf_size chars of the attribute's
-/// name including null termination. Thus, if the actual length
-/// of the name is more than buf_size-1, the retrieved name will
-/// be truncated to accommodate the null terminator.
-/// To get length of the attribute's name for buffer allocation,
-/// an application can call this function passing in NULL for the
-/// first argument and ignore the second argument.
-// Programmer Binh-Minh Ribler - Mar, 2014
+/// This function retrieves \a buf_size chars of the attribute's
+/// name including null termination. Thus, if the actual length
+/// of the name is more than buf_size-1, the retrieved name will
+/// be truncated to accommodate the null terminator.
+/// To get the length of the attribute's name for buffer allocation,
+/// an application can call this function passing in NULL for the
+/// first argument and ignore the second argument.
+// Programmer Binh-Minh Ribler - Mar, 2014
//--------------------------------------------------------------------------
ssize_t Attribute::getName(char* attr_name, size_t buf_size) const
{
@@ -303,25 +300,25 @@ ssize_t Attribute::getName(char* attr_name, size_t buf_size) const
// If H5Aget_name returns a negative value, raise an exception
if (name_size < 0)
{
- throw AttributeIException("Attribute::getName", "H5Aget_name failed");
+ throw AttributeIException("Attribute::getName", "H5Aget_name failed");
}
else if (name_size == 0)
{
- throw AttributeIException("Attribute::getName", "Attribute must have a name, name length is 0");
+ throw AttributeIException("Attribute::getName", "Attribute must have a name, name length is 0");
}
// Return length of the name
return(name_size);
}
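
The two-call pattern described above (query the length first, then allocate and fetch) looks roughly like this; it is a sketch only:

    #include "H5Cpp.h"
    using namespace H5;

    // First call with a null buffer to get the name length, then allocate and fetch.
    void get_name_sketch(const Attribute& attr)
    {
        char*   no_buf   = NULL;
        ssize_t name_len = attr.getName(no_buf);       // buf_size defaults to 0
        char*   name_C   = new char[name_len + 1];
        attr.getName(name_C, name_len + 1);
        // ... use name_C, then release the buffer ...
        delete[] name_C;
    }
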
//--------------------------------------------------------------------------
-// Function: Attribute::getName
-///\brief Returns the name of this attribute as an \a H5std_string.
-///\return Name of the attribute
-///\exception H5::AttributeIException
-// Programmer Binh-Minh Ribler - May, 2004
+// Function: Attribute::getName
+///\brief Returns the name of this attribute as an \a H5std_string.
+///\return Name of the attribute
+///\exception H5::AttributeIException
+// Programmer Binh-Minh Ribler - May, 2004
// Modification
-// Mar 2014 - BMR
-// Revised to use the modified getName() above
+// Mar 2014 - BMR
+// Revised to use the modified getName() above
//--------------------------------------------------------------------------
H5std_string Attribute::getName() const
{
@@ -333,11 +330,11 @@ H5std_string Attribute::getName() const
// If H5Aget_name failed, throw exception
if (name_size < 0)
{
- throw AttributeIException("Attribute::getName", "H5Aget_name failed");
+ throw AttributeIException("Attribute::getName", "H5Aget_name failed");
}
else if (name_size == 0)
{
- throw AttributeIException("Attribute::getName", "Attribute must have a name, name length is 0");
+ throw AttributeIException("Attribute::getName", "Attribute must have a name, name length is 0");
}
// Attribute's name exists, retrieve it
else if (name_size > 0)
@@ -360,44 +357,44 @@ H5std_string Attribute::getName() const
}
//--------------------------------------------------------------------------
-// Function: Attribute::getName
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function in that it takes an integer
-/// specifying a desired length to be retrieved of the name.
-///\return Name (or part of name) of the attribute
-///\param len - IN: Desired length of the name
-///\exception H5::AttributeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: Attribute::getName
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function in that it takes an integer
+/// specifying the desired length of the name to be retrieved.
+///\return Name (or part of name) of the attribute
+///\param len - IN: Desired length of the name
+///\exception H5::AttributeIException
+// Programmer Binh-Minh Ribler - 2000
// Modification
-// Mar 2014 - BMR
-// Revised to use the new getName() below
+// Mar 2014 - BMR
+// Revised to use the new getName() below
//--------------------------------------------------------------------------
H5std_string Attribute::getName(size_t len) const
{
H5std_string attr_name;
ssize_t name_size = getName(attr_name, len);
if (name_size < 0)
- return("");
+ return("");
else
- return(attr_name);
+ return(attr_name);
}
//--------------------------------------------------------------------------
-// Function: Attribute::getName
-///\brief Gets the name of this attribute, returning its length.
-///\param attr_name - OUT: Buffer for the name string as \a H5std_string
-///\param len - IN: Desired length of the name, default to 0
-///\return Actual length of the attribute name
-///\exception H5::AttributeIException
+// Function: Attribute::getName
+///\brief Gets the name of this attribute, returning its length.
+///\param attr_name - OUT: Buffer for the name string as \a H5std_string
+///\param len - IN: Desired length of the name, defaults to 0
+///\return Actual length of the attribute name
+///\exception H5::AttributeIException
///\par Description
-/// This function retrieves the attribute's name as a string. The
-/// buf_size can specify a specific length or default to 0, in
-/// which case the entire name will be retrieved.
-// Programmer Binh-Minh Ribler - Nov, 2001
+/// This function retrieves the attribute's name as a string. The
+/// argument \a len can specify a specific length or default to 0, in
+/// which case the entire name will be retrieved.
+// Programmer Binh-Minh Ribler - Nov, 2001
// Modification
-// Mar 2014 - BMR
-// Added to replace getName(size_t, H5std_string&) so that it'll
-// allow the argument "len" to be skipped.
+// Mar 2014 - BMR
+// Added to replace getName(size_t, H5std_string&) so that it'll
+// allow the argument "len" to be skipped.
//--------------------------------------------------------------------------
ssize_t Attribute::getName(H5std_string& attr_name, size_t len) const
{
@@ -407,7 +404,7 @@ ssize_t Attribute::getName(H5std_string& attr_name, size_t len) const
if (len == 0)
{
attr_name = getName();
- name_size = attr_name.length();
+ name_size = attr_name.length();
}
// If length is provided, get that number of characters in name
else
@@ -431,33 +428,33 @@ ssize_t Attribute::getName(H5std_string& attr_name, size_t len) const
}
//--------------------------------------------------------------------------
-// Function: Attribute::getName
-// Purpose This function is replaced by the previous function, which
-// provides more convenient prototype. It will be removed
-// in future release.
-// Param len - IN: Desired length of the name
-// Param attr_name - OUT: Buffer for the name string
-// Return Actual length of the attribute name
-// Exception H5::AttributeIException
-// Programmer Binh-Minh Ribler - Nov, 2001
+// Function: Attribute::getName
+// Purpose This function has been replaced by the previous function, which
+// provides a more convenient prototype. It will be removed
+// in a future release.
+// Param len - IN: Desired length of the name
+// Param attr_name - OUT: Buffer for the name string
+// Return Actual length of the attribute name
+// Exception H5::AttributeIException
+// Programmer Binh-Minh Ribler - Nov, 2001
// Modification
-// Modified to call its replacement. -BMR, 2014/04/16
-// Removed from documentation. -BMR, 2016/03/07 1.8.17 and 1.10.0
-// Removed from code. -BMR, 2016/08/11 1.8.18 and 1.10.1
+// Modified to call its replacement. -BMR, 2014/04/16
+// Removed from documentation. -BMR, 2016/03/07 1.8.17 and 1.10.0
+// Removed from code. -BMR, 2016/08/11 1.8.18 and 1.10.1
//--------------------------------------------------------------------------
-//ssize_t Attribute::getName( size_t len, H5std_string& attr_name ) const
+//ssize_t Attribute::getName(size_t len, H5std_string& attr_name) const
//{
// return (getName(attr_name, len));
//}
//--------------------------------------------------------------------------
-// Function: Attribute::getStorageSize
-///\brief Returns the amount of storage size required for this attribute.
-///\return Size of the storage or 0, for no data
-///\exception H5::AttributeIException
-// Note: H5Dget_storage_size returns 0 when there is no data. This
-// function should have no failure. (from SLU)
-// Programmer Binh-Minh Ribler - Mar, 2005
+// Function: Attribute::getStorageSize
+///\brief Returns the amount of storage required for this attribute.
+///\return Size of the storage, or 0 for no data
+///\exception H5::AttributeIException
+// Note: H5Aget_storage_size returns 0 when there is no data. This
+// function should not fail. (from SLU)
+// Programmer Binh-Minh Ribler - Mar, 2005
//--------------------------------------------------------------------------
hsize_t Attribute::getStorageSize() const
{
@@ -467,19 +464,19 @@ hsize_t Attribute::getStorageSize() const
//--------------------------------------------------------------------------
// Function: Attribute::getId
-///\brief Get the id of this attribute
-///\return Attribute identifier
+///\brief Gets the id of this attribute
+///\return Attribute identifier
// Description:
-// Class hierarchy is revised to address bugzilla 1068. Class
-// AbstractDS and Attribute are moved out of H5Object. In
-// addition, member IdComponent::id is moved into subclasses, and
-// IdComponent::getId now becomes pure virtual function.
+// The class hierarchy was revised to address bugzilla 1068. Classes
+// AbstractDs and Attribute were moved out of H5Object. In
+// addition, member IdComponent::id was moved into the subclasses, and
+// IdComponent::getId is now a pure virtual function.
// Programmer Binh-Minh Ribler - May, 2008
// Modification
-// Aug 2016 - BMR
-// Note that Attribute is now inheriting from H5Location, because
-// an attribute id can be used to specify a location in HDF5
-// library.
+// Aug 2016 - BMR
+// Note that Attribute now inherits from H5Location, because
+// an attribute id can be used to specify a location in the HDF5
+// library.
//--------------------------------------------------------------------------
hid_t Attribute::getId() const
{
@@ -487,36 +484,36 @@ hid_t Attribute::getId() const
}
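
A brief sketch of the usual reason to call getId(): passing the wrapped hid_t to a C API routine when no C++ wrapper exists. The helper name is hypothetical:

    #include "H5Cpp.h"
    using namespace H5;

    // Query attribute metadata through the C API using the wrapped id.
    H5A_info_t attr_info_sketch(const Attribute& attr)
    {
        hid_t      attr_id = attr.getId();
        H5A_info_t ainfo;
        if (H5Aget_info(attr_id, &ainfo) < 0)
            throw AttributeIException("attr_info_sketch", "H5Aget_info failed");
        return ainfo;
    }
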
//--------------------------------------------------------------------------
-// Function: Attribute::p_get_type (private)
-// Purpose Gets the datatype of this attribute.
-// Return Id of the datatype
-// Exception H5::AttributeIException
+// Function: Attribute::p_get_type (private)
+// Purpose Gets the datatype of this attribute.
+// Return Id of the datatype
+// Exception H5::AttributeIException
// Description
-// This private function is used in AbstractDs.
-// Programmer Binh-Minh Ribler - 2000
+// This private function is used in AbstractDs.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
hid_t Attribute::p_get_type() const
{
- hid_t type_id = H5Aget_type( id );
- if( type_id > 0 )
- return( type_id );
- else
- {
- throw AttributeIException("", "H5Aget_type failed");
- }
+ hid_t type_id = H5Aget_type(id);
+ if (type_id > 0)
+ return(type_id);
+ else
+ {
+ throw AttributeIException("", "H5Aget_type failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: Attribute::p_read_fixed_len (private)
-// brief Reads a fixed length \a H5std_string from an attribute.
-// param mem_type - IN: Attribute datatype (in memory)
-// param strg - IN: Buffer for read string
-// exception H5::AttributeIException
-// Programmer Binh-Minh Ribler - Jul, 2009
+// Function: Attribute::p_read_fixed_len (private)
+// brief Reads a fixed-length \a H5std_string from an attribute.
+// param mem_type - IN: Attribute datatype (in memory)
+// param strg - OUT: Buffer for read string
+// exception H5::AttributeIException
+// Programmer Binh-Minh Ribler - Jul, 2009
// Modification
-// Jul 2009
-// Separated the fixed length case from the original
-// Attribute::read
+// Jul 2009
+// Separated the fixed length case from the original
+// Attribute::read
//--------------------------------------------------------------------------
void Attribute::p_read_fixed_len(const DataType& mem_type, H5std_string& strg) const
{
@@ -528,31 +525,31 @@ void Attribute::p_read_fixed_len(const DataType& mem_type, H5std_string& strg) c
// If there is data, allocate buffer and read it.
if (attr_size > 0)
{
- char *strg_C = new char[attr_size+1];
- herr_t ret_value = H5Aread(id, mem_type.getId(), strg_C);
- if( ret_value < 0 )
- {
- delete []strg_C; // de-allocate for fixed-len string
- throw AttributeIException("Attribute::read", "H5Aread failed");
- }
- // Get string from the C char* and release resource allocated locally
- strg_C[attr_size] = '\0';
- strg = strg_C;
- delete []strg_C;
+ char *strg_C = new char[attr_size+1];
+ herr_t ret_value = H5Aread(id, mem_type.getId(), strg_C);
+ if (ret_value < 0)
+ {
+ delete []strg_C; // de-allocate for fixed-len string
+ throw AttributeIException("Attribute::read", "H5Aread failed");
+ }
+ // Get string from the C char* and release resource allocated locally
+ strg_C[attr_size] = '\0';
+ strg = strg_C;
+ delete []strg_C;
}
}
//--------------------------------------------------------------------------
-// Function: Attribute::p_read_variable_len (private)
-// brief Reads a variable length \a H5std_string from an attribute.
-// param mem_type - IN: Attribute datatype (in memory)
-// param strg - IN: Buffer for read string
-// exception H5::AttributeIException
-// Programmer Binh-Minh Ribler - Jul, 2009
+// Function: Attribute::p_read_variable_len (private)
+// brief Reads a variable-length \a H5std_string from an attribute.
+// param mem_type - IN: Attribute datatype (in memory)
+// param strg - OUT: Buffer for read string
+// exception H5::AttributeIException
+// Programmer Binh-Minh Ribler - Jul, 2009
// Modification
-// Jul 2009
-// Separated the variable length case from the original
-// Attribute::read. -BMR
+// Jul 2009
+// Separated the variable length case from the original
+// Attribute::read. -BMR
//--------------------------------------------------------------------------
void Attribute::p_read_variable_len(const DataType& mem_type, H5std_string& strg) const
{
@@ -562,9 +559,9 @@ void Attribute::p_read_variable_len(const DataType& mem_type, H5std_string& strg
 // Read attribute, no allocation for variable-len string; the C library will allocate the buffer
herr_t ret_value = H5Aread(id, mem_type.getId(), &strg_C);
- if( ret_value < 0 )
+ if (ret_value < 0)
{
- throw AttributeIException("Attribute::read", "H5Aread failed");
+ throw AttributeIException("Attribute::read", "H5Aread failed");
}
// Get string from the C char* and release resource allocated by C API
@@ -578,65 +575,65 @@ void Attribute::p_read_variable_len(const DataType& mem_type, H5std_string& strg
///\brief Sets the identifier of this object to a new value.
///
///\exception H5::IdComponentException when the attempt to close the HDF5
-/// object fails
+/// object fails
// Description:
-// The underlaying reference counting in the C library ensures
-// that the current valid id of this object is properly closed.
-// Then the object's id is reset to the new id.
+// The underlying reference counting in the C library ensures
+// that the current valid id of this object is properly closed.
+// Then the object's id is reset to the new id.
// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
void Attribute::p_setId(const hid_t new_id)
{
// handling references to this old id
try {
- close();
+ close();
}
catch (Exception& close_error) {
- throw AttributeIException("Attribute::p_setId", close_error.getDetailMsg());
+ throw AttributeIException("Attribute::p_setId", close_error.getDetailMsg());
}
- // reset object's id to the given id
- id = new_id;
+ // reset object's id to the given id
+ id = new_id;
}
#endif // DOXYGEN_SHOULD_SKIP_THIS
//--------------------------------------------------------------------------
-// Function: Attribute::close
-///\brief Closes this attribute.
+// Function: Attribute::close
+///\brief Closes this attribute.
///
-///\exception H5::AttributeIException
-// Programmer Binh-Minh Ribler - Mar 9, 2005
+///\exception H5::AttributeIException
+// Programmer Binh-Minh Ribler - Mar 9, 2005
//--------------------------------------------------------------------------
void Attribute::close()
{
if (p_valid_id(id))
{
- herr_t ret_value = H5Aclose(id);
- if( ret_value < 0 )
- {
- throw AttributeIException("Attribute::close", "H5Aclose failed");
- }
- // reset the id
- id = H5I_INVALID_HID;
+ herr_t ret_value = H5Aclose(id);
+ if (ret_value < 0)
+ {
+ throw AttributeIException("Attribute::close", "H5Aclose failed");
+ }
+ // reset the id
+ id = H5I_INVALID_HID;
}
}
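
For illustration, close() can also be called explicitly ahead of destruction; a second call is a no-op because the id has already been reset. The wrapper function is hypothetical:

    #include <iostream>
    #include "H5Cpp.h"
    using namespace H5;

    void close_attr_sketch(Attribute& attr)
    {
        try {
            attr.close();    // releases the underlying hid_t now
            attr.close();    // harmless: p_valid_id(id) is false after the first call
        }
        catch (AttributeIException& err) {
            std::cerr << err.getDetailMsg() << std::endl;
        }
    }
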
//--------------------------------------------------------------------------
-// Function: Attribute destructor
-///\brief Properly terminates access to this attribute.
-// Programmer Binh-Minh Ribler - 2000
+// Function: Attribute destructor
+///\brief Properly terminates access to this attribute.
+// Programmer Binh-Minh Ribler - 2000
// Modification
-// - Replaced resetIdComponent() with decRefCount() to use C
-// library ID reference counting mechanism - BMR, Jun 1, 2004
-// - Replaced decRefCount with close() to let the C library
-// handle the reference counting - BMR, Jun 1, 2006
+// - Replaced resetIdComponent() with decRefCount() to use C
+// library ID reference counting mechanism - BMR, Jun 1, 2004
+// - Replaced decRefCount with close() to let the C library
+// handle the reference counting - BMR, Jun 1, 2006
//--------------------------------------------------------------------------
Attribute::~Attribute()
{
try {
- close();
+ close();
}
catch (Exception& close_error) {
- cerr << "Attribute::~Attribute - " << close_error.getDetailMsg() << endl;
+ cerr << "Attribute::~Attribute - " << close_error.getDetailMsg() << endl;
}
}
diff --git a/c++/src/H5Attribute.h b/c++/src/H5Attribute.h
index c27bbdf..41e74d9 100644
--- a/c++/src/H5Attribute.h
+++ b/c++/src/H5Attribute.h
@@ -6,12 +6,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef __H5Attribute_H
@@ -32,75 +30,76 @@ namespace H5 {
class H5_DLLCPP Attribute : public AbstractDs, public H5Location {
public:
- // Copy constructor: makes a copy of an existing Attribute object.
- Attribute( const Attribute& original );
+ // Copy constructor: makes a copy of an existing Attribute object.
+ Attribute(const Attribute& original);
- // Default constructor
- Attribute();
+ // Default constructor
+ Attribute();
- // Creates a copy of an existing attribute using the attribute id
- Attribute( const hid_t attr_id );
+ // Creates a copy of an existing attribute using the attribute id
+ Attribute(const hid_t attr_id);
- // Closes this attribute.
- virtual void close();
+ // Closes this attribute.
+ virtual void close();
- // Gets the name of this attribute.
- ssize_t getName(char* attr_name, size_t buf_size = 0) const;
- H5std_string getName(size_t len) const;
- H5std_string getName() const;
- ssize_t getName(H5std_string& attr_name, size_t len = 0) const;
- // The overloaded function below is replaced by the one above and it
- // is kept for backward compatibility purpose.
- ssize_t getName( size_t buf_size, H5std_string& attr_name ) const;
+ // Gets the name of this attribute.
+ ssize_t getName(char* attr_name, size_t buf_size = 0) const;
+ H5std_string getName(size_t len) const;
+ H5std_string getName() const;
+ ssize_t getName(H5std_string& attr_name, size_t len = 0) const;
+        // The overloaded function below has been replaced by the one above and it
+        // is kept for backward compatibility purposes.
+ ssize_t getName(size_t buf_size, H5std_string& attr_name) const;
- // Gets a copy of the dataspace for this attribute.
- virtual DataSpace getSpace() const;
+ // Gets a copy of the dataspace for this attribute.
+ virtual DataSpace getSpace() const;
- // Returns the amount of storage size required for this attribute.
- virtual hsize_t getStorageSize() const;
+        // Returns the amount of storage required for this attribute.
+ virtual hsize_t getStorageSize() const;
- // Returns the in memory size of this attribute's data.
- virtual size_t getInMemDataSize() const;
+        // Returns the in-memory size of this attribute's data.
+ virtual size_t getInMemDataSize() const;
- // Reads data from this attribute.
- void read( const DataType& mem_type, void *buf ) const;
- void read( const DataType& mem_type, H5std_string& strg ) const;
+ // Reads data from this attribute.
+ void read(const DataType& mem_type, void *buf) const;
+ void read(const DataType& mem_type, H5std_string& strg) const;
- // Writes data to this attribute.
- void write(const DataType& mem_type, const void *buf ) const;
- void write(const DataType& mem_type, const H5std_string& strg ) const;
+ // Writes data to this attribute.
+ void write(const DataType& mem_type, const void *buf) const;
+ void write(const DataType& mem_type, const H5std_string& strg) const;
- ///\brief Returns this class name.
- virtual H5std_string fromClass () const { return("Attribute"); }
+ ///\brief Returns this class name.
+ virtual H5std_string fromClass () const { return("Attribute"); }
- // Gets the attribute id.
- virtual hid_t getId() const;
+ // Gets the attribute id.
+ virtual hid_t getId() const;
- // Destructor: properly terminates access to this attribute.
- virtual ~Attribute();
+ // Destructor: properly terminates access to this attribute.
+ virtual ~Attribute();
#ifndef DOXYGEN_SHOULD_SKIP_THIS
protected:
- // Sets the attribute id.
- virtual void p_setId(const hid_t new_id);
+ // Sets the attribute id.
+ virtual void p_setId(const hid_t new_id);
#endif // DOXYGEN_SHOULD_SKIP_THIS
private:
- hid_t id; // HDF5 attribute id
+ hid_t id; // HDF5 attribute id
- // This function contains the common code that is used by
- // getTypeClass and various API functions getXxxType
- // defined in AbstractDs for generic datatype and specific
- // sub-types
- virtual hid_t p_get_type() const;
+ // This function contains the common code that is used by
+ // getTypeClass and various API functions getXxxType
+ // defined in AbstractDs for generic datatype and specific
+ // sub-types
+ virtual hid_t p_get_type() const;
- // Reads variable or fixed len strings from this attribute.
- void p_read_variable_len(const DataType& mem_type, H5std_string& strg) const;
- void p_read_fixed_len(const DataType& mem_type, H5std_string& strg) const;
+ // Reads variable or fixed len strings from this attribute.
+ void p_read_variable_len(const DataType& mem_type, H5std_string& strg) const;
+ void p_read_fixed_len(const DataType& mem_type, H5std_string& strg) const;
- // Friend function to set Attribute id. For library use only.
- friend void f_Attribute_setId(Attribute* attr, hid_t new_id);
+ // Friend function to set Attribute id. For library use only.
+ friend void f_Attribute_setId(Attribute* attr, hid_t new_id);
+
+}; // end of Attribute
+} // namespace H5
-};
-}
#endif // __H5Attribute_H
diff --git a/c++/src/H5Classes.h b/c++/src/H5Classes.h
index bed0cae..f0f6359 100644
--- a/c++/src/H5Classes.h
+++ b/c++/src/H5Classes.h
@@ -6,42 +6,41 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef __H5Classes_H
#define __H5Classes_H
namespace H5 {
- class Exception;
- class IdComponent;
- class H5Location;
- class H5Object;
- class PropList;
- class FileCreatPropList;
- class FileAccPropList;
- class DSetCreatPropList;
- class DSetMemXferPropList;
- class DTypePropList;
- class DataType;
- class DataSpace;
- class AtomType;
- class PredType;
- class EnumType;
- class IntType;
- class FloatType;
- class StrType;
- class CompType;
- class AbstractDs;
- class DataSet;
- class Group;
- class H5File;
- class Attribute;
- class H5Library;
+ class Exception;
+ class IdComponent;
+ class H5Location;
+ class H5Object;
+ class PropList;
+ class FileCreatPropList;
+ class FileAccPropList;
+ class LinkAccPropList;
+ class DSetCreatPropList;
+ class DSetMemXferPropList;
+ class DTypePropList;
+ class DataType;
+ class DataSpace;
+ class AtomType;
+ class PredType;
+ class EnumType;
+ class IntType;
+ class FloatType;
+ class StrType;
+ class CompType;
+ class AbstractDs;
+ class DataSet;
+ class Group;
+ class H5File;
+ class Attribute;
+ class H5Library;
}
#endif // __H5Classes_H
diff --git a/c++/src/H5CommonFG.cpp b/c++/src/H5CommonFG.cpp
index a1e0c3f..c9c203a 100644
--- a/c++/src/H5CommonFG.cpp
+++ b/c++/src/H5CommonFG.cpp
@@ -5,35 +5,30 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <string>
#include "H5Include.h"
+#include "H5private.h" // for HDstrcpy
#include "H5Exception.h"
#include "H5IdComponent.h"
+#include "H5DataSpace.h"
#include "H5PropList.h"
-#include "H5FaccProp.h"
-#include "H5FcreatProp.h"
-#include "H5OcreatProp.h"
#include "H5DxferProp.h"
+#include "H5OcreatProp.h"
#include "H5DcreatProp.h"
+#include "H5LaccProp.h"
#include "H5Location.h"
#include "H5Object.h"
-#include "H5CommonFG.h"
-#include "H5Group.h"
+#include "H5Alltypes.h"
#include "H5AbstractDs.h"
-#include "H5DataSpace.h"
#include "H5DataSet.h"
-#include "H5File.h"
-#include "H5Alltypes.h"
-#include "H5private.h" // for HDstrcpy
+#include "H5CommonFG.h"
// There are a few comments that are common to most of the functions
// defined in this file so they are listed here.
@@ -43,28 +38,345 @@
 // to call the right getId() - although, given the structure of the
 // library at this time, getId() is basically IdComponent::getId()
 // - when a failure is returned by the C API, the functions will call
-// throwwException, which is a pure virtual function and is implemented
+// throwException, which is a pure virtual function and is implemented
// by H5File to throw a FileIException and by Group to throw a
// GroupIException.
// December 2000
namespace H5 {
+//--------------------------------------------------------------------------
+// Function: CommonFG::openDataType
+///\brief Opens the named generic datatype at this location.
+///\param name - IN: Name of the datatype to open
+///\return DataType instance
+///\exception H5::FileIException or H5::GroupIException
+// Programmer Binh-Minh Ribler - 2000
+//--------------------------------------------------------------------------
+DataType CommonFG::openDataType(const char* name) const
+{
+ // Call C function H5Topen2 to open the named datatype in this group,
+ // given either the file or group id
+ hid_t type_id = H5Topen2(getLocId(), name, H5P_DEFAULT);
+
+ // If the datatype's opening failed, throw an exception
+ if (type_id < 0)
+ throwException("openDataType", "H5Topen2 failed");
+
+ // No failure, create and return the DataType object
+ DataType data_type(type_id);
+ return(data_type);
+}
+
+//--------------------------------------------------------------------------
+// Function: CommonFG::openDataType
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function in that it takes an
+/// \c H5std_string for \a name.
+// Programmer Binh-Minh Ribler - 2000
+//--------------------------------------------------------------------------
+DataType CommonFG::openDataType(const H5std_string& name) const
+{
+ return(openDataType(name.c_str()));
+}
+
+//--------------------------------------------------------------------------
+// Function: CommonFG::openArrayType
+///\brief Opens the named array datatype at this location.
+///\param name - IN: Name of the array datatype to open
+///\return ArrayType instance
+///\exception H5::FileIException or H5::GroupIException
+// Programmer Binh-Minh Ribler - Jul, 2005
+//--------------------------------------------------------------------------
+ArrayType CommonFG::openArrayType(const char* name) const
+{
+ // Call C function H5Topen2 to open the named datatype in this group,
+ // given either the file or group id
+ hid_t type_id = H5Topen2(getLocId(), name, H5P_DEFAULT);
+
+ // If the datatype's opening failed, throw an exception
+ if (type_id < 0)
+ throwException("openArrayType", "H5Topen2 failed");
+
+ // No failure, create and return the ArrayType object
+ ArrayType array_type;
+ f_DataType_setId(&array_type, type_id);
+ return(array_type);
+}
+
+//--------------------------------------------------------------------------
+// Function: CommonFG::openArrayType
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function in that it takes an
+/// \c H5std_string for \a name.
+// Programmer Binh-Minh Ribler - Jul, 2005
+//--------------------------------------------------------------------------
+ArrayType CommonFG::openArrayType(const H5std_string& name) const
+{
+ return(openArrayType(name.c_str()));
+}
+
+//--------------------------------------------------------------------------
+// Function: CommonFG::openCompType
+///\brief Opens the named compound datatype at this location.
+///\param name - IN: Name of the compound datatype to open
+///\return CompType instance
+///\exception H5::FileIException or H5::GroupIException
+// Programmer Binh-Minh Ribler - 2000
+//--------------------------------------------------------------------------
+CompType CommonFG::openCompType(const char* name) const
+{
+ // Call C function H5Topen2 to open the named datatype in this group,
+ // given either the file or group id
+ hid_t type_id = H5Topen2(getLocId(), name, H5P_DEFAULT);
+
+ // If the datatype's opening failed, throw an exception
+ if (type_id < 0)
+ throwException("openCompType", "H5Topen2 failed");
+
+ // No failure, create and return the CompType object
+ CompType comp_type;
+ f_DataType_setId(&comp_type, type_id);
+ return(comp_type);
+}
+
+//--------------------------------------------------------------------------
+// Function: CommonFG::openCompType
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function in that it takes an
+/// \c H5std_string for \a name.
+// Programmer Binh-Minh Ribler - 2000
+//--------------------------------------------------------------------------
+CompType CommonFG::openCompType(const H5std_string& name) const
+{
+ return(openCompType(name.c_str()));
+}
+
+//--------------------------------------------------------------------------
+// Function: CommonFG::openEnumType
+///\brief Opens the named enumeration datatype at this location.
+///\param name - IN: Name of the enumeration datatype to open
+///\return EnumType instance
+///\exception H5::FileIException or H5::GroupIException
+// Programmer Binh-Minh Ribler - 2000
+//--------------------------------------------------------------------------
+EnumType CommonFG::openEnumType(const char* name) const
+{
+ // Call C function H5Topen2 to open the named datatype in this group,
+ // given either the file or group id
+ hid_t type_id = H5Topen2(getLocId(), name, H5P_DEFAULT);
+
+ // If the datatype's opening failed, throw an exception
+ if (type_id < 0)
+ throwException("openEnumType", "H5Topen2 failed");
+
+ // No failure, create and return the EnumType object
+ EnumType enum_type;
+ f_DataType_setId(&enum_type, type_id);
+ return(enum_type);
+}
+
+//--------------------------------------------------------------------------
+// Function: CommonFG::openEnumType
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function in that it takes an
+/// \c H5std_string for \a name.
+// Programmer Binh-Minh Ribler - 2000
+//--------------------------------------------------------------------------
+EnumType CommonFG::openEnumType(const H5std_string& name) const
+{
+ return(openEnumType(name.c_str()));
+}
+
+//--------------------------------------------------------------------------
+// Function: CommonFG::openIntType
+///\brief Opens the named integer datatype at this location.
+///\param name - IN: Name of the integer datatype to open
+///\return IntType instance
+///\exception H5::FileIException or H5::GroupIException
+// Programmer Binh-Minh Ribler - 2000
+//--------------------------------------------------------------------------
+IntType CommonFG::openIntType(const char* name) const
+{
+ // Call C function H5Topen2 to open the named datatype in this group,
+ // given either the file or group id
+ hid_t type_id = H5Topen2(getLocId(), name, H5P_DEFAULT);
+
+ // If the datatype's opening failed, throw an exception
+ if (type_id < 0)
+ throwException("openIntType", "H5Topen2 failed");
+
+ // No failure, create and return the IntType object
+ IntType int_type;
+ f_DataType_setId(&int_type, type_id);
+ return(int_type);
+}
+
+//--------------------------------------------------------------------------
+// Function: CommonFG::openIntType
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function in that it takes an
+/// \c H5std_string for \a name.
+// Programmer Binh-Minh Ribler - 2000
+//--------------------------------------------------------------------------
+IntType CommonFG::openIntType(const H5std_string& name) const
+{
+ return(openIntType(name.c_str()));
+}
+
+//--------------------------------------------------------------------------
+// Function: CommonFG::openFloatType
+///\brief Opens the named floating-point datatype at this location.
+///\param name - IN: Name of the floating-point datatype to open
+///\return FloatType instance
+///\exception H5::FileIException or H5::GroupIException
+// Programmer Binh-Minh Ribler - 2000
+//--------------------------------------------------------------------------
+FloatType CommonFG::openFloatType(const char* name) const
+{
+ // Call C function H5Topen2 to open the named datatype in this group,
+ // given either the file or group id
+ hid_t type_id = H5Topen2(getLocId(), name, H5P_DEFAULT);
+
+ // If the datatype's opening failed, throw an exception
+ if (type_id < 0)
+ throwException("openFloatType", "H5Topen2 failed");
+
+ // No failure, create and return the FloatType object
+ FloatType float_type;
+ f_DataType_setId(&float_type, type_id);
+ return(float_type);
+}
+
+//--------------------------------------------------------------------------
+// Function: CommonFG::openFloatType
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function in that it takes an
+/// \c H5std_string for \a name.
+// Programmer Binh-Minh Ribler - 2000
+//--------------------------------------------------------------------------
+FloatType CommonFG::openFloatType(const H5std_string& name) const
+{
+ return(openFloatType(name.c_str()));
+}
+
+//--------------------------------------------------------------------------
+// Function: CommonFG::openStrType
+///\brief Opens the named string datatype at this location.
+///\param name - IN: Name of the string datatype to open
+///\return StrType instance
+///\exception H5::FileIException or H5::GroupIException
+// Programmer Binh-Minh Ribler - 2000
+//--------------------------------------------------------------------------
+StrType CommonFG::openStrType(const char* name) const
+{
+ // Call C function H5Topen2 to open the named datatype in this group,
+ // given either the file or group id
+ hid_t type_id = H5Topen2(getLocId(), name, H5P_DEFAULT);
+
+ // If the datatype's opening failed, throw an exception
+ if (type_id < 0)
+ throwException("openStrType", "H5Topen2 failed");
+
+ // No failure, create and return the StrType object
+ StrType str_type;
+ f_DataType_setId(&str_type, type_id);
+ return(str_type);
+}
+
+//--------------------------------------------------------------------------
+// Function: CommonFG::openStrType
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function in that it takes an
+/// \c H5std_string for \a name.
+// Programmer Binh-Minh Ribler - 2000
+//--------------------------------------------------------------------------
+StrType CommonFG::openStrType(const H5std_string& name) const
+{
+ return(openStrType(name.c_str()));
+}
+
+//--------------------------------------------------------------------------
+// Function: CommonFG::openVarLenType
+///\brief Opens the named variable length datatype at this location.
+///\param name - IN: Name of the variable length datatype to open
+///\return VarLenType instance
+///\exception H5::FileIException or H5::GroupIException
+// Programmer Binh-Minh Ribler - Jul, 2005
+//--------------------------------------------------------------------------
+VarLenType CommonFG::openVarLenType(const char* name) const
+{
+ // Call C function H5Topen2 to open the named datatype in this group,
+ // given either the file or group id
+ hid_t type_id = H5Topen2(getLocId(), name, H5P_DEFAULT);
+
+ // If the datatype's opening failed, throw an exception
+ if (type_id < 0)
+ throwException("openVarLenType", "H5Topen2 failed");
+
+ // No failure, create and return the VarLenType object
+ VarLenType varlen_type;
+ f_DataType_setId(&varlen_type, type_id);
+ return(varlen_type);
+}
+
+//--------------------------------------------------------------------------
+// Function: CommonFG::openVarLenType
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function in that it takes an
+/// \c H5std_string for \a name.
+// Programmer Binh-Minh Ribler - Jul, 2005
+//--------------------------------------------------------------------------
+VarLenType CommonFG::openVarLenType(const H5std_string& name) const
+{
+ return(openVarLenType(name.c_str()));
+}
+
#ifndef DOXYGEN_SHOULD_SKIP_THIS
//--------------------------------------------------------------------------
-// Function: CommonFG default constructor
-///\brief Default constructor.
-// Programmer Binh-Minh Ribler - 2000
+// Function: CommonFG default constructor
+///\brief Default constructor.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
CommonFG::CommonFG() {}
//--------------------------------------------------------------------------
-// Function: CommonFG destructor
-///\brief Noop destructor.
-// Programmer Binh-Minh Ribler - 2000
+// Function: CommonFG destructor
+///\brief Noop destructor.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
CommonFG::~CommonFG() {}
-#endif // DOXYGEN_SHOULD_SKIP_THIS
+//--------------------------------------------------------------------------
+// Function: f_DataType_setId - friend
+// Purpose: This function is a friend of class H5::DataType so that it
+// can set DataType::id in order to work around a problem
+// described in the JIRA issue HDFFV-7947.
+// Applications shouldn't need to use it.
+// param dtype - IN/OUT: DataType object to be changed
+// param new_id - IN: New id to set
+// Programmer Binh-Minh Ribler - 2015
+//--------------------------------------------------------------------------
+void f_DataType_setId(DataType* dtype, hid_t new_id)
+{
+ dtype->p_setId(new_id);
+}
+//--------------------------------------------------------------------------
+// Function: f_DataSet_setId - friend
+// Purpose: This function is a friend of class H5::DataSet so that it
+// can set DataSet::id in order to work around a problem
+// described in the JIRA issue HDFFV-7947.
+// Applications shouldn't need to use it.
+// param dset - IN/OUT: DataSet object to be changed
+// param new_id - IN: New id to set
+// Programmer Binh-Minh Ribler - 2015
+//--------------------------------------------------------------------------
+void f_DataSet_setId(DataSet* dset, hid_t new_id)
+{
+ dset->p_setId(new_id);
}
+
+#endif // DOXYGEN_SHOULD_SKIP_THIS
+
+} // end namespace
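
A usage sketch for the open*Type family restored above, assuming H5File derives from CommonFG as the class comment states; the file name and committed type names are assumptions:

    #include <iostream>
    #include "H5Cpp.h"
    using namespace H5;

    int main()
    {
        H5File file("example.h5", H5F_ACC_RDONLY);

        // H5File and Group both derive from CommonFG, so either can open
        // a committed (named) datatype stored in the file.
        CompType particle = file.openCompType("Particle");   // hypothetical type name
        StrType  label    = file.openStrType("Label16");     // hypothetical type name

        std::cout << "Particle is " << particle.getSize() << " bytes" << std::endl;
        return 0;
    }
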
diff --git a/c++/src/H5CommonFG.h b/c++/src/H5CommonFG.h
index a68d586..3cbad76 100644
--- a/c++/src/H5CommonFG.h
+++ b/c++/src/H5CommonFG.h
@@ -6,12 +6,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef __CommonFG_H
@@ -19,28 +17,73 @@
namespace H5 {
+// Class forwarding
+class Group;
+class H5File;
+class ArrayType;
+class VarLenType;
+
/*! \class CommonFG
- \brief \a CommonFG was an abstract base class of H5File and H5Group.
+ \brief \a CommonFG is an abstract base class of H5File and H5Group.
- It provided common operations of H5File and H5Group.
- In release 1.10.1, the class structure is modified.
- As a result, member functions of CommonFG are moved to Group.
+ It provides common operations of H5File and H5Group.
*/
class H5_DLLCPP CommonFG {
public:
+ // Opens a generic named datatype in this location.
+ DataType openDataType(const char* name) const;
+ DataType openDataType(const H5std_string& name) const;
+
+ // Opens a named array datatype in this location.
+ ArrayType openArrayType(const char* name) const;
+ ArrayType openArrayType(const H5std_string& name) const;
+
+ // Opens a named compound datatype in this location.
+ CompType openCompType(const char* name) const;
+ CompType openCompType(const H5std_string& name) const;
+
+ // Opens a named enumeration datatype in this location.
+ EnumType openEnumType(const char* name) const;
+ EnumType openEnumType(const H5std_string& name) const;
+
+ // Opens a named integer datatype in this location.
+ IntType openIntType(const char* name) const;
+ IntType openIntType(const H5std_string& name) const;
+
+ // Opens a named floating-point datatype in this location.
+ FloatType openFloatType(const char* name) const;
+ FloatType openFloatType(const H5std_string& name) const;
+
+ // Opens a named string datatype in this location.
+ StrType openStrType(const char* name) const;
+ StrType openStrType(const H5std_string& name) const;
+
+ // Opens a named variable length datatype in this location.
+ VarLenType openVarLenType(const char* name) const;
+ VarLenType openVarLenType(const H5std_string& name) const;
+
#ifndef DOXYGEN_SHOULD_SKIP_THIS
- // Default constructor.
- CommonFG();
+ /// For subclasses, H5File and Group, to return the correct
+ /// object id, i.e. file or group id.
+ virtual hid_t getLocId() const = 0;
+
- // Noop destructor.
- virtual ~CommonFG();
+ /// For subclasses, H5File and Group, to throw appropriate exception.
+ virtual void throwException(const H5std_string& func_name, const H5std_string& msg) const = 0;
+
+ // Default constructor.
+ CommonFG();
+
+ // Noop destructor.
+ virtual ~CommonFG();
protected:
- virtual void p_setId(const hid_t new_id) = 0;
+ virtual void p_setId(const hid_t new_id) = 0;
#endif // DOXYGEN_SHOULD_SKIP_THIS
-}; // end of CommonFG declaration
-}
+}; // end of CommonFG
+} // namespace H5
+
#endif // __CommonFG_H
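
Because getLocId() and throwException() are pure virtual, code can be written against CommonFG itself and used with H5File or Group interchangeably. A small sketch; the committed enum type name is an assumption:

    #include "H5Cpp.h"
    using namespace H5;

    // Works for any CommonFG subclass (H5File or Group); on failure the
    // subclass's throwException() raises FileIException or GroupIException.
    EnumType open_color_type(const CommonFG& loc)
    {
        return loc.openEnumType("Color");    // hypothetical committed enum type
    }
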
diff --git a/c++/src/H5CompType.cpp b/c++/src/H5CompType.cpp
index 784d171..f7862c9 100644
--- a/c++/src/H5CompType.cpp
+++ b/c++/src/H5CompType.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <string>
@@ -22,178 +20,216 @@
#include "H5OcreatProp.h"
#include "H5DcreatProp.h"
#include "H5DxferProp.h"
+#include "H5LaccProp.h"
#include "H5Location.h"
#include "H5Object.h"
#include "H5Alltypes.h"
#include "H5AbstractDs.h"
#include "H5DataSpace.h"
#include "H5DataSet.h"
-#include "H5private.h"
namespace H5 {
//--------------------------------------------------------------------------
-// Function: CompType default constructor
-///\brief Default constructor: Creates a stub compound datatype
-// Programmer Binh-Minh Ribler - 2000
+// Function: CompType default constructor
+///\brief Default constructor: Creates a stub compound datatype
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
CompType::CompType() : DataType() {}
//--------------------------------------------------------------------------
-// Function: CompType copy constructor
-///\brief Copy constructor: makes copy of the original CompType object
-///\param original - IN: Original CompType instance
-// Programmer Binh-Minh Ribler - 2000
+// Function: CompType copy constructor
+///\brief Copy constructor: makes copy of the original CompType object
+///\param original - IN: Original CompType instance
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-CompType::CompType( const CompType& original ) : DataType( original ) {}
+CompType::CompType(const CompType& original) : DataType(original) {}
//--------------------------------------------------------------------------
-// Function: CompType overloaded constructor
-///\brief Creates a CompType object using the id of an existing datatype.
-///\param existing_id - IN: Id of an existing compound datatype
-// Programmer Binh-Minh Ribler - 2000
+// Function: CompType overloaded constructor
+///\brief Creates a CompType object using the id of an existing datatype.
+///\param existing_id - IN: Id of an existing compound datatype
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-CompType::CompType( const hid_t existing_id ) : DataType( existing_id ) {}
+CompType::CompType(const hid_t existing_id) : DataType(existing_id) {}
//--------------------------------------------------------------------------
-// Function: CompType overloaded constructor
-///\brief Creates an empty compound datatype given a size, in bytes.
-///\param size - IN: Number of bytes in the datatype to create
-///\exception H5::DataTypeIException
+// Function: CompType overloaded constructor
+///\brief Creates an empty compound datatype given a size, in bytes.
+///\param size - IN: Number of bytes in the datatype to create
+///\exception H5::DataTypeIException
// Description
-// The DataType constructor calls the C API H5Tcreate to create
-// the compound datatype.
-// Programmer Binh-Minh Ribler - 2000
+// The DataType constructor calls the C API H5Tcreate to create
+// the compound datatype.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-CompType::CompType( size_t size ) : DataType( H5T_COMPOUND, size ) {}
+CompType::CompType(size_t size) : DataType(H5T_COMPOUND, size) {}
//--------------------------------------------------------------------------
-// Function: CompType overloaded constructor
-///\brief Gets the compound datatype of the specified dataset.
-///\param dataset - IN: Dataset that this enum datatype associates with
-///\return CompType instance
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: CompType overloaded constructor
+///\brief Gets the compound datatype of the specified dataset.
+///\param dataset - IN: Dataset that this compound datatype is associated with
+///\return CompType instance
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-CompType::CompType( const DataSet& dataset ) : DataType()
+CompType::CompType(const DataSet& dataset) : DataType()
{
- // Calls C function H5Dget_type to get the id of the datatype
- id = H5Dget_type( dataset.getId() );
-
- // If the datatype id is invalid, throw exception
- if( id < 0 )
- {
- throw DataSetIException("CompType constructor", "H5Dget_type failed");
- }
+ // Calls C function H5Dget_type to get the id of the datatype
+ id = H5Dget_type(dataset.getId());
+
+ // If the datatype id is invalid, throw exception
+ if (id < 0)
+ {
+ throw DataSetIException("CompType constructor", "H5Dget_type failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: CompType::getNmembers
-///\brief Returns the number of members in this compound datatype.
-///\return Number of members
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: CompType overloaded constructor
+///\brief Creates a CompType instance by opening an HDF5 compound
+/// datatype given its name, provided as a C character string.
+///\param loc - IN: Location of the type
+///\param dtype_name - IN: Compound type name
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - Dec 2016
+// Description
+// In 1.10.1, this constructor was introduced and may replace the
+// existing function CommonFG::openCompType(const char*) to
+// improve usability.
+// -BMR, Dec 2016
+//--------------------------------------------------------------------------
+CompType::CompType(const H5Location& loc, const char *dtype_name) : DataType()
+{
+ id = p_opentype(loc, dtype_name);
+}
+
+//--------------------------------------------------------------------------
+// Function: CompType overloaded constructor
+///\brief Creates a CompType instance by opening an HDF5 compound
+/// datatype given its name, provided as an \c H5std_string.
+///\param loc - IN: Location of the type
+///\param dtype_name - IN: Compound type name
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - Dec 2016
+// Description
+// In 1.10.1, this constructor was introduced and may replace the
+// existing function CommonFG::openCompType(const H5std_string&)
+// to improve usability.
+// -BMR, Dec 2016
+//--------------------------------------------------------------------------
+CompType::CompType(const H5Location& loc, const H5std_string& dtype_name) : DataType()
+{
+ id = p_opentype(loc, dtype_name.c_str());
+}
+
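A minimal usage sketch for the two constructors above; the file name "sample.h5" and the committed compound datatype path "/Complex" are assumptions for illustration only:

    #include "H5Cpp.h"
    using namespace H5;

    int main()
    {
        // H5File is an H5Location, so it can be passed directly to the
        // new constructors (a Group would work the same way).
        H5File file("sample.h5", H5F_ACC_RDONLY);

        // Open a committed compound datatype by name (const char* overload).
        CompType ctype(file, "/Complex");

        // The H5std_string overload behaves identically.
        H5std_string tname("/Complex");
        CompType ctype2(file, tname);

        return 0;
    }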
+//--------------------------------------------------------------------------
+// Function: CompType::getNmembers
+///\brief Returns the number of members in this compound datatype.
+///\return Number of members
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
int CompType::getNmembers() const
{
- int num_members = H5Tget_nmembers( id );
- if( num_members < 0 )
- {
- throw DataTypeIException("CompType::getNmembers",
- "H5Tget_nmembers returns negative number of members");
- }
- return( num_members );
+ int num_members = H5Tget_nmembers(id);
+ if (num_members < 0)
+ {
+ throw DataTypeIException("CompType::getNmembers",
+ "H5Tget_nmembers returns negative number of members");
+ }
+ return(num_members);
}
//--------------------------------------------------------------------------
-// Function: CompType::getMemberName
-///\brief Returns the name of a member in this compound datatype.
-///\param member_num - IN: Zero-based index of the member
-///\return Name of member
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: CompType::getMemberName
+///\brief Returns the name of a member in this compound datatype.
+///\param member_num - IN: Zero-based index of the member
+///\return Name of member
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-H5std_string CompType::getMemberName( unsigned member_num ) const
+H5std_string CompType::getMemberName(unsigned member_num) const
{
- char* member_name_C = H5Tget_member_name( id, member_num );
- if( member_name_C == NULL ) // NULL means failure
+ char* member_name_C = H5Tget_member_name(id, member_num);
+ if (member_name_C == NULL) // NULL means failure
{
- throw DataTypeIException("CompType::getMemberName",
- "H5Tget_member_name returns NULL for member name");
+ throw DataTypeIException("CompType::getMemberName",
+ "H5Tget_member_name returns NULL for member name");
}
H5std_string member_name = H5std_string(member_name_C); // convert C string to string
H5free_memory(member_name_C); // free the C string
- return( member_name ); // return the member name string
+ return(member_name); // return the member name string
}
//--------------------------------------------------------------------------
-// Function: CompType::getMemberIndex
-///\brief Returns the index of a member in this compound datatype.
-///\param name - IN: Name of the member
-///\return Index of member
-///\exception H5::DataTypeIException
+// Function: CompType::getMemberIndex
+///\brief Returns the index of a member in this compound datatype.
+///\param name - IN: Name of the member
+///\return Index of member
+///\exception H5::DataTypeIException
///\par Description
-/// Members are stored in no particular order with numbers 0
-/// through N-1, where N is the value returned by the member
-/// function \c CompType::getNmembers.
-// Programmer Binh-Minh Ribler - May 16, 2002
+/// Members are stored in no particular order with numbers 0
+/// through N-1, where N is the value returned by the member
+/// function \c CompType::getNmembers.
+// Programmer Binh-Minh Ribler - May 16, 2002
//--------------------------------------------------------------------------
int CompType::getMemberIndex(const char* name) const
{
- int member_index = H5Tget_member_index(id, name);
- if( member_index < 0 )
- {
- throw DataTypeIException("CompType::getMemberIndex",
- "H5Tget_member_index returns negative value");
- }
- return( member_index );
+ int member_index = H5Tget_member_index(id, name);
+ if (member_index < 0)
+ {
+ throw DataTypeIException("CompType::getMemberIndex",
+ "H5Tget_member_index returns negative value");
+ }
+ return(member_index);
}
int CompType::getMemberIndex(const H5std_string& name) const
{
- return(getMemberIndex(name.c_str()));
+ return(getMemberIndex(name.c_str()));
}
//--------------------------------------------------------------------------
-// Function: CompType::getMemberOffset
-///\brief Returns the byte offset of the beginning of a member with
-/// respect to the beginning of the compound data type datum.
-///\param member_num - IN: Zero-based index of the member
-///\return Byte offset
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: CompType::getMemberOffset
+///\brief Returns the byte offset of the beginning of a member with
+/// respect to the beginning of the compound data type datum.
+///\param member_num - IN: Zero-based index of the member
+///\return Byte offset
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
// Description
-/// Members are stored in no particular order with numbers 0
-/// through N-1, where N is the value returned by the member
-/// function \c CompType::getNmembers.
+/// Members are stored in no particular order with numbers 0
+/// through N-1, where N is the value returned by the member
+/// function \c CompType::getNmembers.
//
-// Note that byte offset being returned as 0 doesn't indicate
-// a failure. (According to Quincey)
+// Note that byte offset being returned as 0 doesn't indicate
+// a failure. (According to Quincey)
//--------------------------------------------------------------------------
-size_t CompType::getMemberOffset( unsigned member_num ) const
+size_t CompType::getMemberOffset(unsigned member_num) const
{
- size_t offset = H5Tget_member_offset( id, member_num );
- return( offset );
+ size_t offset = H5Tget_member_offset(id, member_num);
+ return(offset);
}
//--------------------------------------------------------------------------
-// Function: CompType::getMemberClass
-///\brief Gets the type class of the specified member.
-///\param member_num - IN: Zero-based index of the member
-///\return Type class of the member
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: CompType::getMemberClass
+///\brief Gets the type class of the specified member.
+///\param member_num - IN: Zero-based index of the member
+///\return Type class of the member
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
// Modification
-// Modified to use H5Tget_member_class instead. - Jul, 2005
+// Modified to use H5Tget_member_class instead. - Jul, 2005
//--------------------------------------------------------------------------
-H5T_class_t CompType::getMemberClass( unsigned member_num ) const
+H5T_class_t CompType::getMemberClass(unsigned member_num) const
{
- H5T_class_t member_class = H5Tget_member_class(id, member_num);
- if( member_class == H5T_NO_CLASS )
- {
- throw DataTypeIException("CompType::getMemberClass",
- "H5Tget_member_class returns H5T_NO_CLASS");
- }
- return(member_class);
+ H5T_class_t member_class = H5Tget_member_class(id, member_num);
+ if (member_class == H5T_NO_CLASS)
+ {
+ throw DataTypeIException("CompType::getMemberClass",
+ "H5Tget_member_class returns H5T_NO_CLASS");
+ }
+ return(member_class);
}
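As a usage sketch for the member-query functions above (getNmembers, getMemberName, getMemberOffset, getMemberClass), the helper below lists every member of a compound datatype; it assumes the caller already holds a valid CompType, for example one opened with the constructors earlier in this file:

    #include <iostream>
    #include "H5Cpp.h"
    using namespace H5;

    // Print index, name, byte offset, and type class for every member.
    void printMembers(const CompType& ctype)
    {
        int nmembers = ctype.getNmembers();                 // H5Tget_nmembers
        for (int i = 0; i < nmembers; i++)
        {
            H5std_string name = ctype.getMemberName(i);     // H5Tget_member_name
            size_t offset     = ctype.getMemberOffset(i);   // H5Tget_member_offset
            H5T_class_t cls   = ctype.getMemberClass(i);    // H5Tget_member_class
            std::cout << i << ": " << name << " at offset " << offset
                      << ", class " << cls << std::endl;
        }
    }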
// This private member function calls the C API to get the identifier
@@ -202,184 +238,184 @@ H5T_class_t CompType::getMemberClass( unsigned member_num ) const
// the sub-types.
hid_t CompType::p_get_member_type(unsigned member_num) const
{
- // get the id of the specified member first
- hid_t member_type_id = H5Tget_member_type( id, member_num );
- if( member_type_id > 0 )
- return( member_type_id );
- else
- {
- // p_get_member_type is private, caller will catch this exception
- // then throw another with appropriate API name
- throw DataTypeIException("", "H5Tget_member_type failed");
- }
+ // get the id of the specified member first
+ hid_t member_type_id = H5Tget_member_type(id, member_num);
+ if (member_type_id > 0)
+ return(member_type_id);
+ else
+ {
+ // p_get_member_type is private, caller will catch this exception
+ // then throw another with appropriate API name
+ throw DataTypeIException("", "H5Tget_member_type failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: CompType::getMemberDataType
-///\brief Returns the generic datatype of the specified member in this
-/// compound datatype.
-///\param member_num - IN: Zero-based index of the member
-///\return DataType instance
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: CompType::getMemberDataType
+///\brief Returns the generic datatype of the specified member in this
+/// compound datatype.
+///\param member_num - IN: Zero-based index of the member
+///\return DataType instance
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-DataType CompType::getMemberDataType( unsigned member_num ) const
+DataType CompType::getMemberDataType(unsigned member_num) const
{
- try {
- DataType datatype;
- f_DataType_setId(&datatype, p_get_member_type(member_num));
- return(datatype);
- }
- catch (DataTypeIException& E) {
- throw DataTypeIException("CompType::getMemberDataType", E.getDetailMsg());
- }
+ try {
+ DataType datatype;
+ f_DataType_setId(&datatype, p_get_member_type(member_num));
+ return(datatype);
+ }
+ catch (DataTypeIException& E) {
+ throw DataTypeIException("CompType::getMemberDataType", E.getDetailMsg());
+ }
}
//--------------------------------------------------------------------------
-// Function: CompType::getMemberArrayType
-///\brief Returns the array datatype of the specified member in this
-/// compound datatype.
-///\param member_num - IN: Zero-based index of the member
-///\return ArrayType instance
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - Jul, 2005
+// Function: CompType::getMemberArrayType
+///\brief Returns the array datatype of the specified member in this
+/// compound datatype.
+///\param member_num - IN: Zero-based index of the member
+///\return ArrayType instance
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - Jul, 2005
//--------------------------------------------------------------------------
-ArrayType CompType::getMemberArrayType( unsigned member_num ) const
+ArrayType CompType::getMemberArrayType(unsigned member_num) const
{
- try {
- ArrayType arraytype(p_get_member_type(member_num));
- f_DataType_setId(&arraytype, p_get_member_type(member_num));
- return(arraytype);
- }
- catch (DataTypeIException& E) {
- throw DataTypeIException("CompType::getMemberArrayType", E.getDetailMsg());
- }
+ try {
+ ArrayType arraytype(p_get_member_type(member_num));
+ f_DataType_setId(&arraytype, p_get_member_type(member_num));
+ return(arraytype);
+ }
+ catch (DataTypeIException& E) {
+ throw DataTypeIException("CompType::getMemberArrayType", E.getDetailMsg());
+ }
}
//--------------------------------------------------------------------------
-// Function: CompType::getMemberCompType
-///\brief Returns the compound datatype of the specified member in this
-/// compound datatype.
-///\param member_num - IN: Zero-based index of the member
-///\return CompType instance
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: CompType::getMemberCompType
+///\brief Returns the compound datatype of the specified member in this
+/// compound datatype.
+///\param member_num - IN: Zero-based index of the member
+///\return CompType instance
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-CompType CompType::getMemberCompType( unsigned member_num ) const
+CompType CompType::getMemberCompType(unsigned member_num) const
{
try {
CompType comptype(p_get_member_type(member_num));
- f_DataType_setId(&comptype, p_get_member_type(member_num));
+ f_DataType_setId(&comptype, p_get_member_type(member_num));
return(comptype);
- }
- catch (DataTypeIException& E) {
- throw DataTypeIException("CompType::getMemberCompType", E.getDetailMsg());
- }
+ }
+ catch (DataTypeIException& E) {
+ throw DataTypeIException("CompType::getMemberCompType", E.getDetailMsg());
+ }
}
//--------------------------------------------------------------------------
-// Function: CompType::getMemberEnumType
-///\brief Returns the enumeration datatype of the specified member in
-/// this compound datatype.
-///\param member_num - IN: Zero-based index of the member
-///\return EnumType instance
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: CompType::getMemberEnumType
+///\brief Returns the enumeration datatype of the specified member in
+/// this compound datatype.
+///\param member_num - IN: Zero-based index of the member
+///\return EnumType instance
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-EnumType CompType::getMemberEnumType( unsigned member_num ) const
+EnumType CompType::getMemberEnumType(unsigned member_num) const
{
try {
EnumType enumtype(p_get_member_type(member_num));
- f_DataType_setId(&enumtype, p_get_member_type(member_num));
+ f_DataType_setId(&enumtype, p_get_member_type(member_num));
return(enumtype);
- }
- catch (DataTypeIException& E) {
- throw DataTypeIException("CompType::getMemberEnumType", E.getDetailMsg());
- }
+ }
+ catch (DataTypeIException& E) {
+ throw DataTypeIException("CompType::getMemberEnumType", E.getDetailMsg());
+ }
}
//--------------------------------------------------------------------------
-// Function: CompType::getMemberIntType
-///\brief Returns the integer datatype of the specified member in this
-/// compound datatype.
-///\param member_num - IN: Zero-based index of the member
-///\return IntType instance
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: CompType::getMemberIntType
+///\brief Returns the integer datatype of the specified member in this
+/// compound datatype.
+///\param member_num - IN: Zero-based index of the member
+///\return IntType instance
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-IntType CompType::getMemberIntType( unsigned member_num ) const
+IntType CompType::getMemberIntType(unsigned member_num) const
{
try {
IntType inttype(p_get_member_type(member_num));
- f_DataType_setId(&inttype, p_get_member_type(member_num));
+ f_DataType_setId(&inttype, p_get_member_type(member_num));
return(inttype);
- }
- catch (DataTypeIException& E) {
- throw DataTypeIException("CompType::getMemberIntType", E.getDetailMsg());
- }
+ }
+ catch (DataTypeIException& E) {
+ throw DataTypeIException("CompType::getMemberIntType", E.getDetailMsg());
+ }
}
//--------------------------------------------------------------------------
-// Function: CompType::getMemberFloatType
-///\brief Returns the floating-point datatype of the specified member
-/// in this compound datatype.
-///\param member_num - IN: Zero-based index of the member
-///\return FloatType instance
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: CompType::getMemberFloatType
+///\brief Returns the floating-point datatype of the specified member
+/// in this compound datatype.
+///\param member_num - IN: Zero-based index of the member
+///\return FloatType instance
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-FloatType CompType::getMemberFloatType( unsigned member_num ) const
+FloatType CompType::getMemberFloatType(unsigned member_num) const
{
try {
FloatType floatype(p_get_member_type(member_num));
- f_DataType_setId(&floatype, p_get_member_type(member_num));
+ f_DataType_setId(&floatype, p_get_member_type(member_num));
return(floatype);
- }
- catch (DataTypeIException& E) {
- throw DataTypeIException("CompType::getMemberFloatType", E.getDetailMsg());
- }
+ }
+ catch (DataTypeIException& E) {
+ throw DataTypeIException("CompType::getMemberFloatType", E.getDetailMsg());
+ }
}
//--------------------------------------------------------------------------
-// Function: CompType::getMemberStrType
-///\brief Returns the string datatype of the specified member in this
-/// compound datatype.
-///\param member_num - IN: Zero-based index of the member
-///\return StrType instance
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: CompType::getMemberStrType
+///\brief Returns the string datatype of the specified member in this
+/// compound datatype.
+///\param member_num - IN: Zero-based index of the member
+///\return StrType instance
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-StrType CompType::getMemberStrType( unsigned member_num ) const
+StrType CompType::getMemberStrType(unsigned member_num) const
{
try {
StrType strtype(p_get_member_type(member_num));
- f_DataType_setId(&strtype, p_get_member_type(member_num));
+ f_DataType_setId(&strtype, p_get_member_type(member_num));
return(strtype);
- }
- catch (DataTypeIException& E) {
- throw DataTypeIException("CompType::getMemberStrType", E.getDetailMsg());
- }
+ }
+ catch (DataTypeIException& E) {
+ throw DataTypeIException("CompType::getMemberStrType", E.getDetailMsg());
+ }
}
//--------------------------------------------------------------------------
-// Function: CompType::getMemberVarLenType
-///\brief Returns the variable length datatype of the specified member
-/// in this compound datatype.
-///\param member_num - IN: Zero-based index of the member
-///\return VarLenType instance
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - Jul, 2005
+// Function: CompType::getMemberVarLenType
+///\brief Returns the variable length datatype of the specified member
+/// in this compound datatype.
+///\param member_num - IN: Zero-based index of the member
+///\return VarLenType instance
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - Jul, 2005
//--------------------------------------------------------------------------
-VarLenType CompType::getMemberVarLenType( unsigned member_num ) const
+VarLenType CompType::getMemberVarLenType(unsigned member_num) const
{
try {
VarLenType varlentype(p_get_member_type(member_num));
- f_DataType_setId(&varlentype, p_get_member_type(member_num));
+ f_DataType_setId(&varlentype, p_get_member_type(member_num));
return(varlentype);
- }
- catch (DataTypeIException& E) {
- throw DataTypeIException("CompType::getMemberVarLenType", E.getDetailMsg());
- }
+ }
+ catch (DataTypeIException& E) {
+ throw DataTypeIException("CompType::getMemberVarLenType", E.getDetailMsg());
+ }
}
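The getMemberXxxType accessors above are usually paired with getMemberClass; a small sketch of that pattern, returning the storage size of one member (the member index passed in is assumed to be valid):

    #include "H5Cpp.h"
    using namespace H5;

    // Pick the accessor that matches the member's type class and report
    // that member's storage size; fall back to the generic DataType.
    size_t memberSize(const CompType& ctype, unsigned member_num)
    {
        switch (ctype.getMemberClass(member_num))
        {
            case H5T_INTEGER:
                return ctype.getMemberIntType(member_num).getSize();
            case H5T_FLOAT:
                return ctype.getMemberFloatType(member_num).getSize();
            case H5T_STRING:
                return ctype.getMemberStrType(member_num).getSize();
            default:
                return ctype.getMemberDataType(member_num).getSize();
        }
    }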
/* old style of getMemberType - using overloads; new style above
@@ -388,27 +424,27 @@ VarLenType CompType::getMemberVarLenType( unsigned member_num ) const
May, 2004: These should be reconsidered to provide more convenience.
// Returns the datatype of the specified member in this compound datatype.
// Several overloading of getMemberType are for different datatypes
-void CompType::getMemberType( unsigned member_num, EnumType& enumtype ) const
+void CompType::getMemberType(unsigned member_num, EnumType& enumtype) const
{
p_get_member_type(member_num, enumtype);
}
-void CompType::getMemberType( unsigned member_num, CompType& comptype ) const
+void CompType::getMemberType(unsigned member_num, CompType& comptype) const
{
p_get_member_type(member_num, comptype);
}
-void CompType::getMemberType( unsigned member_num, IntType& inttype ) const
+void CompType::getMemberType(unsigned member_num, IntType& inttype) const
{
p_get_member_type(member_num, inttype);
}
-void CompType::getMemberType( unsigned member_num, FloatType& floatype ) const
+void CompType::getMemberType(unsigned member_num, FloatType& floatype) const
{
p_get_member_type(member_num, floatype);
}
-void CompType::getMemberType( unsigned member_num, StrType& strtype ) const
+void CompType::getMemberType(unsigned member_num, StrType& strtype) const
{
p_get_member_type(member_num, strtype);
}
@@ -416,55 +452,55 @@ void CompType::getMemberType( unsigned member_num, StrType& strtype ) const
*/
//--------------------------------------------------------------------------
-// Function: CompType::insertMember
-///\brief Inserts a new member to this compound datatype.
-///\param name - IN: Name of the new member
-///\param offset - IN: Offset in memory structure of the field to insert
-///\param new_member - IN: New member to be inserted
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: CompType::insertMember
+///\brief Inserts a new member into this compound datatype.
+///\param name - IN: Name of the new member
+///\param offset - IN: Offset in memory structure of the field to insert
+///\param new_member - IN: New member to be inserted
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void CompType::insertMember( const H5std_string& name, size_t offset, const DataType& new_member ) const
+void CompType::insertMember(const H5std_string& name, size_t offset, const DataType& new_member) const
{
- // Convert string to C-string
- const char* name_C;
- name_C = name.c_str(); // name_C refers to the contents of name as a C-str
-
- hid_t new_member_id = new_member.getId(); // get new_member id for C API
-
- // Call C routine H5Tinsert to add the new member
- herr_t ret_value = H5Tinsert( id, name_C, offset, new_member_id );
- if( ret_value < 0 )
- {
- throw DataTypeIException("CompType::insertMember", "H5Tinsert failed");
- }
+ // Convert string to C-string
+ const char* name_C;
+ name_C = name.c_str(); // name_C refers to the contents of name as a C-str
+
+ hid_t new_member_id = new_member.getId(); // get new_member id for C API
+
+ // Call C routine H5Tinsert to add the new member
+ herr_t ret_value = H5Tinsert(id, name_C, offset, new_member_id);
+ if (ret_value < 0)
+ {
+ throw DataTypeIException("CompType::insertMember", "H5Tinsert failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: CompType::pack
-///\brief Recursively removes padding from within a compound datatype.
+// Function: CompType::pack
+///\brief Recursively removes padding from within a compound datatype.
///
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
void CompType::pack() const
{
- // Calls C routine H5Tpack to remove padding
- herr_t ret_value = H5Tpack( id );
- if( ret_value < 0 )
- {
- throw DataTypeIException("CompType::pack", "H5Tpack failed");
- }
+ // Calls C routine H5Tpack to remove padding
+ herr_t ret_value = H5Tpack(id);
+ if (ret_value < 0)
+ {
+ throw DataTypeIException("CompType::pack", "H5Tpack failed");
+ }
}
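A sketch of how insertMember and pack are typically combined to describe a C struct; the struct sensor_t and its field names are hypothetical:

    #include "H5Cpp.h"
    using namespace H5;

    // A hypothetical in-memory record that the compound type will describe.
    typedef struct {
        int    serial_no;
        double temperature;
    } sensor_t;

    CompType makeSensorType()
    {
        // Empty compound type sized for the whole struct (H5Tcreate).
        CompType ctype(sizeof(sensor_t));

        // One insertMember call per field; HOFFSET computes the in-memory
        // byte offset of each field (H5Tinsert).
        ctype.insertMember("serial_no",   HOFFSET(sensor_t, serial_no),   PredType::NATIVE_INT);
        ctype.insertMember("temperature", HOFFSET(sensor_t, temperature), PredType::NATIVE_DOUBLE);

        // pack() would squeeze out padding; apply it to a copy used as the
        // on-disk type, not to this memory type, or the offsets above will
        // no longer match the struct layout.
        // ctype.pack();

        return ctype;
    }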
//--------------------------------------------------------------------------
-// Function: CompType::setSize
-///\brief Sets the total size for this compound datatype.
-///\param size - IN: Size to set
-///\exception H5::DataTypeIException
+// Function: CompType::setSize
+///\brief Sets the total size for this compound datatype.
+///\param size - IN: Size to set
+///\exception H5::DataTypeIException
// Note
-// H5Tset_size works on atom datatypes and compound datatypes only
-// Programmer Binh-Minh Ribler - 2014
+// H5Tset_size works on atom datatypes and compound datatypes only
+// Programmer Binh-Minh Ribler - 2014
//--------------------------------------------------------------------------
void CompType::setSize(size_t size) const
{
@@ -472,7 +508,7 @@ void CompType::setSize(size_t size) const
herr_t ret_value = H5Tset_size(id, size);
if (ret_value < 0)
{
- throw DataTypeIException("CompType::setSize", "H5Tset_size failed");
+ throw DataTypeIException("CompType::setSize", "H5Tset_size failed");
}
}
diff --git a/c++/src/H5CompType.h b/c++/src/H5CompType.h
index 6552f9e..018d875 100644
--- a/c++/src/H5CompType.h
+++ b/c++/src/H5CompType.h
@@ -6,12 +6,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef __H5CompType_H
@@ -27,90 +25,96 @@ namespace H5 {
*/
class H5_DLLCPP CompType : public DataType {
public:
- // Default constructor
- CompType();
+ // Default constructor
+ CompType();
- // Creates a compound datatype using an existing id
- CompType( const hid_t existing_id );
+ // Creates a compound datatype using an existing id
+ CompType(const hid_t existing_id);
- // Creates a new compound datatype, given the type's size
- CompType( size_t size ); // H5Tcreate
+ // Creates a new compound datatype, given the type's size
+ CompType(size_t size); // H5Tcreate
- // Gets the compound datatype of the specified dataset
- CompType( const DataSet& dataset ); // H5Dget_type
+ // Gets the compound datatype of the specified dataset
+ CompType(const DataSet& dataset); // H5Dget_type
- // Copy constructor - makes a copy of original object
- CompType( const CompType& original );
+ // Copy constructor - makes a copy of original object
+ CompType(const CompType& original);
- // Returns the type class of the specified member of this compound
- // datatype. It provides to the user a way of knowing what type
- // to create another datatype of the same class
- H5T_class_t getMemberClass( unsigned member_num ) const;
+ // Constructors that open a compound datatype, given a location.
+ CompType(const H5Location& loc, const char* name);
+ CompType(const H5Location& loc, const H5std_string& name);
- // Returns the index of a member in this compound data type.
- int getMemberIndex(const char* name) const;
- int getMemberIndex(const H5std_string& name) const;
+ // Returns the type class of the specified member of this compound
+ // datatype. It provides to the user a way of knowing what type
+ // to create another datatype of the same class
+ H5T_class_t getMemberClass(unsigned member_num) const;
- // Returns the offset of a member of this compound datatype.
- size_t getMemberOffset( unsigned memb_no ) const;
+ // Returns the index of a member in this compound data type.
+ int getMemberIndex(const char* name) const;
+ int getMemberIndex(const H5std_string& name) const;
- // Returns the name of a member of this compound datatype.
- H5std_string getMemberName( unsigned member_num ) const;
+ // Returns the offset of a member of this compound datatype.
+ size_t getMemberOffset(unsigned memb_no) const;
- // Returns the generic datatype of the specified member in
- // this compound datatype.
- DataType getMemberDataType( unsigned member_num ) const;
+ // Returns the name of a member of this compound datatype.
+ H5std_string getMemberName(unsigned member_num) const;
- // Returns the array datatype of the specified member in
- // this compound datatype.
- ArrayType getMemberArrayType( unsigned member_num ) const;
+ // Returns the generic datatype of the specified member in
+ // this compound datatype.
+ DataType getMemberDataType(unsigned member_num) const;
- // Returns the compound datatype of the specified member in
- // this compound datatype.
- CompType getMemberCompType( unsigned member_num ) const;
+ // Returns the array datatype of the specified member in
+ // this compound datatype.
+ ArrayType getMemberArrayType(unsigned member_num) const;
- // Returns the enumeration datatype of the specified member in
- // this compound datatype.
- EnumType getMemberEnumType( unsigned member_num ) const;
+ // Returns the compound datatype of the specified member in
+ // this compound datatype.
+ CompType getMemberCompType(unsigned member_num) const;
- // Returns the integer datatype of the specified member in
- // this compound datatype.
- IntType getMemberIntType( unsigned member_num ) const;
+ // Returns the enumeration datatype of the specified member in
+ // this compound datatype.
+ EnumType getMemberEnumType(unsigned member_num) const;
- // Returns the floating-point datatype of the specified member in
- // this compound datatype.
- FloatType getMemberFloatType( unsigned member_num ) const;
+ // Returns the integer datatype of the specified member in
+ // this compound datatype.
+ IntType getMemberIntType(unsigned member_num) const;
- // Returns the string datatype of the specified member in
- // this compound datatype.
- StrType getMemberStrType( unsigned member_num ) const;
+ // Returns the floating-point datatype of the specified member in
+ // this compound datatype.
+ FloatType getMemberFloatType(unsigned member_num) const;
- // Returns the variable length datatype of the specified member in
- // this compound datatype.
- VarLenType getMemberVarLenType( unsigned member_num ) const;
+ // Returns the string datatype of the specified member in
+ // this compound datatype.
+ StrType getMemberStrType(unsigned member_num) const;
- // Returns the number of members in this compound datatype.
- int getNmembers() const;
+ // Returns the variable length datatype of the specified member in
+ // this compound datatype.
+ VarLenType getMemberVarLenType(unsigned member_num) const;
- // Adds a new member to this compound datatype.
- void insertMember( const H5std_string& name, size_t offset, const DataType& new_member ) const;
+ // Returns the number of members in this compound datatype.
+ int getNmembers() const;
- // Recursively removes padding from within this compound datatype.
- void pack() const;
+ // Adds a new member to this compound datatype.
+ void insertMember(const H5std_string& name, size_t offset, const DataType& new_member) const;
- // Sets the total size for this compound datatype.
- void setSize(size_t size) const;
+ // Recursively removes padding from within this compound datatype.
+ void pack() const;
- ///\brief Returns this class name.
- virtual H5std_string fromClass () const { return("CompType"); }
+ // Sets the total size for this compound datatype.
+ void setSize(size_t size) const;
- // Noop destructor.
- virtual ~CompType();
+ ///\brief Returns this class name.
+ virtual H5std_string fromClass () const { return("CompType"); }
+
+ // Noop destructor.
+ virtual ~CompType();
private:
- // Contains common code that is used by the member functions
- // getMemberXxxType
- hid_t p_get_member_type(unsigned member_num) const;
-};
-}
+ // Contains common code that is used by the member functions
+ // getMemberXxxType
+ hid_t p_get_member_type(unsigned member_num) const;
+
+}; // end of CompType
+} // namespace H5
+
#endif // __H5CompType_H
diff --git a/c++/src/H5Cpp.h b/c++/src/H5Cpp.h
index 800eb90..09914e8 100644
--- a/c++/src/H5Cpp.h
+++ b/c++/src/H5Cpp.h
@@ -6,12 +6,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef __H5Cpp_H
@@ -27,6 +25,7 @@
#include "H5OcreatProp.h"
#include "H5DcreatProp.h"
#include "H5DxferProp.h"
+#include "H5LaccProp.h"
#include "H5Location.h"
#include "H5Object.h"
#include "H5AbstractDs.h"
@@ -42,6 +41,7 @@
#include "H5ArrayType.h"
#include "H5VarLenType.h"
#include "H5DataSet.h"
+#include "H5CommonFG.h"
#include "H5Group.h"
#include "H5File.h"
#include "H5Library.h"
diff --git a/c++/src/H5CppDoc.h b/c++/src/H5CppDoc.h
index 2420586..543f49b 100644
--- a/c++/src/H5CppDoc.h
+++ b/c++/src/H5CppDoc.h
@@ -6,12 +6,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef __H5CppDoc_H
@@ -67,34 +65,34 @@
* <br />
*/
-/// This example shows how to create datasets.
+/// This example shows how to create datasets.
///\par
///\example create.cpp
///\par
-/// This example shows how to write datasets.
+/// This example shows how to write datasets.
///\example writedata.cpp
///\par
-/// This example shows how to read datasets.
+/// This example shows how to read datasets.
///\example readdata.cpp
///\par
-/// This example shows how to create a compound datatype,
-/// write an array which has the compound datatype to the file,
-/// and read back fields' subsets.
+/// This example shows how to create a compound datatype,
+/// write an array which has the compound datatype to the file,
+/// and read back fields' subsets.
///\example compound.cpp
///\par
-/// This example shows how to work with extendible datasets.
+/// This example shows how to work with extendible datasets.
///\example extend_ds.cpp
///\par
-/// This example shows how to read data from a chunked dataset.
+/// This example shows how to read data from a chunked dataset.
///\example chunks.cpp
///\par
-/// This example shows how to work with groups.
+/// This example shows how to work with groups.
///\example h5group.cpp
#endif // __H5CppDoc_H
diff --git a/c++/src/H5DataSet.cpp b/c++/src/H5DataSet.cpp
index 749a933..dd2fd21 100644
--- a/c++/src/H5DataSet.cpp
+++ b/c++/src/H5DataSet.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifdef OLD_HEADER_FILENAME
@@ -20,6 +18,7 @@
#endif
#include <string>
+#include "H5private.h" // for HDfree
#include "H5Include.h"
#include "H5Exception.h"
#include "H5IdComponent.h"
@@ -29,39 +28,37 @@
#include "H5OcreatProp.h"
#include "H5DxferProp.h"
#include "H5DcreatProp.h"
+#include "H5LaccProp.h"
#include "H5Location.h"
#include "H5Object.h"
#include "H5DataType.h"
#include "H5DataSpace.h"
#include "H5AbstractDs.h"
-#include "H5Group.h"
-#include "H5File.h"
#include "H5Attribute.h"
#include "H5DataSet.h"
-#include "H5private.h" // for HDfree
namespace H5 {
using std::cerr;
using std::endl;
//--------------------------------------------------------------------------
-// Function: DataSet default constructor
-///\brief Default constructor: creates a stub DataSet.
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataSet default constructor
+///\brief Default constructor: creates a stub DataSet.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
DataSet::DataSet() : H5Object(), AbstractDs(), id(H5I_INVALID_HID) {}
//--------------------------------------------------------------------------
-// Function: DataSet overloaded constructor
-///\brief Creates an DataSet object using the id of an existing dataset.
-///\param existing_id - IN: Id of an existing dataset
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataSet overloaded constructor
+///\brief Creates a DataSet object using the id of an existing dataset.
+///\param existing_id - IN: Id of an existing dataset
+// Programmer Binh-Minh Ribler - 2000
// Description
-// incRefCount() is needed here to prevent the id from being closed
-// prematurely. That is, when application uses the id of an
-// existing DataSet object to create another DataSet object. So,
-// when one of those objects is deleted, the id will be closed if
-// the reference counter is only 1.
+// incRefCount() is needed here to prevent the id from being closed
+// prematurely. That is, when application uses the id of an
+// existing DataSet object to create another DataSet object. So,
+// when one of those objects is deleted, the id will be closed if
+// the reference counter is only 1.
//--------------------------------------------------------------------------
DataSet::DataSet(const hid_t existing_id) : H5Object(), AbstractDs(), id(existing_id)
{
@@ -69,10 +66,10 @@ DataSet::DataSet(const hid_t existing_id) : H5Object(), AbstractDs(), id(existin
}
//--------------------------------------------------------------------------
-// Function: DataSet copy constructor
-///\brief Copy constructor: makes a copy of the original DataSet object.
-///\param original - IN: DataSet instance to copy
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataSet copy constructor
+///\brief Copy constructor: makes a copy of the original DataSet object.
+///\param original - IN: DataSet instance to copy
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
DataSet::DataSet(const DataSet& original) : H5Object(), AbstractDs(), id(original.id)
{
@@ -80,22 +77,22 @@ DataSet::DataSet(const DataSet& original) : H5Object(), AbstractDs(), id(origina
}
//--------------------------------------------------------------------------
-// Function: DataSet overload constructor - dereference
-///\brief Given a reference, ref, to an hdf5 location, creates a
-/// DataSet object
-///\param loc - IN: Dataset reference object is in or location of
-/// object that the dataset is located within.
-///\param ref - IN: Reference pointer
-///\param ref_type - IN: Reference type - default to H5R_OBJECT
-///\param plist - IN: Property list - default to PropList::DEFAULT
-///\exception H5::DataSetIException
+// Function: DataSet overload constructor - dereference
+///\brief Given a reference, ref, to an HDF5 location, creates a
+/// DataSet object.
+///\param loc - IN: Location that the dataset reference is in, or location of
+/// the object that the dataset is located within.
+///\param ref - IN: Reference pointer
+///\param ref_type - IN: Reference type - default to H5R_OBJECT
+///\param plist - IN: Property list - default to PropList::DEFAULT
+///\exception H5::DataSetIException
///\par Description
+/// \c loc can be a DataSet, Group, H5File, or named DataType, that is,
+/// a datatype that has been named by DataType::commit.
-// Programmer Binh-Minh Ribler - Oct, 2006
+/// \c loc can be DataSet, Group, H5File, or named DataType, that
+/// is a datatype that has been named by DataType::commit.
+// Programmer Binh-Minh Ribler - Oct, 2006
// Modification
-// Jul, 2008
-// Added for application convenience.
+// Jul, 2008
+// Added for application convenience.
//--------------------------------------------------------------------------
DataSet::DataSet(const H5Location& loc, const void* ref, H5R_type_t ref_type, const PropList& plist) : H5Object(), AbstractDs(), id(H5I_INVALID_HID)
{
@@ -103,18 +100,18 @@ DataSet::DataSet(const H5Location& loc, const void* ref, H5R_type_t ref_type, co
}
//--------------------------------------------------------------------------
-// Function: DataSet overload constructor - dereference
-///\brief Given a reference, ref, to an hdf5 attribute, creates a
-/// DataSet object
-///\param attr - IN: Specifying location where the referenced object is in
-///\param ref - IN: Reference pointer
-///\param ref_type - IN: Reference type - default to H5R_OBJECT
-///\param plist - IN: Property list - default to PropList::DEFAULT
-///\exception H5::ReferenceException
-// Programmer Binh-Minh Ribler - Oct, 2006
+// Function: DataSet overload constructor - dereference
+///\brief Given a reference, ref, to an HDF5 attribute, creates a
+/// DataSet object.
+///\param attr - IN: Attribute specifying the location where the referenced object is
+///\param ref - IN: Reference pointer
+///\param ref_type - IN: Reference type - default to H5R_OBJECT
+///\param plist - IN: Property list - default to PropList::DEFAULT
+///\exception H5::ReferenceException
+// Programmer Binh-Minh Ribler - Oct, 2006
// Modification
-// Jul, 2008
-// Added for application convenience.
+// Jul, 2008
+// Added for application convenience.
//--------------------------------------------------------------------------
DataSet::DataSet(const Attribute& attr, const void* ref, H5R_type_t ref_type, const PropList& plist) : H5Object(), AbstractDs(), id(H5I_INVALID_HID)
{
@@ -122,26 +119,26 @@ DataSet::DataSet(const Attribute& attr, const void* ref, H5R_type_t ref_type, co
}
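A sketch of the dereference constructors above; the dataset path "/dset1" is an assumption, and the object reference is created here with H5Location::reference purely for illustration:

    #include "H5Cpp.h"
    using namespace H5;

    // Create an object reference to an existing dataset, then open the
    // dataset again by dereferencing it.
    DataSet openByReference(H5File& file)
    {
        hobj_ref_t ref;
        file.reference(&ref, "/dset1");      // H5Rcreate, H5R_OBJECT by default

        // ref_type and plist fall back to H5R_OBJECT and PropList::DEFAULT.
        return DataSet(file, &ref);
    }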
//--------------------------------------------------------------------------
-// Function: DataSet::getSpace
-///\brief Gets a copy of the dataspace of this dataset.
-///\return DataSpace instance
-///\exception H5::DataSetIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataSet::getSpace
+///\brief Gets a copy of the dataspace of this dataset.
+///\return DataSpace instance
+///\exception H5::DataSetIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
DataSpace DataSet::getSpace() const
{
- // Calls C function H5Dget_space to get the id of the dataspace
- hid_t dataspace_id = H5Dget_space( id );
-
- // If the dataspace id is invalid, throw an exception
- if( dataspace_id < 0 )
- {
- throw DataSetIException("DataSet::getSpace", "H5Dget_space failed");
- }
- //create dataspace object using the existing id then return the object
- DataSpace data_space;
- f_DataSpace_setId(&data_space, dataspace_id);
- return( data_space );
+ // Calls C function H5Dget_space to get the id of the dataspace
+ hid_t dataspace_id = H5Dget_space(id);
+
+ // If the dataspace id is invalid, throw an exception
+ if (dataspace_id < 0)
+ {
+ throw DataSetIException("DataSet::getSpace", "H5Dget_space failed");
+ }
+ //create dataspace object using the existing id then return the object
+ DataSpace data_space;
+ f_DataSpace_setId(&data_space, dataspace_id);
+ return(data_space);
}
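A short sketch built on getSpace: query the rank and current dimensions of a dataset through its dataspace, using the standard DataSpace wrappers for H5Sget_simple_extent_ndims/dims:

    #include <vector>
    #include "H5Cpp.h"
    using namespace H5;

    // Return the current dimension sizes of a dataset.
    std::vector<hsize_t> datasetDims(const DataSet& dset)
    {
        DataSpace space = dset.getSpace();            // H5Dget_space
        int rank = space.getSimpleExtentNdims();
        if (rank <= 0)                                // scalar or null dataspace
            return std::vector<hsize_t>();

        std::vector<hsize_t> dims(rank);
        space.getSimpleExtentDims(&dims[0], NULL);
        return dims;
    }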
// This private member function calls the C API to get the identifier
@@ -149,57 +146,57 @@ DataSpace DataSet::getSpace() const
// by the various AbstractDs functions to get the specific datatype.
hid_t DataSet::p_get_type() const
{
- hid_t type_id = H5Dget_type( id );
- if( type_id > 0 )
- return( type_id );
- else
- {
- throw DataSetIException("", "H5Dget_type failed");
- }
+ hid_t type_id = H5Dget_type(id);
+ if (type_id > 0)
+ return(type_id);
+ else
+ {
+ throw DataSetIException("", "H5Dget_type failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: DataSet::getCreatePlist
-///\brief Gets the dataset creation property list.
-///\return DSetCreatPropList instance
-///\exception H5::DataSetIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataSet::getCreatePlist
+///\brief Gets the dataset creation property list.
+///\return DSetCreatPropList instance
+///\exception H5::DataSetIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
DSetCreatPropList DataSet::getCreatePlist() const
{
- hid_t create_plist_id = H5Dget_create_plist( id );
- if( create_plist_id < 0 )
- {
- throw DataSetIException("DataSet::getCreatePlist", "H5Dget_create_plist failed");
- }
-
- // create and return the DSetCreatPropList object
- DSetCreatPropList create_plist;
- f_PropList_setId(&create_plist, create_plist_id);
- return(create_plist);
+ hid_t create_plist_id = H5Dget_create_plist(id);
+ if (create_plist_id < 0)
+ {
+ throw DataSetIException("DataSet::getCreatePlist", "H5Dget_create_plist failed");
+ }
+
+ // create and return the DSetCreatPropList object
+ DSetCreatPropList create_plist;
+ f_PropList_setId(&create_plist, create_plist_id);
+ return(create_plist);
}
//--------------------------------------------------------------------------
-// Function: DataSet::getStorageSize
-///\brief Returns the amount of storage required for a dataset.
-///\return Size of the storage or 0, for no data
-///\exception H5::DataSetIException
-// Note: H5Dget_storage_size returns 0 when there is no data. This
-// function should have no failure. (from SLU)
-// Programmer Binh-Minh Ribler - Mar, 2005
+// Function: DataSet::getStorageSize
+///\brief Returns the amount of storage required for a dataset.
+///\return Size of the storage, or 0 if the dataset contains no data
+///\exception H5::DataSetIException
+// Note: H5Dget_storage_size returns 0 when there is no data. This
+// function should not fail. (from SLU)
+// Programmer Binh-Minh Ribler - Mar, 2005
//--------------------------------------------------------------------------
hsize_t DataSet::getStorageSize() const
{
- hsize_t storage_size = H5Dget_storage_size(id);
- return(storage_size);
+ hsize_t storage_size = H5Dget_storage_size(id);
+ return(storage_size);
}
//--------------------------------------------------------------------------
-// Function: DataSet::getInMemDataSize
-///\brief Gets the size in memory of the dataset's data.
-///\return Size of data (in memory)
-///\exception H5::DataSetIException
-// Programmer Binh-Minh Ribler - Apr 2009
+// Function: DataSet::getInMemDataSize
+///\brief Gets the size in memory of the dataset's data.
+///\return Size of data (in memory)
+///\exception H5::DataSetIException
+// Programmer Binh-Minh Ribler - Apr 2009
//--------------------------------------------------------------------------
size_t DataSet::getInMemDataSize() const
{
@@ -207,9 +204,9 @@ size_t DataSet::getInMemDataSize() const
// Get the data type of this dataset
hid_t mem_type_id = H5Dget_type(id);
- if( mem_type_id < 0 )
+ if (mem_type_id < 0)
{
- throw DataSetIException(func, "H5Dget_type failed");
+ throw DataSetIException(func, "H5Dget_type failed");
}
// Get the data type's size by first getting its native type then getting
@@ -217,22 +214,22 @@ size_t DataSet::getInMemDataSize() const
hid_t native_type = H5Tget_native_type(mem_type_id, H5T_DIR_DEFAULT);
if (native_type < 0)
{
- throw DataSetIException(func, "H5Tget_native_type failed");
+ throw DataSetIException(func, "H5Tget_native_type failed");
}
size_t type_size = H5Tget_size(native_type);
if (type_size == 0)
{
- throw DataSetIException(func, "H5Tget_size failed");
+ throw DataSetIException(func, "H5Tget_size failed");
}
// Close the native type and the datatype of this dataset.
if (H5Tclose(native_type) < 0)
{
- throw DataSetIException(func, "H5Tclose(native_type) failed");
+ throw DataSetIException(func, "H5Tclose(native_type) failed");
}
if (H5Tclose(mem_type_id) < 0)
{
- throw DataSetIException(func, "H5Tclose(mem_type_id) failed");
+ throw DataSetIException(func, "H5Tclose(mem_type_id) failed");
}
// Get number of elements of the dataset by first getting its dataspace,
@@ -240,18 +237,18 @@ size_t DataSet::getInMemDataSize() const
hid_t space_id = H5Dget_space(id);
if (space_id < 0)
{
- throw DataSetIException(func, "H5Dget_space failed");
+ throw DataSetIException(func, "H5Dget_space failed");
}
hssize_t num_elements = H5Sget_simple_extent_npoints(space_id);
if (num_elements < 0)
{
- throw DataSetIException(func, "H5Sget_simple_extent_npoints failed");
+ throw DataSetIException(func, "H5Sget_simple_extent_npoints failed");
}
// Close the dataspace
if (H5Sclose(space_id) < 0)
{
- throw DataSetIException(func, "H5Sclose failed");
+ throw DataSetIException(func, "H5Sclose failed");
}
// Calculate and return the size of the data
@@ -260,184 +257,184 @@ size_t DataSet::getInMemDataSize() const
}
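To illustrate the size-related calls above (getCreatePlist, getStorageSize, getInMemDataSize), a sketch that contrasts on-disk storage with the in-memory footprint; DSetCreatPropList::getLayout is queried only to show why the two can differ for chunked or compressed data:

    #include <iostream>
    #include "H5Cpp.h"
    using namespace H5;

    // Report on-disk versus in-memory size of a dataset's data.
    void reportSizes(const DataSet& dset)
    {
        hsize_t on_disk = dset.getStorageSize();      // H5Dget_storage_size
        size_t  in_mem  = dset.getInMemDataSize();    // native element size * count

        DSetCreatPropList cparms = dset.getCreatePlist();
        H5D_layout_t layout = cparms.getLayout();     // H5Pget_layout

        std::cout << "storage: " << on_disk << " bytes, in memory: " << in_mem
                  << " bytes, layout: " << layout << std::endl;
    }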
//--------------------------------------------------------------------------
-// Function: DataSet::getOffset
-///\brief Returns the address of this dataset in the file.
-///\return Address of dataset
-///\exception H5::DataSetIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataSet::getOffset
+///\brief Returns the address of this dataset in the file.
+///\return Address of dataset
+///\exception H5::DataSetIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
haddr_t DataSet::getOffset() const
{
- haddr_t ds_addr; // for address of dataset
-
- ds_addr = H5Dget_offset(id);
- if( ds_addr == HADDR_UNDEF )
- {
- throw DataSetIException("DataSet::getOffset", "H5Dget_offset returned HADDR_UNDEF");
- }
- return(ds_addr);
+ haddr_t ds_addr; // for address of dataset
+
+ ds_addr = H5Dget_offset(id);
+ if (ds_addr == HADDR_UNDEF)
+ {
+ throw DataSetIException("DataSet::getOffset", "H5Dget_offset returned HADDR_UNDEF");
+ }
+ return(ds_addr);
}
//--------------------------------------------------------------------------
-// Function: DataSet::getSpaceStatus
-///\brief Determines whether space has been allocated for a dataset.
-///\param status - OUT: Space allocation status
-///\exception H5::DataSetIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataSet::getSpaceStatus
+///\brief Determines whether space has been allocated for a dataset.
+///\param status - OUT: Space allocation status
+///\exception H5::DataSetIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
void DataSet::getSpaceStatus(H5D_space_status_t& status) const
{
- herr_t ret_value = H5Dget_space_status(id, &status);
- if( ret_value < 0 )
- {
- throw DataSetIException("DataSet::getSpaceStatus", "H5Dget_space_status failed");
- }
+ herr_t ret_value = H5Dget_space_status(id, &status);
+ if (ret_value < 0)
+ {
+ throw DataSetIException("DataSet::getSpaceStatus", "H5Dget_space_status failed");
+ }
}
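A sketch combining getOffset and getSpaceStatus from above; since H5Dget_offset only yields a defined address for contiguous storage, the offset query is guarded here:

    #include <iostream>
    #include "H5Cpp.h"
    using namespace H5;

    // Check whether space is allocated and, for contiguous datasets,
    // where the data starts in the file.
    void reportAllocation(const DataSet& dset)
    {
        H5D_space_status_t status;
        dset.getSpaceStatus(status);                  // H5Dget_space_status

        if (status != H5D_SPACE_STATUS_ALLOCATED)
        {
            std::cout << "space not (fully) allocated yet" << std::endl;
            return;
        }

        // getOffset throws if the address is undefined (e.g. chunked layout).
        if (dset.getCreatePlist().getLayout() == H5D_CONTIGUOUS)
            std::cout << "data starts at file address " << dset.getOffset() << std::endl;
    }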
//--------------------------------------------------------------------------
-// Function: DataSet::getVlenBufSize
-///\brief Returns the number of bytes required to store VL data.
-///\param type - IN: Datatype, which is the datatype for the buffer
-///\param space - IN: Selection for the memory buffer
-///\return Amount of storage
-///\exception H5::DataSetIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataSet::getVlenBufSize
+///\brief Returns the number of bytes required to store VL data.
+///\param type - IN: Datatype, which is the datatype for the buffer
+///\param space - IN: Selection for the memory buffer
+///\return Amount of storage
+///\exception H5::DataSetIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-hsize_t DataSet::getVlenBufSize(const DataType& type, const DataSpace& space ) const
+hsize_t DataSet::getVlenBufSize(const DataType& type, const DataSpace& space) const
{
- // Obtain identifiers for C API
- hid_t type_id = type.getId();
- hid_t space_id = space.getId();
-
- hsize_t size; // for amount of storage
-
- herr_t ret_value = H5Dvlen_get_buf_size( id, type_id, space_id, &size );
- if( ret_value < 0 )
- {
- throw DataSetIException("DataSet::getVlenBufSize", "H5Dvlen_get_buf_size failed");
- }
- return( size );
+ // Obtain identifiers for C API
+ hid_t type_id = type.getId();
+ hid_t space_id = space.getId();
+
+ hsize_t size; // for amount of storage
+
+ herr_t ret_value = H5Dvlen_get_buf_size(id, type_id, space_id, &size);
+ if (ret_value < 0)
+ {
+ throw DataSetIException("DataSet::getVlenBufSize", "H5Dvlen_get_buf_size failed");
+ }
+ return(size);
}
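
A small sketch of getVlenBufSize(), assuming `dset` was opened elsewhere and stores variable-length data; the dataset's own datatype and full dataspace are used here to describe the prospective read buffer:

    #include <iostream>
    #include "H5Cpp.h"

    // Prints how many bytes a full read of the dataset's VL data would need.
    void reportVlenBufSize(const H5::DataSet& dset)
    {
        H5::DataType  type  = dset.getDataType();   // datatype of the data
        H5::DataSpace space = dset.getSpace();      // whole extent selected
        hsize_t size = dset.getVlenBufSize(type, space);
        std::cout << "VL read buffer size: " << size << " bytes\n";
    }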
//--------------------------------------------------------------------------
-// Function: DataSet::getVlenBufSize
-// Purpose This is an overloaded member function, kept for backward
-// compatibility. It differs from the above function in that it
-// misses const's. This wrapper will be removed in future release.
-// Return Amount of storage
-// Exception H5::DataSetIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataSet::getVlenBufSize
+// Purpose This is an overloaded member function, kept for backward
+// compatibility. It differs from the above function in that it
+// lacks the const qualifiers. This wrapper will be removed in a future release.
+// Return Amount of storage
+// Exception H5::DataSetIException
+// Programmer Binh-Minh Ribler - 2000
// Modification
-// Modified to call its replacement. -BMR, 2014/04/16
-// Removed from documentation. -BMR, 2016/03/07 1.8.17 and 1.10.0
-// Removed from code. -BMR, 2016/08/11 1.8.18 and 1.10.1
+// Modified to call its replacement. -BMR, 2014/04/16
+// Removed from documentation. -BMR, 2016/03/07 1.8.17 and 1.10.0
+// Removed from code. -BMR, 2016/08/11 1.8.18 and 1.10.1
//--------------------------------------------------------------------------
-//hsize_t DataSet::getVlenBufSize( DataType& type, DataSpace& space ) const
+//hsize_t DataSet::getVlenBufSize(DataType& type, DataSpace& space) const
//{
// return(getVlenBufSize(type, space));
//}
//--------------------------------------------------------------------------
-// Function: DataSet::vlenReclaim
-///\brief Reclaims VL datatype memory buffers.
-///\param type - IN: Datatype, which is the datatype stored in the buffer
-///\param space - IN: Selection for the memory buffer to free the
-/// VL datatypes within
-///\param xfer_plist - IN: Property list used to create the buffer
-///\param buf - IN: Pointer to the buffer to be reclaimed
-///\exception H5::DataSetIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataSet::vlenReclaim
+///\brief Reclaims VL datatype memory buffers.
+///\param type - IN: Datatype, which is the datatype stored in the buffer
+///\param space - IN: Selection for the memory buffer to free the
+/// VL datatypes within
+///\param xfer_plist - IN: Property list used to create the buffer
+///\param buf - IN: Pointer to the buffer to be reclaimed
+///\exception H5::DataSetIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void DataSet::vlenReclaim(const DataType& type, const DataSpace& space, const DSetMemXferPropList& xfer_plist, void* buf )
+void DataSet::vlenReclaim(const DataType& type, const DataSpace& space, const DSetMemXferPropList& xfer_plist, void* buf)
{
- // Obtain identifiers for C API
- hid_t type_id = type.getId();
- hid_t space_id = space.getId();
- hid_t xfer_plist_id = xfer_plist.getId();
-
- herr_t ret_value = H5Dvlen_reclaim( type_id, space_id, xfer_plist_id, buf );
- if( ret_value < 0 )
- {
- throw DataSetIException("DataSet::vlenReclaim", "H5Dvlen_reclaim failed");
- }
+ // Obtain identifiers for C API
+ hid_t type_id = type.getId();
+ hid_t space_id = space.getId();
+ hid_t xfer_plist_id = xfer_plist.getId();
+
+ herr_t ret_value = H5Dvlen_reclaim(type_id, space_id, xfer_plist_id, buf);
+ if (ret_value < 0)
+ {
+ throw DataSetIException("DataSet::vlenReclaim", "H5Dvlen_reclaim failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: DataSet::vlenReclaim
-///\brief Reclaims VL datatype memory buffers.
-///\param type - IN: Datatype, which is the datatype stored in the buffer
-///\param space - IN: Selection for the memory buffer to free the
-/// VL datatypes within
-///\param xfer_plist - IN: Property list used to create the buffer
-///\param buf - IN: Pointer to the buffer to be reclaimed
-///\exception H5::DataSetIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataSet::vlenReclaim
+///\brief Reclaims VL datatype memory buffers.
+///\param type - IN: Datatype, which is the datatype stored in the buffer
+///\param space - IN: Selection for the memory buffer to free the
+/// VL datatypes within
+///\param xfer_plist - IN: Property list used to create the buffer
+///\param buf - IN: Pointer to the buffer to be reclaimed
+///\exception H5::DataSetIException
+// Programmer Binh-Minh Ribler - 2000
//\par Description
-// This function has better prototype for the users than the
-// other, which might be removed at some point. BMR - 2006/12/20
+// This function has a better prototype for users than the
+// other one, which might be removed at some point. BMR - 2006/12/20
//--------------------------------------------------------------------------
void DataSet::vlenReclaim(void* buf, const DataType& type, const DataSpace& space, const DSetMemXferPropList& xfer_plist)
{
- // Obtain identifiers for C API
- hid_t type_id = type.getId();
- hid_t space_id = space.getId();
- hid_t xfer_plist_id = xfer_plist.getId();
-
- herr_t ret_value = H5Dvlen_reclaim(type_id, space_id, xfer_plist_id, buf);
- if (ret_value < 0)
- {
- throw DataSetIException("DataSet::vlenReclaim", "H5Dvlen_reclaim failed");
- }
+ // Obtain identifiers for C API
+ hid_t type_id = type.getId();
+ hid_t space_id = space.getId();
+ hid_t xfer_plist_id = xfer_plist.getId();
+
+ herr_t ret_value = H5Dvlen_reclaim(type_id, space_id, xfer_plist_id, buf);
+ if (ret_value < 0)
+ {
+ throw DataSetIException("DataSet::vlenReclaim", "H5Dvlen_reclaim failed");
+ }
}
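
A sketch of the static vlenReclaim(buf, type, space, xfer_plist) overload after reading variable-length strings; it assumes `dset` holds VL strings and uses the common C_S1/H5T_VARIABLE StrType construction:

    #include <iostream>
    #include <vector>
    #include "H5Cpp.h"

    // Reads VL strings, prints them, then hands the library-allocated
    // char* buffers back to the C library.
    void readAndReclaim(const H5::DataSet& dset)
    {
        H5::StrType   strtype(H5::PredType::C_S1, H5T_VARIABLE);
        H5::DataSpace space = dset.getSpace();
        std::vector<char*> buf(space.getSimpleExtentNpoints());

        dset.read(buf.data(), strtype, space, space);
        for (char* s : buf)
            std::cout << s << '\n';

        // space and the default transfer plist describe the buffers to free.
        H5::DataSet::vlenReclaim(buf.data(), strtype, space);
    }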
//--------------------------------------------------------------------------
-// Function: DataSet::read
-///\brief Reads raw data from the specified dataset.
-///\param buf - IN: Buffer for read data
-///\param mem_type - IN: Memory datatype
-///\param mem_space - IN: Memory dataspace
-///\param file_space - IN: Dataset's dataspace in the file
-///\param xfer_plist - IN: Transfer property list for this I/O operation
-///\exception H5::DataSetIException
+// Function: DataSet::read
+///\brief Reads raw data from the specified dataset.
+///\param buf - IN: Buffer for read data
+///\param mem_type - IN: Memory datatype
+///\param mem_space - IN: Memory dataspace
+///\param file_space - IN: Dataset's dataspace in the file
+///\param xfer_plist - IN: Transfer property list for this I/O operation
+///\exception H5::DataSetIException
///\par Description
-/// This function reads raw data from this dataset into the
-/// buffer \a buf, converting from file datatype and dataspace
-/// to memory datatype \a mem_type and dataspace \a mem_space.
-// Programmer Binh-Minh Ribler - 2000
+/// This function reads raw data from this dataset into the
+/// buffer \a buf, converting from file datatype and dataspace
+/// to memory datatype \a mem_type and dataspace \a mem_space.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void DataSet::read( void* buf, const DataType& mem_type, const DataSpace& mem_space, const DataSpace& file_space, const DSetMemXferPropList& xfer_plist ) const
+void DataSet::read(void* buf, const DataType& mem_type, const DataSpace& mem_space, const DataSpace& file_space, const DSetMemXferPropList& xfer_plist) const
{
- // Obtain identifiers for C API
- hid_t mem_type_id = mem_type.getId();
- hid_t mem_space_id = mem_space.getId();
- hid_t file_space_id = file_space.getId();
- hid_t xfer_plist_id = xfer_plist.getId();
-
- herr_t ret_value = H5Dread( id, mem_type_id, mem_space_id, file_space_id, xfer_plist_id, buf );
- if( ret_value < 0 )
- {
- throw DataSetIException("DataSet::read", "H5Dread failed");
- }
+ // Obtain identifiers for C API
+ hid_t mem_type_id = mem_type.getId();
+ hid_t mem_space_id = mem_space.getId();
+ hid_t file_space_id = file_space.getId();
+ hid_t xfer_plist_id = xfer_plist.getId();
+
+ herr_t ret_value = H5Dread(id, mem_type_id, mem_space_id, file_space_id, xfer_plist_id, buf);
+ if (ret_value < 0)
+ {
+ throw DataSetIException("DataSet::read", "H5Dread failed");
+ }
}
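
A sketch of a whole-dataset read through read(), relying on the DataSpace::ALL and DSetMemXferPropList::DEFAULT default arguments; "data.h5" and "/ints" are placeholder names:

    #include <vector>
    #include "H5Cpp.h"
    using namespace H5;

    std::vector<int> readInts()
    {
        H5File  file("data.h5", H5F_ACC_RDONLY);
        DataSet dset = file.openDataSet("/ints");

        DataSpace fspace = dset.getSpace();
        std::vector<int> buf(fspace.getSimpleExtentNpoints());

        // mem_space, file_space and xfer_plist fall back to their defaults.
        dset.read(buf.data(), PredType::NATIVE_INT);
        return buf;
    }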
//--------------------------------------------------------------------------
-// Function: DataSet::read
-///\brief This is an overloaded member function, provided for convenience.
-/// It takes a reference to a \c H5std_string for the buffer.
-///\param strg - IN: Buffer for read data string
-///\param mem_type - IN: Memory datatype
-///\param mem_space - IN: Memory dataspace
-///\param file_space - IN: Dataset's dataspace in the file
-///\param xfer_plist - IN: Transfer property list for this I/O operation
-///\exception H5::DataSetIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataSet::read
+///\brief This is an overloaded member function, provided for convenience.
+/// It takes a reference to an \c H5std_string for the buffer.
+///\param strg - IN: Buffer for read data string
+///\param mem_type - IN: Memory datatype
+///\param mem_space - IN: Memory dataspace
+///\param file_space - IN: Dataset's dataspace in the file
+///\param xfer_plist - IN: Transfer property list for this I/O operation
+///\exception H5::DataSetIException
+// Programmer Binh-Minh Ribler - 2000
// Modification
-// Jul 2009
-// Follow the change to Attribute::read and use the following
-// private functions to read datasets with fixed- and
-// variable-length string:
-// DataSet::p_read_fixed_len and
-// DataSet::p_read_variable_len
+// Jul 2009
+// Follow the change to Attribute::read and use the following
+// private functions to read datasets with fixed- and
+// variable-length strings:
+// DataSet::p_read_fixed_len and
+// DataSet::p_read_variable_len
//--------------------------------------------------------------------------
void DataSet::read(H5std_string& strg, const DataType& mem_type, const DataSpace& mem_space, const DataSpace& file_space, const DSetMemXferPropList& xfer_plist) const
{
@@ -466,47 +463,47 @@ void DataSet::read(H5std_string& strg, const DataType& mem_type, const DataSpace
}
//--------------------------------------------------------------------------
-// Function: DataSet::write
-///\brief Writes raw data from an application buffer to a dataset.
-///\param buf - IN: Buffer containing data to be written
-///\param mem_type - IN: Memory datatype
-///\param mem_space - IN: Memory dataspace
-///\param file_space - IN: Dataset's dataspace in the file
-///\param xfer_plist - IN: Transfer property list for this I/O operation
-///\exception H5::DataSetIException
+// Function: DataSet::write
+///\brief Writes raw data from an application buffer to a dataset.
+///\param buf - IN: Buffer containing data to be written
+///\param mem_type - IN: Memory datatype
+///\param mem_space - IN: Memory dataspace
+///\param file_space - IN: Dataset's dataspace in the file
+///\param xfer_plist - IN: Transfer property list for this I/O operation
+///\exception H5::DataSetIException
///\par Description
-/// This function writes raw data from an application buffer
-/// \a buf to a dataset, converting from memory datatype
-/// \a mem_type and dataspace \a mem_space to file datatype
-/// and dataspace.
-// Programmer Binh-Minh Ribler - 2000
+/// This function writes raw data from an application buffer
+/// \a buf to a dataset, converting from memory datatype
+/// \a mem_type and dataspace \a mem_space to file datatype
+/// and dataspace.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void DataSet::write( const void* buf, const DataType& mem_type, const DataSpace& mem_space, const DataSpace& file_space, const DSetMemXferPropList& xfer_plist ) const
+void DataSet::write(const void* buf, const DataType& mem_type, const DataSpace& mem_space, const DataSpace& file_space, const DSetMemXferPropList& xfer_plist) const
{
- // Obtain identifiers for C API
- hid_t mem_type_id = mem_type.getId();
- hid_t mem_space_id = mem_space.getId();
- hid_t file_space_id = file_space.getId();
- hid_t xfer_plist_id = xfer_plist.getId();
-
- herr_t ret_value = H5Dwrite( id, mem_type_id, mem_space_id, file_space_id, xfer_plist_id, buf );
- if( ret_value < 0 )
- {
- throw DataSetIException("DataSet::write", "H5Dwrite failed");
- }
+ // Obtain identifiers for C API
+ hid_t mem_type_id = mem_type.getId();
+ hid_t mem_space_id = mem_space.getId();
+ hid_t file_space_id = file_space.getId();
+ hid_t xfer_plist_id = xfer_plist.getId();
+
+ herr_t ret_value = H5Dwrite(id, mem_type_id, mem_space_id, file_space_id, xfer_plist_id, buf);
+ if (ret_value < 0)
+ {
+ throw DataSetIException("DataSet::write", "H5Dwrite failed");
+ }
}
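
A sketch of write() paired with createDataSet(); the 4x6 extent, file name and dataset path are placeholders, and converting NATIVE_INT in memory to STD_I32LE in the file is left to the library:

    #include "H5Cpp.h"
    using namespace H5;

    void writeExample()
    {
        const hsize_t dims[2] = {4, 6};
        int data[4][6] = {};                    // zero-filled sample buffer

        H5File    file("data.h5", H5F_ACC_TRUNC);
        DataSpace space(2, dims);
        DataSet   dset = file.createDataSet("/ints", PredType::STD_I32LE, space);

        dset.write(data, PredType::NATIVE_INT);
    }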
//--------------------------------------------------------------------------
-// Function: DataSet::write
-///\brief This is an overloaded member function, provided for convenience.
-/// It takes a reference to a \c H5std_string for the buffer.
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataSet::write
+///\brief This is an overloaded member function, provided for convenience.
+/// It takes a reference to an \c H5std_string for the buffer.
+// Programmer Binh-Minh Ribler - 2000
// Modification
-// Jul 2009
-// Modified to pass the buffer into H5Dwrite properly depending
-// whether the dataset has variable- or fixed-length string.
+// Jul 2009
+// Modified to pass the buffer into H5Dwrite properly, depending
+// on whether the dataset has a variable- or fixed-length string.
//--------------------------------------------------------------------------
-void DataSet::write( const H5std_string& strg, const DataType& mem_type, const DataSpace& mem_space, const DataSpace& file_space, const DSetMemXferPropList& xfer_plist ) const
+void DataSet::write(const H5std_string& strg, const DataType& mem_type, const DataSpace& mem_space, const DataSpace& file_space, const DSetMemXferPropList& xfer_plist) const
{
 // Check if this dataset has variable-len string or fixed-len string and
// proceed appropriately.
@@ -516,11 +513,11 @@ void DataSet::write( const H5std_string& strg, const DataType& mem_type, const D
throw DataSetIException("DataSet::write", "H5Tis_variable_str failed");
}
- // Obtain identifiers for C API
- hid_t mem_type_id = mem_type.getId();
- hid_t mem_space_id = mem_space.getId();
- hid_t file_space_id = file_space.getId();
- hid_t xfer_plist_id = xfer_plist.getId();
+ // Obtain identifiers for C API
+ hid_t mem_type_id = mem_type.getId();
+ hid_t mem_space_id = mem_space.getId();
+ hid_t file_space_id = file_space.getId();
+ hid_t xfer_plist_id = xfer_plist.getId();
// Convert string to C-string
const char* strg_C;
@@ -530,12 +527,12 @@ void DataSet::write( const H5std_string& strg, const DataType& mem_type, const D
 // Pass the string in differently depending on variable or fixed length
if (!is_variable_len)
{
- ret_value = H5Dwrite( id, mem_type_id, mem_space_id, file_space_id, xfer_plist_id, strg_C );
+ ret_value = H5Dwrite(id, mem_type_id, mem_space_id, file_space_id, xfer_plist_id, strg_C);
}
else
{
// passing string argument by address
- ret_value = H5Dwrite( id, mem_type_id, mem_space_id, file_space_id, xfer_plist_id, &strg_C );
+ ret_value = H5Dwrite(id, mem_type_id, mem_space_id, file_space_id, xfer_plist_id, &strg_C);
}
if (ret_value < 0)
{
@@ -544,65 +541,65 @@ void DataSet::write( const H5std_string& strg, const DataType& mem_type, const D
}
//--------------------------------------------------------------------------
-// Function: DataSet::iterateElems
-///\brief Iterates over all selected elements in a dataspace.
-///\param buf - IN/OUT: Pointer to the buffer in memory containing the
-/// elements to iterate over
-///\param type - IN: Datatype for the elements stored in \a buf
-///\param space - IN: Dataspace for \a buf. Also contains the selection
-/// to iterate over.
-///\param op - IN: Function pointer to the routine to be called for
-/// each element in \a buf iterated over
-///\param op_data - IN/OUT: Pointer to any user-defined data associated
-/// with the operation
-///\exception H5::DataSetIException
-///\note This function may not work correctly yet - it's still
-/// under development.
-// Programmer Binh-Minh Ribler - 2000
-//--------------------------------------------------------------------------
-int DataSet::iterateElems( void* buf, const DataType& type, const DataSpace& space, H5D_operator_t op, void* op_data )
+// Function: DataSet::iterateElems
+///\brief Iterates over all selected elements in a dataspace.
+///\param buf - IN/OUT: Pointer to the buffer in memory containing the
+/// elements to iterate over
+///\param type - IN: Datatype for the elements stored in \a buf
+///\param space - IN: Dataspace for \a buf. Also contains the selection
+/// to iterate over.
+///\param op - IN: Function pointer to the routine to be called for
+/// each element in \a buf iterated over
+///\param op_data - IN/OUT: Pointer to any user-defined data associated
+/// with the operation
+///\exception H5::DataSetIException
+///\note This function may not work correctly yet - it's still
+/// under development.
+// Programmer Binh-Minh Ribler - 2000
+//--------------------------------------------------------------------------
+int DataSet::iterateElems(void* buf, const DataType& type, const DataSpace& space, H5D_operator_t op, void* op_data)
{
- // Obtain identifiers for C API
- hid_t type_id = type.getId();
- hid_t space_id = space.getId();
- herr_t ret_value = H5Diterate( buf, type_id, space_id, op, op_data );
- if( ret_value >= 0 )
- return( ret_value );
- else // raise exception when H5Diterate returns a negative value
- {
- throw DataSetIException("DataSet::iterateElems", "H5Diterate failed");
- }
+ // Obtain identifiers for C API
+ hid_t type_id = type.getId();
+ hid_t space_id = space.getId();
+ herr_t ret_value = H5Diterate(buf, type_id, space_id, op, op_data);
+ if (ret_value >= 0)
+ return(ret_value);
+ else // raise exception when H5Diterate returns a negative value
+ {
+ throw DataSetIException("DataSet::iterateElems", "H5Diterate failed");
+ }
}
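
A sketch of iterateElems() with a callback matching the C API's H5D_operator_t signature; since the note above flags the wrapper as still under development, treat this as illustrative only:

    #include "H5Cpp.h"

    // Callback: sums int elements; returning 0 continues the iteration.
    static herr_t sum_op(void* elem, hid_t /*type_id*/, unsigned /*ndim*/,
                         const hsize_t* /*point*/, void* op_data)
    {
        *static_cast<long*>(op_data) += *static_cast<int*>(elem);
        return 0;
    }

    // Sums every int in buf; buf and space are assumed to describe data
    // already read from the dataset.
    long sumElements(H5::DataSet& dset, int* buf, const H5::DataSpace& space)
    {
        long total = 0;
        dset.iterateElems(buf, H5::PredType::NATIVE_INT, space, sum_op, &total);
        return total;
    }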
//--------------------------------------------------------------------------
-// Function: DataSet::extend
-///\brief Extends a dataset with unlimited dimension.
-///\param size - IN: Array containing the new magnitude of each dimension
-///\exception H5::DataSetIException
+// Function: DataSet::extend
+///\brief Extends a dataset with unlimited dimension.
+///\param size - IN: Array containing the new magnitude of each dimension
+///\exception H5::DataSetIException
///\par Description
-/// For more information, please see the Description section in
-/// C layer Reference Manual at:
+/// For more information, please see the Description section in
+/// C layer Reference Manual at:
///\par
/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5D.html#Dataset-Extend
-// Programmer Binh-Minh Ribler - 2000
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void DataSet::extend( const hsize_t* size ) const
+void DataSet::extend(const hsize_t* size) const
{
- herr_t ret_value = H5Dset_extent( id, size );
- if( ret_value < 0 ) // raise exception when H5Dset_extent returns a neg value
- throw DataSetIException("DataSet::extend", "H5Dset_extent failed");
+ herr_t ret_value = H5Dset_extent(id, size);
+ if (ret_value < 0) // raise exception when H5Dset_extent returns a neg value
+ throw DataSetIException("DataSet::extend", "H5Dset_extent failed");
}
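
A sketch of extend(); it assumes the dataset is created chunked with an unlimited maximum dimension, since only such datasets can grow. The dataset path and sizes are placeholders:

    #include "H5Cpp.h"
    using namespace H5;

    void extendExample(H5File& file)
    {
        hsize_t dims[1]    = {10};
        hsize_t maxdims[1] = {H5S_UNLIMITED};
        hsize_t chunk[1]   = {10};

        DSetCreatPropList plist;
        plist.setChunk(1, chunk);               // chunking enables extension

        DataSpace space(1, dims, maxdims);
        DataSet   dset = file.createDataSet("/extendable",
                                            PredType::NATIVE_INT, space, plist);

        hsize_t newsize[1] = {25};
        dset.extend(newsize);                   // grow from 10 to 25 elements
    }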
//--------------------------------------------------------------------------
-// Function: DataSet::fillMemBuf
-///\brief Fills a selection in memory with a value.
-///\param fill - IN: Pointer to fill value to use - default NULL
-///\param fill_type - IN: Datatype of the fill value
-///\param buf - IN/OUT: Memory buffer to fill selection within
-///\param buf_type - IN: Datatype of the elements in buffer
-///\param space - IN: Dataspace describing memory buffer & containing selection to use
-///\exception H5::DataSetIException
-// Programmer Binh-Minh Ribler - 2014
+// Function: DataSet::fillMemBuf
+///\brief Fills a selection in memory with a value.
+///\param fill - IN: Pointer to fill value to use - default NULL
+///\param fill_type - IN: Datatype of the fill value
+///\param buf - IN/OUT: Memory buffer to fill selection within
+///\param buf_type - IN: Datatype of the elements in buffer
+///\param space - IN: Dataspace describing memory buffer & containing selection to use
+///\exception H5::DataSetIException
+// Programmer Binh-Minh Ribler - 2014
// Modification
//--------------------------------------------------------------------------
void DataSet::fillMemBuf(const void *fill, const DataType& fill_type, void *buf, const DataType& buf_type, const DataSpace& space) const
@@ -611,28 +608,28 @@ void DataSet::fillMemBuf(const void *fill, const DataType& fill_type, void *buf,
hid_t buf_type_id = buf_type.getId();
hid_t space_id = space.getId();
herr_t ret_value = H5Dfill(fill, fill_type_id, buf, buf_type_id, space_id);
- if( ret_value < 0 )
+ if (ret_value < 0)
{
- throw DataSetIException("DataSet::fillMemBuf", "H5Dfill failed");
+ throw DataSetIException("DataSet::fillMemBuf", "H5Dfill failed");
}
}
//--------------------------------------------------------------------------
-// Function: DataSet::fillMemBuf
-// Purpose This is an overloaded member function, kept for backward
-// compatibility. It differs from the above function in that it
-// misses const's. This wrapper will be removed in future release.
-// Param fill - IN: Pointer to fill value to use - default NULL
-// Param fill_type - IN: Datatype of the fill value
-// Param buf - IN/OUT: Memory buffer to fill selection within
-// Param buf_type - IN: Datatype of the elements in buffer
-// Param space - IN: Dataspace describing memory buffer & containing selection to use
-// Exception H5::DataSetIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataSet::fillMemBuf
+// Purpose This is an overloaded member function, kept for backward
+// compatibility. It differs from the above function in that it
+// lacks the const qualifiers. This wrapper will be removed in a future release.
+// Param fill - IN: Pointer to fill value to use - default NULL
+// Param fill_type - IN: Datatype of the fill value
+// Param buf - IN/OUT: Memory buffer to fill selection within
+// Param buf_type - IN: Datatype of the elements in buffer
+// Param space - IN: Dataspace describing memory buffer & containing selection to use
+// Exception H5::DataSetIException
+// Programmer Binh-Minh Ribler - 2000
// Modification
-// Modified to call its replacement. -BMR, 2014/04/16
-// Removed from documentation. -BMR, 2016/03/07 1.8.17 and 1.10.0
-// Removed from code. -BMR, 2016/08/11 1.8.18 and 1.10.1
+// Modified to call its replacement. -BMR, 2014/04/16
+// Removed from documentation. -BMR, 2016/03/07 1.8.17 and 1.10.0
+// Removed from code. -BMR, 2016/08/11 1.8.18 and 1.10.1
//--------------------------------------------------------------------------
//void DataSet::fillMemBuf(const void *fill, DataType& fill_type, void *buf, DataType& buf_type, DataSpace& space)
//{
@@ -640,39 +637,39 @@ void DataSet::fillMemBuf(const void *fill, const DataType& fill_type, void *buf,
//}
//--------------------------------------------------------------------------
-// Function: DataSet::fillMemBuf
-///\brief Fills a selection in memory with 0.
-///\param buf - IN/OUT: Memory buffer to fill selection within
-///\param buf_type - IN: Datatype of the elements in buffer
-///\param space - IN: Dataspace describing memory buffer & containing selection to use
-///\exception H5::DataSetIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataSet::fillMemBuf
+///\brief Fills a selection in memory with 0.
+///\param buf - IN/OUT: Memory buffer to fill selection within
+///\param buf_type - IN: Datatype of the elements in buffer
+///\param space - IN: Dataspace describing memory buffer & containing selection to use
+///\exception H5::DataSetIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
void DataSet::fillMemBuf(void *buf, const DataType& buf_type, const DataSpace& space) const
{
hid_t buf_type_id = buf_type.getId();
hid_t space_id = space.getId();
herr_t ret_value = H5Dfill(NULL, buf_type_id, buf, buf_type_id, space_id);
- if( ret_value < 0 )
+ if (ret_value < 0)
{
- throw DataSetIException("DataSet::fillMemBuf", "H5Dfill failed");
+ throw DataSetIException("DataSet::fillMemBuf", "H5Dfill failed");
}
}
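
A sketch of the zero-fill fillMemBuf() overload, using a hyperslab selection to mark the part of an in-memory buffer to clear; `dset` can be any open dataset, as only the memory buffer is modified:

    #include <vector>
    #include "H5Cpp.h"

    // Zeroes the first `count` ints of buf.
    void zeroPrefix(const H5::DataSet& dset, std::vector<int>& buf, hsize_t count)
    {
        hsize_t dims[1] = {buf.size()};
        H5::DataSpace space(1, dims);           // dataspace describing buf

        hsize_t start[1] = {0};
        hsize_t sel[1]   = {count};
        space.selectHyperslab(H5S_SELECT_SET, sel, start);   // elements [0, count)

        dset.fillMemBuf(buf.data(), H5::PredType::NATIVE_INT, space);
    }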
//--------------------------------------------------------------------------
// Function: DataSet::fillMemBuf
-// Purpose This is an overloaded member function, kept for backward
-// compatibility. It differs from the above function in that it
-// misses const's. This wrapper will be removed in future release.
+// Purpose This is an overloaded member function, kept for backward
+// compatibility. It differs from the above function in that it
+// lacks the const qualifiers. This wrapper will be removed in a future release.
// Param buf - IN/OUT: Memory buffer to fill selection within
// Param buf_type - IN: Datatype of the elements in buffer
// Param space - IN: Dataspace describing memory buffer & containing selection to use
-// Exception H5::DataSetIException
+// Exception H5::DataSetIException
// Programmer Binh-Minh Ribler - 2000
// Modification
-// Modified to call its replacement. -BMR, 2014/04/16
-// Removed from documentation. -BMR, 2016/03/07 1.8.17 and 1.10.0
-// Removed from code. -BMR, 2016/08/11 1.8.18 and 1.10.1
+// Modified to call its replacement. -BMR, 2014/04/16
+// Removed from documentation. -BMR, 2016/03/07 1.8.17 and 1.10.0
+// Removed from code. -BMR, 2016/08/11 1.8.18 and 1.10.1
//--------------------------------------------------------------------------
//void DataSet::fillMemBuf(void *buf, DataType& buf_type, DataSpace& space)
//{
@@ -681,8 +678,8 @@ void DataSet::fillMemBuf(void *buf, const DataType& buf_type, const DataSpace& s
//--------------------------------------------------------------------------
// Function: DataSet::getId
-///\brief Get the id of this dataset.
-///\return DataSet identifier
+///\brief Get the id of this dataset.
+///\return DataSet identifier
// Description:
// Class hierarchy is revised to address bugzilla 1068. Class
// AbstractDs and Attribute are moved out of H5Object. In
@@ -692,19 +689,19 @@ void DataSet::fillMemBuf(void *buf, const DataType& buf_type, const DataSpace& s
//--------------------------------------------------------------------------
hid_t DataSet::getId() const
{
- return(id);
+ return(id);
}
//--------------------------------------------------------------------------
-// Function: DataSet::p_read_fixed_len (private)
-// brief Reads a fixed length \a H5std_string from a dataset.
-// param mem_type - IN: DataSet datatype (in memory)
-// param strg - IN: Buffer for read string
-// exception H5::DataSetIException
-// Programmer Binh-Minh Ribler - Jul, 2009
+// Function: DataSet::p_read_fixed_len (private)
+// brief Reads a fixed length \a H5std_string from a dataset.
+// param mem_type - IN: DataSet datatype (in memory)
+// param strg - IN: Buffer for read string
+// exception H5::DataSetIException
+// Programmer Binh-Minh Ribler - Jul, 2009
// Modification
-// Jul 2009
-// Added in follow to the change in Attribute::read
+// Jul 2009
+// Added following the change in Attribute::read
//--------------------------------------------------------------------------
void DataSet::p_read_fixed_len(const hid_t mem_type_id, const hid_t mem_space_id, const hid_t file_space_id, const hid_t xfer_plist_id, H5std_string& strg) const
{
@@ -716,33 +713,33 @@ void DataSet::p_read_fixed_len(const hid_t mem_type_id, const hid_t mem_space_id
// If there is data, allocate buffer and read it.
if (data_size > 0)
{
- char *strg_C = new char [data_size+1];
- HDmemset(strg_C, 0, data_size+1); // clear buffer
+ char *strg_C = new char [data_size+1];
+ HDmemset(strg_C, 0, data_size+1); // clear buffer
- herr_t ret_value = H5Dread(id, mem_type_id, mem_space_id, file_space_id, xfer_plist_id, strg_C);
+ herr_t ret_value = H5Dread(id, mem_type_id, mem_space_id, file_space_id, xfer_plist_id, strg_C);
- if( ret_value < 0 )
- {
- delete []strg_C; // de-allocate for fixed-len string
- throw DataSetIException("DataSet::read", "H5Dread failed for fixed length string");
- }
+ if (ret_value < 0)
+ {
+ delete []strg_C; // de-allocate for fixed-len string
+ throw DataSetIException("DataSet::read", "H5Dread failed for fixed length string");
+ }
- // Get string from the C char* and release resource allocated locally
- strg = strg_C;
- delete []strg_C;
+ // Get string from the C char* and release resource allocated locally
+ strg = strg_C;
+ delete []strg_C;
}
}
//--------------------------------------------------------------------------
-// Function: DataSet::p_read_variable_len (private)
-// brief Reads a variable length \a H5std_string from an dataset.
-// param mem_type - IN: DataSet datatype (in memory)
-// param strg - IN: Buffer for read string
-// exception H5::DataSetIException
-// Programmer Binh-Minh Ribler - Jul, 2009
+// Function: DataSet::p_read_variable_len (private)
+// brief Reads a variable length \a H5std_string from a dataset.
+// param mem_type - IN: DataSet datatype (in memory)
+// param strg - IN: Buffer for read string
+// exception H5::DataSetIException
+// Programmer Binh-Minh Ribler - Jul, 2009
// Modification
-// Jul 2009
-// Added in follow to the change in Attribute::read
+// Jul 2009
+// Added following the change in Attribute::read
//--------------------------------------------------------------------------
void DataSet::p_read_variable_len(const hid_t mem_type_id, const hid_t mem_space_id, const hid_t file_space_id, const hid_t xfer_plist_id, H5std_string& strg) const
{
@@ -752,9 +749,9 @@ void DataSet::p_read_variable_len(const hid_t mem_type_id, const hid_t mem_space
// Read dataset, no allocation for variable-len string; C library will
herr_t ret_value = H5Dread(id, mem_type_id, mem_space_id, file_space_id, xfer_plist_id, &strg_C);
- if( ret_value < 0 )
+ if (ret_value < 0)
{
- throw DataSetIException("DataSet::read", "H5Dread failed for variable length string");
+ throw DataSetIException("DataSet::read", "H5Dread failed for variable length string");
}
// Get string from the C char* and release resource allocated by C API
@@ -806,43 +803,43 @@ void f_PropList_setId(PropList* plist, hid_t new_id)
#endif // DOXYGEN_SHOULD_SKIP_THIS
//--------------------------------------------------------------------------
-// Function: DataSet::close
-///\brief Closes this dataset.
+// Function: DataSet::close
+///\brief Closes this dataset.
///
-///\exception H5::DataSetIException
-// Programmer Binh-Minh Ribler - Mar 9, 2005
+///\exception H5::DataSetIException
+// Programmer Binh-Minh Ribler - Mar 9, 2005
//--------------------------------------------------------------------------
void DataSet::close()
{
if (p_valid_id(id))
{
- herr_t ret_value = H5Dclose( id );
- if( ret_value < 0 )
- {
- throw DataSetIException("DataSet::close", "H5Dclose failed");
- }
- // reset the id
- id = H5I_INVALID_HID;
+ herr_t ret_value = H5Dclose(id);
+ if (ret_value < 0)
+ {
+ throw DataSetIException("DataSet::close", "H5Dclose failed");
+ }
+ // reset the id
+ id = H5I_INVALID_HID;
}
}
//--------------------------------------------------------------------------
-// Function: DataSet destructor
-///\brief Properly terminates access to this dataset.
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataSet destructor
+///\brief Properly terminates access to this dataset.
+// Programmer Binh-Minh Ribler - 2000
// Modification
-// - Replaced resetIdComponent() with decRefCount() to use C
-// library ID reference counting mechanism - BMR, Jun 1, 2004
-// - Replaced decRefCount with close() to let the C library
-// handle the reference counting - BMR, Jun 1, 2006
+// - Replaced resetIdComponent() with decRefCount() to use C
+// library ID reference counting mechanism - BMR, Jun 1, 2004
+// - Replaced decRefCount with close() to let the C library
+// handle the reference counting - BMR, Jun 1, 2006
//--------------------------------------------------------------------------
DataSet::~DataSet()
{
try {
- close();
+ close();
}
catch (Exception& close_error) {
- cerr << "DataSet::~DataSet - " << close_error.getDetailMsg() << endl;
+ cerr << "DataSet::~DataSet - " << close_error.getDetailMsg() << endl;
}
}
diff --git a/c++/src/H5DataSet.h b/c++/src/H5DataSet.h
index ee9ef28..2f3eebe 100644
--- a/c++/src/H5DataSet.h
+++ b/c++/src/H5DataSet.h
@@ -6,12 +6,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef __H5DataSet_H
@@ -31,82 +29,82 @@ namespace H5 {
class H5_DLLCPP DataSet : public H5Object, public AbstractDs {
public:
- // Close this dataset.
- virtual void close();
+ // Close this dataset.
+ virtual void close();
- // Extends the dataset with unlimited dimension.
- void extend( const hsize_t* size ) const;
+ // Extends the dataset with unlimited dimension.
+ void extend(const hsize_t* size) const;
- // Fills a selection in memory with a value
- void fillMemBuf(const void *fill, const DataType& fill_type, void *buf, const DataType& buf_type, const DataSpace& space) const;
- //void fillMemBuf(const void *fill, DataType& fill_type, void *buf, DataType& buf_type, DataSpace& space); // removed from 1.8.18 and 1.10.1
+ // Fills a selection in memory with a value
+ void fillMemBuf(const void *fill, const DataType& fill_type, void *buf, const DataType& buf_type, const DataSpace& space) const;
+ //void fillMemBuf(const void *fill, DataType& fill_type, void *buf, DataType& buf_type, DataSpace& space); // removed from 1.8.18 and 1.10.1
- // Fills a selection in memory with zero
- void fillMemBuf(void *buf, const DataType& buf_type, const DataSpace& space) const;
- //void fillMemBuf(void *buf, DataType& buf_type, DataSpace& space); // removed from 1.8.18 and 1.10.1
+ // Fills a selection in memory with zero
+ void fillMemBuf(void *buf, const DataType& buf_type, const DataSpace& space) const;
+ //void fillMemBuf(void *buf, DataType& buf_type, DataSpace& space); // removed from 1.8.18 and 1.10.1
- // Gets the creation property list of this dataset.
- DSetCreatPropList getCreatePlist() const;
+ // Gets the creation property list of this dataset.
+ DSetCreatPropList getCreatePlist() const;
- // Returns the address of this dataset in the file.
- haddr_t getOffset() const;
+ // Returns the address of this dataset in the file.
+ haddr_t getOffset() const;
- // Gets the dataspace of this dataset.
- virtual DataSpace getSpace() const;
+ // Gets the dataspace of this dataset.
+ virtual DataSpace getSpace() const;
- // Determines whether space has been allocated for a dataset.
- void getSpaceStatus(H5D_space_status_t& status) const;
+ // Determines whether space has been allocated for a dataset.
+ void getSpaceStatus(H5D_space_status_t& status) const;
- // Returns the amount of storage size required for this dataset.
- virtual hsize_t getStorageSize() const;
+ // Returns the amount of storage required for this dataset.
+ virtual hsize_t getStorageSize() const;
- // Returns the in memory size of this attribute's data.
- virtual size_t getInMemDataSize() const;
+ // Returns the in-memory size of this dataset's data.
+ virtual size_t getInMemDataSize() const;
- // Returns the number of bytes required to store VL data.
- hsize_t getVlenBufSize(const DataType& type, const DataSpace& space ) const;
- //hsize_t getVlenBufSize(DataType& type, DataSpace& space) const; // removed from 1.8.18 and 1.10.1
+ // Returns the number of bytes required to store VL data.
+ hsize_t getVlenBufSize(const DataType& type, const DataSpace& space) const;
+ //hsize_t getVlenBufSize(DataType& type, DataSpace& space) const; // removed from 1.8.18 and 1.10.1
- // Reclaims VL datatype memory buffers.
- static void vlenReclaim(const DataType& type, const DataSpace& space, const DSetMemXferPropList& xfer_plist, void* buf );
- static void vlenReclaim(void *buf, const DataType& type, const DataSpace& space = DataSpace::ALL, const DSetMemXferPropList& xfer_plist = DSetMemXferPropList::DEFAULT);
+ // Reclaims VL datatype memory buffers.
+ static void vlenReclaim(const DataType& type, const DataSpace& space, const DSetMemXferPropList& xfer_plist, void* buf);
+ static void vlenReclaim(void *buf, const DataType& type, const DataSpace& space = DataSpace::ALL, const DSetMemXferPropList& xfer_plist = DSetMemXferPropList::DEFAULT);
- // Reads the data of this dataset and stores it in the provided buffer.
- // The memory and file dataspaces and the transferring property list
- // can be defaults.
- void read( void* buf, const DataType& mem_type, const DataSpace& mem_space = DataSpace::ALL, const DataSpace& file_space = DataSpace::ALL, const DSetMemXferPropList& xfer_plist = DSetMemXferPropList::DEFAULT ) const;
- void read( H5std_string& buf, const DataType& mem_type, const DataSpace& mem_space = DataSpace::ALL, const DataSpace& file_space = DataSpace::ALL, const DSetMemXferPropList& xfer_plist = DSetMemXferPropList::DEFAULT ) const;
+ // Reads the data of this dataset and stores it in the provided buffer.
+ // The memory and file dataspaces and the transferring property list
+ // can be defaults.
+ void read(void* buf, const DataType& mem_type, const DataSpace& mem_space = DataSpace::ALL, const DataSpace& file_space = DataSpace::ALL, const DSetMemXferPropList& xfer_plist = DSetMemXferPropList::DEFAULT) const;
+ void read(H5std_string& buf, const DataType& mem_type, const DataSpace& mem_space = DataSpace::ALL, const DataSpace& file_space = DataSpace::ALL, const DSetMemXferPropList& xfer_plist = DSetMemXferPropList::DEFAULT) const;
- // Writes the buffered data to this dataset.
- // The memory and file dataspaces and the transferring property list
- // can be defaults.
- void write( const void* buf, const DataType& mem_type, const DataSpace& mem_space = DataSpace::ALL, const DataSpace& file_space = DataSpace::ALL, const DSetMemXferPropList& xfer_plist = DSetMemXferPropList::DEFAULT ) const;
- void write( const H5std_string& buf, const DataType& mem_type, const DataSpace& mem_space = DataSpace::ALL, const DataSpace& file_space = DataSpace::ALL, const DSetMemXferPropList& xfer_plist = DSetMemXferPropList::DEFAULT ) const;
+ // Writes the buffered data to this dataset.
+ // The memory and file dataspaces and the transferring property list
+ // can be defaults.
+ void write(const void* buf, const DataType& mem_type, const DataSpace& mem_space = DataSpace::ALL, const DataSpace& file_space = DataSpace::ALL, const DSetMemXferPropList& xfer_plist = DSetMemXferPropList::DEFAULT) const;
+ void write(const H5std_string& buf, const DataType& mem_type, const DataSpace& mem_space = DataSpace::ALL, const DataSpace& file_space = DataSpace::ALL, const DSetMemXferPropList& xfer_plist = DSetMemXferPropList::DEFAULT) const;
- // Iterates the selected elements in the specified dataspace - not implemented in C++ style yet
- int iterateElems( void* buf, const DataType& type, const DataSpace& space, H5D_operator_t op, void* op_data = NULL );
+ // Iterates over the selected elements in the specified dataspace - not implemented in C++ style yet
+ int iterateElems(void* buf, const DataType& type, const DataSpace& space, H5D_operator_t op, void* op_data = NULL);
- ///\brief Returns this class name.
- virtual H5std_string fromClass () const { return("DataSet"); }
+ ///\brief Returns this class name.
+ virtual H5std_string fromClass () const { return("DataSet"); }
- // Creates a dataset by way of dereference.
- DataSet(const H5Location& loc, const void* ref, H5R_type_t ref_type = H5R_OBJECT, const PropList& plist = PropList::DEFAULT);
- DataSet(const Attribute& attr, const void* ref, H5R_type_t ref_type = H5R_OBJECT, const PropList& plist = PropList::DEFAULT);
+ // Creates a dataset by way of dereference.
+ DataSet(const H5Location& loc, const void* ref, H5R_type_t ref_type = H5R_OBJECT, const PropList& plist = PropList::DEFAULT);
+ DataSet(const Attribute& attr, const void* ref, H5R_type_t ref_type = H5R_OBJECT, const PropList& plist = PropList::DEFAULT);
- // Default constructor.
- DataSet();
+ // Default constructor.
+ DataSet();
- // Copy constructor.
- DataSet( const DataSet& original );
+ // Copy constructor.
+ DataSet(const DataSet& original);
- // Creates a copy of an existing DataSet using its id.
- DataSet(const hid_t existing_id);
+ // Creates a copy of an existing DataSet using its id.
+ DataSet(const hid_t existing_id);
// Gets the dataset id.
virtual hid_t getId() const;
- // Destructor: properly terminates access to this dataset.
- virtual ~DataSet();
+ // Destructor: properly terminates access to this dataset.
+ virtual ~DataSet();
protected:
#ifndef DOXYGEN_SHOULD_SKIP_THIS
@@ -115,21 +113,22 @@ class H5_DLLCPP DataSet : public H5Object, public AbstractDs {
#endif // DOXYGEN_SHOULD_SKIP_THIS
private:
- hid_t id; // HDF5 dataset id
+ hid_t id; // HDF5 dataset id
// This function contains the common code that is used by
// getTypeClass and various API functions getXxxType
// defined in AbstractDs for generic datatype and specific
// sub-types
- virtual hid_t p_get_type() const;
+ virtual hid_t p_get_type() const;
- // Reads variable or fixed len strings from this dataset.
- void p_read_fixed_len(const hid_t mem_type_id, const hid_t mem_space_id, const hid_t file_space_id, const hid_t xfer_plist_id, H5std_string& strg) const;
- void p_read_variable_len(const hid_t mem_type_id, const hid_t mem_space_id, const hid_t file_space_id, const hid_t xfer_plist_id, H5std_string& strg) const;
+ // Reads variable or fixed len strings from this dataset.
+ void p_read_fixed_len(const hid_t mem_type_id, const hid_t mem_space_id, const hid_t file_space_id, const hid_t xfer_plist_id, H5std_string& strg) const;
+ void p_read_variable_len(const hid_t mem_type_id, const hid_t mem_space_id, const hid_t file_space_id, const hid_t xfer_plist_id, H5std_string& strg) const;
- // Friend function to set DataSet id. For library use only.
- friend void f_DataSet_setId(DataSet* dset, hid_t new_id);
+ // Friend function to set DataSet id. For library use only.
+ friend void f_DataSet_setId(DataSet* dset, hid_t new_id);
+
+}; // end of DataSet
+} // namespace H5
-};
-}
#endif // __H5DataSet_H
diff --git a/c++/src/H5DataSpace.cpp b/c++/src/H5DataSpace.cpp
index cb479e1..49b8ea3 100644
--- a/c++/src/H5DataSpace.cpp
+++ b/c++/src/H5DataSpace.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifdef OLD_HEADER_FILENAME
@@ -38,14 +36,14 @@ using std::endl;
DataSpace* DataSpace::ALL_ = 0;
//--------------------------------------------------------------------------
-// Function: DataSpace::getConstant
-// Creates a DataSpace object representing the HDF5 constant
-// H5S_ALL, pointed to by DataSpace::ALL_
-// Exception H5::DataSpaceIException
+// Function: DataSpace::getConstant
+// Creates a DataSpace object representing the HDF5 constant
+// H5S_ALL, pointed to by DataSpace::ALL_
+// Exception H5::DataSpaceIException
// Description
-// If DataSpace::ALL_ already points to an allocated object, throw
-// a DataSpaceIException. This scenario should not happen.
-// Programmer Binh-Minh Ribler - 2015
+// If DataSpace::ALL_ already points to an allocated object, throw
+// a DataSpaceIException. This scenario should not happen.
+// Programmer Binh-Minh Ribler - 2015
//--------------------------------------------------------------------------
DataSpace* DataSpace::getConstant()
{
@@ -78,55 +76,55 @@ void DataSpace::deleteConstants()
}
//--------------------------------------------------------------------------
-// Purpose Constant for default dataspace.
+// Purpose Constant for default dataspace.
//--------------------------------------------------------------------------
const DataSpace& DataSpace::ALL = *getConstant();
#endif // DOXYGEN_SHOULD_SKIP_THIS
//--------------------------------------------------------------------------
-// Function: DataSpace constructor
-///\brief Creates a new dataspace given a dataspace type.
-///\param type - IN: Type of the dataspace to be created, which
-/// currently can be either \c H5S_SCALAR or \c H5S_SIMPLE;
-/// default to \c H5S_SCALAR.
-///\exception H5::DataSpaceIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataSpace constructor
+///\brief Creates a new dataspace given a dataspace type.
+///\param type - IN: Type of the dataspace to be created, which
+/// currently can be either \c H5S_SCALAR or \c H5S_SIMPLE;
+/// default to \c H5S_SCALAR.
+///\exception H5::DataSpaceIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
DataSpace::DataSpace(H5S_class_t type) : IdComponent()
{
- id = H5Screate( type );
- if( id < 0 )
- {
- throw DataSpaceIException("DataSpace constructor", "H5Screate failed");
- }
+ id = H5Screate(type);
+ if (id < 0)
+ {
+ throw DataSpaceIException("DataSpace constructor", "H5Screate failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: DataSpace overloaded constructor
-///\brief Creates a new simple dataspace.
-///\param rank - IN: Number of dimensions of dataspace.
-///\param dims - IN: An array of the size of each dimension.
-///\param maxdims - IN: An array of the maximum size of each dimension.
-///\exception H5::DataSpaceIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataSpace overloaded constructor
+///\brief Creates a new simple dataspace.
+///\param rank - IN: Number of dimensions of dataspace.
+///\param dims - IN: An array of the size of each dimension.
+///\param maxdims - IN: An array of the maximum size of each dimension.
+///\exception H5::DataSpaceIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-DataSpace::DataSpace( int rank, const hsize_t * dims, const hsize_t * maxdims) : IdComponent()
+DataSpace::DataSpace(int rank, const hsize_t * dims, const hsize_t * maxdims) : IdComponent()
{
- id = H5Screate_simple( rank, dims, maxdims );
- if( id < 0 )
- {
- throw DataSpaceIException("DataSpace constructor", "H5Screate_simple failed");
- }
+ id = H5Screate_simple(rank, dims, maxdims);
+ if (id < 0)
+ {
+ throw DataSpaceIException("DataSpace constructor", "H5Screate_simple failed");
+ }
}
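
A sketch of the two constructors above; the 3x4 extent is a placeholder, and supplying a maxdims array with H5S_UNLIMITED makes a dimension growable (maxdims defaults to NULL, meaning the maximum sizes equal the current ones):

    #include "H5Cpp.h"
    using namespace H5;

    void makeSpaces()
    {
        DataSpace scalar(H5S_SCALAR);           // single-element dataspace

        hsize_t dims[2] = {3, 4};
        DataSpace fixed(2, dims);               // maxdims == dims

        hsize_t maxdims[2] = {H5S_UNLIMITED, 4};
        DataSpace growable(2, dims, maxdims);   // first dimension unlimited
    }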
//--------------------------------------------------------------------------
-// Function: DataSpace overloaded constructor
-///\brief Creates a DataSpace object using the id of an existing
-/// dataspace.
-///\param existing_id - IN: Id of an existing dataspace
-///\exception H5::DataSpaceIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataSpace overloaded constructor
+///\brief Creates a DataSpace object using the id of an existing
+/// dataspace.
+///\param existing_id - IN: Id of an existing dataspace
+///\exception H5::DataSpaceIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
DataSpace::DataSpace(const hid_t existing_id) : IdComponent(), id(existing_id)
{
@@ -134,10 +132,10 @@ DataSpace::DataSpace(const hid_t existing_id) : IdComponent(), id(existing_id)
}
//--------------------------------------------------------------------------
-// Function: DataSpace copy constructor
-///\brief Copy constructor: makes a copy of the original DataSpace object.
-///\param original - IN: DataSpace object to copy
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataSpace copy constructor
+///\brief Copy constructor: makes a copy of the original DataSpace object.
+///\param original - IN: DataSpace object to copy
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
DataSpace::DataSpace(const DataSpace& original) : IdComponent(), id(original.id)
{
@@ -145,48 +143,48 @@ DataSpace::DataSpace(const DataSpace& original) : IdComponent(), id(original.id)
}
//--------------------------------------------------------------------------
-// Function: DataSpace::copy
-///\brief Makes a copy of an existing dataspace.
-///\param like_space - IN: Dataspace to be copied
-///\exception H5::DataSpaceIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataSpace::copy
+///\brief Makes a copy of an existing dataspace.
+///\param like_space - IN: Dataspace to be copied
+///\exception H5::DataSpaceIException
+// Programmer Binh-Minh Ribler - 2000
// Modification
-// - Replaced resetIdComponent() with decRefCount() to use C
-// library ID reference counting mechanism - BMR, Jun 1, 2004
-// - Replaced decRefCount with close() to let the C library
-// handle the reference counting - BMR, Jun 1, 2006
-//--------------------------------------------------------------------------
-void DataSpace::copy( const DataSpace& like_space )
-{
- // If this object has an hdf5 valid id, close it
- if( id != H5S_ALL ) {
- try {
- close();
- }
- catch (Exception& close_error) {
+// - Replaced resetIdComponent() with decRefCount() to use C
+// library ID reference counting mechanism - BMR, Jun 1, 2004
+// - Replaced decRefCount with close() to let the C library
+// handle the reference counting - BMR, Jun 1, 2006
+//--------------------------------------------------------------------------
+void DataSpace::copy(const DataSpace& like_space)
+{
+ // If this object has an hdf5 valid id, close it
+ if (id != H5S_ALL) {
+ try {
+ close();
+ }
+ catch (Exception& close_error) {
throw DataSpaceIException("DataSpace::copy", close_error.getDetailMsg());
- }
- } // end if
+ }
+ } // end if
- // call C routine to copy the dataspace
- id = H5Scopy( like_space.getId() );
+ // call C routine to copy the dataspace
+ id = H5Scopy(like_space.getId());
- if( id < 0 )
- throw DataSpaceIException("DataSpace::copy", "H5Scopy failed");
+ if (id < 0)
+ throw DataSpaceIException("DataSpace::copy", "H5Scopy failed");
}
//--------------------------------------------------------------------------
-// Function: DataSpace::operator=
-///\brief Assignment operator.
-///\param rhs - IN: Reference to the existing dataspace
-///\return Reference to DataSpace instance
-///\exception H5::DataSpaceIException
+// Function: DataSpace::operator=
+///\brief Assignment operator.
+///\param rhs - IN: Reference to the existing dataspace
+///\return Reference to DataSpace instance
+///\exception H5::DataSpaceIException
// Description
-// Makes a copy of the type on the right hand side and stores
-// the new id in the left hand side object.
-// Programmer Binh-Minh Ribler - 2000
+// Makes a copy of the dataspace on the right hand side and stores
+// the new id in the left hand side object.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-DataSpace& DataSpace::operator=( const DataSpace& rhs )
+DataSpace& DataSpace::operator=(const DataSpace& rhs)
{
if (this != &rhs)
copy(rhs);
@@ -194,435 +192,434 @@ DataSpace& DataSpace::operator=( const DataSpace& rhs )
}
//--------------------------------------------------------------------------
-// Function: DataSpace::isSimple
-///\brief Determines whether this dataspace is a simple dataspace.
-///\return \c true if the dataspace is a simple dataspace, and \c false,
-/// otherwise
-///\exception H5::DataSpaceIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataSpace::isSimple
+///\brief Determines whether this dataspace is a simple dataspace.
+///\return \c true if the dataspace is a simple dataspace, and \c false,
+/// otherwise
+///\exception H5::DataSpaceIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
bool DataSpace::isSimple () const
{
- htri_t simple = H5Sis_simple( id );
- if( simple > 0 )
- return true;
- else if( simple == 0 )
- return false;
- else
- {
- throw DataSpaceIException("DataSpace::isSimple",
- "H5Sis_simple returns negative value");
- }
+ htri_t simple = H5Sis_simple(id);
+ if (simple > 0)
+ return true;
+ else if (simple == 0)
+ return false;
+ else
+ {
+ throw DataSpaceIException("DataSpace::isSimple",
+ "H5Sis_simple returns negative value");
+ }
}
//--------------------------------------------------------------------------
-// Function: DataSpace::offsetSimple
-///\brief Sets the offset of this simple dataspace.
-///\param offset - IN: Offset to position the selection at
-///\exception H5::DataSpaceIException
+// Function: DataSpace::offsetSimple
+///\brief Sets the offset of this simple dataspace.
+///\param offset - IN: Offset to position the selection at
+///\exception H5::DataSpaceIException
///\par Description
-/// This function creates an offset for the selection within
-/// an extent, allowing the same shaped selection to be moved
-/// to different locations within a dataspace without requiring
-/// it to be re-defined.
-// Programmer Binh-Minh Ribler - 2000
+/// This function creates an offset for the selection within
+/// an extent, allowing the same shaped selection to be moved
+/// to different locations within a dataspace without requiring
+/// it to be re-defined.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void DataSpace::offsetSimple ( const hssize_t* offset ) const
+void DataSpace::offsetSimple (const hssize_t* offset) const
{
- herr_t ret_value = H5Soffset_simple( id, offset );
- if( ret_value < 0 )
- {
- throw DataSpaceIException("DataSpace::offsetSimple", "H5Soffset_simple failed");
- }
+ herr_t ret_value = H5Soffset_simple(id, offset);
+ if (ret_value < 0)
+ {
+ throw DataSpaceIException("DataSpace::offsetSimple", "H5Soffset_simple failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: DataSpace::getSimpleExtentDims
-///\brief Retrieves dataspace dimension size and maximum size.
-///\param dims - IN: Name of the new member
-///\param maxdims - IN: Pointer to the value of the new member
-///\return Number of dimensions, the same value as returned by
-/// \c DataSpace::getSimpleExtentNdims()
-///\exception H5::DataSpaceIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataSpace::getSimpleExtentDims
+///\brief Retrieves dataspace dimension size and maximum size.
+///\param dims - OUT: Buffer to receive the current size of each dimension
+///\param maxdims - OUT: Buffer to receive the maximum size of each dimension
+///\return Number of dimensions, the same value as returned by
+/// \c DataSpace::getSimpleExtentNdims()
+///\exception H5::DataSpaceIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-int DataSpace::getSimpleExtentDims ( hsize_t *dims, hsize_t *maxdims ) const
+int DataSpace::getSimpleExtentDims (hsize_t *dims, hsize_t *maxdims) const
{
- int ndims = H5Sget_simple_extent_dims( id, dims, maxdims );
- if( ndims < 0 )
- {
- throw DataSpaceIException("DataSpace::getSimpleExtentDims",
- "H5Sget_simple_extent_dims returns negative number of dimensions");
- }
- return( ndims );
+ int ndims = H5Sget_simple_extent_dims(id, dims, maxdims);
+ if (ndims < 0)
+ {
+ throw DataSpaceIException("DataSpace::getSimpleExtentDims",
+ "H5Sget_simple_extent_dims returns negative number of dimensions");
+ }
+ return(ndims);
}
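Illustrative sketch of the usual calling pattern (the DataSet object dset is assumed to exist; requires <vector>): query the rank first, then size the buffers passed to getSimpleExtentDims.

    H5::DataSpace space = dset.getSpace();                 // dataspace of an existing dataset
    int rank = space.getSimpleExtentNdims();
    std::vector<hsize_t> dims(rank), maxdims(rank);
    space.getSimpleExtentDims(&dims[0], &maxdims[0]);      // fills both buffers, returns rank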
//--------------------------------------------------------------------------
-// Function: DataSpace::getSimpleExtentNdims
-///\brief Returns the dimensionality of a dataspace.
-///\return Number of dimensions
-///\exception H5::DataSpaceIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataSpace::getSimpleExtentNdims
+///\brief Returns the dimensionality of a dataspace.
+///\return Number of dimensions
+///\exception H5::DataSpaceIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
int DataSpace::getSimpleExtentNdims () const
{
- int ndims = H5Sget_simple_extent_ndims( id );
- if( ndims < 0 )
- {
- throw DataSpaceIException("DataSpace::getSimpleExtentNdims",
- "H5Sget_simple_extent_ndims returns negative value for dimensionality of the dataspace");
- }
- return( ndims );
+ int ndims = H5Sget_simple_extent_ndims(id);
+ if (ndims < 0)
+ {
+ throw DataSpaceIException("DataSpace::getSimpleExtentNdims",
+ "H5Sget_simple_extent_ndims returns negative value for dimensionality of the dataspace");
+ }
+ return(ndims);
}
//--------------------------------------------------------------------------
-// Function: DataSpace::getSimpleExtentNpoints
-///\brief Returns the number of elements in a dataspace.
-///\return Number of elements
-///\exception H5::DataSpaceIException
+// Function: DataSpace::getSimpleExtentNpoints
+///\brief Returns the number of elements in a dataspace.
+///\return Number of elements
+///\exception H5::DataSpaceIException
// Modification
-// 12/05/00: due to C API change
-// return type hssize_t vs. hsize_t
-// num_elements = -1 when failure occurs vs. 0
-// Programmer Binh-Minh Ribler - 2000
+// 12/05/00: due to C API change
+// return type hssize_t vs. hsize_t
+// num_elements = -1 when failure occurs vs. 0
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
hssize_t DataSpace::getSimpleExtentNpoints () const
{
- hssize_t num_elements = H5Sget_simple_extent_npoints( id );
-
- if( num_elements > -1 )
- return( num_elements );
- else
- {
- throw DataSpaceIException("DataSpace::getSimpleExtentNpoints",
- "H5Sget_simple_extent_npoints returns negative value for the number of elements in the dataspace");
- }
+ hssize_t num_elements = H5Sget_simple_extent_npoints(id);
+ if (num_elements > -1)
+ return(num_elements);
+ else
+ {
+ throw DataSpaceIException("DataSpace::getSimpleExtentNpoints",
+ "H5Sget_simple_extent_npoints returns negative value for the number of elements in the dataspace");
+ }
}
//--------------------------------------------------------------------------
-// Function: DataSpace::getSimpleExtentType
-///\brief Returns the current class of a dataspace.
-///\return Class of the dataspace
-///\exception H5::DataSpaceIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataSpace::getSimpleExtentType
+///\brief Returns the current class of a dataspace.
+///\return Class of the dataspace
+///\exception H5::DataSpaceIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
H5S_class_t DataSpace::getSimpleExtentType () const
{
- H5S_class_t class_name = H5Sget_simple_extent_type( id );
- if( class_name == H5S_NO_CLASS )
- {
- throw DataSpaceIException("DataSpace::getSimpleExtentType",
- "H5Sget_simple_extent_type returns H5S_NO_CLASS");
- }
- return( class_name );
+ H5S_class_t class_name = H5Sget_simple_extent_type(id);
+ if (class_name == H5S_NO_CLASS)
+ {
+ throw DataSpaceIException("DataSpace::getSimpleExtentType",
+ "H5Sget_simple_extent_type returns H5S_NO_CLASS");
+ }
+ return(class_name);
}
//--------------------------------------------------------------------------
-// Function: DataSpace::extentCopy
-///\brief Copies the extent of a dataspace.
-///\param dest_space - IN: Dataspace to copy from
-///\exception H5::DataSpaceIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataSpace::extentCopy
+///\brief       Copies the extent of this dataspace to the specified dataspace.
+///\param       dest_space  - IN: Dataspace to copy the extent to
+///\exception H5::DataSpaceIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
void DataSpace::extentCopy (const DataSpace& dest_space) const
{
- hid_t dest_space_id = dest_space.getId();
- herr_t ret_value = H5Sextent_copy( dest_space_id, id );
- if( ret_value < 0 )
- {
- throw DataSpaceIException("DataSpace::extentCopy", "H5Sextent_copy failed");
- }
+ hid_t dest_space_id = dest_space.getId();
+ herr_t ret_value = H5Sextent_copy(dest_space_id, id);
+ if (ret_value < 0)
+ {
+ throw DataSpaceIException("DataSpace::extentCopy", "H5Sextent_copy failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: DataSpace::extentCopy
-// Purpose This is an overloaded member function, kept for backward
-// compatibility. It differs from the above function in that it
-// misses const. This wrapper will be removed in future release.
-// Param dest_space - IN: Dataspace to copy from
-// Exception H5::DataSpaceIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataSpace::extentCopy
+// Purpose This is an overloaded member function, kept for backward
+// compatibility. It differs from the above function in that it
+//              lacks the const qualifier. This wrapper will be removed in a future release.
+// Param dest_space - IN: Dataspace to copy from
+// Exception H5::DataSpaceIException
+// Programmer Binh-Minh Ribler - 2000
// Modification
-// Modified to call its replacement. -BMR, 2014/04/16
-// Removed from documentation. -BMR, 2016/03/07 1.8.17 and 1.10.0
-// Removed from code. -BMR, 2016/08/11 1.8.18 and 1.10.1
+// Modified to call its replacement. -BMR, 2014/04/16
+// Removed from documentation. -BMR, 2016/03/07 1.8.17 and 1.10.0
+// Removed from code. -BMR, 2016/08/11 1.8.18 and 1.10.1
//--------------------------------------------------------------------------
-//void DataSpace::extentCopy( DataSpace& dest_space ) const
+//void DataSpace::extentCopy(DataSpace& dest_space) const
//{
// extentCopy(dest_space);
//}
//--------------------------------------------------------------------------
-// Function: DataSpace::setExtentSimple
-///\brief Sets or resets the size of an existing dataspace.
-///\param rank - IN: Rank of the dataspace
-///\param current_size - IN: Array containing current size of dataspace
-///\param maximum_size - IN: Array containing maximum size of dataspace
-///\exception H5::DataSpaceIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataSpace::setExtentSimple
+///\brief Sets or resets the size of an existing dataspace.
+///\param rank - IN: Rank of the dataspace
+///\param current_size - IN: Array containing current size of dataspace
+///\param maximum_size - IN: Array containing maximum size of dataspace
+///\exception H5::DataSpaceIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void DataSpace::setExtentSimple( int rank, const hsize_t *current_size, const hsize_t *maximum_size ) const
+void DataSpace::setExtentSimple(int rank, const hsize_t *current_size, const hsize_t *maximum_size) const
{
- herr_t ret_value;
- ret_value = H5Sset_extent_simple( id, rank, current_size, maximum_size );
- if( ret_value < 0 )
- {
- throw DataSpaceIException("DataSpace::setExtentSimple", "H5Sset_extent_simple failed");
- }
+ herr_t ret_value;
+ ret_value = H5Sset_extent_simple(id, rank, current_size, maximum_size);
+ if (ret_value < 0)
+ {
+ throw DataSpaceIException("DataSpace::setExtentSimple", "H5Sset_extent_simple failed");
+ }
}
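A small sketch of setExtentSimple (assumes an existing DataSpace named space): the extent is reset to a 1-D space of 100 elements that may later grow without bound.

    hsize_t cur_dims[1] = {100};
    hsize_t max_dims[1] = {H5S_UNLIMITED};                 // extendible dimension
    space.setExtentSimple(1, cur_dims, max_dims);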
//--------------------------------------------------------------------------
-// Function: DataSpace::setExtentNone
-///\brief Removes the extent from a dataspace.
+// Function: DataSpace::setExtentNone
+///\brief Removes the extent from a dataspace.
///
-///\exception H5::DataSpaceIException
-// Programmer Binh-Minh Ribler - 2000
+///\exception H5::DataSpaceIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
void DataSpace::setExtentNone () const
{
- herr_t ret_value = H5Sset_extent_none( id );
- if( ret_value < 0 )
- {
- throw DataSpaceIException("DataSpace::setExtentNone", "H5Sset_extent_none failed");
- }
+ herr_t ret_value = H5Sset_extent_none(id);
+ if (ret_value < 0)
+ {
+ throw DataSpaceIException("DataSpace::setExtentNone", "H5Sset_extent_none failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: DataSpace::getSelectNpoints
-///\brief Returns the number of elements in a dataspace selection.
-///\return Number of elements
-///\exception H5::DataSpaceIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataSpace::getSelectNpoints
+///\brief Returns the number of elements in a dataspace selection.
+///\return Number of elements
+///\exception H5::DataSpaceIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
hssize_t DataSpace::getSelectNpoints () const
{
- hssize_t num_elements = H5Sget_select_npoints( id );
- if( num_elements < 0 )
- {
- throw DataSpaceIException("DataSpace::getSelectNpoints",
- "H5Sget_select_npoints returns negative value for number of elements in the dataspace selection");
- }
- return( num_elements );
+ hssize_t num_elements = H5Sget_select_npoints(id);
+ if (num_elements < 0)
+ {
+ throw DataSpaceIException("DataSpace::getSelectNpoints",
+ "H5Sget_select_npoints returns negative value for number of elements in the dataspace selection");
+ }
+ return(num_elements);
}
//--------------------------------------------------------------------------
-// Function: DataSpace::getSelectHyperNblocks
-///\brief Returns number of hyperslab blocks.
-///\return Number of hyperslab blocks
-///\exception H5::DataSpaceIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataSpace::getSelectHyperNblocks
+///\brief Returns number of hyperslab blocks.
+///\return Number of hyperslab blocks
+///\exception H5::DataSpaceIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
hssize_t DataSpace::getSelectHyperNblocks () const
{
- hssize_t num_blocks = H5Sget_select_hyper_nblocks( id );
- if( num_blocks < 0 )
- {
- throw DataSpaceIException("DataSpace::getSelectHyperNblocks",
- "H5Sget_select_hyper_nblocks returns negative value for the number of hyperslab blocks");
- }
- return( num_blocks );
+ hssize_t num_blocks = H5Sget_select_hyper_nblocks(id);
+ if (num_blocks < 0)
+ {
+ throw DataSpaceIException("DataSpace::getSelectHyperNblocks",
+ "H5Sget_select_hyper_nblocks returns negative value for the number of hyperslab blocks");
+ }
+ return(num_blocks);
}
//--------------------------------------------------------------------------
-// Function: DataSpace::getSelectHyperBlocklist
-///\brief Gets the list of hyperslab blocks currently selected
-///\param startblock - IN: Hyperslab block to start with
-///\param numblocks - IN: Number of hyperslab blocks to get
-///\param buf - IN: List of hyperslab blocks selected
-///\exception H5::DataSpaceIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataSpace::getSelectHyperBlocklist
+///\brief Gets the list of hyperslab blocks currently selected
+///\param startblock - IN: Hyperslab block to start with
+///\param numblocks - IN: Number of hyperslab blocks to get
+///\param buf        - OUT: Buffer to receive the list of hyperslab blocks selected
+///\exception H5::DataSpaceIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void DataSpace::getSelectHyperBlocklist( hsize_t startblock, hsize_t numblocks, hsize_t *buf ) const
+void DataSpace::getSelectHyperBlocklist(hsize_t startblock, hsize_t numblocks, hsize_t *buf) const
{
- herr_t ret_value;
- ret_value = H5Sget_select_hyper_blocklist( id, startblock, numblocks, buf );
- if( ret_value < 0 )
- {
- throw DataSpaceIException("DataSpace::getSelectHyperBlocklist",
- "H5Sget_select_hyper_blocklist failed");
- }
+ herr_t ret_value;
+ ret_value = H5Sget_select_hyper_blocklist(id, startblock, numblocks, buf);
+ if (ret_value < 0)
+ {
+ throw DataSpaceIException("DataSpace::getSelectHyperBlocklist",
+ "H5Sget_select_hyper_blocklist failed");
+ }
}
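Sketch of the expected buffer sizing (assumes an existing DataSpace named space that already holds a hyperslab selection; requires <vector>): each block is reported as two corner coordinates, i.e. 2*rank values per block.

    hssize_t nblocks = space.getSelectHyperNblocks();
    int rank = space.getSimpleExtentNdims();
    std::vector<hsize_t> blockbuf(nblocks * 2 * rank);     // 2*rank values per block
    space.getSelectHyperBlocklist(0, (hsize_t)nblocks, &blockbuf[0]);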
//--------------------------------------------------------------------------
-// Function: DataSpace::getSelectElemNpoints
-///\brief Returns the number of element points in the current selection.
-///\return Number of element points
-///\exception H5::DataSpaceIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataSpace::getSelectElemNpoints
+///\brief Returns the number of element points in the current selection.
+///\return Number of element points
+///\exception H5::DataSpaceIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
hssize_t DataSpace::getSelectElemNpoints () const
{
- hssize_t num_points = H5Sget_select_elem_npoints( id );
- if( num_points < 0 )
- {
- throw DataSpaceIException("DataSpace::getSelectElemNpoints",
- "H5Sget_select_elem_npoints failed");
- }
- return( num_points );
+ hssize_t num_points = H5Sget_select_elem_npoints(id);
+ if (num_points < 0)
+ {
+ throw DataSpaceIException("DataSpace::getSelectElemNpoints",
+ "H5Sget_select_elem_npoints failed");
+ }
+ return(num_points);
}
//--------------------------------------------------------------------------
-// Function: DataSpace::getSelectElemPointlist
-///\brief Gets the list of element points currently selected
-///\param startpoint - IN: Element point to start with
-///\param numpoints - IN: Number of element points to get
-///\param buf - IN: List of element points selected
-///\exception H5::DataSpaceIException
+// Function: DataSpace::getSelectElemPointlist
+///\brief Gets the list of element points currently selected
+///\param startpoint - IN: Element point to start with
+///\param numpoints - IN: Number of element points to get
+///\param buf        - OUT: Buffer to receive the list of element points selected
+///\exception H5::DataSpaceIException
///\par Description
-/// For more information, please refer to the C layer Reference
-/// Manual at:
+/// For more information, please refer to the C layer Reference
+/// Manual at:
/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5S.html#Dataspace-SelectElemPointList
-// Programmer Binh-Minh Ribler - 2000
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void DataSpace::getSelectElemPointlist ( hsize_t startpoint, hsize_t numpoints, hsize_t *buf ) const
+void DataSpace::getSelectElemPointlist (hsize_t startpoint, hsize_t numpoints, hsize_t *buf) const
{
- herr_t ret_value;
- ret_value = H5Sget_select_elem_pointlist( id, startpoint, numpoints, buf );
- if( ret_value < 0 )
- {
- throw DataSpaceIException("DataSpace::getSelectElemPointlist",
- "H5Sget_select_elem_pointlist failed");
- }
+ herr_t ret_value;
+ ret_value = H5Sget_select_elem_pointlist(id, startpoint, numpoints, buf);
+ if (ret_value < 0)
+ {
+ throw DataSpaceIException("DataSpace::getSelectElemPointlist",
+ "H5Sget_select_elem_pointlist failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: DataSpace::getSelectBounds
-///\brief Gets the bounding box containing the current selection.
-///\param start - IN: Starting coordinates of the bounding box
-///\param end - IN: Ending coordinates of the bounding box, i.e.,
-/// the coordinates of the diagonally opposite corner
-///\exception H5::DataSpaceIException
+// Function: DataSpace::getSelectBounds
+///\brief Gets the bounding box containing the current selection.
+///\param start  - OUT: Starting coordinates of the bounding box
+///\param end    - OUT: Ending coordinates of the bounding box, i.e.,
+/// the coordinates of the diagonally opposite corner
+///\exception H5::DataSpaceIException
///\par Description
-/// For more information, please refer to the C layer Reference
-/// Manual at:
+/// For more information, please refer to the C layer Reference
+/// Manual at:
/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5S.html#Dataspace-SelectBounds
-// Programmer Binh-Minh Ribler - 2000
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void DataSpace::getSelectBounds ( hsize_t* start, hsize_t* end ) const
+void DataSpace::getSelectBounds (hsize_t* start, hsize_t* end) const
{
- herr_t ret_value = H5Sget_select_bounds( id, start, end );
- if( ret_value < 0 )
- {
- throw DataSpaceIException("DataSpace::getSelectBounds",
- "H5Sget_select_bounds failed");
- }
+ herr_t ret_value = H5Sget_select_bounds(id, start, end);
+ if (ret_value < 0)
+ {
+ throw DataSpaceIException("DataSpace::getSelectBounds",
+ "H5Sget_select_bounds failed");
+ }
}
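Illustrative call (assumes a rank-2 DataSpace named space with an active selection): the two output arrays receive the opposite corners of the selection's bounding box.

    hsize_t bstart[2], bend[2];
    space.getSelectBounds(bstart, bend);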
//--------------------------------------------------------------------------
-// Function: DataSpace::selectElements
-///\brief Selects array elements to be included in the selection for
-/// this dataspace.
-///\param op - IN: Operator specifying how the new selection is to be
-/// combined with the existing selection for the dataspace
-///\param num_elements - IN: Number of elements to be selected
-///\param coord - IN: A 2-dimensional array of 0-based values
-/// specifying the coordinates of the elements being selected
-///\exception H5::DataSpaceIException
+// Function: DataSpace::selectElements
+///\brief Selects array elements to be included in the selection for
+/// this dataspace.
+///\param op - IN: Operator specifying how the new selection is to be
+/// combined with the existing selection for the dataspace
+///\param num_elements - IN: Number of elements to be selected
+///\param coord - IN: A 2-dimensional array of 0-based values
+/// specifying the coordinates of the elements being selected
+///\exception H5::DataSpaceIException
///\par Description
-/// For more information, please refer to the C layer Reference
-/// Manual at:
+/// For more information, please refer to the C layer Reference
+/// Manual at:
/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5S.html#Dataspace-SelectElements
-// Programmer Binh-Minh Ribler - 2000
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void DataSpace::selectElements ( H5S_seloper_t op, const size_t num_elements, const hsize_t *coord) const
+void DataSpace::selectElements (H5S_seloper_t op, const size_t num_elements, const hsize_t *coord) const
{
- herr_t ret_value;
- ret_value = H5Sselect_elements( id, op, num_elements, coord );
- if( ret_value < 0 )
- {
- throw DataSpaceIException("DataSpace::selectElements",
- "H5Sselect_elements failed");
- }
+ herr_t ret_value;
+ ret_value = H5Sselect_elements(id, op, num_elements, coord);
+ if (ret_value < 0)
+ {
+ throw DataSpaceIException("DataSpace::selectElements",
+ "H5Sselect_elements failed");
+ }
}
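Sketch of the coordinate layout expected by selectElements (assumes a rank-2 DataSpace named space): coord holds num_elements rows of rank values each, passed as a flat hsize_t array.

    const hsize_t coord[3][2] = { {0, 0}, {2, 5}, {7, 1} };    // three points in a 2-D space
    space.selectElements(H5S_SELECT_SET, 3, (const hsize_t *)coord);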
//--------------------------------------------------------------------------
-// Function: DataSpace::selectAll
-///\brief Selects the entire dataspace.
+// Function: DataSpace::selectAll
+///\brief Selects the entire dataspace.
///
-///\exception H5::DataSpaceIException
-// Programmer Binh-Minh Ribler - 2000
+///\exception H5::DataSpaceIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
void DataSpace::selectAll () const
{
- herr_t ret_value = H5Sselect_all( id );
- if( ret_value < 0 )
- {
- throw DataSpaceIException("DataSpace::selectAll", "H5Sselect_all failed");
- }
+ herr_t ret_value = H5Sselect_all(id);
+ if (ret_value < 0)
+ {
+ throw DataSpaceIException("DataSpace::selectAll", "H5Sselect_all failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: DataSpace::selectNone
-///\brief Resets the selection region to include no elements.
+// Function: DataSpace::selectNone
+///\brief Resets the selection region to include no elements.
///
-///\exception H5::DataSpaceIException
-// Programmer Binh-Minh Ribler - 2000
+///\exception H5::DataSpaceIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
void DataSpace::selectNone () const
{
- herr_t ret_value = H5Sselect_none( id );
- if( ret_value < 0 )
- {
- throw DataSpaceIException("DataSpace::selectNone",
- "H5Sselect_none failed");
- }
+ herr_t ret_value = H5Sselect_none(id);
+ if (ret_value < 0)
+ {
+ throw DataSpaceIException("DataSpace::selectNone",
+ "H5Sselect_none failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: DataSpace::selectValid
-///\brief Verifies that the selection is within the extent of the
-/// dataspace.
-///\return \c true if the selection is within the extent of the
-/// dataspace, and \c false, otherwise
-///\exception H5::DataSpaceIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataSpace::selectValid
+///\brief Verifies that the selection is within the extent of the
+/// dataspace.
+///\return \c true if the selection is within the extent of the
+/// dataspace, and \c false, otherwise
+///\exception H5::DataSpaceIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
bool DataSpace::selectValid () const
{
- htri_t ret_value = H5Sselect_valid( id );
- if( ret_value > 0 )
- return true;
- else if( ret_value == 0 )
- return false;
- else
- {
- throw DataSpaceIException("DataSpace::selectValid",
- "H5Sselect_valid returns negative value");
- }
-}
-
-//--------------------------------------------------------------------------
-// Function: DataSpace::selectHyperslab
-///\brief Selects a hyperslab region to add to the current selected region.
-///\param op - IN: Operation to perform on current selection
-///\param count - IN: Number of blocks included in the hyperslab
-///\param start - IN: Offset of the start of hyperslab
-///\param stride - IN: Hyperslab stride - default to \c NULL
-///\param block - IN: Size of block in the hyperslab - default to \c NULL
-///\exception H5::DataSpaceIException
+ htri_t ret_value = H5Sselect_valid(id);
+ if (ret_value > 0)
+ return true;
+ else if (ret_value == 0)
+ return false;
+ else
+ {
+ throw DataSpaceIException("DataSpace::selectValid",
+ "H5Sselect_valid returns negative value");
+ }
+}
+
+//--------------------------------------------------------------------------
+// Function: DataSpace::selectHyperslab
+///\brief Selects a hyperslab region to add to the current selected region.
+///\param op - IN: Operation to perform on current selection
+///\param count - IN: Number of blocks included in the hyperslab
+///\param start - IN: Offset of the start of hyperslab
+///\param stride - IN: Hyperslab stride - default to \c NULL
+///\param block - IN: Size of block in the hyperslab - default to \c NULL
+///\exception H5::DataSpaceIException
///\par Description
-/// For more information, please refer to the C layer Reference
-/// Manual at:
+/// For more information, please refer to the C layer Reference
+/// Manual at:
/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5S.html#Dataspace-SelectHyperslab
-// Programmer Binh-Minh Ribler - 2000
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void DataSpace::selectHyperslab( H5S_seloper_t op, const hsize_t *count, const hsize_t *start, const hsize_t *stride, const hsize_t *block ) const
+void DataSpace::selectHyperslab(H5S_seloper_t op, const hsize_t *count, const hsize_t *start, const hsize_t *stride, const hsize_t *block) const
{
- herr_t ret_value;
- ret_value = H5Sselect_hyperslab( id, op, start, stride, count, block );
- if( ret_value < 0 )
- {
- throw DataSpaceIException("DataSpace::selectHyperslab",
- "H5Sselect_hyperslab failed");
- }
+ herr_t ret_value;
+ ret_value = H5Sselect_hyperslab(id, op, start, stride, count, block);
+ if (ret_value < 0)
+ {
+ throw DataSpaceIException("DataSpace::selectHyperslab",
+ "H5Sselect_hyperslab failed");
+ }
}
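A hedged example of a strided, blocked hyperslab (assumes a DataSpace named space of at least 8x8): four 3x3 blocks whose upper-left corners start at (1,1) and repeat every 4 elements in each dimension.

    hsize_t start[2]  = {1, 1};
    hsize_t stride[2] = {4, 4};
    hsize_t count[2]  = {2, 2};
    hsize_t block[2]  = {3, 3};
    space.selectHyperslab(H5S_SELECT_SET, count, start, stride, block);
    bool ok = space.selectValid();                         // false if the blocks exceed the extent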
//--------------------------------------------------------------------------
// Function: DataSpace::getId
-///\brief Get the id of this dataspace
-///\return Dataspace identifier
+///\brief       Gets the id of this dataspace
+///\return Dataspace identifier
// Modification:
-// May 2008 - BMR
+// May 2008 - BMR
// Class hierarchy is revised to address bugzilla 1068. Class
// AbstractDS and Attribute are moved out of H5Object. In
// addition, member IdComponent::id is moved into subclasses, and
@@ -631,7 +628,7 @@ void DataSpace::selectHyperslab( H5S_seloper_t op, const hsize_t *count, const h
//--------------------------------------------------------------------------
hid_t DataSpace::getId() const
{
- return(id);
+ return(id);
}
#ifndef DOXYGEN_SHOULD_SKIP_THIS
@@ -656,49 +653,49 @@ void DataSpace::p_setId(const hid_t new_id)
catch (Exception& close_error) {
throw DataSpaceIException(inMemFunc("p_setId"), close_error.getDetailMsg());
}
- // reset object's id to the given id
- id = new_id;
+ // reset object's id to the given id
+ id = new_id;
}
#endif // DOXYGEN_SHOULD_SKIP_THIS
//--------------------------------------------------------------------------
-// Function: DataSpace::close
-///\brief Closes this dataspace.
+// Function: DataSpace::close
+///\brief Closes this dataspace.
///
-///\exception H5::DataSpaceIException
-// Programmer Binh-Minh Ribler - Mar 9, 2005
+///\exception H5::DataSpaceIException
+// Programmer Binh-Minh Ribler - Mar 9, 2005
//--------------------------------------------------------------------------
void DataSpace::close()
{
// check if id is a valid hdf5 object id before trying to close it
if (p_valid_id(id))
{
- herr_t ret_value = H5Sclose(id);
- if( ret_value < 0 )
- {
- throw DataSpaceIException("DataSpace::close", "H5Sclose failed");
- }
- // reset the id
- id = H5I_INVALID_HID;
+ herr_t ret_value = H5Sclose(id);
+ if (ret_value < 0)
+ {
+ throw DataSpaceIException("DataSpace::close", "H5Sclose failed");
+ }
+ // reset the id
+ id = H5I_INVALID_HID;
}
}
//--------------------------------------------------------------------------
-// Function: DataSpace destructor
-///\brief Properly terminates access to this dataspace.
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataSpace destructor
+///\brief Properly terminates access to this dataspace.
+// Programmer Binh-Minh Ribler - 2000
// Modification
-// - Replaced resetIdComponent() with decRefCount() to use C
-// library ID reference counting mechanism - BMR, Jun 1, 2004
-// - Replaced decRefCount with close() to let the C library
-// handle the reference counting - BMR, Jun 1, 2006
+// - Replaced resetIdComponent() with decRefCount() to use C
+// library ID reference counting mechanism - BMR, Jun 1, 2004
+// - Replaced decRefCount with close() to let the C library
+// handle the reference counting - BMR, Jun 1, 2006
//--------------------------------------------------------------------------
DataSpace::~DataSpace()
{
try {
- close();
+ close();
} catch (Exception& close_error) {
- cerr << "DataSpace::~DataSpace - " << close_error.getDetailMsg() << endl;
+ cerr << "DataSpace::~DataSpace - " << close_error.getDetailMsg() << endl;
}
}
diff --git a/c++/src/H5DataSpace.h b/c++/src/H5DataSpace.h
index e76bc72..c0a1d2b 100644
--- a/c++/src/H5DataSpace.h
+++ b/c++/src/H5DataSpace.h
@@ -6,12 +6,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef __H5DataSpace_H
@@ -27,128 +25,130 @@ namespace H5 {
*/
class H5_DLLCPP DataSpace : public IdComponent {
public:
- ///\brief Default DataSpace objects
- static const DataSpace& ALL;
+ ///\brief Default DataSpace objects
+ static const DataSpace& ALL;
- // Creates a dataspace object given the space type
- DataSpace(H5S_class_t type = H5S_SCALAR);
+ // Creates a dataspace object given the space type
+ DataSpace(H5S_class_t type = H5S_SCALAR);
- // Creates a simple dataspace
- DataSpace(int rank, const hsize_t * dims, const hsize_t * maxdims = NULL);
+ // Creates a simple dataspace
+ DataSpace(int rank, const hsize_t * dims, const hsize_t * maxdims = NULL);
- // Creates a DataSpace object using an existing dataspace id.
- DataSpace(const hid_t space_id);
+ // Creates a DataSpace object using an existing dataspace id.
+ DataSpace(const hid_t space_id);
- // Copy constructor: makes a copy of the original DataSpace object.
- DataSpace(const DataSpace& original);
+ // Copy constructor: makes a copy of the original DataSpace object.
+ DataSpace(const DataSpace& original);
- // Assignment operator
- DataSpace& operator=( const DataSpace& rhs );
+ // Assignment operator
+ DataSpace& operator=(const DataSpace& rhs);
- // Closes this dataspace.
- virtual void close();
+ // Closes this dataspace.
+ virtual void close();
- // Makes copy of an existing dataspace.
- void copy(const DataSpace& like_space);
+ // Makes copy of an existing dataspace.
+ void copy(const DataSpace& like_space);
- // Copies the extent of this dataspace.
- void extentCopy(const DataSpace& dest_space) const;
- // removed from 1.8.18 and 1.10.1
- //void extentCopy(DataSpace& dest_space) const;
+ // Copies the extent of this dataspace.
+ void extentCopy(const DataSpace& dest_space) const;
+ // removed from 1.8.18 and 1.10.1
+ //void extentCopy(DataSpace& dest_space) const;
- // Gets the bounding box containing the current selection.
- void getSelectBounds( hsize_t* start, hsize_t* end ) const;
+ // Gets the bounding box containing the current selection.
+ void getSelectBounds(hsize_t* start, hsize_t* end) const;
- // Gets the number of element points in the current selection.
- hssize_t getSelectElemNpoints() const;
+ // Gets the number of element points in the current selection.
+ hssize_t getSelectElemNpoints() const;
- // Retrieves the list of element points currently selected.
- void getSelectElemPointlist( hsize_t startpoint, hsize_t numpoints, hsize_t *buf ) const;
+ // Retrieves the list of element points currently selected.
+ void getSelectElemPointlist(hsize_t startpoint, hsize_t numpoints, hsize_t *buf) const;
- // Gets the list of hyperslab blocks currently selected.
- void getSelectHyperBlocklist( hsize_t startblock, hsize_t numblocks, hsize_t *buf ) const;
+ // Gets the list of hyperslab blocks currently selected.
+ void getSelectHyperBlocklist(hsize_t startblock, hsize_t numblocks, hsize_t *buf) const;
- // Get number of hyperslab blocks.
- hssize_t getSelectHyperNblocks() const;
+ // Get number of hyperslab blocks.
+ hssize_t getSelectHyperNblocks() const;
- // Gets the number of elements in this dataspace selection.
- hssize_t getSelectNpoints() const;
+ // Gets the number of elements in this dataspace selection.
+ hssize_t getSelectNpoints() const;
- // Retrieves dataspace dimension size and maximum size.
- int getSimpleExtentDims( hsize_t *dims, hsize_t *maxdims = NULL ) const;
+ // Retrieves dataspace dimension size and maximum size.
+ int getSimpleExtentDims(hsize_t *dims, hsize_t *maxdims = NULL) const;
- // Gets the dimensionality of this dataspace.
- int getSimpleExtentNdims() const;
+ // Gets the dimensionality of this dataspace.
+ int getSimpleExtentNdims() const;
- // Gets the number of elements in this dataspace.
- // 12/05/00 - changed return type to hssize_t from hsize_t - C API
- hssize_t getSimpleExtentNpoints() const;
+ // Gets the number of elements in this dataspace.
+ // 12/05/00 - changed return type to hssize_t from hsize_t - C API
+ hssize_t getSimpleExtentNpoints() const;
- // Gets the current class of this dataspace.
- H5S_class_t getSimpleExtentType() const;
+ // Gets the current class of this dataspace.
+ H5S_class_t getSimpleExtentType() const;
- // Determines if this dataspace is a simple one.
- bool isSimple() const;
+ // Determines if this dataspace is a simple one.
+ bool isSimple() const;
- // Sets the offset of this simple dataspace.
- void offsetSimple( const hssize_t* offset ) const;
+ // Sets the offset of this simple dataspace.
+ void offsetSimple(const hssize_t* offset) const;
- // Selects the entire dataspace.
- void selectAll() const;
+ // Selects the entire dataspace.
+ void selectAll() const;
- // Selects array elements to be included in the selection for
- // this dataspace.
- void selectElements( H5S_seloper_t op, const size_t num_elements, const hsize_t *coord) const;
+ // Selects array elements to be included in the selection for
+ // this dataspace.
+ void selectElements(H5S_seloper_t op, const size_t num_elements, const hsize_t *coord) const;
- // Selects a hyperslab region to add to the current selected region.
- void selectHyperslab( H5S_seloper_t op, const hsize_t *count, const hsize_t *start, const hsize_t *stride = NULL, const hsize_t *block = NULL ) const;
+ // Selects a hyperslab region to add to the current selected region.
+ void selectHyperslab(H5S_seloper_t op, const hsize_t *count, const hsize_t *start, const hsize_t *stride = NULL, const hsize_t *block = NULL) const;
- // Resets the selection region to include no elements.
- void selectNone() const;
+ // Resets the selection region to include no elements.
+ void selectNone() const;
- // Verifies that the selection is within the extent of the dataspace.
- bool selectValid() const;
+ // Verifies that the selection is within the extent of the dataspace.
+ bool selectValid() const;
- // Removes the extent from this dataspace.
- void setExtentNone() const;
+ // Removes the extent from this dataspace.
+ void setExtentNone() const;
- // Sets or resets the size of this dataspace.
- void setExtentSimple( int rank, const hsize_t *current_size, const hsize_t *maximum_size = NULL ) const;
+ // Sets or resets the size of this dataspace.
+ void setExtentSimple(int rank, const hsize_t *current_size, const hsize_t *maximum_size = NULL) const;
- ///\brief Returns this class name.
- virtual H5std_string fromClass () const { return("DataSpace"); }
+ ///\brief Returns this class name.
+ virtual H5std_string fromClass () const { return("DataSpace"); }
- // Gets the dataspace id.
- virtual hid_t getId() const;
+ // Gets the dataspace id.
+ virtual hid_t getId() const;
- // Deletes the global constant
- static void deleteConstants();
+ // Deletes the global constant
+ static void deleteConstants();
- // Destructor: properly terminates access to this dataspace.
- virtual ~DataSpace();
+ // Destructor: properly terminates access to this dataspace.
+ virtual ~DataSpace();
#ifndef DOXYGEN_SHOULD_SKIP_THIS
protected:
- // Sets the dataspace id.
- virtual void p_setId(const hid_t new_id);
+ // Sets the dataspace id.
+ virtual void p_setId(const hid_t new_id);
#endif // DOXYGEN_SHOULD_SKIP_THIS
private:
- hid_t id; // HDF5 dataspace id
+ hid_t id; // HDF5 dataspace id
#ifndef DOXYGEN_SHOULD_SKIP_THIS
- static DataSpace* ALL_;
+ static DataSpace* ALL_;
- // Creates the global constant
- static DataSpace* getConstant();
+ // Creates the global constant
+ static DataSpace* getConstant();
- // Friend function to set DataSpace id. For library use only.
- friend void f_DataSpace_setId(DataSpace *dspace, hid_t new_id);
+ // Friend function to set DataSpace id. For library use only.
+ friend void f_DataSpace_setId(DataSpace *dspace, hid_t new_id);
#endif // DOXYGEN_SHOULD_SKIP_THIS
-};
-}
+
+}; // end of DataSpace
+} // namespace H5
+
#endif // __H5DataSpace_H
diff --git a/c++/src/H5DataType.cpp b/c++/src/H5DataType.cpp
index 7dcd343..57f9361 100644
--- a/c++/src/H5DataType.cpp
+++ b/c++/src/H5DataType.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifdef OLD_HEADER_FILENAME
@@ -23,13 +21,14 @@
#include "H5Include.h"
#include "H5Exception.h"
#include "H5IdComponent.h"
+#include "H5DataSpace.h"
#include "H5PropList.h"
#include "H5FaccProp.h"
#include "H5FcreatProp.h"
#include "H5OcreatProp.h"
#include "H5DcreatProp.h"
#include "H5DxferProp.h"
-#include "H5DataSpace.h"
+#include "H5LaccProp.h"
#include "H5Location.h"
#include "H5Object.h"
#include "H5DataType.h"
@@ -38,8 +37,6 @@
#include "H5private.h"
#include "H5AbstractDs.h"
#include "H5DataSet.h"
-#include "H5Group.h"
-#include "H5File.h"
#include "H5Attribute.h"
namespace H5 {
@@ -47,24 +44,24 @@ using std::cerr;
using std::endl;
//--------------------------------------------------------------------------
-// Function: DataType default constructor
-///\brief Default constructor: Creates a stub datatype
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataType default constructor
+///\brief Default constructor: Creates a stub datatype
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
DataType::DataType() : H5Object(), id(H5I_INVALID_HID) {}
//--------------------------------------------------------------------------
-// Function: DataType overloaded constructor
-///\brief Creates a datatype using an existing datatype's id
-///\param existing_id - IN: Id of the existing datatype
+// Function: DataType overloaded constructor
+///\brief Creates a datatype using an existing datatype's id
+///\param existing_id - IN: Id of the existing datatype
// Description
-// Constructor creates a copy of an existing DataType using
-// its id.
-// Programmer Binh-Minh Ribler - 2000
+// Constructor creates a copy of an existing DataType using
+// its id.
+// Programmer Binh-Minh Ribler - 2000
// Modification
-// Dec, 2005
-// Removed second argument, "predefined", after changing to the
-// new ref counting mechanism that relies on C's ref counting.
+// Dec, 2005
+// Removed second argument, "predefined", after changing to the
+// new ref counting mechanism that relies on C's ref counting.
//--------------------------------------------------------------------------
DataType::DataType(const hid_t existing_id) : H5Object(), id(existing_id)
{
@@ -72,36 +69,36 @@ DataType::DataType(const hid_t existing_id) : H5Object(), id(existing_id)
}
//--------------------------------------------------------------------------
-// Function: DataType overloaded constructor
-///\brief Creates a object given its class and size
-///\param type_class - IN: Class of datatype to create
-///\param size - IN: Number of bytes in the datatype to create
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataType overloaded constructor
+///\brief       Creates a datatype object given its class and size
+///\param type_class - IN: Class of datatype to create
+///\param size - IN: Number of bytes in the datatype to create
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-DataType::DataType( const H5T_class_t type_class, size_t size ) : H5Object()
+DataType::DataType(const H5T_class_t type_class, size_t size) : H5Object()
{
- // Call C routine to create the new datatype
- id = H5Tcreate( type_class, size );
- if( id < 0 )
- {
- throw DataTypeIException("DataType constructor", "H5Tcreate failed");
- }
+ // Call C routine to create the new datatype
+ id = H5Tcreate(type_class, size);
+ if (id < 0)
+ {
+ throw DataTypeIException("DataType constructor", "H5Tcreate failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: DataType overload constructor - dereference
-///\brief Given a reference, ref, to an hdf5 group, creates a
-/// DataType object
+// Function: DataType overload constructor - dereference
+///\brief Given a reference, ref, to an hdf5 group, creates a
+/// DataType object
///\param loc - IN: Location referenced object is in
-///\param ref - IN: Reference pointer
-///\param ref_type - IN: Reference type - default to H5R_OBJECT
-///\param plist - IN: Property list - default to PropList::DEFAULT
-///\exception H5::ReferenceException
-// Programmer Binh-Minh Ribler - Oct, 2006
+///\param ref - IN: Reference pointer
+///\param ref_type - IN: Reference type - default to H5R_OBJECT
+///\param plist - IN: Property list - default to PropList::DEFAULT
+///\exception H5::ReferenceException
+// Programmer Binh-Minh Ribler - Oct, 2006
// Modification
-// Jul, 2008
-// Added for application convenience.
+// Jul, 2008
+// Added for application convenience.
//--------------------------------------------------------------------------
DataType::DataType(const H5Location& loc, const void* ref, H5R_type_t ref_type, const PropList& plist) : H5Object()
{
@@ -109,28 +106,29 @@ DataType::DataType(const H5Location& loc, const void* ref, H5R_type_t ref_type,
}
//--------------------------------------------------------------------------
-// Function: DataType overload constructor - dereference
-///\brief Given a reference, ref, to an hdf5 group, creates a
-/// DataType object
-///\param attr - IN: Specifying location where the referenced object is in
-///\param ref - IN: Reference pointer
-///\param ref_type - IN: Reference type - default to H5R_OBJECT
-///\param plist - IN: Property list - default to PropList::DEFAULT
-///\exception H5::ReferenceException
-// Programmer Binh-Minh Ribler - Oct, 2006
+// Function: DataType overload constructor - dereference
+// brief Given a reference, ref, to an hdf5 group, creates a
+// DataType object
+// param attr - IN: Specifying location where the referenced object is in
+// param ref - IN: Reference pointer
+// param ref_type - IN: Reference type - default to H5R_OBJECT
+// param plist - IN: Property list - default to PropList::DEFAULT
+// exception H5::ReferenceException
+// Programmer Binh-Minh Ribler - Oct, 2006
// Modification
-// Jul, 2008
-// Added for application convenience.
+// Jul, 2008
+// Added for application convenience.
//--------------------------------------------------------------------------
-DataType::DataType(const Attribute& attr, const void* ref, H5R_type_t ref_type, const PropList& plist) : H5Object(), id(H5I_INVALID_HID)
+ /* DataType::DataType(const Attribute& attr, const void* ref, H5R_type_t ref_type, const PropList& plist) : H5Object(), id(H5I_INVALID_HID)
{
id = H5Location::p_dereference(attr.getId(), ref, ref_type, plist, "constructor - by dereference");
}
+ */
//--------------------------------------------------------------------------
-// Function: DataType copy constructor
-///\brief Copy constructor: makes a copy of the original DataType object
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataType copy constructor
+///\brief Copy constructor: makes a copy of the original DataType object
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
DataType::DataType(const DataType& original) : H5Object(), id(original.id)
{
@@ -144,174 +142,212 @@ DataType::DataType(const DataType& original) : H5Object(), id(original.id)
///\exception H5::DataTypeIException
// Programmer Binh-Minh Ribler - 2015
// Description
-// Copying the type so that when a predefined type is passed in,
-// a copy of it is made, not just a duplicate of the HDF5 id.
-// Note: calling DataType::copy will invoke DataType::close()
-// unnecessarily and will produce undefined behavior.
-// -BMR, Apr 2015
+// Copying the type so that when a predefined type is passed in,
+// a copy of it is made, not just a duplicate of the HDF5 id.
+// Note: calling DataType::copy will invoke DataType::close()
+// unnecessarily and will produce undefined behavior.
+// -BMR, Apr 2015
//--------------------------------------------------------------------------
DataType::DataType(const PredType& pred_type) : H5Object()
{
- // call C routine to copy the datatype
- id = H5Tcopy( pred_type.getId() );
+ // Call C routine to copy the datatype
+ id = H5Tcopy(pred_type.getId());
if (id < 0)
- throw DataTypeIException("DataType constructor", "H5Tcopy failed");
+ throw DataTypeIException("DataType constructor", "H5Tcopy failed");
+}
+
+//--------------------------------------------------------------------------
+// Function: DataType overloaded constructor
+///\brief Creates a DataType instance by opening an HDF5 datatype given
+/// its name as a char*.
+///\param loc - IN: Location of the type
+///\param dtype_name - IN: Datatype name
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - Dec 2016
+// Description
+// In 1.10.1, this constructor was introduced and may replace the
+// existing function CommonFG::openDataType(const char*) to
+// improve usability.
+// -BMR, Dec 2016
+//--------------------------------------------------------------------------
+DataType::DataType(const H5Location& loc, const char *dtype_name) : H5Object()
+{
+ id = p_opentype(loc, dtype_name);
+}
+
+//--------------------------------------------------------------------------
+// Function: DataType overloaded constructor
+///\brief Creates a DataType instance by opening an HDF5 datatype given
+/// its name as an \c H5std_string.
+///\param loc - IN: Location of the type
+///\param dtype_name - IN: Datatype name
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - Dec 2016
+// Description
+// In 1.10.1, this constructor was introduced and may replace the
+// existing function CommonFG::openDataType(const H5std_string&) to
+// improve usability.
+// -BMR, Dec 2016
+//--------------------------------------------------------------------------
+DataType::DataType(const H5Location& loc, const H5std_string& dtype_name) : H5Object()
+{
+ id = p_opentype(loc, dtype_name.c_str());
}
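Illustrative use of the constructors added above (the file name and datatype path are hypothetical): a committed datatype is opened directly instead of going through CommonFG::openDataType.

    H5::H5File file("sample.h5", H5F_ACC_RDONLY);
    H5::DataType dtype(file, "/shared/int_type");          // opens the named datatype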
//--------------------------------------------------------------------------
-// Function: DataType::copy
-///\brief Copies an existing datatype to this datatype object
-///\param like_type - IN: Datatype to be copied
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataType::copy
+///\brief Copies an existing datatype to this datatype object
+///\param like_type - IN: Datatype to be copied
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
// Modification
-// - Replaced resetIdComponent() with decRefCount() to use C
-// library ID reference counting mechanism - BMR, Jun 1, 2004
-// - Replaced decRefCount with close() to let the C library
-// handle the reference counting - BMR, Jun 1, 2006
+// - Replaced resetIdComponent() with decRefCount() to use C
+// library ID reference counting mechanism - BMR, Jun 1, 2004
+// - Replaced decRefCount with close() to let the C library
+// handle the reference counting - BMR, Jun 1, 2006
//--------------------------------------------------------------------------
-void DataType::copy( const DataType& like_type )
+void DataType::copy(const DataType& like_type)
{
// close the current data type before copying like_type to this object
try {
- close();
+ close();
}
catch (Exception& close_error) {
- throw DataTypeIException(inMemFunc("copy"), close_error.getDetailMsg());
+ throw DataTypeIException(inMemFunc("copy"), close_error.getDetailMsg());
}
// call C routine to copy the datatype
- id = H5Tcopy( like_type.getId() );
- if( id < 0 )
- throw DataTypeIException(inMemFunc("copy"), "H5Tcopy failed");
+ id = H5Tcopy(like_type.getId());
+ if (id < 0)
+ throw DataTypeIException(inMemFunc("copy"), "H5Tcopy failed");
}
//--------------------------------------------------------------------------
-// Function: DataType::copy
-///\brief Copies the datatype of the given dataset to this datatype object
-///\param dset - IN: Dataset
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - Jan, 2007
+// Function: DataType::copy
+///\brief Copies the datatype of the given dataset to this datatype object
+///\param dset - IN: Dataset
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - Jan, 2007
///\par Description
-/// The resulted dataset will be transient and modifiable.
+///             The resulting datatype will be transient and modifiable.
//--------------------------------------------------------------------------
void DataType::copy(const DataSet& dset)
{
// close the current data type before copying dset's datatype to this object
try {
- close();
+ close();
}
catch (Exception& close_error) {
- throw DataTypeIException(inMemFunc("copy"), close_error.getDetailMsg());
+ throw DataTypeIException(inMemFunc("copy"), close_error.getDetailMsg());
}
// call C routine to copy the datatype
- id = H5Tcopy( dset.getId() );
- if( id < 0 )
- throw DataTypeIException(inMemFunc("copy"), "H5Tcopy failed");
+ id = H5Tcopy(dset.getId());
+ if (id < 0)
+ throw DataTypeIException(inMemFunc("copy"), "H5Tcopy failed");
}
//--------------------------------------------------------------------------
-// Function: DataType::operator=
-///\brief Assignment operator
-///\param rhs - IN: Reference to the existing datatype
-///\return Reference to DataType instance
-///\exception H5::DataTypeIException
+// Function: DataType::operator=
+///\brief Assignment operator
+///\param rhs - IN: Reference to the existing datatype
+///\return Reference to DataType instance
+///\exception H5::DataTypeIException
// Description
-// Makes a copy of the type on the right hand side and stores
-// the new id in the left hand side object.
-// Programmer Binh-Minh Ribler - 2000
+// Makes a copy of the type on the right hand side and stores
+// the new id in the left hand side object.
+// Programmer Binh-Minh Ribler - 2000
// Modification
-// Changed operator= to simply copy the id of rhs instead of
-// calling H5Tcopy because, when the operator= is invoked, a
-// different datatype id is created and it won't have the same
-// characteristics as the original one, specifically, if the
-// rhs represents a named datatype, "this" would still be a
-// transient datatype.
-// BMR - Mar, 2015
+// Changed operator= to simply copy the id of rhs instead of
+// calling H5Tcopy because, when the operator= is invoked, a
+// different datatype id is created and it won't have the same
+// characteristics as the original one, specifically, if the
+// rhs represents a named datatype, "this" would still be a
+// transient datatype.
+// BMR - Mar, 2015
//--------------------------------------------------------------------------
-DataType& DataType::operator=( const DataType& rhs )
+DataType& DataType::operator=(const DataType& rhs)
{
if (this != &rhs)
{
- setId(rhs.id);
+ setId(rhs.id);
}
return(*this);
}
//--------------------------------------------------------------------------
-// Function: DataType::operator==
-///\brief Compares this DataType against the given one to determines
-/// whether the two objects refer to the same actual datatype.
-///\param compared_type - IN: Reference to the datatype to compare
-///\return true if the datatypes are equal, and false, otherwise.
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataType::operator==
+///\brief       Compares this DataType against the given one to determine
+///             whether the two objects refer to the same actual datatype.
+///\param compared_type - IN: Reference to the datatype to compare
+///\return true if the datatypes are equal, and false, otherwise.
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-bool DataType::operator==(const DataType& compared_type ) const
+bool DataType::operator==(const DataType& compared_type) const
{
- // Call C routine H5Tequal to determines whether two datatype
- // identifiers refer to the same datatype
- htri_t ret_value = H5Tequal( id, compared_type.getId() );
- if( ret_value > 0 )
- return true;
- else if( ret_value == 0 )
- return false;
- else
- {
- throw DataTypeIException(inMemFunc("operator=="), "H5Tequal returns negative value");
- }
+    // Call C routine H5Tequal to determine whether two datatype
+    // identifiers refer to the same datatype
+ htri_t ret_value = H5Tequal(id, compared_type.getId());
+ if (ret_value > 0)
+ return true;
+ else if (ret_value == 0)
+ return false;
+ else
+ {
+ throw DataTypeIException(inMemFunc("operator=="), "H5Tequal returns negative value");
+ }
}
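Illustrative comparison (the DataSet object dset is hypothetical): because PredType derives from DataType, a dataset's type can be compared directly against a predefined type.

    H5::DataType dtype = dset.getDataType();
    bool is_native_int = (dtype == H5::PredType::NATIVE_INT);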
//--------------------------------------------------------------------------
-// Function: DataType::p_commit (private)
-//\brief Commits a transient datatype to a file, creating a new
-// named datatype
-//\param loc_id - IN: The id of either a file, group, dataset, named
-// datatype, or attribute.
-//\param name - IN: Name of the datatype
-//\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataType::p_commit (private)
+//\brief Commits a transient datatype to a file, creating a new
+// named datatype
+//\param loc_id - IN: The id of either a file, group, dataset, named
+// datatype, or attribute.
+//\param name - IN: Name of the datatype
+//\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
// Modification:
-// Copied from DataType::commit and made into private function
-// to be commonly used by several overloads of DataType::commit.
-// BMR - Jan, 2007
+// Copied from DataType::commit and made into private function
+// to be commonly used by several overloads of DataType::commit.
+// BMR - Jan, 2007
//--------------------------------------------------------------------------
void DataType::p_commit(hid_t loc_id, const char* name)
{
- // Call C routine to commit the transient datatype
- herr_t ret_value = H5Tcommit2(loc_id, name, id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
- if( ret_value < 0 )
- throw DataTypeIException(inMemFunc("p_commit"), "H5Tcommit2 failed");
+ // Call C routine to commit the transient datatype
+ herr_t ret_value = H5Tcommit2(loc_id, name, id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ if (ret_value < 0)
+ throw DataTypeIException(inMemFunc("p_commit"), "H5Tcommit2 failed");
}
//--------------------------------------------------------------------------
-// Function: DataType::commit
-///\brief Commits a transient datatype to a file, creating a new
-/// named datatype
-///\param loc - IN: A location (file, dataset, datatype, or group)
-///\param name - IN: Name of the datatype
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - Jan, 2007
+// Function: DataType::commit
+///\brief Commits a transient datatype to a file, creating a new
+/// named datatype
+///\param loc - IN: A location (file, dataset, datatype, or group)
+///\param name - IN: Name of the datatype
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - Jan, 2007
//--------------------------------------------------------------------------
void DataType::commit(const H5Location& loc, const char* name)
{
- p_commit(loc.getId(), name);
+ p_commit(loc.getId(), name);
}
//--------------------------------------------------------------------------
-// Function: DataType::commit
-// Purpose This is an overloaded member function, kept for backward
-// compatibility. It differs from the above function in that it
-// misses const's. This wrapper will be removed in future release.
-// Param loc - IN: A location (file, dataset, datatype, or group)
-// Param name - IN: Name of the datatype
-// Exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - Jan, 2007
+// Function: DataType::commit
+// Purpose This is an overloaded member function, kept for backward
+// compatibility. It differs from the above function in that it
+//              lacks the const qualifiers.  This wrapper will be removed in a future release.
+// Param loc - IN: A location (file, dataset, datatype, or group)
+// Param name - IN: Name of the datatype
+// Exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - Jan, 2007
// Modification
-// Planned for removal. -BMR, 2014/04/16
-// Removed from documentation. -BMR, 2016/03/07 1.8.17 and 1.10.0
-// Removed from code. -BMR, 2016/08/11 1.8.18 and 1.10.1
+// Planned for removal. -BMR, 2014/04/16
+// Removed from documentation. -BMR, 2016/03/07 1.8.17 and 1.10.0
+// Removed from code. -BMR, 2016/08/11 1.8.18 and 1.10.1
//--------------------------------------------------------------------------
//void DataType::commit(H5Location& loc, const char* name)
//{
@@ -319,30 +355,30 @@ void DataType::commit(const H5Location& loc, const char* name)
//}
//--------------------------------------------------------------------------
-// Function: DataType::commit
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function only in the type of the
-/// argument \a name.
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataType::commit
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function only in the type of the
+/// argument \a name.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
void DataType::commit(const H5Location& loc, const H5std_string& name)
{
- p_commit(loc.getId(), name.c_str());
+ p_commit(loc.getId(), name.c_str());
}
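As a hedged usage sketch (not from this diff), committing a transient copy of a predefined type as a named datatype with the commit() overloads above; the file name "types.h5" and helper name are hypothetical.

    #include "H5Cpp.h"
    using namespace H5;

    void save_named_type()
    {
        H5File  file("types.h5", H5F_ACC_TRUNC);  // hypothetical file
        IntType itype(PredType::NATIVE_IINT);
        itype.commit(file, "native_int_copy");    // wraps H5Tcommit2
        bool named = itype.committed();           // true once committed
    }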
//--------------------------------------------------------------------------
-// Function: DataType::commit
-// Purpose This is an overloaded member function, kept for backward
-// compatibility. It differs from the above function in that it
-// misses const's. This wrapper will be removed in future release.
-// Param loc - IN: A location (file, dataset, datatype, or group)
-// Param name - IN: Name of the datatype
-// Exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - Jan, 2007
+// Function: DataType::commit
+// Purpose This is an overloaded member function, kept for backward
+// compatibility. It differs from the above function in that it
+//              lacks the const qualifiers.  This wrapper will be removed in a future release.
+// Param loc - IN: A location (file, dataset, datatype, or group)
+// Param name - IN: Name of the datatype
+// Exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - Jan, 2007
// Modification
-// Planned for removal. -BMR, 2014/04/16
-// Removed from documentation. -BMR, 2016/03/07 1.8.17 and 1.10.0
-// Removed from code. -BMR, 2016/08/11 1.8.18 and 1.10.1
+// Planned for removal. -BMR, 2014/04/16
+// Removed from documentation. -BMR, 2016/03/07 1.8.17 and 1.10.0
+// Removed from code. -BMR, 2016/08/11 1.8.18 and 1.10.1
//--------------------------------------------------------------------------
//void DataType::commit(H5Location& loc, const H5std_string& name)
//{
@@ -350,340 +386,365 @@ void DataType::commit(const H5Location& loc, const H5std_string& name)
//}
//--------------------------------------------------------------------------
-// Function: DataType::committed
-///\brief Determines whether a datatype is a named type or a
-/// transient type.
-///\return \c true if the datatype is a named type, and \c false,
-/// otherwise.
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataType::committed
+///\brief Determines whether a datatype is a named type or a
+/// transient type.
+///\return \c true if the datatype is a named type, and \c false,
+/// otherwise.
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
bool DataType::committed() const
{
- // Call C function to determine if a datatype is a named one
- htri_t is_committed = H5Tcommitted( id );
- if (is_committed > 0)
- return true;
- else if (is_committed == 0)
- return false;
- else
- {
- throw DataTypeIException(inMemFunc("committed"), "H5Tcommitted return negative value");
- }
-}
-
-//--------------------------------------------------------------------------
-// Function: DataType::find
-///\brief Finds a conversion function that can handle a conversion
-/// from this datatype to the specified datatype, \a dest.
-///\param dest - IN: Destination datatype
-///\param pcdata - IN: Pointer to type conversion data
-///\return Pointer to a suitable conversion function
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
-//--------------------------------------------------------------------------
-H5T_conv_t DataType::find( const DataType& dest, H5T_cdata_t **pcdata ) const
-{
- // Call C routine to find the conversion function
- H5T_conv_t func = H5Tfind( id, dest.getId(), pcdata );
- if( func == NULL )
- {
- throw DataTypeIException(inMemFunc("find"), "H5Tfind returns a NULL function");
- }
- return( func );
-}
-
-//--------------------------------------------------------------------------
-// Function: DataType::convert
-///\brief Converts data from this datatype to the specified datatypes.
-///\param dest - IN: Destination datatype
-///\param nelmts - IN: Size of array \a buf
-///\param buf - IN/OUT: Array containing pre- and post-conversion
-/// values
-///\param background - IN: Optional backgroud buffer
-///\param plist - IN: Property list - default to PropList::DEFAULT
-///\return Pointer to a suitable conversion function
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
-//--------------------------------------------------------------------------
-void DataType::convert( const DataType& dest, size_t nelmts, void *buf, void *background, const PropList& plist ) const
-{
- // Get identifiers for C API
- hid_t dest_id = dest.getId();
- hid_t plist_id = plist.getId();
-
- // Call C routine H5Tconvert to convert the data
- herr_t ret_value;
- ret_value = H5Tconvert( id, dest_id, nelmts, buf, background, plist_id );
- if( ret_value < 0 )
- {
- throw DataTypeIException(inMemFunc("convert"), "H5Tconvert failed");
- }
-}
-
-//--------------------------------------------------------------------------
-// Function: DataType::lock
-///\brief Locks a datatype, making it read-only and non-destructible.
+ // Call C function to determine if a datatype is a named one
+ htri_t is_committed = H5Tcommitted(id);
+ if (is_committed > 0)
+ return true;
+ else if (is_committed == 0)
+ return false;
+ else
+ {
+ throw DataTypeIException(inMemFunc("committed"), "H5Tcommitted return negative value");
+ }
+}
+
+//--------------------------------------------------------------------------
+// Function: DataType::find
+///\brief Finds a conversion function that can handle a conversion
+/// from this datatype to the specified datatype, \a dest.
+///\param dest - IN: Destination datatype
+///\param pcdata - IN: Pointer to type conversion data
+///\return Pointer to a suitable conversion function
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
+//--------------------------------------------------------------------------
+H5T_conv_t DataType::find(const DataType& dest, H5T_cdata_t **pcdata) const
+{
+ // Call C routine to find the conversion function
+ H5T_conv_t func = H5Tfind(id, dest.getId(), pcdata);
+ if (func == NULL)
+ {
+ throw DataTypeIException(inMemFunc("find"), "H5Tfind returns a NULL function");
+ }
+ return(func);
+}
+
+//--------------------------------------------------------------------------
+// Function: DataType::convert
+///\brief       Converts data from this datatype to the specified datatype.
+///\param dest - IN: Destination datatype
+///\param nelmts - IN: Size of array \a buf
+///\param buf - IN/OUT: Array containing pre- and post-conversion
+/// values
+///\param       background - IN: Optional background buffer
+///\param plist - IN: Property list - default to PropList::DEFAULT
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
+//--------------------------------------------------------------------------
+void DataType::convert(const DataType& dest, size_t nelmts, void *buf, void *background, const PropList& plist) const
+{
+ // Get identifiers for C API
+ hid_t dest_id = dest.getId();
+ hid_t plist_id = plist.getId();
+
+ // Call C routine H5Tconvert to convert the data
+ herr_t ret_value;
+ ret_value = H5Tconvert(id, dest_id, nelmts, buf, background, plist_id);
+ if (ret_value < 0)
+ {
+ throw DataTypeIException(inMemFunc("convert"), "H5Tconvert failed");
+ }
+}
+
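Illustrative sketch of DataType::convert, not part of this diff; the helper name and buffer contents are assumptions. The buffer is sized for the larger (destination) type, with the source values packed at its start, as H5Tconvert expects.

    #include <cstring>
    #include "H5Cpp.h"
    using namespace H5;

    void convert_ints_to_doubles()
    {
        double buf[4];
        int    ivals[4] = {1, 2, 3, 4};
        std::memcpy(buf, ivals, sizeof(ivals));   // pack source values at the start

        DataType src(PredType::NATIVE_INT);
        src.convert(PredType::NATIVE_DOUBLE, 4, buf, NULL);
        // buf now holds 1.0, 2.0, 3.0, 4.0
    }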
+//--------------------------------------------------------------------------
+// Function: DataType::lock
+///\brief Locks a datatype, making it read-only and non-destructible.
///
-///\exception H5::DataTypeIException
+///\exception H5::DataTypeIException
 ///\par Description
-/// This is normally done by the library for predefined data
-/// types so the application doesn't inadvertently change or
-/// delete a predefined type.
+/// This is normally done by the library for predefined data
+/// types so the application doesn't inadvertently change or
+/// delete a predefined type.
///
-/// Once a data type is locked it can never be unlocked unless
-/// the entire library is closed.
-// Programmer Binh-Minh Ribler - 2000
+/// Once a data type is locked it can never be unlocked unless
+/// the entire library is closed.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
void DataType::lock() const
{
- // Call C routine to lock the datatype
- herr_t ret_value = H5Tlock( id );
- if( ret_value < 0 )
- {
- throw DataTypeIException(inMemFunc("lock"), "H5Tlock failed");
- }
+ // Call C routine to lock the datatype
+ herr_t ret_value = H5Tlock(id);
+ if (ret_value < 0)
+ {
+ throw DataTypeIException(inMemFunc("lock"), "H5Tlock failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: DataType::getClass
-///\brief Returns the datatype class identifier.
-///\return Datatype class identifier
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataType::getClass
+///\brief Returns the datatype class identifier.
+///\return Datatype class identifier
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
H5T_class_t DataType::getClass() const
{
- H5T_class_t type_class = H5Tget_class( id );
+ H5T_class_t type_class = H5Tget_class(id);
- // Return datatype class identifier if successful
- if( type_class == H5T_NO_CLASS )
- {
- throw DataTypeIException(inMemFunc("getClass"), "H5Tget_class returns H5T_NO_CLASS");
- }
- return( type_class );
+ // Return datatype class identifier if successful
+ if (type_class == H5T_NO_CLASS)
+ {
+ throw DataTypeIException(inMemFunc("getClass"), "H5Tget_class returns H5T_NO_CLASS");
+ }
+ return(type_class);
}
//--------------------------------------------------------------------------
-// Function: DataType::getSize
-///\brief Returns the size of a datatype.
-///\return Datatype size in bytes
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataType::getSize
+///\brief Returns the size of a datatype.
+///\return Datatype size in bytes
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
size_t DataType::getSize() const
{
- // Call C routine to get the datatype size
- size_t type_size = H5Tget_size( id );
- if( type_size <= 0 ) // valid data types are never zero size
- {
- throw DataTypeIException(inMemFunc("getSize"), "H5Tget_size returns invalid datatype size");
- }
- return( type_size );
+ // Call C routine to get the datatype size
+ size_t type_size = H5Tget_size(id);
+ if (type_size <= 0) // valid data types are never zero size
+ {
+ throw DataTypeIException(inMemFunc("getSize"), "H5Tget_size returns invalid datatype size");
+ }
+ return(type_size);
}
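A brief, hypothetical illustration (not from this changeset) of querying a type's class and size with the two accessors above:

    #include <iostream>
    #include "H5Cpp.h"
    using namespace H5;

    void inspect_type()
    {
        DataType dtype(PredType::NATIVE_DOUBLE);
        if (dtype.getClass() == H5T_FLOAT)
            std::cout << "float type, " << dtype.getSize() << " bytes" << std::endl;
    }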
//--------------------------------------------------------------------------
-// Function: DataType::getSuper
-///\brief Returns the base datatype from which a datatype is derived.
-///\return DataType object
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataType::getSuper
+///\brief Returns the base datatype from which a datatype is derived.
+///\return DataType object
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
DataType DataType::getSuper() const
{
- // Call C routine to get the base datatype from which the specified
- // datatype is derived.
- hid_t base_type_id = H5Tget_super( id );
-
- // If H5Tget_super returns a valid datatype id, create and return
- // the base type, otherwise, raise exception
- if( base_type_id > 0 )
- {
- DataType base_type;
- base_type.p_setId(base_type_id);
- return(base_type);
- }
- else
- {
- throw DataTypeIException(inMemFunc("getSuper"), "H5Tget_super failed");
- }
-}
-
-//--------------------------------------------------------------------------
-// Function: DataType::registerFunc
-///\brief Registers the specified conversion function.
-///\param pers - IN: Conversion option
-/// \li \c H5T_PERS_HARD for hard conversion functions
-/// \li \c H5T_PERS_SOFT for soft conversion functions.
-///\param name - IN: Name displayed in diagnostic output.
-///\param dest - IN: Destination datatype.
-///\param func - IN: Function to convert between source and
-/// destination datatypes.
-///\exception H5::DataTypeIException
+ // Call C routine to get the base datatype from which the specified
+ // datatype is derived.
+ hid_t base_type_id = H5Tget_super(id);
+
+ // If H5Tget_super returns a valid datatype id, create and return
+ // the base type, otherwise, raise exception
+ if (base_type_id > 0)
+ {
+ DataType base_type;
+ base_type.p_setId(base_type_id);
+ return(base_type);
+ }
+ else
+ {
+ throw DataTypeIException(inMemFunc("getSuper"), "H5Tget_super failed");
+ }
+}
+
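Sketch only, not part of this diff: getSuper() applied to an enum type returns its base integer type; the helper name is an assumption.

    #include "H5Cpp.h"
    using namespace H5;

    void enum_base_type()
    {
        IntType  itype(PredType::NATIVE_INT);
        EnumType etype(itype);                 // enum derived from the int type
        DataType base = etype.getSuper();      // base integer type of the enum
        bool is_int = (base.getClass() == H5T_INTEGER);
    }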
+//--------------------------------------------------------------------------
+// Function: DataType::registerFunc
+///\brief Registers the specified conversion function.
+///\param pers - IN: Conversion option
+/// \li \c H5T_PERS_HARD for hard conversion functions
+/// \li \c H5T_PERS_SOFT for soft conversion functions.
+///\param name - IN: Name displayed in diagnostic output.
+///\param dest - IN: Destination datatype.
+///\param func - IN: Function to convert between source and
+/// destination datatypes.
+///\exception H5::DataTypeIException
///\par Description
-/// For more information, please see:
+/// For more information, please see:
/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5T.html#Datatype-Register
-// Programmer Binh-Minh Ribler - 2000
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void DataType::registerFunc( H5T_pers_t pers, const char* name, const DataType& dest, H5T_conv_t func ) const
+void DataType::registerFunc(H5T_pers_t pers, const char* name, const DataType& dest, H5T_conv_t func) const
{
- hid_t dest_id = dest.getId(); // get id of the destination datatype
+ hid_t dest_id = dest.getId(); // get id of the destination datatype
- // Call C routine H5Tregister to register the conversion function
- herr_t ret_value = H5Tregister( pers, name, id, dest_id, func );
- if( ret_value < 0 )
- {
- throw DataTypeIException(inMemFunc("registerFunc"), "H5Tregister failed");
- }
+ // Call C routine H5Tregister to register the conversion function
+ herr_t ret_value = H5Tregister(pers, name, id, dest_id, func);
+ if (ret_value < 0)
+ {
+ throw DataTypeIException(inMemFunc("registerFunc"), "H5Tregister failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: DataType::registerFunc
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function only in the type of the
-/// argument \a name.
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataType::registerFunc
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function only in the type of the
+/// argument \a name.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void DataType::registerFunc( H5T_pers_t pers, const H5std_string& name, const DataType& dest, H5T_conv_t func ) const
+void DataType::registerFunc(H5T_pers_t pers, const H5std_string& name, const DataType& dest, H5T_conv_t func) const
{
- registerFunc( pers, name.c_str(), dest, func );
+ registerFunc(pers, name.c_str(), dest, func);
}
//--------------------------------------------------------------------------
-// Function: DataType::unregister
-///\brief Removes a conversion function from all conversion paths.
-///\param pers - IN: Conversion option
-/// \li \c H5T_PERS_HARD for hard conversion functions
-/// \li \c H5T_PERS_SOFT for soft conversion functions.
-///\param name - IN: Name displayed in diagnostic output.
-///\param dest - IN: Destination datatype.
-///\param func - IN: Function to convert between source and
-/// destination datatypes.
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataType::unregister
+///\brief Removes a conversion function from all conversion paths.
+///\param pers - IN: Conversion option
+/// \li \c H5T_PERS_HARD for hard conversion functions
+/// \li \c H5T_PERS_SOFT for soft conversion functions.
+///\param name - IN: Name displayed in diagnostic output.
+///\param dest - IN: Destination datatype.
+///\param func - IN: Function to convert between source and
+/// destination datatypes.
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void DataType::unregister( H5T_pers_t pers, const char* name, const DataType& dest, H5T_conv_t func ) const
+void DataType::unregister(H5T_pers_t pers, const char* name, const DataType& dest, H5T_conv_t func) const
{
- hid_t dest_id = dest.getId(); // get id of the dest datatype for C API
+ hid_t dest_id = dest.getId(); // get id of the dest datatype for C API
- // Call C routine H5Tunregister to remove the conversion function
- herr_t ret_value = H5Tunregister( pers, name, id, dest_id, func );
- if( ret_value < 0 )
- {
- throw DataTypeIException(inMemFunc("unregister"), "H5Tunregister failed");
- }
+ // Call C routine H5Tunregister to remove the conversion function
+ herr_t ret_value = H5Tunregister(pers, name, id, dest_id, func);
+ if (ret_value < 0)
+ {
+ throw DataTypeIException(inMemFunc("unregister"), "H5Tunregister failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: DataType::unregister
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function only in the type of the
-/// argument \a name.
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataType::unregister
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function only in the type of the
+/// argument \a name.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void DataType::unregister( H5T_pers_t pers, const H5std_string& name, const DataType& dest, H5T_conv_t func ) const
+void DataType::unregister(H5T_pers_t pers, const H5std_string& name, const DataType& dest, H5T_conv_t func) const
{
- unregister( pers, name.c_str(), dest, func );
+ unregister(pers, name.c_str(), dest, func);
}
//--------------------------------------------------------------------------
-// Function: DataType::setTag
-///\brief Tags an opaque datatype.
-///\param tag - IN: Descriptive ASCII string with which the opaque
-/// datatype is to be tagged.
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataType::setTag
+///\brief Tags an opaque datatype.
+///\param tag - IN: Descriptive ASCII string with which the opaque
+/// datatype is to be tagged.
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void DataType::setTag( const char* tag ) const
+void DataType::setTag(const char* tag) const
{
- // Call C routine H5Tset_tag to tag an opaque datatype.
- herr_t ret_value = H5Tset_tag( id, tag );
- if( ret_value < 0 )
- {
- throw DataTypeIException(inMemFunc("setTag"), "H5Tset_tag failed");
- }
+ // Call C routine H5Tset_tag to tag an opaque datatype.
+ herr_t ret_value = H5Tset_tag(id, tag);
+ if (ret_value < 0)
+ {
+ throw DataTypeIException(inMemFunc("setTag"), "H5Tset_tag failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: DataType::setTag
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function only in the type of the
-/// argument \a name.
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataType::setTag
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function only in the type of the
+/// argument \a name.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void DataType::setTag( const H5std_string& tag ) const
+void DataType::setTag(const H5std_string& tag) const
{
- setTag( tag.c_str());
+ setTag(tag.c_str());
}
//--------------------------------------------------------------------------
-// Function: DataType::getTag
-///\brief Gets the tag associated with an opaque datatype.
-///\return Tag associated with the opaque datatype
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataType::getTag
+///\brief Gets the tag associated with an opaque datatype.
+///\return Tag associated with the opaque datatype
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
H5std_string DataType::getTag() const
{
- char* tag_Cstr = H5Tget_tag( id );
+ char* tag_Cstr = H5Tget_tag(id);
// if the tag C-string returned is not NULL, convert it to C++ string
// and return it, otherwise, raise an exception
- if( tag_Cstr != NULL )
+ if (tag_Cstr != NULL)
{
- H5std_string tag = H5std_string(tag_Cstr); // C string to string object
- H5free_memory(tag_Cstr); // free the C string
- return (tag); // return the tag
+ H5std_string tag = H5std_string(tag_Cstr); // C string to string object
+ H5free_memory(tag_Cstr); // free the C string
+ return (tag); // return the tag
}
else
{
- throw DataTypeIException(inMemFunc("getTag"), "H5Tget_tag returns NULL for tag");
+ throw DataTypeIException(inMemFunc("getTag"), "H5Tget_tag returns NULL for tag");
}
}
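Illustrative sketch (not from this changeset) of tagging an opaque datatype and reading the tag back; the tag string and helper name are hypothetical.

    #include "H5Cpp.h"
    using namespace H5;

    void tag_opaque_type()
    {
        DataType opaque(H5T_OPAQUE, 16);       // 16-byte opaque datatype
        opaque.setTag("sensor blob v1");       // wraps H5Tset_tag
        H5std_string tag = opaque.getTag();    // wraps H5Tget_tag
    }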
//--------------------------------------------------------------------------
-// Function: DataType::detectClass
-///\brief Checks whether a datatype contains (or is) a certain type of
-/// datatype.
-///\return true if this datatype contains or is the specified type,
-/// and false, otherwise.
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - May, 2004
+// Function: DataType::detectClass
+///\brief Checks whether a datatype contains (or is) a certain type of
+/// datatype.
+///\return true if this datatype contains or is the specified type,
+/// and false, otherwise.
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - May, 2004
//--------------------------------------------------------------------------
bool DataType::detectClass(H5T_class_t cls) const
{
- htri_t ret_value = H5Tdetect_class(id, cls);
- if( ret_value > 0 )
- return true;
- else if( ret_value == 0 )
- return false;
- else
- {
- throw DataTypeIException(inMemFunc("detectClass"),
- "H5Tdetect_class returns negative value");
- }
+ htri_t ret_value = H5Tdetect_class(id, cls);
+ if (ret_value > 0)
+ return true;
+ else if (ret_value == 0)
+ return false;
+ else
+ {
+ throw DataTypeIException(inMemFunc("detectClass"),
+ "H5Tdetect_class returns negative value");
+ }
}
//--------------------------------------------------------------------------
-// Function: DataType::isVariableStr
-///\brief Check whether this datatype is a variable-length string.
-///\return true if this datatype is a variable-length string, and
-/// false, otherwise.
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - May, 2004
+// Function: DataType::isVariableStr
+///\brief       Checks whether this datatype is a variable-length string.
+///\return true if this datatype is a variable-length string, and
+/// false, otherwise.
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - May, 2004
//--------------------------------------------------------------------------
bool DataType::isVariableStr() const
{
- htri_t is_varlen_str = H5Tis_variable_str(id);
- if( is_varlen_str == 1 )
- return true;
- else if( is_varlen_str == 0 )
- return false;
- else
- {
- throw DataTypeIException(inMemFunc("isVariableStr"),
- "H5Tis_variable_str returns negative value");
- }
+ htri_t is_varlen_str = H5Tis_variable_str(id);
+ if (is_varlen_str == 1)
+ return true;
+ else if (is_varlen_str == 0)
+ return false;
+ else
+ {
+ throw DataTypeIException(inMemFunc("isVariableStr"),
+ "H5Tis_variable_str returns negative value");
+ }
+}
+
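A hedged usage sketch, not part of this diff: detecting the string class and variable-length storage on a StrType; the helper name is an assumption.

    #include "H5Cpp.h"
    using namespace H5;

    void check_vlen_string()
    {
        StrType vstr(PredType::C_S1, H5T_VARIABLE);    // variable-length string type
        bool is_string = vstr.detectClass(H5T_STRING); // expected: true
        bool is_vlen   = vstr.isVariableStr();         // expected: true
    }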
+//--------------------------------------------------------------------------
+// Function: DataType::getCreatePlist
+///\brief Returns a copy of the property list, which is for datatype
+/// creation.
+///\return A property list object
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - March, 2017
+// Description
+// Currently, there is no datatype creation property list class
+// in the C++ API because there is no associated functionality.
+//--------------------------------------------------------------------------
+PropList DataType::getCreatePlist() const
+{
+ hid_t create_plist_id = H5Tget_create_plist(id);
+ if (create_plist_id < 0)
+ {
+ throw DataTypeIException(inMemFunc("getCreatePlist"),
+ "H5Tget_create_plist returns negative value");
+ }
+    // create and return the PropList object
+ PropList create_plist;
+ f_PropList_setId(&create_plist, create_plist_id);
+ return(create_plist);
}
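Minimal sketch of the new accessor, not part of this diff; the function and parameter names are assumptions.

    #include "H5Cpp.h"
    using namespace H5;

    void query_type_creation_plist(const DataType& named_type)
    {
        // A generic PropList is returned; the C++ API has no
        // datatype-creation property list subclass.
        PropList tcpl = named_type.getCreatePlist();
    }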
//--------------------------------------------------------------------------
@@ -700,11 +761,31 @@ bool DataType::isVariableStr() const
//--------------------------------------------------------------------------
hid_t DataType::getId() const
{
- return(id);
+ return(id);
}
#ifndef DOXYGEN_SHOULD_SKIP_THIS
//--------------------------------------------------------------------------
+// Function: DataType::p_opentype (private)
+///\brief Opens an HDF5 datatype given its name
+///\param loc - IN: Location of the type
+///\param dtype_name - IN: Datatype name
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - Dec 2016
+// Description
+// This function was introduced in 1.10.1 to be used by the new
+// XxxType constructors that open a datatype. -BMR, Dec 2016
+//--------------------------------------------------------------------------
+hid_t DataType::p_opentype(const H5Location& loc, const char *dtype_name) const
+{
+ // Call C function to open the named datatype at this location
+ hid_t ret_value = H5Topen2(loc.getId(), dtype_name, H5P_DEFAULT);
+ if (ret_value < 0)
+ throw DataTypeIException("DataType constructor", "H5Topen2 failed");
+ return(ret_value);
+}
+
+//--------------------------------------------------------------------------
// Function: DataType::p_setId
///\brief Sets the identifier of this object to a new value.
///
@@ -725,58 +806,59 @@ void DataType::p_setId(const hid_t new_id)
catch (Exception& close_error) {
throw DataTypeIException(inMemFunc("p_setId"), close_error.getDetailMsg());
}
- // reset object's id to the given id
- id = new_id;
+ // reset object's id to the given id
+ id = new_id;
}
#endif // DOXYGEN_SHOULD_SKIP_THIS
//--------------------------------------------------------------------------
-// Function: DataType::close
-///\brief Closes the datatype if it is not a predefined type.
+// Function: DataType::close
+///\brief Closes the datatype if it is not a predefined type.
///
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - Mar 9, 2005
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - Mar 9, 2005
//--------------------------------------------------------------------------
void DataType::close()
{
if (p_valid_id(id))
{
- herr_t ret_value = H5Tclose(id);
- if( ret_value < 0 )
- {
- throw DataTypeIException(inMemFunc("close"), "H5Tclose failed");
- }
- // reset the id
- id = H5I_INVALID_HID;
+ herr_t ret_value = H5Tclose(id);
+ if (ret_value < 0)
+ {
+ throw DataTypeIException(inMemFunc("close"), "H5Tclose failed");
+ }
+ // reset the id
+ id = H5I_INVALID_HID;
}
}
//--------------------------------------------------------------------------
-// Function: DataType destructor
-///\brief Properly terminates access to this datatype.
-// Programmer Binh-Minh Ribler - 2000
+// Function: DataType destructor
+///\brief Properly terminates access to this datatype.
+// Programmer Binh-Minh Ribler - 2000
// Modification
-// - Replaced resetIdComponent() with decRefCount() to use C
-// library ID reference counting mechanism - BMR, Jun 1, 2004
-// - Replaced decRefCount with close() to let the C library
-// handle the reference counting - BMR, Jun 1, 2006
-// - Added the use of H5CPP_EXITED to terminate the HDF5 library
-// and elimiate previous memory leaks. See comments in the
-// header file "H5PredType.h" for details. - BMR, Mar 30, 2012
-// - Major re-implementation of the global constants was done
-// to avoid relying on the order of the creation and deletion
-// of the global constants. Hence, H5CPP_EXITED was removed.
-// See Design Notes in "H5PredType.cpp" for details.
-// - BMR, Sep 30, 2015
+// - Replaced resetIdComponent() with decRefCount() to use C
+// library ID reference counting mechanism - BMR, Jun 1, 2004
+// - Replaced decRefCount with close() to let the C library
+// handle the reference counting - BMR, Jun 1, 2006
+// - Added the use of H5CPP_EXITED to terminate the HDF5 library
+//                and eliminate previous memory leaks.  See comments in the
+// header file "H5PredType.h" for details. - BMR, Mar 30, 2012
+// - Major re-implementation of the global constants was done
+// to avoid relying on the order of the creation and deletion
+// of the global constants. Hence, H5CPP_EXITED was removed.
+// See Design Notes in "H5PredType.cpp" for details.
+// - BMR, Sep 30, 2015
//--------------------------------------------------------------------------
DataType::~DataType()
{
try
{
- close();
+ close();
}
catch (Exception& close_error) {
- cerr << inMemFunc("~DataType - ") << close_error.getDetailMsg() << endl;
+ cerr << inMemFunc("~DataType - ") << close_error.getDetailMsg() << endl;
}
}
+
} // end namespace
diff --git a/c++/src/H5DataType.h b/c++/src/H5DataType.h
index 9ac6ecf..32a79fa 100644
--- a/c++/src/H5DataType.h
+++ b/c++/src/H5DataType.h
@@ -6,17 +6,12 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-// Class DataType inherits from H5Object and has several subclasses for
-// specific HDF5 data types.
-
#ifndef __H5DataType_H
#define __H5DataType_H
@@ -33,116 +28,133 @@ namespace H5 {
*/
class H5_DLLCPP DataType : public H5Object {
public:
- // Creates a datatype given its class and size
- DataType( const H5T_class_t type_class, size_t size );
+ // Creates a datatype given its class and size
+ DataType(const H5T_class_t type_class, size_t size);
+
+ // Copy constructor: makes a copy of the original object
+ DataType(const DataType& original);
- // Copy constructor: makes a copy of the original object
- DataType( const DataType& original );
+ // Creates a copy of a predefined type
+ DataType(const PredType& pred_type);
- // Creates a copy of a predefined type
- DataType(const PredType& pred_type);
+ // Creates a datatype by way of dereference.
+ DataType(const H5Location& loc, const void* ref, H5R_type_t ref_type = H5R_OBJECT, const PropList& plist = PropList::DEFAULT);
+// DataType(const Attribute& attr, const void* ref, H5R_type_t ref_type = H5R_OBJECT, const PropList& plist = PropList::DEFAULT);
- // Creates a datatype by way of dereference.
- DataType(const H5Location& loc, const void* ref, H5R_type_t ref_type = H5R_OBJECT, const PropList& plist = PropList::DEFAULT);
- DataType(const Attribute& attr, const void* ref, H5R_type_t ref_type = H5R_OBJECT, const PropList& plist = PropList::DEFAULT);
+ // Closes this datatype.
+ virtual void close();
- // Closes this datatype.
- virtual void close();
+ // Copies an existing datatype to this datatype object.
+ void copy(const DataType& like_type);
- // Copies an existing datatype to this datatype object.
- void copy(const DataType& like_type);
+ // Copies the datatype of dset to this datatype object.
+ void copy(const DataSet& dset);
- // Copies the datatype of dset to this datatype object.
- void copy(const DataSet& dset);
+ // Returns the datatype class identifier.
+ H5T_class_t getClass() const;
- // Returns the datatype class identifier.
- H5T_class_t getClass() const;
+ // Commits a transient datatype to a file; this datatype becomes
+ // a named datatype which can be accessed from the location.
+ void commit(const H5Location& loc, const char* name);
+ void commit(const H5Location& loc, const H5std_string& name);
- // Commits a transient datatype to a file; this datatype becomes
- // a named datatype which can be accessed from the location.
- void commit(const H5Location& loc, const char* name);
- void commit(const H5Location& loc, const H5std_string& name);
- // These two overloaded functions are kept for backward compatibility
- // only; they missed the const - removed from 1.8.18 and 1.10.1
- //void commit(H5Location& loc, const char* name);
- //void commit(H5Location& loc, const H5std_string& name);
+ // These two overloaded functions are kept for backward compatibility
+ // only; they missed the const - removed from 1.8.18 and 1.10.1
+ //void commit(H5Location& loc, const char* name);
+ //void commit(H5Location& loc, const H5std_string& name);
- // Determines whether this datatype is a named datatype or
- // a transient datatype.
- bool committed() const;
+ // Determines whether this datatype is a named datatype or
+ // a transient datatype.
+ bool committed() const;
// Finds a conversion function that can handle the conversion
// this datatype to the given datatype, dest.
- H5T_conv_t find( const DataType& dest, H5T_cdata_t **pcdata ) const;
+ H5T_conv_t find(const DataType& dest, H5T_cdata_t **pcdata) const;
+
+ // Converts data from between specified datatypes.
+ void convert(const DataType& dest, size_t nelmts, void *buf, void *background, const PropList& plist=PropList::DEFAULT) const;
+
+ // Assignment operator
+ DataType& operator=(const DataType& rhs);
+
+ // Determines whether two datatypes are the same.
+ bool operator==(const DataType& compared_type) const;
- // Converts data from between specified datatypes.
- void convert( const DataType& dest, size_t nelmts, void *buf, void *background, const PropList& plist=PropList::DEFAULT) const;
+ // Locks a datatype.
+ void lock() const;
- // Assignment operator
- DataType& operator=( const DataType& rhs );
+ // Returns the size of a datatype.
+ size_t getSize() const;
- // Determines whether two datatypes are the same.
- bool operator==(const DataType& compared_type ) const;
+ // Returns the base datatype from which a datatype is derived.
+ // Note: not quite right for specific types yet???
+ DataType getSuper() const;
- // Locks a datatype.
- void lock() const;
+ // Registers a conversion function.
+ void registerFunc(H5T_pers_t pers, const char* name, const DataType& dest, H5T_conv_t func) const;
+ void registerFunc(H5T_pers_t pers, const H5std_string& name, const DataType& dest, H5T_conv_t func) const;
- // Returns the size of a datatype.
- size_t getSize() const;
+ // Removes a conversion function from all conversion paths.
+ void unregister(H5T_pers_t pers, const char* name, const DataType& dest, H5T_conv_t func) const;
+ void unregister(H5T_pers_t pers, const H5std_string& name, const DataType& dest, H5T_conv_t func) const;
- // Returns the base datatype from which a datatype is derived.
- // Note: not quite right for specific types yet???
- DataType getSuper() const;
+ // Tags an opaque datatype.
+ void setTag(const char* tag) const;
+ void setTag(const H5std_string& tag) const;
- // Registers a conversion function.
- void registerFunc(H5T_pers_t pers, const char* name, const DataType& dest, H5T_conv_t func ) const;
- void registerFunc(H5T_pers_t pers, const H5std_string& name, const DataType& dest, H5T_conv_t func ) const;
+ // Gets the tag associated with an opaque datatype.
+ H5std_string getTag() const;
- // Removes a conversion function from all conversion paths.
- void unregister( H5T_pers_t pers, const char* name, const DataType& dest, H5T_conv_t func ) const;
- void unregister( H5T_pers_t pers, const H5std_string& name, const DataType& dest, H5T_conv_t func ) const;
+ // Checks whether this datatype contains (or is) a certain type class.
+ bool detectClass(H5T_class_t cls) const;
- // Tags an opaque datatype.
- void setTag( const char* tag ) const;
- void setTag( const H5std_string& tag ) const;
+ // Checks whether this datatype is a variable-length string.
+ bool isVariableStr() const;
- // Gets the tag associated with an opaque datatype.
- H5std_string getTag() const;
+ // Returns a copy of the creation property list of a datatype.
+ PropList getCreatePlist() const;
- // Checks whether this datatype contains (or is) a certain type class.
- bool detectClass(H5T_class_t cls) const;
+ ///\brief Returns this class name.
+ virtual H5std_string fromClass () const { return("DataType"); }
- // Checks whether this datatype is a variable-length string.
- bool isVariableStr() const;
+// From CommonFG then H5Location
+ // Constructors to open a generic named datatype at a given location.
+ DataType(const H5Location& loc, const char* name);
+ DataType(const H5Location& loc, const H5std_string& name);
- ///\brief Returns this class name.
- virtual H5std_string fromClass () const { return("DataType"); }
+// End of From CommonFG then H5Location
- // Creates a copy of an existing DataType using its id
- DataType( const hid_t type_id );
+ // Creates a copy of an existing DataType using its id
+ DataType(const hid_t type_id);
- // Default constructor
- DataType();
+ // Default constructor
+ DataType();
- // Gets the datatype id.
- virtual hid_t getId() const;
+ // Gets the datatype id.
+ virtual hid_t getId() const;
- // Destructor: properly terminates access to this datatype.
- virtual ~DataType();
+ // Destructor: properly terminates access to this datatype.
+ virtual ~DataType();
protected:
#ifndef DOXYGEN_SHOULD_SKIP_THIS
- hid_t id; // HDF5 datatype id
+ hid_t id; // HDF5 datatype id
+
+ // Sets the datatype id.
+ virtual void p_setId(const hid_t new_id);
+
+ // Opens a datatype and returns the id.
+ hid_t p_opentype(const H5Location& loc, const char* dtype_name) const;
- // Sets the datatype id.
- virtual void p_setId(const hid_t new_id);
#endif // DOXYGEN_SHOULD_SKIP_THIS
private:
- // Friend function to set DataType id. For library use only.
- friend void f_DataType_setId(DataType* dtype, hid_t new_id);
+ // Friend function to set DataType id. For library use only.
+ friend void f_DataType_setId(DataType* dtype, hid_t new_id);
+
+ void p_commit(hid_t loc_id, const char* name);
+
+}; // end of DataType
+} // namespace H5
- void p_commit(hid_t loc_id, const char* name);
-};
-}
#endif // __H5DataType_H
diff --git a/c++/src/H5DcreatProp.cpp b/c++/src/H5DcreatProp.cpp
index b307bde..67e4a1c 100644
--- a/c++/src/H5DcreatProp.cpp
+++ b/c++/src/H5DcreatProp.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <string>
@@ -18,9 +16,11 @@
#include "H5Include.h"
#include "H5Exception.h"
#include "H5IdComponent.h"
+#include "H5DataSpace.h"
#include "H5PropList.h"
#include "H5OcreatProp.h"
#include "H5DcreatProp.h"
+#include "H5LaccProp.h"
#include "H5Location.h"
#include "H5Object.h"
#include "H5DataType.h"
@@ -39,7 +39,7 @@ DSetCreatPropList* DSetCreatPropList::DEFAULT_ = 0;
// Function: DSetCreatPropList::getConstant
// Purpose: Creates a DSetCreatPropList object representing the HDF5
// constant H5P_DATASET_CREATE, pointed to by
-// DSetCreatPropList::DEFAULT_
+// DSetCreatPropList::DEFAULT_
// exception H5::PropListIException
// Description
// If DSetCreatPropList::DEFAULT_ already points to an allocated
@@ -79,670 +79,719 @@ void DSetCreatPropList::deleteConstants()
}
//--------------------------------------------------------------------------
-// Purpose Constant for dataset creation default property
+// Purpose Constant for dataset creation default property
//--------------------------------------------------------------------------
const DSetCreatPropList& DSetCreatPropList::DEFAULT = *getConstant();
#endif // DOXYGEN_SHOULD_SKIP_THIS
//--------------------------------------------------------------------------
-// Function: DSetCreatPropList default constructor
-///\brief Default constructor: creates a stub dataset creation property list
-// Programmer Binh-Minh Ribler - 2000
+// Function: DSetCreatPropList default constructor
+///\brief Default constructor: creates a stub dataset creation property list
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
DSetCreatPropList::DSetCreatPropList() : ObjCreatPropList(H5P_DATASET_CREATE) {}
//--------------------------------------------------------------------------
-// Function: DSetCreatPropList copy constructor
-///\brief Copy constructor: makes a copy of the original
-/// DSetCreatPropList object
-// Programmer Binh-Minh Ribler - 2000
+// Function: DSetCreatPropList copy constructor
+///\brief Copy constructor: makes a copy of the original
+/// DSetCreatPropList object
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-DSetCreatPropList::DSetCreatPropList( const DSetCreatPropList& orig ) : ObjCreatPropList(orig) {}
+DSetCreatPropList::DSetCreatPropList(const DSetCreatPropList& orig) : ObjCreatPropList(orig) {}
//--------------------------------------------------------------------------
-// Function: DSetCreatPropList overloaded constructor
-///\brief Creates a DSetCreatPropList object using the id of an
-/// existing dataset creation property list.
-// Programmer Binh-Minh Ribler - 2000
+// Function: DSetCreatPropList overloaded constructor
+///\brief Creates a DSetCreatPropList object using the id of an
+/// existing dataset creation property list.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
DSetCreatPropList::DSetCreatPropList(const hid_t plist_id) : ObjCreatPropList(plist_id) {}
//--------------------------------------------------------------------------
-// Function: DSetCreatPropList::setChunk
-///\brief Sets the size of the chunks used to store a chunked layout
-/// dataset.
-///\param ndims - IN: Number of dimensions of each chunk
-///\param dim - IN: Array containing the size of each chunk
-///\exception H5::PropListIException
+// Function: DSetCreatPropList::setChunk
+///\brief Sets the size of the chunks used to store a chunked layout
+/// dataset.
+///\param ndims - IN: Number of dimensions of each chunk
+///\param dim - IN: Array containing the size of each chunk
+///\exception H5::PropListIException
///\par Description
-/// The \a ndims parameter currently must have the same value as
-/// the rank of the dataset. The values of the \a dim array
-/// define the size of the chunks to store the dataset's raw
-/// data. As a side-effect, the layout of the dataset will be
-/// changed to \c H5D_CHUNKED, if it is not so already.
-// Programmer Binh-Minh Ribler - 2000
-//--------------------------------------------------------------------------
-void DSetCreatPropList::setChunk( int ndims, const hsize_t* dim ) const
+/// The \a ndims parameter currently must have the same value as
+/// the rank of the dataset. The values of the \a dim array
+/// define the size of the chunks to store the dataset's raw
+/// data. As a side-effect, the layout of the dataset will be
+/// changed to \c H5D_CHUNKED, if it is not so already.
+// Programmer Binh-Minh Ribler - 2000
+//--------------------------------------------------------------------------
+void DSetCreatPropList::setChunk(int ndims, const hsize_t* dim) const
{
- herr_t ret_value = H5Pset_chunk( id, ndims, dim );
- if( ret_value < 0 )
- {
- throw PropListIException("DSetCreatPropList::setChunk", "H5Pset_chunk failed");
- }
+ herr_t ret_value = H5Pset_chunk(id, ndims, dim);
+ if (ret_value < 0)
+ {
+ throw PropListIException("DSetCreatPropList::setChunk", "H5Pset_chunk failed");
+ }
}
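Illustrative only (not from this changeset): a chunked, deflate-compressed dataset built with this property list class; the file, dataset, and helper names are hypothetical.

    #include "H5Cpp.h"
    using namespace H5;

    void create_chunked_dataset()
    {
        H5File  file("chunked.h5", H5F_ACC_TRUNC);
        hsize_t dims[2]  = {1000, 1000};
        hsize_t chunk[2] = {100, 100};

        DSetCreatPropList dcpl;
        dcpl.setChunk(2, chunk);     // layout becomes H5D_CHUNKED
        dcpl.setDeflate(6);          // gzip, level 6

        DataSpace space(2, dims);
        file.createDataSet("data", PredType::NATIVE_DOUBLE, space, dcpl);
    }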
//--------------------------------------------------------------------------
-// Function: DSetCreatPropList::getChunk
-///\brief Retrieves the size of the chunks used to store a chunked
-/// layout dataset.
-///\param max_ndims - IN: Size of \a dim array
-///\param dim - OUT: Array to store the chunk dimensions
-///\exception H5::PropListIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DSetCreatPropList::getChunk
+///\brief Retrieves the size of the chunks used to store a chunked
+/// layout dataset.
+///\param max_ndims - IN: Size of \a dim array
+///\param dim - OUT: Array to store the chunk dimensions
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-int DSetCreatPropList::getChunk( int max_ndims, hsize_t* dim ) const
+int DSetCreatPropList::getChunk(int max_ndims, hsize_t* dim) const
{
- int chunk_size = H5Pget_chunk( id, max_ndims, dim );
- if( chunk_size < 0 )
- {
- throw PropListIException("DSetCreatPropList::getChunk",
- "H5Pget_chunk returns negative chunk size");
- }
- return( chunk_size );
+ int chunk_size = H5Pget_chunk(id, max_ndims, dim);
+ if (chunk_size < 0)
+ {
+ throw PropListIException("DSetCreatPropList::getChunk",
+ "H5Pget_chunk returns negative chunk size");
+ }
+ return(chunk_size);
}
//--------------------------------------------------------------------------
-// Function: DSetCreatPropList::setLayout
-///\brief Sets the type of storage used store the raw data for a dataset.
-///\param layout - IN: Type of storage layout for raw data
-///\exception H5::PropListIException
+// Function: DSetCreatPropList::setLayout
+///\brief       Sets the type of storage used to store the raw data for a dataset.
+///\param layout - IN: Type of storage layout for raw data
+///\exception H5::PropListIException
///\par Description
-/// For information on valid layout types, please refer to
+/// For information on valid layout types, please refer to
/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5P.html#Property-SetLayout
-// Programmer Binh-Minh Ribler - 2000
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
void DSetCreatPropList::setLayout(H5D_layout_t layout) const
{
- herr_t ret_value = H5Pset_layout( id, layout );
- if( ret_value < 0 )
- {
- throw PropListIException("DSetCreatPropList::setLayout",
- "H5Pset_layout failed");
- }
+ herr_t ret_value = H5Pset_layout(id, layout);
+ if (ret_value < 0)
+ {
+ throw PropListIException("DSetCreatPropList::setLayout",
+ "H5Pset_layout failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: DSetCreatPropList::getLayout
-///\brief Retrieves the layout type of this property list
-///\return Layout type, which can be:
-/// \li \c H5D_COMPACT - raw data is stored in the object
-/// header in the file.
-/// \li \c H5D_CONTIGUOUS - raw data is stored separately from the
-/// object header in one contiguous chunk in
-/// the file.
-/// \li \c H5D_CHUNKED - raw data is stored separately from the
-/// object header in chunks in separate locations
-/// in the file.
-///\exception H5::PropListIException
+// Function: DSetCreatPropList::getLayout
+///\brief Retrieves the layout type of this property list
+///\return Layout type, which can be:
+/// \li \c H5D_COMPACT - raw data is stored in the object
+/// header in the file.
+/// \li \c H5D_CONTIGUOUS - raw data is stored separately from the
+/// object header in one contiguous chunk in
+/// the file.
+/// \li \c H5D_CHUNKED - raw data is stored separately from the
+/// object header in chunks in separate locations
+/// in the file.
+///\exception H5::PropListIException
///\par Description
-// Programmer Binh-Minh Ribler - 2000
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
H5D_layout_t DSetCreatPropList::getLayout() const
{
- H5D_layout_t layout = H5Pget_layout( id );
- if( layout == H5D_LAYOUT_ERROR )
- {
- throw PropListIException("DSetCreatPropList::getLayout",
- "H5Pget_layout returns H5D_LAYOUT_ERROR");
- }
- return( layout );
+ H5D_layout_t layout = H5Pget_layout(id);
+ if (layout == H5D_LAYOUT_ERROR)
+ {
+ throw PropListIException("DSetCreatPropList::getLayout",
+ "H5Pget_layout returns H5D_LAYOUT_ERROR");
+ }
+ return(layout);
}
//--------------------------------------------------------------------------
-// Function: DSetCreatPropList::setDeflate
-///\brief Sets compression method and compression level
-///\param level - IN: Compression level, should [0..9], inclusive
-///\exception H5::PropListIException
+// Function: DSetCreatPropList::setDeflate
+///\brief Sets compression method and compression level
+///\param       level - IN: Compression level, should be in the range [0..9], inclusive
+///\exception H5::PropListIException
///\par Description
-/// The function sets the compression method for this property
-/// list to \c H5D_COMPRESS_DEFLATE and the compression level to
-/// \a level. Lower compression levels are faster but result in
-/// less compression.
-// Programmer Binh-Minh Ribler - 2000
+/// The function sets the compression method for this property
+/// list to \c H5D_COMPRESS_DEFLATE and the compression level to
+/// \a level. Lower compression levels are faster but result in
+/// less compression.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void DSetCreatPropList::setDeflate( int level ) const
+void DSetCreatPropList::setDeflate(int level) const
{
- herr_t ret_value = H5Pset_deflate( id, level );
- if( ret_value < 0 )
- {
- throw PropListIException("DSetCreatPropList::setDeflate",
- "H5Pset_deflate failed");
- }
+ herr_t ret_value = H5Pset_deflate(id, level);
+ if (ret_value < 0)
+ {
+ throw PropListIException("DSetCreatPropList::setDeflate",
+ "H5Pset_deflate failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: DSetCreatPropList::setSzip
-///\brief Sets up for the use of the SZIP compression filter.
-///\param options_mask - IN: A bit-mask conveying the desired SZIP
-/// options. Valid values are H5_SZIP_EC_OPTION_MASK
-/// and H5_SZIP_NN_OPTION_MASK.
-///\param pixels_per_block - IN: Number of pixels or data elements in
-/// each data block.
-///\exception H5::PropListIException
+// Function: DSetCreatPropList::setSzip
+///\brief Sets up for the use of the SZIP compression filter.
+///\param options_mask - IN: A bit-mask conveying the desired SZIP
+/// options. Valid values are H5_SZIP_EC_OPTION_MASK
+/// and H5_SZIP_NN_OPTION_MASK.
+///\param pixels_per_block - IN: Number of pixels or data elements in
+/// each data block.
+///\exception H5::PropListIException
///\par Description
-/// The associate C function sets an SZIP compression filter,
-/// H5Z_FILTER_SZIP, for a dataset. For more information about
-/// SZIP and usage, please refer to the C layer Reference
-/// Manual at:
+///             The associated C function sets an SZIP compression filter,
+/// H5Z_FILTER_SZIP, for a dataset. For more information about
+/// SZIP and usage, please refer to the C layer Reference
+/// Manual at:
/// http://hdfgroup.org/HDF5/doc/RM_H5P.html#Property-SetSzip
-// Programmer Binh-Minh Ribler - Jan, 2007
+// Programmer Binh-Minh Ribler - Jan, 2007
//--------------------------------------------------------------------------
void DSetCreatPropList::setSzip(unsigned int options_mask, unsigned int pixels_per_block) const
{
herr_t ret_value = H5Pset_szip(id, options_mask, pixels_per_block);
- if( ret_value < 0 )
+ if (ret_value < 0)
{
- throw PropListIException("DSetCreatPropList::setSzip",
- "H5Pset_szip failed");
+ throw PropListIException("DSetCreatPropList::setSzip",
+ "H5Pset_szip failed");
}
}
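Sketch only, not part of this diff; it assumes an SZIP-enabled HDF5 build and a creation property list that also has a chunked layout.

    #include "H5Cpp.h"
    using namespace H5;

    void enable_szip(DSetCreatPropList& dcpl)
    {
        // NN coding with 16 pixels per block
        dcpl.setSzip(H5_SZIP_NN_OPTION_MASK, 16);
    }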
//--------------------------------------------------------------------------
-// Function: DSetCreatPropList::setNbit
-///\brief Sets up for the use of the Nbit compression filter.
-///\exception H5::PropListIException
+// Function: DSetCreatPropList::setNbit
+///\brief Sets up for the use of the Nbit compression filter.
+///\exception H5::PropListIException
///\par Description
-/// The associate C function sets an Nbit compression filter,
-/// H5Z_FILTER_NBIT, for a dataset. For more information about
-/// Nbit compression, please refer to the C layer Reference
-/// Manual at:
+///             The associated C function sets an Nbit compression filter,
+/// H5Z_FILTER_NBIT, for a dataset. For more information about
+/// Nbit compression, please refer to the C layer Reference
+/// Manual at:
/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5P.html#Property-setNbit
-// Programmer Binh-Minh Ribler - Apr, 2016
+// Programmer Binh-Minh Ribler - Apr, 2016
//--------------------------------------------------------------------------
void DSetCreatPropList::setNbit() const
{
herr_t ret_value = H5Pset_nbit(id);
- if( ret_value < 0 )
+ if (ret_value < 0)
{
- throw PropListIException("DSetCreatPropList::setNbit",
- "H5Pset_nbit failed");
+ throw PropListIException("DSetCreatPropList::setNbit",
+ "H5Pset_nbit failed");
}
}
//--------------------------------------------------------------------------
-// Function: DSetCreatPropList::setFillValue
-///\brief Sets a dataset fill value
-///\param fvalue_type - IN: Data type for the value passed via \a value
-///\param value - IN: Pointer to buffer containing the fill value
-///\exception H5::PropListIException
+// Function: DSetCreatPropList::setFillValue
+///\brief Sets a dataset fill value
+///\param fvalue_type - IN: Data type for the value passed via \a value
+///\param value - IN: Pointer to buffer containing the fill value
+///\exception H5::PropListIException
///\par Description
-/// The datatype may differ from that of the dataset, but it must
-/// be one that the HDF5 library is able to convert \a value to
-/// the dataset datatype when the dataset is created.
-/// The default fill value is 0 (zero,) which is interpreted
-/// according to the actual dataset datatype.
+///             The datatype may differ from that of the dataset, but it must
+///             be one from which the HDF5 library is able to convert \a value
+///             to the dataset datatype when the dataset is created.
+///             The default fill value is 0 (zero), which is interpreted
+///             according to the actual dataset datatype.
///\par
-/// For information on setting fill value, please refer to the
-/// C layer Reference Manual at:
+/// For information on setting fill value, please refer to the
+/// C layer Reference Manual at:
/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5P.html#Property-SetFillValue
-// Programmer Binh-Minh Ribler - 2000
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void DSetCreatPropList::setFillValue( const DataType& fvalue_type, const void* value ) const
+void DSetCreatPropList::setFillValue(const DataType& fvalue_type, const void* value) const
{
- herr_t ret_value = H5Pset_fill_value( id, fvalue_type.getId(), value );
- if( ret_value < 0 )
- {
- throw PropListIException("DSetCreatPropList::setFillValue",
+ herr_t ret_value = H5Pset_fill_value(id, fvalue_type.getId(), value);
+ if (ret_value < 0)
+ {
+ throw PropListIException("DSetCreatPropList::setFillValue",
"H5Pset_fill_value failed");
- }
+ }
}
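A small sketch of setting a fill value and reading it back; as documented for getFillValue() below, the readback buffer is allocated by the caller.

    int fill = -1;
    H5::DSetCreatPropList plist;
    plist.setFillValue(H5::PredType::NATIVE_INT, &fill);

    int readback = 0;
    plist.getFillValue(H5::PredType::NATIVE_INT, &readback);   // readback is now -1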
//--------------------------------------------------------------------------
-// Function: DSetCreatPropList::getFillValue
-///\brief Retrieves a dataset fill value
-///\param fvalue_type - IN: Data type for the value passed via \a value
-///\param value - OUT: Pointer to buffer to hold the retrieved fill value
-///\exception H5::PropListIException
+// Function: DSetCreatPropList::getFillValue
+///\brief Retrieves a dataset fill value
+///\param fvalue_type - IN: Data type for the value passed via \a value
+///\param value - OUT: Pointer to buffer to hold the retrieved fill value
+///\exception H5::PropListIException
///\par Description
-/// The fill value is returned through \a value pointer
-/// and the memory is allocated by the caller. The fill
-/// value will be converted from its current data type to the
-/// specified by \a fvalue_type.
-// Programmer Binh-Minh Ribler - 2000
+///             The fill value is returned through the \a value pointer,
+///             and the memory is allocated by the caller. The fill
+///             value will be converted from its current datatype to the
+///             type specified by \a fvalue_type.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void DSetCreatPropList::getFillValue( const DataType& fvalue_type, void* value ) const
+void DSetCreatPropList::getFillValue(const DataType& fvalue_type, void* value) const
{
- herr_t ret_value = H5Pget_fill_value( id, fvalue_type.getId(), value );
- if( ret_value < 0 )
- {
- throw PropListIException("DSetCreatPropList::getFillValue",
+ herr_t ret_value = H5Pget_fill_value(id, fvalue_type.getId(), value);
+ if (ret_value < 0)
+ {
+ throw PropListIException("DSetCreatPropList::getFillValue",
"H5Pget_fill_value failed");
- }
+ }
}
//--------------------------------------------------------------------------
-// Function: DSetCreatPropList::isFillValueDefined
-///\brief Check if fill value has been defined for this property
+// Function: DSetCreatPropList::isFillValueDefined
+///\brief Check if fill value has been defined for this property
///\return
-/// \li \c H5D_FILL_VALUE_UNDEFINED =0,
-/// \li \c H5D_FILL_VALUE_DEFAULT =1,
-/// \li \c H5D_FILL_VALUE_USER_DEFINED =2
-///\exception H5::PropListIException
-// Programmer Binh-Minh Ribler - 2000
+/// \li \c H5D_FILL_VALUE_UNDEFINED =0,
+/// \li \c H5D_FILL_VALUE_DEFAULT =1,
+/// \li \c H5D_FILL_VALUE_USER_DEFINED =2
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
H5D_fill_value_t DSetCreatPropList::isFillValueDefined() const
{
- H5D_fill_value_t status;
- herr_t ret_value = H5Pfill_value_defined(id, &status);
- if( ret_value < 0 )
- {
- throw PropListIException("DSetCreatPropList::isFillValueDefined",
+ H5D_fill_value_t status;
+ herr_t ret_value = H5Pfill_value_defined(id, &status);
+ if (ret_value < 0)
+ {
+ throw PropListIException("DSetCreatPropList::isFillValueDefined",
"H5Pfill_value_defined returned H5D_FILL_VALUE_ERROR (-1)");
- }
- else
- return (status);
+ }
+ else
+ return (status);
}
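For illustration, a caller might branch on the returned status (plist is assumed to be a DSetCreatPropList):

    switch (plist.isFillValueDefined()) {
        case H5D_FILL_VALUE_USER_DEFINED: /* application-defined fill value */ break;
        case H5D_FILL_VALUE_DEFAULT:      /* library default fill value (0) */ break;
        case H5D_FILL_VALUE_UNDEFINED:    /* no fill value will be written  */ break;
        default: break;
    }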
//--------------------------------------------------------------------------
-// Function: DSetCreatPropList::setFilter
-///\brief Adds a filter to the filter pipeline
-///\param filter_id - IN: Filter to add
-///\param flags - IN: Specifies general properties of the filter
-///\param cd_nelmts - IN: Number of elements in cd_values
-///\param cd_values - IN: Auxiliary data for the filter
-///\exception H5::PropListIException
+// Function: DSetCreatPropList::setFilter
+///\brief Adds a filter to the filter pipeline
+///\param filter_id - IN: Filter to add
+///\param flags - IN: Specifies general properties of the filter
+///\param cd_nelmts - IN: Number of elements in cd_values
+///\param cd_values - IN: Auxiliary data for the filter
+///\exception H5::PropListIException
///\par Description
-/// The \a flags argument is a bit vector of the field:
-/// \c H5Z_FLAG_OPTIONAL(0x0001)
+/// The \a flags argument is a bit vector of the field:
+/// \c H5Z_FLAG_OPTIONAL(0x0001)
///\par
-/// If this bit is set then the filter is optional. If the filter
-/// fails during a \c DataSet::write() operation then the filter
-/// is just excluded from the pipeline for the chunk for which it
-/// failed; the filter will not participate in the pipeline
-/// during a \c DataSet::read() of the chunk. If this bit is clear
-/// and the filter fails then the entire I/O operation fails.
-// Programmer Binh-Minh Ribler - 2000
-//--------------------------------------------------------------------------
-void DSetCreatPropList::setFilter( H5Z_filter_t filter_id, unsigned int flags,
- size_t cd_nelmts, const unsigned int cd_values[] ) const
+/// If this bit is set then the filter is optional. If the filter
+/// fails during a \c DataSet::write() operation then the filter
+/// is just excluded from the pipeline for the chunk for which it
+/// failed; the filter will not participate in the pipeline
+/// during a \c DataSet::read() of the chunk. If this bit is clear
+/// and the filter fails then the entire I/O operation fails.
+// Programmer Binh-Minh Ribler - 2000
+//--------------------------------------------------------------------------
+void DSetCreatPropList::setFilter(H5Z_filter_t filter_id, unsigned int flags,
+ size_t cd_nelmts, const unsigned int cd_values[]) const
{
- herr_t ret_value = H5Pset_filter( id, filter_id, flags, cd_nelmts, cd_values );
- if( ret_value < 0 )
- {
- throw PropListIException("DSetCreatPropList::setFilter",
+ herr_t ret_value = H5Pset_filter(id, filter_id, flags, cd_nelmts, cd_values);
+ if (ret_value < 0)
+ {
+ throw PropListIException("DSetCreatPropList::setFilter",
"H5Pset_filter failed");
- }
+ }
}
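A sketch of adding deflate through the generic filter interface with the optional flag, so a chunk the filter cannot compress is still written uncompressed; for this particular filter, cd_values[0] carries the compression level.

    unsigned int cd_values[1] = {6};   // auxiliary data: deflate level
    plist.setFilter(H5Z_FILTER_DEFLATE, H5Z_FLAG_OPTIONAL, 1, cd_values);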
//--------------------------------------------------------------------------
-// Function: DSetCreatPropList::removeFilter
-///\brief Removes one or more filters
-///\param filter_id - IN: Filter to remove
-///\exception H5::PropListIException
+// Function: DSetCreatPropList::removeFilter
+///\brief Removes one or more filters
+///\param filter_id - IN: Filter to remove
+///\exception H5::PropListIException
///\par Description
-/// Deletes a filter from the dataset creation property list;
-/// deletes all filters if \a filter_id is \c H5Z_FILTER_NONE.
-// Programmer Binh-Minh Ribler - 2000
+/// Deletes a filter from the dataset creation property list;
+/// deletes all filters if \a filter_id is \c H5Z_FILTER_NONE.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
void DSetCreatPropList::removeFilter(H5Z_filter_t filter_id) const
{
- herr_t ret_value = H5Premove_filter( id, filter_id);
- if( ret_value < 0 )
- {
- throw PropListIException("DSetCreatPropList::removeFilter",
+ herr_t ret_value = H5Premove_filter(id, filter_id);
+ if (ret_value < 0)
+ {
+ throw PropListIException("DSetCreatPropList::removeFilter",
"H5Premove_filter failed");
- }
+ }
}
//--------------------------------------------------------------------------
-// Function: DSetCreatPropList::getNfilters
-///\brief Returns the number of filters in the pipeline
-///\return Number of filters
-///\exception H5::PropListIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DSetCreatPropList::getNfilters
+///\brief Returns the number of filters in the pipeline
+///\return Number of filters
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
int DSetCreatPropList::getNfilters() const
{
- int num_filters = H5Pget_nfilters( id );
- if( num_filters < 0 )
- {
- throw PropListIException("DSetCreatPropList::getNfilters",
+ int num_filters = H5Pget_nfilters(id);
+ if (num_filters < 0)
+ {
+ throw PropListIException("DSetCreatPropList::getNfilters",
"H5Pget_nfilters returned negative number of filters");
- }
- else
- return( num_filters );
+ }
+ else
+ return(num_filters);
}
//--------------------------------------------------------------------------
-// Function: DSetCreatPropList::getFilter
-///\brief Returns information about a filter in a pipeline
-///\param filter_number - IN: Filter to get, range [0..N-1], where
-/// N is returned by H5Pget_nfilters()
-///\param flags - OUT: General properties of the filter
-///\param cd_nelmts - IN/OUT: Number of elements in \a cd_values /Number
-/// of values defined by the filter
-///\param cd_values - OUT: Array to hold the data; allocated by the user
-///\param namelen - IN: Length of \a name
-///\param name - OUT: Name of the filter
-///\param filter_config - OUT: Flags indicating whether filter can encode/decode
-///\return Filter id
-///\exception H5::PropListIException
+// Function: DSetCreatPropList::getFilter
+///\brief Returns information about a filter in a pipeline
+///\param filter_number - IN: Filter to get, range [0..N-1], where
+/// N is returned by H5Pget_nfilters()
+///\param flags - OUT: General properties of the filter
+///\param cd_nelmts - IN/OUT: Number of elements in \a cd_values /Number
+/// of values defined by the filter
+///\param cd_values - OUT: Array to hold the data; allocated by the user
+///\param namelen - IN: Length of \a name
+///\param name - OUT: Name of the filter
+///\param filter_config - OUT: Flags indicating whether filter can encode/decode
+///\return Filter id
+///\exception H5::PropListIException
///\par Description
-/// Failure occurs when \a filter_number is out of range.
-// Note: the first argument was mistakenly typed as int instead
-// of unsigned int, but for backward compatibility, it cannot be
-// changed. -BMR (2014/04/15)
+/// Failure occurs when \a filter_number is out of range.
+// Note: the first argument was mistakenly typed as int instead
+// of unsigned int, but for backward compatibility, it cannot be
+// changed. -BMR (2014/04/15)
//--------------------------------------------------------------------------
H5Z_filter_t DSetCreatPropList::getFilter(int filter_number,
- unsigned int &flags, size_t &cd_nelmts, unsigned int* cd_values,
- size_t namelen, char name[], unsigned int& filter_config) const
+ unsigned int &flags, size_t &cd_nelmts, unsigned int* cd_values,
+ size_t namelen, char name[], unsigned int& filter_config) const
{
- H5Z_filter_t filter_id;
- filter_id = H5Pget_filter2(id, filter_number, &flags, &cd_nelmts,
- cd_values, namelen, name, &filter_config);
- if( filter_id == H5Z_FILTER_ERROR )
- throw PropListIException("DSetCreatPropList::getFilter",
+ H5Z_filter_t filter_id;
+ filter_id = H5Pget_filter2(id, filter_number, &flags, &cd_nelmts,
+ cd_values, namelen, name, &filter_config);
+ if (filter_id == H5Z_FILTER_ERROR)
+ throw PropListIException("DSetCreatPropList::getFilter",
"H5Pget_filter2 returned H5Z_FILTER_ERROR");
- else
- return(filter_id);
+ else
+ return(filter_id);
}
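A sketch of walking the filter pipeline of an existing dataset (dset is assumed to be an open DataSet; the buffer sizes are arbitrary):

    H5::DSetCreatPropList plist = dset.getCreatePlist();
    int nfilters = plist.getNfilters();
    for (int i = 0; i < nfilters; i++)
    {
        unsigned int flags = 0, filter_config = 0;
        size_t       cd_nelmts = 8;
        unsigned int cd_values[8];
        char         name[64];
        H5Z_filter_t filter = plist.getFilter(i, flags, cd_nelmts, cd_values,
                                              sizeof(name), name, filter_config);
        // filter is e.g. H5Z_FILTER_DEFLATE or H5Z_FILTER_SHUFFLE
    }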
//--------------------------------------------------------------------------
-// Function: DSetCreatPropList::getFilterById
-///\brief Returns information about a filter in a pipeline given the
-/// filter id
-///\param filter_id - IN: Filter to get
-///\param flags - OUT: General properties of the filter
-///\param cd_nelmts - IN/OUT: Number of elements in \a cd_values /Number
-/// of values defined by the filter
-///\param cd_values - OUT: Array to hold the data; allocated by the user
-///\param namelen - IN: Length of \a name
-///\param name - OUT: Name of the filter
-///\param filter_config - OUT: Flags indicating whether filter can encode/decode
-///\exception H5::PropListIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DSetCreatPropList::getFilterById
+///\brief Returns information about a filter in a pipeline given the
+/// filter id
+///\param filter_id - IN: Filter to get
+///\param flags - OUT: General properties of the filter
+///\param cd_nelmts - IN/OUT: Number of elements in \a cd_values /Number
+/// of values defined by the filter
+///\param cd_values - OUT: Array to hold the data; allocated by the user
+///\param namelen - IN: Length of \a name
+///\param name - OUT: Name of the filter
+///\param filter_config - OUT: Flags indicating whether filter can encode/decode
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
void DSetCreatPropList::getFilterById(H5Z_filter_t filter_id,
- unsigned int &flags, size_t &cd_nelmts, unsigned int* cd_values,
- size_t namelen, char name[], unsigned int &filter_config) const
+ unsigned int &flags, size_t &cd_nelmts, unsigned int* cd_values,
+ size_t namelen, char name[], unsigned int &filter_config) const
{
- herr_t ret_value = H5Pget_filter_by_id2(id, filter_id, &flags, &cd_nelmts,
- cd_values, namelen, name, &filter_config);
- if (ret_value < 0)
- throw PropListIException("DSetCreatPropList::getFilterById",
+ herr_t ret_value = H5Pget_filter_by_id2(id, filter_id, &flags, &cd_nelmts,
+ cd_values, namelen, name, &filter_config);
+ if (ret_value < 0)
+ throw PropListIException("DSetCreatPropList::getFilterById",
"H5Pget_filter_by_id2 failed");
}
//--------------------------------------------------------------------------
-// Function: DSetCreatPropList::modifyFilter
-///\brief Modifies the specified filter
-///\param filter_id - IN: Filter to get
-///\param flags - OUT: General properties of the filter
-///\param cd_nelmts - IN: Number of elements in \a cd_values
-/// \n OUT: Number of values defined by the filter
-///\param cd_values - OUT: Array to hold the data; allocated by the user
-///\exception H5::PropListIException
+// Function: DSetCreatPropList::modifyFilter
+///\brief Modifies the specified filter
+///\param filter_id - IN: Filter to modify
+///\param flags     - IN: Specifies general properties of the filter
+///\param cd_nelmts - IN: Number of elements in \a cd_values
+///\param cd_values - IN: Auxiliary data for the filter
+///\exception H5::PropListIException
///\par Description
-/// The \a flags argument is a bit vector of the field:
-/// \c H5Z_FLAG_OPTIONAL(0x0001)
+/// The \a flags argument is a bit vector of the field:
+/// \c H5Z_FLAG_OPTIONAL(0x0001)
///\par
-/// If this bit is set then the filter is optional. If the filter
-/// fails during a DataSet::write() operation then the filter
-/// is just excluded from the pipeline for the chunk for which it
-/// failed; the filter will not participate in the pipeline
-/// during a DataSet::read() of the chunk. If this bit is clear
-/// and the filter fails then the entire I/O operation fails.
-// Programmer Binh-Minh Ribler - 2000
-//--------------------------------------------------------------------------
-void DSetCreatPropList::modifyFilter( H5Z_filter_t filter_id, unsigned int
- flags, size_t cd_nelmts, const unsigned int cd_values[] ) const
+/// If this bit is set then the filter is optional. If the filter
+/// fails during a DataSet::write() operation then the filter
+/// is just excluded from the pipeline for the chunk for which it
+/// failed; the filter will not participate in the pipeline
+/// during a DataSet::read() of the chunk. If this bit is clear
+/// and the filter fails then the entire I/O operation fails.
+// Programmer Binh-Minh Ribler - 2000
+//--------------------------------------------------------------------------
+void DSetCreatPropList::modifyFilter(H5Z_filter_t filter_id, unsigned int
+ flags, size_t cd_nelmts, const unsigned int cd_values[]) const
{
- herr_t ret_value = H5Pmodify_filter(id, filter_id, flags, cd_nelmts, cd_values);
- if( ret_value < 0 )
- {
- throw PropListIException("DSetCreatPropList::modifyFilter",
+ herr_t ret_value = H5Pmodify_filter(id, filter_id, flags, cd_nelmts, cd_values);
+ if (ret_value < 0)
+ {
+ throw PropListIException("DSetCreatPropList::modifyFilter",
"H5Pmodify_filter failed");
- }
+ }
}
//--------------------------------------------------------------------------
-// Function: DSetCreatPropList::allFiltersAvail
-///\brief Queries whether all the filters set in this property list
-/// are available currently.
-///\return true if all filters available, and false if one or more
-/// filters not currently available
-///\exception H5::PropListIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DSetCreatPropList::allFiltersAvail
+///\brief       Queries whether all the filters set in this property list
+///             are currently available.
+///\return      true if all filters are available, and false if one or more
+///             filters are not currently available
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
bool DSetCreatPropList::allFiltersAvail() const
{
- htri_t ret_value = H5Pall_filters_avail(id);
- if( ret_value > 0 )
- return true;
- else if( ret_value == 0 )
- return false;
- else // Raise exception when H5Pall_filters_avail returns a negative value
- {
- throw PropListIException("DSetCreatPropList::allFiltersAvail", "H5Pall_filters_avail returned negative value");
- }
+ htri_t ret_value = H5Pall_filters_avail(id);
+ if (ret_value > 0)
+ return true;
+ else if (ret_value == 0)
+ return false;
+ else // Raise exception when H5Pall_filters_avail returns a negative value
+ {
+ throw PropListIException("DSetCreatPropList::allFiltersAvail", "H5Pall_filters_avail returned negative value");
+ }
}
//--------------------------------------------------------------------------
-// Function: DSetCreatPropList::setShuffle
-///\brief Sets method of the shuffle filter
+// Function: DSetCreatPropList::setShuffle
+///\brief Sets method of the shuffle filter
///
-///\exception H5::PropListIException
+///\exception H5::PropListIException
///\par Description
-/// Please refer to the Reference Manual of \c H5Pset_shuffle for
-/// details.
+/// Please refer to the Reference Manual of \c H5Pset_shuffle for
+/// details.
/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5P.html#Property-SetShuffle
-// Programmer Binh-Minh Ribler - 2000
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
void DSetCreatPropList::setShuffle() const
{
- herr_t ret_value = H5Pset_shuffle(id);
- if( ret_value < 0 )
- {
- throw PropListIException("DSetCreatPropList::setShuffle",
+ herr_t ret_value = H5Pset_shuffle(id);
+ if (ret_value < 0)
+ {
+ throw PropListIException("DSetCreatPropList::setShuffle",
"H5Pset_shuffle failed");
- }
+ }
}
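Shuffle only reorders bytes, so it is normally paired with a compression filter and added to the pipeline before it; a minimal sketch:

    plist.setChunk(2, chunk_dims);
    plist.setShuffle();    // byte-shuffle first
    plist.setDeflate(6);   // then compress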
//--------------------------------------------------------------------------
-// Function: DSetCreatPropList::getAllocTime
-///\brief Get space allocation time for this property.
-///\return Space allocation time.
-///\exception H5::PropListIException
+// Function: DSetCreatPropList::getAllocTime
+///\brief Get space allocation time for this property.
+///\return Space allocation time.
+///\exception H5::PropListIException
///\par Description
-/// The values of space allocation time can be one of the
-/// followings:
-/// \li \c H5D_ALLOC_TIME_DEFAULT
-/// \li \c H5D_ALLOC_TIME_EARLY
-/// \li \c H5D_ALLOC_TIME_LATE
-/// \li \c H5D_ALLOC_TIME_INCR
-// Programmer Binh-Minh Ribler - 2000
+///             Space allocation time can be one of the following values:
+/// \li \c H5D_ALLOC_TIME_DEFAULT
+/// \li \c H5D_ALLOC_TIME_EARLY
+/// \li \c H5D_ALLOC_TIME_LATE
+/// \li \c H5D_ALLOC_TIME_INCR
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
H5D_alloc_time_t DSetCreatPropList::getAllocTime() const
{
- H5D_alloc_time_t alloc_time;
- herr_t ret_value = H5Pget_alloc_time(id, &alloc_time);
- if( ret_value < 0 )
- {
- throw PropListIException("DSetCreatPropList::getAllocTime",
+ H5D_alloc_time_t alloc_time;
+ herr_t ret_value = H5Pget_alloc_time(id, &alloc_time);
+ if (ret_value < 0)
+ {
+ throw PropListIException("DSetCreatPropList::getAllocTime",
"H5Pget_alloc_time failed");
- }
- else
- return (alloc_time);
+ }
+ else
+ return (alloc_time);
}
//--------------------------------------------------------------------------
-// Function: DSetCreatPropList::getFillTime
-///\brief Gets fill value writing time.
-///\return Fill value writing time
-///\exception H5::PropListIException
+// Function: DSetCreatPropList::getFillTime
+///\brief Gets fill value writing time.
+///\return Fill value writing time
+///\exception H5::PropListIException
///\par Description
-/// Valid values for fill value writing time include
-/// \li \c H5D_FILL_TIME_NEVER
-/// \li \c H5D_FILL_TIME_ALLOC.
-// Programmer Binh-Minh Ribler - 2000
+/// Valid values for fill value writing time include
+/// \li \c H5D_FILL_TIME_NEVER
+/// \li \c H5D_FILL_TIME_ALLOC.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
H5D_fill_time_t DSetCreatPropList::getFillTime() const
{
- H5D_fill_time_t fill_time;
- herr_t ret_value = H5Pget_fill_time(id, &fill_time);
- if( ret_value < 0 )
- {
- throw PropListIException("DSetCreatPropList::getFillTime",
+ H5D_fill_time_t fill_time;
+ herr_t ret_value = H5Pget_fill_time(id, &fill_time);
+ if (ret_value < 0)
+ {
+ throw PropListIException("DSetCreatPropList::getFillTime",
"H5Pget_fill_time failed");
- }
- else
- return (fill_time);
+ }
+ else
+ return (fill_time);
}
//--------------------------------------------------------------------------
-// Function: DSetCreatPropList::setAllocTime
-///\brief Sets space allocation time for dataset during creation.
-///\param alloc_time - IN: Allocation time
-///\exception H5::PropListIException
+// Function: DSetCreatPropList::setAllocTime
+///\brief Sets space allocation time for dataset during creation.
+///\param alloc_time - IN: Allocation time
+///\exception H5::PropListIException
///\par Description
-/// Valid values for space allocation time include:
-/// \li \c H5D_ALLOC_TIME_DEFAULT
-/// \li \c H5D_ALLOC_TIME_EARLY
-/// \li \c H5D_ALLOC_TIME_LATE
-/// \li \c H5D_ALLOC_TIME_INCR
-// Programmer Binh-Minh Ribler - 2000
+/// Valid values for space allocation time include:
+/// \li \c H5D_ALLOC_TIME_DEFAULT
+/// \li \c H5D_ALLOC_TIME_EARLY
+/// \li \c H5D_ALLOC_TIME_LATE
+/// \li \c H5D_ALLOC_TIME_INCR
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
void DSetCreatPropList::setAllocTime(H5D_alloc_time_t alloc_time) const
{
- herr_t ret_value = H5Pset_alloc_time(id, alloc_time);
- if( ret_value < 0 )
- {
- throw PropListIException("DSetCreatPropList::setAllocTime",
+ herr_t ret_value = H5Pset_alloc_time(id, alloc_time);
+ if (ret_value < 0)
+ {
+ throw PropListIException("DSetCreatPropList::setAllocTime",
"H5Pset_alloc_time failed");
- }
+ }
}
//--------------------------------------------------------------------------
-// Function: DSetCreatPropList::setFillTime
-///\brief Sets fill value writing time for dataset.
-///\return Fill value writing time
-///\exception H5::PropListIException
+// Function: DSetCreatPropList::setFillTime
+///\brief Sets fill value writing time for dataset.
+///\param fill_time - IN: Fill value writing time
+///\exception H5::PropListIException
///\par Description
-/// Valid values for fill value writing time include
-/// \li \c H5D_FILL_TIME_NEVER
-/// \li \c H5D_FILL_TIME_ALLOC.
-// Programmer Binh-Minh Ribler - 2000
+/// Valid values for fill value writing time include
+/// \li \c H5D_FILL_TIME_NEVER
+/// \li \c H5D_FILL_TIME_ALLOC.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
void DSetCreatPropList::setFillTime(H5D_fill_time_t fill_time) const
{
- herr_t ret_value = H5Pset_fill_time(id, fill_time);
- if( ret_value < 0 )
- {
- throw PropListIException("DSetCreatPropList::setFillTime",
+ herr_t ret_value = H5Pset_fill_time(id, fill_time);
+ if (ret_value < 0)
+ {
+ throw PropListIException("DSetCreatPropList::setFillTime",
"H5Pset_fill_time failed");
- }
+ }
}
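A short sketch combining the allocation-time and fill-time setters with their corresponding getters:

    plist.setAllocTime(H5D_ALLOC_TIME_EARLY);   // allocate space at creation time
    plist.setFillTime(H5D_FILL_TIME_ALLOC);     // write fill value when space is allocated

    H5D_alloc_time_t at = plist.getAllocTime(); // H5D_ALLOC_TIME_EARLY
    H5D_fill_time_t  ft = plist.getFillTime();  // H5D_FILL_TIME_ALLOC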
//--------------------------------------------------------------------------
-// Function: DSetCreatPropList::setFletcher32
-///\brief Sets Fletcher32 checksum of EDC for this property list.
+// Function: DSetCreatPropList::setFletcher32
+///\brief Sets Fletcher32 checksum of EDC for this property list.
///
-///\exception H5::PropListIException
-// Programmer Binh-Minh Ribler - 2000
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
void DSetCreatPropList::setFletcher32() const
{
- herr_t ret_value = H5Pset_fletcher32(id);
- if( ret_value < 0 )
- {
- throw PropListIException("DSetCreatPropList::setFletcher32",
+ herr_t ret_value = H5Pset_fletcher32(id);
+ if (ret_value < 0)
+ {
+ throw PropListIException("DSetCreatPropList::setFletcher32",
"H5Pset_fletcher32 failed");
- }
+ }
}
//--------------------------------------------------------------------------
-// Function: DSetCreatPropList::setExternal
-///\brief Adds an external file to the list of external files
-///\param name - IN: Name of the external file
-///\param offset - IN: Location where the data starts in the file
-///\param size - IN: Number of bytes reserved in the file for the data
-///\exception H5::PropListIException
+// Function: DSetCreatPropList::setExternal
+///\brief Adds an external file to the list of external files
+///\param name - IN: Name of the external file
+///\param offset - IN: Location where the data starts in the file
+///\param size - IN: Number of bytes reserved in the file for the data
+///\exception H5::PropListIException
///\par Description
-/// If a dataset is splitted across multiple files then the files
-/// should be defined in order. The total size of the dataset is
-/// the sum of the \a size arguments for all the external files. If
-/// the total size is larger than the size of a dataset then the
-/// dataset can be extended (provided the data space also allows
-/// the extending).
-// Programmer Binh-Minh Ribler - 2000
-//--------------------------------------------------------------------------
-void DSetCreatPropList::setExternal( const char* name, off_t offset, hsize_t size ) const
+///             If a dataset is split across multiple files then the files
+///             should be defined in order. The total size of the dataset is
+///             the sum of the \a size arguments for all the external files. If
+///             the total size is larger than the size of the dataset then the
+///             dataset can be extended (provided the data space also allows
+///             the extending).
+// Programmer Binh-Minh Ribler - 2000
+//--------------------------------------------------------------------------
+void DSetCreatPropList::setExternal(const char* name, off_t offset, hsize_t size) const
{
- herr_t ret_value = H5Pset_external( id, name, offset, size );
- if( ret_value < 0 )
- {
- throw PropListIException("DSetCreatPropList::setExternal",
+ herr_t ret_value = H5Pset_external(id, name, offset, size);
+ if (ret_value < 0)
+ {
+ throw PropListIException("DSetCreatPropList::setExternal",
"H5Pset_external failed");
- }
+ }
}
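A brief sketch reserving room for 1000 ints in an external raw-data file (the file name is illustrative):

    H5::DSetCreatPropList plist;
    plist.setExternal("raw_data.bin", 0, 1000 * sizeof(int));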
//--------------------------------------------------------------------------
-// Function: DSetCreatPropList::getExternalCount
-///\brief Returns the number of external files for a dataset
-///\return Number of external files
-///\exception H5::PropListIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: DSetCreatPropList::getExternalCount
+///\brief Returns the number of external files for a dataset
+///\return Number of external files
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
int DSetCreatPropList::getExternalCount() const
{
- int num_ext_files = H5Pget_external_count( id );
- if( num_ext_files < 0 )
- {
- throw PropListIException("DSetCreatPropList::getExternalCount",
+ int num_ext_files = H5Pget_external_count(id);
+ if (num_ext_files < 0)
+ {
+ throw PropListIException("DSetCreatPropList::getExternalCount",
"H5Pget_external_count returns negative number of external files");
- }
- else
- return( num_ext_files );
+ }
+ else
+ return(num_ext_files);
}
//--------------------------------------------------------------------------
-// Function: DSetCreatPropList::getExternal
-///\brief Returns information about an external file
-///\param idx - IN: Index of the external file, ranges [0-(N-1)] and
-/// returned by getExternalCount()
-///\param name_size - IN: Maximum length of \a name
-///\param name - OUT: Name of the external file
-///\param offset - OUT: Location to return an offset value
-///\param size - OUT: Location to return the size of the external file data
-///\exception H5::PropListIException
+// Function: DSetCreatPropList::getExternal
+///\brief Returns information about an external file
+///\param idx       - IN: Index of the external file, in the range [0..N-1],
+///                     where N is returned by getExternalCount()
+///\param name_size - IN: Maximum length of \a name
+///\param name - OUT: Name of the external file
+///\param offset - OUT: Location to return an offset value
+///\param size - OUT: Location to return the size of the external file data
+///\exception H5::PropListIException
///\par Description
-/// The parameter \a idx ranges [0..N-1] where N is returned by
-/// getExternalCount(). At most \a name_size characters are copied
-/// into the name array. If the external file name is longer than
-/// name_size with the null terminator, the return value is not
-/// null terminated (similar to strncpy()).
-/// If \a name_size is zero or \a name is a null pointer, the
-/// external file name will not be returned. If \a offset or
-/// \a size are null pointers then the corresponding information
-/// will not be returned.
-// Programmer Binh-Minh Ribler - 2000
-//--------------------------------------------------------------------------
-void DSetCreatPropList::getExternal( unsigned idx, size_t name_size, char* name, off_t& offset, hsize_t& size ) const
+/// The parameter \a idx ranges [0..N-1] where N is returned by
+/// getExternalCount(). At most \a name_size characters are copied
+///             into the \a name array. If the external file name, including
+///             the null terminator, is longer than \a name_size, the returned
+///             name is not null terminated (similar to strncpy()).
+/// If \a name_size is zero or \a name is a null pointer, the
+/// external file name will not be returned. If \a offset or
+/// \a size are null pointers then the corresponding information
+/// will not be returned.
+// Programmer Binh-Minh Ribler - 2000
+//--------------------------------------------------------------------------
+void DSetCreatPropList::getExternal(unsigned idx, size_t name_size, char* name, off_t& offset, hsize_t& size) const
{
- herr_t ret_value = H5Pget_external( id, idx, name_size, name, &offset, &size );
- if( ret_value < 0 )
- {
- throw PropListIException("DSetCreatPropList::getExternal",
+ herr_t ret_value = H5Pget_external(id, idx, name_size, name, &offset, &size);
+ if (ret_value < 0)
+ {
+ throw PropListIException("DSetCreatPropList::getExternal",
"H5Pget_external failed");
- }
+ }
+}
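A sketch enumerating the external segments recorded in a creation property list:

    int count = plist.getExternalCount();
    for (unsigned i = 0; i < (unsigned)count; i++)
    {
        char    name[256];
        off_t   offset = 0;
        hsize_t size   = 0;
        plist.getExternal(i, sizeof(name), name, offset, size);
        // name, offset, and size describe the i-th external file segment
    }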
+
+//--------------------------------------------------------------------------
+// Function: DSetCreatPropList::setVirtual
+///\brief Maps elements of a virtual dataset to elements of the source
+/// dataset.
+///\param vspace - IN: Dataspace of the virtual dataset, possibly an
+///                     unlimited selection
+///\param src_fname - IN: Name of the HDF5 file where the source dataset
+///                     is located (\a char*)
+///\param src_dsname - IN: Path to the dataset in the file specified by
+///                     \a src_fname (\a char*)
+///\param sspace - IN: Dataspace with a selection applied, possibly
+/// an unlimited selection
+///\exception H5::PropListIException
+///\par Description
+/// https://support.hdfgroup.org/HDF5/doc/RM/RM_H5P.html#Property-SetVirtual
+// Programmer Binh-Minh Ribler - Mar, 2017
+//--------------------------------------------------------------------------
+void DSetCreatPropList::setVirtual(const DataSpace& vspace, const char *src_fname, const char *src_dsname, const DataSpace& sspace) const
+{
+ herr_t ret_value = H5Pset_virtual(id, vspace.getId(), src_fname, src_dsname, sspace.getId());
+ if (ret_value < 0)
+ {
+ throw PropListIException("DSetCreatPropList::setVirtual",
+ "H5Pset_virtual failed");
+ }
+}
+
+//--------------------------------------------------------------------------
+// Function: DSetCreatPropList::setVirtual
+///\brief Maps elements of a virtual dataset to elements of the source
+/// dataset.
+///\param vspace - IN: Dataspace of the virtual dataset, possibly an
+///                     unlimited selection
+///\param src_fname - IN: Name of the HDF5 file where the source dataset
+///                     is located (\a H5std_string)
+///\param src_dsname - IN: Path to the dataset in the file specified by
+///                     \a src_fname (\a H5std_string)
+///\param sspace - IN: Dataspace with a selection applied, possibly
+/// an unlimited selection
+///\exception H5::PropListIException
+///\par Description
+/// https://support.hdfgroup.org/HDF5/doc/RM/RM_H5P.html#Property-SetVirtual
+// Programmer Binh-Minh Ribler - Mar, 2017
+//--------------------------------------------------------------------------
+void DSetCreatPropList::setVirtual(const DataSpace& vspace, const H5std_string src_fname, const H5std_string src_dsname, const DataSpace& sspace) const
+{
+ setVirtual(vspace, src_fname.c_str(), src_dsname.c_str(), sspace);
}
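A sketch of the simplest mapping, where the entire virtual dataspace maps to the entire source selection (file is assumed to be an open H5File; "source.h5" and "/A" are illustrative names):

    hsize_t dims[1] = {100};
    H5::DataSpace vspace(1, dims);   // dataspace of the virtual dataset
    H5::DataSpace sspace(1, dims);   // selection in the source dataset
    H5::DSetCreatPropList plist;
    plist.setVirtual(vspace, "source.h5", "/A", sspace);
    H5::DataSet vds = file.createDataSet("VDS", H5::PredType::NATIVE_INT, vspace, plist);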
//--------------------------------------------------------------------------
-// Function: DSetCreatPropList destructor
-///\brief Noop destructor.
+// Function: DSetCreatPropList destructor
+///\brief Noop destructor.
// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-DSetCreatPropList::~DSetCreatPropList () {}
+DSetCreatPropList::~DSetCreatPropList() {}
} // end namespace
diff --git a/c++/src/H5DcreatProp.h b/c++/src/H5DcreatProp.h
index 787d3ec..5d5714a 100644
--- a/c++/src/H5DcreatProp.h
+++ b/c++/src/H5DcreatProp.h
@@ -6,23 +6,19 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-// Class DSetCreatPropList represents the HDF5 dataset creation property list
-// and inherits from PropList.
-
#ifndef __H5DSCreatPropList_H
#define __H5DSCreatPropList_H
namespace H5 {
class DataType;
+class DataSpace;
/*! \class DSetCreatPropList
\brief Class DSetCreatPropList inherits from ObjCreatPropList and provides
@@ -32,105 +28,109 @@ class DataType;
*/
class H5_DLLCPP DSetCreatPropList : public ObjCreatPropList {
public:
- ///\brief Default dataset creation property list.
- static const DSetCreatPropList& DEFAULT;
+ ///\brief Default dataset creation property list.
+ static const DSetCreatPropList& DEFAULT;
+
+ // Creates a dataset creation property list.
+ DSetCreatPropList();
- // Creates a dataset creation property list.
- DSetCreatPropList();
+ // Queries whether all the filters set in this property list are
+ // available currently.
+ bool allFiltersAvail() const;
- // Queries whether all the filters set in this property list are
- // available currently.
- bool allFiltersAvail() const;
+ // Get space allocation time for this property.
+ H5D_alloc_time_t getAllocTime() const;
- // Get space allocation time for this property.
- H5D_alloc_time_t getAllocTime() const;
+ // Set space allocation time for dataset during creation.
+ void setAllocTime(H5D_alloc_time_t alloc_time) const;
- // Set space allocation time for dataset during creation.
- void setAllocTime(H5D_alloc_time_t alloc_time) const;
+ // Retrieves the size of the chunks used to store a chunked layout dataset.
+ int getChunk(int max_ndims, hsize_t* dim) const;
- // Retrieves the size of the chunks used to store a chunked layout dataset.
- int getChunk( int max_ndims, hsize_t* dim ) const;
+ // Sets the size of the chunks used to store a chunked layout dataset.
+ void setChunk(int ndims, const hsize_t* dim) const;
- // Sets the size of the chunks used to store a chunked layout dataset.
- void setChunk( int ndims, const hsize_t* dim ) const;
+ // Returns information about an external file.
+ void getExternal(unsigned idx, size_t name_size, char* name, off_t& offset, hsize_t& size) const;
- // Returns information about an external file.
- void getExternal( unsigned idx, size_t name_size, char* name, off_t& offset, hsize_t& size ) const;
+ // Returns the number of external files for a dataset.
+ int getExternalCount() const;
- // Returns the number of external files for a dataset.
- int getExternalCount() const;
+ // Gets fill value writing time.
+ H5D_fill_time_t getFillTime() const;
- // Gets fill value writing time.
- H5D_fill_time_t getFillTime() const;
+ // Sets fill value writing time for dataset.
+ void setFillTime(H5D_fill_time_t fill_time) const;
- // Sets fill value writing time for dataset.
- void setFillTime(H5D_fill_time_t fill_time) const;
+ // Retrieves a dataset fill value.
+ void getFillValue(const DataType& fvalue_type, void* value) const;
- // Retrieves a dataset fill value.
- void getFillValue( const DataType& fvalue_type, void* value ) const;
+ // Sets a dataset fill value.
+ void setFillValue(const DataType& fvalue_type, const void* value) const;
- // Sets a dataset fill value.
- void setFillValue( const DataType& fvalue_type, const void* value ) const;
+ // Returns information about a filter in a pipeline.
+ H5Z_filter_t getFilter(int filter_number, unsigned int& flags, size_t& cd_nelmts, unsigned int* cd_values, size_t namelen, char name[], unsigned int &filter_config) const;
- // Returns information about a filter in a pipeline.
- H5Z_filter_t getFilter(int filter_number, unsigned int& flags, size_t& cd_nelmts, unsigned int* cd_values, size_t namelen, char name[], unsigned int &filter_config) const;
+ // Returns information about a filter in a pipeline given the filter id.
+ void getFilterById(H5Z_filter_t filter_id, unsigned int &flags, size_t &cd_nelmts, unsigned int* cd_values, size_t namelen, char name[], unsigned int &filter_config) const;
- // Returns information about a filter in a pipeline given the filter id.
- void getFilterById(H5Z_filter_t filter_id, unsigned int &flags, size_t &cd_nelmts, unsigned int* cd_values, size_t namelen, char name[], unsigned int &filter_config) const;
+ // Gets the layout of the raw data storage of the data that uses this
+ // property list.
+ H5D_layout_t getLayout() const;
- // Gets the layout of the raw data storage of the data that uses this
- // property list.
- H5D_layout_t getLayout() const;
+ // Sets the type of storage used to store the raw data for the
+ // dataset that uses this property list.
+ void setLayout(H5D_layout_t layout) const;
- // Sets the type of storage used to store the raw data for the
- // dataset that uses this property list.
- void setLayout(H5D_layout_t layout) const;
+ // Returns the number of filters in the pipeline.
+ int getNfilters() const;
- // Returns the number of filters in the pipeline.
- int getNfilters() const;
+ // Checks if fill value has been defined for this property.
+ H5D_fill_value_t isFillValueDefined() const;
- // Checks if fill value has been defined for this property.
- H5D_fill_value_t isFillValueDefined() const;
+ // Modifies the specified filter.
+ void modifyFilter(H5Z_filter_t filter_id, unsigned int flags, size_t cd_nelmts, const unsigned int cd_values[]) const;
- // Modifies the specified filter.
- void modifyFilter( H5Z_filter_t filter_id, unsigned int flags, size_t cd_nelmts, const unsigned int cd_values[] ) const;
+ // Remove one or all filters from the filter pipeline.
+ void removeFilter(H5Z_filter_t filter_id) const;
- // Remove one or all filters from the filter pipeline.
- void removeFilter( H5Z_filter_t filter_id) const;
+ // Sets compression method and compression level.
+ void setDeflate(int level) const;
- // Sets compression method and compression level.
- void setDeflate( int level ) const;
+ // Adds an external file to the list of external files.
+ void setExternal(const char* name, off_t offset, hsize_t size) const;
- // Adds an external file to the list of external files.
- void setExternal( const char* name, off_t offset, hsize_t size ) const;
+ // Adds a filter to the filter pipeline.
+ void setFilter(H5Z_filter_t filter, unsigned int flags = 0, size_t cd_nelmts = 0, const unsigned int cd_values[] = NULL) const;
- // Adds a filter to the filter pipeline.
- void setFilter( H5Z_filter_t filter, unsigned int flags = 0, size_t cd_nelmts = 0, const unsigned int cd_values[] = NULL) const;
+ // Sets Fletcher32 checksum of EDC for this property list.
+ void setFletcher32() const;
- // Sets Fletcher32 checksum of EDC for this property list.
- void setFletcher32() const;
+ // Sets method of the shuffle filter.
+ void setShuffle() const;
- // Sets method of the shuffle filter.
- void setShuffle() const;
+ // Sets SZIP compression method.
+ void setSzip(unsigned int options_mask, unsigned int pixels_per_block) const;
- // Sets SZIP compression method.
- void setSzip(unsigned int options_mask, unsigned int pixels_per_block) const;
+ // Sets N-bit compression method.
+ void setNbit() const;
- // Sets N-bit compression method.
- void setNbit() const;
+ // Maps elements of a virtual dataset to elements of the source dataset.
+ void setVirtual(const DataSpace& vspace, const char *src_fname, const char *src_dsname, const DataSpace& sspace) const;
+ void setVirtual(const DataSpace& vspace, const H5std_string src_fname, const H5std_string src_dsname, const DataSpace& sspace) const;
- ///\brief Returns this class name.
- virtual H5std_string fromClass () const { return("DSetCreatPropList"); }
+ ///\brief Returns this class name.
+ virtual H5std_string fromClass () const { return("DSetCreatPropList"); }
- // Copy constructor: creates a copy of a DSetCreatPropList object.
- DSetCreatPropList(const DSetCreatPropList& orig);
+ // Copy constructor: creates a copy of a DSetCreatPropList object.
+ DSetCreatPropList(const DSetCreatPropList& orig);
- // Creates a copy of an existing dataset creation property list
- // using the property list id.
- DSetCreatPropList(const hid_t plist_id);
+ // Creates a copy of an existing dataset creation property list
+ // using the property list id.
+ DSetCreatPropList(const hid_t plist_id);
- // Noop destructor.
- virtual ~DSetCreatPropList();
+ // Noop destructor.
+ virtual ~DSetCreatPropList();
#ifndef DOXYGEN_SHOULD_SKIP_THIS
@@ -138,12 +138,14 @@ class H5_DLLCPP DSetCreatPropList : public ObjCreatPropList {
static void deleteConstants();
private:
- static DSetCreatPropList* DEFAULT_;
+ static DSetCreatPropList* DEFAULT_;
// Creates the global constant, should only be used by the library
static DSetCreatPropList* getConstant();
#endif // DOXYGEN_SHOULD_SKIP_THIS
-};
-}
+
+}; // end of DSetCreatPropList
+} // namespace H5
+
#endif // __H5DSCreatPropList_H
diff --git a/c++/src/H5DxferProp.cpp b/c++/src/H5DxferProp.cpp
index 7d9b943..4c767cb 100644
--- a/c++/src/H5DxferProp.cpp
+++ b/c++/src/H5DxferProp.cpp
@@ -5,22 +5,20 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <string>
+#include "H5private.h" // for HDmemset
#include "H5Include.h"
#include "H5Exception.h"
#include "H5IdComponent.h"
#include "H5PropList.h"
#include "H5DxferProp.h"
-#include "H5private.h" // for HDmemset
namespace H5 {
@@ -36,7 +34,7 @@ DSetMemXferPropList* DSetMemXferPropList::DEFAULT_ = 0;
// Function: DSetMemXferPropList::getConstant
// Creates a DSetMemXferPropList object representing the HDF5
// constant H5P_DATASET_XFER, pointed to by
-// DSetMemXferPropList::DEFAULT_
+// DSetMemXferPropList::DEFAULT_
// exception H5::PropListIException
// Description
// If DSetMemXferPropList::DEFAULT_ already points to an allocated
@@ -76,25 +74,25 @@ void DSetMemXferPropList::deleteConstants()
}
//--------------------------------------------------------------------------
-// Purpose Constant for default dataset memory and transfer property list.
+// Purpose Constant for default dataset memory and transfer property list.
//--------------------------------------------------------------------------
const DSetMemXferPropList& DSetMemXferPropList::DEFAULT = *getConstant();
#endif // DOXYGEN_SHOULD_SKIP_THIS
//--------------------------------------------------------------------------
-// Function DSetMemXferPropList default constructor
-///\brief Default constructor: creates a stub dataset memory and
-/// transfer property list object.
-// Programmer: Binh-Minh Ribler - 2000
+// Function DSetMemXferPropList default constructor
+///\brief Default constructor: creates a stub dataset memory and
+/// transfer property list object.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
DSetMemXferPropList::DSetMemXferPropList() : PropList(H5P_DATASET_XFER) {}
//--------------------------------------------------------------------------
-// Function DSetMemXferPropList constructor
-///\brief Creates a dataset transfer property list with transform
-/// expression.
-// Programmer: Binh-Minh Ribler - 2000
+// Function DSetMemXferPropList constructor
+///\brief Creates a dataset transfer property list with transform
+/// expression.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
DSetMemXferPropList::DSetMemXferPropList(const char* exp) : PropList(H5P_DATASET_XFER)
{
@@ -102,164 +100,164 @@ DSetMemXferPropList::DSetMemXferPropList(const char* exp) : PropList(H5P_DATASET
}
//--------------------------------------------------------------------------
-// Function DSetMemXferPropList copy constructor
-///\brief Copy constructor: makes a copy of the original
-/// DSetMemXferPropList object
-///\param original - IN: Original dataset memory and transfer property
-/// list object to copy
-// Programmer: Binh-Minh Ribler - 2000
+// Function DSetMemXferPropList copy constructor
+///\brief Copy constructor: makes a copy of the original
+/// DSetMemXferPropList object
+///\param original - IN: Original dataset memory and transfer property
+/// list object to copy
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-DSetMemXferPropList::DSetMemXferPropList(const DSetMemXferPropList& original ) : PropList( original ) {}
+DSetMemXferPropList::DSetMemXferPropList(const DSetMemXferPropList& original) : PropList(original) {}
//--------------------------------------------------------------------------
-// Function DSetMemXferPropList overloaded constructor
-///\brief Creates a DSetMemXferPropList object using the id of an
-/// existing DSetMemXferPropList.
-///\param plist_id - IN: Id of an existing dataset memory and transfer
-/// property list
-// Programmer: Binh-Minh Ribler - 2000
+// Function DSetMemXferPropList overloaded constructor
+///\brief Creates a DSetMemXferPropList object using the id of an
+/// existing DSetMemXferPropList.
+///\param plist_id - IN: Id of an existing dataset memory and transfer
+/// property list
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
DSetMemXferPropList::DSetMemXferPropList(const hid_t plist_id) : PropList(plist_id) {}
//--------------------------------------------------------------------------
-// Function: DSetMemXferPropList::setBuffer
-///\brief Sets type conversion and background buffers.
-///\param size - IN: Size, in bytes, of the type conversion and background buffers
-///\param tconv - IN: Pointer to application-allocated type conversion buffer
-///\param bkg - IN: Pointer to application-allocated background buffer
-///\exception H5::PropListIException
-// Programmer: Binh-Minh Ribler - 2000
+// Function: DSetMemXferPropList::setBuffer
+///\brief Sets type conversion and background buffers.
+///\param size - IN: Size, in bytes, of the type conversion and background buffers
+///\param tconv - IN: Pointer to application-allocated type conversion buffer
+///\param bkg - IN: Pointer to application-allocated background buffer
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void DSetMemXferPropList::setBuffer( size_t size, void* tconv, void* bkg ) const
+void DSetMemXferPropList::setBuffer(size_t size, void* tconv, void* bkg) const
{
- herr_t ret_value = H5Pset_buffer( id, size, tconv, bkg );
- if( ret_value < 0 )
- {
- throw PropListIException("DSetMemXferPropList::setBuffer",
- "H5Pset_buffer failed");
- }
+ herr_t ret_value = H5Pset_buffer(id, size, tconv, bkg);
+ if(ret_value < 0)
+ {
+ throw PropListIException("DSetMemXferPropList::setBuffer",
+ "H5Pset_buffer failed");
+ }
}
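A sketch supplying a 1 MiB application-owned conversion buffer for a read (dset and buf are assumed to exist, and <vector> to be included):

    std::vector<char> tconv(1024 * 1024);
    H5::DSetMemXferPropList xfer;
    xfer.setBuffer(tconv.size(), tconv.data(), NULL);   // no background buffer
    dset.read(buf, H5::PredType::NATIVE_DOUBLE,
              H5::DataSpace::ALL, H5::DataSpace::ALL, xfer);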
//--------------------------------------------------------------------------
-// Function: DSetMemXferPropList::getBuffer
-///\brief Reads buffer settings.
-///\param tconv - OUT: Pointer to application-allocated type conversion buf
-///\param bkg - OUT: Pointer to application-allocated background buffer
-///\return Buffer size, in bytes
-///\exception H5::PropListIException
-// Programmer: Binh-Minh Ribler - 2000
+// Function: DSetMemXferPropList::getBuffer
+///\brief Reads buffer settings.
+///\param tconv - OUT: Pointer to application-allocated type conversion buffer
+///\param bkg - OUT: Pointer to application-allocated background buffer
+///\return Buffer size, in bytes
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-size_t DSetMemXferPropList::getBuffer( void** tconv, void** bkg ) const
+size_t DSetMemXferPropList::getBuffer(void** tconv, void** bkg) const
{
- size_t buffer_size = H5Pget_buffer( id, tconv, bkg );
- if( buffer_size == 0 )
- {
- throw PropListIException("DSetMemXferPropList::getBuffer",
- "H5Pget_buffer returned 0 for buffer size - failure");
- }
- return( buffer_size );
+ size_t buffer_size = H5Pget_buffer(id, tconv, bkg);
+ if(buffer_size == 0)
+ {
+ throw PropListIException("DSetMemXferPropList::getBuffer",
+ "H5Pget_buffer returned 0 for buffer size - failure");
+ }
+ return(buffer_size);
}
//--------------------------------------------------------------------------
-// Function: DSetMemXferPropList::setPreserve
-///\brief Sets the dataset transfer property list status to true or false.
-///\param status - IN: Status to set, true or false
-///\exception H5::PropListIException
-// Programmer: Binh-Minh Ribler - 2000
+// Function: DSetMemXferPropList::setPreserve
+///\brief Sets the dataset transfer property list status to true or false.
+///\param status - IN: Status to set, true or false
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void DSetMemXferPropList::setPreserve( bool status ) const
+void DSetMemXferPropList::setPreserve(bool status) const
{
- herr_t ret_value = H5Pset_preserve( id, (hbool_t) status );
- if( ret_value < 0 )
- {
- throw PropListIException("DSetMemXferPropList::setPreserve",
- "H5Pset_preserve failed");
- }
+ herr_t ret_value = H5Pset_preserve(id, (hbool_t) status);
+ if(ret_value < 0)
+ {
+ throw PropListIException("DSetMemXferPropList::setPreserve",
+ "H5Pset_preserve failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: DSetMemXferPropList::getPreserve
-///\brief Checks status of the dataset transfer property list.
-///\return Status of the dataset transfer property list
-///\exception H5::PropListIException
-// Programmer: Binh-Minh Ribler - 2000
+// Function: DSetMemXferPropList::getPreserve
+///\brief Checks status of the dataset transfer property list.
+///\return Status of the dataset transfer property list
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
bool DSetMemXferPropList::getPreserve() const
{
- int ret_value = H5Pget_preserve( id );
- if( ret_value > 0 )
- return true;
- else if( ret_value == 0 )
- return false;
- else
- {
- throw PropListIException("DSetMemXferPropList::getPreserve",
- "H5Pget_preserve returned negative value for status");
- }
+ int ret_value = H5Pget_preserve(id);
+ if(ret_value > 0)
+ return true;
+ else if(ret_value == 0)
+ return false;
+ else
+ {
+ throw PropListIException("DSetMemXferPropList::getPreserve",
+ "H5Pget_preserve returned negative value for status");
+ }
}
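
A short round trip of the preserve flag, assuming only a default-constructed list; the underlying H5Pset_preserve call is listed as deprecated in recent C library releases, so this is shown purely to illustrate the setter/getter pair.

    #include "H5Cpp.h"
    #include <iostream>

    int main()
    {
        H5::DSetMemXferPropList xfer;
        xfer.setPreserve(true);                                            // wraps H5Pset_preserve
        std::cout << std::boolalpha << xfer.getPreserve() << std::endl;    // prints true
        return 0;
    }
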
//--------------------------------------------------------------------------
-// Function: DSetMemXferPropList::setBtreeRatios
-///\brief Sets B-tree split ratios for a dataset transfer property list.
-///\param left - IN: B-tree split ratio for left-most nodes
-///\param middle - IN: B-tree split ratio for right-most nodes and lone nodes
-///\param right - IN: B-tree split ratio for all other nodes
-///\exception H5::PropListIException
-// Programmer: Binh-Minh Ribler - 2000
+// Function: DSetMemXferPropList::setBtreeRatios
+///\brief Sets B-tree split ratios for a dataset transfer property list.
+///\param left - IN: B-tree split ratio for left-most nodes
+///\param middle - IN: B-tree split ratio for all other nodes
+///\param right  - IN: B-tree split ratio for right-most nodes and lone nodes
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void DSetMemXferPropList::setBtreeRatios( double left, double middle, double right ) const
+void DSetMemXferPropList::setBtreeRatios(double left, double middle, double right) const
{
- herr_t ret_value = H5Pset_btree_ratios( id, left, middle, right );
- if( ret_value < 0 )
- {
- throw PropListIException("DSetMemXferPropList::setBtreeRatios",
- "H5Pset_btree_ratios failed");
- }
+ herr_t ret_value = H5Pset_btree_ratios(id, left, middle, right);
+ if(ret_value < 0)
+ {
+ throw PropListIException("DSetMemXferPropList::setBtreeRatios",
+ "H5Pset_btree_ratios failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: DSetMemXferPropList::getBtreeRatios
-///\brief Gets B-tree split ratios for a dataset transfer property list.
-///\param left - OUT: B-tree split ratio for left-most nodes
-///\param middle - OUT: B-tree split ratio for right-most nodes and lone nodes
-///\param right - OUT: B-tree split ratio for all other nodes
-///\exception H5::PropListIException
-// Programmer: Binh-Minh Ribler - 2000
+// Function: DSetMemXferPropList::getBtreeRatios
+///\brief Gets B-tree split ratios for a dataset transfer property list.
+///\param left - OUT: B-tree split ratio for left-most nodes
+///\param middle - OUT: B-tree split ratio for all other nodes
+///\param right  - OUT: B-tree split ratio for right-most nodes and lone nodes
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void DSetMemXferPropList::getBtreeRatios( double& left, double& middle, double& right ) const
+void DSetMemXferPropList::getBtreeRatios(double& left, double& middle, double& right) const
{
- herr_t ret_value = H5Pget_btree_ratios( id, &left, &middle, &right );
- if( ret_value < 0 )
- {
- throw PropListIException("DSetMemXferPropList::getBtreeRatios",
- "H5Pget_btree_ratios failed");
- }
+ herr_t ret_value = H5Pget_btree_ratios(id, &left, &middle, &right);
+ if(ret_value < 0)
+ {
+ throw PropListIException("DSetMemXferPropList::getBtreeRatios",
+ "H5Pget_btree_ratios failed");
+ }
}
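
A usage sketch for the split-ratio pair above; the 0.1/0.5/0.9 values are illustrative (each ratio is a fraction between 0.0 and 1.0).

    #include "H5Cpp.h"
    #include <iostream>

    int main()
    {
        H5::DSetMemXferPropList xfer;
        xfer.setBtreeRatios(0.1, 0.5, 0.9);        // illustrative left/middle/right ratios

        double left = 0.0, middle = 0.0, right = 0.0;
        xfer.getBtreeRatios(left, middle, right);  // reads the values back by reference
        std::cout << left << " " << middle << " " << right << std::endl;
        return 0;
    }
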
//--------------------------------------------------------------------------
-// Function: DSetMemXferPropList::setDataTransform
-///\brief Sets data transform expression.
-///\param expression - IN: null-terminated data transform expression (char*)
-///\exception H5::PropListIException
-// Programmer: Binh-Minh Ribler - Mar, 2014
+// Function: DSetMemXferPropList::setDataTransform
+///\brief Sets data transform expression.
+///\param expression - IN: null-terminated data transform expression (char*)
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - Mar, 2014
//--------------------------------------------------------------------------
void DSetMemXferPropList::setDataTransform(const char* expression) const
{
- herr_t ret_value = H5Pset_data_transform( id, expression);
- if( ret_value < 0 )
- {
- throw PropListIException("DSetMemXferPropList::setDataTransform",
- "H5Pset_data_transform failed");
- }
+ herr_t ret_value = H5Pset_data_transform(id, expression);
+ if(ret_value < 0)
+ {
+ throw PropListIException("DSetMemXferPropList::setDataTransform",
+ "H5Pset_data_transform failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: DSetMemXferPropList::setDataTransform
-///\brief This is an overloaded member function, provided for convenience.
-/// It takes a reference to a \c H5std_string for the expression.
-///\param expression - IN: H5std_string data transform expression
-///\exception H5::PropListIException
-// Programmer: Binh-Minh Ribler - Mar, 2014
+// Function: DSetMemXferPropList::setDataTransform
+///\brief This is an overloaded member function, provided for convenience.
+/// It takes a reference to a \c H5std_string for the expression.
+///\param expression - IN: H5std_string data transform expression
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - Mar, 2014
//--------------------------------------------------------------------------
void DSetMemXferPropList::setDataTransform(const H5std_string& expression) const
{
@@ -267,13 +265,13 @@ void DSetMemXferPropList::setDataTransform(const H5std_string& expression) const
}
//--------------------------------------------------------------------------
-// Function: DSetMemXferPropList::getDataTransform
-///\brief Sets data transform expression.
-///\param exp - OUT: buffer for data transform expression (char*)
-///\param buf_size - IN: size of buffer for expression, including the
-/// null terminator
-///\exception H5::PropListIException
-// Programmer: Binh-Minh Ribler - Mar, 2014
+// Function: DSetMemXferPropList::getDataTransform
+///\brief	Gets the data transform expression.
+///\param exp - OUT: buffer for data transform expression (char*)
+///\param buf_size - IN: size of buffer for expression, including the
+/// null terminator
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - Mar, 2014
//--------------------------------------------------------------------------
ssize_t DSetMemXferPropList::getDataTransform(char* exp, size_t buf_size) const
{
@@ -285,8 +283,8 @@ ssize_t DSetMemXferPropList::getDataTransform(char* exp, size_t buf_size) const
// H5Pget_data_transform returns a negative value, raise an exception
if (exp_len < 0)
{
- throw PropListIException("DSetMemXferPropList::getDataTransform",
- "H5Pget_data_transform failed");
+ throw PropListIException("DSetMemXferPropList::getDataTransform",
+ "H5Pget_data_transform failed");
}
// H5Pget_data_transform will put a null terminator at the end of the
@@ -298,11 +296,11 @@ ssize_t DSetMemXferPropList::getDataTransform(char* exp, size_t buf_size) const
}
//--------------------------------------------------------------------------
-// Function: DSetMemXferPropList::getDataTransform
-///\brief This is an overloaded member function, provided for convenience.
-/// It takes no parameter and returns a \c H5std_string for the expression.
-///\exception H5::PropListIException
-// Programmer: Binh-Minh Ribler - Mar, 2014
+// Function: DSetMemXferPropList::getDataTransform
+///\brief This is an overloaded member function, provided for convenience.
+/// It takes no parameter and returns a \c H5std_string for the expression.
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - Mar, 2014
//--------------------------------------------------------------------------
H5std_string DSetMemXferPropList::getDataTransform() const
{
@@ -340,226 +338,226 @@ H5std_string DSetMemXferPropList::getDataTransform() const
}
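
The transform calls above can be exercised as follows; the expression "x*2" (applied element-wise during I/O) and the use of the string-returning overload are illustrative choices.

    #include "H5Cpp.h"
    #include <iostream>

    int main()
    {
        H5::DSetMemXferPropList xfer;
        xfer.setDataTransform("x*2");                         // scale every element on transfer
        std::cout << xfer.getDataTransform() << std::endl;    // prints the stored expression
        return 0;
    }
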
//--------------------------------------------------------------------------
-// Function: DSetMemXferPropList::getTypeConvCB
-///\brief Sets an exception handling callback for datatype conversion
-/// for a dataset transfer property list.
-///\param op - IN: User's function
-///\param user_data - IN: User's data
-///\exception H5::PropListIException
-// Programmer: Binh-Minh Ribler - April, 2004
+// Function:	DSetMemXferPropList::setTypeConvCB
+///\brief Sets an exception handling callback for datatype conversion
+/// for a dataset transfer property list.
+///\param op - IN: User's function
+///\param user_data - IN: User's data
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
-void DSetMemXferPropList::setTypeConvCB( H5T_conv_except_func_t op, void *user_data) const
+void DSetMemXferPropList::setTypeConvCB(H5T_conv_except_func_t op, void *user_data) const
{
- herr_t ret_value = H5Pset_type_conv_cb( id, op, user_data);
- if( ret_value < 0 )
- {
- throw PropListIException("DSetMemXferPropList::setTypeConvCB",
- "H5Pset_type_conv_cb failed");
- }
+ herr_t ret_value = H5Pset_type_conv_cb(id, op, user_data);
+ if(ret_value < 0)
+ {
+ throw PropListIException("DSetMemXferPropList::setTypeConvCB",
+ "H5Pset_type_conv_cb failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: DSetMemXferPropList::getTypeConvCB
-///\brief Gets the exception handling callback function and data.
-///\param op - IN: Retrieved user function
-///\param user_data - IN: Retrieved user data
-///\exception H5::PropListIException
-// Programmer: Binh-Minh Ribler - April, 2004
+// Function: DSetMemXferPropList::getTypeConvCB
+///\brief Gets the exception handling callback function and data.
+///\param op - OUT: Retrieved user function
+///\param user_data - OUT: Retrieved user data
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
-void DSetMemXferPropList::getTypeConvCB( H5T_conv_except_func_t *op, void **user_data) const
+void DSetMemXferPropList::getTypeConvCB(H5T_conv_except_func_t *op, void **user_data) const
{
- herr_t ret_value = H5Pget_type_conv_cb( id, op, user_data);
- if( ret_value < 0 )
- {
- throw PropListIException("DSetMemXferPropList::getTypeConvCB",
- "H5Pget_type_conv_cb failed");
- }
+ herr_t ret_value = H5Pget_type_conv_cb(id, op, user_data);
+ if(ret_value < 0)
+ {
+ throw PropListIException("DSetMemXferPropList::getTypeConvCB",
+ "H5Pget_type_conv_cb failed");
+ }
}
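
A sketch of registering and reading back a conversion-exception callback. The callback signature follows the C library's H5T_conv_except_func_t typedef; the no-op handler and the counter passed as user data are assumptions for illustration only.

    #include "H5Cpp.h"

    // Hypothetical handler: report the exception as unhandled so the library
    // applies its default behaviour (e.g. for overflow or truncation).
    static H5T_conv_ret_t sample_conv_cb(H5T_conv_except_t except_type, hid_t src_id,
                                         hid_t dst_id, void* src_buf, void* dst_buf,
                                         void* user_data)
    {
        return H5T_CONV_UNHANDLED;
    }

    int main()
    {
        H5::DSetMemXferPropList xfer;
        int counter = 0;                              // illustrative user data
        xfer.setTypeConvCB(sample_conv_cb, &counter);

        H5T_conv_except_func_t op = NULL;
        void* udata = NULL;
        xfer.getTypeConvCB(&op, &udata);              // retrieves the pair just registered
        return 0;
    }
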
//--------------------------------------------------------------------------
-// Function: DSetMemXferPropList::setVlenMemManager
-///\brief Sets the memory manager for variable-length datatype allocation.
-///\param alloc_func - IN: User's allocate routine
-///\param alloc_info - IN: User's allocation parameters
-///\param free_func - IN: User's free routine
-///\param free_info - IN: User's free parameters
-///\exception H5::PropListIException
-// Programmer: Binh-Minh Ribler - 2000
+// Function: DSetMemXferPropList::setVlenMemManager
+///\brief Sets the memory manager for variable-length datatype allocation.
+///\param alloc_func - IN: User's allocate routine
+///\param alloc_info - IN: User's allocation parameters
+///\param free_func - IN: User's free routine
+///\param free_info - IN: User's free parameters
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void DSetMemXferPropList::setVlenMemManager( H5MM_allocate_t alloc_func, void* alloc_info, H5MM_free_t free_func, void* free_info ) const
+void DSetMemXferPropList::setVlenMemManager(H5MM_allocate_t alloc_func, void* alloc_info, H5MM_free_t free_func, void* free_info) const
{
- herr_t ret_value = H5Pset_vlen_mem_manager( id, alloc_func, alloc_info,
- free_func, free_info );
- if( ret_value < 0 )
- {
- throw PropListIException("DSetMemXferPropList::setVlenMemManager",
- "H5Pset_vlen_mem_manager failed");
- }
+ herr_t ret_value = H5Pset_vlen_mem_manager(id, alloc_func, alloc_info,
+ free_func, free_info);
+ if(ret_value < 0)
+ {
+ throw PropListIException("DSetMemXferPropList::setVlenMemManager",
+ "H5Pset_vlen_mem_manager failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: DSetMemXferPropList::setVlenMemManager
-///\brief Sets the memory manager for variable-length datatype
-/// allocation - system \c malloc and \c free will be used.
+// Function: DSetMemXferPropList::setVlenMemManager
+///\brief Sets the memory manager for variable-length datatype
+/// allocation - system \c malloc and \c free will be used.
///
-///\exception H5::PropListIException
-// Programmer: Binh-Minh Ribler - 2000
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
void DSetMemXferPropList::setVlenMemManager() const
{
- setVlenMemManager( NULL, NULL, NULL, NULL );
+ setVlenMemManager(NULL, NULL, NULL, NULL);
}
//--------------------------------------------------------------------------
-// Function: DSetMemXferPropList::getVlenMemManager
-///\brief Gets the memory manager for variable-length datatype allocation
-///\param alloc_func - OUT: User's allocate routine
-///\param alloc_info - OUT: User's allocation parameters
-///\param free_func - OUT: User's free routine
-///\param free_info - OUT: User's free parameters
-///\exception H5::PropListIException
-// Programmer: Binh-Minh Ribler - 2000
+// Function: DSetMemXferPropList::getVlenMemManager
+///\brief Gets the memory manager for variable-length datatype allocation
+///\param alloc_func - OUT: User's allocate routine
+///\param alloc_info - OUT: User's allocation parameters
+///\param free_func - OUT: User's free routine
+///\param free_info - OUT: User's free parameters
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void DSetMemXferPropList::getVlenMemManager( H5MM_allocate_t& alloc_func, void** alloc_info, H5MM_free_t& free_func, void** free_info ) const
+void DSetMemXferPropList::getVlenMemManager(H5MM_allocate_t& alloc_func, void** alloc_info, H5MM_free_t& free_func, void** free_info) const
{
- herr_t ret_value = H5Pget_vlen_mem_manager( id, &alloc_func, alloc_info, &free_func, free_info );
- if( ret_value < 0 )
- {
- throw PropListIException("DSetMemXferPropList::getVlenMemManager",
- "H5Pget_vlen_mem_manager failed");
- }
+ herr_t ret_value = H5Pget_vlen_mem_manager(id, &alloc_func, alloc_info, &free_func, free_info);
+ if (ret_value < 0)
+ {
+ throw PropListIException("DSetMemXferPropList::getVlenMemManager",
+ "H5Pget_vlen_mem_manager failed");
+ }
}
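
A sketch of the variable-length memory manager hooks; the pass-through malloc/free routines and the NULL info pointers are assumptions, and the routine signatures follow the C library's H5MM_allocate_t and H5MM_free_t typedefs.

    #include "H5Cpp.h"
    #include <cstdlib>

    static void* vlen_alloc(size_t size, void* alloc_info)
    {
        return std::malloc(size);                  // trivially forward to malloc
    }

    static void vlen_free(void* mem, void* free_info)
    {
        std::free(mem);                            // trivially forward to free
    }

    int main()
    {
        H5::DSetMemXferPropList xfer;
        xfer.setVlenMemManager(vlen_alloc, NULL, vlen_free, NULL);  // custom routines
        xfer.setVlenMemManager();                                   // or back to system malloc/free
        return 0;
    }
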
//--------------------------------------------------------------------------
-// Function: DSetMemXferPropList::setSmallDataBlockSize
-///\brief Sets the size of a contiguous block reserved for small data.
-///\param size - IN: Maximum size, in bytes, of the small data block.
-///\exception H5::PropListIException
+// Function: DSetMemXferPropList::setSmallDataBlockSize
+///\brief Sets the size of a contiguous block reserved for small data.
+///\param size - IN: Maximum size, in bytes, of the small data block.
+///\exception H5::PropListIException
///\par Description
-/// For detail, please refer to the C layer Reference Manual at:
+///		For details, please refer to the C layer Reference Manual at:
/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5P.html#Property-SetSmallData
-// Programmer: Binh-Minh Ribler - April, 2004
+// Programmer Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
void DSetMemXferPropList::setSmallDataBlockSize(hsize_t size) const
{
- herr_t ret_value = H5Pset_small_data_block_size(id, size);
- if (ret_value < 0)
- {
- throw PropListIException("DSetMemXferPropList::setSmallDataBlockSize",
- "H5Pset_small_data_block_size failed");
- }
+ herr_t ret_value = H5Pset_small_data_block_size(id, size);
+ if (ret_value < 0)
+ {
+ throw PropListIException("DSetMemXferPropList::setSmallDataBlockSize",
+ "H5Pset_small_data_block_size failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: DSetMemXferPropList::getSmallDataBlockSize
-///\brief Returns the current small data block size setting.
-///\return Size of the small data block, in bytes
-///\exception H5::PropListIException
-// Programmer: Binh-Minh Ribler - April, 2004
+// Function: DSetMemXferPropList::getSmallDataBlockSize
+///\brief Returns the current small data block size setting.
+///\return Size of the small data block, in bytes
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
hsize_t DSetMemXferPropList::getSmallDataBlockSize() const
{
- hsize_t size;
- herr_t ret_value = H5Pget_small_data_block_size(id, &size);
- if (ret_value < 0)
- {
- throw PropListIException("DSetMemXferPropList::getSmallDataBlockSize",
- "H5Pget_small_data_block_size failed");
- }
- return(size);
+ hsize_t size;
+ herr_t ret_value = H5Pget_small_data_block_size(id, &size);
+ if (ret_value < 0)
+ {
+ throw PropListIException("DSetMemXferPropList::getSmallDataBlockSize",
+ "H5Pget_small_data_block_size failed");
+ }
+ return(size);
}
//--------------------------------------------------------------------------
-// Function: DSetMemXferPropList::setHyperVectorSize
-///\brief Sets number of I/O vectors to be read/written in hyperslab I/O.
+// Function: DSetMemXferPropList::setHyperVectorSize
+///\brief	Sets the number of I/O vectors to be read/written in hyperslab I/O.
///
-///\exception H5::PropListIException
+///\exception H5::PropListIException
///\par Description
-/// For information, please refer to the C layer Reference
-/// Manual at:
+/// For information, please refer to the C layer Reference
+/// Manual at:
/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5P.html#Property-SetHyperVectorSize
-// Programmer: Binh-Minh Ribler - April, 2004
+// Programmer Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
void DSetMemXferPropList::setHyperVectorSize(size_t vector_size) const
{
- herr_t ret_value = H5Pset_hyper_vector_size(id, vector_size);
- if (ret_value < 0)
- {
- throw PropListIException("DSetMemXferPropList::setHyperVectorSize",
- "H5Pset_hyper_vector_size failed");
- }
+ herr_t ret_value = H5Pset_hyper_vector_size(id, vector_size);
+ if (ret_value < 0)
+ {
+ throw PropListIException("DSetMemXferPropList::setHyperVectorSize",
+ "H5Pset_hyper_vector_size failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: DSetMemXferPropList::getHyperVectorSize
-///\brief Returns the number of I/O vectors to be read/written in
-/// hyperslab I/O.
-///\return Number of I/O vectors
-///\exception H5::PropListIException
-// Programmer: Binh-Minh Ribler - April, 2004
+// Function: DSetMemXferPropList::getHyperVectorSize
+///\brief Returns the number of I/O vectors to be read/written in
+/// hyperslab I/O.
+///\return Number of I/O vectors
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
size_t DSetMemXferPropList::getHyperVectorSize() const
{
- size_t vector_size;
- herr_t ret_value = H5Pget_hyper_vector_size(id, &vector_size);
- if (ret_value < 0)
- {
- throw PropListIException("DSetMemXferPropList::getHyperVectorSize",
- "H5Pget_hyper_vector_size failed");
- }
- return(vector_size);
+ size_t vector_size;
+ herr_t ret_value = H5Pget_hyper_vector_size(id, &vector_size);
+ if (ret_value < 0)
+ {
+ throw PropListIException("DSetMemXferPropList::getHyperVectorSize",
+ "H5Pget_hyper_vector_size failed");
+ }
+ return(vector_size);
}
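
A one-line round trip for the hyperslab vector size; 2048 is an arbitrary illustrative value.

    #include "H5Cpp.h"
    #include <iostream>

    int main()
    {
        H5::DSetMemXferPropList xfer;
        xfer.setHyperVectorSize(2048);                          // illustrative vector count
        std::cout << xfer.getHyperVectorSize() << std::endl;    // prints 2048
        return 0;
    }
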
//--------------------------------------------------------------------------
-// Function: DSetMemXferPropList::setEDCCheck
-///\brief Enables or disables error-detecting for a dataset reading
-/// process.
-///\param check - IN: Specifies whether error detection is enabled or
-/// disabled
-///\exception H5::PropListIException
+// Function: DSetMemXferPropList::setEDCCheck
+///\brief	Enables or disables error detection when reading a dataset.
+///\param check - IN: Specifies whether error detection is enabled or
+/// disabled
+///\exception H5::PropListIException
///\par Description
-/// The error detection algorithm used is the algorithm previously
-/// specified in the corresponding dataset creation property
-/// list. This function does not affect the use of error
-/// detection in the writing process.
+/// The error detection algorithm used is the algorithm previously
+/// specified in the corresponding dataset creation property
+/// list. This function does not affect the use of error
+/// detection in the writing process.
///\par
-/// Valid values are as follows:
-/// \li \c H5Z_ENABLE_EDC (default)
-/// \li \c H5Z_DISABLE_EDC
-// Programmer: Binh-Minh Ribler - April, 2004
+/// Valid values are as follows:
+/// \li \c H5Z_ENABLE_EDC (default)
+/// \li \c H5Z_DISABLE_EDC
+// Programmer Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
void DSetMemXferPropList::setEDCCheck(H5Z_EDC_t check) const
{
- herr_t ret_value = H5Pset_edc_check(id, check);
- if (ret_value < 0)
- {
- throw PropListIException("DSetMemXferPropList::setEDCCheck",
- "H5Pset_edc_check failed");
- }
+ herr_t ret_value = H5Pset_edc_check(id, check);
+ if (ret_value < 0)
+ {
+ throw PropListIException("DSetMemXferPropList::setEDCCheck",
+ "H5Pset_edc_check failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: DSetMemXferPropList::getEDCCheck
-///\brief Determines whether error-detection is enabled for dataset reads.
-///\return \c H5Z_ENABLE_EDC or \c H5Z_DISABLE_EDC
-///\exception H5::PropListIException
-// Programmer: Binh-Minh Ribler - April, 2004
+// Function: DSetMemXferPropList::getEDCCheck
+///\brief Determines whether error-detection is enabled for dataset reads.
+///\return \c H5Z_ENABLE_EDC or \c H5Z_DISABLE_EDC
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
H5Z_EDC_t DSetMemXferPropList::getEDCCheck() const
{
- H5Z_EDC_t check = H5Pget_edc_check(id);
- if (check < 0)
- {
- throw PropListIException("DSetMemXferPropList::getEDCCheck",
- "H5Pget_edc_check failed");
- }
- return(check);
+ H5Z_EDC_t check = H5Pget_edc_check(id);
+ if (check < 0)
+ {
+ throw PropListIException("DSetMemXferPropList::getEDCCheck",
+ "H5Pget_edc_check failed");
+ }
+ return(check);
}
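
A sketch of toggling read-time error detection; the choice to disable it here is purely illustrative.

    #include "H5Cpp.h"
    #include <iostream>

    int main()
    {
        H5::DSetMemXferPropList xfer;
        xfer.setEDCCheck(H5Z_DISABLE_EDC);                // skip checksum verification on read
        if (xfer.getEDCCheck() == H5Z_DISABLE_EDC)
            std::cout << "EDC disabled for reads" << std::endl;
        return 0;
    }
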
//--------------------------------------------------------------------------
-// Function: DSetMemXferPropList destructor
-///\brief Noop destructor.
-// Programmer Binh-Minh Ribler - 2000
+// Function: DSetMemXferPropList destructor
+///\brief Noop destructor.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
DSetMemXferPropList::~DSetMemXferPropList() {}
diff --git a/c++/src/H5DxferProp.h b/c++/src/H5DxferProp.h
index 81361cf..4a38bd0 100644
--- a/c++/src/H5DxferProp.h
+++ b/c++/src/H5DxferProp.h
@@ -6,17 +6,12 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-// Class DSetMemXferPropList represents the HDF5 dataset transfer property list
-// and inherits from PropList.
-
#ifndef __H5DSetMemXferPropList_H
#define __H5DSetMemXferPropList_H
@@ -30,93 +25,93 @@ namespace H5 {
*/
class H5_DLLCPP DSetMemXferPropList : public PropList {
public:
- ///\brief Default dataset memory and transfer property list.
- static const DSetMemXferPropList& DEFAULT;
+ ///\brief Default dataset memory and transfer property list.
+ static const DSetMemXferPropList& DEFAULT;
- // Creates a dataset memory and transfer property list.
- DSetMemXferPropList();
+ // Creates a dataset memory and transfer property list.
+ DSetMemXferPropList();
- // Creates a dataset transform property list.
- DSetMemXferPropList(const char* expression);
+ // Creates a dataset transform property list.
+ DSetMemXferPropList(const char* expression);
- // Sets type conversion and background buffers.
- void setBuffer( size_t size, void* tconv, void* bkg ) const;
+ // Sets type conversion and background buffers.
+ void setBuffer(size_t size, void* tconv, void* bkg) const;
- // Reads buffer settings.
- size_t getBuffer( void** tconv, void** bkg ) const;
+ // Reads buffer settings.
+ size_t getBuffer(void** tconv, void** bkg) const;
- // Sets B-tree split ratios for a dataset transfer property list.
- void setBtreeRatios( double left, double middle, double right ) const;
+ // Sets B-tree split ratios for a dataset transfer property list.
+ void setBtreeRatios(double left, double middle, double right) const;
- // Gets B-tree split ratios for a dataset transfer property list.
- void getBtreeRatios( double& left, double& middle, double& right ) const;
+ // Gets B-tree split ratios for a dataset transfer property list.
+ void getBtreeRatios(double& left, double& middle, double& right) const;
- // Sets data transform expression.
- void setDataTransform(const char* expression) const;
- void setDataTransform(const H5std_string& expression) const;
+ // Sets data transform expression.
+ void setDataTransform(const char* expression) const;
+ void setDataTransform(const H5std_string& expression) const;
- // Gets data transform expression.
- ssize_t getDataTransform(char* exp, size_t buf_size=0) const;
- H5std_string getDataTransform() const;
+ // Gets data transform expression.
+ ssize_t getDataTransform(char* exp, size_t buf_size=0) const;
+ H5std_string getDataTransform() const;
- // Sets the dataset transfer property list status to TRUE or FALSE.
- void setPreserve( bool status ) const;
+ // Sets the dataset transfer property list status to TRUE or FALSE.
+ void setPreserve(bool status) const;
- // Checks status of the dataset transfer property list.
- bool getPreserve() const;
+ // Checks status of the dataset transfer property list.
+ bool getPreserve() const;
- // Sets an exception handling callback for datatype conversion.
- void setTypeConvCB( H5T_conv_except_func_t op, void *user_data) const;
+ // Sets an exception handling callback for datatype conversion.
+ void setTypeConvCB(H5T_conv_except_func_t op, void *user_data) const;
- // Gets the exception handling callback for datatype conversion.
- void getTypeConvCB( H5T_conv_except_func_t *op, void **user_data) const;
+ // Gets the exception handling callback for datatype conversion.
+ void getTypeConvCB(H5T_conv_except_func_t *op, void **user_data) const;
- // Sets the memory manager for variable-length datatype
- // allocation in H5Dread and H5Dvlen_reclaim.
- void setVlenMemManager( H5MM_allocate_t alloc, void* alloc_info,
- H5MM_free_t free, void* free_info ) const;
+ // Sets the memory manager for variable-length datatype
+ // allocation in H5Dread and H5Dvlen_reclaim.
+ void setVlenMemManager(H5MM_allocate_t alloc, void* alloc_info,
+ H5MM_free_t free, void* free_info) const;
- // alloc and free are set to NULL, indicating that system
- // malloc and free are to be used.
- void setVlenMemManager() const;
+ // alloc and free are set to NULL, indicating that system
+ // malloc and free are to be used.
+ void setVlenMemManager() const;
- // Gets the memory manager for variable-length datatype
- // allocation in H5Dread and H5Tvlen_reclaim.
- void getVlenMemManager( H5MM_allocate_t& alloc, void** alloc_info,
- H5MM_free_t& free, void** free_info ) const;
+ // Gets the memory manager for variable-length datatype
+ // allocation in H5Dread and H5Tvlen_reclaim.
+ void getVlenMemManager(H5MM_allocate_t& alloc, void** alloc_info,
+ H5MM_free_t& free, void** free_info) const;
- // Sets the size of a contiguous block reserved for small data.
- void setSmallDataBlockSize(hsize_t size) const;
+ // Sets the size of a contiguous block reserved for small data.
+ void setSmallDataBlockSize(hsize_t size) const;
- // Returns the current small data block size setting.
- hsize_t getSmallDataBlockSize() const;
+ // Returns the current small data block size setting.
+ hsize_t getSmallDataBlockSize() const;
- // Sets number of I/O vectors to be read/written in hyperslab I/O.
- void setHyperVectorSize(size_t vector_size) const;
+ // Sets number of I/O vectors to be read/written in hyperslab I/O.
+ void setHyperVectorSize(size_t vector_size) const;
- // Returns the number of I/O vectors to be read/written in
- // hyperslab I/O.
- size_t getHyperVectorSize() const;
+ // Returns the number of I/O vectors to be read/written in
+ // hyperslab I/O.
+ size_t getHyperVectorSize() const;
- // Enables or disables error-detecting for a dataset reading
- // process.
- void setEDCCheck(H5Z_EDC_t check) const;
+ // Enables or disables error-detecting for a dataset reading
+ // process.
+ void setEDCCheck(H5Z_EDC_t check) const;
- // Determines whether error-detection is enabled for dataset reads.
- H5Z_EDC_t getEDCCheck() const;
+ // Determines whether error-detection is enabled for dataset reads.
+ H5Z_EDC_t getEDCCheck() const;
- ///\brief Returns this class name.
- virtual H5std_string fromClass () const { return("DSetMemXferPropList"); }
+ ///\brief Returns this class name.
+ virtual H5std_string fromClass () const { return("DSetMemXferPropList"); }
- // Copy constructor: makes a copy of a DSetMemXferPropList object.
- DSetMemXferPropList(const DSetMemXferPropList& orig);
+ // Copy constructor: makes a copy of a DSetMemXferPropList object.
+ DSetMemXferPropList(const DSetMemXferPropList& orig);
- // Creates a copy of an existing dataset memory and transfer
- // property list using the property list id.
- DSetMemXferPropList(const hid_t plist_id);
+ // Creates a copy of an existing dataset memory and transfer
+ // property list using the property list id.
+ DSetMemXferPropList(const hid_t plist_id);
- // Noop destructor
- virtual ~DSetMemXferPropList();
+ // Noop destructor
+ virtual ~DSetMemXferPropList();
#ifndef DOXYGEN_SHOULD_SKIP_THIS
@@ -124,12 +119,14 @@ class H5_DLLCPP DSetMemXferPropList : public PropList {
static void deleteConstants();
private:
- static DSetMemXferPropList* DEFAULT_;
+ static DSetMemXferPropList* DEFAULT_;
// Creates the global constant, should only be used by the library
static DSetMemXferPropList* getConstant();
#endif // DOXYGEN_SHOULD_SKIP_THIS
-};
-}
+
+}; // end of DSetMemXferPropList
+} // namespace H5
+
#endif // __H5DSetMemXferPropList_H
diff --git a/c++/src/H5EnumType.cpp b/c++/src/H5EnumType.cpp
index 2b614a9..a96239c 100644
--- a/c++/src/H5EnumType.cpp
+++ b/c++/src/H5EnumType.cpp
@@ -5,16 +5,15 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <string>
+#include "H5private.h" // for HDmemset
#include "H5Include.h"
#include "H5Exception.h"
#include "H5IdComponent.h"
@@ -23,6 +22,7 @@
#include "H5DcreatProp.h"
#include "H5DxferProp.h"
#include "H5DataSpace.h"
+#include "H5LaccProp.h"
#include "H5Location.h"
#include "H5Object.h"
#include "H5AbstractDs.h"
@@ -31,199 +31,236 @@
#include "H5AtomType.h"
#include "H5IntType.h"
#include "H5EnumType.h"
-#include "H5private.h" // for HDmemset
namespace H5 {
//--------------------------------------------------------------------------
-// Function: EnumType default constructor
-///\brief Default constructor: Creates a stub datatype
-// Programmer Binh-Minh Ribler - 2000
+// Function: EnumType default constructor
+///\brief Default constructor: Creates a stub datatype
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
EnumType::EnumType() : DataType() {}
//--------------------------------------------------------------------------
-// Function: EnumType overloaded constructor
-///\brief Creates an EnumType object using the id of an existing datatype.
-///\param existing_id - IN: Id of an existing datatype
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: EnumType overloaded constructor
+///\brief Creates an EnumType object using the id of an existing datatype.
+///\param existing_id - IN: Id of an existing datatype
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-EnumType::EnumType( const hid_t existing_id ) : DataType( existing_id ) {}
+EnumType::EnumType(const hid_t existing_id) : DataType(existing_id) {}
//--------------------------------------------------------------------------
-// Function: EnumType copy constructor
-///\brief Copy constructor: makes a copy of the original EnumType object.
-// Programmer Binh-Minh Ribler - 2000
+// Function: EnumType copy constructor
+///\brief Copy constructor: makes a copy of the original EnumType object.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-EnumType::EnumType( const EnumType& original ) : DataType( original ) {}
+EnumType::EnumType(const EnumType& original) : DataType(original) {}
//--------------------------------------------------------------------------
-// Function: EnumType overloaded constructor
-///\brief Creates an empty enumeration datatype given a size, in bytes.
-///\param size - IN: Number of bytes in the datatype to create
-///\exception H5::DataTypeIException
+// Function: EnumType overloaded constructor
+///\brief Creates an empty enumeration datatype given a size, in bytes.
+///\param size - IN: Number of bytes in the datatype to create
+///\exception H5::DataTypeIException
// Description
-// The DataType constructor calls the C API H5Tcreate to create
-// the enum datatype.
-// Programmer Binh-Minh Ribler - 2000
+// The DataType constructor calls the C API H5Tcreate to create
+// the enum datatype.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-EnumType::EnumType( size_t size ) : DataType( H5T_ENUM, size ) {}
+EnumType::EnumType(size_t size) : DataType(H5T_ENUM, size) {}
//--------------------------------------------------------------------------
-// Function: EnumType overloaded constructor
-///\brief Gets the enum datatype of the specified dataset.
-///\param dataset - IN: Dataset that this enum datatype associates with
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: EnumType overloaded constructor
+///\brief Gets the enum datatype of the specified dataset.
+///\param dataset - IN: Dataset that this enum datatype associates with
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-EnumType::EnumType( const DataSet& dataset ) : DataType()
+EnumType::EnumType(const DataSet& dataset) : DataType()
{
- // Calls C function H5Dget_type to get the id of the datatype
- id = H5Dget_type( dataset.getId() );
+ // Calls C function H5Dget_type to get the id of the datatype
+ id = H5Dget_type(dataset.getId());
- // If the datatype id is not valid, throw an exception
- if( id < 0 )
- {
- throw DataSetIException("EnumType constructor", "H5Dget_type failed");
- }
+ // If the datatype id is not valid, throw an exception
+ if (id < 0)
+ {
+ throw DataSetIException("EnumType constructor", "H5Dget_type failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: EnumType overloaded constructor
-///\brief Creates a new enum datatype based on an integer datatype.
-///\param data_type - IN: Base datatype
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: EnumType overloaded constructor
+///\brief Creates a new enum datatype based on an integer datatype.
+///\param data_type - IN: Base datatype
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-EnumType::EnumType( const IntType& data_type ) : DataType()
+EnumType::EnumType(const IntType& data_type) : DataType()
{
- // Calls C function H5Tenum_create to get the id of the datatype
- id = H5Tenum_create( data_type.getId() );
+ // Calls C function H5Tenum_create to get the id of the datatype
+ id = H5Tenum_create(data_type.getId());
- // If the datatype id is not valid, throw an exception
- if( id < 0 )
- {
- throw DataSetIException("EnumType constructor", "H5Tenum_create failed");
- }
+ // If the datatype id is not valid, throw an exception
+ if (id < 0)
+ {
+        throw DataTypeIException("EnumType constructor", "H5Tenum_create failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: EnumType::insert
-///\brief Inserts a new member to this enumeration datatype.
-///\param name - IN: Name of the new member
-///\param value - IN: Pointer to the value of the new member
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: EnumType overloaded constructor
+///\brief Creates an EnumType instance by opening an HDF5 enum datatype
+/// given its name, provided as a C character string.
+///\param	loc        - IN: Location of the type
+///\param	dtype_name - IN: Enum datatype name
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - Dec 2016
+// Description
+// In 1.10.1, this constructor was introduced and may replace the
+// existing function CommonFG::openEnumType(const char*) to
+// improve usability.
+// -BMR, Dec 2016
+//--------------------------------------------------------------------------
+EnumType::EnumType(const H5Location& loc, const char *dtype_name) : DataType()
+{
+ id = p_opentype(loc, dtype_name);
+}
+
+//--------------------------------------------------------------------------
+// Function: EnumType overloaded constructor
+///\brief Creates an EnumType instance by opening an HDF5 enum datatype
+/// given its name, provided as an \c H5std_string.
+///\param loc - IN: Location of the type
+///\param dtype_name - IN: Enum datatype name
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - Dec 2016
+// Description
+// In 1.10.1, this constructor was introduced and may replace the
+// existing function CommonFG::openEnumType(const H5std_string&)
+// to improve usability.
+// -BMR, Dec 2016
+//--------------------------------------------------------------------------
+EnumType::EnumType(const H5Location& loc, const H5std_string& dtype_name) : DataType()
+{
+ id = p_opentype(loc, dtype_name.c_str());
+}
+
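
A usage sketch for the two constructors added above. The file name "colors.h5" and the committed datatype name "ColorType" are assumptions, and the named enum type is presumed to have been committed to the file beforehand.

    #include "H5Cpp.h"
    using namespace H5;

    int main()
    {
        H5File file("colors.h5", H5F_ACC_RDONLY);             // hypothetical file

        EnumType by_cstr(file, "ColorType");                  // const char* overload
        EnumType by_string(file, H5std_string("ColorType"));  // H5std_string overload
        return 0;
    }
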
+//--------------------------------------------------------------------------
+// Function: EnumType::insert
+///\brief Inserts a new member to this enumeration datatype.
+///\param name - IN: Name of the new member
+///\param value - IN: Pointer to the value of the new member
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void EnumType::insert( const char* name, void *value ) const
+void EnumType::insert(const char* name, void *value) const
{
- // Calls C routine H5Tenum_insert to insert the new enum datatype member.
- herr_t ret_value = H5Tenum_insert( id, name, value );
- if( ret_value < 0 )
- {
- throw DataTypeIException("EnumType::insert", "H5Tenum_insert failed");
- }
+ // Calls C routine H5Tenum_insert to insert the new enum datatype member.
+ herr_t ret_value = H5Tenum_insert(id, name, value);
+ if (ret_value < 0)
+ {
+ throw DataTypeIException("EnumType::insert", "H5Tenum_insert failed");
+ }
}
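
A minimal sketch of building an enum over a native integer base and adding members; the member names and values are illustrative.

    #include "H5Cpp.h"
    using namespace H5;

    int main()
    {
        IntType base(PredType::NATIVE_INT);
        EnumType color(base);                         // H5Tenum_create under the hood

        int red = 0, green = 1;
        color.insert("RED", &red);                    // const char* overload
        color.insert(H5std_string("GREEN"), &green);  // H5std_string overload
        return 0;
    }
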
//--------------------------------------------------------------------------
-// Function: EnumType::insert
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function only in the type of
-/// argument \a name.
-// Programmer Binh-Minh Ribler - 2000
+// Function: EnumType::insert
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function only in the type of
+/// argument \a name.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void EnumType::insert( const H5std_string& name, void *value ) const
+void EnumType::insert(const H5std_string& name, void *value) const
{
- insert( name.c_str(), value );
+ insert(name.c_str(), value);
}
//--------------------------------------------------------------------------
-// Function: EnumType::nameOf
-///\brief Returns the symbol name corresponding to a specified member
-/// of this enumeration datatype.
-///\param value - IN: Pointer to the value of the enum datatype
-///\param size - IN: Size for the name
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: EnumType::nameOf
+///\brief Returns the symbol name corresponding to a specified member
+/// of this enumeration datatype.
+///\param value - IN: Pointer to the value of the enum datatype
+///\param size - IN: Size for the name
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-H5std_string EnumType::nameOf( void *value, size_t size ) const
+H5std_string EnumType::nameOf(void *value, size_t size) const
{
- char* name_C = new char[size+1]; // temporary C-string for C API
- HDmemset(name_C, 0, size+1); // clear buffer
+ char* name_C = new char[size+1]; // temporary C-string for C API
+ HDmemset(name_C, 0, size+1); // clear buffer
- // Calls C routine H5Tenum_nameof to get the name of the specified enum type
- herr_t ret_value = H5Tenum_nameof( id, value, name_C, size );
+ // Calls C routine H5Tenum_nameof to get the name of the specified enum type
+ herr_t ret_value = H5Tenum_nameof(id, value, name_C, size);
- // If H5Tenum_nameof returns a negative value, raise an exception,
- if( ret_value < 0 )
- {
+ // If H5Tenum_nameof returns a negative value, raise an exception,
+ if (ret_value < 0)
+ {
delete []name_C;
- throw DataTypeIException("EnumType::nameOf", "H5Tenum_nameof failed");
- }
- // otherwise, create the string to hold the datatype name and return it
- H5std_string name(name_C);
- delete []name_C;
- return( name );
+ throw DataTypeIException("EnumType::nameOf", "H5Tenum_nameof failed");
+ }
+ // otherwise, create the string to hold the datatype name and return it
+ H5std_string name(name_C);
+ delete []name_C;
+ return(name);
}
//--------------------------------------------------------------------------
-// Function: EnumType::valueOf
-///\brief Retrieves the value corresponding to a member of this
-/// enumeration datatype, given the member's name.
-///\param name - IN: Name of the queried member
-///\param value - OUT: Pointer to the retrieved value
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: EnumType::valueOf
+///\brief Retrieves the value corresponding to a member of this
+/// enumeration datatype, given the member's name.
+///\param name - IN: Name of the queried member
+///\param value - OUT: Pointer to the retrieved value
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void EnumType::valueOf( const char* name, void *value ) const
+void EnumType::valueOf(const char* name, void *value) const
{
- // Calls C routine H5Tenum_valueof to get the enum datatype value
- herr_t ret_value = H5Tenum_valueof( id, name, value );
- if( ret_value < 0 )
- {
- throw DataTypeIException("EnumType::valueOf", "H5Tenum_valueof failed");
- }
+ // Calls C routine H5Tenum_valueof to get the enum datatype value
+ herr_t ret_value = H5Tenum_valueof(id, name, value);
+ if (ret_value < 0)
+ {
+ throw DataTypeIException("EnumType::valueOf", "H5Tenum_valueof failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: EnumType::valueOf
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function only in the type of
-/// argument \a name.
-// Programmer Binh-Minh Ribler - 2000
+// Function: EnumType::valueOf
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function only in the type of
+/// argument \a name.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void EnumType::valueOf( const H5std_string& name, void *value ) const
+void EnumType::valueOf(const H5std_string& name, void *value) const
{
- valueOf( name.c_str(), value );
+ valueOf(name.c_str(), value);
}
//--------------------------------------------------------------------------
-// Function: EnumType::getMemberIndex
-///\brief Returns the index of a member in this enumeration datatype.
-///\param name - IN: Name of the queried member
-///\return Index of the member if it exists. Index will have the value
-/// between 0 and \c N-1, where \c N is the value returned by the
-/// member function \c EnumType::getNmembers.
-///\exception H5::DataTypeIException
+// Function: EnumType::getMemberIndex
+///\brief Returns the index of a member in this enumeration datatype.
+///\param name - IN: Name of the queried member
+///\return	Index of the member if it exists. The index will have a value
+/// between 0 and \c N-1, where \c N is the value returned by the
+/// member function \c EnumType::getNmembers.
+///\exception H5::DataTypeIException
// Programmer Binh-Minh Ribler - May 16, 2002
//--------------------------------------------------------------------------
int EnumType::getMemberIndex(const char *name) const
{
- int member_index = H5Tget_member_index(id, name);
- if( member_index < 0 )
- {
- throw DataTypeIException("EnumType::getMemberIndex",
+ int member_index = H5Tget_member_index(id, name);
+ if (member_index < 0)
+ {
+ throw DataTypeIException("EnumType::getMemberIndex",
"H5Tget_member_index returns negative value");
- }
- return( member_index );
+ }
+ return(member_index);
}
//--------------------------------------------------------------------------
-// Function: EnumType::getMemberIndex
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function only in the type of
-/// argument \a name.
+// Function: EnumType::getMemberIndex
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function only in the type of
+/// argument \a name.
// Programmer Binh-Minh Ribler - May 16, 2002
//--------------------------------------------------------------------------
int EnumType::getMemberIndex(const H5std_string& name) const
@@ -232,46 +269,46 @@ int EnumType::getMemberIndex(const H5std_string& name) const
}
//--------------------------------------------------------------------------
-// Function: EnumType::getNmembers
-///\brief Returns the number of members in this enumeration datatype.
-///\return Number of members
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - May, 2004
+// Function: EnumType::getNmembers
+///\brief Returns the number of members in this enumeration datatype.
+///\return Number of members
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - May, 2004
//--------------------------------------------------------------------------
int EnumType::getNmembers() const
{
- int num_members = H5Tget_nmembers( id );
- if( num_members < 0 )
- {
- throw DataTypeIException("EnumType::getNmembers",
+ int num_members = H5Tget_nmembers(id);
+ if (num_members < 0)
+ {
+ throw DataTypeIException("EnumType::getNmembers",
"H5Tget_nmembers returns negative number of members");
- }
- return( num_members );
+ }
+ return(num_members);
}
//--------------------------------------------------------------------------
-// Function: EnumType::getMemberValue
-///\brief Retrieves the value of a member in this enumeration datatype,
-/// given the member's index.
-///\param memb_no - IN: Index of the queried member
-///\param value - OUT: Pointer to the retrieved value
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: EnumType::getMemberValue
+///\brief Retrieves the value of a member in this enumeration datatype,
+/// given the member's index.
+///\param memb_no - IN: Index of the queried member
+///\param value - OUT: Pointer to the retrieved value
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void EnumType::getMemberValue( unsigned memb_no, void *value ) const
+void EnumType::getMemberValue(unsigned memb_no, void *value) const
{
- // Call C routine H5Tget_member_value to get the datatype member's value
- hid_t ret_value = H5Tget_member_value( id, memb_no, value );
- if( ret_value < 0 )
- {
- throw DataTypeIException("EnumType::getMemberValue", "H5Tget_member_value failed");
- }
+ // Call C routine H5Tget_member_value to get the datatype member's value
+ hid_t ret_value = H5Tget_member_value(id, memb_no, value);
+ if (ret_value < 0)
+ {
+ throw DataTypeIException("EnumType::getMemberValue", "H5Tget_member_value failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: EnumType destructor
-///\brief Properly terminates access to this enum datatype.
-// Programmer Binh-Minh Ribler - 2000
+// Function: EnumType destructor
+///\brief Properly terminates access to this enum datatype.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
EnumType::~EnumType() {}
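
Pulling the member-query calls together, a short sketch; the member names and values are illustrative, and the 32-byte name buffer passed to nameOf is an assumption about the longest expected name.

    #include "H5Cpp.h"
    #include <iostream>
    using namespace H5;

    int main()
    {
        IntType base(PredType::NATIVE_INT);
        EnumType color(base);
        int red = 0, green = 1;
        color.insert("RED", &red);
        color.insert("GREEN", &green);

        std::cout << color.getNmembers() << std::endl;              // 2
        std::cout << color.getMemberIndex("GREEN") << std::endl;    // index in [0, N-1]

        int value = -1;
        color.valueOf("RED", &value);                               // value becomes 0

        int probe = 1;
        std::cout << color.nameOf(&probe, 32) << std::endl;         // prints GREEN

        color.getMemberValue(0, &value);                            // value of the member at index 0
        return 0;
    }
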
diff --git a/c++/src/H5EnumType.h b/c++/src/H5EnumType.h
index 1f96552..fc8089e 100644
--- a/c++/src/H5EnumType.h
+++ b/c++/src/H5EnumType.h
@@ -6,12 +6,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef __H5EnumType_H
@@ -28,52 +26,58 @@ namespace H5 {
class H5_DLLCPP EnumType : public DataType {
public:
- // Creates an empty enumeration datatype based on a native signed
- // integer type, whose size is given by size.
- EnumType( size_t size );
+ // Creates an empty enumeration datatype based on a native signed
+ // integer type, whose size is given by size.
+ EnumType(size_t size);
- // Gets the enum datatype of the specified dataset
- EnumType( const DataSet& dataset ); // H5Dget_type
+ // Gets the enum datatype of the specified dataset
+ EnumType(const DataSet& dataset); // H5Dget_type
- // Creates a new enum datatype based on an integer datatype
- EnumType( const IntType& data_type ); // H5Tenum_create
+ // Creates a new enum datatype based on an integer datatype
+ EnumType(const IntType& data_type); // H5Tenum_create
- // Returns the number of members in this enumeration datatype.
- int getNmembers () const;
+ // Constructors that open an enum datatype, given a location.
+ EnumType(const H5Location& loc, const char* name);
+ EnumType(const H5Location& loc, const H5std_string& name);
- // Returns the index of a member in this enumeration data type.
- int getMemberIndex(const char* name) const;
- int getMemberIndex(const H5std_string& name) const;
+ // Returns the number of members in this enumeration datatype.
+ int getNmembers () const;
- // Returns the value of an enumeration datatype member
- void getMemberValue( unsigned memb_no, void *value ) const;
+ // Returns the index of a member in this enumeration data type.
+ int getMemberIndex(const char* name) const;
+ int getMemberIndex(const H5std_string& name) const;
- // Inserts a new member to this enumeration type.
- void insert( const char* name, void *value ) const;
- void insert( const H5std_string& name, void *value ) const;
+ // Returns the value of an enumeration datatype member
+ void getMemberValue(unsigned memb_no, void *value) const;
- // Returns the symbol name corresponding to a specified member
- // of this enumeration datatype.
- H5std_string nameOf( void *value, size_t size ) const;
+ // Inserts a new member to this enumeration type.
+ void insert(const char* name, void *value) const;
+ void insert(const H5std_string& name, void *value) const;
- // Returns the value corresponding to a specified member of this
- // enumeration datatype.
- void valueOf( const char* name, void *value ) const;
- void valueOf( const H5std_string& name, void *value ) const;
+ // Returns the symbol name corresponding to a specified member
+ // of this enumeration datatype.
+ H5std_string nameOf(void *value, size_t size) const;
- ///\brief Returns this class name.
- virtual H5std_string fromClass () const { return("EnumType"); }
+ // Returns the value corresponding to a specified member of this
+ // enumeration datatype.
+ void valueOf(const char* name, void *value) const;
+ void valueOf(const H5std_string& name, void *value) const;
- // Default constructor
- EnumType();
+ ///\brief Returns this class name.
+ virtual H5std_string fromClass () const { return("EnumType"); }
- // Creates an enumeration datatype using an existing id
- EnumType( const hid_t existing_id );
+ // Default constructor
+ EnumType();
- // Copy constructor: makes a copy of the original EnumType object.
- EnumType( const EnumType& original );
+ // Creates an enumeration datatype using an existing id
+ EnumType(const hid_t existing_id);
+
+ // Copy constructor: makes a copy of the original EnumType object.
+ EnumType(const EnumType& original);
+
+ virtual ~EnumType();
+
+}; // end of EnumType
+} // namespace H5
- virtual ~EnumType();
-};
-}
#endif // __H5EnumType_H
diff --git a/c++/src/H5Exception.cpp b/c++/src/H5Exception.cpp
index 2e23587..c9a1323 100644
--- a/c++/src/H5Exception.cpp
+++ b/c++/src/H5Exception.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <string>
@@ -23,207 +21,207 @@ namespace H5 {
const char Exception::DEFAULT_MSG[] = "No detailed information provided";
//--------------------------------------------------------------------------
-// Function: Exception default constructor
-///\brief Default constructor.
-// Programmer Binh-Minh Ribler - 2000
+// Function: Exception default constructor
+///\brief Default constructor.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
Exception::Exception() : detail_message(""), func_name("") {}
//--------------------------------------------------------------------------
-// Function: Exception overloaded constructor
-///\brief Creates an exception with the name of the function,
-/// in which the failure occurs, and an optional detailed message.
-///\param func - IN: Name of the function where failure occurs
-///\param message - IN: Message on the failure
-// Programmer Binh-Minh Ribler - 2000
+// Function: Exception overloaded constructor
+///\brief Creates an exception with the name of the function,
+/// in which the failure occurs, and an optional detailed message.
+///\param func - IN: Name of the function where failure occurs
+///\param message - IN: Message on the failure
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
Exception::Exception(const H5std_string& func, const H5std_string& message) : detail_message(message), func_name(func) {}
//--------------------------------------------------------------------------
-// Function: Exception copy constructor
-///\brief Copy constructor: makes a copy of the original Exception object.
-///\param orig - IN: Exception instance to copy
-// Programmer Binh-Minh Ribler - 2000
+// Function: Exception copy constructor
+///\brief Copy constructor: makes a copy of the original Exception object.
+///\param orig - IN: Exception instance to copy
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-Exception::Exception( const Exception& orig ) : detail_message(orig.detail_message), func_name(orig.func_name) {}
+Exception::Exception(const Exception& orig) : detail_message(orig.detail_message), func_name(orig.func_name) {}
//--------------------------------------------------------------------------
-// Function: Exception::getMajorString
-///\brief Returns a text string that describes the error
-/// specified by a major error number.
-///\param err_major - IN: Major error number
-///\return Major error string
+// Function: Exception::getMajorString
+///\brief Returns a text string that describes the error
+/// specified by a major error number.
+///\param err_major - IN: Major error number
+///\return Major error string
///\par Description
-/// In the failure case, the string "Invalid major error number"
-/// will be returned.
-// Programmer Binh-Minh Ribler - 2000
+/// In the failure case, the string "Invalid major error number"
+/// will be returned.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-H5std_string Exception::getMajorString( hid_t err_major ) const
+H5std_string Exception::getMajorString(hid_t err_major) const
{
- // Preliminary call to H5Eget_msg() to get the length of the message
- ssize_t mesg_size = H5Eget_msg(err_major, NULL, NULL, 0);
-
- // If H5Eget_msg() returns a negative value, raise an exception,
- if( mesg_size < 0 )
- throw IdComponentException("Exception::getMajorString",
- "H5Eget_msg failed");
-
- // Call H5Eget_msg again to get the actual message
- char* mesg_C = new char[mesg_size+1]; // temporary C-string for C API
- mesg_size = H5Eget_msg(err_major, NULL, mesg_C, mesg_size+1);
-
- // Check for failure again
- if( mesg_size < 0 )
- {
- delete []mesg_C;
- throw IdComponentException("Exception::getMajorString",
- "H5Eget_msg failed");
- }
-
- // Convert the C error description and return
- H5std_string major_str(mesg_C);
- delete []mesg_C;
- return( major_str );
+ // Preliminary call to H5Eget_msg() to get the length of the message
+ ssize_t mesg_size = H5Eget_msg(err_major, NULL, NULL, 0);
+
+ // If H5Eget_msg() returns a negative value, raise an exception,
+ if (mesg_size < 0)
+ throw IdComponentException("Exception::getMajorString",
+ "H5Eget_msg failed");
+
+ // Call H5Eget_msg again to get the actual message
+ char* mesg_C = new char[mesg_size+1]; // temporary C-string for C API
+ mesg_size = H5Eget_msg(err_major, NULL, mesg_C, mesg_size+1);
+
+ // Check for failure again
+ if (mesg_size < 0)
+ {
+ delete []mesg_C;
+ throw IdComponentException("Exception::getMajorString",
+ "H5Eget_msg failed");
+ }
+
+ // Convert the C error description and return
+ H5std_string major_str(mesg_C);
+ delete []mesg_C;
+ return(major_str);
}
//--------------------------------------------------------------------------
-// Function: Exception::getMinorString
-///\brief Returns a text string that describes the error
-/// specified by a minor error number.
-///\param err_minor - IN: Minor error number
-///\return Minor error string
+// Function: Exception::getMinorString
+///\brief Returns a text string that describes the error
+/// specified by a minor error number.
+///\param err_minor - IN: Minor error number
+///\return Minor error string
///\par Description
-/// In the failure case, the string "Invalid minor error number"
-/// will be returned.
-// Programmer Binh-Minh Ribler - 2000
+/// In the failure case, the string "Invalid minor error number"
+/// will be returned.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-H5std_string Exception::getMinorString( hid_t err_minor ) const
+H5std_string Exception::getMinorString(hid_t err_minor) const
{
- // Preliminary call to H5Eget_msg() to get the length of the message
- ssize_t mesg_size = H5Eget_msg(err_minor, NULL, NULL, 0);
-
- // If H5Eget_msg() returns a negative value, raise an exception,
- if( mesg_size < 0 )
- throw IdComponentException("Exception::getMinorString",
- "H5Eget_msg failed");
-
- // Call H5Eget_msg again to get the actual message
- char* mesg_C = new char[mesg_size+1]; // temporary C-string for C API
- mesg_size = H5Eget_msg(err_minor, NULL, mesg_C, mesg_size+1);
-
- // Check for failure again
- if( mesg_size < 0 )
- {
- delete []mesg_C;
- throw IdComponentException("Exception::getMinorString",
- "H5Eget_msg failed");
- }
-
- // Convert the C error description and return
- H5std_string minor_str(mesg_C);
- delete []mesg_C;
- return( minor_str );
+ // Preliminary call to H5Eget_msg() to get the length of the message
+ ssize_t mesg_size = H5Eget_msg(err_minor, NULL, NULL, 0);
+
+ // If H5Eget_msg() returns a negative value, raise an exception,
+ if (mesg_size < 0)
+ throw IdComponentException("Exception::getMinorString",
+ "H5Eget_msg failed");
+
+ // Call H5Eget_msg again to get the actual message
+ char* mesg_C = new char[mesg_size+1]; // temporary C-string for C API
+ mesg_size = H5Eget_msg(err_minor, NULL, mesg_C, mesg_size+1);
+
+ // Check for failure again
+ if (mesg_size < 0)
+ {
+ delete []mesg_C;
+ throw IdComponentException("Exception::getMinorString",
+ "H5Eget_msg failed");
+ }
+
+ // Convert the C error description and return
+ H5std_string minor_str(mesg_C);
+ delete []mesg_C;
+ return(minor_str);
}
//--------------------------------------------------------------------------
-// Function: Exception::setAutoPrint
-///\brief Turns on the automatic error printing.
-///\param func - IN: Function to be called upon an error condition
-///\param client_data - IN: Data passed to the error function
+// Function: Exception::setAutoPrint
+///\brief Turns on the automatic error printing.
+///\param func - IN: Function to be called upon an error condition
+///\param client_data - IN: Data passed to the error function
///\par Description
-/// When the library is first initialized the auto printing
-/// function, \a func, is set to the C API \c H5Eprint and
-/// \a client_data is the standard error stream pointer, \c stderr.
-/// Automatic stack traversal is always in the \c H5E_WALK_DOWNWARD
-/// direction.
+/// When the library is first initialized the auto printing
+/// function, \a func, is set to the C API \c H5Eprint and
+/// \a client_data is the standard error stream pointer, \c stderr.
+/// Automatic stack traversal is always in the \c H5E_WALK_DOWNWARD
+/// direction.
///\par
-/// Users are encouraged to write their own more specific error
-/// handlers
-// Programmer Binh-Minh Ribler - 2000
+/// Users are encouraged to write their own more specific error
+/// handlers
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void Exception::setAutoPrint( H5E_auto2_t& func, void* client_data )
+void Exception::setAutoPrint(H5E_auto2_t& func, void* client_data)
{
- // calls the C API routine H5Eset_auto to set the auto printing to
- // the specified function.
- herr_t ret_value = H5Eset_auto2( H5E_DEFAULT, func, client_data );
- if( ret_value < 0 )
- throw Exception( "Exception::setAutoPrint", "H5Eset_auto failed" );
+ // calls the C API routine H5Eset_auto to set the auto printing to
+ // the specified function.
+ herr_t ret_value = H5Eset_auto2(H5E_DEFAULT, func, client_data);
+ if (ret_value < 0)
+ throw Exception("Exception::setAutoPrint", "H5Eset_auto failed");
}
//--------------------------------------------------------------------------
-// Function: Exception::dontPrint
-///\brief Turns off the automatic error printing from the C library.
-// Programmer Binh-Minh Ribler - 2000
+// Function: Exception::dontPrint
+///\brief Turns off the automatic error printing from the C library.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
void Exception::dontPrint()
{
- // calls the C API routine H5Eset_auto with NULL parameters to turn
- // off the automatic error printing.
- herr_t ret_value = H5Eset_auto2( H5E_DEFAULT, NULL, NULL );
- if( ret_value < 0 )
- throw Exception( "Exception::dontPrint", "H5Eset_auto failed" );
+ // calls the C API routine H5Eset_auto with NULL parameters to turn
+ // off the automatic error printing.
+ herr_t ret_value = H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
+ if (ret_value < 0)
+ throw Exception("Exception::dontPrint", "H5Eset_auto failed");
}
//--------------------------------------------------------------------------
-// Function: Exception::getAutoPrint
-///\brief Retrieves the current settings for the automatic error
-/// stack traversal function and its data.
-///\param func - OUT: Current setting for the function to be
-/// called upon an error condition
-///\param client_data - OUT: Current setting for the data passed to
-/// the error function
-// Programmer Binh-Minh Ribler - 2000
+// Function: Exception::getAutoPrint
+///\brief Retrieves the current settings for the automatic error
+/// stack traversal function and its data.
+///\param func - OUT: Current setting for the function to be
+/// called upon an error condition
+///\param client_data - OUT: Current setting for the data passed to
+/// the error function
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void Exception::getAutoPrint( H5E_auto2_t& func, void** client_data )
+void Exception::getAutoPrint(H5E_auto2_t& func, void** client_data)
{
- // calls the C API routine H5Eget_auto to get the current setting of
- // the automatic error printing
- herr_t ret_value = H5Eget_auto2( H5E_DEFAULT, &func, client_data );
- if( ret_value < 0 )
- throw Exception( "Exception::getAutoPrint", "H5Eget_auto failed" );
+ // calls the C API routine H5Eget_auto to get the current setting of
+ // the automatic error printing
+ herr_t ret_value = H5Eget_auto2(H5E_DEFAULT, &func, client_data);
+ if (ret_value < 0)
+ throw Exception("Exception::getAutoPrint", "H5Eget_auto failed");
}
//--------------------------------------------------------------------------
-// Function: Exception::clearErrorStack
-///\brief Clears the error stack for the current thread.
+// Function: Exception::clearErrorStack
+///\brief Clears the error stack for the current thread.
///\par Description
-/// The stack is also cleared whenever a C API function is
-/// called, with certain exceptions (for instance, \c H5Eprint).
-// Programmer Binh-Minh Ribler - 2000
+/// The stack is also cleared whenever a C API function is
+/// called, with certain exceptions (for instance, \c H5Eprint).
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
void Exception::clearErrorStack()
{
- // calls the C API routine H5Eclear to clear the error stack
- herr_t ret_value = H5Eclear2(H5E_DEFAULT);
- if( ret_value < 0 )
- throw Exception( "Exception::clearErrorStack", "H5Eclear failed" );
+ // calls the C API routine H5Eclear to clear the error stack
+ herr_t ret_value = H5Eclear2(H5E_DEFAULT);
+ if (ret_value < 0)
+ throw Exception("Exception::clearErrorStack", "H5Eclear failed");
}
//--------------------------------------------------------------------------
-// Function: Exception::walkErrorStack
-///\brief Walks the error stack for the current thread, calling the
-/// specified function.
-///\param direction - IN: Direction in which the error stack is to be walked
-///\param func - IN: Function to be called for each error encountered
-///\param client_data - IN: Data passed to the error function
+// Function: Exception::walkErrorStack
+///\brief Walks the error stack for the current thread, calling the
+/// specified function.
+///\param direction - IN: Direction in which the error stack is to be walked
+///\param func - IN: Function to be called for each error encountered
+///\param client_data - IN: Data passed to the error function
///\par Description
-/// Valid values for \a direction include:
-/// \li \c H5E_WALK_UPWARD - begin with the most specific error
-/// and end at the API
-/// \li \c H5E_WALK_DOWNWARD - begin at the API and end at the
-/// inner-most function where the error was first detected
+/// Valid values for \a direction include:
+/// \li \c H5E_WALK_UPWARD - begin with the most specific error
+/// and end at the API
+/// \li \c H5E_WALK_DOWNWARD - begin at the API and end at the
+/// inner-most function where the error was first detected
///\par
-/// The function specified by \a func will be called for each
-/// error in the error stack. The \c H5E_walk_t prototype is as
-/// follows:
+/// The function specified by \a func will be called for each
+/// error in the error stack. The \c H5E_walk_t prototype is as
+/// follows:
///\code
/// typedef herr_t (*H5E_walk_t)(int n, H5E_error_t *err_desc, void *client_data)
/// int n - Indexed position of the error in the stack; it begins at zero
-/// regardless of stack traversal direction
+/// regardless of stack traversal direction
/// H5E_error_t *err_desc - Pointer to a data structure describing the
-/// error. This structure is listed below.
+/// error. This structure is listed below.
/// void *client_data - Pointer to client data in the format expected by
-/// the user-defined function.
+/// the user-defined function.
///\endcode
///\par
/// Data structure to describe the error:
@@ -238,87 +236,87 @@ void Exception::clearErrorStack()
/// const char *desc; //optional supplied description
/// } H5E_error2_t;
///\endcode
-// Programmer Binh-Minh Ribler - 2000
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void Exception::walkErrorStack( H5E_direction_t direction, H5E_walk2_t func, void* client_data )
+void Exception::walkErrorStack(H5E_direction_t direction, H5E_walk2_t func, void* client_data)
{
- // calls the C API routine H5Ewalk to walk the error stack
- herr_t ret_value = H5Ewalk2( H5E_DEFAULT, direction, func, client_data );
- if( ret_value < 0 )
- throw Exception( "Exception::walkErrorStack", "H5Ewalk failed" );
+ // calls the C API routine H5Ewalk to walk the error stack
+ herr_t ret_value = H5Ewalk2(H5E_DEFAULT, direction, func, client_data);
+ if (ret_value < 0)
+ throw Exception("Exception::walkErrorStack", "H5Ewalk failed");
}
//--------------------------------------------------------------------------
-// Function: Exception::getDetailMsg
-///\brief Returns the detailed message set at the time the exception
-/// is thrown.
-///\return Text message - \c H5std_string
-// Programmer Binh-Minh Ribler - 2000
+// Function: Exception::getDetailMsg
+///\brief Returns the detailed message set at the time the exception
+/// is thrown.
+///\return Text message - \c H5std_string
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
H5std_string Exception::getDetailMsg() const
{
- return(detail_message);
+ return(detail_message);
}
//--------------------------------------------------------------------------
-// Function: Exception::getCDetailMsg
-///\brief Returns the detailed message set at the time the exception
-/// is thrown.
-///\return Text message - \c char pointer
-// Programmer Binh-Minh Ribler - 2000
+// Function: Exception::getCDetailMsg
+///\brief Returns the detailed message set at the time the exception
+/// is thrown.
+///\return Text message - \c char pointer
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
const char* Exception::getCDetailMsg() const
{
- return(detail_message.c_str());
+ return(detail_message.c_str());
}
//--------------------------------------------------------------------------
-// Function: Exception::getFuncName
-///\brief Returns the name of the function, where the exception is thrown.
-///\return Text message - \c H5std_string
-// Programmer Binh-Minh Ribler - 2000
+// Function: Exception::getFuncName
+///\brief Returns the name of the function, where the exception is thrown.
+///\return Text message - \c H5std_string
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
H5std_string Exception::getFuncName() const
{
- return(func_name);
+ return(func_name);
}
//--------------------------------------------------------------------------
-// Function: Exception::getCFuncName
-///\brief Returns the name of the function, where the exception is thrown.
-///\return Text message - \c char pointer
-// Programmer Binh-Minh Ribler - 2000
+// Function: Exception::getCFuncName
+///\brief Returns the name of the function, where the exception is thrown.
+///\return Text message - \c char pointer
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
const char* Exception::getCFuncName() const
{
- return(func_name.c_str());
+ return(func_name.c_str());
}
//--------------------------------------------------------------------------
-// Function: Exception::printErrorStack (static)
-///\brief Prints the error stack in a default manner.
-///\param stream - IN: File pointer, default to stderr
-///\param err_stack - IN: Error stack ID, default to H5E_DEFAULT(0)
-// Programmer Binh-Minh Ribler - 2000
+// Function: Exception::printErrorStack (static)
+///\brief Prints the error stack in a default manner.
+///\param stream - IN: File pointer, default to stderr
+///\param err_stack - IN: Error stack ID, default to H5E_DEFAULT(0)
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
void Exception::printErrorStack(FILE* stream, hid_t err_stack)
{
herr_t ret_value = H5Eprint2(err_stack, stream);
- if( ret_value < 0 )
- throw Exception( "Printing error stack", "H5Eprint2 failed" );
+ if (ret_value < 0)
+ throw Exception("Printing error stack", "H5Eprint2 failed");
}
//--------------------------------------------------------------------------
-// Function: Exception::printError
-// Purpose: Prints the error stack in a default manner. This member
-// function is replaced by the static function printErrorStack
-// and will be removed from the next major release.
-// Parameter stream - IN: File pointer
-// Programmer Binh-Minh Ribler - 2000
+// Function: Exception::printError
+// Purpose: Prints the error stack in a default manner. This member
+// function is replaced by the static function printErrorStack
+// and will be removed from the next major release.
+// Parameter stream - IN: File pointer
+// Programmer Binh-Minh Ribler - 2000
// Description:
-// This function can be removed in next major release.
-// -BMR, 2014/04/24
-// Removed from documentation. -BMR, 2016/03/23
+// This function can be removed in next major release.
+// -BMR, 2014/04/24
+// Removed from documentation. -BMR, 2016/03/23
//--------------------------------------------------------------------------
void Exception::printError(FILE* stream) const
{
@@ -326,262 +324,285 @@ void Exception::printError(FILE* stream) const
}
//--------------------------------------------------------------------------
-// Function: Exception destructor
-///\brief Noop destructor
-// Programmer Binh-Minh Ribler - 2000
+// Function: Exception destructor
+///\brief Noop destructor
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
Exception::~Exception() throw() {}
//--------------------------------------------------------------------------
-// Subclass: FileIException
-// Programmer Binh-Minh Ribler - 2000
+// Subclass: FileIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
//--------------------------------------------------------------------------
-// Function: FileIException default constructor
-///\brief Default constructor.
+// Function: FileIException default constructor
+///\brief Default constructor.
//--------------------------------------------------------------------------
FileIException::FileIException():Exception(){}
//--------------------------------------------------------------------------
-// Function: FileIException overloaded constructor
-///\brief Creates a FileIException with the name of the function,
-/// in which the failure occurs, and an optional detailed message.
-///\param func - IN: Name of the function where failure occurs
-///\param message - IN: Message on the failure
+// Function: FileIException overloaded constructor
+///\brief Creates a FileIException with the name of the function,
+/// in which the failure occurs, and an optional detailed message.
+///\param func - IN: Name of the function where failure occurs
+///\param message - IN: Message on the failure
//--------------------------------------------------------------------------
FileIException::FileIException(const H5std_string& func, const H5std_string& message) : Exception(func, message) {}
//--------------------------------------------------------------------------
-// Function: FileIException destructor
-///\brief Noop destructor.
+// Function: FileIException destructor
+///\brief Noop destructor.
//--------------------------------------------------------------------------
FileIException::~FileIException() throw() {}
//--------------------------------------------------------------------------
-// Subclass: GroupIException
-// Programmer Binh-Minh Ribler - 2000
+// Subclass: GroupIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
//--------------------------------------------------------------------------
-// Function: GroupIException default constructor
-///\brief Default constructor.
+// Function: GroupIException default constructor
+///\brief Default constructor.
//--------------------------------------------------------------------------
GroupIException::GroupIException():Exception(){}
//--------------------------------------------------------------------------
-// Function: GroupIException overloaded constructor
-///\brief Creates a GroupIException with the name of the function,
-/// in which the failure occurs, and an optional detailed message.
-///\param func - IN: Name of the function where failure occurs
-///\param message - IN: Message on the failure
+// Function: GroupIException overloaded constructor
+///\brief Creates a GroupIException with the name of the function,
+/// in which the failure occurs, and an optional detailed message.
+///\param func - IN: Name of the function where failure occurs
+///\param message - IN: Message on the failure
//--------------------------------------------------------------------------
GroupIException::GroupIException(const H5std_string& func, const H5std_string& message) : Exception(func, message) {}
//--------------------------------------------------------------------------
-// Function: GroupIException destructor
-///\brief Noop destructor.
+// Function: GroupIException destructor
+///\brief Noop destructor.
//--------------------------------------------------------------------------
GroupIException::~GroupIException() throw() {}
//--------------------------------------------------------------------------
-// Subclass: DataSpaceIException
-// Programmer Binh-Minh Ribler - 2000
+// Subclass: DataSpaceIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
//--------------------------------------------------------------------------
-// Function: DataSpaceIException default constructor
-///\brief Default constructor.
+// Function: DataSpaceIException default constructor
+///\brief Default constructor.
//--------------------------------------------------------------------------
DataSpaceIException::DataSpaceIException():Exception(){}
//--------------------------------------------------------------------------
-// Function: DataSpaceIException overloaded constructor
-///\brief Creates a DataSpaceIException with the name of the function,
-/// in which the failure occurs, and an optional detailed message.
-///\param func - IN: Name of the function where failure occurs
-///\param message - IN: Message on the failure
+// Function: DataSpaceIException overloaded constructor
+///\brief Creates a DataSpaceIException with the name of the function,
+/// in which the failure occurs, and an optional detailed message.
+///\param func - IN: Name of the function where failure occurs
+///\param message - IN: Message on the failure
//--------------------------------------------------------------------------
DataSpaceIException::DataSpaceIException(const H5std_string& func, const H5std_string& message) : Exception(func, message) {}
//--------------------------------------------------------------------------
-// Function: DataSpaceIException destructor
-///\brief Noop destructor.
+// Function: DataSpaceIException destructor
+///\brief Noop destructor.
//--------------------------------------------------------------------------
DataSpaceIException::~DataSpaceIException() throw() {}
//--------------------------------------------------------------------------
-// Subclass: DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Subclass: DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
//--------------------------------------------------------------------------
-// Function: DataTypeIException default constructor
-///\brief Default constructor.
+// Function: DataTypeIException default constructor
+///\brief Default constructor.
//--------------------------------------------------------------------------
DataTypeIException::DataTypeIException():Exception(){}
//--------------------------------------------------------------------------
-// Function: DataTypeIException overloaded constructor
-///\brief Creates a DataTypeIException with the name of the function,
-/// in which the failure occurs, and an optional detailed message.
-///\param func - IN: Name of the function where failure occurs
-///\param message - IN: Message on the failure
+// Function: DataTypeIException overloaded constructor
+///\brief Creates a DataTypeIException with the name of the function,
+/// in which the failure occurs, and an optional detailed message.
+///\param func - IN: Name of the function where failure occurs
+///\param message - IN: Message on the failure
//--------------------------------------------------------------------------
DataTypeIException::DataTypeIException(const H5std_string& func, const H5std_string& message) : Exception(func, message) {}
//--------------------------------------------------------------------------
-// Function: DataTypeIException destructor
-///\brief Noop destructor.
+// Function: DataTypeIException destructor
+///\brief Noop destructor.
//--------------------------------------------------------------------------
DataTypeIException::~DataTypeIException() throw() {}
//--------------------------------------------------------------------------
-// Subclass: PropListIException
-// Programmer Binh-Minh Ribler - 2000
+// Subclass: ObjHeaderIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
//--------------------------------------------------------------------------
-// Function: PropListIException default constructor
-///\brief Default constructor.
+// Function: ObjHeaderIException default constructor
+///\brief Default constructor.
+//--------------------------------------------------------------------------
+ObjHeaderIException::ObjHeaderIException():Exception(){}
+//--------------------------------------------------------------------------
+// Function: ObjHeaderIException overloaded constructor
+///\brief Creates an ObjHeaderIException with the name of the function,
+/// in which the failure occurs, and an optional detailed message.
+///\param func - IN: Name of the function where failure occurs
+///\param message - IN: Message on the failure
+//--------------------------------------------------------------------------
+ObjHeaderIException::ObjHeaderIException(const H5std_string& func, const H5std_string& message) : Exception(func, message) {}
+//--------------------------------------------------------------------------
+// Function: ObjHeaderIException destructor
+///\brief Noop destructor.
+//--------------------------------------------------------------------------
+ObjHeaderIException::~ObjHeaderIException() throw() {}
+
+//--------------------------------------------------------------------------
+// Subclass: PropListIException
+// Programmer Binh-Minh Ribler - 2000
+//--------------------------------------------------------------------------
+//--------------------------------------------------------------------------
+// Function: PropListIException default constructor
+///\brief Default constructor.
//--------------------------------------------------------------------------
PropListIException::PropListIException():Exception(){}
//--------------------------------------------------------------------------
-// Function: PropListIException overloaded constructor
-///\brief Creates a PropListIException with the name of the function,
-/// in which the failure occurs, and an optional detailed message.
-///\param func - IN: Name of the function where failure occurs
-///\param message - IN: Message on the failure
+// Function: PropListIException overloaded constructor
+///\brief Creates a PropListIException with the name of the function,
+/// in which the failure occurs, and an optional detailed message.
+///\param func - IN: Name of the function where failure occurs
+///\param message - IN: Message on the failure
//--------------------------------------------------------------------------
PropListIException::PropListIException(const H5std_string& func, const H5std_string& message) : Exception(func, message) {}
//--------------------------------------------------------------------------
-// Function: PropListIException destructor
-///\brief Noop destructor.
+// Function: PropListIException destructor
+///\brief Noop destructor.
//--------------------------------------------------------------------------
PropListIException::~PropListIException() throw() {}
//--------------------------------------------------------------------------
-// Subclass: DataSetIException
-// Programmer Binh-Minh Ribler - 2000
+// Subclass: DataSetIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
//--------------------------------------------------------------------------
-// Function: DataSetIException default constructor
-///\brief Default constructor.
+// Function: DataSetIException default constructor
+///\brief Default constructor.
//--------------------------------------------------------------------------
DataSetIException::DataSetIException():Exception(){}
//--------------------------------------------------------------------------
-// Function: DataSetIException overloaded constructor
-///\brief Creates a DataSetIException with the name of the function,
-/// in which the failure occurs, and an optional detailed message.
-///\param func - IN: Name of the function where failure occurs
-///\param message - IN: Message on the failure
+// Function: DataSetIException overloaded constructor
+///\brief Creates a DataSetIException with the name of the function,
+/// in which the failure occurs, and an optional detailed message.
+///\param func - IN: Name of the function where failure occurs
+///\param message - IN: Message on the failure
//--------------------------------------------------------------------------
DataSetIException::DataSetIException(const H5std_string& func, const H5std_string& message) : Exception(func, message) {}
//--------------------------------------------------------------------------
-// Function: DataSetIException destructor
-///\brief Noop destructor.
+// Function: DataSetIException destructor
+///\brief Noop destructor.
//--------------------------------------------------------------------------
DataSetIException::~DataSetIException() throw() {}
//--------------------------------------------------------------------------
-// Subclass: AttributeIException
-// Programmer Binh-Minh Ribler - 2000
+// Subclass: AttributeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
//--------------------------------------------------------------------------
-// Function: AttributeIException default constructor
-///\brief Default constructor.
+// Function: AttributeIException default constructor
+///\brief Default constructor.
//--------------------------------------------------------------------------
AttributeIException::AttributeIException():Exception(){}
//--------------------------------------------------------------------------
-// Function: AttributeIException overloaded constructor
-///\brief Creates an AttributeIException with the name of the function,
-/// in which the failure occurs, and an optional detailed message.
-///\param func - IN: Name of the function where failure occurs
-///\param message - IN: Message on the failure
+// Function: AttributeIException overloaded constructor
+///\brief Creates an AttributeIException with the name of the function,
+/// in which the failure occurs, and an optional detailed message.
+///\param func - IN: Name of the function where failure occurs
+///\param message - IN: Message on the failure
//--------------------------------------------------------------------------
AttributeIException::AttributeIException(const H5std_string& func, const H5std_string& message) : Exception(func, message) {}
//--------------------------------------------------------------------------
-// Function: AttributeIException destructor
-///\brief Noop destructor.
+// Function: AttributeIException destructor
+///\brief Noop destructor.
//--------------------------------------------------------------------------
AttributeIException::~AttributeIException() throw() {}
//--------------------------------------------------------------------------
-// Subclass: ReferenceException
-// Programmer Binh-Minh Ribler - 2000
+// Subclass: ReferenceException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
//--------------------------------------------------------------------------
-// Function: ReferenceException default constructor
-///\brief Default constructor.
+// Function: ReferenceException default constructor
+///\brief Default constructor.
//--------------------------------------------------------------------------
ReferenceException::ReferenceException():Exception(){}
//--------------------------------------------------------------------------
-// Function: ReferenceException overloaded constructor
-///\brief Creates a ReferenceException with the name of the function,
-/// in which the failure occurs, and an optional detailed message.
-///\param func - IN: Name of the function where failure occurs
-///\param message - IN: Message on the failure
+// Function: ReferenceException overloaded constructor
+///\brief Creates a ReferenceException with the name of the function,
+/// in which the failure occurs, and an optional detailed message.
+///\param func - IN: Name of the function where failure occurs
+///\param message - IN: Message on the failure
//--------------------------------------------------------------------------
ReferenceException::ReferenceException(const H5std_string& func, const H5std_string& message) : Exception(func, message) {}
//--------------------------------------------------------------------------
-// Function: ReferenceException destructor
-///\brief Noop destructor.
+// Function: ReferenceException destructor
+///\brief Noop destructor.
//--------------------------------------------------------------------------
ReferenceException::~ReferenceException() throw() {}
//--------------------------------------------------------------------------
-// Subclass: LibraryIException
-// Programmer Binh-Minh Ribler - 2000
+// Subclass: LibraryIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
//--------------------------------------------------------------------------
-// Function: LibraryIException default constructor
-///\brief Default constructor.
+// Function: LibraryIException default constructor
+///\brief Default constructor.
//--------------------------------------------------------------------------
LibraryIException::LibraryIException():Exception(){}
//--------------------------------------------------------------------------
-// Function: LibraryIException overloaded constructor
-///\brief Creates a LibraryIException with the name of the function,
-/// in which the failure occurs, and an optional detailed message.
-///\param func - IN: Name of the function where failure occurs
-///\param message - IN: Message on the failure
+// Function: LibraryIException overloaded constructor
+///\brief Creates a LibraryIException with the name of the function,
+/// in which the failure occurs, and an optional detailed message.
+///\param func - IN: Name of the function where failure occurs
+///\param message - IN: Message on the failure
//--------------------------------------------------------------------------
LibraryIException::LibraryIException(const H5std_string& func, const H5std_string& message) : Exception(func, message) {}
//--------------------------------------------------------------------------
-// Function: LibraryIException destructor
-///\brief Noop destructor.
+// Function: LibraryIException destructor
+///\brief Noop destructor.
//--------------------------------------------------------------------------
LibraryIException::~LibraryIException() throw() {}
//--------------------------------------------------------------------------
-// Subclass: LocationException
-// Programmer Binh-Minh Ribler - 2014
+// Subclass: LocationException
+// Programmer Binh-Minh Ribler - 2014
//--------------------------------------------------------------------------
//--------------------------------------------------------------------------
-// Function: LocationException default constructor
-///\brief Default constructor.
+// Function: LocationException default constructor
+///\brief Default constructor.
//--------------------------------------------------------------------------
LocationException::LocationException():Exception(){}
//--------------------------------------------------------------------------
-// Function: LocationException overloaded constructor
-///\brief Creates a LocationException with the name of the function,
-/// in which the failure occurs, and an optional detailed message.
-///\param func - IN: Name of the function where failure occurs
-///\param message - IN: Message on the failure
+// Function: LocationException overloaded constructor
+///\brief Creates a LocationException with the name of the function,
+/// in which the failure occurs, and an optional detailed message.
+///\param func - IN: Name of the function where failure occurs
+///\param message - IN: Message on the failure
//--------------------------------------------------------------------------
LocationException::LocationException(const H5std_string& func, const H5std_string& message) : Exception(func, message) {}
//--------------------------------------------------------------------------
-// Function: LocationException destructor
-///\brief Noop destructor.
+// Function: LocationException destructor
+///\brief Noop destructor.
//--------------------------------------------------------------------------
LocationException::~LocationException() throw() {}
//--------------------------------------------------------------------------
-// Subclass: IdComponentException
-// Programmer Binh-Minh Ribler - 2000
+// Subclass: IdComponentException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
//--------------------------------------------------------------------------
-// Function: IdComponentException default constructor
-///\brief Default constructor.
+// Function: IdComponentException default constructor
+///\brief Default constructor.
//--------------------------------------------------------------------------
IdComponentException::IdComponentException(): Exception() {}
//--------------------------------------------------------------------------
-// Function: IdComponentException overloaded constructor
-///\brief Creates a IdComponentException with the name of the function,
-/// in which the failure occurs, and an optional detailed message.
-///\param func - IN: Name of the function where failure occurs
-///\param message - IN: Message on the failure
+// Function: IdComponentException overloaded constructor
+///\brief Creates a IdComponentException with the name of the function,
+/// in which the failure occurs, and an optional detailed message.
+///\param func - IN: Name of the function where failure occurs
+///\param message - IN: Message on the failure
//--------------------------------------------------------------------------
IdComponentException::IdComponentException(const H5std_string& func, const H5std_string& message) : Exception(func, message) {}
//--------------------------------------------------------------------------
-// Function: IdComponentException destructor
-///\brief Noop destructor.
+// Function: IdComponentException destructor
+///\brief Noop destructor.
//--------------------------------------------------------------------------
IdComponentException::~IdComponentException() throw() {}
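
Since the walkErrorStack documentation above spells out the walk-callback prototype, a short sketch of a matching callback may help. It uses the H5E_walk2_t / H5E_error2_t forms that the wrapper forwards to, and the deliberately missing file name exists only to populate the error stack; this is an illustration under those assumptions, not part of the commit.

    #include <cstdio>
    #include "H5Cpp.h"
    using namespace H5;

    // Callback matching H5E_walk2_t: print one line per error-stack entry.
    extern "C" herr_t print_entry(unsigned n, const H5E_error2_t *err, void *client_data)
    {
        std::FILE *out = static_cast<std::FILE *>(client_data);
        std::fprintf(out, "#%u %s (line %u): %s\n",
                     n, err->func_name, err->line,
                     err->desc ? err->desc : "(no description)");
        return 0;    // non-negative return keeps the walk going
    }

    int main()
    {
        Exception::dontPrint();                  // keep the C library's auto-print quiet
        try {
            H5File file("missing.h5", H5F_ACC_RDONLY);   // expected to fail
        }
        catch (const FileIException&) {
            // Walk from the API call down to the innermost error.
            Exception::walkErrorStack(H5E_WALK_DOWNWARD, print_entry, stderr);
        }
        return 0;
    }
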
diff --git a/c++/src/H5Exception.h b/c++/src/H5Exception.h
index da9066a..2cc2dd6 100644
--- a/c++/src/H5Exception.h
+++ b/c++/src/H5Exception.h
@@ -6,12 +6,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef __H5Exception_H
@@ -33,142 +31,149 @@ namespace H5 {
*/
class H5_DLLCPP Exception {
public:
- // Creates an exception with a function name where the failure occurs
- // and an optional detailed message
- Exception(const H5std_string& func_name, const H5std_string& message = DEFAULT_MSG);
+ // Creates an exception with a function name where the failure occurs
+ // and an optional detailed message
+ Exception(const H5std_string& func_name, const H5std_string& message = DEFAULT_MSG);
- // Returns a character string that describes the error specified by
- // a major error number.
- H5std_string getMajorString( hid_t err_major_id ) const;
+ // Returns a character string that describes the error specified by
+ // a major error number.
+ H5std_string getMajorString(hid_t err_major_id) const;
- // Returns a character string that describes the error specified by
- // a minor error number.
- H5std_string getMinorString( hid_t err_minor_id ) const;
+ // Returns a character string that describes the error specified by
+ // a minor error number.
+ H5std_string getMinorString(hid_t err_minor_id) const;
- // Returns the detailed message set at the time the exception is thrown
- H5std_string getDetailMsg() const;
- const char* getCDetailMsg() const; // C string of detailed message
- H5std_string getFuncName() const; // function name as a string object
- const char* getCFuncName() const; // function name as a char string
+ // Returns the detailed message set at the time the exception is thrown
+ H5std_string getDetailMsg() const;
+ const char* getCDetailMsg() const; // C string of detailed message
+ H5std_string getFuncName() const; // function name as a string object
+ const char* getCFuncName() const; // function name as a char string
- // Turns on the automatic error printing.
- static void setAutoPrint( H5E_auto2_t& func, void* client_data);
+ // Turns on the automatic error printing.
+ static void setAutoPrint(H5E_auto2_t& func, void* client_data);
- // Turns off the automatic error printing.
- static void dontPrint();
+ // Turns off the automatic error printing.
+ static void dontPrint();
- // Retrieves the current settings for the automatic error stack
- // traversal function and its data.
- static void getAutoPrint( H5E_auto2_t& func, void** client_data);
+ // Retrieves the current settings for the automatic error stack
+ // traversal function and its data.
+ static void getAutoPrint(H5E_auto2_t& func, void** client_data);
- // Clears the error stack for the current thread.
- static void clearErrorStack();
+ // Clears the error stack for the current thread.
+ static void clearErrorStack();
- // Walks the error stack for the current thread, calling the
- // specified function.
- static void walkErrorStack( H5E_direction_t direction,
- H5E_walk2_t func, void* client_data);
+ // Walks the error stack for the current thread, calling the
+ // specified function.
+ static void walkErrorStack(H5E_direction_t direction,
+ H5E_walk2_t func, void* client_data);
- // Prints the error stack in a default manner.
- static void printErrorStack(FILE* stream = stderr,
- hid_t err_stack = H5E_DEFAULT); // Static
- virtual void printError(FILE* stream = NULL) const;
+ // Prints the error stack in a default manner.
+ static void printErrorStack(FILE* stream = stderr,
+ hid_t err_stack = H5E_DEFAULT);
+ virtual void printError(FILE* stream = NULL) const;
- // Default constructor
- Exception();
+ // Default constructor
+ Exception();
- // copy constructor
- Exception( const Exception& orig);
+ // copy constructor
+ Exception(const Exception& orig);
- // virtual Destructor
- virtual ~Exception() throw();
+ // virtual Destructor
+ virtual ~Exception() throw();
protected:
- // Default value for detail_message
- static const char DEFAULT_MSG[];
+ // Default value for detail_message
+ static const char DEFAULT_MSG[];
private:
- H5std_string detail_message;
- H5std_string func_name;
+ H5std_string detail_message;
+ H5std_string func_name;
};
class H5_DLLCPP FileIException : public Exception {
public:
- FileIException( const H5std_string& func_name, const H5std_string& message = DEFAULT_MSG);
- FileIException();
- virtual ~FileIException() throw();
+ FileIException(const H5std_string& func_name, const H5std_string& message = DEFAULT_MSG);
+ FileIException();
+ virtual ~FileIException() throw();
};
class H5_DLLCPP GroupIException : public Exception {
public:
- GroupIException( const H5std_string& func_name, const H5std_string& message = DEFAULT_MSG);
- GroupIException();
- virtual ~GroupIException() throw();
+ GroupIException(const H5std_string& func_name, const H5std_string& message = DEFAULT_MSG);
+ GroupIException();
+ virtual ~GroupIException() throw();
};
class H5_DLLCPP DataSpaceIException : public Exception {
public:
- DataSpaceIException(const H5std_string& func_name, const H5std_string& message = DEFAULT_MSG);
- DataSpaceIException();
- virtual ~DataSpaceIException() throw();
+ DataSpaceIException(const H5std_string& func_name, const H5std_string& message = DEFAULT_MSG);
+ DataSpaceIException();
+ virtual ~DataSpaceIException() throw();
};
class H5_DLLCPP DataTypeIException : public Exception {
public:
- DataTypeIException(const H5std_string& func_name, const H5std_string& message = DEFAULT_MSG);
- DataTypeIException();
- virtual ~DataTypeIException() throw();
+ DataTypeIException(const H5std_string& func_name, const H5std_string& message = DEFAULT_MSG);
+ DataTypeIException();
+ virtual ~DataTypeIException() throw();
+};
+
+class H5_DLLCPP ObjHeaderIException : public Exception {
+ public:
+ ObjHeaderIException(const H5std_string& func_name, const H5std_string& message = DEFAULT_MSG);
+ ObjHeaderIException();
+ virtual ~ObjHeaderIException() throw();
};
class H5_DLLCPP PropListIException : public Exception {
public:
- PropListIException(const H5std_string& func_name, const H5std_string& message = DEFAULT_MSG);
- PropListIException();
- virtual ~PropListIException() throw();
+ PropListIException(const H5std_string& func_name, const H5std_string& message = DEFAULT_MSG);
+ PropListIException();
+ virtual ~PropListIException() throw();
};
class H5_DLLCPP DataSetIException : public Exception {
public:
- DataSetIException(const H5std_string& func_name, const H5std_string& message = DEFAULT_MSG);
- DataSetIException();
- virtual ~DataSetIException() throw();
+ DataSetIException(const H5std_string& func_name, const H5std_string& message = DEFAULT_MSG);
+ DataSetIException();
+ virtual ~DataSetIException() throw();
};
class H5_DLLCPP AttributeIException : public Exception {
public:
- AttributeIException(const H5std_string& func_name, const H5std_string& message = DEFAULT_MSG);
- AttributeIException();
- virtual ~AttributeIException() throw();
+ AttributeIException(const H5std_string& func_name, const H5std_string& message = DEFAULT_MSG);
+ AttributeIException();
+ virtual ~AttributeIException() throw();
};
class H5_DLLCPP ReferenceException : public Exception {
public:
- ReferenceException(const H5std_string& func_name, const H5std_string& message = DEFAULT_MSG);
- ReferenceException();
- virtual ~ReferenceException() throw();
+ ReferenceException(const H5std_string& func_name, const H5std_string& message = DEFAULT_MSG);
+ ReferenceException();
+ virtual ~ReferenceException() throw();
};
class H5_DLLCPP LibraryIException : public Exception {
public:
- LibraryIException(const H5std_string& func_name, const H5std_string& message = DEFAULT_MSG);
- LibraryIException();
- virtual ~LibraryIException() throw();
+ LibraryIException(const H5std_string& func_name, const H5std_string& message = DEFAULT_MSG);
+ LibraryIException();
+ virtual ~LibraryIException() throw();
};
class H5_DLLCPP LocationException : public Exception {
public:
- LocationException(const H5std_string& func_name, const H5std_string& message = DEFAULT_MSG);
- LocationException();
- virtual ~LocationException() throw();
+ LocationException(const H5std_string& func_name, const H5std_string& message = DEFAULT_MSG);
+ LocationException();
+ virtual ~LocationException() throw();
};
class H5_DLLCPP IdComponentException : public Exception {
public:
- IdComponentException(const H5std_string& func_name, const H5std_string& message = DEFAULT_MSG);
- IdComponentException();
- virtual ~IdComponentException() throw();
-};
+ IdComponentException(const H5std_string& func_name, const H5std_string& message = DEFAULT_MSG);
+ IdComponentException();
+ virtual ~IdComponentException() throw();
-}
+}; // end of IdComponentException
+} // namespace H5
#endif // __H5Exception_H
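
The subclasses declared above let callers catch failures from a specific wrapper class before falling back to the common Exception base. A minimal, illustrative try/catch chain follows; the file and dataset names are placeholders and not part of this commit.

    #include <iostream>
    #include "H5Cpp.h"
    using namespace H5;

    int main()
    {
        Exception::dontPrint();          // rely on C++ exceptions for reporting
        try {
            H5File file("no_such_file.h5", H5F_ACC_RDONLY);
            DataSet dset = file.openDataSet("/data");
        }
        catch (const FileIException& e) {        // most specific subclass first
            std::cerr << e.getCFuncName() << ": " << e.getCDetailMsg() << '\n';
        }
        catch (const DataSetIException& e) {
            std::cerr << e.getCFuncName() << ": " << e.getCDetailMsg() << '\n';
        }
        catch (const Exception& e) {             // anything else from the wrappers
            std::cerr << e.getCFuncName() << ": " << e.getCDetailMsg() << '\n';
        }
        return 0;
    }
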
diff --git a/c++/src/H5FaccProp.cpp b/c++/src/H5FaccProp.cpp
index c3919da..b414294 100644
--- a/c++/src/H5FaccProp.cpp
+++ b/c++/src/H5FaccProp.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <string>
@@ -63,7 +61,7 @@ FileAccPropList* FileAccPropList::getConstant()
//--------------------------------------------------------------------------
// Function: FileAccPropList::deleteConstants
-// Purpose: Deletes the constant object that FileAccPropList::DEFAULT_
+// Purpose Deletes the constant object that FileAccPropList::DEFAULT_
// points to.
// exception H5::PropListIException
// Programmer Binh-Minh Ribler - 2015
@@ -75,271 +73,271 @@ void FileAccPropList::deleteConstants()
}
//--------------------------------------------------------------------------
-// Purpose: Constant for default property
+// Purpose Constant for default property
//--------------------------------------------------------------------------
const FileAccPropList& FileAccPropList::DEFAULT = *getConstant();
#endif // DOXYGEN_SHOULD_SKIP_THIS
//--------------------------------------------------------------------------
-// Function: Default Constructor
-///\brief Creates a file access property list
-// Programmer: Binh-Minh Ribler - 2000
+// Function: Default Constructor
+///\brief Creates a file access property list
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-FileAccPropList::FileAccPropList() : PropList( H5P_FILE_ACCESS ) {}
+FileAccPropList::FileAccPropList() : PropList(H5P_FILE_ACCESS) {}
//--------------------------------------------------------------------------
-// Function: FileAccPropList copy constructor
-///\brief Copy Constructor: makes a copy of the original
-///\param original - IN: FileAccPropList instance to copy
-// Programmer: Binh-Minh Ribler - 2000
+// Function: FileAccPropList copy constructor
+///\brief Copy Constructor: makes a copy of the original
+///\param original - IN: FileAccPropList instance to copy
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
FileAccPropList::FileAccPropList(const FileAccPropList& original) : PropList(original) {}
//--------------------------------------------------------------------------
-// Function: FileAccPropList overloaded constructor
-///\brief Creates a file access property list using the id of an
-/// existing one.
-// Programmer: Binh-Minh Ribler - 2000
+// Function: FileAccPropList overloaded constructor
+///\brief Creates a file access property list using the id of an
+/// existing one.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
FileAccPropList::FileAccPropList(const hid_t plist_id) : PropList(plist_id) {}
//--------------------------------------------------------------------------
-// Function: FileAccPropList::setStdio
-///\brief Modifies this property list to use the \c H5FD_STDIO driver.
+// Function: FileAccPropList::setStdio
+///\brief Modifies this property list to use the \c H5FD_STDIO driver.
///
-///\exception H5::PropListIException
-// Programmer: Binh-Minh Ribler - April, 2004
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
void FileAccPropList::setStdio() const
{
- herr_t ret_value = H5Pset_fapl_stdio(id);
- if( ret_value < 0 )
- {
- throw PropListIException("FileAccPropList::setStdio", "H5Pset_fapl_stdio failed");
- }
+ herr_t ret_value = H5Pset_fapl_stdio(id);
+ if (ret_value < 0)
+ {
+ throw PropListIException("FileAccPropList::setStdio", "H5Pset_fapl_stdio failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: FileAccPropList::getDriver
-///\brief Return the ID of the low-level file driver.
-///\return A low-level driver ID which is the same ID used when the
-/// driver was set for the property list. The driver ID is
-/// only valid as long as the file driver remains registered.
-/// Valid driver identifiers can be found at:
+// Function: FileAccPropList::getDriver
+///\brief Return the ID of the low-level file driver.
+///\return A low-level driver ID which is the same ID used when the
+/// driver was set for the property list. The driver ID is
+/// only valid as long as the file driver remains registered.
+/// Valid driver identifiers can be found at:
/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5P.html#Property-GetDriver
-///\exception H5::PropListIException
-// Programmer: Binh-Minh Ribler - April, 2004
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
hid_t FileAccPropList::getDriver() const
{
- hid_t driver = H5Pget_driver(id);
- if (driver < 0)
- {
- throw PropListIException("FileAccPropList::getDriver", "H5Pget_driver failed");
- }
+ hid_t driver = H5Pget_driver(id);
+ if (driver < 0)
+ {
+ throw PropListIException("FileAccPropList::getDriver", "H5Pget_driver failed");
+ }
return(driver);
}
//--------------------------------------------------------------------------
-// Function: FileAccPropList::setDriver
-///\brief Set file driver for this property list.
-///\param new_driver_id - IN: File driver
-///\param new_driver_info - IN: Struct containing the driver-specific properites
-///\exception H5::PropListIException
+// Function: FileAccPropList::setDriver
+///\brief Set file driver for this property list.
+///\param new_driver_id - IN: File driver
+///\param new_driver_info - IN: Struct containing the driver-specific properties
+///\exception H5::PropListIException
///\par Description
-/// For a list of valid driver identifiers, please see the C
-/// layer Reference Manual at:
+/// For a list of valid driver identifiers, please see the C
+/// layer Reference Manual at:
/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5P.html#Property-SetDriver
-// Programmer: Binh-Minh Ribler - April, 2004
+// Programmer Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
void FileAccPropList::setDriver(hid_t new_driver_id, const void *new_driver_info) const
{
- herr_t ret_value = H5Pset_driver(id, new_driver_id, new_driver_info);
- if (ret_value < 0)
- {
- throw PropListIException("FileAccPropList::setDriver", "H5Pset_driver failed");
- }
+ herr_t ret_value = H5Pset_driver(id, new_driver_id, new_driver_info);
+ if (ret_value < 0)
+ {
+ throw PropListIException("FileAccPropList::setDriver", "H5Pset_driver failed");
+ }
}
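As a minimal usage sketch for the driver calls above (not part of this patch): it selects the STDIO driver and reads the driver id back, assuming the H5FD_STDIO macro that hdf5.h normally provides.

#include "H5Cpp.h"

void demo_driver()
{
    H5::FileAccPropList fapl;                            // copy of the default list
    fapl.setStdio();                                     // route I/O through C stdio
    bool is_stdio = (fapl.getDriver() == H5FD_STDIO);    // expected: true
}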
//--------------------------------------------------------------------------
-// Function: FileAccPropList::setFamilyOffset
-///\brief Sets offset for family driver.
-///\param offset - IN: offset value
-///\exception H5::PropListIException
-// Programmer: Binh-Minh Ribler - April, 2004
+// Function: FileAccPropList::setFamilyOffset
+///\brief Sets offset for family driver.
+///\param offset - IN: offset value
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
void FileAccPropList::setFamilyOffset(hsize_t offset) const
{
- herr_t ret_value = H5Pset_family_offset(id, offset);
- if (ret_value < 0)
- {
- throw PropListIException("FileAccPropList::setFamilyOffset", "H5Pset_family_offset failed");
- }
+ herr_t ret_value = H5Pset_family_offset(id, offset);
+ if (ret_value < 0)
+ {
+ throw PropListIException("FileAccPropList::setFamilyOffset", "H5Pset_family_offset failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: FileAccPropList::getFamilyOffset
-///\brief Get offset for family driver.
-///\return Offset for family driver
-///\exception H5::PropListIException
-// Programmer: Binh-Minh Ribler - April, 2004
+// Function: FileAccPropList::getFamilyOffset
+///\brief Get offset for family driver.
+///\return Offset for family driver
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
hsize_t FileAccPropList::getFamilyOffset() const
{
- hsize_t offset;
- herr_t ret_value = H5Pget_family_offset(id, &offset);
- if (ret_value < 0)
- {
- throw PropListIException("FileAccPropList::getFamilyOffset", "H5Pget_family_offset failed");
- }
+ hsize_t offset;
+ herr_t ret_value = H5Pget_family_offset(id, &offset);
+ if (ret_value < 0)
+ {
+ throw PropListIException("FileAccPropList::getFamilyOffset", "H5Pget_family_offset failed");
+ }
return(offset);
}
//--------------------------------------------------------------------------
-// Function: FileAccPropList::setCore
-///\brief Modifies this file access property list to use the \c H5FD_CORE
-/// driver.
-///\param increment - IN: Specifies how much memory to increase each
-/// time more memory is needed, in bytes
-///\param backing_store - IN: Indicating whether to write the file
-/// contents to disk when the file is closed
-///\exception H5::PropListIException
+// Function: FileAccPropList::setCore
+///\brief Modifies this file access property list to use the \c H5FD_CORE
+/// driver.
+///\param increment - IN: Specifies how much memory to increase each
+/// time more memory is needed, in bytes
+///\param backing_store - IN: Indicating whether to write the file
+/// contents to disk when the file is closed
+///\exception H5::PropListIException
///\par Description
-/// For more details on the use of \c H5FD_CORE driver, please
-/// refer to
+/// For more details on the use of \c H5FD_CORE driver, please
+/// refer to
/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5P.html#Property-SetFaplCore
-// Programmer: Binh-Minh Ribler - April, 2004
+// Programmer Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
void FileAccPropList::setCore (size_t increment, hbool_t backing_store) const
{
- herr_t ret_value = H5Pset_fapl_core (id, increment, backing_store);
- if (ret_value < 0)
- {
- throw PropListIException ("FileAccPropList::setCore", "H5Pset_fapl_core failed");
- }
+ herr_t ret_value = H5Pset_fapl_core (id, increment, backing_store);
+ if (ret_value < 0)
+ {
+ throw PropListIException ("FileAccPropList::setCore", "H5Pset_fapl_core failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: FileAccPropList::getCore
-///\brief Queries core file driver properties.
-///\param increment - OUT: Size of memory increment, in bytes
-///\param backing_store - OUT: Indicating whether to write the file
-/// contents to disk when the file is closed
-///\exception H5::PropListIException
-// Programmer: Binh-Minh Ribler - April, 2004
+// Function: FileAccPropList::getCore
+///\brief Queries core file driver properties.
+///\param increment - OUT: Size of memory increment, in bytes
+///\param backing_store - OUT: Indicating whether to write the file
+/// contents to disk when the file is closed
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
void FileAccPropList::getCore (size_t& increment, hbool_t& backing_store) const
{
- herr_t ret_value = H5Pget_fapl_core(id, &increment, &backing_store);
- if( ret_value < 0 )
- {
- throw PropListIException("FileAccPropList::getCore", "H5Pget_fapl_core failed");
- }
+ herr_t ret_value = H5Pget_fapl_core(id, &increment, &backing_store);
+ if (ret_value < 0)
+ {
+ throw PropListIException("FileAccPropList::getCore", "H5Pget_fapl_core failed");
+ }
}
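A minimal sketch of the H5FD_CORE calls above, not part of this patch; the 1 MiB increment and the choice to keep a backing store are illustrative values.

#include "H5Cpp.h"

void demo_core()
{
    H5::FileAccPropList fapl;
    // Grow the in-memory image in 1 MiB steps; write it to disk on close.
    fapl.setCore(1024 * 1024, 1);

    size_t  increment = 0;
    hbool_t backing_store = 0;
    fapl.getCore(increment, backing_store);    // reads back 1048576 and 1
}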
//--------------------------------------------------------------------------
-// Function: FileAccPropList::setFamily
-///\brief Sets this file access property list to use the family driver.
-///\param memb_size - IN: Size in bytes of each file member
-///\param memb_plist - IN: File access property list to be used for
-/// each family member
-///\exception H5::PropListIException
+// Function: FileAccPropList::setFamily
+///\brief Sets this file access property list to use the family driver.
+///\param memb_size - IN: Size in bytes of each file member
+///\param memb_plist - IN: File access property list to be used for
+/// each family member
+///\exception H5::PropListIException
///\par Description
-/// Note that \a memb_size is used only when creating a new file.
-// Programmer: Binh-Minh Ribler - April, 2004
+/// Note that \a memb_size is used only when creating a new file.
+// Programmer Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
-void FileAccPropList::setFamily( hsize_t memb_size, const FileAccPropList& memb_plist ) const
+void FileAccPropList::setFamily(hsize_t memb_size, const FileAccPropList& memb_plist) const
{
- herr_t ret_value = H5Pset_fapl_family (id, memb_size, memb_plist.getId() );
- if( ret_value < 0 )
- {
- throw PropListIException("FileAccPropList::setFamily", "H5Pset_fapl_family failed");
- }
+ herr_t ret_value = H5Pset_fapl_family (id, memb_size, memb_plist.getId());
+ if (ret_value < 0)
+ {
+ throw PropListIException("FileAccPropList::setFamily", "H5Pset_fapl_family failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: FileAccPropList::getFamily
-///\brief Returns information about the family file access property
-/// list.
-///\param memb_size - OUT: Size in bytes of each file member
-///\param memb_plist - OUT: Retrieved file access property list for each
-/// file member
-///\exception H5::PropListIException
-// Programmer: Binh-Minh Ribler - April, 2004
+// Function: FileAccPropList::getFamily
+///\brief Returns information about the family file access property
+/// list.
+///\param memb_size - OUT: Size in bytes of each file member
+///\param memb_plist - OUT: Retrieved file access property list for each
+/// file member
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
void FileAccPropList::getFamily(hsize_t& memb_size, FileAccPropList& memb_plist) const
{
- hid_t memb_plist_id;
- herr_t ret_value = H5Pget_fapl_family( id, &memb_size, &memb_plist_id );
- if( ret_value < 0 )
- {
- throw PropListIException("FileAccPropList::getFamily", "H5Pget_fapl_family failed");
- }
- memb_plist.p_setId(memb_plist_id);
+ hid_t memb_plist_id;
+ herr_t ret_value = H5Pget_fapl_family(id, &memb_size, &memb_plist_id);
+ if (ret_value < 0)
+ {
+ throw PropListIException("FileAccPropList::getFamily", "H5Pget_fapl_family failed");
+ }
+ memb_plist.p_setId(memb_plist_id);
}
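A sketch of the family-driver calls above, not part of this patch; the 100 MiB member size is illustrative and, as noted, only matters when the file is created.

#include "H5Cpp.h"

void demo_family()
{
    H5::FileAccPropList fapl;
    // 100 MiB file members, each accessed with the default property list.
    fapl.setFamily(100 * 1024 * 1024, H5::FileAccPropList::DEFAULT);

    hsize_t             memb_size = 0;
    H5::FileAccPropList memb_fapl;
    fapl.getFamily(memb_size, memb_fapl);      // memb_size becomes 104857600
}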
//--------------------------------------------------------------------------
-// Function: FileAccPropList::getFamily
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function only in what arguments it
-/// accepts and its return value.
-///\param memb_size - OUT: Size in bytes of each file member
-///\return The file access property list for each file member
-///\exception H5::PropListIException
-// Programmer: Binh-Minh Ribler - April, 2004
+// Function: FileAccPropList::getFamily
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function only in what arguments it
+/// accepts and its return value.
+///\param memb_size - OUT: Size in bytes of each file member
+///\return The file access property list for each file member
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
FileAccPropList FileAccPropList::getFamily(hsize_t& memb_size) const
{
- hid_t memb_plist_id;
- herr_t ret_value = H5Pget_fapl_family( id, &memb_size, &memb_plist_id );
- if( ret_value < 0 )
- {
- throw PropListIException("FileAccPropList::getFamily", "H5Pget_fapl_family failed");
- }
- FileAccPropList memb_plist(memb_plist_id);
- return(memb_plist);
-}
-
-//--------------------------------------------------------------------------
-// Function: FileAccPropList::setSplit
-///\brief Emulates the old split file driver, which stored meta data
-/// in one file and raw data in another file.
-///\param meta_plist - IN: File access plist for the metadata file
-///\param raw_plist - IN: File access plist for the raw data file
-///\param meta_ext - IN: Metadata filename extension as \c char*
-///\param raw_ext - IN: Raw data filename extension as \c char*
-///\exception H5::PropListIException
+ hid_t memb_plist_id;
+ herr_t ret_value = H5Pget_fapl_family(id, &memb_size, &memb_plist_id);
+ if (ret_value < 0)
+ {
+ throw PropListIException("FileAccPropList::getFamily", "H5Pget_fapl_family failed");
+ }
+ FileAccPropList memb_plist(memb_plist_id);
+ return(memb_plist);
+}
+
+//--------------------------------------------------------------------------
+// Function: FileAccPropList::setSplit
+///\brief Emulates the old split file driver, which stored meta data
+/// in one file and raw data in another file.
+///\param meta_plist - IN: File access plist for the metadata file
+///\param raw_plist - IN: File access plist for the raw data file
+///\param meta_ext - IN: Metadata filename extension as \c char*
+///\param raw_ext - IN: Raw data filename extension as \c char*
+///\exception H5::PropListIException
///\par Description
-/// Temporary - For information, please refer to:
+/// Temporary - For information, please refer to:
/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5P.html#Property-SetFaplSplit
-// Programmer: Binh-Minh Ribler - April, 2004
+// Programmer Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
-void FileAccPropList::setSplit(const FileAccPropList& meta_plist, const FileAccPropList& raw_plist, const char* meta_ext, const char* raw_ext ) const
-{
- hid_t meta_pid = meta_plist.getId();
- hid_t raw_pid = raw_plist.getId();
- herr_t ret_value = H5Pset_fapl_split( id, meta_ext, meta_pid, raw_ext, raw_pid );
- if( ret_value < 0 )
+void FileAccPropList::setSplit(const FileAccPropList& meta_plist, const FileAccPropList& raw_plist, const char* meta_ext, const char* raw_ext) const
{
- throw PropListIException("FileAccPropList::setSplit", "H5Pset_fapl_split failed");
- }
+ hid_t meta_pid = meta_plist.getId();
+ hid_t raw_pid = raw_plist.getId();
+ herr_t ret_value = H5Pset_fapl_split(id, meta_ext, meta_pid, raw_ext, raw_pid);
+ if (ret_value < 0)
+ {
+ throw PropListIException("FileAccPropList::setSplit", "H5Pset_fapl_split failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: FileAccPropList::setSplit
-///\brief This is an overloaded member function, provided for convenience.
-/// It takes character arguments as \c H5std_string.
-///\param meta_plist - IN: File access plist for the metadata file
-///\param raw_plist - IN: File access plist for the raw data file
-///\param meta_ext - IN: Metadata filename extension as \c H5std_string
-///\param raw_ext - IN: Raw data filename extension as \c H5std_string
-///\exception H5::PropListIException
-// Programmer: Binh-Minh Ribler - April, 2004
+// Function: FileAccPropList::setSplit
+///\brief This is an overloaded member function, provided for convenience.
+/// It takes character arguments as \c H5std_string.
+///\param meta_plist - IN: File access plist for the metadata file
+///\param raw_plist - IN: File access plist for the raw data file
+///\param meta_ext - IN: Metadata filename extension as \c H5std_string
+///\param raw_ext - IN: Raw data filename extension as \c H5std_string
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
-void FileAccPropList::setSplit(const FileAccPropList& meta_plist, const FileAccPropList& raw_plist, const H5std_string& meta_ext, const H5std_string& raw_ext ) const
+void FileAccPropList::setSplit(const FileAccPropList& meta_plist, const FileAccPropList& raw_plist, const H5std_string& meta_ext, const H5std_string& raw_ext) const
{
- setSplit( meta_plist, raw_plist, meta_ext.c_str(), raw_ext.c_str() );
+ setSplit(meta_plist, raw_plist, meta_ext.c_str(), raw_ext.c_str());
}
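A sketch of the split-driver overloads above, not part of this patch; the extensions shown are simply the documented defaults.

#include "H5Cpp.h"

void demo_split()
{
    H5::FileAccPropList fapl;
    // Metadata goes to <name>.meta, raw data to <name>.raw, both using the
    // default file access property list.
    fapl.setSplit(H5::FileAccPropList::DEFAULT, H5::FileAccPropList::DEFAULT,
                  ".meta", ".raw");
}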
// Stream Virtual File Driver had been removed from the main library.
@@ -347,402 +345,402 @@ void FileAccPropList::setSplit(const FileAccPropList& meta_plist, const FileAccP
// -BMR, March, 2012
//--------------------------------------------------------------------------
-// Function: FileAccPropList::getSieveBufSize
-///\brief Returns the current settings for the data sieve buffer size
-/// property from this property list.
-///\return Data sieve buffer size, in bytes
-///\exception H5::PropListIException
-// Programmer: Binh-Minh Ribler - April, 2004
+// Function: FileAccPropList::getSieveBufSize
+///\brief Returns the current settings for the data sieve buffer size
+/// property from this property list.
+///\return Data sieve buffer size, in bytes
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
size_t FileAccPropList::getSieveBufSize() const
{
- size_t bufsize;
- herr_t ret_value = H5Pget_sieve_buf_size(id, &bufsize);
- if( ret_value < 0 )
- {
- throw PropListIException("FileAccPropList::getSieveBufSize", "H5Pget_sieve_buf_size failed");
- }
- return(bufsize);
+ size_t bufsize;
+ herr_t ret_value = H5Pget_sieve_buf_size(id, &bufsize);
+ if (ret_value < 0)
+ {
+ throw PropListIException("FileAccPropList::getSieveBufSize", "H5Pget_sieve_buf_size failed");
+ }
+ return(bufsize);
}
//--------------------------------------------------------------------------
-// Function: FileAccPropList::setSieveBufSize
-///\brief Sets the maximum size of the data sieve buffer.
-///\param bufsize - IN: Maximum size, in bytes, of data sieve buffer
-///\exception H5::PropListIException
+// Function: FileAccPropList::setSieveBufSize
+///\brief Sets the maximum size of the data sieve buffer.
+///\param bufsize - IN: Maximum size, in bytes, of data sieve buffer
+///\exception H5::PropListIException
///\par Description
-/// For detail on data sieving, please refer to
+/// For detail on data sieving, please refer to
/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5P.html#Property-SetSieveBufSize
-// Programmer: Binh-Minh Ribler - April, 2004
+// Programmer Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
void FileAccPropList::setSieveBufSize(size_t bufsize) const
{
- herr_t ret_value = H5Pset_sieve_buf_size(id, bufsize);
- if( ret_value < 0 )
- {
- throw PropListIException("FileAccPropList::getSieveBufSize", "H5Pget_sieve_buf_size failed");
- }
+ herr_t ret_value = H5Pset_sieve_buf_size(id, bufsize);
+ if (ret_value < 0)
+ {
+ throw PropListIException("FileAccPropList::getSieveBufSize", "H5Pget_sieve_buf_size failed");
+ }
}
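Sketch of the sieve-buffer pair above, not part of this patch; 256 KiB is an illustrative size.

#include "H5Cpp.h"

void demo_sieve()
{
    H5::FileAccPropList fapl;
    fapl.setSieveBufSize(256 * 1024);              // 256 KiB data sieve buffer
    size_t bufsize = fapl.getSieveBufSize();       // returns 262144
}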
//--------------------------------------------------------------------------
-// Function: FileAccPropList::setMetaBlockSize
-///\brief Sets the minimum size of metadata block allocations.
-///\param block_size - IN: Minimum size, in bytes, of metadata
-/// block allocations
-///\exception H5::PropListIException
+// Function: FileAccPropList::setMetaBlockSize
+///\brief Sets the minimum size of metadata block allocations.
+///\param block_size - IN: Minimum size, in bytes, of metadata
+/// block allocations
+///\exception H5::PropListIException
///\par Description
-/// For more detail, please see the C layer Reference Manual at:
+/// For more detail, please see the C layer Reference Manual at:
/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5P.html#Property-SetMetaBlockSize
-// Programmer: Binh-Minh Ribler - April, 2004
+// Programmer Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
void FileAccPropList::setMetaBlockSize(hsize_t &block_size) const
{
- herr_t ret_value = H5Pset_meta_block_size(id, block_size);
- if( ret_value < 0 )
- {
- throw PropListIException("FileAccPropList::setMetaBlockSize", "H5Pset_meta_block_size failed");
- }
+ herr_t ret_value = H5Pset_meta_block_size(id, block_size);
+ if (ret_value < 0)
+ {
+ throw PropListIException("FileAccPropList::setMetaBlockSize", "H5Pset_meta_block_size failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: FileAccPropList::getMetaBlockSize
-///\brief Returns the current metadata block size setting.
-///\return Metadata block size
-///\exception H5::PropListIException
-// Programmer: Binh-Minh Ribler - April, 2004
+// Function: FileAccPropList::getMetaBlockSize
+///\brief Returns the current metadata block size setting.
+///\return Metadata block size
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
hsize_t FileAccPropList::getMetaBlockSize() const
{
- hsize_t block_size;
- herr_t ret_value = H5Pget_meta_block_size(id, &block_size);
- if( ret_value < 0 )
- {
- throw PropListIException("FileAccPropList::getMetaBlockSize", "H5Pget_meta_block_size failed");
- }
- return(block_size);
+ hsize_t block_size;
+ herr_t ret_value = H5Pget_meta_block_size(id, &block_size);
+ if (ret_value < 0)
+ {
+ throw PropListIException("FileAccPropList::getMetaBlockSize", "H5Pget_meta_block_size failed");
+ }
+ return(block_size);
}
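Sketch of the metadata block size pair above, not part of this patch; note that setMetaBlockSize() takes a non-const reference, so a named variable is needed. The 2 KiB value is illustrative.

#include "H5Cpp.h"

void demo_meta_block()
{
    H5::FileAccPropList fapl;
    hsize_t block_size = 2048;                 // 2 KiB metadata allocations
    fapl.setMetaBlockSize(block_size);         // argument passed by reference
    hsize_t current = fapl.getMetaBlockSize(); // reads back 2048
}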
//--------------------------------------------------------------------------
-// Function: FileAccPropList::setLog
-///\brief Modifies this file access property list to use the logging
-/// driver.
-///\param logfile - IN: Name of the log file
-///\param flags - IN: Flags specifying the types of logging activity
-///\param buf_size - IN: Size of the logging buffer
-///\exception H5::PropListIException
+// Function: FileAccPropList::setLog
+///\brief Modifies this file access property list to use the logging
+/// driver.
+///\param logfile - IN: Name of the log file
+///\param flags - IN: Flags specifying the types of logging activity
+///\param buf_size - IN: Size of the logging buffer
+///\exception H5::PropListIException
///\par Description
-/// For detail on \a flags, please refer to
+/// For detail on \a flags, please refer to
/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5P.html#Property-SetFaplLog
-// Programmer: Binh-Minh Ribler - April, 2004
+// Programmer Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
void FileAccPropList::setLog(const char *logfile, unsigned flags, size_t buf_size) const
{
- herr_t ret_value = H5Pset_fapl_log(id, logfile, flags, buf_size);
- if( ret_value < 0 )
- {
- throw PropListIException("FileAccPropList::setLog", "H5Pset_fapl_log failed");
- }
+ herr_t ret_value = H5Pset_fapl_log(id, logfile, flags, buf_size);
+ if (ret_value < 0)
+ {
+ throw PropListIException("FileAccPropList::setLog", "H5Pset_fapl_log failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: FileAccPropList::setLog
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function only in what arguments it
-/// accepts.
-///\param logfile - IN: Name of the log file - string
-///\param flags - IN: Flags specifying the types of logging activity
-///\param buf_size - IN: Size of the logging buffer
-// Programmer: Binh-Minh Ribler - April, 2004
+// Function: FileAccPropList::setLog
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function only in what arguments it
+/// accepts.
+///\param logfile - IN: Name of the log file - string
+///\param flags - IN: Flags specifying the types of logging activity
+///\param buf_size - IN: Size of the logging buffer
+// Programmer Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
void FileAccPropList::setLog(const H5std_string& logfile, unsigned flags, size_t buf_size) const
{
- setLog(logfile.c_str(), flags, buf_size);
+ setLog(logfile.c_str(), flags, buf_size);
}
//--------------------------------------------------------------------------
-// Function: FileAccPropList::setSec2
-///\brief Modifies this file access property list to use the sec2
-/// driver.
+// Function: FileAccPropList::setSec2
+///\brief Modifies this file access property list to use the sec2
+/// driver.
///
-///\exception H5::PropListIException
-// Programmer: Binh-Minh Ribler - April, 2004
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
void FileAccPropList::setSec2() const
{
- herr_t ret_value = H5Pset_fapl_sec2(id);
- if( ret_value < 0 )
- {
- throw PropListIException("FileAccPropList::setSec2", "H5Pset_fapl_sec2 failed");
- }
+ herr_t ret_value = H5Pset_fapl_sec2(id);
+ if (ret_value < 0)
+ {
+ throw PropListIException("FileAccPropList::setSec2", "H5Pset_fapl_sec2 failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: FileAccPropList::setAlignment
-///\brief Sets the alignment properties of this property list.
-///\param threshold - IN: Threshold value for file object size
-///\param alignment - IN: Alignment value
-///\exception H5::PropListIException
+// Function: FileAccPropList::setAlignment
+///\brief Sets the alignment properties of this property list.
+///\param threshold - IN: Threshold value for file object size
+///\param alignment - IN: Alignment value
+///\exception H5::PropListIException
///\par Description
-/// The parameter \a threshold must have a non-negative value.
-/// Note that setting the threshold value to 0 (zero) has the
-/// effect of a special case, forcing everything to be aligned.
-/// The parameter \a alignment must have a positive value.
+/// The parameter \a threshold must have a non-negative value.
+/// Note that setting the threshold value to 0 (zero) has the
+/// effect of a special case, forcing everything to be aligned.
+/// The parameter \a alignment must have a positive value.
///
-/// For detail on \a setting alignment, please refer to
+/// For detail on setting alignment, please refer to
/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5P.html#Property-SetAlignment
-// Programmer: Binh-Minh Ribler - 2000
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void FileAccPropList::setAlignment( hsize_t threshold, hsize_t alignment ) const
+void FileAccPropList::setAlignment(hsize_t threshold, hsize_t alignment) const
{
- herr_t ret_value = H5Pset_alignment( id, threshold, alignment );
- if( ret_value < 0 )
- {
- throw PropListIException("FileAccPropList::setAlignment", "H5Pset_alignment failed");
- }
+ herr_t ret_value = H5Pset_alignment(id, threshold, alignment);
+ if (ret_value < 0)
+ {
+ throw PropListIException("FileAccPropList::setAlignment", "H5Pset_alignment failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: FileAccPropList::getAlignment
-///\brief Returns the current settings for alignment properties from
-/// this property list.
-///\param threshold - OUT: Retrieved threshold value for file object size
-///\param alignment - OUT: Retrieved alignment value
-///\exception H5::PropListIException
-// Programmer: Binh-Minh Ribler - 2000
+// Function: FileAccPropList::getAlignment
+///\brief Returns the current settings for alignment properties from
+/// this property list.
+///\param threshold - OUT: Retrieved threshold value for file object size
+///\param alignment - OUT: Retrieved alignment value
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void FileAccPropList::getAlignment( hsize_t &threshold, hsize_t &alignment ) const
+void FileAccPropList::getAlignment(hsize_t &threshold, hsize_t &alignment) const
{
- herr_t ret_value = H5Pget_alignment( id, &threshold, &alignment );
- if( ret_value < 0 )
- {
- throw PropListIException("FileAccPropList::getAlignment", "H5Pget_alignment failed");
- }
+ herr_t ret_value = H5Pget_alignment(id, &threshold, &alignment);
+ if (ret_value < 0)
+ {
+ throw PropListIException("FileAccPropList::getAlignment", "H5Pget_alignment failed");
+ }
}
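Sketch of the alignment pair above, not part of this patch; aligning every object of 1 KiB or more on 4 KiB boundaries is an illustrative policy.

#include "H5Cpp.h"

void demo_alignment()
{
    H5::FileAccPropList fapl;
    fapl.setAlignment(1024, 4096);             // objects >= 1 KiB land on 4 KiB boundaries

    hsize_t threshold = 0, alignment = 0;
    fapl.getAlignment(threshold, alignment);   // 1024 and 4096
}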
//--------------------------------------------------------------------------
-// Function: FileAccPropList::setMultiType
-///\brief Sets data type for \c MULTI driver.
-///\param dtype - IN: Type of data
-///\exception H5::PropListIException
+// Function: FileAccPropList::setMultiType
+///\brief Sets data type for \c MULTI driver.
+///\param dtype - IN: Type of data
+///\exception H5::PropListIException
///\par Description
-/// More details and valid values for \a dtype can be found at:
+/// More details and valid values for \a dtype can be found at:
/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5P.html#Property-SetMultiType
-// Programmer: Binh-Minh Ribler - April, 2004
+// Programmer Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
void FileAccPropList::setMultiType(H5FD_mem_t dtype) const
{
- herr_t ret_value = H5Pset_multi_type(id, dtype);
- if( ret_value < 0 )
- {
- throw PropListIException("FileAccPropList::setMultiType", "H5Pset_multi_type failed");
- }
+ herr_t ret_value = H5Pset_multi_type(id, dtype);
+ if (ret_value < 0)
+ {
+ throw PropListIException("FileAccPropList::setMultiType", "H5Pset_multi_type failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: FileAccPropList::getMultiType
-///\brief Returns the data type property for \c MULTI driver.
-///\return The data type property
-///\exception H5::PropListIException
+// Function: FileAccPropList::getMultiType
+///\brief Returns the data type property for \c MULTI driver.
+///\return The data type property
+///\exception H5::PropListIException
///\par Description
-/// More details and possible returned values can be found at:
+/// More details and possible returned values can be found at:
/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5P.html#Property-GetMultiType
-// Programmer: Binh-Minh Ribler - April, 2004
+// Programmer Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
H5FD_mem_t FileAccPropList::getMultiType() const
{
- H5FD_mem_t dtype;
- herr_t ret_value = H5Pget_multi_type(id, &dtype);
- if( ret_value < 0 )
- {
- throw PropListIException("FileAccPropList::getMultiType", "H5Pget_multi_type failed");
- }
- return(dtype);
+ H5FD_mem_t dtype;
+ herr_t ret_value = H5Pget_multi_type(id, &dtype);
+ if (ret_value < 0)
+ {
+ throw PropListIException("FileAccPropList::getMultiType", "H5Pget_multi_type failed");
+ }
+ return(dtype);
}
//--------------------------------------------------------------------------
-// Function: FileAccPropList::setCache
-///\brief Sets the meta data cache and raw data chunk cache parameters.
-///\param mdc_nelmts - IN: Number of elements in the meta data cache
-///\param rdcc_nelmts - IN: Number of elements in the raw data chunk cache
-///\param rdcc_nbytes - IN: Total size of the raw data chunk cache, in bytes
-///\param rdcc_w0 - IN: Preemption policy
-///\exception H5::PropListIException
+// Function: FileAccPropList::setCache
+///\brief Sets the meta data cache and raw data chunk cache parameters.
+///\param mdc_nelmts - IN: Number of elements in the meta data cache
+///\param rdcc_nelmts - IN: Number of elements in the raw data chunk cache
+///\param rdcc_nbytes - IN: Total size of the raw data chunk cache, in bytes
+///\param rdcc_w0 - IN: Preemption policy
+///\exception H5::PropListIException
///\par Description
-/// The argument \a rdcc_w0 should hold a value between 0 and 1
-/// inclusive. This value indicates how much chunks that have
-/// been fully read are favored for preemption. A value of zero
-/// means fully read chunks are treated no differently than other
-/// chunks (the preemption is strictly LRU) while a value of one
-/// means fully read chunks are always preempted before other chunks.
-// Programmer: Binh-Minh Ribler - 2000
+/// The argument \a rdcc_w0 should hold a value between 0 and 1
+/// inclusive. This value indicates the degree to which chunks that have
+/// been fully read are favored for preemption. A value of zero
+/// means fully read chunks are treated no differently than other
+/// chunks (the preemption is strictly LRU) while a value of one
+/// means fully read chunks are always preempted before other chunks.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void FileAccPropList::setCache( int mdc_nelmts, size_t rdcc_nelmts, size_t rdcc_nbytes, double rdcc_w0 ) const
+void FileAccPropList::setCache(int mdc_nelmts, size_t rdcc_nelmts, size_t rdcc_nbytes, double rdcc_w0) const
{
- herr_t ret_value = H5Pset_cache( id, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0 );
- if( ret_value < 0 )
- {
- throw PropListIException("FileAccPropList::setCache", "H5Pset_cache failed");
- }
+ herr_t ret_value = H5Pset_cache(id, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0);
+ if (ret_value < 0)
+ {
+ throw PropListIException("FileAccPropList::setCache", "H5Pset_cache failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: FileAccPropList::getCache
-///\brief Queries the meta data cache and raw data chunk cache parameters.
-///\param mdc_nelmts - OUT: Number of elements in the meta data cache
-///\param rdcc_nelmts - OUT: Number of elements in the raw data chunk cache
-///\param rdcc_nbytes - OUT: Total size of the raw data chunk cache, in bytes
-///\param rdcc_w0 - OUT: Preemption policy
-///\exception H5::PropListIException
-// Programmer: Binh-Minh Ribler - 2000
+// Function: FileAccPropList::getCache
+///\brief Queries the meta data cache and raw data chunk cache parameters.
+///\param mdc_nelmts - OUT: Number of elements in the meta data cache
+///\param rdcc_nelmts - OUT: Number of elements in the raw data chunk cache
+///\param rdcc_nbytes - OUT: Total size of the raw data chunk cache, in bytes
+///\param rdcc_w0 - OUT: Preemption policy
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void FileAccPropList::getCache( int& mdc_nelmts, size_t& rdcc_nelmts, size_t& rdcc_nbytes, double& rdcc_w0 ) const
+void FileAccPropList::getCache(int& mdc_nelmts, size_t& rdcc_nelmts, size_t& rdcc_nbytes, double& rdcc_w0) const
{
- herr_t ret_value = H5Pget_cache( id, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0 );
- if( ret_value < 0 )
- {
- throw PropListIException("FileAccPropList::getCache", "H5Pget_cache failed");
- }
+ herr_t ret_value = H5Pget_cache(id, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0);
+ if (ret_value < 0)
+ {
+ throw PropListIException("FileAccPropList::getCache", "H5Pget_cache failed");
+ }
}
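Sketch of the cache pair above, not part of this patch; the 521 chunk slots, 32 MiB raw chunk cache, and 0.75 preemption weight are illustrative values.

#include "H5Cpp.h"

void demo_cache()
{
    H5::FileAccPropList fapl;
    // 521 chunk slots, 32 MiB raw chunk cache, preemption weight 0.75.
    fapl.setCache(0, 521, 32 * 1024 * 1024, 0.75);

    int    mdc_nelmts = 0;
    size_t rdcc_nelmts = 0, rdcc_nbytes = 0;
    double rdcc_w0 = 0.0;
    fapl.getCache(mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0);
}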
//--------------------------------------------------------------------------
-// Function: FileAccPropList::setFcloseDegree
-///\brief Sets the degree for the file close behavior.
-///\param degree - IN:
-///\exception H5::PropListIException
-// Programmer: Binh-Minh Ribler - April, 2004
+// Function: FileAccPropList::setFcloseDegree
+///\brief Sets the degree for the file close behavior.
+///\param degree - IN: The degree for the file close behavior
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
void FileAccPropList::setFcloseDegree(H5F_close_degree_t degree) const
{
- herr_t ret_value = H5Pset_fclose_degree(id, degree);
- if( ret_value < 0 )
- {
- throw PropListIException("FileAccPropList::setFcloseDegree", "H5Pset_fclose_degree failed");
- }
+ herr_t ret_value = H5Pset_fclose_degree(id, degree);
+ if (ret_value < 0)
+ {
+ throw PropListIException("FileAccPropList::setFcloseDegree", "H5Pset_fclose_degree failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: FileAccPropList::getFcloseDegree
-///\brief Returns the degree for the file close behavior.
-///\return The degree for the file close behavior
-///\exception H5::PropListIException
-// Programmer: Binh-Minh Ribler - April, 2004
+// Function: FileAccPropList::getFcloseDegree
+///\brief Returns the degree for the file close behavior.
+///\return The degree for the file close behavior
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
H5F_close_degree_t FileAccPropList::getFcloseDegree() const
{
- H5F_close_degree_t degree;
- herr_t ret_value = H5Pget_fclose_degree(id, &degree);
- if( ret_value < 0 )
- {
- throw PropListIException("FileAccPropList::getFcloseDegree", "H5Pget_fclose_degree failed");
- }
- return(degree);
+ H5F_close_degree_t degree;
+ herr_t ret_value = H5Pget_fclose_degree(id, &degree);
+ if (ret_value < 0)
+ {
+ throw PropListIException("FileAccPropList::getFcloseDegree", "H5Pget_fclose_degree failed");
+ }
+ return(degree);
}
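Sketch of the close-degree pair above, not part of this patch; H5F_CLOSE_STRONG is one of the C library's H5F_close_degree_t values.

#include "H5Cpp.h"

void demo_fclose_degree()
{
    H5::FileAccPropList fapl;
    fapl.setFcloseDegree(H5F_CLOSE_STRONG);              // close all open objects with the file
    H5F_close_degree_t degree = fapl.getFcloseDegree();  // H5F_CLOSE_STRONG
}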
//--------------------------------------------------------------------------
-// Function: FileAccPropList::setGcReferences
-///\brief Sets garbage collecting references flag.
-///\param gc_ref - IN: Flag setting reference garbage collection to
-/// on (1) or off (0).
-///\exception H5::PropListIException
+// Function: FileAccPropList::setGcReferences
+///\brief Sets garbage collecting references flag.
+///\param gc_ref - IN: Flag setting reference garbage collection to
+/// on (1) or off (0).
+///\exception H5::PropListIException
///\par Description
-/// For detail on \a fapl, please refer to
+/// For detail on \a fapl, please refer to
/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5P.html#Property-SetGCReferences
-// Programmer: Binh-Minh Ribler - 2000
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void FileAccPropList::setGcReferences( unsigned gc_ref ) const
+void FileAccPropList::setGcReferences(unsigned gc_ref) const
{
- herr_t ret_value = H5Pset_gc_references( id, gc_ref );
- if( ret_value < 0 )
- {
- throw PropListIException("FileAccPropList::setGcReferences", "H5Pset_gc_references failed");
- }
+ herr_t ret_value = H5Pset_gc_references(id, gc_ref);
+ if (ret_value < 0)
+ {
+ throw PropListIException("FileAccPropList::setGcReferences", "H5Pset_gc_references failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: FileAccPropList::getGcReferences
-///\brief Returns the garbage collecting references setting.
-///\return Garbage collecting references setting, 0 (off) or 1 (on)
-///\exception H5::PropListIException
-// Programmer: Binh-Minh Ribler - 2000
+// Function: FileAccPropList::getGcReferences
+///\brief Returns the garbage collecting references setting.
+///\return Garbage collecting references setting, 0 (off) or 1 (on)
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
unsigned FileAccPropList::getGcReferences() const
{
- unsigned gc_ref;
+ unsigned gc_ref;
- // the name of this routine will be changed to H5Pget_gc_references???
- herr_t ret_value = H5Pget_gc_references( id, &gc_ref );
- if( ret_value < 0 )
- {
- throw PropListIException("FileAccPropList::getGcReferences", "H5Pget_gc_references failed");
- }
- return( gc_ref );
+ // the name of this routine will be changed to H5Pget_gc_references???
+ herr_t ret_value = H5Pget_gc_references(id, &gc_ref);
+ if (ret_value < 0)
+ {
+ throw PropListIException("FileAccPropList::getGcReferences", "H5Pget_gc_references failed");
+ }
+ return(gc_ref);
}
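Sketch of the garbage-collection flag pair above, not part of this patch.

#include "H5Cpp.h"

void demo_gc_references()
{
    H5::FileAccPropList fapl;
    fapl.setGcReferences(1);                   // turn reference garbage collection on
    unsigned gc_on = fapl.getGcReferences();   // returns 1
}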
//--------------------------------------------------------------------------
-// Function: FileAccPropList::setLibverBounds
-///\brief Sets bounds on versions of library format to be used when creating
-/// or writing objects.
-///\param libver_low - IN: Earliest version of the library that will be
-/// used for creating or writing objects
-///\param libver_high - IN: Latest version of the library that will be
-///\exception H5::PropListIException
+// Function: FileAccPropList::setLibverBounds
+///\brief Sets bounds on versions of library format to be used when creating
+/// or writing objects.
+///\param libver_low - IN: Earliest version of the library that will be
+/// used for creating or writing objects
+///\param libver_high - IN: Latest version of the library that will be used for creating or writing objects
+///\exception H5::PropListIException
///\par Description
-/// Valid values of \a libver_low are as follows:
-/// \li \c H5F_LIBVER_EARLIEST (Default)
-/// \li \c H5F_LIBVER_18
-/// \li \c H5F_LIBVER_LATEST
+/// Valid values of \a libver_low are as follows:
+/// \li \c H5F_LIBVER_EARLIEST (Default)
+/// \li \c H5F_LIBVER_18
+/// \li \c H5F_LIBVER_LATEST
///
-/// Valid values of \a libver_high are as follows:
-/// \li \c H5F_LIBVER_18
-/// \li \c H5F_LIBVER_LATEST (Default)
+/// Valid values of \a libver_high are as follows:
+/// \li \c H5F_LIBVER_18
+/// \li \c H5F_LIBVER_LATEST (Default)
///
-/// For more details, please refer to
+/// For more details, please refer to
/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5P.html#Property-SetLibverBounds
-// Programmer: Binh-Minh Ribler - March, 2015
+// Programmer Binh-Minh Ribler - March, 2015
//--------------------------------------------------------------------------
void FileAccPropList::setLibverBounds(H5F_libver_t libver_low, H5F_libver_t libver_high) const
{
herr_t ret_value = H5Pset_libver_bounds(id, libver_low, libver_high);
if (ret_value < 0)
{
- throw PropListIException("FileAccPropList::setLibverBounds", "H5Pset_libver_bounds failed");
+ throw PropListIException("FileAccPropList::setLibverBounds", "H5Pset_libver_bounds failed");
}
}
//--------------------------------------------------------------------------
-// Function: FileAccPropList::getLibverBounds
-///\brief Gets the current settings for the library version format bounds
-/// from a file access property list.
-///\param libver_low - OUT: Earliest version of the library that will be
-/// used for creating or writing objects
-///\param libver_high - OUT: Latest version of the library that will be
-/// used for creating or writing objects
-///\exception H5::PropListIException
+// Function: FileAccPropList::getLibverBounds
+///\brief Gets the current settings for the library version format bounds
+/// from a file access property list.
+///\param libver_low - OUT: Earliest version of the library that will be
+/// used for creating or writing objects
+///\param libver_high - OUT: Latest version of the library that will be
+/// used for creating or writing objects
+///\exception H5::PropListIException
///\par Description
-/// On success, the argument \a libver_low can have the following
-/// values:
-/// \li \c H5F_LIBVER_EARLIEST
-/// \li \c H5F_LIBVER_18
-/// \li \c H5F_LIBVER_LATEST
+/// On success, the argument \a libver_low can have the following
+/// values:
+/// \li \c H5F_LIBVER_EARLIEST
+/// \li \c H5F_LIBVER_18
+/// \li \c H5F_LIBVER_LATEST
///
-/// and \a libver_high:
-/// \li \c H5F_LIBVER_18
-/// \li \c H5F_LIBVER_LATEST
-// Programmer: Binh-Minh Ribler - March, 2015
+/// and \a libver_high:
+/// \li \c H5F_LIBVER_18
+/// \li \c H5F_LIBVER_LATEST
+// Programmer Binh-Minh Ribler - March, 2015
//--------------------------------------------------------------------------
void FileAccPropList::getLibverBounds(H5F_libver_t& libver_low, H5F_libver_t& libver_high) const
{
herr_t ret_value = H5Pget_libver_bounds(id, &libver_low, &libver_high);
- if( ret_value < 0 )
+ if (ret_value < 0)
{
- throw PropListIException("FileAccPropList::getLibverBounds", "H5Pget_libver_bounds failed");
+ throw PropListIException("FileAccPropList::getLibverBounds", "H5Pget_libver_bounds failed");
}
}
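Sketch of the version-bound pair above, not part of this patch, pinning both bounds to the latest format as the listed values allow.

#include "H5Cpp.h"

void demo_libver_bounds()
{
    H5::FileAccPropList fapl;
    fapl.setLibverBounds(H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);

    H5F_libver_t low = H5F_LIBVER_EARLIEST, high = H5F_LIBVER_EARLIEST;
    fapl.getLibverBounds(low, high);           // both come back H5F_LIBVER_LATEST
}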
//--------------------------------------------------------------------------
-// Function: FileAccPropList destructor
-///\brief Noop destructor
-// Programmer Binh-Minh Ribler - 2000
+// Function: FileAccPropList destructor
+///\brief Noop destructor
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
FileAccPropList::~FileAccPropList() {}
diff --git a/c++/src/H5FaccProp.h b/c++/src/H5FaccProp.h
index a0605e0..2563163 100644
--- a/c++/src/H5FaccProp.h
+++ b/c++/src/H5FaccProp.h
@@ -6,17 +6,12 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-// Class FileAccPropList represents the HDF5 file access property list and
-// inherits from DataType.
-
#ifndef __H5FileAccPropList_H
#define __H5FileAccPropList_H
@@ -30,121 +25,127 @@ namespace H5 {
*/
class H5_DLLCPP FileAccPropList : public PropList {
public:
- ///\brief Default file access property list.
- static const FileAccPropList& DEFAULT;
+ ///\brief Default file access property list.
+ static const FileAccPropList& DEFAULT;
+
+ // Creates a file access property list.
+ FileAccPropList();
- // Creates a file access property list.
- FileAccPropList();
+ // Modifies this property list to use the H5FD_STDIO driver
+ void setStdio() const;
- // Modifies this property list to use the H5FD_STDIO driver
- void setStdio() const;
+ // Set file driver for this property list
+ void setDriver(hid_t new_driver_id, const void *new_driver_info) const;
- // Set file driver for this property list
- void setDriver(hid_t new_driver_id, const void *new_driver_info) const;
+ // Returns a low-level file driver identifier.
+ hid_t getDriver() const;
- // Returns a low-level file driver identifier.
- hid_t getDriver() const;
+ // Sets offset for family driver.
+ void setFamilyOffset(hsize_t offset) const;
- // Sets offset for family driver.
- void setFamilyOffset(hsize_t offset) const;
+ // Gets offset for family driver.
+ hsize_t getFamilyOffset() const;
- // Gets offset for family driver.
- hsize_t getFamilyOffset() const;
+ // Modifies this file access property list to use the sec2 driver.
+ void setSec2() const;
- // Modifies this file access property list to use the sec2 driver.
- void setSec2() const;
+ // Modifies this file access property list to use the H5FD_CORE
+ // driver.
+ void setCore (size_t increment, hbool_t backing_store) const;
- // Modifies this file access property list to use the H5FD_CORE
- // driver.
- void setCore (size_t increment, hbool_t backing_store) const;
+ // Queries H5FD_CORE driver properties.
+ void getCore (size_t& increment, hbool_t& backing_store) const;
- // Queries H5FD_CORE driver properties.
- void getCore (size_t& increment, hbool_t& backing_store) const;
+ // Sets this file access properties list to the family driver.
+ void setFamily(hsize_t memb_size, const FileAccPropList& memb_plist) const;
- // Sets this file access properties list to the family driver.
- void setFamily( hsize_t memb_size, const FileAccPropList& memb_plist ) const;
+ // Returns information about the family file access property list.
+ void getFamily(hsize_t& memb_size, FileAccPropList& memb_plist) const;
+ FileAccPropList getFamily(hsize_t& memb_size) const;
- // Returns information about the family file access property list.
- void getFamily(hsize_t& memb_size, FileAccPropList& memb_plist) const;
- FileAccPropList getFamily(hsize_t& memb_size) const;
+ // Emulates the old split file driver,
+ void setSplit(const FileAccPropList& meta_plist,
+ const FileAccPropList& raw_plist,
+ const char* meta_ext = ".meta",
+ const char* raw_ext = ".raw") const;
+ void setSplit(const FileAccPropList& meta_plist,
+ const FileAccPropList& raw_plist,
+ const H5std_string& meta_ext = ".meta",
+ const H5std_string& raw_ext = ".raw") const;
- // Emulates the old split file driver,
- void setSplit(const FileAccPropList& meta_plist,
- const FileAccPropList& raw_plist,
- const char* meta_ext = ".meta",
- const char* raw_ext = ".raw" ) const;
- void setSplit(const FileAccPropList& meta_plist,
- const FileAccPropList& raw_plist,
- const H5std_string& meta_ext = ".meta",
- const H5std_string& raw_ext = ".raw") const;
+ // Sets the maximum size of the data sieve buffer.
+ void setSieveBufSize(size_t bufsize) const;
- // Sets the maximum size of the data sieve buffer.
- void setSieveBufSize(size_t bufsize) const;
+ // Returns the current settings for the data sieve buffer size
+ // property
+ size_t getSieveBufSize() const;
- // Returns the current settings for the data sieve buffer size
- // property
- size_t getSieveBufSize() const;
+ // Sets the minimum size of metadata block allocations.
+ void setMetaBlockSize(hsize_t &block_size) const;
- // Sets the minimum size of metadata block allocations.
- void setMetaBlockSize(hsize_t &block_size) const;
+ // Returns the current metadata block size setting.
+ hsize_t getMetaBlockSize() const;
- // Returns the current metadata block size setting.
- hsize_t getMetaBlockSize() const;
+ // Modifies this file access property list to use the logging driver.
+ void setLog(const char *logfile, unsigned flags, size_t buf_size) const;
+ void setLog(const H5std_string& logfile, unsigned flags, size_t buf_size) const;
- // Modifies this file access property list to use the logging driver.
- void setLog(const char *logfile, unsigned flags, size_t buf_size) const;
- void setLog(const H5std_string& logfile, unsigned flags, size_t buf_size) const;
+ // Sets alignment properties of this file access property list
+ void setAlignment(hsize_t threshold = 1, hsize_t alignment = 1) const;
- // Sets alignment properties of this file access property list
- void setAlignment( hsize_t threshold = 1, hsize_t alignment = 1 ) const;
+ // Retrieves the current settings for alignment properties from
+ // this property list.
+ void getAlignment(hsize_t& threshold, hsize_t& alignment) const;
- // Retrieves the current settings for alignment properties from
- // this property list.
- void getAlignment( hsize_t& threshold, hsize_t& alignment ) const;
+ // Sets data type for multi driver.
+ void setMultiType(H5FD_mem_t dtype) const;
- // Sets data type for multi driver.
- void setMultiType(H5FD_mem_t dtype) const;
+ // Returns the data type property for MULTI driver.
+ H5FD_mem_t getMultiType() const;
- // Returns the data type property for MULTI driver.
- H5FD_mem_t getMultiType() const;
+ // Sets the meta data cache and raw data chunk cache parameters.
+ void setCache(int mdc_nelmts, size_t rdcc_nelmts, size_t rdcc_nbytes, double rdcc_w0) const;
- // Sets the meta data cache and raw data chunk cache parameters.
- void setCache( int mdc_nelmts, size_t rdcc_nelmts, size_t rdcc_nbytes, double rdcc_w0 ) const;
+ // Queries the meta data cache and raw data chunk cache parameters.
+ void getCache(int& mdc_nelmts, size_t& rdcc_nelmts, size_t& rdcc_nbytes, double& rdcc_w0) const;
- // Queries the meta data cache and raw data chunk cache parameters.
- void getCache( int& mdc_nelmts, size_t& rdcc_nelmts, size_t& rdcc_nbytes, double& rdcc_w0 ) const;
+ // Sets the degree for the file close behavior.
+ void setFcloseDegree(H5F_close_degree_t degree) const;
- // Sets the degree for the file close behavior.
- void setFcloseDegree(H5F_close_degree_t degree) const;
+ // Returns the degree for the file close behavior.
+ H5F_close_degree_t getFcloseDegree() const;
- // Returns the degree for the file close behavior.
- H5F_close_degree_t getFcloseDegree() const;
+ // Sets file access property list to use the H5FD_DIRECT driver.
+ void setFileAccDirect(size_t boundary, size_t block_size, size_t cbuf_size) const;
- // Sets garbage collecting references flag.
- void setGcReferences( unsigned gc_ref = 0 ) const;
+ // Retrieves information about the direct file access property list.
+ void getFileAccDirect(size_t &boundary, size_t &block_size, size_t &cbuf_size) const;
- // Returns garbage collecting references setting.
- unsigned getGcReferences() const;
+ // Sets garbage collecting references flag.
+ void setGcReferences(unsigned gc_ref = 0) const;
- // Sets bounds on versions of library format to be used when creating
- // or writing objects.
- void setLibverBounds(H5F_libver_t libver_low, H5F_libver_t libver_high) const;
+ // Returns garbage collecting references setting.
+ unsigned getGcReferences() const;
- // Gets the current settings for the library version format bounds.
- void getLibverBounds(H5F_libver_t& libver_low, H5F_libver_t& libver_high) const;
+ // Sets bounds on versions of library format to be used when creating
+ // or writing objects.
+ void setLibverBounds(H5F_libver_t libver_low, H5F_libver_t libver_high) const;
- ///\brief Returns this class name.
- virtual H5std_string fromClass () const { return("FileAccPropList"); }
+ // Gets the current settings for the library version format bounds.
+ void getLibverBounds(H5F_libver_t& libver_low, H5F_libver_t& libver_high) const;
- // Copy constructor: creates a copy of a FileAccPropList object.
- FileAccPropList( const FileAccPropList& original );
+ ///\brief Returns this class name.
+ virtual H5std_string fromClass () const { return("FileAccPropList"); }
- // Creates a copy of an existing file access property list
- // using the property list id.
- FileAccPropList (const hid_t plist_id);
+ // Copy constructor: creates a copy of a FileAccPropList object.
+ FileAccPropList(const FileAccPropList& original);
- // Noop destructor
- virtual ~FileAccPropList();
+ // Creates a copy of an existing file access property list
+ // using the property list id.
+ FileAccPropList (const hid_t plist_id);
+
+ // Noop destructor
+ virtual ~FileAccPropList();
#ifndef DOXYGEN_SHOULD_SKIP_THIS
@@ -159,6 +160,7 @@ class H5_DLLCPP FileAccPropList : public PropList {
#endif // DOXYGEN_SHOULD_SKIP_THIS
-};
-}
+}; // end of FileAccPropList
+} // namespace H5
+
#endif // __H5FileAccPropList_H
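
A minimal usage sketch for the FileAccPropList wrappers declared above (setCache, setFcloseDegree, setLibverBounds, getAlignment). The file name, cache sizes, and flag choices are illustrative assumptions, not values taken from this changeset; the 4-argument H5File constructor used at the end appears later in this diff (H5File.cpp).

#include "H5Cpp.h"
using namespace H5;

int main()
{
    // Configure a file access property list before creating a file.
    FileAccPropList fapl;
    fapl.setCache(0, 521, 1048576, 0.75);        // raw data chunk cache: 521 slots, 1 MiB, w0 = 0.75
    fapl.setFcloseDegree(H5F_CLOSE_STRONG);      // close all open objects when the file closes
    fapl.setLibverBounds(H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);

    hsize_t threshold = 0, alignment = 0;
    fapl.getAlignment(threshold, alignment);     // read back the current alignment settings

    // "example.h5" is a placeholder name.
    H5File file("example.h5", H5F_ACC_TRUNC, FileCreatPropList::DEFAULT, fapl);
    return 0;
}
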
diff --git a/c++/src/H5FcreatProp.cpp b/c++/src/H5FcreatProp.cpp
index 5a99dba..9674c0a 100644
--- a/c++/src/H5FcreatProp.cpp
+++ b/c++/src/H5FcreatProp.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <string>
@@ -33,7 +31,7 @@ FileCreatPropList* FileCreatPropList::DEFAULT_ = 0;
//--------------------------------------------------------------------------
// Function: FileCreatPropList::getConstant
-// Purpose: Creates a FileCreatPropList object representing the HDF5
+// Purpose Creates a FileCreatPropList object representing the HDF5
// constant H5P_FILE_ACCESS, pointed to by FileCreatPropList::DEFAULT_
// exception H5::PropListIException
// Description
@@ -62,7 +60,7 @@ FileCreatPropList* FileCreatPropList::getConstant()
//--------------------------------------------------------------------------
// Function: FileCreatPropList::deleteConstants
-// Purpose: Deletes the constant object that FileCreatPropList::DEFAULT_
+// Purpose Deletes the constant object that FileCreatPropList::DEFAULT_
// points to.
// Programmer Binh-Minh Ribler - 2015
//--------------------------------------------------------------------------
@@ -73,231 +71,316 @@ void FileCreatPropList::deleteConstants()
}
//--------------------------------------------------------------------------
-// Purpose Constant for default property
+// Purpose Constant for default property
//--------------------------------------------------------------------------
const FileCreatPropList& FileCreatPropList::DEFAULT = *getConstant();
#endif // DOXYGEN_SHOULD_SKIP_THIS
//--------------------------------------------------------------------------
-// Function: FileCreatPropList default constructor
-///\brief Default constructor: Creates a file create property list
-// Programmer Binh-Minh Ribler - 2000
+// Function: FileCreatPropList default constructor
+///\brief Default constructor: Creates a file create property list
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-FileCreatPropList::FileCreatPropList() : PropList( H5P_FILE_CREATE ) {}
+FileCreatPropList::FileCreatPropList() : PropList(H5P_FILE_CREATE) {}
//--------------------------------------------------------------------------
-// Function: FileCreatPropList copy constructor
-///\brief Copy constructor: makes a copy of the original
-/// FileCreatPropList object.
-///\param original - IN: FileCreatPropList instance to copy
-// Programmer Binh-Minh Ribler - 2000
+// Function: FileCreatPropList copy constructor
+///\brief Copy constructor: makes a copy of the original
+/// FileCreatPropList object.
+///\param original - IN: FileCreatPropList instance to copy
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-FileCreatPropList::FileCreatPropList( const FileCreatPropList& original ) : PropList( original ) {}
+FileCreatPropList::FileCreatPropList(const FileCreatPropList& original) : PropList( original ) {}
//--------------------------------------------------------------------------
-// Function: FileCreatPropList overloaded constructor
-///\brief Creates a file creation property list using the id of an
-/// existing one.
-///\param plist_id - IN: FileCreatPropList id to use
-// Programmer Binh-Minh Ribler - 2000
+// Function: FileCreatPropList overloaded constructor
+///\brief Creates a file creation property list using the id of an
+/// existing one.
+///\param plist_id - IN: FileCreatPropList id to use
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
FileCreatPropList::FileCreatPropList(const hid_t plist_id) : PropList(plist_id) {}
#ifndef H5_NO_DEPRECATED_SYMBOLS
//--------------------------------------------------------------------------
-// Function: FileCreatPropList::getVersion
-///\brief Retrieves version information for various parts of a file.
-///\param super - OUT: The file super block.
-///\param freelist - OUT: The global free list.
-///\param stab - OUT: The root symbol table entry.
-///\param shhdr - OUT: Shared object headers.
-///\exception H5::PropListIException
+// Function: FileCreatPropList::getVersion
+///\brief Retrieves version information for various parts of a file.
+///\param super - OUT: The file super block.
+///\param freelist - OUT: The global free list.
+///\param stab - OUT: The root symbol table entry.
+///\param shhdr - OUT: Shared object headers.
+///\exception H5::PropListIException
///\par Description
-/// Any (or even all) of the output arguments can be null pointers.
-// Programmer Binh-Minh Ribler - 2000
+/// Any (or even all) of the output arguments can be null pointers.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
void FileCreatPropList::getVersion(unsigned& super, unsigned& freelist, unsigned& stab, unsigned& shhdr) const
{
- herr_t ret_value = H5Pget_version( id, &super, &freelist, &stab, &shhdr );
- if( ret_value < 0 )
- {
- throw PropListIException("FileCreatPropList::getVersion",
- "H5Pget_version failed");
- }
+ herr_t ret_value = H5Pget_version(id, &super, &freelist, &stab, &shhdr);
+ if (ret_value < 0)
+ {
+ throw PropListIException("FileCreatPropList::getVersion",
+ "H5Pget_version failed");
+ }
}
#endif /* H5_NO_DEPRECATED_SYMBOLS */
//--------------------------------------------------------------------------
-// Function: FileCreatPropList::setUserblock
-///\brief Sets the user block size field of this file creation property list.
-///\param size - IN: User block size to be set, in bytes
-///\exception H5::PropListIException
+// Function: FileCreatPropList::setUserblock
+///\brief Sets the user block size field of this file creation property list.
+///\param size - IN: User block size to be set, in bytes
+///\exception H5::PropListIException
///\par Description
-/// The default user block size is 0; it may be set to any power
-/// of 2 equal to 512 or greater (512, 1024, 2048, etc.)
-// Programmer Binh-Minh Ribler - 2000
+/// The default user block size is 0; it may be set to any power
+/// of 2 equal to 512 or greater (512, 1024, 2048, etc.)
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void FileCreatPropList::setUserblock( hsize_t size ) const
+void FileCreatPropList::setUserblock(hsize_t size) const
{
- herr_t ret_value = H5Pset_userblock( id, size);
- if( ret_value < 0 )
- {
- throw PropListIException("FileCreatPropList::setUserblock",
- "H5Pset_userblock failed");
- }
+ herr_t ret_value = H5Pset_userblock(id, size);
+ if (ret_value < 0)
+ {
+ throw PropListIException("FileCreatPropList::setUserblock",
+ "H5Pset_userblock failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: FileCreatPropList::getUserblock
-///\brief Returns the user block size of this file creation property list.
-///\return User block size
-///\exception H5::PropListIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: FileCreatPropList::getUserblock
+///\brief Returns the user block size of this file creation property list.
+///\return User block size
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
hsize_t FileCreatPropList::getUserblock() const
{
- hsize_t userblock_size;
- herr_t ret_value = H5Pget_userblock( id, &userblock_size );
- if( ret_value < 0 )
- {
- throw PropListIException("FileCreatPropList::getUserblock",
- "H5Pget_userblock failed");
- }
- return( userblock_size );
+ hsize_t userblock_size;
+ herr_t ret_value = H5Pget_userblock(id, &userblock_size);
+ if (ret_value < 0)
+ {
+ throw PropListIException("FileCreatPropList::getUserblock",
+ "H5Pget_userblock failed");
+ }
+ return(userblock_size);
}
//--------------------------------------------------------------------------
-// Function: FileCreatPropList::setSizes
-///\brief Sets the byte size of the offsets and lengths used to
-/// address objects in an HDF5 file.
-///\param sizeof_addr - IN: Size of an object offset in bytes
-///\param sizeof_size - IN: Size of an object length in bytes.
-///\exception H5::PropListIException
+// Function: FileCreatPropList::setSizes
+///\brief Sets the byte size of the offsets and lengths used to
+/// address objects in an HDF5 file.
+///\param sizeof_addr - IN: Size of an object offset in bytes
+///\param sizeof_size - IN: Size of an object length in bytes.
+///\exception H5::PropListIException
///\par Description
-/// For information on setting sizes, please refer to the
-/// C layer Reference Manual at:
+/// For information on setting sizes, please refer to the
+/// C layer Reference Manual at:
/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5P.html#Property-SetSizes
-// Programmer Binh-Minh Ribler - 2000
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void FileCreatPropList::setSizes( size_t sizeof_addr, size_t sizeof_size ) const
+void FileCreatPropList::setSizes(size_t sizeof_addr, size_t sizeof_size) const
{
- herr_t ret_value = H5Pset_sizes( id, sizeof_addr, sizeof_size );
- if( ret_value < 0 )
- {
- throw PropListIException("FileCreatPropList::setSizes",
- "H5Pset_sizes failed");
- }
+ herr_t ret_value = H5Pset_sizes(id, sizeof_addr, sizeof_size);
+ if (ret_value < 0)
+ {
+ throw PropListIException("FileCreatPropList::setSizes",
+ "H5Pset_sizes failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: FileCreatPropList::getSizes
-///\brief Retrieves the size of the offsets and lengths used in an
-/// HDF5 file.
+// Function: FileCreatPropList::getSizes
+///\brief Retrieves the size of the offsets and lengths used in an
+/// HDF5 file.
///
-///\exception H5::PropListIException
-// Programmer Binh-Minh Ribler - 2000
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void FileCreatPropList::getSizes( size_t& sizeof_addr, size_t& sizeof_size ) const
+void FileCreatPropList::getSizes(size_t& sizeof_addr, size_t& sizeof_size) const
{
- herr_t ret_value = H5Pget_sizes( id, &sizeof_addr, &sizeof_size );
- if( ret_value < 0 )
- {
- throw PropListIException("FileCreatPropList::getSizes",
- "H5Pget_sizes failed");
- }
+ herr_t ret_value = H5Pget_sizes(id, &sizeof_addr, &sizeof_size);
+ if (ret_value < 0)
+ {
+ throw PropListIException("FileCreatPropList::getSizes",
+ "H5Pget_sizes failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: FileCreatPropList::setSymk
-///\brief Sets the size of parameters used to control the symbol table
-/// nodes.
-///\param ik - IN: Symbol table tree rank
-///\param lk - IN: Symbol table node size
-///\exception H5::PropListIException
+// Function: FileCreatPropList::setSymk
+///\brief Sets the size of parameters used to control the symbol table
+/// nodes.
+///\param ik - IN: Symbol table tree rank
+///\param lk - IN: Symbol table node size
+///\exception H5::PropListIException
///\par Description
-/// For information, please see the C layer Reference Manual at:
+/// For information, please see the C layer Reference Manual at:
/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5P.html#Property-SetSymK
-// Programmer Binh-Minh Ribler - 2000
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void FileCreatPropList::setSymk( unsigned ik, unsigned lk ) const
+void FileCreatPropList::setSymk(unsigned ik, unsigned lk) const
{
- herr_t ret_value = H5Pset_sym_k( id, ik, lk );
- if( ret_value < 0 )
- {
- throw PropListIException("FileCreatPropList::setSymk",
- "H5Pset_sym_k failed");
- }
+ herr_t ret_value = H5Pset_sym_k(id, ik, lk);
+ if (ret_value < 0)
+ {
+ throw PropListIException("FileCreatPropList::setSymk",
+ "H5Pset_sym_k failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: FileCreatPropList::getSymk
-///\brief Retrieves the size of the symbol table B-tree 1/2 rank and
-/// the symbol table leaf node 1/2 size.
+// Function: FileCreatPropList::getSymk
+///\brief Retrieves the size of the symbol table B-tree 1/2 rank and
+/// the symbol table leaf node 1/2 size.
///
-///\exception H5::PropListIException
+///\exception H5::PropListIException
///\par Description
-/// For information, please see
+/// For information, please see
/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5P.html#Property-GetSymK
-// Programmer Binh-Minh Ribler - 2000
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void FileCreatPropList::getSymk( unsigned& ik, unsigned& lk ) const
+void FileCreatPropList::getSymk(unsigned& ik, unsigned& lk) const
{
- herr_t ret_value = H5Pget_sym_k( id, &ik, &lk );
- if( ret_value < 0 )
- {
- throw PropListIException("FileCreatPropList::getSymk",
- "H5Pget_sym_k failed");
- }
+ herr_t ret_value = H5Pget_sym_k(id, &ik, &lk);
+ if (ret_value < 0)
+ {
+ throw PropListIException("FileCreatPropList::getSymk",
+ "H5Pget_sym_k failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: FileCreatPropList::setIstorek
-///\brief Sets the size of the parameter used to control the B-trees
-/// for indexing chunked datasets.
-///\param ik - IN: 1/2 rank of chunked storage B-tree
-///\exception H5::PropListIException
+// Function: FileCreatPropList::setIstorek
+///\brief Sets the size of the parameter used to control the B-trees
+/// for indexing chunked datasets.
+///\param ik - IN: 1/2 rank of chunked storage B-tree
+///\exception H5::PropListIException
///\par Description
-/// For information, please see the C layer Reference Manual at:
+/// For information, please see the C layer Reference Manual at:
/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5P.html#Property-SetIstoreK
-// Programmer Binh-Minh Ribler - 2000
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void FileCreatPropList::setIstorek( unsigned ik ) const
+void FileCreatPropList::setIstorek(unsigned ik) const
{
- herr_t ret_value = H5Pset_istore_k( id, ik );
- if( ret_value < 0 )
- {
- throw PropListIException("FileCreatPropList::setIstorek",
- "H5Pset_istore_k failed");
- }
+ herr_t ret_value = H5Pset_istore_k(id, ik);
+ if (ret_value < 0)
+ {
+ throw PropListIException("FileCreatPropList::setIstorek",
+ "H5Pset_istore_k failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: FileCreatPropList::getIstorek
-///\brief Returns the 1/2 rank of an indexed storage B-tree.
-///\return 1/2 rank of chunked storage B-tree
-///\exception H5::PropListIException
+// Function: FileCreatPropList::getIstorek
+///\brief Returns the 1/2 rank of an indexed storage B-tree.
+///\return 1/2 rank of chunked storage B-tree
+///\exception H5::PropListIException
///\par Description
-/// For information, please see
+/// For information, please see
/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5P.html#Property-GetIstoreK
-// Programmer Binh-Minh Ribler - 2000
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
unsigned FileCreatPropList::getIstorek() const
{
- unsigned ik;
- herr_t ret_value = H5Pget_istore_k( id, &ik );
- if( ret_value < 0 )
- {
- throw PropListIException("FileCreatPropList::getIstorek",
- "H5Pget_istore_k failed");
- }
- return( ik );
+ unsigned ik;
+ herr_t ret_value = H5Pget_istore_k(id, &ik);
+ if (ret_value < 0)
+ {
+ throw PropListIException("FileCreatPropList::getIstorek",
+ "H5Pget_istore_k failed");
+ }
+ return(ik);
+}
+
+//--------------------------------------------------------------------------
+// Function: FileCreatPropList::setFileSpaceStrategy
+///\brief Sets the strategy and the threshold value that the library
+/// will employ in managing file space.
+///\param strategy - IN: Strategy for file space management
+///\param persist - IN: Whether to persist free-space
+///\param threshold - IN: Free-space section threshold. The library
+/// default is 1, which is to track all free-space sections.
+///\exception H5::PropListIException
+///\par Description
+/// If the given strategy is zero, the property will not be
+/// changed and the existing strategy will be retained.
+/// If the given threshold value is zero, the property will not be
+/// changed and the existing threshold will be retained.
+/// For information, please see the C layer Reference Manual at:
+/// https://support.hdfgroup.org/HDF5/doc/RM/RM_H5P.html#Property-SetFileSpace
+// Programmer Binh-Minh Ribler - Feb, 2017
+//--------------------------------------------------------------------------
+void FileCreatPropList::setFileSpaceStrategy(H5F_fspace_strategy_t strategy, hbool_t persist, hsize_t threshold) const
+{
+ herr_t ret_value = H5Pset_file_space_strategy(id, strategy, persist, threshold);
+ if (ret_value < 0)
+ {
+ throw PropListIException("FileCreatPropList::setFileSpaceStrategy",
+ "H5Pset_file_space_strategy failed");
+ }
+}
+
+//--------------------------------------------------------------------------
+// Function: FileCreatPropList::getFileSpaceStrategy
+///\brief Retrieves the strategy, persist, and threshold that the library
+/// uses in managing file space.
+///\param strategy - OUT: Strategy for file space management
+///\param persist - OUT: Whether to persist free-space
+///\param threshold - OUT: Free-space section threshold
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - Feb, 2017
+//--------------------------------------------------------------------------
+void FileCreatPropList::getFileSpaceStrategy(H5F_fspace_strategy_t& strategy, hbool_t& persist, hsize_t& threshold) const
+{
+ herr_t ret_value = H5Pget_file_space_strategy(id, &strategy, &persist, &threshold);
+ if (ret_value < 0)
+ {
+ throw PropListIException("FileCreatPropList::getFileSpaceStrategy",
+ "H5Pget_file_space_strategy failed");
+ }
+}
+
+//--------------------------------------------------------------------------
+// Function: FileCreatPropList::setFileSpacePagesize
+///\brief Sets the file space page size for paged aggregation.
+///\param fsp_psize - IN: Filespace's page size
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - Feb, 2017
+//--------------------------------------------------------------------------
+void FileCreatPropList::setFileSpacePagesize(hsize_t fsp_psize) const
+{
+ herr_t ret_value = H5Pset_file_space_page_size(id, fsp_psize);
+ if (ret_value < 0)
+ {
+ throw PropListIException("FileCreatPropList::setFileSpacePagesize",
+ "H5Pset_file_space_page_size failed");
+ }
+}
+
+//--------------------------------------------------------------------------
+// Function: FileCreatPropList::getFileSpacePagesize
+///\brief Returns the file space page size for aggregating small
+/// metadata or raw data.
+///\return File space page size
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - Feb, 2017
+//--------------------------------------------------------------------------
+hsize_t FileCreatPropList::getFileSpacePagesize() const
+{
+ hsize_t fsp_psize = 0;
+ herr_t ret_value = H5Pget_file_space_page_size(id, &fsp_psize);
+ if (ret_value < 0)
+ {
+ throw PropListIException("FileCreatPropList::getFileSpacePagesize",
+ "H5Pget_file_space_page_size failed");
+ }
+ return(fsp_psize);
}
//--------------------------------------------------------------------------
-// Function: FileCreatPropList destructor
-///\brief Noop destructor.
-// Programmer Binh-Minh Ribler - 2000
+// Function: FileCreatPropList destructor
+///\brief Noop destructor.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
FileCreatPropList::~FileCreatPropList() {}
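
A minimal sketch of the new file-space wrappers implemented above (setFileSpaceStrategy, getFileSpaceStrategy, setFileSpacePagesize, getFileSpacePagesize). The strategy constant H5F_FSPACE_STRATEGY_PAGE comes from the 1.10 C API and is assumed here; the page size, persist/threshold values, and file name are illustrative only.

#include "H5Cpp.h"
#include <iostream>
using namespace H5;

int main()
{
    // Request paged aggregation with persistent free-space tracking.
    FileCreatPropList fcpl;
    fcpl.setFileSpaceStrategy(H5F_FSPACE_STRATEGY_PAGE, 1, 1);   // strategy, persist, threshold
    fcpl.setFileSpacePagesize(4096);                             // 4 KiB file-space pages

    // Read the settings back through the new getters.
    H5F_fspace_strategy_t strategy;
    hbool_t persist = 0;
    hsize_t threshold = 0;
    fcpl.getFileSpaceStrategy(strategy, persist, threshold);
    hsize_t page_size = fcpl.getFileSpacePagesize();
    std::cout << "persist=" << (int)persist << " threshold=" << threshold
              << " page size=" << page_size << std::endl;

    // Failures surface as PropListIException/FileIException, as in the code above.
    H5File file("paged.h5", H5F_ACC_TRUNC, fcpl, FileAccPropList::DEFAULT);
    return 0;
}
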
diff --git a/c++/src/H5FcreatProp.h b/c++/src/H5FcreatProp.h
index 1ac925e..da620d4 100644
--- a/c++/src/H5FcreatProp.h
+++ b/c++/src/H5FcreatProp.h
@@ -6,12 +6,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef __H5FileCreatPropList_H
@@ -27,70 +25,85 @@ namespace H5 {
*/
class H5_DLLCPP FileCreatPropList : public PropList {
public:
- ///\brief Default file creation property list.
- static const FileCreatPropList& DEFAULT;
+ ///\brief Default file creation property list.
+ static const FileCreatPropList& DEFAULT;
- // Creates a file create property list.
- FileCreatPropList();
+ // Creates a file create property list.
+ FileCreatPropList();
#ifndef H5_NO_DEPRECATED_SYMBOLS
- // Retrieves version information for various parts of a file.
- void getVersion( unsigned& super, unsigned& freelist, unsigned& stab, unsigned& shhdr ) const;
+ // Retrieves version information for various parts of a file.
+ void getVersion(unsigned& super, unsigned& freelist, unsigned& stab, unsigned& shhdr) const;
#endif /* H5_NO_DEPRECATED_SYMBOLS */
- // Sets the userblock size field of a file creation property list.
- void setUserblock( hsize_t size ) const;
+ // Sets the userblock size field of a file creation property list.
+ void setUserblock(hsize_t size) const;
- // Gets the size of a user block in this file creation property list.
- hsize_t getUserblock() const;
+ // Gets the size of a user block in this file creation property list.
+ hsize_t getUserblock() const;
- // Retrieves the size-of address and size quantities stored in a
- // file according to this file creation property list.
- void getSizes( size_t& sizeof_addr, size_t& sizeof_size ) const;
+ // Retrieves the size-of address and size quantities stored in a
+ // file according to this file creation property list.
+ void getSizes(size_t& sizeof_addr, size_t& sizeof_size) const;
- // Sets file size-of addresses and sizes.
- void setSizes( size_t sizeof_addr = 4, size_t sizeof_size = 4 ) const;
+ // Sets file size-of addresses and sizes.
+ void setSizes(size_t sizeof_addr = 4, size_t sizeof_size = 4) const;
- // Retrieves the size of the symbol table B-tree 1/2 rank and the
- // symbol table leaf node 1/2 size.
- void getSymk( unsigned& int_nodes_k, unsigned& leaf_nodes_k ) const;
+ // Retrieves the size of the symbol table B-tree 1/2 rank and the
+ // symbol table leaf node 1/2 size.
+ void getSymk(unsigned& int_nodes_k, unsigned& leaf_nodes_k) const;
- // Sets the size of parameters used to control the symbol table nodes.
- void setSymk( unsigned int_nodes_k, unsigned leaf_nodes_k ) const;
+ // Sets the size of parameters used to control the symbol table nodes.
+ void setSymk(unsigned int_nodes_k, unsigned leaf_nodes_k) const;
- // Returns the 1/2 rank of an indexed storage B-tree.
- unsigned getIstorek() const;
+ // Returns the 1/2 rank of an indexed storage B-tree.
+ unsigned getIstorek() const;
- // Sets the size of parameter used to control the B-trees for
- // indexing chunked datasets.
- void setIstorek( unsigned ik ) const;
+ // Sets the size of parameter used to control the B-trees for
+ // indexing chunked datasets.
+ void setIstorek(unsigned ik) const;
- ///\brief Returns this class name.
- virtual H5std_string fromClass () const { return("FileCreatPropList"); }
+        // Sets the strategy and the threshold value that the library
+        // will employ in managing file space.
+ void setFileSpaceStrategy(H5F_fspace_strategy_t strategy, hbool_t persist, hsize_t threshold) const;
- // Copy constructor: creates a copy of a FileCreatPropList object.
- FileCreatPropList(const FileCreatPropList& orig);
+        // Retrieves the strategy, persist, and threshold that the library
+        // uses in managing file space.
+ void getFileSpaceStrategy(H5F_fspace_strategy_t& strategy, hbool_t& persist, hsize_t& threshold) const;
- // Creates a copy of an existing file create property list
- // using the property list id.
- FileCreatPropList (const hid_t plist_id);
+ // Sets the file space page size for paged aggregation.
+ void setFileSpacePagesize(hsize_t fsp_psize) const;
- // Noop destructor
- virtual ~FileCreatPropList();
+        // Returns the file space page size for aggregating small metadata
+        // or raw data.
+ hsize_t getFileSpacePagesize() const;
+
+ ///\brief Returns this class name.
+ virtual H5std_string fromClass() const { return("FileCreatPropList"); }
+
+ // Copy constructor: creates a copy of a FileCreatPropList object.
+ FileCreatPropList(const FileCreatPropList& orig);
+
+ // Creates a copy of an existing file create property list
+ // using the property list id.
+ FileCreatPropList(const hid_t plist_id);
+
+ // Noop destructor
+ virtual ~FileCreatPropList();
#ifndef DOXYGEN_SHOULD_SKIP_THIS
- // Deletes the global constant, should only be used by the library
- static void deleteConstants();
+ // Deletes the global constant, should only be used by the library
+ static void deleteConstants();
private:
- static FileCreatPropList* DEFAULT_;
+ static FileCreatPropList* DEFAULT_;
- // Creates the global constant, should only be used by the library
- static FileCreatPropList* getConstant();
+ // Creates the global constant, should only be used by the library
+ static FileCreatPropList* getConstant();
#endif // DOXYGEN_SHOULD_SKIP_THIS
-};
-}
+}; // end of FileCreatPropList
+} // namespace H5
+
#endif // __H5FileCreatPropList_H
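
The H5File.cpp diff that follows adds an H5File::getFileInfo wrapper around H5Fget_info2, next to the existing free-space and open-object queries. A minimal sketch of querying an open file is shown below; the file name is a placeholder, and H5F_info2_t (superblock, free-space, and shared-object-header-message information) is the 1.10 C API struct.

#include "H5Cpp.h"
#include <iostream>
using namespace H5;

int main()
{
    // Open an existing file read-only; "example.h5" is hypothetical.
    H5File file("example.h5", H5F_ACC_RDONLY);

    // New in this changeset: general file information via H5Fget_info2.
    H5F_info2_t info;
    file.getFileInfo(info);

    // Existing queries, shown for context.
    hssize_t free_space = file.getFreeSpace();
    ssize_t  open_objs  = file.getObjCount(H5F_OBJ_ALL);

    std::cout << "free space: " << free_space
              << ", open objects: " << open_objs << std::endl;
    return 0;
}
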
diff --git a/c++/src/H5File.cpp b/c++/src/H5File.cpp
index ff897d7..3a0b54d 100644
--- a/c++/src/H5File.cpp
+++ b/c++/src/H5File.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifdef OLD_HEADER_FILENAME
@@ -29,8 +27,10 @@
#include "H5OcreatProp.h"
#include "H5DxferProp.h"
#include "H5DcreatProp.h"
+#include "H5LaccProp.h"
#include "H5Location.h"
#include "H5Object.h"
+#include "H5CommonFG.h"
#include "H5Group.h"
#include "H5AbstractDs.h"
#include "H5DataSpace.h"
@@ -43,74 +43,74 @@ namespace H5 {
using std::endl;
//--------------------------------------------------------------------------
-// Function H5File default constructor
-///\brief Default constructor: creates a stub H5File object.
-// Programmer Binh-Minh Ribler - 2000
+// Function H5File default constructor
+///\brief Default constructor: creates a stub H5File object.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
H5File::H5File() : Group(), id(H5I_INVALID_HID) {}
//--------------------------------------------------------------------------
-// Function: H5File overloaded constructor
-///\brief Creates or opens an HDF5 file depending on the parameter flags.
-///\param name - IN: Name of the file
-///\param flags - IN: File access flags
-///\param create_plist - IN: File creation property list, used when
-/// modifying default file meta-data. Default to
-/// FileCreatPropList::DEFAULT
-///\param access_plist - IN: File access property list. Default to
-/// FileAccPropList::DEFAULT
+// Function: H5File overloaded constructor
+///\brief Creates or opens an HDF5 file depending on the parameter flags.
+///\param name - IN: Name of the file
+///\param flags - IN: File access flags
+///\param create_plist - IN: File creation property list, used when
+/// modifying default file meta-data. Default to
+/// FileCreatPropList::DEFAULT
+///\param access_plist - IN: File access property list. Default to
+/// FileAccPropList::DEFAULT
///\par Description
-/// Valid values of \a flags include:
-/// \li \c H5F_ACC_TRUNC - Truncate file, if it already exists,
-/// erasing all data previously stored in
-/// the file.
-/// \li \c H5F_ACC_EXCL - Fail if file already exists.
-/// \c H5F_ACC_TRUNC and \c H5F_ACC_EXCL are mutually exclusive
-/// \li \c H5F_ACC_RDONLY - Open file as read-only, if it already
-/// exists, and fail, otherwise
-/// \li \c H5F_ACC_RDWR - Open file for read/write, if it already
-/// exists, and fail, otherwise
+/// Valid values of \a flags include:
+/// \li \c H5F_ACC_TRUNC - Truncate file, if it already exists,
+/// erasing all data previously stored in
+/// the file.
+/// \li \c H5F_ACC_EXCL - Fail if file already exists.
+/// \c H5F_ACC_TRUNC and \c H5F_ACC_EXCL are mutually exclusive
+/// \li \c H5F_ACC_RDONLY - Open file as read-only, if it already
+/// exists, and fail, otherwise
+/// \li \c H5F_ACC_RDWR - Open file for read/write, if it already
+/// exists, and fail, otherwise
///\par
-/// For info on file creation in the case of an already-open file,
-/// please refer to the \b Special \b case section in the C layer
-/// Reference Manual at:
+/// For info on file creation in the case of an already-open file,
+/// please refer to the \b Special \b case section in the C layer
+/// Reference Manual at:
/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5F.html#File-Create
-// Notes With a PGI compiler (~2012-2013), the exception thrown by p_get_file
-// could not be caught in the applications. Added try block here
-// to catch then re-throw it. -BMR 2013/03/21
-// Programmer Binh-Minh Ribler - 2000
+// Notes        With a PGI compiler (~2012-2013), the exception thrown by
+// p_get_file could not be caught in the applications. Added try
+// block here to catch then re-throw it. -BMR 2013/03/21
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-H5File::H5File( const char* name, unsigned int flags, const FileCreatPropList& create_plist, const FileAccPropList& access_plist ) : Group(), id(H5I_INVALID_HID)
+H5File::H5File(const char* name, unsigned int flags, const FileCreatPropList& create_plist, const FileAccPropList& access_plist) : Group(), id(H5I_INVALID_HID)
{
try {
- p_get_file(name, flags, create_plist, access_plist);
+ p_get_file(name, flags, create_plist, access_plist);
} catch (FileIException& open_file) {
- throw open_file;
+ throw open_file;
}
}
//--------------------------------------------------------------------------
-// Function: H5File overloaded constructor
-///\brief This is another overloaded constructor. It differs from the
-/// above constructor only in the type of the \a name argument.
-///\param name - IN: Name of the file - \c H5std_string
-///\param flags - IN: File access flags
-///\param create_plist - IN: File creation property list, used when
-/// modifying default file meta-data. Default to
-/// FileCreatPropList::DEFAULT
-///\param access_plist - IN: File access property list. Default to
-/// FileAccPropList::DEFAULT
-// Notes With a PGI compiler (~2012-2013), the exception thrown by p_get_file
-// could not be caught in the applications. Added try block here
-// to catch then re-throw it. -BMR 2013/03/21
-// Programmer Binh-Minh Ribler - 2000
-//--------------------------------------------------------------------------
-H5File::H5File( const H5std_string& name, unsigned int flags, const FileCreatPropList& create_plist, const FileAccPropList& access_plist ) : Group(), id(H5I_INVALID_HID)
+// Function: H5File overloaded constructor
+///\brief This is another overloaded constructor. It differs from the
+/// above constructor only in the type of the \a name argument.
+///\param name - IN: Name of the file - \c H5std_string
+///\param flags - IN: File access flags
+///\param create_plist - IN: File creation property list, used when
+/// modifying default file meta-data. Default to
+/// FileCreatPropList::DEFAULT
+///\param access_plist - IN: File access property list. Default to
+/// FileAccPropList::DEFAULT
+// Notes        With a PGI compiler (~2012-2013), the exception thrown by
+// p_get_file could not be caught in the applications. Added try
+// block here to catch then re-throw it. -BMR 2013/03/21
+// Programmer Binh-Minh Ribler - 2000
+//--------------------------------------------------------------------------
+H5File::H5File(const H5std_string& name, unsigned int flags, const FileCreatPropList& create_plist, const FileAccPropList& access_plist) : Group(), id(H5I_INVALID_HID)
{
try {
- p_get_file(name.c_str(), flags, create_plist, access_plist);
+ p_get_file(name.c_str(), flags, create_plist, access_plist);
} catch (FileIException& open_file) {
- throw open_file;
+ throw open_file;
}
}
@@ -118,49 +118,49 @@ H5File::H5File( const H5std_string& name, unsigned int flags, const FileCreatPro
//--------------------------------------------------------------------------
// This function is private and contains common code between the
// constructors taking a string or a char*
-// Programmer Binh-Minh Ribler - 2000
+// Programmer Binh-Minh Ribler - 2000
// Modification
-// - removed H5F_ACC_CREAT because H5Fcreate will fail with
-// H5F_ACC_CREAT. - BMR, Sep 17, 2014
+// - removed H5F_ACC_CREAT because H5Fcreate will fail with
+// H5F_ACC_CREAT. - BMR, Sep 17, 2014
//--------------------------------------------------------------------------
void H5File::p_get_file(const char* name, unsigned int flags, const FileCreatPropList& create_plist, const FileAccPropList& access_plist)
{
// These bits only set for creation, so if any of them are set,
// create the file.
- if( flags & (H5F_ACC_EXCL|H5F_ACC_TRUNC))
+ if (flags & (H5F_ACC_EXCL|H5F_ACC_TRUNC))
{
- hid_t create_plist_id = create_plist.getId();
- hid_t access_plist_id = access_plist.getId();
- id = H5Fcreate( name, flags, create_plist_id, access_plist_id );
- if( id < 0 ) // throw an exception when open/create fail
- {
- throw FileIException("H5File constructor", "H5Fcreate failed");
- }
+ hid_t create_plist_id = create_plist.getId();
+ hid_t access_plist_id = access_plist.getId();
+ id = H5Fcreate(name, flags, create_plist_id, access_plist_id);
+ if (id < 0) // throw an exception when open/create fail
+ {
+ throw FileIException("H5File constructor", "H5Fcreate failed");
+ }
}
// Open the file if none of the bits above are set.
else
{
- hid_t access_plist_id = access_plist.getId();
- id = H5Fopen( name, flags, access_plist_id );
- if( id < 0 ) // throw an exception when open/create fail
- {
- throw FileIException("H5File constructor", "H5Fopen failed");
- }
+ hid_t access_plist_id = access_plist.getId();
+ id = H5Fopen(name, flags, access_plist_id);
+ if (id < 0) // throw an exception when open/create fail
+ {
+ throw FileIException("H5File constructor", "H5Fopen failed");
+ }
}
}
//--------------------------------------------------------------------------
-// Function: H5File overloaded constructor
-///\brief Creates an H5File object using an existing file id.
-///\param existing_id - IN: Id of an existing file
-// Programmer Binh-Minh Ribler - 2015
+// Function: H5File overloaded constructor
+///\brief Creates an H5File object using an existing file id.
+///\param existing_id - IN: Id of an existing file
+// Programmer Binh-Minh Ribler - 2015
// Description
-// Mar 29, 2015
-// Added in responding to a request from user Jason Newton.
-// However, it is not recommended to use the private member "id"
-// in applications. Unlike other situations, where similar
-// constructor is needed by the library in order to return
-// an object, H5File doesn't need it. -BMR (HDFFV-8766 partially)
+// Mar 29, 2015
+//              Added in response to a request from user Jason Newton.
+//              However, it is not recommended to use the private member "id"
+//              in applications. Unlike other situations, where a similar
+//              constructor is needed by the library in order to return
+//              an object, H5File doesn't need it. -BMR (HDFFV-8766 partially)
//--------------------------------------------------------------------------
H5File::H5File(hid_t existing_id) : Group()
{
@@ -171,11 +171,11 @@ H5File::H5File(hid_t existing_id) : Group()
#endif // DOXYGEN_SHOULD_SKIP_THIS
//--------------------------------------------------------------------------
-// Function: H5File copy constructor
-///\brief Copy constructor: makes a copy of the original
-/// H5File object.
-///\param original - IN: H5File instance to copy
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5File copy constructor
+///\brief Copy constructor: makes a copy of the original
+/// H5File object.
+///\param original - IN: H5File instance to copy
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
H5File::H5File(const H5File& original) : Group()
{
@@ -184,57 +184,57 @@ H5File::H5File(const H5File& original) : Group()
}
//--------------------------------------------------------------------------
-// Function: H5File::isHdf5 (static)
-///\brief Determines whether a file in HDF5 format. (Static)
-///\param name - IN: Name of the file
-///\return true if the file is in HDF5 format, and false, otherwise
-///\exception H5::FileIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5File::isHdf5 (static)
+///\brief      Determines whether a file is in HDF5 format. (Static)
+///\param name - IN: Name of the file
+///\return true if the file is in HDF5 format, and false, otherwise
+///\exception H5::FileIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
bool H5File::isHdf5(const char* name)
{
- // Calls C routine H5Fis_hdf5 to determine whether the file is in
- // HDF5 format. It returns positive value, 0, or negative value
- htri_t ret_value = H5Fis_hdf5( name );
- if( ret_value > 0 )
- return true;
- else if( ret_value == 0 )
- return false;
- else // Raise exception when H5Fis_hdf5 returns a negative value
- {
- throw FileIException("H5File::isHdf5", "H5Fis_hdf5 returned negative value");
- }
+ // Calls C routine H5Fis_hdf5 to determine whether the file is in
+ // HDF5 format. It returns positive value, 0, or negative value
+ htri_t ret_value = H5Fis_hdf5(name);
+ if (ret_value > 0)
+ return true;
+ else if (ret_value == 0)
+ return false;
+ else // Raise exception when H5Fis_hdf5 returns a negative value
+ {
+ throw FileIException("H5File::isHdf5", "H5Fis_hdf5 returned negative value");
+ }
}
//--------------------------------------------------------------------------
-// Function: H5File::isHdf5 (static)
-///\brief This is an overloaded member function, provided for convenience.
-/// It takes an \c H5std_string for \a name. (Static)
-///\param name - IN: Name of the file - \c H5std_string
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5File::isHdf5 (static)
+///\brief This is an overloaded member function, provided for convenience.
+/// It takes an \c H5std_string for \a name. (Static)
+///\param name - IN: Name of the file - \c H5std_string
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-bool H5File::isHdf5(const H5std_string& name )
+bool H5File::isHdf5(const H5std_string& name)
{
- return( isHdf5( name.c_str()) );
+    return(isHdf5(name.c_str()));
}
//--------------------------------------------------------------------------
-// Function: openFile
-///\brief Opens an HDF5 file
-///\param name - IN: Name of the file
-///\param flags - IN: File access flags
-///\param access_plist - IN: File access property list. Default to
-/// FileAccPropList::DEFAULT
+// Function: openFile
+///\brief Opens an HDF5 file
+///\param name - IN: Name of the file
+///\param flags - IN: File access flags
+///\param access_plist - IN: File access property list. Default to
+/// FileAccPropList::DEFAULT
///\par Description
-/// Valid values of \a flags include:
-/// H5F_ACC_RDWR: Open with read/write access. If the file is
-/// currently open for read-only access then it
-/// will be reopened. Absence of this flag
-/// implies read-only access.
+/// Valid values of \a flags include:
+/// H5F_ACC_RDWR: Open with read/write access. If the file is
+/// currently open for read-only access then it
+/// will be reopened. Absence of this flag
+/// implies read-only access.
///
-/// H5F_ACC_RDONLY: Open with read only access. - default
+/// H5F_ACC_RDONLY: Open with read only access. - default
///
-// Programmer Binh-Minh Ribler - Oct, 2005
+// Programmer Binh-Minh Ribler - Oct, 2005
//--------------------------------------------------------------------------
void H5File::openFile(const char* name, unsigned int flags, const FileAccPropList& access_plist)
{
@@ -249,19 +249,19 @@ void H5File::openFile(const char* name, unsigned int flags, const FileAccPropLis
id = H5Fopen (name, flags, access_plist_id);
if (id < 0) // throw an exception when open fails
{
- throw FileIException("H5File::openFile", "H5Fopen failed");
+ throw FileIException("H5File::openFile", "H5Fopen failed");
}
}
//--------------------------------------------------------------------------
-// Function: H5File::openFile
-///\brief This is an overloaded member function, provided for convenience.
-/// It takes an \c H5std_string for \a name.
-///\param name - IN: Name of the file - \c H5std_string
-///\param flags - IN: File access flags
-///\param access_plist - IN: File access property list. Default to
-/// FileAccPropList::DEFAULT
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5File::openFile
+///\brief This is an overloaded member function, provided for convenience.
+/// It takes an \c H5std_string for \a name.
+///\param name - IN: Name of the file - \c H5std_string
+///\param flags - IN: File access flags
+///\param access_plist - IN: File access property list. Default to
+/// FileAccPropList::DEFAULT
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
void H5File::openFile(const H5std_string& name, unsigned int flags, const FileAccPropList& access_plist)
{
@@ -269,21 +269,21 @@ void H5File::openFile(const H5std_string& name, unsigned int flags, const FileAc
}
//--------------------------------------------------------------------------
-// Function: H5File::reOpen
-///\brief Reopens this file.
+// Function: H5File::reOpen
+///\brief Reopens this file.
///
-///\exception H5::FileIException
+///\exception H5::FileIException
// Description
-// If this object has represented another HDF5 file, the previous
-// HDF5 file need to be closed first.
-// Programmer Binh-Minh Ribler - 2000
+// If this object has represented another HDF5 file, the previous
+//              HDF5 file needs to be closed first.
+// Programmer Binh-Minh Ribler - 2000
// Note: This wrapper doesn't seem right regarding the 'id' and should
// be investigated. BMR - 2/20/2005
// Modification
-// - Replaced resetIdComponent() with decRefCount() to use C
-// library ID reference counting mechanism - BMR, Feb 20, 2005
-// - Replaced decRefCount with close() to let the C library
-// handle the reference counting - BMR, Jun 1, 2006
+// - Replaced resetIdComponent() with decRefCount() to use C
+// library ID reference counting mechanism - BMR, Feb 20, 2005
+// - Replaced decRefCount with close() to let the C library
+// handle the reference counting - BMR, Jun 1, 2006
//--------------------------------------------------------------------------
void H5File::reOpen()
{
@@ -294,129 +294,148 @@ void H5File::reOpen()
throw FileIException("H5File::reOpen", close_error.getDetailMsg());
}
- // call C routine to reopen the file - Note: not sure about this,
- // which id to be the parameter when closing?
- id = H5Freopen( id );
- if( id < 0 ) // Raise exception when H5Freopen returns a neg value
- throw FileIException("H5File::reOpen", "H5Freopen failed");
+ // call C routine to reopen the file - Note: not sure about this,
+ // which id to be the parameter when closing?
+ id = H5Freopen(id);
+ if (id < 0) // Raise exception when H5Freopen returns a neg value
+ throw FileIException("H5File::reOpen", "H5Freopen failed");
}
//--------------------------------------------------------------------------
-// Function: H5File::getCreatePlist
-///\brief Returns the creation property list of this file
-///\return FileCreatPropList object
-///\exception H5::FileIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5File::getCreatePlist
+///\brief Returns the creation property list of this file
+///\return FileCreatPropList object
+///\exception H5::FileIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
FileCreatPropList H5File::getCreatePlist() const
{
- hid_t create_plist_id = H5Fget_create_plist( id );
-
- // if H5Fget_create_plist returns a valid id, create and return
- // the FileCreatPropList object for this property list
- if( create_plist_id > 0 )
- {
- FileCreatPropList create_plist( create_plist_id );
- return( create_plist );
- }
- else
- {
- throw FileIException("H5File::getCreatePlist", "H5Fget_create_plist failed");
- }
+ hid_t create_plist_id = H5Fget_create_plist(id);
+
+ // if H5Fget_create_plist returns a valid id, create and return
+ // the FileCreatPropList object for this property list
+ if (create_plist_id > 0)
+ {
+ FileCreatPropList create_plist(create_plist_id);
+ return(create_plist);
+ }
+ else
+ {
+ throw FileIException("H5File::getCreatePlist", "H5Fget_create_plist failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: H5File::getAccessPlist
-///\brief Returns the access property list of this file
-///\return FileAccPropList object
-///\exception H5::FileIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5File::getAccessPlist
+///\brief Returns the access property list of this file
+///\return FileAccPropList object
+///\exception H5::FileIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
FileAccPropList H5File::getAccessPlist() const
{
- hid_t access_plist_id = H5Fget_access_plist( id );
+ hid_t access_plist_id = H5Fget_access_plist(id);
- // if H5Fget_access_plist returns a valid id, create and return
- // the FileAccPropList object for this property list
- if( access_plist_id > 0 )
- {
- FileAccPropList access_plist( access_plist_id );
+ // if H5Fget_access_plist returns a valid id, create and return
+ // the FileAccPropList object for this property list
+ if (access_plist_id > 0)
+ {
+ FileAccPropList access_plist(access_plist_id);
return access_plist;
- }
- else // Raise an exception
- {
- throw FileIException("H5File::getAccessPlist", "H5Fget_access_plist failed");
- }
+ }
+ else // Raise an exception
+ {
+ throw FileIException("H5File::getAccessPlist", "H5Fget_access_plist failed");
+ }
+}
+
+//--------------------------------------------------------------------------
+// Function: H5File::getFileInfo
+///\brief Retrieves the general information of this file.
+///
+///\exception H5::FileIException
+///\par Description
+///             The retrieved information may include information about the
+///             superblock extension, free space management, and shared object
+///             header messages.
+// Programmer Binh-Minh Ribler - February 2017
+//--------------------------------------------------------------------------
+void H5File::getFileInfo(H5F_info2_t& file_info) const
+{
+ herr_t ret_value = H5Fget_info2(id, &file_info);
+ if (ret_value < 0)
+ {
+ throw FileIException("H5File::getFileInfo", "H5Fget_info2 failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: H5File::getFreeSpace
-///\brief Returns the amount of free space in the file.
-///\return Amount of free space
-///\exception H5::FileIException
+// Function: H5File::getFreeSpace
+///\brief Returns the amount of free space in the file.
+///\return Amount of free space
+///\exception H5::FileIException
// Programmer Binh-Minh Ribler - May 2004
//--------------------------------------------------------------------------
hssize_t H5File::getFreeSpace() const
{
- hssize_t free_space = H5Fget_freespace(id);
- if( free_space < 0 )
- {
- throw FileIException("H5File::getFreeSpace", "H5Fget_freespace failed");
- }
- return (free_space);
+ hssize_t free_space = H5Fget_freespace(id);
+ if (free_space < 0)
+ {
+ throw FileIException("H5File::getFreeSpace", "H5Fget_freespace failed");
+ }
+ return (free_space);
}
//--------------------------------------------------------------------------
-// Function: H5File::getObjCount
-///\brief Returns the number of opened object IDs (files, datasets,
-/// groups and datatypes) in the same file.
-///\param types - Type of object to retrieve the count
-///\return Number of opened object IDs
-///\exception H5::FileIException
+// Function: H5File::getObjCount
+///\brief Returns the number of opened object IDs (files, datasets,
+/// groups and datatypes) in the same file.
+///\param types - Type of object to retrieve the count
+///\return Number of opened object IDs
+///\exception H5::FileIException
///\par Description
-/// The valid values for \a types include:
-/// \li \c H5F_OBJ_FILE - Files only
-/// \li \c H5F_OBJ_DATASET - Datasets only
-/// \li \c H5F_OBJ_GROUP - Groups only
-/// \li \c H5F_OBJ_DATATYPE - Named datatypes only
-/// \li \c H5F_OBJ_ATTR - Attributes only
-/// \li \c H5F_OBJ_ALL - All of the above, i.e., \c H5F_OBJ_FILE
-/// | \c H5F_OBJ_DATASET | \c H5F_OBJ_GROUP
-/// | \c H5F_OBJ_DATATYPE | \c H5F_OBJ_ATTR
+/// The valid values for \a types include:
+/// \li \c H5F_OBJ_FILE - Files only
+/// \li \c H5F_OBJ_DATASET - Datasets only
+/// \li \c H5F_OBJ_GROUP - Groups only
+/// \li \c H5F_OBJ_DATATYPE - Named datatypes only
+/// \li \c H5F_OBJ_ATTR - Attributes only
+/// \li \c H5F_OBJ_ALL - All of the above, i.e., \c H5F_OBJ_FILE
+/// | \c H5F_OBJ_DATASET | \c H5F_OBJ_GROUP
+/// | \c H5F_OBJ_DATATYPE | \c H5F_OBJ_ATTR
///\par
/// Multiple object types can be combined with the logical OR operator (|).
// Programmer Binh-Minh Ribler - May 2004
//--------------------------------------------------------------------------
ssize_t H5File::getObjCount(unsigned types) const
{
- ssize_t num_objs = H5Fget_obj_count(id, types);
- if( num_objs < 0 )
- {
- throw FileIException("H5File::getObjCount", "H5Fget_obj_count failed");
- }
- return (num_objs);
+ ssize_t num_objs = H5Fget_obj_count(id, types);
+ if (num_objs < 0)
+ {
+ throw FileIException("H5File::getObjCount", "H5Fget_obj_count failed");
+ }
+ return (num_objs);
}
//--------------------------------------------------------------------------
-// Function: H5File::getObjIDs
-///\brief Retrieves a list of opened object IDs (files, datasets,
-/// groups and datatypes) in the same file.
-///\param types - Type of object to retrieve the count
-///\param max_objs - Maximum number of object identifiers to place
-/// into obj_id_list.
-///\param oid_list - List of open object identifiers
-///\exception H5::FileIException
+// Function: H5File::getObjIDs
+///\brief Retrieves a list of opened object IDs (files, datasets,
+/// groups and datatypes) in the same file.
+///\param types - Type of object to retrieve the count
+///\param max_objs - Maximum number of object identifiers to place
+/// into obj_id_list.
+///\param oid_list - List of open object identifiers
+///\exception H5::FileIException
///\par Description
-/// The valid values for \a types include:
-/// \li \c H5F_OBJ_FILE - Files only
-/// \li \c H5F_OBJ_DATASET - Datasets only
-/// \li \c H5F_OBJ_GROUP - Groups only
-/// \li \c H5F_OBJ_DATATYPE - Named datatypes only
-/// \li \c H5F_OBJ_ATTR - Attributes only
-/// \li \c H5F_OBJ_ALL - All of the above, i.e., \c H5F_OBJ_FILE
-/// | \c H5F_OBJ_DATASET | \c H5F_OBJ_GROUP
-/// | \c H5F_OBJ_DATATYPE | \c H5F_OBJ_ATTR
+/// The valid values for \a types include:
+/// \li \c H5F_OBJ_FILE - Files only
+/// \li \c H5F_OBJ_DATASET - Datasets only
+/// \li \c H5F_OBJ_GROUP - Groups only
+/// \li \c H5F_OBJ_DATATYPE - Named datatypes only
+/// \li \c H5F_OBJ_ATTR - Attributes only
+/// \li \c H5F_OBJ_ALL - All of the above, i.e., \c H5F_OBJ_FILE
+/// | \c H5F_OBJ_DATASET | \c H5F_OBJ_GROUP
+/// | \c H5F_OBJ_DATATYPE | \c H5F_OBJ_ATTR
///\par
/// Multiple object types can be combined with the logical OR operator (|).
//
@@ -425,56 +444,56 @@ ssize_t H5File::getObjCount(unsigned types) const
//--------------------------------------------------------------------------
void H5File::getObjIDs(unsigned types, size_t max_objs, hid_t *oid_list) const
{
- ssize_t ret_value = H5Fget_obj_ids(id, types, max_objs, oid_list);
- if( ret_value < 0 )
- {
- throw FileIException("H5File::getObjIDs", "H5Fget_obj_ids failed");
- }
+ ssize_t ret_value = H5Fget_obj_ids(id, types, max_objs, oid_list);
+ if (ret_value < 0)
+ {
+ throw FileIException("H5File::getObjIDs", "H5Fget_obj_ids failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: H5File::getVFDHandle
-///\brief Returns the pointer to the file handle of the low-level file
-/// driver.
-///\param fapl - File access property list
-///\param file_handle - Pointer to the file handle being used by
-/// the low-level virtual file driver
-///\exception H5::FileIException
+// Function: H5File::getVFDHandle
+///\brief Returns the pointer to the file handle of the low-level file
+/// driver.
+///\param fapl - File access property list
+///\param file_handle - Pointer to the file handle being used by
+/// the low-level virtual file driver
+///\exception H5::FileIException
///\par Description
-/// For the \c FAMILY or \c MULTI drivers, \a fapl should be
-/// defined through the property list functions:
-/// \c FileAccPropList::setFamilyOffset for the \c FAMILY driver
-/// and \c FileAccPropList::setMultiType for the \c MULTI driver.
+/// For the \c FAMILY or \c MULTI drivers, \a fapl should be
+/// defined through the property list functions:
+/// \c FileAccPropList::setFamilyOffset for the \c FAMILY driver
+/// and \c FileAccPropList::setMultiType for the \c MULTI driver.
///
-/// The obtained file handle is dynamic and is valid only while
-/// the file remains open; it will be invalid if the file is
-/// closed and reopened or opened during a subsequent session.
+/// The obtained file handle is dynamic and is valid only while
+/// the file remains open; it will be invalid if the file is
+/// closed and reopened or opened during a subsequent session.
// Programmer Binh-Minh Ribler - May 2004
//--------------------------------------------------------------------------
void H5File::getVFDHandle(const FileAccPropList& fapl, void **file_handle) const
{
- hid_t fapl_id = fapl.getId();
- herr_t ret_value = H5Fget_vfd_handle(id, fapl_id, file_handle);
- if( ret_value < 0 )
- {
- throw FileIException("H5File::getVFDHandle", "H5Fget_vfd_handle failed");
- }
+ hid_t fapl_id = fapl.getId();
+ herr_t ret_value = H5Fget_vfd_handle(id, fapl_id, file_handle);
+ if (ret_value < 0)
+ {
+ throw FileIException("H5File::getVFDHandle", "H5Fget_vfd_handle failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: H5File::getVFDHandle
-// Purpose This is an overloaded member function, kept for backward
-// compatibility. It differs from the above function in that it
-// misses const's. This wrapper will be removed in future release.
-// Param fapl - File access property list
-// Param file_handle - Pointer to the file handle being used by
-// the low-level virtual file driver
-// Exception H5::FileIException
+// Function: H5File::getVFDHandle
+// Purpose This is an overloaded member function, kept for backward
+//              compatibility. It differs from the above function in that it
+//              lacks the const qualifiers. This wrapper will be removed in a
+//              future release.
+// Param fapl - File access property list
+// Param file_handle - Pointer to the file handle being used by
+// the low-level virtual file driver
+// Exception H5::FileIException
// Programmer Binh-Minh Ribler - May 2004
// Modification
-// Planned for removal. -BMR, 2014/04/16
-// Removed from documentation. -BMR, 2016/03/07 1.8.17 and 1.10.0
-// Removed from code. -BMR, 2016/08/11 1.8.18 and 1.10.1
+// Planned for removal. -BMR, 2014/04/16
+// Removed from documentation. -BMR, 2016/03/07 1.8.17 and 1.10.0
+// Removed from code. -BMR, 2016/08/11 1.8.18 and 1.10.1
//--------------------------------------------------------------------------
//void H5File::getVFDHandle(FileAccPropList& fapl, void **file_handle) const
//{
@@ -482,103 +501,104 @@ void H5File::getVFDHandle(const FileAccPropList& fapl, void **file_handle) const
//}
//--------------------------------------------------------------------------
-// Function: H5File::getVFDHandle
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function only in what arguments it
-/// accepts.
-///\param file_handle - Pointer to the file handle being used by
-/// the low-level virtual file driver
-///\exception H5::FileIException
+// Function: H5File::getVFDHandle
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function only in what arguments it
+/// accepts.
+///\param file_handle - Pointer to the file handle being used by
+/// the low-level virtual file driver
+///\exception H5::FileIException
// Programmer Binh-Minh Ribler - May 2004
//--------------------------------------------------------------------------
void H5File::getVFDHandle(void **file_handle) const
{
- herr_t ret_value = H5Fget_vfd_handle(id, H5P_DEFAULT, file_handle);
- if( ret_value < 0 )
- {
- throw FileIException("H5File::getVFDHandle", "H5Fget_vfd_handle failed");
- }
+ herr_t ret_value = H5Fget_vfd_handle(id, H5P_DEFAULT, file_handle);
+ if (ret_value < 0)
+ {
+ throw FileIException("H5File::getVFDHandle", "H5Fget_vfd_handle failed");
+ }
}
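//--------------------------------------------------------------------------
// Usage sketch for getVFDHandle(): with the default (POSIX "sec2") driver
// the returned handle points to the file descriptor. The file name is a
// placeholder, and the int* interpretation assumes the default driver.
//--------------------------------------------------------------------------
#include <iostream>
#include "H5Cpp.h"

int main()
{
    H5::H5File file("sample.h5", H5F_ACC_RDONLY);

    void *handle = NULL;
    file.getVFDHandle(&handle);    // uses H5P_DEFAULT internally

    if (handle != NULL)
        std::cout << "POSIX fd: " << *static_cast<int*>(handle) << std::endl;
    return 0;
}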
//--------------------------------------------------------------------------
-// Function: H5File::getFileSize
-///\brief Returns the file size of the HDF5 file.
-///\return File size
-///\exception H5::FileIException
+// Function: H5File::getFileSize
+///\brief Returns the file size of the HDF5 file.
+///\return File size
+///\exception H5::FileIException
///\par Description
-/// This function is called after an existing file is opened in
-/// order to learn the true size of the underlying file.
+/// This function is called after an existing file is opened in
+/// order to learn the true size of the underlying file.
// Programmer Raymond Lu - June 24, 2004
//--------------------------------------------------------------------------
hsize_t H5File::getFileSize() const
{
- hsize_t file_size;
- herr_t ret_value = H5Fget_filesize(id, &file_size);
- if (ret_value < 0)
- {
- throw FileIException("H5File::getFileSize", "H5Fget_filesize failed");
- }
- return (file_size);
+ hsize_t file_size;
+ herr_t ret_value = H5Fget_filesize(id, &file_size);
+ if (ret_value < 0)
+ {
+ throw FileIException("H5File::getFileSize", "H5Fget_filesize failed");
+ }
+ return (file_size);
}
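//--------------------------------------------------------------------------
// Usage sketch for getFileSize(): report the size of an already opened
// file ("sample.h5" is a placeholder).
//--------------------------------------------------------------------------
#include <iostream>
#include "H5Cpp.h"

int main()
{
    H5::H5File file("sample.h5", H5F_ACC_RDONLY);
    hsize_t size = file.getFileSize();
    std::cout << "file size: " << size << " bytes" << std::endl;
    return 0;
}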
//--------------------------------------------------------------------------
-// Function: H5File::getId
-///\brief Get the id of this file
-///\return File identifier
-// Modification:
-// May 2008 - BMR
-// Class hierarchy is revised to address bugzilla 1068. Class
-// AbstractDS and Attribute are moved out of H5Object. In
-// addition, member IdComponent::id is moved into subclasses, and
-// IdComponent::getId now becomes pure virtual function.
-// Programmer Binh-Minh Ribler - May, 2008
+// Function: H5File::getId
+///\brief Get the id of this file
+///\return File identifier
+// Modification
+// May 2008 - BMR
+// Class hierarchy is revised to address bugzilla 1068. Class
+// AbstractDS and Attribute are moved out of H5Object. In
+// addition, member IdComponent::id is moved into subclasses, and
+// IdComponent::getId now becomes pure virtual function.
+// Programmer Binh-Minh Ribler - May, 2008
//--------------------------------------------------------------------------
hid_t H5File::getId() const
{
- return(id);
+ return(id);
}
#ifndef DOXYGEN_SHOULD_SKIP_THIS
//--------------------------------------------------------------------------
-// Function: H5File::reopen
-// Purpose: Reopens this file.
-// Exception H5::FileIException
+// Function: H5File::reopen
+// Purpose Reopens this file.
+// Exception H5::FileIException
// Description
-// This function is replaced by the above function reOpen.
-// Programmer Binh-Minh Ribler - 2000
+// This function is replaced by the above function reOpen.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
void H5File::reopen()
{
- H5File::reOpen();
+ H5File::reOpen();
}
//--------------------------------------------------------------------------
-// Function: H5File::getLocId
-// Purpose: Get the id of this file
+// Function: H5File::getLocId
+// Purpose Get the id of this file
// Description
-// This function is a redefinition of CommonFG::getLocId. It
-// is used by CommonFG member functions to get the file id.
-// Programmer Binh-Minh Ribler - 2000
+// This function is a redefinition of CommonFG::getLocId. It
+// is used by CommonFG member functions to get the file id.
+// Programmer Binh-Minh Ribler - 2000
// Deprecated:
-// After HDFFV-9920, the Group's methods can use getId() and getLocId()
-// is kept for backward compatibility. Aug 18, 2016 -BMR
+// Aug 18, 2016 -BMR
+// After HDFFV-9920, the Group's methods can use getId() and
+// getLocId() is kept for backward compatibility.
//--------------------------------------------------------------------------
hid_t H5File::getLocId() const
{
- return( getId() );
+ return(getId());
}
//--------------------------------------------------------------------------
-// Function: H5File::p_setId (protected)
-///\brief Sets the identifier of this object to a new value.
+// Function: H5File::p_setId (protected)
+///\brief Sets the identifier of this object to a new value.
///
-///\exception H5::IdComponentException when the attempt to close the HDF5
-/// object fails
-// Description:
-// The underlaying reference counting in the C library ensures
-// that the current valid id of this object is properly closed.
-// Then the object's id is reset to the new id.
-// Programmer Binh-Minh Ribler - 2000
+///\exception H5::IdComponentException when the attempt to close the HDF5
+/// object fails
+// Description
+//              The underlying reference counting in the C library ensures
+// that the current valid id of this object is properly closed.
+// Then the object's id is reset to the new id.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
void H5File::p_setId(const hid_t new_id)
{
@@ -589,68 +609,68 @@ void H5File::p_setId(const hid_t new_id)
catch (Exception& E) {
throw FileIException("H5File::p_setId", E.getDetailMsg());
}
- // reset object's id to the given id
- id = new_id;
+ // reset object's id to the given id
+ id = new_id;
}
#endif // DOXYGEN_SHOULD_SKIP_THIS
//--------------------------------------------------------------------------
-// Function: H5File::close
-///\brief Closes this HDF5 file.
+// Function: H5File::close
+///\brief Closes this HDF5 file.
///
-///\exception H5::FileIException
-// Programmer Binh-Minh Ribler - Mar 9, 2005
+///\exception H5::FileIException
+// Programmer Binh-Minh Ribler - Mar 9, 2005
//--------------------------------------------------------------------------
void H5File::close()
{
if (p_valid_id(id))
{
- herr_t ret_value = H5Fclose( id );
- if( ret_value < 0 )
- {
- throw FileIException("H5File::close", "H5Fclose failed");
- }
- // reset the id
- id = H5I_INVALID_HID;
+ herr_t ret_value = H5Fclose(id);
+ if (ret_value < 0)
+ {
+ throw FileIException("H5File::close", "H5Fclose failed");
+ }
+ // reset the id
+ id = H5I_INVALID_HID;
}
}
//--------------------------------------------------------------------------
-// Function: H5File::throwException
-///\brief Throws file exception - initially implemented for CommonFG
-///\param func_name - Name of the function where failure occurs
-///\param msg - Message describing the failure
-///\exception H5::FileIException
+// Function: H5File::throwException
+///\brief Throws file exception - initially implemented for CommonFG
+///\param func_name - Name of the function where failure occurs
+///\param msg - Message describing the failure
+///\exception H5::FileIException
// Description
-// This function is also used in H5Location implementation so that
-// proper exception can be thrown for file or group. The
-// "H5File::" will be inserted to indicate the function called is
-// an implementation of H5File.
-// Programmer Binh-Minh Ribler - 2000
+//              This function is also used in the H5Location implementation so
+//              that the proper exception can be thrown for a file or a group.
+//              The prefix "H5File::" is inserted to indicate that the function
+//              called is an implementation of H5File.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
void H5File::throwException(const H5std_string& func_name, const H5std_string& msg) const
{
- H5std_string full_name = func_name;
- full_name.insert(0, "H5File::");
- throw FileIException(full_name, msg);
+ H5std_string full_name = func_name;
+ full_name.insert(0, "H5File::");
+ throw FileIException(full_name, msg);
}
//--------------------------------------------------------------------------
-// Function: H5File destructor
-///\brief Properly terminates access to this file.
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5File destructor
+///\brief Properly terminates access to this file.
+// Programmer Binh-Minh Ribler - 2000
// Modification
-// - Replaced resetIdComponent() with decRefCount() to use C
-// library ID reference counting mechanism - BMR, Feb 20, 2005
-// - Replaced decRefCount with close() to let the C library
-// handle the reference counting - BMR, Jun 1, 2006
+// - Replaced resetIdComponent() with decRefCount() to use C
+// library ID reference counting mechanism - BMR, Feb 20, 2005
+// - Replaced decRefCount with close() to let the C library
+// handle the reference counting - BMR, Jun 1, 2006
//--------------------------------------------------------------------------
H5File::~H5File()
{
try {
- close();
+ close();
} catch (Exception& close_error) {
- cerr << "H5File::~H5File - " << close_error.getDetailMsg() << endl;
+ cerr << "H5File::~H5File - " << close_error.getDetailMsg() << endl;
}
}
diff --git a/c++/src/H5File.h b/c++/src/H5File.h
index dca6c67..b428a40 100644
--- a/c++/src/H5File.h
+++ b/c++/src/H5File.h
@@ -6,18 +6,15 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef __H5File_H
#define __H5File_H
-
namespace H5 {
/*! \class H5File
@@ -28,99 +25,103 @@ namespace H5 {
*/
class H5_DLLCPP H5File : public Group {
public:
- // Creates or opens an HDF5 file.
- H5File( const char* name, unsigned int flags,
- const FileCreatPropList& create_plist = FileCreatPropList::DEFAULT,
- const FileAccPropList& access_plist = FileAccPropList::DEFAULT );
- H5File( const H5std_string& name, unsigned int flags,
- const FileCreatPropList& create_plist = FileCreatPropList::DEFAULT,
- const FileAccPropList& access_plist = FileAccPropList::DEFAULT );
+ // Creates or opens an HDF5 file.
+ H5File(const char* name, unsigned int flags,
+ const FileCreatPropList& create_plist = FileCreatPropList::DEFAULT,
+ const FileAccPropList& access_plist = FileAccPropList::DEFAULT);
+ H5File(const H5std_string& name, unsigned int flags,
+ const FileCreatPropList& create_plist = FileCreatPropList::DEFAULT,
+ const FileAccPropList& access_plist = FileAccPropList::DEFAULT);
+
+ // Open the file
+ void openFile(const H5std_string& name, unsigned int flags,
+ const FileAccPropList& access_plist = FileAccPropList::DEFAULT);
+ void openFile(const char* name, unsigned int flags,
+ const FileAccPropList& access_plist = FileAccPropList::DEFAULT);
- // Open the file
- void openFile(const H5std_string& name, unsigned int flags,
- const FileAccPropList& access_plist = FileAccPropList::DEFAULT);
- void openFile(const char* name, unsigned int flags,
- const FileAccPropList& access_plist = FileAccPropList::DEFAULT);
+ // Close this file.
+ virtual void close();
- // Close this file.
- virtual void close();
+ // Gets the access property list of this file.
+ FileAccPropList getAccessPlist() const;
- // Gets the access property list of this file.
- FileAccPropList getAccessPlist() const;
+ // Gets the creation property list of this file.
+ FileCreatPropList getCreatePlist() const;
- // Gets the creation property list of this file.
- FileCreatPropList getCreatePlist() const;
+ // Gets general information about this file.
+ void getFileInfo(H5F_info2_t& file_info) const;
- // Retrieves the file size of an opened file.
- hsize_t getFileSize() const;
+ // Returns the amount of free space in the file.
+ hssize_t getFreeSpace() const;
- // Returns the amount of free space in the file.
- hssize_t getFreeSpace() const;
+ // Returns the number of opened object IDs (files, datasets, groups
+ // and datatypes) in the same file.
+ ssize_t getObjCount(unsigned types = H5F_OBJ_ALL) const;
- // Returns the number of opened object IDs (files, datasets, groups
- // and datatypes) in the same file.
- ssize_t getObjCount(unsigned types = H5F_OBJ_ALL) const;
+ // Retrieves a list of opened object IDs (files, datasets, groups
+ // and datatypes) in the same file.
+ void getObjIDs(unsigned types, size_t max_objs, hid_t *oid_list) const;
- // Retrieves a list of opened object IDs (files, datasets, groups
- // and datatypes) in the same file.
- void getObjIDs(unsigned types, size_t max_objs, hid_t *oid_list) const;
+ // Returns the pointer to the file handle of the low-level file driver.
+ void getVFDHandle(void **file_handle) const;
+ void getVFDHandle(const FileAccPropList& fapl, void **file_handle) const;
+ //void getVFDHandle(FileAccPropList& fapl, void **file_handle) const; // removed from 1.8.18 and 1.10.1
- // Returns the pointer to the file handle of the low-level file driver.
- void getVFDHandle(void **file_handle) const;
- void getVFDHandle(const FileAccPropList& fapl, void **file_handle) const;
- //void getVFDHandle(FileAccPropList& fapl, void **file_handle) const; // removed from 1.8.18 and 1.10.1
+ // Returns the file size of the HDF5 file.
+ hsize_t getFileSize() const;
- // Determines if a file, specified by its name, is in HDF5 format
- static bool isHdf5(const char* name );
- static bool isHdf5(const H5std_string& name );
+ // Determines if a file, specified by its name, is in HDF5 format
+ static bool isHdf5(const char* name);
+ static bool isHdf5(const H5std_string& name);
- // Reopens this file.
- void reOpen(); // added for better name
+ // Reopens this file.
+ void reOpen(); // added for better name
#ifndef DOXYGEN_SHOULD_SKIP_THIS
- void reopen(); // obsolete in favor of reOpen()
+ void reopen(); // obsolete in favor of reOpen()
- // Creates an H5File using an existing file id. Not recommended
- // in applications.
- H5File(hid_t existing_id);
+ // Creates an H5File using an existing file id. Not recommended
+ // in applications.
+ H5File(hid_t existing_id);
#endif // DOXYGEN_SHOULD_SKIP_THIS
- ///\brief Returns this class name.
- virtual H5std_string fromClass () const { return("H5File"); }
+ ///\brief Returns this class name.
+ virtual H5std_string fromClass () const { return("H5File"); }
- // Throw file exception.
- virtual void throwException(const H5std_string& func_name, const H5std_string& msg) const;
+ // Throw file exception.
+ virtual void throwException(const H5std_string& func_name, const H5std_string& msg) const;
- // for CommonFG to get the file id.
- virtual hid_t getLocId() const;
+ // for CommonFG to get the file id.
+ virtual hid_t getLocId() const;
- // Default constructor
- H5File();
+ // Default constructor
+ H5File();
- // Copy constructor: makes a copy of the original H5File object.
- H5File(const H5File& original);
+ // Copy constructor: makes a copy of the original H5File object.
+ H5File(const H5File& original);
- // Gets the HDF5 file id.
- virtual hid_t getId() const;
+ // Gets the HDF5 file id.
+ virtual hid_t getId() const;
- // H5File destructor.
- virtual ~H5File();
+ // H5File destructor.
+ virtual ~H5File();
protected:
#ifndef DOXYGEN_SHOULD_SKIP_THIS
- // Sets the HDF5 file id.
- virtual void p_setId(const hid_t new_id);
+ // Sets the HDF5 file id.
+ virtual void p_setId(const hid_t new_id);
#endif // DOXYGEN_SHOULD_SKIP_THIS
private:
- hid_t id; // HDF5 file id
+ hid_t id; // HDF5 file id
+
+ // This function is private and contains common code between the
+ // constructors taking a string or a char*
+ void p_get_file(const char* name, unsigned int flags, const FileCreatPropList& create_plist, const FileAccPropList& access_plist);
- // This function is private and contains common code between the
- // constructors taking a string or a char*
- void p_get_file( const char* name, unsigned int flags, const FileCreatPropList& create_plist, const FileAccPropList& access_plist );
+}; // end of H5File
+} // namespace H5
-};
-}
#endif // __H5File_H
diff --git a/c++/src/H5FloatType.cpp b/c++/src/H5FloatType.cpp
index 9ce06f7..6bb3fd6 100644
--- a/c++/src/H5FloatType.cpp
+++ b/c++/src/H5FloatType.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <string>
@@ -22,6 +20,7 @@
#include "H5OcreatProp.h"
#include "H5DcreatProp.h"
#include "H5DxferProp.h"
+#include "H5LaccProp.h"
#include "H5Location.h"
#include "H5Object.h"
#include "H5DataType.h"
@@ -35,251 +34,289 @@
namespace H5 {
//--------------------------------------------------------------------------
-// Function: FloatType default constructor
-///\brief Default constructor: Creates a stub floating-point datatype
-// Programmer Binh-Minh Ribler - 2000
+// Function: FloatType default constructor
+///\brief Default constructor: Creates a stub floating-point datatype
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
FloatType::FloatType() {}
//--------------------------------------------------------------------------
-// Function: FloatType overloaded constructor
-///\brief Creates a floating-point datatype using a predefined type.
-///\param pred_type - IN: Predefined datatype
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: FloatType overloaded constructor
+///\brief Creates a floating-point datatype using a predefined type.
+///\param pred_type - IN: Predefined datatype
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-FloatType::FloatType( const PredType& pred_type ) : AtomType()
+FloatType::FloatType(const PredType& pred_type) : AtomType()
{
- // use DataType::copy to make a copy of this predefined type
- copy( pred_type );
+ // use DataType::copy to make a copy of this predefined type
+ copy(pred_type);
}
//--------------------------------------------------------------------------
-// Function: FloatType overloaded constructor
-///\brief Creates an FloatType object using the id of an existing
-/// datatype.
-///\param existing_id - IN: Id of an existing datatype
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: FloatType overloaded constructor
+///\brief      Creates a FloatType object using the id of an existing
+/// datatype.
+///\param existing_id - IN: Id of an existing datatype
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-FloatType::FloatType( const hid_t existing_id ) : AtomType( existing_id ) {}
+FloatType::FloatType(const hid_t existing_id) : AtomType(existing_id) {}
//--------------------------------------------------------------------------
-// Function: FloatType copy constructor
-///\brief Copy constructor: makes a copy of the original FloatType object.
-// Programmer Binh-Minh Ribler - 2000
+// Function: FloatType copy constructor
+///\brief Copy constructor: makes a copy of the original FloatType object.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-FloatType::FloatType( const FloatType& original ) : AtomType( original ){}
+FloatType::FloatType(const FloatType& original) : AtomType(original) {}
//--------------------------------------------------------------------------
-// Function: EnumType overloaded constructor
-///\brief Gets the floating-point datatype of the specified dataset
-///\param dataset - IN: Dataset that this floating-point datatype
-/// associates with
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function:    FloatType overloaded constructor
+///\brief Gets the floating-point datatype of the specified dataset
+///\param dataset - IN: Dataset that this floating-point datatype
+/// associates with
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-FloatType::FloatType( const DataSet& dataset ) : AtomType()
+FloatType::FloatType(const DataSet& dataset) : AtomType()
{
- // Calls C function H5Dget_type to get the id of the datatype
- id = H5Dget_type( dataset.getId() );
+ // Calls C function H5Dget_type to get the id of the datatype
+ id = H5Dget_type(dataset.getId());
- if( id < 0 )
- {
- throw DataSetIException("FloatType constructor", "H5Dget_type failed");
- }
+ if (id < 0)
+ {
+ throw DataSetIException("FloatType constructor", "H5Dget_type failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: FloatType::getFields
-///\brief Retrieves floating point datatype bit field information.
-///\param spos - OUT: Retrieved floating-point sign bit
-///\param epos - OUT: Retrieved exponent bit-position
-///\param esize - OUT: Retrieved size of exponent, in bits
-///\param mpos - OUT: Retrieved mantissa bit-position
-///\param msize - OUT: Retrieved size of mantissa, in bits
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
-//--------------------------------------------------------------------------
-void FloatType::getFields( size_t& spos, size_t& epos, size_t& esize, size_t& mpos, size_t& msize ) const
+// Function: FloatType overloaded constructor
+///\brief      Creates a FloatType instance by opening an HDF5 float datatype
+/// given its name, provided as a C character string.
+///\param loc - IN: Location of the type
+///\param dtype_name - IN: Float type name
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - Dec 2016
+// Description
+// In 1.10.1, this constructor was introduced and may replace the
+// existing function CommonFG::openFloatType(const char*)
+// to improve usability.
+// -BMR, Dec 2016
+//--------------------------------------------------------------------------
+FloatType::FloatType(const H5Location& loc, const char *dtype_name) : AtomType()
+{
+ id = p_opentype(loc, dtype_name);
+}
+
+//--------------------------------------------------------------------------
+// Function: FloatType overloaded constructor
+///\brief      Creates a FloatType instance by opening an HDF5 float datatype
+/// given its name, provided as an \c H5std_string.
+///\param loc - IN: Location of the type
+///\param dtype_name - IN: Float type name
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - Dec 2016
+// Description
+// In 1.10.1, this constructor was introduced and may replace the
+// existing function CommonFG::openFloatType(const H5std_string&)
+// to improve usability.
+// -BMR, Dec 2016
+//--------------------------------------------------------------------------
+FloatType::FloatType(const H5Location& loc, const H5std_string& dtype_name) : AtomType()
{
- herr_t ret_value = H5Tget_fields( id, &spos, &epos, &esize, &mpos, &msize );
- if( ret_value < 0 )
- {
- throw DataTypeIException("FloatType::getFields", "H5Tget_fields failed");
- }
+ id = p_opentype(loc, dtype_name.c_str());
}
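//--------------------------------------------------------------------------
// Usage sketch for the two constructors above: open a committed float
// datatype by name. Both the file name and the datatype name
// ("float_type") are assumed to exist already.
//--------------------------------------------------------------------------
#include <iostream>
#include "H5Cpp.h"

int main()
{
    H5::H5File file("sample.h5", H5F_ACC_RDONLY);

    // Introduced as an alternative to CommonFG::openFloatType():
    // open the named datatype directly from a location.
    H5::FloatType ftype(file, "float_type");
    std::cout << "exponent bias: " << ftype.getEbias() << std::endl;
    return 0;
}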
//--------------------------------------------------------------------------
-// Function: FloatType::setFields
-///\brief Sets locations and sizes of floating point bit fields.
-///\param spos - OUT: Sign position, i.e., the bit offset of the
-/// floating-point sign bit.
-///\param epos - OUT: Exponent bit position
-///\param esize - OUT: Size of exponent, in bits
-///\param mpos - OUT: Mantissa bit-position
-///\param msize - OUT: Size of mantissa, in bits
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
-//--------------------------------------------------------------------------
-void FloatType::setFields( size_t spos, size_t epos, size_t esize, size_t mpos, size_t msize ) const
+// Function: FloatType::getFields
+///\brief Retrieves floating point datatype bit field information.
+///\param spos - OUT: Retrieved floating-point sign bit
+///\param epos - OUT: Retrieved exponent bit-position
+///\param esize - OUT: Retrieved size of exponent, in bits
+///\param mpos - OUT: Retrieved mantissa bit-position
+///\param msize - OUT: Retrieved size of mantissa, in bits
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
+//--------------------------------------------------------------------------
+void FloatType::getFields(size_t& spos, size_t& epos, size_t& esize, size_t& mpos, size_t& msize) const
+{
+ herr_t ret_value = H5Tget_fields(id, &spos, &epos, &esize, &mpos, &msize);
+ if (ret_value < 0)
+ {
+ throw DataTypeIException("FloatType::getFields", "H5Tget_fields failed");
+ }
+}
+
+//--------------------------------------------------------------------------
+// Function: FloatType::setFields
+///\brief Sets locations and sizes of floating point bit fields.
+///\param      spos  - IN: Sign position, i.e., the bit offset of the
+///             floating-point sign bit.
+///\param      epos  - IN: Exponent bit position
+///\param      esize - IN: Size of exponent, in bits
+///\param      mpos  - IN: Mantissa bit-position
+///\param      msize - IN: Size of mantissa, in bits
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
+//--------------------------------------------------------------------------
+void FloatType::setFields(size_t spos, size_t epos, size_t esize, size_t mpos, size_t msize) const
{
- herr_t ret_value = H5Tset_fields( id, spos, epos, esize, mpos, msize );
- if( ret_value < 0 )
- {
- throw DataTypeIException("FloatType::setFields", "H5Tset_fields failed");
- }
+ herr_t ret_value = H5Tset_fields(id, spos, epos, esize, mpos, msize);
+ if (ret_value < 0)
+ {
+ throw DataTypeIException("FloatType::setFields", "H5Tset_fields failed");
+ }
}
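//--------------------------------------------------------------------------
// Usage sketch for getFields()/setFields(): inspect the bit layout of a
// modifiable copy of a predefined IEEE type, then re-apply the same layout
// (a no-op that only demonstrates the setter's signature).
//--------------------------------------------------------------------------
#include <iostream>
#include "H5Cpp.h"

int main()
{
    H5::FloatType ftype(H5::PredType::IEEE_F32LE);

    size_t spos, epos, esize, mpos, msize;
    ftype.getFields(spos, epos, esize, mpos, msize);
    std::cout << "sign@" << spos << ", exponent@" << epos << " (" << esize
              << " bits), mantissa@" << mpos << " (" << msize << " bits)\n";

    ftype.setFields(spos, epos, esize, mpos, msize);
    return 0;
}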
//--------------------------------------------------------------------------
-// Function: FloatType::getEbias
-///\brief Retrieves the exponent bias of a floating-point type.
-///\return Exponent bias
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: FloatType::getEbias
+///\brief Retrieves the exponent bias of a floating-point type.
+///\return Exponent bias
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
size_t FloatType::getEbias() const
{
- size_t ebias = H5Tget_ebias( id );
- // Returns the bias if successful
- if( ebias == 0 )
- {
- throw DataTypeIException("FloatType::getEbias", "H5Tget_ebias failed - returned exponent bias as 0");
- }
- return( ebias );
+ size_t ebias = H5Tget_ebias(id);
+ // Returns the bias if successful
+ if (ebias == 0)
+ {
+ throw DataTypeIException("FloatType::getEbias", "H5Tget_ebias failed - returned exponent bias as 0");
+ }
+ return(ebias);
}
//--------------------------------------------------------------------------
-// Function: FloatType::setEbias
-///\brief Sets the exponent bias of a floating-point type.
-///\param ebias - Exponent bias value
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: FloatType::setEbias
+///\brief Sets the exponent bias of a floating-point type.
+///\param ebias - Exponent bias value
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void FloatType::setEbias( size_t ebias ) const
+void FloatType::setEbias(size_t ebias) const
{
- herr_t ret_value = H5Tset_ebias( id, ebias );
- if( ret_value < 0 )
- {
- throw DataTypeIException("FloatType::setEbias", "H5Tset_ebias failed");
- }
+ herr_t ret_value = H5Tset_ebias(id, ebias);
+ if (ret_value < 0)
+ {
+ throw DataTypeIException("FloatType::setEbias", "H5Tset_ebias failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: FloatType::getNorm
-///\brief Retrieves mantissa normalization of a floating-point datatype.
-///\param norm_string - OUT: Text string of the normalization type
-///\return Valid normalization type, which can be:
-/// \li \c H5T_NORM_IMPLIED (0) - MSB of mantissa is not stored
-/// \li \c H5T_NORM_MSBSET (1) - MSB of mantissa is always 1
-/// \li \c H5T_NORM_NONE (2) - Mantissa is not normalized
-///\exception H5::DataTypeIException
+// Function: FloatType::getNorm
+///\brief Retrieves mantissa normalization of a floating-point datatype.
+///\param norm_string - OUT: Text string of the normalization type
+///\return Valid normalization type, which can be:
+/// \li \c H5T_NORM_IMPLIED (0) - MSB of mantissa is not stored
+/// \li \c H5T_NORM_MSBSET (1) - MSB of mantissa is always 1
+/// \li \c H5T_NORM_NONE (2) - Mantissa is not normalized
+///\exception H5::DataTypeIException
///\par Description
-/// For your convenience, this function also provides the text
-/// string of the returned normalization type, via parameter
-/// \a norm_string.
-// Programmer Binh-Minh Ribler - 2000
+/// For your convenience, this function also provides the text
+/// string of the returned normalization type, via parameter
+/// \a norm_string.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-H5T_norm_t FloatType::getNorm( H5std_string& norm_string ) const
+H5T_norm_t FloatType::getNorm(H5std_string& norm_string) const
{
- H5T_norm_t norm = H5Tget_norm( id ); // C routine
- // Returns a valid normalization type if successful
- if( norm == H5T_NORM_ERROR )
- {
- throw DataTypeIException("FloatType::getNorm", "H5Tget_norm failed - returned H5T_NORM_ERROR");
- }
- if( norm == H5T_NORM_IMPLIED )
- norm_string = "H5T_NORM_IMPLIED (0)";
- else if( norm == H5T_NORM_MSBSET )
- norm_string = "H5T_NORM_MSBSET (1)";
- else if( norm == H5T_NORM_NONE )
- norm_string = "H5T_NORM_NONE (2)";
- return( norm );
+ H5T_norm_t norm = H5Tget_norm(id); // C routine
+ // Returns a valid normalization type if successful
+ if (norm == H5T_NORM_ERROR)
+ {
+ throw DataTypeIException("FloatType::getNorm", "H5Tget_norm failed - returned H5T_NORM_ERROR");
+ }
+ if (norm == H5T_NORM_IMPLIED)
+ norm_string = "H5T_NORM_IMPLIED (0)";
+ else if (norm == H5T_NORM_MSBSET)
+ norm_string = "H5T_NORM_MSBSET (1)";
+ else if (norm == H5T_NORM_NONE)
+ norm_string = "H5T_NORM_NONE (2)";
+ return(norm);
}
//--------------------------------------------------------------------------
-// Function: FloatType::setNorm
-///\brief Sets the mantissa normalization of a floating-point datatype.
-///\param norm - IN: Mantissa normalization type
-///\exception H5::DataTypeIException
+// Function: FloatType::setNorm
+///\brief Sets the mantissa normalization of a floating-point datatype.
+///\param norm - IN: Mantissa normalization type
+///\exception H5::DataTypeIException
///\par Description
-/// Valid values for normalization type include:
-/// \li \c H5T_NORM_IMPLIED (0) - MSB of mantissa is not stored
-/// \li \c H5T_NORM_MSBSET (1) - MSB of mantissa is always 1
-/// \li \c H5T_NORM_NONE (2) - Mantissa is not normalized
-// Programmer Binh-Minh Ribler - 2000
+/// Valid values for normalization type include:
+/// \li \c H5T_NORM_IMPLIED (0) - MSB of mantissa is not stored
+/// \li \c H5T_NORM_MSBSET (1) - MSB of mantissa is always 1
+/// \li \c H5T_NORM_NONE (2) - Mantissa is not normalized
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void FloatType::setNorm( H5T_norm_t norm ) const
+void FloatType::setNorm(H5T_norm_t norm) const
{
- herr_t ret_value = H5Tset_norm( id, norm );
- if( ret_value < 0 )
- {
- throw DataTypeIException("FloatType::setNorm", "H5Tset_norm failed");
- }
+ herr_t ret_value = H5Tset_norm(id, norm);
+ if (ret_value < 0)
+ {
+ throw DataTypeIException("FloatType::setNorm", "H5Tset_norm failed");
+ }
}
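//--------------------------------------------------------------------------
// Usage sketch for getNorm()/setNorm() on a modifiable copy of a
// predefined type; the set call simply re-requests the IEEE default.
//--------------------------------------------------------------------------
#include <iostream>
#include "H5Cpp.h"

int main()
{
    H5::FloatType ftype(H5::PredType::IEEE_F64LE);

    H5std_string norm_text;
    ftype.getNorm(norm_text);                 // e.g. "H5T_NORM_IMPLIED (0)"
    std::cout << "normalization: " << norm_text << std::endl;

    ftype.setNorm(H5T_NORM_IMPLIED);          // MSB of mantissa is implied
    return 0;
}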
//--------------------------------------------------------------------------
-// Function: FloatType::getInpad
-///\brief Retrieves the internal padding type for unused bits in
-/// this floating-point datatypes.
-///\return Internal padding type, which can be:
-/// \li \c H5T_PAD_ZERO (0) - Set background to zeros
-/// \li \c H5T_PAD_ONE (1) - Set background to ones
-/// \li \c H5T_PAD_BACKGROUND (2) - Leave background alone
-///\exception H5::DataTypeIException
+// Function: FloatType::getInpad
+///\brief Retrieves the internal padding type for unused bits in
+///             this floating-point datatype.
+///\return Internal padding type, which can be:
+/// \li \c H5T_PAD_ZERO (0) - Set background to zeros
+/// \li \c H5T_PAD_ONE (1) - Set background to ones
+/// \li \c H5T_PAD_BACKGROUND (2) - Leave background alone
+///\exception H5::DataTypeIException
///\par Description
-/// For your convenience, this function also provides the text
-/// string of the returned internal padding type, via parameter
-/// \a pad_string.
-// Programmer Binh-Minh Ribler - 2000
+/// For your convenience, this function also provides the text
+/// string of the returned internal padding type, via parameter
+/// \a pad_string.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-H5T_pad_t FloatType::getInpad( H5std_string& pad_string ) const
+H5T_pad_t FloatType::getInpad(H5std_string& pad_string) const
{
- H5T_pad_t pad_type = H5Tget_inpad( id );
- // Returns a valid padding type if successful
- if( pad_type == H5T_PAD_ERROR )
- {
- throw DataTypeIException("FloatType::getInpad", "H5Tget_inpad failed - returned H5T_PAD_ERROR");
- }
- if( pad_type == H5T_PAD_ZERO )
- pad_string = "H5T_PAD_ZERO (0)";
- else if( pad_type == H5T_PAD_ONE )
- pad_string = "H5T_PAD_ONE (1)";
- else if( pad_type == H5T_PAD_BACKGROUND )
- pad_string = "H5T_PAD_BACKGROUD (2)";
- return( pad_type );
+ H5T_pad_t pad_type = H5Tget_inpad(id);
+ // Returns a valid padding type if successful
+ if (pad_type == H5T_PAD_ERROR)
+ {
+ throw DataTypeIException("FloatType::getInpad", "H5Tget_inpad failed - returned H5T_PAD_ERROR");
+ }
+ if (pad_type == H5T_PAD_ZERO)
+ pad_string = "H5T_PAD_ZERO (0)";
+ else if (pad_type == H5T_PAD_ONE)
+ pad_string = "H5T_PAD_ONE (1)";
+ else if (pad_type == H5T_PAD_BACKGROUND)
+        pad_string = "H5T_PAD_BACKGROUND (2)";
+ return(pad_type);
}
//--------------------------------------------------------------------------
-// Function: FloatType::setInpad
-///\brief Fills unused internal floating point bits.
-///\param inpad - IN: Internal padding type
-///\exception H5::DataTypeIException
+// Function: FloatType::setInpad
+///\brief Fills unused internal floating point bits.
+///\param inpad - IN: Internal padding type
+///\exception H5::DataTypeIException
///\par Description
-/// If any internal bits of a floating point type are unused
-/// (that is, those significant bits which are not part of the
-/// sign, exponent, or mantissa), then they will be filled
-/// according to the padding value provided by \a inpad.
+/// If any internal bits of a floating point type are unused
+/// (that is, those significant bits which are not part of the
+/// sign, exponent, or mantissa), then they will be filled
+/// according to the padding value provided by \a inpad.
///\par
-/// Valid values for normalization type include:
-/// \li \c H5T_PAD_ZERO (0) - Set background to zeros
-/// \li \c H5T_PAD_ONE (1) - Set background to ones
-/// \li \c H5T_PAD_BACKGROUND (2) - Leave background alone
-// Programmer Binh-Minh Ribler - 2000
+///             Valid values for padding type include:
+/// \li \c H5T_PAD_ZERO (0) - Set background to zeros
+/// \li \c H5T_PAD_ONE (1) - Set background to ones
+/// \li \c H5T_PAD_BACKGROUND (2) - Leave background alone
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void FloatType::setInpad( H5T_pad_t inpad ) const
+void FloatType::setInpad(H5T_pad_t inpad) const
{
- herr_t ret_value = H5Tset_inpad( id, inpad );
- if( ret_value < 0 )
- {
- throw DataTypeIException("FloatType::setInpad", "H5Tset_inpad failed");
- }
+ herr_t ret_value = H5Tset_inpad(id, inpad);
+ if (ret_value < 0)
+ {
+ throw DataTypeIException("FloatType::setInpad", "H5Tset_inpad failed");
+ }
}
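//--------------------------------------------------------------------------
// Usage sketch for setInpad()/getInpad(): choose zero-fill for any unused
// internal bits of a copied predefined float type and read the choice back.
//--------------------------------------------------------------------------
#include <iostream>
#include "H5Cpp.h"

int main()
{
    H5::FloatType ftype(H5::PredType::IEEE_F32LE);

    ftype.setInpad(H5T_PAD_ZERO);             // fill unused bits with zeros

    H5std_string pad_text;
    ftype.getInpad(pad_text);
    std::cout << "internal padding: " << pad_text << std::endl;
    return 0;
}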
//--------------------------------------------------------------------------
-// Function: FloatType destructor
-///\brief Noop destructor.
-// Programmer Binh-Minh Ribler - 2000
+// Function: FloatType destructor
+///\brief Noop destructor.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
FloatType::~FloatType() {}
diff --git a/c++/src/H5FloatType.h b/c++/src/H5FloatType.h
index c0119eb..e84f50b 100644
--- a/c++/src/H5FloatType.h
+++ b/c++/src/H5FloatType.h
@@ -6,12 +6,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef __H5FloatType_H
@@ -27,50 +25,56 @@ namespace H5 {
*/
class H5_DLLCPP FloatType : public AtomType {
public:
- // Creates a floating-point type using a predefined type.
- FloatType( const PredType& pred_type );
+ // Creates a floating-point type using a predefined type.
+ FloatType(const PredType& pred_type);
- // Gets the floating-point datatype of the specified dataset.
- FloatType( const DataSet& dataset );
+ // Gets the floating-point datatype of the specified dataset.
+ FloatType(const DataSet& dataset);
- // Retrieves the exponent bias of a floating-point type.
- size_t getEbias() const;
+ // Constructors that open an HDF5 float datatype, given a location.
+ FloatType(const H5Location& loc, const char* name);
+ FloatType(const H5Location& loc, const H5std_string& name);
- // Sets the exponent bias of a floating-point type.
- void setEbias( size_t ebias ) const;
+ // Retrieves the exponent bias of a floating-point type.
+ size_t getEbias() const;
- // Retrieves floating point datatype bit field information.
- void getFields( size_t& spos, size_t& epos, size_t& esize, size_t& mpos, size_t& msize ) const;
+ // Sets the exponent bias of a floating-point type.
+ void setEbias(size_t ebias) const;
- // Sets locations and sizes of floating point bit fields.
- void setFields( size_t spos, size_t epos, size_t esize, size_t mpos, size_t msize ) const;
+ // Retrieves floating point datatype bit field information.
+ void getFields(size_t& spos, size_t& epos, size_t& esize, size_t& mpos, size_t& msize) const;
- // Retrieves the internal padding type for unused bits in floating-point datatypes.
- H5T_pad_t getInpad( H5std_string& pad_string ) const;
+ // Sets locations and sizes of floating point bit fields.
+ void setFields(size_t spos, size_t epos, size_t esize, size_t mpos, size_t msize) const;
- // Fills unused internal floating point bits.
- void setInpad( H5T_pad_t inpad ) const;
+ // Retrieves the internal padding type for unused bits in floating-point datatypes.
+ H5T_pad_t getInpad(H5std_string& pad_string) const;
- // Retrieves mantissa normalization of a floating-point datatype.
- H5T_norm_t getNorm( H5std_string& norm_string ) const;
+ // Fills unused internal floating point bits.
+ void setInpad(H5T_pad_t inpad) const;
- // Sets the mantissa normalization of a floating-point datatype.
- void setNorm( H5T_norm_t norm ) const;
+ // Retrieves mantissa normalization of a floating-point datatype.
+ H5T_norm_t getNorm(H5std_string& norm_string) const;
- ///\brief Returns this class name.
- virtual H5std_string fromClass () const { return("FloatType"); }
+ // Sets the mantissa normalization of a floating-point datatype.
+ void setNorm(H5T_norm_t norm) const;
- // Default constructor
- FloatType();
+ ///\brief Returns this class name.
+ virtual H5std_string fromClass () const { return("FloatType"); }
- // Creates a floating-point datatype using an existing id.
- FloatType( const hid_t existing_id );
+ // Default constructor
+ FloatType();
- // Copy constructor: makes a copy of the original FloatType object.
- FloatType( const FloatType& original );
+ // Creates a floating-point datatype using an existing id.
+ FloatType(const hid_t existing_id);
+
+ // Copy constructor: makes a copy of the original FloatType object.
+ FloatType(const FloatType& original);
+
+ // Noop destructor.
+ virtual ~FloatType();
+
+}; // end of FloatType
+} // namespace H5
- // Noop destructor.
- virtual ~FloatType();
-};
-}
#endif // __H5FloatType_H
diff --git a/c++/src/H5Group.cpp b/c++/src/H5Group.cpp
index 20f14a3..994d9ff 100644
--- a/c++/src/H5Group.cpp
+++ b/c++/src/H5Group.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifdef OLD_HEADER_FILENAME
@@ -29,114 +27,174 @@
#include "H5OcreatProp.h"
#include "H5DcreatProp.h"
#include "H5DxferProp.h"
+#include "H5LaccProp.h"
#include "H5Location.h"
#include "H5Object.h"
#include "H5AbstractDs.h"
#include "H5DataSpace.h"
#include "H5DataSet.h"
+#include "H5CommonFG.h"
#include "H5Attribute.h"
#include "H5Group.h"
#include "H5File.h"
#include "H5Alltypes.h"
-#include "H5private.h" // for HDstrcpy
namespace H5 {
using std::cerr;
using std::endl;
//--------------------------------------------------------------------------
-// Function: Group default constructor
-///\brief Default constructor: creates a stub Group.
-// Programmer Binh-Minh Ribler - 2000
+// Function: Group default constructor
+///\brief Default constructor: creates a stub Group.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-Group::Group() : H5Object(), id(H5I_INVALID_HID) {}
+Group::Group() : H5Object(), CommonFG(), id(H5I_INVALID_HID) {}
//--------------------------------------------------------------------------
-// Function: Group copy constructor
-///\brief Copy constructor: makes a copy of the original Group object.
-///\param original - IN: Original group to copy
-// Programmer Binh-Minh Ribler - 2000
+// Function: Group copy constructor
+///\brief Copy constructor: makes a copy of the original Group object.
+///\param original - IN: Original group to copy
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-Group::Group(const Group& original) : H5Object(), id(original.id)
+Group::Group(const Group& original) : H5Object(), CommonFG(), id(original.id)
{
incRefCount(); // increment number of references to this id
}
//--------------------------------------------------------------------------
-// Function: Group::getLocId
-// Purpose: Get the id of this group
-// Programmer Binh-Minh Ribler - 2000
+// Function: Group::getObjId
+///\brief Opens an object via object header.
+///\param obj_name - IN: Path to the object
+///\param plist - IN: Access property list for the link pointing to
+/// the object
+///\exception H5::FileIException or H5::GroupIException
+///\par Description
+/// This function opens an object in a group or file, using
+/// H5Oopen. Thus, an object can be opened without knowing
+/// the object's type.
+// Programmer Binh-Minh Ribler - March, 2017
+//--------------------------------------------------------------------------
+hid_t Group::getObjId(const char* obj_name, const PropList& plist) const
+{
+ hid_t ret_value = H5Oopen(getId(), obj_name, plist.getId());
+ if (ret_value < 0)
+ {
+ throwException("Group::getObjId", "H5Oopen failed");
+ }
+ return(ret_value);
+}
+
+//--------------------------------------------------------------------------
+// Function: Group::getObjId
+///\brief This is an overloaded member function, provided for convenience.
+/// It takes a reference to a \c H5std_string for the object's name.
+///\param obj_name - IN: Path to the object
+///\param plist - IN: Access property list for the link pointing to
+/// the object
+///\exception H5::FileIException or H5::GroupIException
+// Programmer Binh-Minh Ribler - March, 2017
+//--------------------------------------------------------------------------
+hid_t Group::getObjId(const H5std_string& obj_name, const PropList& plist) const
+{
+ return(getObjId(obj_name.c_str(), plist));
+}
+
+//--------------------------------------------------------------------------
+// Function: Group::closeObjId
+///\brief Closes an object, which was opened with Group::getObjId
+///\exception H5::FileIException or H5::GroupIException
+// Programmer Binh-Minh Ribler - March, 2017
+//--------------------------------------------------------------------------
+void Group::closeObjId(hid_t obj_id) const
+{
+ herr_t ret_value = H5Oclose(obj_id);
+ if (ret_value < 0)
+ {
+ throwException("Group::closeObjId", "H5Oclose failed");
+ }
+}
+
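//--------------------------------------------------------------------------
// Usage sketch for the new getObjId()/closeObjId() pair: open an object by
// path without knowing its type, then close the raw identifier. The file
// and object names are placeholders.
//--------------------------------------------------------------------------
#include "H5Cpp.h"

int main()
{
    H5::H5File file("sample.h5", H5F_ACC_RDONLY);
    H5::Group root = file.openGroup("/");

    // H5Oopen accepts a dataset, group, or committed datatype path.
    hid_t obj_id = root.getObjId("some_object", H5::PropList::DEFAULT);

    // ... the raw hid_t can be passed to C-API calls here ...

    root.closeObjId(obj_id);
    return 0;
}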
+//--------------------------------------------------------------------------
+// Function: Group::getLocId
+// Purpose: Get the id of this group
+// Programmer Binh-Minh Ribler - 2000
// Description
-// This function is a redefinition of CommonFG::getLocId. It
-// is used by CommonFG member functions to get the file id.
+// This function is a redefinition of CommonFG::getLocId. It
+// is used by CommonFG member functions to get the file id.
// Deprecated:
-// After HDFFV-9920, the Group's methods can use getId() and getLocId()
-// is kept for backward compatibility. Aug 18, 2016 -BMR
+// Aug 18, 2016 -BMR
+// After HDFFV-9920, the Group's methods can use getId() and
+// getLocId() is kept for backward compatibility.
//--------------------------------------------------------------------------
hid_t Group::getLocId() const
{
- return( getId() );
+ return(getId());
}
//--------------------------------------------------------------------------
-// Function: Group overloaded constructor
-///\brief Creates a Group object using the id of an existing group.
-///\param existing_id - IN: Id of an existing group
-// Programmer Binh-Minh Ribler - 2000
+// Function: Group overloaded constructor
+///\brief Creates a Group object using the id of an existing group.
+///\param existing_id - IN: Id of an existing group
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-Group::Group(const hid_t existing_id) : H5Object(), id(existing_id)
+Group::Group(const hid_t existing_id) : H5Object(), CommonFG(), id(existing_id)
{
incRefCount(); // increment number of references to this id
}
//--------------------------------------------------------------------------
-// Function: Group overload constructor - dereference
-///\brief Given a reference, ref, to an hdf5 group, creates a Group object
-///\param loc - IN: Specifying location referenced object is in
-///\param ref - IN: Reference pointer
-///\param ref_type - IN: Reference type - default to H5R_OBJECT
-///\param plist - IN: Property list - default to PropList::DEFAULT
-///\exception H5::ReferenceException
+// Function: Group overload constructor - dereference
+///\brief Given a reference, ref, to an hdf5 group, creates a Group object
+///\param loc - IN: Specifying location referenced object is in
+///\param ref - IN: Reference pointer
+///\param ref_type - IN: Reference type - default to H5R_OBJECT
+///\param plist - IN: Property list - default to PropList::DEFAULT
+///\exception H5::ReferenceException
///\par Description
-/// \c obj can be DataSet, Group, or named DataType, that
-/// is a datatype that has been named by DataType::commit.
-// Programmer Binh-Minh Ribler - Oct, 2006
+/// \c obj can be DataSet, Group, or named DataType, that
+/// is a datatype that has been named by DataType::commit.
+// Programmer Binh-Minh Ribler - Oct, 2006
//--------------------------------------------------------------------------
-Group::Group(const H5Location& loc, const void* ref, H5R_type_t ref_type, const PropList& plist) : H5Object(), id(H5I_INVALID_HID)
+Group::Group(const H5Location& loc, const void* ref, H5R_type_t ref_type, const PropList& plist) : H5Object(), CommonFG(), id(H5I_INVALID_HID)
{
id = H5Location::p_dereference(loc.getId(), ref, ref_type, plist, "constructor - by dereference");
}
//--------------------------------------------------------------------------
-// Function: Group overload constructor - dereference
-///\brief Given a reference, ref, to an hdf5 group, creates a Group object
-///\param attr - IN: Specifying location where the referenced object is in
-///\param ref - IN: Reference pointer
-///\param ref_type - IN: Reference type - default to H5R_OBJECT
-///\param plist - IN: Property list - default to PropList::DEFAULT
-///\exception H5::ReferenceException
-// Programmer Binh-Minh Ribler - Oct, 2006
+// Function: Group overload constructor - dereference
+// brief       Given a reference, ref, to an hdf5 group, creates a Group object
+// param       attr - IN: Specifying location where the referenced object is in
+// param ref - IN: Reference pointer
+// param ref_type - IN: Reference type - default to H5R_OBJECT
+// param plist - IN: Property list - default to PropList::DEFAULT
+// exception H5::ReferenceException
+// Programmer Binh-Minh Ribler - Oct, 2006
+// Modification
+// Mar, 2017
+// Removed in 1.10.1 because H5Location is Attribute's baseclass
+// now. -BMR
//--------------------------------------------------------------------------
-Group::Group(const Attribute& attr, const void* ref, H5R_type_t ref_type, const PropList& plist) : H5Object(), id(H5I_INVALID_HID)
+/* Group::Group(const Attribute& attr, const void* ref, H5R_type_t ref_type, const PropList& plist) : H5Object(), id(H5I_INVALID_HID)
{
id = H5Location::p_dereference(attr.getId(), ref, ref_type, plist, "constructor - by dereference");
}
+ */
//--------------------------------------------------------------------------
// Function: Group::getId
-///\brief Get the id of this group
-///\return Group identifier
+///\brief Get the id of this group
+///\return Group identifier
// Modification:
// May 2008 - BMR
-// Class hierarchy is revised to address bugzilla 1068. Class
-// AbstractDS and Attribute are moved out of H5Object. In
-// addition, member IdComponent::id is moved into subclasses, and
-// IdComponent::getId now becomes pure virtual function.
+// Class hierarchy is revised to address bugzilla 1068. Class
+// AbstractDS and Attribute are moved out of H5Object. In
+// addition, member IdComponent::id is moved into subclasses, and
+// IdComponent::getId now becomes pure virtual function.
// Programmer Binh-Minh Ribler - May, 2008
//--------------------------------------------------------------------------
hid_t Group::getId() const
{
- return(id);
+ return(id);
}
#ifndef DOXYGEN_SHOULD_SKIP_THIS
@@ -145,11 +203,11 @@ hid_t Group::getId() const
///\brief Sets the identifier of this object to a new value.
///
///\exception H5::IdComponentException when the attempt to close the HDF5
-/// object fails
+/// object fails
// Description:
-// The underlaying reference counting in the C library ensures
-// that the current valid id of this object is properly closed.
-// Then the object's id is reset to the new id.
+// The underlying reference counting in the C library ensures
+// that the current valid id of this object is properly closed.
+// Then the object's id is reset to the new id.
// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
void Group::p_setId(const hid_t new_id)
@@ -161,69 +219,69 @@ void Group::p_setId(const hid_t new_id)
catch (Exception& close_error) {
throwException("Group::p_setId", close_error.getDetailMsg());
}
- // reset object's id to the given id
- id = new_id;
+ // reset object's id to the given id
+ id = new_id;
}
#endif // DOXYGEN_SHOULD_SKIP_THIS
//--------------------------------------------------------------------------
-// Function: Group::close
-///\brief Closes this group.
+// Function: Group::close
+///\brief Closes this group.
///
-///\exception H5::GroupIException
-// Programmer Binh-Minh Ribler - Mar 9, 2005
+///\exception H5::GroupIException
+// Programmer Binh-Minh Ribler - Mar 9, 2005
//--------------------------------------------------------------------------
void Group::close()
{
if (p_valid_id(id))
{
- herr_t ret_value = H5Gclose( id );
- if( ret_value < 0 )
- {
- throwException("Group::close", "H5Gclose failed");
- }
- // reset the id
- id = H5I_INVALID_HID;
+ herr_t ret_value = H5Gclose(id);
+ if (ret_value < 0)
+ {
+ throwException("Group::close", "H5Gclose failed");
+ }
+ // reset the id
+ id = H5I_INVALID_HID;
}
}
//--------------------------------------------------------------------------
-// Function: Group::throwException
-///\brief Throws H5::GroupIException.
-///\param func_name - Name of the function where failure occurs
-///\param msg - Message describing the failure
-///\exception H5::GroupIException
+// Function: Group::throwException
+///\brief Throws H5::GroupIException.
+///\param func_name - Name of the function where failure occurs
+///\param msg - Message describing the failure
+///\exception H5::GroupIException
// Description
-// This function is also used in H5Location's methods so that
-// proper exception can be thrown for file or group. The
-// "Group::" will be inserted to indicate the function called is
-// an implementation of Group.
-// Programmer Binh-Minh Ribler - 2000
+// This function is also used in H5Location's methods so that
+// proper exception can be thrown for file or group. The
+// "Group::" will be inserted to indicate the function called is
+// an implementation of Group.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
void Group::throwException(const H5std_string& func_name, const H5std_string& msg) const
{
- H5std_string full_name = func_name;
- full_name.insert(0, "Group::");
- throw GroupIException(full_name, msg);
+ H5std_string full_name = func_name;
+ full_name.insert(0, "Group::");
+ throw GroupIException(full_name, msg);
}
//--------------------------------------------------------------------------
-// Function: Group destructor
-///\brief Properly terminates access to this group.
-// Programmer Binh-Minh Ribler - 2000
+// Function: Group destructor
+///\brief Properly terminates access to this group.
+// Programmer Binh-Minh Ribler - 2000
// Modification
-// - Replaced resetIdComponent() with decRefCount() to use C
-// library ID reference counting mechanism - BMR, Feb 20, 2005
-// - Replaced decRefCount with close() to let the C library
-// handle the reference counting - BMR, Jun 1, 2006
+// - Replaced resetIdComponent() with decRefCount() to use C
+// library ID reference counting mechanism - BMR, Feb 20, 2005
+// - Replaced decRefCount with close() to let the C library
+// handle the reference counting - BMR, Jun 1, 2006
//--------------------------------------------------------------------------
Group::~Group()
{
try {
- close();
+ close();
}
catch (Exception& close_error) {
- cerr << "Group::~Group - " << close_error.getDetailMsg() << endl;
+ cerr << "Group::~Group - " << close_error.getDetailMsg() << endl;
}
}
diff --git a/c++/src/H5Group.h b/c++/src/H5Group.h
index 5f4b0f3..a7e1f7c 100644
--- a/c++/src/H5Group.h
+++ b/c++/src/H5Group.h
@@ -6,12 +6,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef __Group_H
@@ -28,56 +26,65 @@ namespace H5 {
class ArrayType;
class VarLenType;
-class H5_DLLCPP Group : public H5Object {
+class H5_DLLCPP Group : public H5Object, public CommonFG {
public:
- // Group constructor to create a group or file (aka root group).
- Group(const char* name, size_t size_hint = 0);
- Group(const H5std_string& name, size_t size_hint = 0);
+ // Group constructor to create a group or file (aka root group).
+ Group(const char* name, size_t size_hint = 0);
+ Group(const H5std_string& name, size_t size_hint = 0);
- // Group constructor to open a group or file (aka root group).
- Group(const char* name);
- Group(const H5std_string& name);
+ // Group constructor to open a group or file (aka root group).
+ Group(const char* name);
+ Group(const H5std_string& name);
- // Close this group.
- virtual void close();
+ // Close this group.
+ virtual void close();
- ///\brief Returns this class name.
- virtual H5std_string fromClass () const { return("Group"); }
+ ///\brief Returns this class name.
+ virtual H5std_string fromClass () const { return("Group"); }
- // Throw group exception.
- virtual void throwException(const H5std_string& func_name, const H5std_string& msg) const;
+ // Throw group exception.
+ virtual void throwException(const H5std_string& func_name, const H5std_string& msg) const;
- // for CommonFG to get the file id.
- virtual hid_t getLocId() const;
+ // for CommonFG to get the file id.
+ virtual hid_t getLocId() const;
- // Creates a group by way of dereference.
- Group(const H5Location& loc, const void* ref, H5R_type_t ref_type = H5R_OBJECT, const PropList& plist = PropList::DEFAULT);
- Group(const Attribute& attr, const void* ref, H5R_type_t ref_type = H5R_OBJECT, const PropList& plist = PropList::DEFAULT);
+ // Creates a group by way of dereference.
+ Group(const H5Location& loc, const void* ref, H5R_type_t ref_type = H5R_OBJECT, const PropList& plist = PropList::DEFAULT);
+ // Removed in 1.10.1, because H5Location is baseclass
+// Group(const Attribute& attr, const void* ref, H5R_type_t ref_type = H5R_OBJECT, const PropList& plist = PropList::DEFAULT);
- // default constructor
- Group();
+ // Opens an object within a group or a file, i.e., root group.
+ hid_t getObjId(const char* name, const PropList& plist = PropList::DEFAULT) const;
+ hid_t getObjId(const H5std_string& name, const PropList& plist = PropList::DEFAULT) const;
- // Copy constructor: makes a copy of the original object
- Group(const Group& original);
+ // Closes an object opened by getObjId().
+ void closeObjId(hid_t obj_id) const;
- // Gets the group id.
- virtual hid_t getId() const;
+ // default constructor
+ Group();
- // Destructor
- virtual ~Group();
+ // Copy constructor: makes a copy of the original object
+ Group(const Group& original);
- // Creates a copy of an existing group using its id.
- Group( const hid_t group_id );
+ // Gets the group id.
+ virtual hid_t getId() const;
+
+ // Destructor
+ virtual ~Group();
+
+ // Creates a copy of an existing group using its id.
+ Group(const hid_t group_id);
protected:
#ifndef DOXYGEN_SHOULD_SKIP_THIS
- // Sets the group id.
- virtual void p_setId(const hid_t new_id);
+ // Sets the group id.
+ virtual void p_setId(const hid_t new_id);
#endif // DOXYGEN_SHOULD_SKIP_THIS
private:
- hid_t id; // HDF5 group id
-};
+ hid_t id; // HDF5 group id
+
+}; // end of Group
+} // namespace H5
-}
#endif // __Group_H
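To illustrate the getObjId()/closeObjId() pair declared above, a short hedged sketch follows; the file name and member name are hypothetical:

#include "H5Cpp.h"

void peek_at_member()
{
    H5::H5File file("sample.h5", H5F_ACC_RDONLY);  // hypothetical file
    H5::Group root = file.openGroup("/");

    // Open a member by name; the returned hid_t can be passed to C API calls
    hid_t obj_id = root.getObjId("dset1");

    // ... use obj_id, e.g. with H5Oget_info(obj_id, &info) ...

    // Identifiers returned by getObjId() must be released explicitly
    root.closeObjId(obj_id);
}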
diff --git a/c++/src/H5IdComponent.cpp b/c++/src/H5IdComponent.cpp
index d869b07..f9a08cd 100644
--- a/c++/src/H5IdComponent.cpp
+++ b/c++/src/H5IdComponent.cpp
@@ -5,25 +5,24 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <string>
+#include "H5private.h" // for HDmemset
#include "H5Include.h"
#include "H5Exception.h"
#include "H5Library.h"
#include "H5IdComponent.h"
#include "H5DataSpace.h"
-#include "H5private.h" // for HDmemset
namespace H5 {
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
// This flag indicates whether H5Library::initH5cpp has been called to register
// the terminating functions with atexit()
bool IdComponent::H5cppinit = false;
@@ -32,23 +31,24 @@ bool IdComponent::H5cppinit = false;
// Subclasses that have global constants use it. This is a temporary
// work-around in 1.8.16. It will be removed after HDFFV-9540 is fixed.
bool IdComponent::H5dontAtexit_called = false;
+#endif // DOXYGEN_SHOULD_SKIP_THIS
//--------------------------------------------------------------------------
-// Function: IdComponent::incRefCount
-///\brief Increment reference counter for a given id.
-// Programmer Binh-Minh Ribler - May 2005
+// Function: IdComponent::incRefCount
+///\brief Increment reference counter for a given id.
+// Programmer Binh-Minh Ribler - May 2005
//--------------------------------------------------------------------------
void IdComponent::incRefCount(const hid_t obj_id) const
{
if (p_valid_id(obj_id))
- if (H5Iinc_ref(obj_id) < 0)
- throw IdComponentException(inMemFunc("incRefCount"), "incrementing object ref count failed");
+ if (H5Iinc_ref(obj_id) < 0)
+ throw IdComponentException(inMemFunc("incRefCount"), "incrementing object ref count failed");
}
//--------------------------------------------------------------------------
-// Function: IdComponent::incRefCount
-///\brief Increment reference counter for the id of this object.
-// Programmer Binh-Minh Ribler - 2000
+// Function: IdComponent::incRefCount
+///\brief Increment reference counter for the id of this object.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
void IdComponent::incRefCount() const
{
@@ -56,31 +56,31 @@ void IdComponent::incRefCount() const
}
//--------------------------------------------------------------------------
-// Function: IdComponent::decRefCount
-///\brief Decrement reference counter for a given id.
-// Programmer Binh-Minh Ribler - May 2005
+// Function: IdComponent::decRefCount
+///\brief Decrement reference counter for a given id.
+// Programmer Binh-Minh Ribler - May 2005
// Modification:
-// Added the check for ref counter to give a little more info
-// on why H5Idec_ref fails in some cases - BMR 5/19/2005
+// Added the check for ref counter to give a little more info
+// on why H5Idec_ref fails in some cases - BMR 5/19/2005
//--------------------------------------------------------------------------
void IdComponent::decRefCount(const hid_t obj_id) const
{
if (p_valid_id(obj_id))
- if (H5Idec_ref(obj_id) < 0)
- {
- if (H5Iget_ref(obj_id) <= 0)
- throw IdComponentException(inMemFunc("decRefCount"),
- "object ref count is 0 or negative");
- else
- throw IdComponentException(inMemFunc("decRefCount"),
- "decrementing object ref count failed");
- }
+ if (H5Idec_ref(obj_id) < 0)
+ {
+ if (H5Iget_ref(obj_id) <= 0)
+ throw IdComponentException(inMemFunc("decRefCount"),
+ "object ref count is 0 or negative");
+ else
+ throw IdComponentException(inMemFunc("decRefCount"),
+ "decrementing object ref count failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: IdComponent::decRefCount
-///\brief Decrement reference counter for the id of this object.
-// Programmer Binh-Minh Ribler - 2000
+// Function: IdComponent::decRefCount
+///\brief Decrement reference counter for the id of this object.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
void IdComponent::decRefCount() const
{
@@ -88,28 +88,28 @@ void IdComponent::decRefCount() const
}
//--------------------------------------------------------------------------
-// Function: IdComponent::getCounter
-///\brief Returns the reference counter for a given id.
-///\return Reference count
-// Programmer Binh-Minh Ribler - May 2005
+// Function: IdComponent::getCounter
+///\brief Returns the reference counter for a given id.
+///\return Reference count
+// Programmer Binh-Minh Ribler - May 2005
//--------------------------------------------------------------------------
int IdComponent::getCounter(const hid_t obj_id) const
{
int counter = 0;
if (p_valid_id(obj_id))
{
- counter = H5Iget_ref(obj_id);
- if (counter < 0)
- throw IdComponentException(inMemFunc("incRefCount"), "getting object ref count failed - negative");
+ counter = H5Iget_ref(obj_id);
+ if (counter < 0)
+ throw IdComponentException(inMemFunc("getCounter"), "getting object ref count failed - negative");
}
return (counter);
}
//--------------------------------------------------------------------------
-// Function: IdComponent::getCounter
-///\brief Returns the reference counter for the id of this object.
-///\return Reference count
-// Programmer Binh-Minh Ribler - 2000
+// Function: IdComponent::getCounter
+///\brief Returns the reference counter for the id of this object.
+///\return Reference count
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
int IdComponent::getCounter() const
{
@@ -117,43 +117,43 @@ int IdComponent::getCounter() const
}
//--------------------------------------------------------------------------
-// Function: getHDFObjType (static)
-///\brief Given an id, returns the type of the object.
-///\return a valid HDF object type, which may be one of the following:
-/// \li \c H5I_FILE
-/// \li \c H5I_GROUP
-/// \li \c H5I_DATATYPE
-/// \li \c H5I_DATASPACE
-/// \li \c H5I_DATASET
-/// \li \c H5I_ATTR
-/// \li or \c H5I_BADID, if no valid type can be determined or the
-/// input object id is invalid.
+// Function: getHDFObjType (static)
+///\brief Given an id, returns the type of the object.
+///\return a valid HDF object type, which may be one of the following:
+/// \li \c H5I_FILE
+/// \li \c H5I_GROUP
+/// \li \c H5I_DATATYPE
+/// \li \c H5I_DATASPACE
+/// \li \c H5I_DATASET
+/// \li \c H5I_ATTR
+/// \li or \c H5I_BADID, if no valid type can be determined or the
+/// input object id is invalid.
// Programmer Binh-Minh Ribler - Jul, 2005
//--------------------------------------------------------------------------
H5I_type_t IdComponent::getHDFObjType(const hid_t obj_id)
{
if (obj_id <= 0)
- return H5I_BADID; // invalid
+ return H5I_BADID; // invalid
H5I_type_t id_type = H5Iget_type(obj_id);
if (id_type <= H5I_BADID || id_type >= H5I_NTYPES)
- return H5I_BADID; // invalid
+ return H5I_BADID; // invalid
else
- return id_type; // valid type
+ return id_type; // valid type
}
//--------------------------------------------------------------------------
-// Function: getHDFObjType
-///\brief Returns the type of the object. It is an overloaded function
-/// of the above function.
-///\return a valid HDF object type, which may be one of the following:
-/// \li \c H5I_FILE
-/// \li \c H5I_GROUP
-/// \li \c H5I_DATATYPE
-/// \li \c H5I_DATASPACE
-/// \li \c H5I_DATASET
-/// \li \c H5I_ATTR
-/// \li or \c H5I_BADID, if no valid type can be determined or the
-/// input object id is invalid.
+// Function: getHDFObjType
+///\brief Returns the type of the object. It is an overloaded version
+/// of the above function.
+///\return a valid HDF object type, which may be one of the following:
+/// \li \c H5I_FILE
+/// \li \c H5I_GROUP
+/// \li \c H5I_DATATYPE
+/// \li \c H5I_DATASPACE
+/// \li \c H5I_DATASET
+/// \li \c H5I_ATTR
+/// \li or \c H5I_BADID, if no valid type can be determined or the
+/// input object id is invalid.
// Programmer Binh-Minh Ribler - Mar, 2014
//--------------------------------------------------------------------------
H5I_type_t IdComponent::getHDFObjType() const
@@ -162,74 +162,161 @@ H5I_type_t IdComponent::getHDFObjType() const
}
//--------------------------------------------------------------------------
-// Function: IdComponent::operator=
-///\brief Assignment operator.
-///\param rhs - IN: Reference to the existing object
-///\return Reference to IdComponent instance
-///\exception H5::IdComponentException when attempt to close the HDF5
-/// object fails
+// Function: getNumMembers (static)
+///\brief Returns the number of members of the given type.
+///\return Number of members
+///\par Description
+/// If there is no member of the given type, getNumMembers will
+/// return 0. Valid types are:
+/// \li \c H5I_FILE (= 1)
+/// \li \c H5I_GROUP
+/// \li \c H5I_DATATYPE
+/// \li \c H5I_DATASPACE
+/// \li \c H5I_DATASET
+/// \li \c H5I_ATTR
+/// \li \c H5I_REFERENCE
+/// \li \c H5I_VFL
+/// \li \c H5I_GENPROP_CLS
+/// \li \c H5I_GENPROP_LST
+/// \li \c H5I_ERROR_CLASS
+/// \li \c H5I_ERROR_MSG
+/// \li \c H5I_ERROR_STACK
+// Programmer Binh-Minh Ribler - Feb, 2017
+//--------------------------------------------------------------------------
+hsize_t IdComponent::getNumMembers(H5I_type_t type)
+{
+ hsize_t nmembers = 0;
+ herr_t ret_value = H5Inmembers(type, &nmembers);
+ if (ret_value < 0)
+ throw IdComponentException("getNumMembers", "H5Inmembers failed");
+ else
+ return(nmembers);
+}
+
+//--------------------------------------------------------------------------
+// Function: isValid (static)
+///\brief Checks if the given ID is valid.
+///\return true if the given identifier is valid, and false, otherwise.
+///\par Description
+/// A valid ID is one that is in use and has an application
+/// reference count of at least 1.
+// Programmer Binh-Minh Ribler - Mar 1, 2017
+//--------------------------------------------------------------------------
+bool IdComponent::isValid(hid_t an_id)
+{
+ // Call C function
+ htri_t ret_value = H5Iis_valid(an_id);
+ if (ret_value > 0)
+ return true;
+ else if (ret_value == 0)
+ return false;
+ else // Raise exception when H5Iis_valid returns a negative value
+ throw IdComponentException("isValid", "H5Iis_valid failed");
+}
+
+//--------------------------------------------------------------------------
+// Function: typeExists (static)
+///\brief Queries if a given type is currently registered with the
+/// library.
+///\return true if the given type exists, and false, otherwise.
+///\par Description
+/// Valid types are:
+/// \li \c H5I_FILE (= 1)
+/// \li \c H5I_GROUP
+/// \li \c H5I_DATATYPE
+/// \li \c H5I_DATASPACE
+/// \li \c H5I_DATASET
+/// \li \c H5I_ATTR
+/// \li \c H5I_REFERENCE
+/// \li \c H5I_VFL
+/// \li \c H5I_GENPROP_CLS
+/// \li \c H5I_GENPROP_LST
+/// \li \c H5I_ERROR_CLASS
+/// \li \c H5I_ERROR_MSG
+/// \li \c H5I_ERROR_STACK
+// Programmer Binh-Minh Ribler - Feb, 2017
+//--------------------------------------------------------------------------
+bool IdComponent::typeExists(H5I_type_t type)
+{
+ // Call C function
+ htri_t ret_value = H5Itype_exists(type);
+ if (ret_value > 0)
+ return true;
+ else if (ret_value == 0)
+ return false;
+ else // Raise exception when H5Itype_exists returns a negative value
+ throw IdComponentException("typeExists", "H5Itype_exists failed");
+}
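A brief sketch of the three new static IdComponent queries added above; the identifier passed in and the use of H5I_DATASET are illustrative only:

#include <iostream>
#include "H5Cpp.h"

void report_ids(hid_t some_id)
{
    // Check whether the identifier is in use with a ref count of at least 1
    if (H5::IdComponent::isValid(some_id))
        std::cout << "id " << some_id << " is valid" << std::endl;

    // Count the currently open identifiers of a given type
    if (H5::IdComponent::typeExists(H5I_DATASET))
    {
        hsize_t n = H5::IdComponent::getNumMembers(H5I_DATASET);
        std::cout << "open dataset ids: " << n << std::endl;
    }
}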
+
+//--------------------------------------------------------------------------
+// Function: IdComponent::operator=
+///\brief Assignment operator.
+///\param rhs - IN: Reference to the existing object
+///\return Reference to IdComponent instance
+///\exception H5::IdComponentException when attempt to close the HDF5
+/// object fails
// Description
-// First, close the current valid id of this object. Then
-// copy the id from rhs to this object, and increment the
-// reference counter of the id to indicate that another object
-// is referencing that id.
+// First, close the current valid id of this object. Then
+// copy the id from rhs to this object, and increment the
+// reference counter of the id to indicate that another object
+// is referencing that id.
// Modification
-// 2010/5/9 - BMR
-// Removed close() and incRefCount() because setId/p_setId takes
-// care of close() and setId takes care incRefCount().
-// Programmer Binh-Minh Ribler - 2000
+// 2010/5/9 - BMR
+// Removed close() and incRefCount() because setId/p_setId takes
+// care of close() and setId takes care incRefCount().
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-IdComponent& IdComponent::operator=( const IdComponent& rhs )
+IdComponent& IdComponent::operator=(const IdComponent& rhs)
{
if (this != &rhs)
{
- // handling references to this id
- try {
- setId(rhs.getId());
- // Note: a = b, so there are two objects with the same hdf5 id
- // that's why incRefCount is needed, and it is called by setId
- }
- catch (Exception& close_error) {
- throw FileIException(inMemFunc("operator="), close_error.getDetailMsg());
- }
+ // handling references to this id
+ try {
+ setId(rhs.getId());
+ // Note: a = b, so there are two objects with the same hdf5 id
+ // that's why incRefCount is needed, and it is called by setId
+ }
+ catch (Exception& close_error) {
+ throw FileIException(inMemFunc("operator="), close_error.getDetailMsg());
+ }
}
return *this;
}
//--------------------------------------------------------------------------
-// Function: IdComponent::setId
-///\brief Sets the identifier of this object to a new value.
-///\param new_id - IN: New identifier to be set to
-///\exception H5::IdComponentException when the attempt to close the HDF5
-/// object fails
+// Function: IdComponent::setId
+///\brief Sets the identifier of this object to a new value.
+///\param new_id - IN: New identifier to be set to
+///\exception H5::IdComponentException when the attempt to close the HDF5
+/// object fails
// Description:
-// p_setId ensures that the current valid id of this object is
-// properly closed before resetting the object's id to the new id.
-// Programmer Binh-Minh Ribler - 2000
+// p_setId ensures that the current valid id of this object is
+// properly closed before resetting the object's id to the new id.
+// Programmer Binh-Minh Ribler - 2000
// Modification
-// 2008/7/23 - BMR
-// Changed all subclasses' setId to p_setId and put back setId
-// here. p_setId is used in the library where the id provided
-// by a C API passed on to user's application in the form of a
-// C++ API object, which will be destroyed properly, and so
-// p_setId does not call incRefCount. On the other hand, the
-// public version setId is used by other applications, in which
-// the id passed to setId is that of another C++ API object, so
-// setId must call incRefCount.
+// 2008/7/23 - BMR
+// Changed all subclasses' setId to p_setId and put back setId
+// here. p_setId is used in the library where the id provided
+// by a C API passed on to user's application in the form of a
+// C++ API object, which will be destroyed properly, and so
+// p_setId does not call incRefCount. On the other hand, the
+// public version setId is used by other applications, in which
+// the id passed to setId is that of another C++ API object, so
+// setId must call incRefCount.
//--------------------------------------------------------------------------
void IdComponent::setId(const hid_t new_id)
{
- // set to new_id
- p_setId(new_id);
+ // set to new_id
+ p_setId(new_id);
- // increment the reference counter of the new id
- incRefCount();
+ // increment the reference counter of the new id
+ incRefCount();
}
//--------------------------------------------------------------------------
-// Function: IdComponent destructor
-///\brief Noop destructor.
-// Programmer Binh-Minh Ribler - 2000
+// Function: IdComponent destructor
+///\brief Noop destructor.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
IdComponent::~IdComponent() {}
@@ -240,29 +327,29 @@ IdComponent::~IdComponent() {}
#ifndef DOXYGEN_SHOULD_SKIP_THIS
//--------------------------------------------------------------------------
-// Function: IdComponent::inMemFunc
-///\brief Makes and returns string "<class-name>::<func_name>"
-///\param func_name - Name of the function where failure occurs
+// Function: IdComponent::inMemFunc
+///\brief Makes and returns string "<class-name>::<func_name>"
+///\param func_name - Name of the function where failure occurs
// Description
-/// Concatenates the class name of this object with the
-/// passed-in function name to create a string that indicates
-/// where the failure occurs. The class-name is provided by
-/// fromClass(). This string will be used by a base class when
-/// an exception is thrown.
-// Programmer Binh-Minh Ribler - Aug 6, 2005
+/// Concatenates the class name of this object with the
+/// passed-in function name to create a string that indicates
+/// where the failure occurs. The class-name is provided by
+/// fromClass(). This string will be used by a base class when
+/// an exception is thrown.
+// Programmer Binh-Minh Ribler - Aug 6, 2005
//--------------------------------------------------------------------------
H5std_string IdComponent::inMemFunc(const char* func_name) const
{
- H5std_string full_name = func_name;
- full_name.insert(0, "::");
- full_name.insert(0, fromClass());
- return (full_name);
+ H5std_string full_name = func_name;
+ full_name.insert(0, "::");
+ full_name.insert(0, fromClass());
+ return (full_name);
}
//--------------------------------------------------------------------------
-// Function: IdComponent default constructor - private
-///\brief Default constructor.
-// Programmer Binh-Minh Ribler - 2000
+// Function: IdComponent default constructor - private
+///\brief Default constructor.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
IdComponent::IdComponent()
{
@@ -276,45 +363,45 @@ IdComponent::IdComponent()
}
//--------------------------------------------------------------------------
-// Function: IdComponent::p_get_file_name (protected)
-// Purpose: Gets the name of the file, in which this object belongs.
-// Exception: H5::IdComponentException
+// Function: IdComponent::p_get_file_name (protected)
+// Purpose Gets the name of the file, in which this object belongs.
+// Exception: H5::IdComponentException
// Description:
-// This function is protected so that the user applications can
-// only have access to its code via allowable classes, namely,
-// Attribute and H5Location subclasses.
-// Programmer Binh-Minh Ribler - Jul, 2004
+// This function is protected so that the user applications can
+// only have access to its code via allowable classes, namely,
+// Attribute and H5Location subclasses.
+// Programmer Binh-Minh Ribler - Jul, 2004
//--------------------------------------------------------------------------
H5std_string IdComponent::p_get_file_name() const
{
- hid_t temp_id = getId();
+ hid_t temp_id = getId();
- // Preliminary call to H5Fget_name to get the length of the file name
- ssize_t name_size = H5Fget_name(temp_id, NULL, 0);
+ // Preliminary call to H5Fget_name to get the length of the file name
+ ssize_t name_size = H5Fget_name(temp_id, NULL, 0);
- // If H5Aget_name returns a negative value, raise an exception,
- if( name_size < 0 )
- {
- throw IdComponentException("", "H5Fget_name failed");
- }
+ // If H5Fget_name returns a negative value, raise an exception.
+ if (name_size < 0)
+ {
+ throw IdComponentException("", "H5Fget_name failed");
+ }
- // Call H5Fget_name again to get the actual file name
- char* name_C = new char[name_size+1]; // temporary C-string for C API
- HDmemset(name_C, 0, name_size+1); // clear buffer
+ // Call H5Fget_name again to get the actual file name
+ char* name_C = new char[name_size+1]; // temporary C-string for C API
+ HDmemset(name_C, 0, name_size+1); // clear buffer
- name_size = H5Fget_name(temp_id, name_C, name_size+1);
+ name_size = H5Fget_name(temp_id, name_C, name_size+1);
- // Check for failure again
- if( name_size < 0 )
- {
+ // Check for failure again
+ if (name_size < 0)
+ {
delete []name_C;
- throw IdComponentException("", "H5Fget_name failed");
- }
+ throw IdComponentException("", "H5Fget_name failed");
+ }
- // Convert the C file name and return
- H5std_string file_name(name_C);
- delete []name_C;
- return(file_name);
+ // Convert the C file name and return
+ H5std_string file_name(name_C);
+ delete []name_C;
+ return(file_name);
}
//
@@ -322,22 +409,22 @@ H5std_string IdComponent::p_get_file_name() const
//
//--------------------------------------------------------------------------
-// Function: p_valid_id
-// Purpose: Verifies that the given id is a valid id so it can be passed
-// into an H5I C function.
-// Return true if id is valid, false, otherwise
-// Programmer Binh-Minh Ribler - May, 2005
+// Function: p_valid_id
+// Purpose Verifies that the given id is a valid id so it can be passed
+// into an H5I C function.
+// Return true if id is valid, false, otherwise
+// Programmer Binh-Minh Ribler - May, 2005
//--------------------------------------------------------------------------
bool IdComponent::p_valid_id(const hid_t obj_id)
{
if (obj_id <= 0)
- return false;
+ return false;
H5I_type_t id_type = H5Iget_type(obj_id);
if (id_type <= H5I_BADID || id_type >= H5I_NTYPES)
- return false;
+ return false;
else
- return true;
+ return true;
}
// Notes about IdComponent::id
diff --git a/c++/src/H5IdComponent.h b/c++/src/H5IdComponent.h
index d3d9b9f..baf939e 100644
--- a/c++/src/H5IdComponent.h
+++ b/c++/src/H5IdComponent.h
@@ -6,12 +6,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef __IdComponent_H
@@ -19,7 +17,6 @@
namespace H5 {
-class DataSpace;
/*! \class IdComponent
\brief Class IdComponent provides wrappers of the C functions that
operate on an HDF5 identifier.
@@ -30,91 +27,100 @@ class DataSpace;
class H5_DLLCPP IdComponent {
public:
- // Increment reference counter.
- void incRefCount(const hid_t obj_id) const;
- void incRefCount() const;
+ // Increment reference counter.
+ void incRefCount(const hid_t obj_id) const;
+ void incRefCount() const;
- // Decrement reference counter.
- void decRefCount(const hid_t obj_id) const;
- void decRefCount() const;
+ // Decrement reference counter.
+ void decRefCount(const hid_t obj_id) const;
+ void decRefCount() const;
- // Get the reference counter to this identifier.
- int getCounter(const hid_t obj_id) const;
- int getCounter() const;
+ // Get the reference counter to this identifier.
+ int getCounter(const hid_t obj_id) const;
+ int getCounter() const;
- // Returns an HDF5 object type, given the object id.
- static H5I_type_t getHDFObjType(const hid_t obj_id);
+ // Returns an HDF5 object type, given the object id.
+ static H5I_type_t getHDFObjType(const hid_t obj_id);
- // Returns an HDF5 object type of this object.
- H5I_type_t getHDFObjType() const;
+ // Returns an HDF5 object type of this object.
+ H5I_type_t getHDFObjType() const;
- // Assignment operator.
- IdComponent& operator=( const IdComponent& rhs );
+ // Returns the number of members in a type.
+ static hsize_t getNumMembers(H5I_type_t type);
- // Sets the identifier of this object to a new value.
- void setId(const hid_t new_id);
+ // Checks if the given ID is valid.
+ static bool isValid(hid_t an_id);
- // *** Deprecation warning ***
- // The following two constructors are no longer appropriate after the
- // data member "id" had been moved to the sub-classes.
- // The copy constructor is a noop and is removed in 1.8.15 and the
- // other will be removed from 1.10 release, and then from 1.8 if its
- // removal does not raise any problems in two 1.10 releases.
+ // Determines if a type exists.
+ static bool typeExists(H5I_type_t type);
- // Creates an object to hold an HDF5 identifier.
- IdComponent( const hid_t h5_id );
+ // Assignment operator.
+ IdComponent& operator=(const IdComponent& rhs);
+
+ // Sets the identifier of this object to a new value.
+ void setId(const hid_t new_id);
+
+ // *** Deprecation warning ***
+ // The following two constructors are no longer appropriate after the
+ // data member "id" had been moved to the sub-classes.
+ // The copy constructor is a noop and is removed in 1.8.15 and the
+ // other will be removed from 1.10 release, and then from 1.8 if its
+ // removal does not raise any problems in two 1.10 releases.
+
+ // Creates an object to hold an HDF5 identifier.
+ // IdComponent(const hid_t h5_id); - removed from 1.10.1
#ifndef DOXYGEN_SHOULD_SKIP_THIS
- // Copy constructor: makes copy of the original IdComponent object.
- // IdComponent( const IdComponent& original ); - removed from 1.8.15
+ // Copy constructor: makes copy of the original IdComponent object.
+ // IdComponent(const IdComponent& original); - removed from 1.8.15
- // Gets the identifier of this object.
- virtual hid_t getId () const = 0;
+ // Gets the identifier of this object.
+ virtual hid_t getId () const = 0;
- // Pure virtual function for there are various H5*close for the
- // subclasses.
- virtual void close() = 0;
+ // Pure virtual function; each subclass has its own H5*close to call.
+ virtual void close() = 0;
- // Makes and returns the string "<class-name>::<func_name>";
- // <class-name> is returned by fromClass().
- H5std_string inMemFunc(const char* func_name) const;
+ // Makes and returns the string "<class-name>::<func_name>";
+ // <class-name> is returned by fromClass().
+ H5std_string inMemFunc(const char* func_name) const;
- ///\brief Returns this class name.
- virtual H5std_string fromClass() const { return("IdComponent");}
+ ///\brief Returns this class name.
+ virtual H5std_string fromClass() const { return("IdComponent");}
#endif // DOXYGEN_SHOULD_SKIP_THIS
- // Destructor
- virtual ~IdComponent();
+ // Destructor
+ virtual ~IdComponent();
#ifndef DOXYGEN_SHOULD_SKIP_THIS
protected:
- // Default constructor.
- IdComponent();
+ // Default constructor.
+ IdComponent();
- // Gets the name of the file, in which an HDF5 object belongs.
- H5std_string p_get_file_name() const;
+ // Gets the name of the file, in which an HDF5 object belongs.
+ H5std_string p_get_file_name() const;
- // Verifies that the given id is valid.
- static bool p_valid_id(const hid_t obj_id);
+ // Verifies that the given id is valid.
+ static bool p_valid_id(const hid_t obj_id);
- // Sets the identifier of this object to a new value. - this one
- // doesn't increment reference count
- virtual void p_setId(const hid_t new_id) = 0;
+ // Sets the identifier of this object to a new value. - this one
+ // doesn't increment reference count
+ virtual void p_setId(const hid_t new_id) = 0;
- // This flag is used to decide whether H5dont_atexit should be called
- static bool H5dontAtexit_called;
+ // This flag is used to decide whether H5dont_atexit should be called
+ static bool H5dontAtexit_called;
private:
- // This flag indicates whether H5Library::initH5cpp has been called
- // to register various terminating functions with atexit()
+ // This flag indicates whether H5Library::initH5cpp has been called
+ // to register various terminating functions with atexit()
static bool H5cppinit;
#endif // DOXYGEN_SHOULD_SKIP_THIS
}; // end class IdComponent
+} // namespace H5
-}
#endif // __IdComponent_H
diff --git a/c++/src/H5Include.h b/c++/src/H5Include.h
index 1e0e952..a180430 100644
--- a/c++/src/H5Include.h
+++ b/c++/src/H5Include.h
@@ -6,12 +6,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <hdf5.h>
@@ -33,10 +31,10 @@ const bool true = 1;
// so re-define them here for now.
/* Initial version of the object header format */
-#define H5O_VERSION_1 1
+#define H5O_VERSION_1 1
/* Revised version - leaves out reserved bytes and alignment padding, and adds
* magic number as prefix and checksum as suffix for all chunks.
*/
-#define H5O_VERSION_2 2
+#define H5O_VERSION_2 2
diff --git a/c++/src/H5IntType.cpp b/c++/src/H5IntType.cpp
index 69fbedb..fb7e476 100644
--- a/c++/src/H5IntType.cpp
+++ b/c++/src/H5IntType.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <string>
@@ -22,6 +20,7 @@
#include "H5OcreatProp.h"
#include "H5DcreatProp.h"
#include "H5DxferProp.h"
+#include "H5LaccProp.h"
#include "H5Location.h"
#include "H5Object.h"
#include "H5DataType.h"
@@ -35,101 +34,139 @@
namespace H5 {
//--------------------------------------------------------------------------
-// Function: IntType default constructor
-///\brief Default constructor: Creates a stub integer datatype
-// Programmer Binh-Minh Ribler - 2000
+// Function: IntType default constructor
+///\brief Default constructor: Creates a stub integer datatype
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
IntType::IntType() {}
//--------------------------------------------------------------------------
-// Function: IntType copy constructor
-///\brief Copy constructor: makes a copy of the original IntType object.
-// Programmer Binh-Minh Ribler - 2000
+// Function: IntType copy constructor
+///\brief Copy constructor: makes a copy of the original IntType object.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-IntType::IntType( const IntType& original ) : AtomType( original ) {}
+IntType::IntType(const IntType& original) : AtomType( original ) {}
//--------------------------------------------------------------------------
-// Function: IntType overloaded constructor
-///\brief Creates a integer type using a predefined type
-///\param pred_type - IN: Predefined datatype
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: IntType overloaded constructor
+///\brief Creates an integer type using a predefined type
+///\param pred_type - IN: Predefined datatype
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-IntType::IntType( const PredType& pred_type ) : AtomType()
+IntType::IntType(const PredType& pred_type) : AtomType()
{
- // use DataType::copy to make a copy of this predefined type
- copy( pred_type );
+ // use DataType::copy to make a copy of this predefined type
+ copy(pred_type);
}
//--------------------------------------------------------------------------
-// Function: IntType overloaded constructor
-///\brief Creates an integer datatype using the id of an existing
-/// datatype.
-///\param existing_id - IN: Id of an existing datatype
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: IntType overloaded constructor
+///\brief Creates an integer datatype using the id of an existing
+/// datatype.
+///\param existing_id - IN: Id of an existing datatype
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-IntType::IntType( const hid_t existing_id ) : AtomType( existing_id ) {}
+IntType::IntType(const hid_t existing_id) : AtomType( existing_id ) {}
//--------------------------------------------------------------------------
-// Function: IntType overloaded constructor
-///\brief Gets the integer datatype of the specified dataset.
-///\param dataset - IN: Dataset that this integer datatype associates with
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: IntType overloaded constructor
+///\brief Gets the integer datatype of the specified dataset.
+///\param dataset - IN: Dataset that this integer datatype associates with
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-IntType::IntType( const DataSet& dataset ) : AtomType()
+IntType::IntType(const DataSet& dataset) : AtomType()
{
- // Calls C function H5Dget_type to get the id of the datatype
- id = H5Dget_type( dataset.getId() );
+ // Calls C function H5Dget_type to get the id of the datatype
+ id = H5Dget_type(dataset.getId());
+
+ if (id < 0)
+ {
+ throw DataSetIException("IntType constructor", "H5Dget_type failed");
+ }
+}
- if( id < 0 )
- {
- throw DataSetIException("IntType constructor", "H5Dget_type failed");
- }
+//--------------------------------------------------------------------------
+// Function: IntType overloaded constructor
+///\brief Creates an IntType instance by opening an HDF5 integer datatype
+/// given its name as a char*.
+///\param loc - IN: Location of the type
+///\param dtype_name - IN: Integer type name
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - Dec 2016
+// Description
+// In 1.10.1, this constructor was introduced and may replace the
+// existing function CommonFG::openIntType(const char*) to
+// improve usability.
+// -BMR, Dec 2016
+//--------------------------------------------------------------------------
+IntType::IntType(const H5Location& loc, const char *dtype_name) : AtomType()
+{
+ id = p_opentype(loc, dtype_name);
+}
+
+//--------------------------------------------------------------------------
+// Function: IntType overloaded constructor
+///\brief Creates an IntType instance by opening an HDF5 integer datatype
+/// given its name, provided as an \c H5std_string.
+///\param loc - IN: Location of the type
+///\param dtype_name - IN: Integer type name
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - Dec 2016
+// Description
+// In 1.10.1, this constructor was introduced and may replace the
+// existing function CommonFG::openIntType(const H5std_string&)
+// to improve usability.
+// -BMR, Dec 2016
+//--------------------------------------------------------------------------
+IntType::IntType(const H5Location& loc, const H5std_string& dtype_name) : AtomType()
+{
+ id = p_opentype(loc, dtype_name.c_str());
}
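As a usage sketch for the two constructors above, the following opens a committed (named) integer datatype; the file name and datatype path are made up for the example:

#include "H5Cpp.h"

void open_named_int_type()
{
    H5::H5File file("types.h5", H5F_ACC_RDONLY);  // hypothetical file

    // Open a committed integer datatype by name; H5File is an H5Location
    H5::IntType itype(file, "/mytypes/int32_le");

    // The opened type behaves like any other IntType
    H5T_sign_t sign = itype.getSign();
    size_t size = itype.getSize();
    (void)sign; (void)size;
}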
//--------------------------------------------------------------------------
-// Function: IntType::getSign
-///\brief Retrieves the sign type for an integer type.
-///\return Valid sign type
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: IntType::getSign
+///\brief Retrieves the sign type for an integer type.
+///\return Valid sign type
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
H5T_sign_t IntType::getSign() const
{
- H5T_sign_t type_sign = H5Tget_sign( id ); // C routine
+ H5T_sign_t type_sign = H5Tget_sign(id); // C routine
- // Returns a valid sign type if no errors
- if( type_sign == H5T_SGN_ERROR )
- {
- throw DataTypeIException("IntType::getSign",
- "H5Tget_sign failed - returned H5T_SGN_ERROR for the sign type");
- }
- return( type_sign );
+ // Returns a valid sign type if no errors
+ if (type_sign == H5T_SGN_ERROR)
+ {
+ throw DataTypeIException("IntType::getSign",
+ "H5Tget_sign failed - returned H5T_SGN_ERROR for the sign type");
+ }
+ return(type_sign);
}
//--------------------------------------------------------------------------
-// Function: IntType::getSign
-///\brief Sets the sign property for an integer type.
-///\param sign - IN: Sign type
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: IntType::setSign
+///\brief Sets the sign property for an integer type.
+///\param sign - IN: Sign type
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void IntType::setSign( H5T_sign_t sign ) const
+void IntType::setSign(H5T_sign_t sign) const
{
- // Call C routine to set the sign property
- herr_t ret_value = H5Tset_sign( id, sign );
- if( ret_value < 0 )
- {
- throw DataTypeIException("IntType::setSign", "H5Tset_sign failed");
- }
+ // Call C routine to set the sign property
+ herr_t ret_value = H5Tset_sign(id, sign);
+ if (ret_value < 0)
+ {
+ throw DataTypeIException("IntType::setSign", "H5Tset_sign failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: IntType destructor
-///\brief Noop destructor.
-// Programmer Binh-Minh Ribler - 2000
+// Function: IntType destructor
+///\brief Noop destructor.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
IntType::~IntType() {}
diff --git a/c++/src/H5IntType.h b/c++/src/H5IntType.h
index 54c0d57..82a7cfd 100644
--- a/c++/src/H5IntType.h
+++ b/c++/src/H5IntType.h
@@ -6,12 +6,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef __H5IntType_H
@@ -27,32 +25,38 @@ namespace H5 {
*/
class H5_DLLCPP IntType : public AtomType {
public:
- // Creates an integer type using a predefined type
- IntType(const PredType& pred_type);
+ // Creates an integer type using a predefined type
+ IntType(const PredType& pred_type);
- // Gets the integer datatype of the specified dataset
- IntType(const DataSet& dataset);
+ // Gets the integer datatype of the specified dataset
+ IntType(const DataSet& dataset);
- // Retrieves the sign type for an integer type
- H5T_sign_t getSign() const;
+ // Constructors that open an HDF5 integer datatype, given a location.
+ IntType(const H5Location& loc, const char* name);
+ IntType(const H5Location& loc, const H5std_string& name);
- // Sets the sign proprety for an integer type.
- void setSign( H5T_sign_t sign ) const;
+ // Retrieves the sign type for an integer type
+ H5T_sign_t getSign() const;
- ///\brief Returns this class name.
- virtual H5std_string fromClass () const { return("IntType"); }
+ // Sets the sign property for an integer type.
+ void setSign(H5T_sign_t sign) const;
- // Default constructor
- IntType();
+ ///\brief Returns this class name.
+ virtual H5std_string fromClass () const { return("IntType"); }
- // Creates a integer datatype using an existing id
- IntType(const hid_t existing_id);
+ // Default constructor
+ IntType();
- // Copy constructor: makes copy of IntType object
- IntType(const IntType& original);
+ // Creates a integer datatype using an existing id
+ IntType(const hid_t existing_id);
+
+ // Copy constructor: makes copy of IntType object
+ IntType(const IntType& original);
+
+ // Noop destructor.
+ virtual ~IntType();
+
+}; // end of IntType
+} // namespace H5
- // Noop destructor.
- virtual ~IntType();
-};
-}
#endif // __H5IntType_H
diff --git a/c++/src/H5LaccProp.cpp b/c++/src/H5LaccProp.cpp
new file mode 100644
index 0000000..1267286
--- /dev/null
+++ b/c++/src/H5LaccProp.cpp
@@ -0,0 +1,149 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include <string>
+
+#include "H5Include.h"
+#include "H5Exception.h"
+#include "H5IdComponent.h"
+#include "H5PropList.h"
+#include "H5LaccProp.h"
+
+namespace H5 {
+
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
+// This DOXYGEN_SHOULD_SKIP_THIS block is a work-around approach to control
+// the order of creation and deletion of the global constants. See Design Notes
+// in "H5PredType.cpp" for information.
+
+// Initialize a pointer for the constant
+LinkAccPropList* LinkAccPropList::DEFAULT_ = 0;
+
+//--------------------------------------------------------------------------
+// Function: LinkAccPropList::getConstant
+// Creates a LinkAccPropList object representing the HDF5 constant
+// H5P_LINK_ACCESS, pointed to by LinkAccPropList::DEFAULT_
+// exception H5::PropListIException
+// Description
+// If LinkAccPropList::DEFAULT_ already points to an allocated
+// object, throw a PropListIException. This scenario should not
+// happen.
+// Programmer Binh-Minh Ribler - December, 2016
+//--------------------------------------------------------------------------
+LinkAccPropList* LinkAccPropList::getConstant()
+{
+ // Tell the C library not to clean up, H5Library::termH5cpp will call
+ // H5close - more dependency if use H5Library::dontAtExit()
+ if (!IdComponent::H5dontAtexit_called)
+ {
+ (void) H5dont_atexit();
+ IdComponent::H5dontAtexit_called = true;
+ }
+
+ // If the constant pointer is not allocated, allocate it. Otherwise,
+ // throw because it shouldn't be.
+ if (DEFAULT_ == 0)
+ DEFAULT_ = new LinkAccPropList(H5P_LINK_ACCESS);
+ else
+ throw PropListIException("LinkAccPropList::getConstant", "LinkAccPropList::getConstant is being invoked on an allocated DEFAULT_");
+ return(DEFAULT_);
+}
+
+//--------------------------------------------------------------------------
+// Function: LinkAccPropList::deleteConstants
+// Purpose: Deletes the constant object that LinkAccPropList::DEFAULT_
+// points to.
+// exception H5::PropListIException
+// Programmer Binh-Minh Ribler - December, 2016
+//--------------------------------------------------------------------------
+void LinkAccPropList::deleteConstants()
+{
+ if (DEFAULT_ != 0)
+ delete DEFAULT_;
+}
+
+//--------------------------------------------------------------------------
+// Purpose: Constant for default property
+//--------------------------------------------------------------------------
+const LinkAccPropList& LinkAccPropList::DEFAULT = *getConstant();
+
+#endif // DOXYGEN_SHOULD_SKIP_THIS
+
+//--------------------------------------------------------------------------
+// Function: Default Constructor
+///\brief Creates a link access property list
+// Programmer Binh-Minh Ribler - December, 2016
+//--------------------------------------------------------------------------
+LinkAccPropList::LinkAccPropList() : PropList(H5P_LINK_ACCESS) {}
+
+//--------------------------------------------------------------------------
+// Function: LinkAccPropList copy constructor
+///\brief Copy Constructor: makes a copy of the original
+///\param original - IN: LinkAccPropList instance to copy
+// Programmer Binh-Minh Ribler - December, 2016
+//--------------------------------------------------------------------------
+LinkAccPropList::LinkAccPropList(const LinkAccPropList& original) : PropList(original) {}
+
+//--------------------------------------------------------------------------
+// Function: LinkAccPropList overloaded constructor
+///\brief Creates a link access property list using the id of an
+/// existing one.
+// Programmer Binh-Minh Ribler - December, 2016
+//--------------------------------------------------------------------------
+LinkAccPropList::LinkAccPropList(const hid_t plist_id) : PropList(plist_id) {}
+
+//--------------------------------------------------------------------------
+// Function: LinkAccPropList::setNumLinks
+///\brief Set the number of soft or user-defined link traversals allowed
+/// before the library assumes it has found a cycle and aborts the
+/// traversal.
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - March 1, 2017
+//--------------------------------------------------------------------------
+void LinkAccPropList::setNumLinks(size_t nlinks) const
+{
+ herr_t ret_value = H5Pset_nlinks(id, nlinks);
+ // Throw exception if H5Pset_nlinks returns failure
+ if (ret_value < 0)
+ {
+ throw PropListIException("setNumLinks", "H5Pset_nlinks failed");
+ }
+}
+
+//--------------------------------------------------------------------------
+// Function: LinkAccPropList::getNumLinks
+///\brief Gets the number of soft or user-defined links that can be
+/// traversed before a failure occurs.
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - March 1, 2017
+//--------------------------------------------------------------------------
+size_t LinkAccPropList::getNumLinks() const
+{
+ size_t nlinks = 0;
+ herr_t ret_value = H5Pget_nlinks(id, &nlinks);
+ // Throw exception if H5Pget_nlinks returns failure
+ if (ret_value < 0)
+ {
+ throw PropListIException("getNumLinks", "H5Pget_nlinks failed");
+ }
+ return(nlinks);
+}
+
+//--------------------------------------------------------------------------
+// Function: LinkAccPropList destructor
+///\brief Noop destructor
+// Programmer Binh-Minh Ribler - December, 2016
+//--------------------------------------------------------------------------
+LinkAccPropList::~LinkAccPropList() {}
+
+} // end namespace
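
A short usage sketch for the new class; it assumes the umbrella header H5Cpp.h and an application that simply wants to raise the link traversal limit (the value 32 is arbitrary):

    #include <iostream>
    #include "H5Cpp.h"
    using namespace H5;

    int main()
    {
        try {
            LinkAccPropList lapl;                // starts from H5P_LINK_ACCESS defaults
            lapl.setNumLinks(32);                // allow up to 32 link traversals
            std::cout << "nlinks = " << lapl.getNumLinks() << std::endl;
        }
        catch (PropListIException& e) {
            std::cerr << e.getDetailMsg() << std::endl;
            return 1;
        }
        return 0;
    }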
diff --git a/c++/src/H5LaccProp.h b/c++/src/H5LaccProp.h
new file mode 100644
index 0000000..9772cde
--- /dev/null
+++ b/c++/src/H5LaccProp.h
@@ -0,0 +1,73 @@
+// C++ informative line for the emacs editor: -*- C++ -*-
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+// Class LinkAccPropList represents the HDF5 link access property list and
+// inherits from PropList.
+
+#ifndef __H5LinkAccPropList_H
+#define __H5LinkAccPropList_H
+
+namespace H5 {
+
+/*! \class LinkAccPropList
+ \brief Class LinkAccPropList inherits from PropList and provides
+ wrappers for the HDF5 link access property list.
+
+ Inheritance: PropList -> IdComponent
+*/
+class H5_DLLCPP LinkAccPropList : public PropList {
+ public:
+ ///\brief Default link access property list.
+ static const LinkAccPropList& DEFAULT;
+
+ // Creates a link access property list.
+ LinkAccPropList();
+
+ ///\brief Returns this class name.
+ virtual H5std_string fromClass () const { return("LinkAccPropList"); }
+
+ // Copy constructor: creates a copy of a LinkAccPropList object.
+ LinkAccPropList(const LinkAccPropList& original);
+
+ // Creates a copy of an existing link access property list
+ // using the property list id.
+ LinkAccPropList (const hid_t plist_id);
+
+ // Sets the number of soft or user-defined links that can be
+ // traversed before a failure occurs.
+ void setNumLinks(size_t nlinks) const;
+
+ // Gets the number of soft or user-defined link traversals allowed
+ size_t getNumLinks() const;
+
+ // Noop destructor
+ virtual ~LinkAccPropList();
+
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
+
+ // Deletes the global constant, should only be used by the library
+ static void deleteConstants();
+
+ private:
+ static LinkAccPropList* DEFAULT_;
+
+ // Creates the global constant, should only be used by the library
+ static LinkAccPropList* getConstant();
+
+#endif // DOXYGEN_SHOULD_SKIP_THIS
+
+}; // end of LinkAccPropList
+} // namespace H5
+
+#endif // __H5LinkAccPropList_H
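
For reference, the C calls that setNumLinks and getNumLinks wrap can be used directly; a minimal sketch with abbreviated error handling:

    #include <iostream>
    #include "hdf5.h"

    int main()
    {
        hid_t  lapl_id = H5Pcreate(H5P_LINK_ACCESS);   // C-level link access plist
        size_t nlinks  = 0;

        if (lapl_id < 0)
            return 1;
        if (H5Pset_nlinks(lapl_id, 32) < 0)            // what setNumLinks delegates to
            return 1;
        if (H5Pget_nlinks(lapl_id, &nlinks) < 0)       // what getNumLinks delegates to
            return 1;

        std::cout << "nlinks = " << nlinks << std::endl;
        H5Pclose(lapl_id);
        return 0;
    }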
diff --git a/c++/src/H5Library.cpp b/c++/src/H5Library.cpp
index 1726121..9293622 100644
--- a/c++/src/H5Library.cpp
+++ b/c++/src/H5Library.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <string>
@@ -26,6 +24,7 @@
#include "H5OcreatProp.h"
#include "H5DxferProp.h"
#include "H5DcreatProp.h"
+#include "H5LaccProp.h"
#include "H5Location.h"
#include "H5Object.h"
#include "H5DataType.h"
@@ -37,46 +36,46 @@
namespace H5 {
//--------------------------------------------------------------------------
-// Function: H5Library::open (static)
-///\brief Initializes the HDF5 library.
+// Function: H5Library::open (static)
+///\brief Initializes the HDF5 library.
///
-///\exception H5::LibraryIException
-// Programmer Binh-Minh Ribler - 2000
+///\exception H5::LibraryIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
void H5Library::open()
{
- herr_t ret_value = H5open();
- if( ret_value < 0 )
- {
- throw LibraryIException("H5Library::open", "H5open failed");
- }
+ herr_t ret_value = H5open();
+ if (ret_value < 0)
+ {
+ throw LibraryIException("H5Library::open", "H5open failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: H5Library::close (static)
-///\brief Flushes all data to disk, closes files, and cleans up memory.
+// Function: H5Library::close (static)
+///\brief Flushes all data to disk, closes files, and cleans up memory.
///
-///\exception H5::LibraryIException
-// Programmer Binh-Minh Ribler - 2000
+///\exception H5::LibraryIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
void H5Library::close()
{
- herr_t ret_value = H5close();
- if( ret_value < 0 )
- {
- throw LibraryIException("H5Library::close", "H5close failed");
- }
+ herr_t ret_value = H5close();
+ if (ret_value < 0)
+ {
+ throw LibraryIException("H5Library::close", "H5close failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: H5Library::dontAtExit (static)
-///\brief Instructs library not to install the C \c atexit cleanup routine
+// Function: H5Library::dontAtExit (static)
+///\brief Instructs library not to install the C \c atexit cleanup routine
///
-///\exception H5::LibraryIException
-// Programmer Binh-Minh Ribler - 2000
+///\exception H5::LibraryIException
+// Programmer Binh-Minh Ribler - 2000
// Modification
-// Removed the check for failure returned from H5dont_atexit.
-// will be fixed to not fail (HDFFV-9540)
+// Removed the check for failure returned from H5dont_atexit.
+// will be fixed to not fail (HDFFV-9540)
//--------------------------------------------------------------------------
void H5Library::dontAtExit()
{
@@ -84,80 +83,80 @@ void H5Library::dontAtExit()
}
//--------------------------------------------------------------------------
-// Function: H5Library::getLibVersion (static)
-///\brief Returns the HDF library release number.
-///\param majnum - OUT: Major version of the library
-///\param minnum - OUT: Minor version of the library
-///\param relnum - OUT: Release number of the library
-///\exception H5::LibraryIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5Library::getLibVersion (static)
+///\brief Returns the HDF library release number.
+///\param majnum - OUT: Major version of the library
+///\param minnum - OUT: Minor version of the library
+///\param relnum - OUT: Release number of the library
+///\exception H5::LibraryIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void H5Library::getLibVersion( unsigned& majnum, unsigned& minnum, unsigned& relnum )
+void H5Library::getLibVersion(unsigned& majnum, unsigned& minnum, unsigned& relnum)
{
- herr_t ret_value = H5get_libversion( &majnum, &minnum, &relnum );
- if( ret_value < 0 )
- {
- throw LibraryIException("H5Library::getLibVersion", "H5get_libversion failed");
- }
+ herr_t ret_value = H5get_libversion(&majnum, &minnum, &relnum);
+ if (ret_value < 0)
+ {
+ throw LibraryIException("H5Library::getLibVersion", "H5get_libversion failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: H5Library::checkVersion (static)
-///\brief Verifies that the arguments match the version numbers
-/// compiled into the library
-///\param majnum - IN: Major version of the library
-///\param minnum - IN: Minor version of the library
-///\param relnum - IN: Release number of the library
-///\exception H5::LibraryIException
+// Function: H5Library::checkVersion (static)
+///\brief Verifies that the arguments match the version numbers
+/// compiled into the library
+///\param majnum - IN: Major version of the library
+///\param minnum - IN: Minor version of the library
+///\param relnum - IN: Release number of the library
+///\exception H5::LibraryIException
///\par Description
-/// For information about library version, please refer to
-/// the C layer Reference Manual at:
+/// For information about library version, please refer to
+/// the C layer Reference Manual at:
/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5.html#Library-VersCheck
-// Programmer Binh-Minh Ribler - 2000
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
void H5Library::checkVersion(unsigned majnum, unsigned minnum, unsigned relnum)
{
- herr_t ret_value = H5check_version(majnum, minnum, relnum);
- if( ret_value < 0 )
- {
- throw LibraryIException("H5Library::checkVersion", "H5check_version failed");
- }
+ herr_t ret_value = H5check_version(majnum, minnum, relnum);
+ if (ret_value < 0)
+ {
+ throw LibraryIException("H5Library::checkVersion", "H5check_version failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: H5Library::garbageCollect (static)
-///\brief Walks through all the garbage collection routines for the
-/// library, which are supposed to free any unused memory they
-/// have allocated.
+// Function: H5Library::garbageCollect (static)
+///\brief Walks through all the garbage collection routines for the
+/// library, which are supposed to free any unused memory they
+/// have allocated.
///
-///\exception H5::LibraryIException
+///\exception H5::LibraryIException
///\par Description
-/// It is not required that H5Library::garbageCollect be called
-/// at any particular time; it is only necessary in certain
-/// situations, such as when the application has performed actions
-/// that cause the library to allocate many objects. The
-/// application should call H5Library::garbageCollect if it
-/// eventually releases those objects and wants to reduce the
-/// memory used by the library from the peak usage required.
+/// It is not required that H5Library::garbageCollect be called
+/// at any particular time; it is only necessary in certain
+/// situations, such as when the application has performed actions
+/// that cause the library to allocate many objects. The
+/// application should call H5Library::garbageCollect if it
+/// eventually releases those objects and wants to reduce the
+/// memory used by the library from the peak usage required.
///\par
-/// The library automatically garbage collects all the free
-/// lists when the application ends.
-// Programmer Binh-Minh Ribler - May, 2004
+/// The library automatically garbage collects all the free
+/// lists when the application ends.
+// Programmer Binh-Minh Ribler - May, 2004
//--------------------------------------------------------------------------
void H5Library::garbageCollect()
{
- herr_t ret_value = H5garbage_collect();
- if( ret_value < 0 )
- {
- throw LibraryIException("H5Library::garbageCollect", "H5garbage_collect failed");
- }
+ herr_t ret_value = H5garbage_collect();
+ if (ret_value < 0)
+ {
+ throw LibraryIException("H5Library::garbageCollect", "H5garbage_collect failed");
+ }
}
//--------------------------------------------------------------------------
// Function: H5Library::initH5cpp (static)
///\brief Initializes C++ library and registers terminating functions at
-/// exit. Only for the library functions, not for user-defined
-/// functions.
+/// exit. Only for the library functions, not for user-defined
+/// functions.
// Description
// initH5cpp registers the following functions with std::atexit():
// termH5cpp() - calls H5close() after all cleanup in
@@ -185,23 +184,27 @@ void H5Library::initH5cpp()
if (ret_value != 0)
throw LibraryIException("H5Library::initH5cpp", "Registrating PropList::deleteConstants failed");
- ret_value = std::atexit(FileAccPropList::deleteConstants);
+ ret_value = std::atexit(LinkAccPropList::deleteConstants);
+ if (ret_value != 0)
+ throw LibraryIException("H5Library::initH5cpp", "Registering LinkAccPropList::deleteConstants failed");
+
+ ret_value = std::atexit(FileAccPropList::deleteConstants);
if (ret_value != 0)
throw LibraryIException("H5Library::initH5cpp", "Registrating FileAccPropList::deleteConstants failed");
- ret_value = std::atexit(FileCreatPropList::deleteConstants);
+ ret_value = std::atexit(FileCreatPropList::deleteConstants);
if (ret_value != 0)
throw LibraryIException("H5Library::initH5cpp", "Registrating FileCreatPropList::deleteConstants failed");
- ret_value = std::atexit(DSetMemXferPropList::deleteConstants);
+ ret_value = std::atexit(DSetMemXferPropList::deleteConstants);
if (ret_value != 0)
throw LibraryIException("H5Library::initH5cpp", "Registrating DSetMemXferPropList::deleteConstants failed");
- ret_value = std::atexit(DSetCreatPropList::deleteConstants);
+ ret_value = std::atexit(DSetCreatPropList::deleteConstants);
if (ret_value != 0)
throw LibraryIException("H5Library::initH5cpp", "Registrating DSetCreatPropList::deleteConstants failed");
- ret_value = std::atexit(ObjCreatPropList::deleteConstants);
+ ret_value = std::atexit(ObjCreatPropList::deleteConstants);
if (ret_value != 0)
throw LibraryIException("H5Library::initH5cpp", "Registrating ObjCreatPropList::deleteConstants failed");
@@ -226,45 +229,45 @@ void H5Library::termH5cpp()
}
//--------------------------------------------------------------------------
-// Function: H5Library::setFreeListLimits (static)
-///\brief Sets limits on the different kinds of free lists.
-///\param reg_global_lim - IN: Limit on all "regular" free list memory used
-///\param reg_list_lim - IN: Limit on memory used in each "regular" free list
-///\param arr_global_lim - IN: Limit on all "array" free list memory used
-///\param arr_list_lim - IN: Limit on memory used in each "array" free list
-///\param blk_global_lim - IN: Limit on all "block" free list memory used
-///\param blk_list_lim - IN: Limit on memory used in each "block" free list
-///\exception H5::LibraryIException
+// Function: H5Library::setFreeListLimits (static)
+///\brief Sets limits on the different kinds of free lists.
+///\param reg_global_lim - IN: Limit on all "regular" free list memory used
+///\param reg_list_lim - IN: Limit on memory used in each "regular" free list
+///\param arr_global_lim - IN: Limit on all "array" free list memory used
+///\param arr_list_lim - IN: Limit on memory used in each "array" free list
+///\param blk_global_lim - IN: Limit on all "block" free list memory used
+///\param blk_list_lim - IN: Limit on memory used in each "block" free list
+///\exception H5::LibraryIException
///\par Description
-/// Setting a value of -1 for a limit means no limit of that type.
-/// For more information on free list limits, please refer to C
-/// layer Reference Manual at:
+/// Setting a value of -1 for a limit means no limit of that type.
+/// For more information on free list limits, please refer to C
+/// layer Reference Manual at:
/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5.html#Library-SetFreeListLimits
-// Programmer Binh-Minh Ribler - May, 2004
+// Programmer Binh-Minh Ribler - May, 2004
//--------------------------------------------------------------------------
void H5Library::setFreeListLimits(int reg_global_lim, int reg_list_lim,
- int arr_global_lim, int arr_list_lim, int blk_global_lim,
- int blk_list_lim)
+ int arr_global_lim, int arr_list_lim, int blk_global_lim,
+ int blk_list_lim)
{
- herr_t ret_value = H5set_free_list_limits(reg_global_lim, reg_list_lim, arr_global_lim, arr_list_lim, blk_global_lim, blk_list_lim);
- if( ret_value < 0 )
- {
- throw LibraryIException("H5Library::setFreeListLimits", "H5set_free_list_limits failed");
- }
+ herr_t ret_value = H5set_free_list_limits(reg_global_lim, reg_list_lim, arr_global_lim, arr_list_lim, blk_global_lim, blk_list_lim);
+ if (ret_value < 0)
+ {
+ throw LibraryIException("H5Library::setFreeListLimits", "H5set_free_list_limits failed");
+ }
}
#ifndef DOXYGEN_SHOULD_SKIP_THIS
//--------------------------------------------------------------------------
-// Function: H5Library default constructor - private
-///\brief Default constructor: Creates a stub H5Library object
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5Library default constructor - private
+///\brief Default constructor: Creates a stub H5Library object
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
H5Library::H5Library(){}
//--------------------------------------------------------------------------
-// Function: H5Library destructor
-///\brief Noop destructor
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5Library destructor
+///\brief Noop destructor
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
H5Library::~H5Library(){}
#endif // DOXYGEN_SHOULD_SKIP_THIS
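
The static H5Library wrappers documented in this file can be exercised as below; the version numbers are whatever the linked library reports, and -1 disables a free-list limit:

    #include <iostream>
    #include "H5Cpp.h"
    using namespace H5;

    int main()
    {
        try {
            unsigned maj = 0, min = 0, rel = 0;
            H5Library::getLibVersion(maj, min, rel);
            std::cout << "HDF5 " << maj << "." << min << "." << rel << std::endl;

            // Complains if the headers and the linked library disagree
            H5Library::checkVersion(maj, min, rel);

            // -1 means "no limit" for each free-list category
            H5Library::setFreeListLimits(-1, -1, -1, -1, -1, -1);
            H5Library::garbageCollect();
        }
        catch (LibraryIException& e) {
            std::cerr << e.getDetailMsg() << std::endl;
            return 1;
        }
        return 0;
    }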
diff --git a/c++/src/H5Library.h b/c++/src/H5Library.h
index 694b052..9b4150e 100644
--- a/c++/src/H5Library.h
+++ b/c++/src/H5Library.h
@@ -6,12 +6,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef __H5Library_H
@@ -27,32 +25,32 @@ namespace H5 {
*/
class H5_DLLCPP H5Library {
public:
- // Initializes the HDF5 library.
- static void open();
+ // Initializes the HDF5 library.
+ static void open();
- // Flushes all data to disk, closes files, and cleans up memory.
- static void close();
+ // Flushes all data to disk, closes files, and cleans up memory.
+ static void close();
- // Instructs library not to install atexit cleanup routine
- static void dontAtExit();
+ // Instructs library not to install atexit cleanup routine
+ static void dontAtExit();
- // Returns the HDF library release number.
- static void getLibVersion( unsigned& majnum, unsigned& minnum, unsigned& relnum );
+ // Returns the HDF library release number.
+ static void getLibVersion(unsigned& majnum, unsigned& minnum, unsigned& relnum);
- // Verifies that the arguments match the version numbers compiled
- // into the library
- static void checkVersion( unsigned majnum, unsigned minnum, unsigned relnum );
+ // Verifies that the arguments match the version numbers compiled
+ // into the library
+ static void checkVersion(unsigned majnum, unsigned minnum, unsigned relnum);
- // Walks through all the garbage collection routines for the library,
- // which are supposed to free any unused memory they have allocated.
- static void garbageCollect();
+ // Walks through all the garbage collection routines for the library,
+ // which are supposed to free any unused memory they have allocated.
+ static void garbageCollect();
- // Sets limits on the different kinds of free lists.
- static void setFreeListLimits(int reg_global_lim, int reg_list_lim, int
- arr_global_lim, int arr_list_lim, int blk_global_lim, int blk_list_lim);
+ // Sets limits on the different kinds of free lists.
+ static void setFreeListLimits(int reg_global_lim, int reg_list_lim, int
+ arr_global_lim, int arr_list_lim, int blk_global_lim, int blk_list_lim);
// Initializes C++ library and registers terminating functions at exit.
- // Only for the library functions, not for user-defined functions.
+ // Only for the library functions, not for user-defined functions.
static void initH5cpp(void);
// Sends request for terminating the HDF5 library.
@@ -69,6 +67,7 @@ class H5_DLLCPP H5Library {
~H5Library();
#endif // DOXYGEN_SHOULD_SKIP_THIS
-};
-}
+}; // end of H5Library
+} // namespace H5
+
#endif // __H5Library_H
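
The initH5cpp declaration above relies on the standard std::atexit idiom: register each cleanup routine, treat a non-zero return as failure, and remember that handlers run in reverse order of registration. A generic sketch of that idiom, with illustrative names unrelated to HDF5:

    #include <cstdlib>
    #include <iostream>
    #include <stdexcept>

    namespace {
        void cleanupA() { /* release resources owned by A; runs last */ }
        void cleanupB() { /* release resources owned by B; runs first */ }

        void registerCleanups()
        {
            if (std::atexit(cleanupA) != 0)
                throw std::runtime_error("registering cleanupA failed");
            if (std::atexit(cleanupB) != 0)
                throw std::runtime_error("registering cleanupB failed");
        }
    }

    int main()
    {
        registerCleanups();
        std::cout << "cleanup handlers registered" << std::endl;
        return 0;       // cleanupB runs here, then cleanupA
    }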
diff --git a/c++/src/H5Location.cpp b/c++/src/H5Location.cpp
index cb4fd12..e820b0d 100644
--- a/c++/src/H5Location.cpp
+++ b/c++/src/H5Location.cpp
@@ -5,51 +5,50 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <string>
+#include "H5private.h" // for HDmemset
#include "H5Include.h"
#include "H5Exception.h"
#include "H5IdComponent.h"
+#include "H5DataSpace.h"
#include "H5PropList.h"
#include "H5FaccProp.h"
#include "H5FcreatProp.h"
#include "H5OcreatProp.h"
#include "H5DcreatProp.h"
#include "H5DxferProp.h"
+#include "H5LaccProp.h"
#include "H5Location.h"
#include "H5Object.h"
+#include "H5DataType.h"
#include "H5AbstractDs.h"
-#include "H5DataSpace.h"
#include "H5DataSet.h"
-#include "H5Attribute.h"
+#include "H5CommonFG.h"
#include "H5Group.h"
#include "H5File.h"
-#include "H5Alltypes.h"
-#include "H5private.h" // for HDmemset
namespace H5 {
#ifndef DOXYGEN_SHOULD_SKIP_THIS
//--------------------------------------------------------------------------
-// Function: H5Location default constructor (protected)
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5Location default constructor (protected)
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
H5Location::H5Location() : IdComponent() {}
//--------------------------------------------------------------------------
-// Function: H5Location overloaded constructor (protected)
-// Purpose Creates an H5Location object using the id of an existing HDF5
-// object.
-// Parameters object_id - IN: Id of an existing HDF5 object
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5Location overloaded constructor (protected)
+// Purpose Creates an H5Location object using the id of an existing HDF5
+// object.
+// Parameters object_id - IN: Id of an existing HDF5 object
+// Programmer Binh-Minh Ribler - 2000
// *** Deprecation warning ***
// This constructor is no longer appropriate because the data member "id" had
@@ -61,11 +60,11 @@ H5Location::H5Location() : IdComponent() {}
// H5Location::H5Location(const hid_t object_id) : IdComponent() {}
//--------------------------------------------------------------------------
-// Function: H5Location copy constructor
-// Purpose: This noop copy constructor is removed as a result of the data
-// member "id" being moved down to sub-classes. (Mar 2015)
-///\param original - IN: H5Location instance to copy
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5Location copy constructor
+// Purpose This noop copy constructor is removed as a result of the data
+// member "id" being moved down to sub-classes. (Mar 2015)
+///\param original - IN: H5Location instance to copy
+// Programmer Binh-Minh Ribler - 2000
//
// *** Deprecation warning ***
// This constructor is no longer appropriate because the data member "id" had
@@ -77,156 +76,192 @@ H5Location::H5Location() : IdComponent() {}
#endif // DOXYGEN_SHOULD_SKIP_THIS
//--------------------------------------------------------------------------
-// Function: H5Location::flush
-///\brief Flushes all buffers associated with a location to disk.
-///\param scope - IN: Specifies the scope of the flushing action,
-/// which can be either of these values:
-/// \li \c H5F_SCOPE_GLOBAL - Flushes the entire virtual file
-/// \li \c H5F_SCOPE_LOCAL - Flushes only the specified file
-///\exception H5::Exception
+// Function: H5Location::exists
+///\brief Checks if a link of a given name exists in a location
+///\param name - IN: Searched name
+///\param lapl - IN: Link access property list
+///\exception H5::LocationException
+// Programmer Binh-Minh Ribler - Nov, 2016
+// Modification
+//--------------------------------------------------------------------------
+bool H5Location::exists(const char* name, const LinkAccPropList& lapl) const
+{
+ htri_t ret_value = H5Lexists(getId(), name, lapl.getId());
+ if (ret_value > 0)
+ return true;
+ else if (ret_value == 0)
+ return false;
+ else // Raise exception when H5Lexists returns a negative value
+ {
+ throwException("exists", "H5Lexists failed");
+ }
+ // Not reached: throwException does not return; this quiets compiler warnings
+ return false;
+}
+
+//--------------------------------------------------------------------------
+// Function: H5Location::exists
+///\brief Checks if a link of a given name exists in a location
+///\param name - IN: Searched name
+///\param lapl - IN: Link access property list
+///\exception H5::LocationException
+// Programmer Binh-Minh Ribler - Dec, 2016
+// Modification
+//--------------------------------------------------------------------------
+bool H5Location::exists(const H5std_string& name, const LinkAccPropList& lapl) const
+{
+ return(exists(name.c_str(), lapl));
+}
+
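A usage sketch for the new exists() overloads; the file name and the link name "data" are illustrative, and a custom LinkAccPropList could be passed in place of the default:

    #include <iostream>
    #include "H5Cpp.h"
    using namespace H5;

    int main()
    {
        try {
            H5File file("sample.h5", H5F_ACC_RDONLY);        // assumed existing file

            if (file.exists("data", LinkAccPropList::DEFAULT))
                std::cout << "link 'data' exists" << std::endl;
            else
                std::cout << "link 'data' not found" << std::endl;
        }
        catch (Exception& e) {
            std::cerr << e.getDetailMsg() << std::endl;
            return 1;
        }
        return 0;
    }
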
+//--------------------------------------------------------------------------
+// Function: H5Location::flush
+///\brief Flushes all buffers associated with a location to disk.
+///\param scope - IN: Specifies the scope of the flushing action,
+/// which can be either of these values:
+/// \li \c H5F_SCOPE_GLOBAL - Flushes the entire virtual file
+/// \li \c H5F_SCOPE_LOCAL - Flushes only the specified file
+///\exception H5::Exception
///\par Description
-/// This location is used to identify the file to be flushed.
-// Programmer Binh-Minh Ribler - 2012
+/// This location is used to identify the file to be flushed.
+// Programmer Binh-Minh Ribler - 2012
// Modification
-// Sep 2012 - BMR
-// Moved from H5File/H5Object
+// Sep 2012 - BMR
+// Moved from H5File/H5Object
//--------------------------------------------------------------------------
void H5Location::flush(H5F_scope_t scope) const
{
- herr_t ret_value = H5Fflush(getId(), scope);
- if( ret_value < 0 )
- {
- throw LocationException(inMemFunc("flush"), "H5Fflush failed");
- }
+ herr_t ret_value = H5Fflush(getId(), scope);
+ if (ret_value < 0)
+ {
+ throw LocationException(inMemFunc("flush"), "H5Fflush failed");
+ }
}
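
The two flush scopes described above in a tiny sketch; the file is assumed to exist and be open for writing:

    #include "H5Cpp.h"
    using namespace H5;

    int main()
    {
        H5File file("sample.h5", H5F_ACC_RDWR);   // assumed existing, writable file

        file.flush(H5F_SCOPE_LOCAL);    // flush only this file
        file.flush(H5F_SCOPE_GLOBAL);   // flush the entire virtual file
        return 0;
    }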
//--------------------------------------------------------------------------
-// Function: H5Location::getFileName
-///\brief Gets the name of the file, in which this HDF5 object belongs.
-///\return File name
-///\exception H5::LocationException
-// Programmer Binh-Minh Ribler - Jul, 2004
+// Function: H5Location::getFileName
+///\brief Gets the name of the file, in which this HDF5 object belongs.
+///\return File name
+///\exception H5::LocationException
+// Programmer Binh-Minh Ribler - Jul, 2004
//--------------------------------------------------------------------------
H5std_string H5Location::getFileName() const
{
- try {
+ try {
return(p_get_file_name());
- }
- catch (LocationException& E) {
- throw FileIException(inMemFunc("getFileName"), E.getDetailMsg());
- }
-}
-
-//--------------------------------------------------------------------------
-// Function: H5Location::setComment
-///\brief Sets or resets the comment for an object specified by its name.
-///\param name - IN: Name of the object
-///\param comment - IN: New comment
-///\exception H5::LocationException
-///\par Description
-/// If \a comment is an empty string or a null pointer, the comment
-/// message is removed from the object.
-/// Comments should be relatively short, null-terminated, ASCII
-/// strings. They can be attached to any object that has an
-/// object header, e.g., data sets, groups, named data types,
-/// and data spaces, but not symbolic links.
-// Programmer Binh-Minh Ribler - 2000 (moved from CommonFG, Sep 2013)
+ }
+ catch (LocationException& E) {
+ throw FileIException(inMemFunc("getFileName"), E.getDetailMsg());
+ }
+}
+
+//--------------------------------------------------------------------------
+// Function: H5Location::setComment
+///\brief Sets or resets the comment for an object specified by its name.
+///\param name - IN: Name of the object
+///\param comment - IN: New comment
+///\exception H5::LocationException
+///\par Description
+/// If \a comment is an empty string or a null pointer, the comment
+/// message is removed from the object.
+/// Comments should be relatively short, null-terminated, ASCII
+/// strings. They can be attached to any object that has an
+/// object header, e.g., data sets, groups, named data types,
+/// and data spaces, but not symbolic links.
+// Programmer Binh-Minh Ribler - 2000 (moved from CommonFG, Sep 2013)
// Modification
-// 2007: QAK modified to use H5O APIs; however the first parameter is
-// no longer just file or group, this function should be moved
-// to another class to accommodate attribute, dataset, and named
-// datatype. - BMR
+// 2007: QAK modified to use H5O APIs; however the first parameter is
+// no longer just file or group, this function should be moved
+// to another class to accommodate attribute, dataset, and named
+// datatype. - BMR
//--------------------------------------------------------------------------
void H5Location::setComment(const char* name, const char* comment) const
{
- herr_t ret_value = H5Oset_comment_by_name(getId(), name, comment, H5P_DEFAULT);
- if( ret_value < 0 )
- throw LocationException(inMemFunc("setComment"), "H5Oset_comment_by_name failed");
+ herr_t ret_value = H5Oset_comment_by_name(getId(), name, comment, H5P_DEFAULT);
+ if (ret_value < 0)
+ throw LocationException(inMemFunc("setComment"), "H5Oset_comment_by_name failed");
}
//--------------------------------------------------------------------------
-// Function: H5Location::setComment
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function in that it takes an
-/// \c H5std_string for \a name and \a comment.
-// Programmer Binh-Minh Ribler - 2000 (moved from CommonFG, Sep 2013)
+// Function: H5Location::setComment
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function in that it takes an
+/// \c H5std_string for \a name and \a comment.
+// Programmer Binh-Minh Ribler - 2000 (moved from CommonFG, Sep 2013)
//--------------------------------------------------------------------------
void H5Location::setComment(const H5std_string& name, const H5std_string& comment) const
{
- setComment(name.c_str(), comment.c_str());
+ setComment(name.c_str(), comment.c_str());
}
//--------------------------------------------------------------------------
-// Function: H5Location::setComment
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function in that it doesn't take
-/// an object name.
-// Programmer Binh-Minh Ribler - Sep 2013
+// Function: H5Location::setComment
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function in that it doesn't take
+/// an object name.
+// Programmer Binh-Minh Ribler - Sep 2013
// Modification
//--------------------------------------------------------------------------
void H5Location::setComment(const char* comment) const
{
- herr_t ret_value = H5Oset_comment_by_name(getId(), ".", comment, H5P_DEFAULT);
- if( ret_value < 0 )
- throw LocationException(inMemFunc("setComment"), "H5Oset_comment_by_name failed");
+ herr_t ret_value = H5Oset_comment_by_name(getId(), ".", comment, H5P_DEFAULT);
+ if (ret_value < 0)
+ throw LocationException(inMemFunc("setComment"), "H5Oset_comment_by_name failed");
}
//--------------------------------------------------------------------------
-// Function: H5Location::setComment
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function in that it takes an
-/// \c H5std_string for \a comment.
-// Programmer Binh-Minh Ribler - Sep 2013
+// Function: H5Location::setComment
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function in that it takes an
+/// \c H5std_string for \a comment.
+// Programmer Binh-Minh Ribler - Sep 2013
//--------------------------------------------------------------------------
void H5Location::setComment(const H5std_string& comment) const
{
- setComment(comment.c_str());
+ setComment(comment.c_str());
}
//--------------------------------------------------------------------------
-// Function: H5Location::removeComment
-///\brief Removes the comment from an object specified by its name.
-///\param name - IN: Name of the object
-///\exception H5::LocationException
-// Programmer Binh-Minh Ribler - May 2005 (moved from CommonFG, Sep 2013)
-// 2007: QAK modified to use H5O APIs; however the first parameter is
-// no longer just file or group, this function should be moved
-// to another class to accommodate attribute, dataset, and named
-// datatype. - BMR
+// Function: H5Location::removeComment
+///\brief Removes the comment from an object specified by its name.
+///\param name - IN: Name of the object
+///\exception H5::LocationException
+// Programmer Binh-Minh Ribler - May 2005 (moved from CommonFG, Sep 2013)
+// 2007: QAK modified to use H5O APIs; however the first parameter is
+// no longer just file or group, this function should be moved
+// to another class to accommodate attribute, dataset, and named
+// datatype. - BMR
//--------------------------------------------------------------------------
void H5Location::removeComment(const char* name) const
{
- herr_t ret_value = H5Oset_comment_by_name(getId(), name, NULL, H5P_DEFAULT);
- if( ret_value < 0 )
- throw LocationException(inMemFunc("removeComment"), "H5Oset_comment_by_name failed");
+ herr_t ret_value = H5Oset_comment_by_name(getId(), name, NULL, H5P_DEFAULT);
+ if (ret_value < 0)
+ throw LocationException(inMemFunc("removeComment"), "H5Oset_comment_by_name failed");
}
//--------------------------------------------------------------------------
-// Function: H5Location::removeComment
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function in that it takes an
-/// \c H5std_string for \a name.
-// Programmer Binh-Minh Ribler - May 2005 (moved from CommonFG, Sep 2013)
+// Function: H5Location::removeComment
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function in that it takes an
+/// \c H5std_string for \a name.
+// Programmer Binh-Minh Ribler - May 2005 (moved from CommonFG, Sep 2013)
//--------------------------------------------------------------------------
void H5Location::removeComment(const H5std_string& name) const
{
- removeComment (name.c_str());
+ removeComment (name.c_str());
}
//--------------------------------------------------------------------------
-// Function: H5Location::getComment
-///\brief Retrieves the comment for this location, returning its length.
-///\param name - IN: Name of the object
-///\param buf_size - IN: Length of the comment to retrieve
-///\param comment - OUT: Retrieved comment
-///\return Actual length of the comment
-///\exception H5::LocationException
+// Function: H5Location::getComment
+///\brief Retrieves the comment for this location, returning its length.
+///\param name - IN: Name of the object
+///\param buf_size - IN: Length of the comment to retrieve
+///\param comment - OUT: Retrieved comment
+///\return Actual length of the comment
+///\exception H5::LocationException
///\par Description
-/// This function retrieves \a buf_size characters of the comment
-/// including the null terminator. Thus, if the actual length
-/// of the comment is more than buf_size-1, the retrieved comment
-/// will be truncated to accommodate the null terminator.
-// Programmer Binh-Minh Ribler - Mar 2014
+/// This function retrieves \a buf_size characters of the comment
+/// including the null terminator. Thus, if the actual length
+/// of the comment is more than buf_size-1, the retrieved comment
+/// will be truncated to accommodate the null terminator.
+// Programmer Binh-Minh Ribler - Mar 2014
//--------------------------------------------------------------------------
ssize_t H5Location::getComment(const char* name, size_t buf_size, char* comment) const
{
@@ -237,27 +272,27 @@ ssize_t H5Location::getComment(const char* name, size_t buf_size, char* comment)
// If H5Oget_comment_by_name returns a negative value, raise an exception
if (comment_len < 0)
- {
+ {
throw LocationException("H5Location::getComment", "H5Oget_comment_by_name failed");
}
// If the comment is longer than the provided buffer size, the C library
// will not null terminate it
if (static_cast<size_t>(comment_len) >= buf_size)
- comment[buf_size-1] = '\0';
+ comment[buf_size-1] = '\0';
// Return the actual comment length, which might be different from buf_size
return(comment_len);
}
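
The comment routines (setComment, getComment, removeComment) combine as below; note the truncation rule when the char* overload's buffer is too small. The file and group names are illustrative:

    #include <iostream>
    #include "H5Cpp.h"
    using namespace H5;

    int main()
    {
        try {
            H5File file("sample.h5", H5F_ACC_RDWR);     // assumed existing file
            Group  grp = file.openGroup("data");        // assumed existing group

            grp.setComment("raw sensor captures, v2");

            // String overload: buf_size 0 means "use the actual comment length"
            std::cout << grp.getComment(".", 0) << std::endl;

            // char* overload: at most buf_size chars are kept, incl. the terminator
            char buf[8];
            ssize_t full_len = grp.getComment(".", sizeof(buf), buf);
            std::cout << buf << " (full length " << full_len << ")" << std::endl;

            grp.removeComment(".");
        }
        catch (Exception& e) {
            std::cerr << e.getDetailMsg() << std::endl;
            return 1;
        }
        return 0;
    }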
//--------------------------------------------------------------------------
-// Function: H5Location::getComment
-///\brief Returns the comment as \a string for this location,
-/// returning its length.
-///\param name - IN: Name of the object
-///\param buf_size - IN: Length of the comment to retrieve, default to 0
-///\return Comment string
-///\exception H5::LocationException
-// Programmer Binh-Minh Ribler - 2000 (moved from CommonFG, Sep 2013)
+// Function: H5Location::getComment
+///\brief Returns the comment as \a string for this location,
+/// returning its length.
+///\param name - IN: Name of the object
+///\param buf_size - IN: Length of the comment to retrieve, default to 0
+///\return Comment string
+///\exception H5::LocationException
+// Programmer Binh-Minh Ribler - 2000 (moved from CommonFG, Sep 2013)
//--------------------------------------------------------------------------
H5std_string H5Location::getComment(const char* name, size_t buf_size) const
{
@@ -270,36 +305,36 @@ H5std_string H5Location::getComment(const char* name, size_t buf_size) const
// If H5Oget_comment_by_name returns a negative value, raise an exception
if (comment_len < 0)
- {
+ {
throw LocationException("H5Location::getComment", "H5Oget_comment_by_name failed");
}
// If comment exists, calls C routine again to get it
else if (comment_len > 0)
- {
- size_t tmp_len = buf_size;
-
- // If buffer size is not provided, use comment length
- if (tmp_len == 0)
- tmp_len = comment_len;
-
- // Temporary buffer for char* comment
- char* comment_C = new char[tmp_len+1];
- HDmemset(comment_C, 0, tmp_len+1); // clear buffer
-
- // Used overloaded function
- ssize_t temp_len = getComment(name, tmp_len+1, comment_C);
- if (temp_len < 0)
- {
- delete []comment_C;
- throw LocationException("H5Location::getComment", "H5Oget_comment_by_name failed");
- }
-
- // Convert the C comment to return
- comment = comment_C;
-
- // Clean up resource
- delete []comment_C;
+ {
+ size_t tmp_len = buf_size;
+
+ // If buffer size is not provided, use comment length
+ if (tmp_len == 0)
+ tmp_len = comment_len;
+
+ // Temporary buffer for char* comment
+ char* comment_C = new char[tmp_len+1];
+ HDmemset(comment_C, 0, tmp_len+1); // clear buffer
+
+ // Used overloaded function
+ ssize_t temp_len = getComment(name, tmp_len+1, comment_C);
+ if (temp_len < 0)
+ {
+ delete []comment_C;
+ throw LocationException("H5Location::getComment", "H5Oget_comment_by_name failed");
+ }
+
+ // Convert the C comment to return
+ comment = comment_C;
+
+ // Clean up resource
+ delete []comment_C;
}
// Return the string comment
@@ -307,11 +342,11 @@ H5std_string H5Location::getComment(const char* name, size_t buf_size) const
}
//--------------------------------------------------------------------------
-// Function: H5Location::getComment
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function in that it takes an
-/// \c H5std_string for \a name.
-// Programmer Binh-Minh Ribler - 2000 (moved from CommonFG, Sep 2013)
+// Function: H5Location::getComment
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function in that it takes an
+/// \c H5std_string for \a name.
+// Programmer Binh-Minh Ribler - 2000 (moved from CommonFG, Sep 2013)
//--------------------------------------------------------------------------
H5std_string H5Location::getComment(const H5std_string& name, size_t buf_size) const
{
@@ -320,112 +355,112 @@ H5std_string H5Location::getComment(const H5std_string& name, size_t buf_size) c
#ifndef DOXYGEN_SHOULD_SKIP_THIS
//--------------------------------------------------------------------------
-// Function: H5Location::p_reference (protected)
-// Purpose Creates a reference to an HDF5 object or a dataset region.
+// Function: H5Location::p_reference (protected)
+// Purpose Creates a reference to an HDF5 object or a dataset region.
// Parameters
-// name - IN: Name of the object to be referenced
-// dataspace - IN: Dataspace with selection
-// ref_type - IN: Type of reference; default to \c H5R_DATASET_REGION
-// Exception H5::ReferenceException
-// Programmer Binh-Minh Ribler - May, 2004
+// name - IN: Name of the object to be referenced
+// dataspace - IN: Dataspace with selection
+// ref_type - IN: Type of reference; default to \c H5R_DATASET_REGION
+// Exception H5::ReferenceException
+// Programmer Binh-Minh Ribler - May, 2004
//--------------------------------------------------------------------------
void H5Location::p_reference(void* ref, const char* name, hid_t space_id, H5R_type_t ref_type) const
{
- herr_t ret_value = H5Rcreate(ref, getId(), name, ref_type, space_id);
- if (ret_value < 0)
- {
- throw ReferenceException(inMemFunc("reference"), "H5Rcreate failed");
- }
+ herr_t ret_value = H5Rcreate(ref, getId(), name, ref_type, space_id);
+ if (ret_value < 0)
+ {
+ throw ReferenceException(inMemFunc("reference"), "H5Rcreate failed");
+ }
}
#endif // DOXYGEN_SHOULD_SKIP_THIS
//--------------------------------------------------------------------------
-// Function: H5Location::reference
-///\brief Creates a reference to an HDF5 object or a dataset region.
-///\param ref - IN: Reference pointer
-///\param name - IN: Name of the object to be referenced
-///\param dataspace - IN: Dataspace with selection
-///\param ref_type - IN: Type of reference to query, valid values are:
-/// \li \c H5R_OBJECT - Reference is an object reference.
-/// \li \c H5R_DATASET_REGION - Reference is a dataset region
-/// reference. (default)
-///\exception H5::ReferenceException
-///\note This method is more suitable for a dataset region reference.
-// Programmer Binh-Minh Ribler - May, 2004
+// Function: H5Location::reference
+///\brief Creates a reference to an HDF5 object or a dataset region.
+///\param ref - IN: Reference pointer
+///\param name - IN: Name of the object to be referenced
+///\param dataspace - IN: Dataspace with selection
+///\param ref_type - IN: Type of reference to query, valid values are:
+/// \li \c H5R_OBJECT - Reference is an object reference.
+/// \li \c H5R_DATASET_REGION - Reference is a dataset region
+/// reference. (default)
+///\exception H5::ReferenceException
+///\note This method is more suitable for a dataset region reference.
+// Programmer Binh-Minh Ribler - May, 2004
//--------------------------------------------------------------------------
void H5Location::reference(void* ref, const char* name, const DataSpace& dataspace, H5R_type_t ref_type) const
{
- try {
- p_reference(ref, name, dataspace.getId(), ref_type);
- }
- catch (ReferenceException& E) {
- throw ReferenceException(inMemFunc("reference"), E.getDetailMsg());
- }
-}
-
-//--------------------------------------------------------------------------
-// Function: H5Location::reference
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function in that it takes an
-/// \c H5std_string for \a name.
-///\param ref - IN: Reference pointer
-///\param name - IN: Name of the object to be referenced
-///\param dataspace - IN: Dataspace with selection
-///\param ref_type - IN: Type of reference to query, valid values are:
-/// \li \c H5R_OBJECT - Reference is an object reference.
-/// \li \c H5R_DATASET_REGION - Reference is a dataset region
-/// reference. (default)
-///\exception H5::ReferenceException
-///\note This method is more suitable for a dataset region reference.
-// Programmer Binh-Minh Ribler - May, 2004
+ try {
+ p_reference(ref, name, dataspace.getId(), ref_type);
+ }
+ catch (ReferenceException& E) {
+ throw ReferenceException(inMemFunc("reference"), E.getDetailMsg());
+ }
+}
+
+//--------------------------------------------------------------------------
+// Function: H5Location::reference
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function in that it takes an
+/// \c H5std_string for \a name.
+///\param ref - IN: Reference pointer
+///\param name - IN: Name of the object to be referenced
+///\param dataspace - IN: Dataspace with selection
+///\param ref_type - IN: Type of reference to query, valid values are:
+/// \li \c H5R_OBJECT - Reference is an object reference.
+/// \li \c H5R_DATASET_REGION - Reference is a dataset region
+/// reference. (default)
+///\exception H5::ReferenceException
+///\note This method is more suitable for a dataset region reference.
+// Programmer Binh-Minh Ribler - May, 2004
//--------------------------------------------------------------------------
void H5Location::reference(void* ref, const H5std_string& name, const DataSpace& dataspace, H5R_type_t ref_type) const
{
- try {
- p_reference(ref, name.c_str(), dataspace.getId(), ref_type);
- }
- catch (ReferenceException& E) {
- throw ReferenceException(inMemFunc("reference"), E.getDetailMsg());
- }
+ try {
+ p_reference(ref, name.c_str(), dataspace.getId(), ref_type);
+ }
+ catch (ReferenceException& E) {
+ throw ReferenceException(inMemFunc("reference"), E.getDetailMsg());
+ }
}
//--------------------------------------------------------------------------
-// Function: H5Location::reference
-///\brief This is an overloaded function, provided for your convenience.
-/// It differs from the above function in that it does not take
-/// a DataSpace object and the reference type must be specified.
-///\param ref - IN: Reference pointer
-///\param name - IN: Name of the object to be referenced
-///\param ref_type - IN: Type of reference to query, valid values are:
-/// \li \c H5R_OBJECT - Reference is an object reference (default)
-/// \li \c H5R_DATASET_REGION - Reference is a dataset region
-///\exception H5::ReferenceException
-///\note This method is more suitable for an object reference.
-// Programmer Binh-Minh Ribler - May, 2004
+// Function: H5Location::reference
+///\brief This is an overloaded function, provided for your convenience.
+/// It differs from the above function in that it does not take
+/// a DataSpace object and the reference type must be specified.
+///\param ref - IN: Reference pointer
+///\param name - IN: Name of the object to be referenced
+///\param ref_type - IN: Type of reference to query, valid values are:
+/// \li \c H5R_OBJECT - Reference is an object reference (default)
+/// \li \c H5R_DATASET_REGION - Reference is a dataset region
+///\exception H5::ReferenceException
+///\note This method is more suitable for an object reference.
+// Programmer Binh-Minh Ribler - May, 2004
//--------------------------------------------------------------------------
void H5Location::reference(void* ref, const char* name, H5R_type_t ref_type) const
{
- try {
- p_reference(ref, name, -1, ref_type);
- }
- catch (ReferenceException& E) {
- throw ReferenceException(inMemFunc("reference"), E.getDetailMsg());
- }
+ try {
+ p_reference(ref, name, -1, ref_type);
+ }
+ catch (ReferenceException& E) {
+ throw ReferenceException(inMemFunc("reference"), E.getDetailMsg());
+ }
}
//--------------------------------------------------------------------------
-// Function: H5Location::reference
-///\brief This is an overloaded function, provided for your convenience.
-/// It differs from the above function in that it takes an
-/// \c H5std_string for the object's name.
-///\param ref - IN: Reference pointer
-///\param name - IN: Name of the object to be referenced - \c H5std_string
-///\param ref_type - IN: Type of reference to query, valid values are:
-/// \li \c H5R_OBJECT - Reference is an object reference (default)
-/// \li \c H5R_DATASET_REGION - Reference is a dataset region
-///\note This method is more suitable for an object reference.
-// Programmer Binh-Minh Ribler - May, 2004
+// Function: H5Location::reference
+///\brief This is an overloaded function, provided for your convenience.
+/// It differs from the above function in that it takes an
+/// \c H5std_string for the object's name.
+///\param ref - IN: Reference pointer
+///\param name - IN: Name of the object to be referenced - \c H5std_string
+///\param ref_type - IN: Type of reference to query, valid values are:
+/// \li \c H5R_OBJECT - Reference is an object reference (default)
+/// \li \c H5R_DATASET_REGION - Reference is a dataset region
+///\note This method is more suitable for an object reference.
+// Programmer Binh-Minh Ribler - May, 2004
//--------------------------------------------------------------------------
void H5Location::reference(void* ref, const H5std_string& name, H5R_type_t ref_type) const
{
@@ -434,224 +469,227 @@ void H5Location::reference(void* ref, const H5std_string& name, H5R_type_t ref_t
#ifndef DOXYGEN_SHOULD_SKIP_THIS
//--------------------------------------------------------------------------
-// Function: H5Location::p_dereference (protected)
-// Purpose Dereference a ref into an hdf5 object.
+// Function: H5Location::p_dereference (protected)
+// Purpose Dereference a ref into an hdf5 object.
// Parameters
-// loc_id - IN: An hdf5 identifier specifying the location of the
-// referenced object
-// ref - IN: Reference pointer
-// ref_type - IN: Reference type
-// plist - IN: Property list - default to PropList::DEFAULT
-// from_func - IN: Name of the calling function
-// Exception H5::ReferenceException
-// Programmer Binh-Minh Ribler - Oct, 2006
+// loc_id - IN: An hdf5 identifier specifying the location of the
+// referenced object
+// ref - IN: Reference pointer
+// ref_type - IN: Reference type
+// plist - IN: Property list - default to PropList::DEFAULT
+// from_func - IN: Name of the calling function
+// Exception H5::ReferenceException
+// Programmer Binh-Minh Ribler - Oct, 2006
// Modification
-// May 2008 - BMR
-// Moved from IdComponent.
+// May 2008 - BMR
+// Moved from IdComponent.
//--------------------------------------------------------------------------
hid_t H5Location::p_dereference(hid_t loc_id, const void* ref, H5R_type_t ref_type, const PropList& plist, const char* from_func)
{
- hid_t plist_id;
- if (p_valid_id(plist.getId()))
- plist_id = plist.getId();
- else
- plist_id = H5P_DEFAULT;
+ hid_t plist_id;
+ if (p_valid_id(plist.getId()))
+ plist_id = plist.getId();
+ else
+ plist_id = H5P_DEFAULT;
- hid_t temp_id = H5Rdereference2(loc_id, plist_id, ref_type, ref);
- if (temp_id < 0)
- {
- throw ReferenceException(inMemFunc(from_func), "H5Rdereference failed");
- }
+ hid_t temp_id = H5Rdereference2(loc_id, plist_id, ref_type, ref);
+ if (temp_id < 0)
+ {
+ throw ReferenceException(inMemFunc(from_func), "H5Rdereference failed");
+ }
- return(temp_id);
+ return(temp_id);
}
#endif // DOXYGEN_SHOULD_SKIP_THIS
//--------------------------------------------------------------------------
-// Function: H5Location::dereference
-///\brief Dereferences a reference into an HDF5 object, given an HDF5 object.
-///\param loc - IN: Location of the referenced object
-///\param ref - IN: Reference pointer
-///\param ref_type - IN: Reference type
-///\param plist - IN: Property list - default to PropList::DEFAULT
-///\exception H5::ReferenceException
-// Programmer Binh-Minh Ribler - Oct, 2006
+// Function: H5Location::dereference
+///\brief Dereferences a reference into an HDF5 object, given an HDF5 object.
+///\param loc - IN: Location of the referenced object
+///\param ref - IN: Reference pointer
+///\param ref_type - IN: Reference type
+///\param plist - IN: Property list - default to PropList::DEFAULT
+///\exception H5::ReferenceException
+// Programmer Binh-Minh Ribler - Oct, 2006
// Modification
-// May, 2008
-// Corrected missing parameters. - BMR
+// May, 2008
+// Corrected missing parameters. - BMR
//--------------------------------------------------------------------------
void H5Location::dereference(const H5Location& loc, const void* ref, H5R_type_t ref_type, const PropList& plist)
{
- p_setId(p_dereference(loc.getId(), ref, ref_type, plist, "dereference"));
+ p_setId(p_dereference(loc.getId(), ref, ref_type, plist, "dereference"));
}
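
The reverse direction, reopening the referenced object through dereference(); the reference is assumed to have been created as in the previous sketch:

    #include "H5Cpp.h"
    using namespace H5;

    int main()
    {
        H5File file("sample.h5", H5F_ACC_RDONLY);       // assumed existing file

        hobj_ref_t obj_ref;
        file.reference(&obj_ref, "dset1", H5R_OBJECT);  // assumed existing dataset

        // Any open location in the same file can serve as the anchor
        DataSet dset;
        dset.dereference(file, &obj_ref, H5R_OBJECT, PropList::DEFAULT);
        return 0;
    }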
//--------------------------------------------------------------------------
-// Function: H5Location::dereference
-///\brief Dereferences a reference into an HDF5 object, given an attribute.
-///\param attr - IN: Attribute specifying the location of the referenced object
-///\param ref - IN: Reference pointer
-///\param ref_type - IN: Reference type
-///\param plist - IN: Property list - default to PropList::DEFAULT
-///\exception H5::ReferenceException
-// Programmer Binh-Minh Ribler - Oct, 2006
+// Function: H5Location::dereference
+// brief Dereferences a reference into an HDF5 object, given an attribute.
+// param attr - IN: Attribute specifying the location of the referenced object
+// param ref - IN: Reference pointer
+// param ref_type - IN: Reference type
+// param plist - IN: Property list - default to PropList::DEFAULT
+// exception H5::ReferenceException
+// Programmer Binh-Minh Ribler - Oct, 2006
// Modification
-// May, 2008
-// Corrected missing parameters. - BMR
+// May, 2008
+// Corrected missing parameters. -BMR
+// Mar, 2017
+// Removed in 1.10.1 because H5Location is Attribute's baseclass
+// now. -BMR
//--------------------------------------------------------------------------
-void H5Location::dereference(const Attribute& attr, const void* ref, H5R_type_t ref_type, const PropList& plist)
+ /* void H5Location::dereference(const Attribute& attr, const void* ref, H5R_type_t ref_type, const PropList& plist)
{
p_setId(p_dereference(attr.getId(), ref, ref_type, plist, "dereference"));
}
+ */
#ifndef H5_NO_DEPRECATED_SYMBOLS
//--------------------------------------------------------------------------
-// Function: H5Location::getObjType
-///\brief Retrieves the type of object that an object reference points to.
-///\param ref_type - IN: Type of reference to query, valid values are:
-/// \li \c H5R_OBJECT - Reference is an object reference.
-/// \li \c H5R_DATASET_REGION - Reference is a dataset region reference.
-///\param ref - IN: Reference to query
-///\return An object type, which can be one of the following:
-/// \li \c H5G_UNKNOWN - A failure occurs. (-1)
-/// \li \c H5G_GROUP - Object is a group.
-/// \li \c H5G_DATASET - Object is a dataset.
-/// \li \c H5G_TYPE Object - is a named datatype
-/// \li \c H5G_LINK - Object is a symbolic link.
-/// \li \c H5G_UDLINK - Object is a user-defined link.
-///\exception H5::ReferenceException
-// Programmer Binh-Minh Ribler - May, 2004
+// Function: H5Location::getObjType
+///\brief Retrieves the type of object that an object reference points to.
+///\param ref_type - IN: Type of reference to query, valid values are:
+/// \li \c H5R_OBJECT - Reference is an object reference.
+/// \li \c H5R_DATASET_REGION - Reference is a dataset region reference.
+///\param ref - IN: Reference to query
+///\return An object type, which can be one of the following:
+/// \li \c H5G_UNKNOWN - A failure occurs. (-1)
+/// \li \c H5G_GROUP - Object is a group.
+/// \li \c H5G_DATASET - Object is a dataset.
+/// \li \c H5G_TYPE - Object is a named datatype.
+/// \li \c H5G_LINK - Object is a symbolic link.
+/// \li \c H5G_UDLINK - Object is a user-defined link.
+///\exception H5::ReferenceException
+// Programmer Binh-Minh Ribler - May, 2004
// Modification
-// Sep 2012: Moved up from H5File, Group, DataSet, and DataType
+// Sep 2012: Moved up from H5File, Group, DataSet, and DataType
//--------------------------------------------------------------------------
H5G_obj_t H5Location::getObjType(void *ref, H5R_type_t ref_type) const
{
- try {
- return(p_get_obj_type(ref, ref_type));
- }
- catch (ReferenceException& E) {
- throw ReferenceException(inMemFunc("getObjType"), E.getDetailMsg());
- }
+ try {
+ return(p_get_obj_type(ref, ref_type));
+ }
+ catch (ReferenceException& E) {
+ throw ReferenceException(inMemFunc("getObjType"), E.getDetailMsg());
+ }
}
#ifndef DOXYGEN_SHOULD_SKIP_THIS
//--------------------------------------------------------------------------
-// Function: H5Location::p_get_obj_type (protected)
-// Purpose Retrieves the type of object that an object reference points to.
+// Function: H5Location::p_get_obj_type (protected)
+// Purpose Retrieves the type of object that an object reference points to.
// Parameters
-// ref - IN: Reference to query
-// ref_type - IN: Type of reference to query
-// Return An object type, which can be one of the following:
-// H5G_UNKNOWN \tFailure occurs (-1)
-// H5G_GROUP \tObject is a group.
-// H5G_DATASET \tObject is a dataset.
-// H5G_TYPE Object \tis a named datatype.
-// H5G_LINK \tObject is a symbolic link.
-// H5G_UDLINK \tObject is a user-defined link.
-// Exception H5::ReferenceException
-// Programmer Binh-Minh Ribler - May, 2004
+// ref - IN: Reference to query
+// ref_type - IN: Type of reference to query
+// Return An object type, which can be one of the following:
+// H5G_UNKNOWN \tFailure occurs (-1)
+// H5G_GROUP \tObject is a group.
+// H5G_DATASET \tObject is a dataset.
+// H5G_TYPE \tObject is a named datatype.
+// H5G_LINK \tObject is a symbolic link.
+// H5G_UDLINK \tObject is a user-defined link.
+// Exception H5::ReferenceException
+// Programmer Binh-Minh Ribler - May, 2004
//--------------------------------------------------------------------------
H5G_obj_t H5Location::p_get_obj_type(void *ref, H5R_type_t ref_type) const
{
H5G_obj_t obj_type = H5Rget_obj_type1(getId(), ref_type, ref);
-
if (obj_type == H5G_UNKNOWN)
- {
- throw ReferenceException(inMemFunc("getObjType"), "H5Rget_obj_type1 failed");
- }
+ {
+ throw ReferenceException(inMemFunc("getObjType"), "H5Rget_obj_type1 failed");
+ }
return(obj_type);
}
#endif // DOXYGEN_SHOULD_SKIP_THIS
#endif /* H5_NO_DEPRECATED_SYMBOLS */
//--------------------------------------------------------------------------
-// Function: H5Location::getRefObjType
-///\brief Retrieves the type of object that an object reference points to.
-///\param ref - IN: Reference to query
-///\param ref_type - IN: Type of reference to query, valid values are:
-/// \li \c H5R_OBJECT - Reference is an object reference.
-/// \li \c H5R_DATASET_REGION - Reference is a dataset region reference.
-///\return An object type, which can be one of the following:
-/// \li \c H5O_TYPE_UNKNOWN - Unknown object type (-1)
-/// \li \c H5O_TYPE_GROUP - Object is a group
-/// \li \c H5O_TYPE_DATASET - Object is a dataset
-/// \li \c H5O_TYPE_NAMED_DATATYPE - Object is a named datatype
-/// \li \c H5O_TYPE_NTYPES - Number of different object types
-///\exception H5::ReferenceException
-// Programmer Binh-Minh Ribler - May, 2004
+// Function: H5Location::getRefObjType
+///\brief Retrieves the type of object that an object reference points to.
+///\param ref - IN: Reference to query
+///\param ref_type - IN: Type of reference to query, valid values are:
+/// \li \c H5R_OBJECT - Reference is an object reference.
+/// \li \c H5R_DATASET_REGION - Reference is a dataset region reference.
+///\return An object type, which can be one of the following:
+/// \li \c H5O_TYPE_UNKNOWN - Unknown object type (-1)
+/// \li \c H5O_TYPE_GROUP - Object is a group
+/// \li \c H5O_TYPE_DATASET - Object is a dataset
+/// \li \c H5O_TYPE_NAMED_DATATYPE - Object is a named datatype
+/// \li \c H5O_TYPE_NTYPES - Number of different object types
+///\exception H5::ReferenceException
+// Programmer Binh-Minh Ribler - May, 2004
//--------------------------------------------------------------------------
H5O_type_t H5Location::getRefObjType(void *ref, H5R_type_t ref_type) const
{
- try {
+ try {
return(p_get_ref_obj_type(ref, ref_type));
- }
- catch (ReferenceException& E) {
- throw ReferenceException(inMemFunc("getRefObjType"), E.getDetailMsg());
- }
+ }
+ catch (ReferenceException& E) {
+ throw ReferenceException(inMemFunc("getRefObjType"), E.getDetailMsg());
+ }
}
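
A short sketch of getRefObjType, assuming a previously stored object reference; the helper name is hypothetical:

    #include "H5Cpp.h"
    using namespace H5;

    bool ref_points_to_dataset(const H5File& file, hobj_ref_t& ref)
    {
        // Throws H5::ReferenceException if the query fails
        // (see p_get_ref_obj_type below).
        H5O_type_t otype = file.getRefObjType(&ref, H5R_OBJECT);
        return otype == H5O_TYPE_DATASET;
    }
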
#ifndef DOXYGEN_SHOULD_SKIP_THIS
//--------------------------------------------------------------------------
-// Function: H5Location::p_get_ref_obj_type (protected)
-// Purpose Retrieves the type of object that an object reference points to.
+// Function: H5Location::p_get_ref_obj_type (protected)
+// Purpose Retrieves the type of object that an object reference points to.
// Parameters
-// ref - IN: Reference to query
-// ref_type - IN: Type of reference to query
-// Return An object type, which can be one of the following:
-// H5O_TYPE_UNKNOWN - Unknown object type (-1)
-// H5O_TYPE_GROUP - Object is a group
-// H5O_TYPE_DATASET - Object is a dataset
-// H5O_TYPE_NAMED_DATATYPE - Object is a named datatype
-// H5O_TYPE_NTYPES - Number of object types
-// Exception H5::ReferenceException
-// Programmer Binh-Minh Ribler - May, 2004
+// ref - IN: Reference to query
+// ref_type - IN: Type of reference to query
+// Return An object type, which can be one of the following:
+// H5O_TYPE_UNKNOWN - Unknown object type (-1)
+// H5O_TYPE_GROUP - Object is a group
+// H5O_TYPE_DATASET - Object is a dataset
+// H5O_TYPE_NAMED_DATATYPE - Object is a named datatype
+// H5O_TYPE_NTYPES - Number of object types
+// Exception H5::ReferenceException
+// Programmer Binh-Minh Ribler - May, 2004
//--------------------------------------------------------------------------
H5O_type_t H5Location::p_get_ref_obj_type(void *ref, H5R_type_t ref_type) const
{
- H5O_type_t obj_type = H5O_TYPE_UNKNOWN;
- herr_t ret_value = H5Rget_obj_type2(getId(), ref_type, ref, &obj_type);
- if (ret_value < 0)
- {
- throw ReferenceException(inMemFunc("getRefObjType"), "H5Rget_obj_type2 failed");
- }
- if (obj_type == H5O_TYPE_UNKNOWN || obj_type >= H5O_TYPE_NTYPES)
- {
- throw ReferenceException(inMemFunc("getRefObjType"), "H5Rget_obj_type2 returned invalid type");
- }
- return(obj_type);
+ H5O_type_t obj_type = H5O_TYPE_UNKNOWN;
+ herr_t ret_value = H5Rget_obj_type2(getId(), ref_type, ref, &obj_type);
+ if (ret_value < 0)
+ {
+ throw ReferenceException(inMemFunc("getRefObjType"), "H5Rget_obj_type2 failed");
+ }
+ if (obj_type == H5O_TYPE_UNKNOWN || obj_type >= H5O_TYPE_NTYPES)
+ {
+ throw ReferenceException(inMemFunc("getRefObjType"), "H5Rget_obj_type2 returned invalid type");
+ }
+ return(obj_type);
}
//--------------------------------------------------------------------------
-// Function: H5Location::getRegion
-///\brief Retrieves a dataspace with the region pointed to selected.
-///\param ref - IN: Reference to get region of
-///\param ref_type - IN: Type of reference to get region of - default
-// to H5R_DATASET_REGION
-///\return DataSpace object
-///\exception H5::ReferenceException
-// Programmer Binh-Minh Ribler - May, 2004
+// Function: H5Location::getRegion
+///\brief Retrieves a dataspace with the region pointed to selected.
+///\param ref - IN: Reference to get region of
+///\param ref_type - IN: Type of reference to get region of - default
+// to H5R_DATASET_REGION
+///\return DataSpace object
+///\exception H5::ReferenceException
+// Programmer Binh-Minh Ribler - May, 2004
// Modification
-// Mar 29, 2015
-// Used friend function to set id for DataSpace instead of the
-// existing id constructor or the setId method to avoid incrementing
-// ref count, as a work-around for a problem described in the JIRA
-// issue HDFFV-7947. -BMR
+// Mar 29, 2015
+// Used friend function to set id for DataSpace instead of the
+// existing id constructor or the setId method to avoid incrementing
+// ref count, as a work-around for a problem described in the JIRA
+// issue HDFFV-7947. -BMR
//--------------------------------------------------------------------------
DataSpace H5Location::getRegion(void *ref, H5R_type_t ref_type) const
{
- hid_t space_id = H5Rget_region(getId(), ref_type, ref);
- if (space_id < 0)
- {
- throw ReferenceException(inMemFunc("getRegion"), "H5Rget_region failed");
- }
- try {
- DataSpace dataspace;
- f_DataSpace_setId(&dataspace, space_id);
- return(dataspace);
- }
- catch (DataSpaceIException& E) {
- throw ReferenceException(inMemFunc("getRegion"), E.getDetailMsg());
- }
+ hid_t space_id = H5Rget_region(getId(), ref_type, ref);
+ if (space_id < 0)
+ {
+ throw ReferenceException(inMemFunc("getRegion"), "H5Rget_region failed");
+ }
+ try {
+ DataSpace dataspace;
+ f_DataSpace_setId(&dataspace, space_id);
+ return(dataspace);
+ }
+ catch (DataSpaceIException& E) {
+ throw ReferenceException(inMemFunc("getRegion"), E.getDetailMsg());
+ }
}
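
A short sketch of getRegion, assuming regref already holds a valid dataset-region reference created elsewhere:

    #include "H5Cpp.h"
    using namespace H5;

    hssize_t count_referenced_points(const DataSet& dset, hdset_reg_ref_t& regref)
    {
        // The returned DataSpace carries the selection stored in the reference.
        DataSpace region = dset.getRegion(&regref, H5R_DATASET_REGION);
        return region.getSelectNpoints();
    }
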
@@ -667,11 +705,11 @@ DataSpace H5Location::getRegion(void *ref, H5R_type_t ref_type) const
// to call the right getId() - although, as the structure of the
// library at this time, getId() is basically the IdComponent::getId()
// ***Updated: after the classes are rearranged (HDFFV-9920), functions
-// in CommonFG are moved to Group, and they can call getId()
-// instead of getLocId(). getLocId() is kept for backward
-// compatibility on user applications. Aug 18, 2016 -BMR
+// in CommonFG are moved to Group, and they can call getId()
+// instead of getLocId(). getLocId() is kept for backward
+// compatibility on user applications. Aug 18, 2016 -BMR
// ***Updated: Moving to Group was a mistake, now to H5Location
-// Aug 24, 2016 -BMR
+// Aug 24, 2016 -BMR
// - when a failure is returned by the C API, the functions will call
// throwException, which is a pure virtual function and is implemented
// by H5File to throw a FileIException and by Group to throw a
@@ -684,29 +722,29 @@ DataSpace H5Location::getRegion(void *ref, H5R_type_t ref_type) const
// is kept in those methods as well. Sep 17, 2016 -BMR
//--------------------------------------------------------------------------
-// Function: H5Location::createGroup
-///\brief Creates a new group at this location which can be a file
-/// or another group.
-///\param name - IN: Name of the group to create
-///\param size_hint - IN: Indicates the number of bytes to reserve for
-/// the names that will appear in the group
-///\return Group instance
-///\exception H5::FileIException or H5::GroupIException
+// Function: H5Location::createGroup
+///\brief Creates a new group at this location which can be a file
+/// or another group.
+///\param name - IN: Name of the group to create
+///\param size_hint - IN: Indicates the number of bytes to reserve for
+/// the names that will appear in the group
+///\return Group instance
+///\exception H5::FileIException or H5::GroupIException
///\par Description
-/// The optional \a size_hint specifies how much file space to
-/// reserve for storing the names that will appear in this new
-/// group. If a non-positive value is provided for the \a size_hint
-/// then a default size is chosen.
-// Programmer Binh-Minh Ribler - 2000
+/// The optional \a size_hint specifies how much file space to
+/// reserve for storing the names that will appear in this new
+/// group. If a non-positive value is provided for the \a size_hint
+/// then a default size is chosen.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-Group H5Location::createGroup( const char* name, size_t size_hint ) const
+Group H5Location::createGroup(const char* name, size_t size_hint) const
{
// Group creation property list for size hint
hid_t gcpl_id = 0;
// Set the local heap size hint
if (size_hint > 0)
- {
+ {
// If the creation of the property list failed, throw an exception
if ((gcpl_id = H5Pcreate(H5P_GROUP_CREATE)) < 0)
throwException("createGroup", "H5Pcreate failed");
@@ -717,351 +755,351 @@ Group H5Location::createGroup( const char* name, size_t size_hint ) const
}
}
- // Call C routine H5Gcreate2 to create the named group, giving the
- // location id which can be a file id or a group id
- hid_t group_id = H5Gcreate2(getId(), name, H5P_DEFAULT, gcpl_id, H5P_DEFAULT );
+ // Call C routine H5Gcreate2 to create the named group, giving the
+ // location id which can be a file id or a group id
+ hid_t group_id = H5Gcreate2(getId(), name, H5P_DEFAULT, gcpl_id, H5P_DEFAULT);
- // Close the group creation property list, if necessary
- if(gcpl_id > 0)
+ // Close the group creation property list, if necessary
+ if (gcpl_id > 0)
H5Pclose(gcpl_id);
- // If the creation of the group failed, throw an exception
- if( group_id < 0 )
- throwException("createGroup", "H5Gcreate2 failed");
+ // If the creation of the group failed, throw an exception
+ if (group_id < 0)
+ throwException("createGroup", "H5Gcreate2 failed");
- // No failure, create and return the Group object
- Group group;
- //group.p_setId(group_id);
- H5Location *ptr = &group;
- ptr->p_setId(group_id);
- return( group );
+ // No failure, create and return the Group object
+ Group group;
+ //group.p_setId(group_id);
+ H5Location *ptr = &group;
+ ptr->p_setId(group_id);
+ return(group);
}
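
A minimal sketch of createGroup with and without the local-heap size hint; the file and group names are hypothetical:

    #include "H5Cpp.h"
    using namespace H5;

    void make_groups()
    {
        H5File file("example.h5", H5F_ACC_TRUNC);

        // Default local heap size (size_hint omitted).
        Group g1 = file.createGroup("/g1");

        // Reserve roughly 1 KiB for the link names stored in the group's local heap.
        Group g2 = file.createGroup("/g2", 1024);
    }
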
//--------------------------------------------------------------------------
-// Function: H5Location::createGroup
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function in that it takes an
-/// \c H5std_string for \a name.
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5Location::createGroup
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function in that it takes an
+/// \c H5std_string for \a name.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-Group H5Location::createGroup( const H5std_string& name, size_t size_hint ) const
+Group H5Location::createGroup(const H5std_string& name, size_t size_hint) const
{
- return( createGroup( name.c_str(), size_hint ));
+ return(createGroup(name.c_str(), size_hint));
}
//--------------------------------------------------------------------------
-// Function: H5Location::openGroup
-///\brief Opens an existing group in a location which can be a file
-/// or another group.
-///\param name - IN: Name of the group to open
-///\return Group instance
-///\exception H5::FileIException or H5::GroupIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5Location::openGroup
+///\brief Opens an existing group in a location which can be a file
+/// or another group.
+///\param name - IN: Name of the group to open
+///\return Group instance
+///\exception H5::FileIException or H5::GroupIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-Group H5Location::openGroup( const char* name ) const
+Group H5Location::openGroup(const char* name) const
{
- // Call C routine H5Gopen2 to open the named group, giving the
- // location id which can be a file id or a group id
- hid_t group_id = H5Gopen2(getId(), name, H5P_DEFAULT );
+ // Call C routine H5Gopen2 to open the named group, giving the
+ // location id which can be a file id or a group id
+ hid_t group_id = H5Gopen2(getId(), name, H5P_DEFAULT);
- // If the opening of the group failed, throw an exception
- if( group_id < 0 )
- throwException("openGroup", "H5Gopen2 failed");
+ // If the opening of the group failed, throw an exception
+ if (group_id < 0)
+ throwException("openGroup", "H5Gopen2 failed");
- // No failure, create and return the Group object
- Group group;
- //group.p_setId(group_id);
- H5Location *ptr = &group;
- ptr->p_setId(group_id);
- return( group );
+ // No failure, create and return the Group object
+ Group group;
+ //group.p_setId(group_id);
+ H5Location *ptr = &group;
+ ptr->p_setId(group_id);
+ return(group);
}
//--------------------------------------------------------------------------
-// Function: H5Location::openGroup
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function in that it takes an
-/// \c H5std_string for \a name.
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5Location::openGroup
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function in that it takes an
+/// \c H5std_string for \a name.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-Group H5Location::openGroup( const H5std_string& name ) const
+Group H5Location::openGroup(const H5std_string& name) const
{
- return( openGroup( name.c_str() ));
+ return(openGroup(name.c_str()));
}
//--------------------------------------------------------------------------
-// Function: H5Location::createDataSet
-///\brief Creates a new dataset at this location.
-///\param name - IN: Name of the dataset to create
-///\param data_type - IN: Datatype of the dataset
-///\param data_space - IN: Dataspace for the dataset
-///\param create_plist - IN: Creation properly list for the dataset
-///\return DataSet instance
-///\exception H5::FileIException or H5::GroupIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5Location::createDataSet
+///\brief Creates a new dataset at this location.
+///\param name - IN: Name of the dataset to create
+///\param data_type - IN: Datatype of the dataset
+///\param data_space - IN: Dataspace for the dataset
+///\param create_plist - IN: Creation property list for the dataset
+///\return DataSet instance
+///\exception H5::FileIException or H5::GroupIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-DataSet H5Location::createDataSet( const char* name, const DataType& data_type, const DataSpace& data_space, const DSetCreatPropList& create_plist ) const
+DataSet H5Location::createDataSet(const char* name, const DataType& data_type, const DataSpace& data_space, const DSetCreatPropList& create_plist) const
{
// Obtain identifiers for C API
- hid_t type_id = data_type.getId();
- hid_t space_id = data_space.getId();
- hid_t create_plist_id = create_plist.getId();
+ hid_t type_id = data_type.getId();
+ hid_t space_id = data_space.getId();
+ hid_t create_plist_id = create_plist.getId();
- // Call C routine H5Dcreate2 to create the named dataset
- hid_t dataset_id = H5Dcreate2(getId(), name, type_id, space_id, H5P_DEFAULT, create_plist_id, H5P_DEFAULT );
+ // Call C routine H5Dcreate2 to create the named dataset
+ hid_t dataset_id = H5Dcreate2(getId(), name, type_id, space_id, H5P_DEFAULT, create_plist_id, H5P_DEFAULT);
- // If the creation of the dataset failed, throw an exception
- if( dataset_id < 0 )
- throwException("createDataSet", "H5Dcreate2 failed");
+ // If the creation of the dataset failed, throw an exception
+ if (dataset_id < 0)
+ throwException("createDataSet", "H5Dcreate2 failed");
- // No failure, create and return the DataSet object
- DataSet dataset;
- f_DataSet_setId(&dataset, dataset_id);
- return( dataset );
+ // No failure, create and return the DataSet object
+ DataSet dataset;
+ f_DataSet_setId(&dataset, dataset_id);
+ return(dataset);
}
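
A minimal sketch of createDataSet for a 2-D native-int dataset with default creation properties; the dataset name is hypothetical:

    #include "H5Cpp.h"
    using namespace H5;

    DataSet make_int_dataset(H5File& file)
    {
        // 4 x 6 dataspace for the new dataset.
        hsize_t dims[2] = {4, 6};
        DataSpace space(2, dims);

        // DSetCreatPropList::DEFAULT gives a contiguous, unfiltered layout.
        return file.createDataSet("/ints", PredType::NATIVE_INT, space,
                                  DSetCreatPropList::DEFAULT);
    }
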
//--------------------------------------------------------------------------
-// Function: H5Location::createDataSet
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function in that it takes an
-/// \c H5std_string for \a name.
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5Location::createDataSet
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function in that it takes an
+/// \c H5std_string for \a name.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-DataSet H5Location::createDataSet( const H5std_string& name, const DataType& data_type, const DataSpace& data_space, const DSetCreatPropList& create_plist ) const
+DataSet H5Location::createDataSet(const H5std_string& name, const DataType& data_type, const DataSpace& data_space, const DSetCreatPropList& create_plist) const
{
- return( createDataSet( name.c_str(), data_type, data_space, create_plist ));
+ return(createDataSet(name.c_str(), data_type, data_space, create_plist));
}
//--------------------------------------------------------------------------
-// Function: H5Location::openDataSet
-///\brief Opens an existing dataset at this location.
-///\param name - IN: Name of the dataset to open
-///\return DataSet instance
-///\exception H5::FileIException or H5::GroupIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5Location::openDataSet
+///\brief Opens an existing dataset at this location.
+///\param name - IN: Name of the dataset to open
+///\return DataSet instance
+///\exception H5::FileIException or H5::GroupIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-DataSet H5Location::openDataSet( const char* name ) const
+DataSet H5Location::openDataSet(const char* name) const
{
- // Call C function H5Dopen2 to open the specified dataset, giving
- // the location id and the dataset's name
- hid_t dataset_id = H5Dopen2(getId(), name, H5P_DEFAULT );
+ // Call C function H5Dopen2 to open the specified dataset, giving
+ // the location id and the dataset's name
+ hid_t dataset_id = H5Dopen2(getId(), name, H5P_DEFAULT);
- // If the dataset's opening failed, throw an exception
- if(dataset_id < 0)
- throwException("openDataSet", "H5Dopen2 failed");
+ // If the dataset's opening failed, throw an exception
+ if (dataset_id < 0)
+ throwException("openDataSet", "H5Dopen2 failed");
- // No failure, create and return the DataSet object
- DataSet dataset;
- f_DataSet_setId(&dataset, dataset_id);
- return( dataset );
+ // No failure, create and return the DataSet object
+ DataSet dataset;
+ f_DataSet_setId(&dataset, dataset_id);
+ return(dataset);
}
//--------------------------------------------------------------------------
-// Function: H5Location::openDataSet
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function in that it takes an
-/// \c H5std_string for \a name.
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5Location::openDataSet
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function in that it takes an
+/// \c H5std_string for \a name.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-DataSet H5Location::openDataSet( const H5std_string& name ) const
+DataSet H5Location::openDataSet(const H5std_string& name) const
{
- return( openDataSet( name.c_str() ));
+ return(openDataSet(name.c_str()));
}
//--------------------------------------------------------------------------
-// Function: H5Location::link
-///\brief Creates a link of the specified type from \a new_name to
-/// \a curr_name.
-///\param link_type - IN: Link type; possible values are
-/// \li \c H5G_LINK_HARD
-/// \li \c H5G_LINK_SOFT
-///\param curr_name - IN: Name of the existing object if link is a hard
-/// link; can be anything for the soft link
-///\param new_name - IN: New name for the object
-///\exception H5::FileIException or H5::GroupIException
+// Function: H5Location::link
+///\brief Creates a link of the specified type from \a new_name to
+/// \a curr_name.
+///\param link_type - IN: Link type; possible values are
+/// \li \c H5G_LINK_HARD
+/// \li \c H5G_LINK_SOFT
+///\param curr_name - IN: Name of the existing object if link is a hard
+/// link; can be anything for the soft link
+///\param new_name - IN: New name for the object
+///\exception H5::FileIException or H5::GroupIException
///\par Description
-/// Note that both names are interpreted relative to the
-/// specified location.
-/// For information on creating hard link and soft link, please
-/// refer to the C layer Reference Manual at:
+/// Note that both names are interpreted relative to the
+/// specified location.
+/// For information on creating hard links and soft links, please
+/// refer to the C layer Reference Manual at:
/// http://hdfgroup.org/HDF5/doc/RM/RM_H5L.html#Link-CreateHard and
/// http://hdfgroup.org/HDF5/doc/RM/RM_H5L.html#Link-CreateSoft
-// Programmer Binh-Minh Ribler - 2000
+// Programmer Binh-Minh Ribler - 2000
// Modification
-// 2007: QAK modified to use H5L APIs - BMR
+// 2007: QAK modified to use H5L APIs - BMR
//--------------------------------------------------------------------------
-void H5Location::link( H5L_type_t link_type, const char* curr_name, const char* new_name ) const
+void H5Location::link(H5L_type_t link_type, const char* curr_name, const char* new_name) const
{
herr_t ret_value = -1;
switch(link_type) {
case H5L_TYPE_HARD:
- ret_value = H5Lcreate_hard(getId(), curr_name, H5L_SAME_LOC, new_name, H5P_DEFAULT, H5P_DEFAULT );
+ ret_value = H5Lcreate_hard(getId(), curr_name, H5L_SAME_LOC, new_name, H5P_DEFAULT, H5P_DEFAULT);
break;
case H5L_TYPE_SOFT:
- ret_value = H5Lcreate_soft( curr_name,getId(), new_name, H5P_DEFAULT, H5P_DEFAULT );
+ ret_value = H5Lcreate_soft(curr_name, getId(), new_name, H5P_DEFAULT, H5P_DEFAULT);
break;
- case H5L_TYPE_ERROR:
- case H5L_TYPE_EXTERNAL:
- case H5L_TYPE_MAX:
+ case H5L_TYPE_ERROR:
+ case H5L_TYPE_EXTERNAL:
+ case H5L_TYPE_MAX:
default:
throwException("link", "unknown link type");
break;
} /* end switch */
- if( ret_value < 0 )
- throwException("link", "creating link failed");
+ if (ret_value < 0)
+ throwException("link", "creating link failed");
}
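
A short sketch of creating a hard link and a soft link with this wrapper; the paths are hypothetical:

    #include "H5Cpp.h"
    using namespace H5;

    void add_links(const H5File& file)
    {
        // Hard link: "/g1/dset1" must name an existing object.
        file.link(H5L_TYPE_HARD, "/g1/dset1", "/alias");

        // Soft link: the target path does not have to exist yet.
        file.link(H5L_TYPE_SOFT, "/maybe/created/later", "/soft_alias");
    }
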
//--------------------------------------------------------------------------
-// Function: H5Location::link
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function in that it takes an
-/// \c H5std_string for \a curr_name and \a new_name.
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5Location::link
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function in that it takes an
+/// \c H5std_string for \a curr_name and \a new_name.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void H5Location::link( H5L_type_t link_type, const H5std_string& curr_name, const H5std_string& new_name ) const
+void H5Location::link(H5L_type_t link_type, const H5std_string& curr_name, const H5std_string& new_name) const
{
- link( link_type, curr_name.c_str(), new_name.c_str() );
+ link(link_type, curr_name.c_str(), new_name.c_str());
}
//--------------------------------------------------------------------------
-// Function: H5Location::unlink
-///\brief Removes the specified name at this location.
-///\param name - IN: Name of the object to be removed
-///\exception H5::FileIException or H5::GroupIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5Location::unlink
+///\brief Removes the specified name at this location.
+///\param name - IN: Name of the object to be removed
+///\exception H5::FileIException or H5::GroupIException
+// Programmer Binh-Minh Ribler - 2000
// Modification
-// 2007: QAK modified to use H5L APIs - BMR
+// 2007: QAK modified to use H5L APIs - BMR
//--------------------------------------------------------------------------
-void H5Location::unlink( const char* name ) const
+void H5Location::unlink(const char* name) const
{
- herr_t ret_value = H5Ldelete(getId(), name, H5P_DEFAULT );
- if( ret_value < 0 )
- throwException("unlink", "H5Ldelete failed");
+ herr_t ret_value = H5Ldelete(getId(), name, H5P_DEFAULT);
+ if (ret_value < 0)
+ throwException("unlink", "H5Ldelete failed");
}
//--------------------------------------------------------------------------
-// Function: H5Location::unlink
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function in that it takes an
-/// \c H5std_string for \a name.
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5Location::unlink
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function in that it takes an
+/// \c H5std_string for \a name.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void H5Location::unlink( const H5std_string& name ) const
+void H5Location::unlink(const H5std_string& name) const
{
- unlink( name.c_str() );
+ unlink(name.c_str());
}
//--------------------------------------------------------------------------
-// Function: H5Location::move
-///\brief Renames an object at this location.
-///\param src - IN: Object's original name
-///\param dst - IN: Object's new name
-///\exception H5::FileIException or H5::GroupIException
+// Function: H5Location::move
+///\brief Renames an object at this location.
+///\param src - IN: Object's original name
+///\param dst - IN: Object's new name
+///\exception H5::FileIException or H5::GroupIException
///\note
-/// Exercise care in moving groups as it is possible to render
-/// data in a file inaccessible with H5Location::move. Please refer
-/// to the Group Interface in the HDF5 User's Guide for details at:
+/// Exercise care in moving groups as it is possible to render
+/// data in a file inaccessible with H5Location::move. Please refer
+/// to the Group Interface in the HDF5 User's Guide for details at:
/// https://www.hdfgroup.org/HDF5/doc/UG/HDF5_Users_Guide-Responsive%20HTML5/index.html#t=HDF5_Users_Guide%2FGroups%2FHDF5_Groups.htm
-// Programmer Binh-Minh Ribler - 2000
+// Programmer Binh-Minh Ribler - 2000
// Modification
-// 2007: QAK modified to use H5L APIs - BMR
+// 2007: QAK modified to use H5L APIs - BMR
//--------------------------------------------------------------------------
-void H5Location::move( const char* src, const char* dst ) const
+void H5Location::move(const char* src, const char* dst) const
{
- herr_t ret_value = H5Lmove(getId(), src, H5L_SAME_LOC, dst, H5P_DEFAULT, H5P_DEFAULT );
- if( ret_value < 0 )
- throwException("move", "H5Lmove failed");
+ herr_t ret_value = H5Lmove(getId(), src, H5L_SAME_LOC, dst, H5P_DEFAULT, H5P_DEFAULT);
+ if (ret_value < 0)
+ throwException("move", "H5Lmove failed");
}
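
A one-line sketch of renaming an object with move; the names are hypothetical:

    #include "H5Cpp.h"
    using namespace H5;

    void rename_object(const H5File& file)
    {
        // Renames /old_name to /new_name at this location (wraps H5Lmove).
        file.move("/old_name", "/new_name");
    }
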
//--------------------------------------------------------------------------
-// Function: H5Location::move
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function in that it takes an
-/// \c H5std_string for \a src and \a dst.
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5Location::move
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function in that it takes an
+/// \c H5std_string for \a src and \a dst.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void H5Location::move( const H5std_string& src, const H5std_string& dst ) const
+void H5Location::move(const H5std_string& src, const H5std_string& dst) const
{
- move( src.c_str(), dst.c_str() );
+ move(src.c_str(), dst.c_str());
}
#ifndef H5_NO_DEPRECATED_SYMBOLS
//--------------------------------------------------------------------------
-// Function: H5Location::getObjinfo
-///\brief Returns information about an object.
-///\param name - IN: Name of the object
-///\param follow_link - IN: Link flag
-///\param statbuf - OUT: Buffer to return information about the object
-///\exception H5::FileIException or H5::GroupIException
+// Function: H5Location::getObjinfo
+///\brief Returns information about an object.
+///\param name - IN: Name of the object
+///\param follow_link - IN: Link flag
+///\param statbuf - OUT: Buffer to return information about the object
+///\exception H5::FileIException or H5::GroupIException
///\par Description
-/// For more information, please refer to the C layer Reference
-/// Manual at:
+/// For more information, please refer to the C layer Reference
+/// Manual at:
/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5G.html#Group-GetObjinfo
-// Programmer Binh-Minh Ribler - 2000
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void H5Location::getObjinfo( const char* name, hbool_t follow_link, H5G_stat_t& statbuf ) const
+void H5Location::getObjinfo(const char* name, hbool_t follow_link, H5G_stat_t& statbuf) const
{
- herr_t ret_value = H5Gget_objinfo(getId(), name, follow_link, &statbuf );
- if( ret_value < 0 )
- throwException("getObjinfo", "H5Gget_objinfo failed");
+ herr_t ret_value = H5Gget_objinfo(getId(), name, follow_link, &statbuf);
+ if (ret_value < 0)
+ throwException("getObjinfo", "H5Gget_objinfo failed");
}
//--------------------------------------------------------------------------
-// Function: H5Location::getObjinfo
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function in that it takes an
-/// \c H5std_string for \a name.
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5Location::getObjinfo
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function in that it takes an
+/// \c H5std_string for \a name.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void H5Location::getObjinfo( const H5std_string& name, hbool_t follow_link, H5G_stat_t& statbuf ) const
+void H5Location::getObjinfo(const H5std_string& name, hbool_t follow_link, H5G_stat_t& statbuf) const
{
- getObjinfo( name.c_str(), follow_link, statbuf );
+ getObjinfo(name.c_str(), follow_link, statbuf);
}
//--------------------------------------------------------------------------
-// Function: H5Location::getObjinfo
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above functions in that it doesn't have
-/// the paramemter \a follow_link.
-// Programmer Binh-Minh Ribler - Nov, 2005
+// Function: H5Location::getObjinfo
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above functions in that it doesn't have
+/// the parameter \a follow_link.
+// Programmer Binh-Minh Ribler - Nov, 2005
// Note: need to modify to use H5Oget_info and H5Lget_info - BMR
//--------------------------------------------------------------------------
-void H5Location::getObjinfo( const char* name, H5G_stat_t& statbuf ) const
+void H5Location::getObjinfo(const char* name, H5G_stat_t& statbuf) const
{
- herr_t ret_value = H5Gget_objinfo(getId(), name, 0, &statbuf );
- if( ret_value < 0 )
- throwException("getObjinfo", "H5Gget_objinfo failed");
+ herr_t ret_value = H5Gget_objinfo(getId(), name, 0, &statbuf);
+ if (ret_value < 0)
+ throwException("getObjinfo", "H5Gget_objinfo failed");
}
//--------------------------------------------------------------------------
-// Function: H5Location::getObjinfo
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function in that it takes an
-/// \c H5std_string for \a name.
-// Programmer Binh-Minh Ribler - Nov, 2005
+// Function: H5Location::getObjinfo
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function in that it takes an
+/// \c H5std_string for \a name.
+// Programmer Binh-Minh Ribler - Nov, 2005
//--------------------------------------------------------------------------
-void H5Location::getObjinfo( const H5std_string& name, H5G_stat_t& statbuf ) const
+void H5Location::getObjinfo(const H5std_string& name, H5G_stat_t& statbuf) const
{
- getObjinfo( name.c_str(), statbuf );
+ getObjinfo(name.c_str(), statbuf);
}
#endif /* H5_NO_DEPRECATED_SYMBOLS */
//--------------------------------------------------------------------------
-// Function: H5Location::getLinkval
-///\brief Returns the name of the object that the symbolic link points to.
-///\param name - IN: Symbolic link to the object
-///\param size - IN: Maximum number of characters of value to be returned
-///\return Name of the object
-///\exception H5::FileIException or H5::GroupIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5Location::getLinkval
+///\brief Returns the name of the object that the symbolic link points to.
+///\param name - IN: Symbolic link to the object
+///\param size - IN: Maximum number of characters of value to be returned
+///\return Name of the object
+///\exception H5::FileIException or H5::GroupIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-H5std_string H5Location::getLinkval( const char* name, size_t size ) const
+H5std_string H5Location::getLinkval(const char* name, size_t size) const
{
H5L_info_t linkinfo;
- char *value_C; // value in C string
+ char *value_C; // value in C string
size_t val_size = size;
H5std_string value = "";
herr_t ret_value;
@@ -1069,81 +1107,81 @@ H5std_string H5Location::getLinkval( const char* name, size_t size ) const
// if user doesn't provide buffer size, determine it
if (size == 0)
{
- ret_value = H5Lget_info(getId(), name, &linkinfo, H5P_DEFAULT);
- if( ret_value < 0 )
- throwException("getLinkval", "H5Lget_info to find buffer size failed");
+ ret_value = H5Lget_info(getId(), name, &linkinfo, H5P_DEFAULT);
+ if (ret_value < 0)
+ throwException("getLinkval", "H5Lget_info to find buffer size failed");
- val_size = linkinfo.u.val_size;
+ val_size = linkinfo.u.val_size;
}
// if link has value, retrieve the value, otherwise, return null string
if (val_size > 0)
{
- value_C = new char[val_size+1]; // temporary C-string for C API
- HDmemset(value_C, 0, val_size+1); // clear buffer
-
- ret_value = H5Lget_val(getId(), name, value_C, val_size, H5P_DEFAULT);
- if( ret_value < 0 )
- {
- delete []value_C;
- throwException("getLinkval", "H5Lget_val failed");
- }
-
- value = H5std_string(value_C);
- delete []value_C;
+ value_C = new char[val_size+1]; // temporary C-string for C API
+ HDmemset(value_C, 0, val_size+1); // clear buffer
+
+ ret_value = H5Lget_val(getId(), name, value_C, val_size, H5P_DEFAULT);
+ if (ret_value < 0)
+ {
+ delete []value_C;
+ throwException("getLinkval", "H5Lget_val failed");
+ }
+
+ value = H5std_string(value_C);
+ delete []value_C;
}
return(value);
}
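
A short sketch of reading a soft link's target with getLinkval; passing size 0 lets the wrapper determine the value size itself, as the code above shows. The link name is hypothetical:

    #include "H5Cpp.h"
    #include <iostream>
    using namespace H5;

    void print_link_target(const H5File& file)
    {
        // With size 0, H5Lget_info is called first to size the buffer.
        H5std_string target = file.getLinkval("/soft_alias", 0);
        std::cout << "soft link points to: " << target << std::endl;
    }
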
//--------------------------------------------------------------------------
-// Function: H5Location::getLinkval
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function in that it takes an
-/// \c H5std_string for \a name.
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5Location::getLinkval
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function in that it takes an
+/// \c H5std_string for \a name.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-H5std_string H5Location::getLinkval( const H5std_string& name, size_t size ) const
+H5std_string H5Location::getLinkval(const H5std_string& name, size_t size) const
{
- return( getLinkval( name.c_str(), size ));
+ return(getLinkval(name.c_str(), size));
}
//--------------------------------------------------------------------------
-// Function: H5Location::mount
-///\brief Mounts the file \a child onto this group.
-///\param name - IN: Name of the group
-///\param child - IN: File to mount
-///\param plist - IN: Property list to use
-///\exception H5::FileIException or H5::GroupIException
-// Programmer Binh-Minh Ribler - 2014 (original 2000)
+// Function: H5Location::mount
+///\brief Mounts the file \a child onto this group.
+///\param name - IN: Name of the group
+///\param child - IN: File to mount
+///\param plist - IN: Property list to use
+///\exception H5::FileIException or H5::GroupIException
+// Programmer Binh-Minh Ribler - 2014 (original 2000)
//--------------------------------------------------------------------------
-void H5Location::mount(const char* name, const H5File& child, const PropList& plist ) const
+void H5Location::mount(const char* name, const H5File& child, const PropList& plist) const
{
- // Obtain identifiers for C API
- hid_t plist_id = plist.getId();
- hid_t child_id = child.getId();
+ // Obtain identifiers for C API
+ hid_t plist_id = plist.getId();
+ hid_t child_id = child.getId();
- // Call C routine H5Fmount to do the mouting
- herr_t ret_value = H5Fmount(getId(), name, child_id, plist_id );
+ // Call C routine H5Fmount to do the mounting
+ herr_t ret_value = H5Fmount(getId(), name, child_id, plist_id);
- // Raise exception if H5Fmount returns negative value
- if( ret_value < 0 )
- throwException("mount", "H5Fmount failed");
+ // Raise exception if H5Fmount returns negative value
+ if (ret_value < 0)
+ throwException("mount", "H5Fmount failed");
}
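
A minimal sketch of mounting one file under a group of another and unmounting it again; the file and group names are hypothetical, and the mount-point group must already exist in the parent file:

    #include "H5Cpp.h"
    using namespace H5;

    void mount_child()
    {
        H5File parent("parent.h5", H5F_ACC_RDWR);
        H5File child("child.h5", H5F_ACC_RDONLY);

        // Attach the child's root group under the existing group /mnt.
        parent.mount("/mnt", child, PropList::DEFAULT);

        // ... access the child's contents through /mnt ...

        parent.unmount("/mnt");
    }
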
//--------------------------------------------------------------------------
-// Function: H5Location::mount
-// Purpose This is an overloaded member function, kept for backward
-// compatibility. It differs from the above function in that it
-// misses const's. This wrapper will be removed in future release.
-// Param name - IN: Name of the group
-// Param child - IN: File to mount
-// Param plist - IN: Property list to use
-// Exception H5::FileIException or H5::GroupIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5Location::mount
+// Purpose This is an overloaded member function, kept for backward
+// compatibility. It differs from the above function in that it
+// lacks the const qualifiers. This wrapper will be removed in a future release.
+// Param name - IN: Name of the group
+// Param child - IN: File to mount
+// Param plist - IN: Property list to use
+// Exception H5::FileIException or H5::GroupIException
+// Programmer Binh-Minh Ribler - 2000
// Modification
-// Modified to call its replacement. -BMR, 2014/04/16
-// Removed from documentation. -BMR, 2016/03/07 1.8.17 and 1.10.0
-// Removed from code. -BMR, 2016/08/11 1.8.18 and 1.10.1
+// Modified to call its replacement. -BMR, 2014/04/16
+// Removed from documentation. -BMR, 2016/03/07 1.8.17 and 1.10.0
+// Removed from code. -BMR, 2016/08/11 1.8.18 and 1.10.1
//--------------------------------------------------------------------------
//void H5Location::mount(const char* name, H5File& child, PropList& plist) const
//{
@@ -1151,26 +1189,26 @@ void H5Location::mount(const char* name, const H5File& child, const PropList& pl
//}
//--------------------------------------------------------------------------
-// Function: H5Location::mount
-///\brief This is an overloaded member function, provided for convenience.
-/// It takes an \c H5std_string for \a name.
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5Location::mount
+///\brief This is an overloaded member function, provided for convenience.
+/// It takes an \c H5std_string for \a name.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
void H5Location::mount(const H5std_string& name, const H5File& child, const PropList& plist) const
{
- mount(name.c_str(), child, plist);
+ mount(name.c_str(), child, plist);
}
//--------------------------------------------------------------------------
-// Function: H5Location::mount
-// Purpose This is an overloaded member function, kept for backward
-// compatibility. It differs from the above function in that it
-// misses const's. This wrapper will be removed in future release.
-// Programmer Binh-Minh Ribler - 2014
+// Function: H5Location::mount
+// Purpose This is an overloaded member function, kept for backward
+// compatibility. It differs from the above function in that it
+// lacks the const qualifiers. This wrapper will be removed in a future release.
+// Programmer Binh-Minh Ribler - 2014
// Modification
-// Modified to call its replacement. -BMR, 2014/04/16
-// Removed from documentation. -BMR, 2016/03/07 1.8.17 and 1.10.0
-// Removed from code. -BMR, 2016/08/11 1.8.18 and 1.10.1
+// Modified to call its replacement. -BMR, 2014/04/16
+// Removed from documentation. -BMR, 2016/03/07 1.8.17 and 1.10.0
+// Removed from code. -BMR, 2016/08/11 1.8.18 and 1.10.1
//--------------------------------------------------------------------------
//void H5Location::mount(const H5std_string& name, H5File& child, PropList& plist) const
//{
@@ -1178,397 +1216,109 @@ void H5Location::mount(const H5std_string& name, const H5File& child, const Prop
//}
//--------------------------------------------------------------------------
-// Function: H5Location::unmount
-///\brief Unmounts the specified file.
-///\param name - IN: Name of the file to unmount
-///\exception H5::FileIException or H5::GroupIException
-// Programmer Binh-Minh Ribler - 2000
-//--------------------------------------------------------------------------
-void H5Location::unmount( const char* name ) const
-{
- // Call C routine H5Fmount to do the mouting
- herr_t ret_value = H5Funmount(getId(), name );
-
- // Raise exception if H5Funmount returns negative value
- if( ret_value < 0 )
- throwException("unmount", "H5Funmount failed");
-}
-
-//--------------------------------------------------------------------------
-// Function: H5Location::unmount
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function in that it takes an
-/// \c H5std_string for \a name.
-// Programmer Binh-Minh Ribler - 2000
-//--------------------------------------------------------------------------
-void H5Location::unmount( const H5std_string& name ) const
-{
- unmount( name.c_str() );
-}
-
-//--------------------------------------------------------------------------
-// Function: H5Location::openDataType
-///\brief Opens the named generic datatype at this location.
-///\param name - IN: Name of the datatype to open
-///\return DataType instance
-///\exception H5::FileIException or H5::GroupIException
-// Programmer Binh-Minh Ribler - 2000
-//--------------------------------------------------------------------------
-DataType H5Location::openDataType( const char* name ) const
-{
- // Call C function H5Topen2 to open the named datatype in this group,
- // given either the file or group id
- hid_t type_id = H5Topen2(getId(), name, H5P_DEFAULT);
-
- // If the datatype's opening failed, throw an exception
- if( type_id < 0 )
- throwException("openDataType", "H5Topen2 failed");
-
- // No failure, create and return the DataType object
- DataType data_type;
- f_DataType_setId(&data_type, type_id);
- return(data_type);
-}
-
-//--------------------------------------------------------------------------
-// Function: H5Location::openDataType
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function in that it takes an
-/// \c H5std_string for \a name.
-// Programmer Binh-Minh Ribler - 2000
-//--------------------------------------------------------------------------
-DataType H5Location::openDataType( const H5std_string& name ) const
-{
- return( openDataType( name.c_str()) );
-}
-
-//--------------------------------------------------------------------------
-// Function: H5Location::openArrayType
-///\brief Opens the named array datatype at this location.
-///\param name - IN: Name of the array datatype to open
-///\return ArrayType instance
-///\exception H5::FileIException or H5::GroupIException
-// Programmer Binh-Minh Ribler - Jul, 2005
-//--------------------------------------------------------------------------
-ArrayType H5Location::openArrayType( const char* name ) const
-{
- // Call C function H5Topen2 to open the named datatype in this group,
- // given either the file or group id
- hid_t type_id = H5Topen2(getId(), name, H5P_DEFAULT);
-
- // If the datatype's opening failed, throw an exception
- if( type_id < 0 )
- throwException("openArrayType", "H5Topen2 failed");
-
- // No failure, create and return the ArrayType object
- ArrayType array_type;
- f_DataType_setId(&array_type, type_id);
- return(array_type);
-}
-
-//--------------------------------------------------------------------------
-// Function: H5Location::openArrayType
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function in that it takes an
-/// \c H5std_string for \a name.
-// Programmer Binh-Minh Ribler - Jul, 2005
-//--------------------------------------------------------------------------
-ArrayType H5Location::openArrayType( const H5std_string& name ) const
-{
- return( openArrayType( name.c_str()) );
-}
-
-//--------------------------------------------------------------------------
-// Function: H5Location::openCompType
-///\brief Opens the named compound datatype at this location.
-///\param name - IN: Name of the compound datatype to open
-///\return CompType instance
-///\exception H5::FileIException or H5::GroupIException
-// Programmer Binh-Minh Ribler - 2000
-//--------------------------------------------------------------------------
-CompType H5Location::openCompType( const char* name ) const
-{
- // Call C function H5Topen2 to open the named datatype in this group,
- // given either the file or group id
- hid_t type_id = H5Topen2(getId(), name, H5P_DEFAULT);
-
- // If the datatype's opening failed, throw an exception
- if( type_id < 0 )
- throwException("openCompType", "H5Topen2 failed");
-
- // No failure, create and return the CompType object
- CompType comp_type;
- f_DataType_setId(&comp_type, type_id);
- return(comp_type);
-}
-
-//--------------------------------------------------------------------------
-// Function: H5Location::openCompType
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function in that it takes an
-/// \c H5std_string for \a name.
-// Programmer Binh-Minh Ribler - 2000
-//--------------------------------------------------------------------------
-CompType H5Location::openCompType( const H5std_string& name ) const
-{
- return( openCompType( name.c_str()) );
-}
-
-//--------------------------------------------------------------------------
-// Function: H5Location::openEnumType
-///\brief Opens the named enumeration datatype at this location.
-///\param name - IN: Name of the enumeration datatype to open
-///\return EnumType instance
-///\exception H5::FileIException or H5::GroupIException
-// Programmer Binh-Minh Ribler - 2000
-//--------------------------------------------------------------------------
-EnumType H5Location::openEnumType( const char* name ) const
-{
- // Call C function H5Topen2 to open the named datatype in this group,
- // given either the file or group id
- hid_t type_id = H5Topen2(getId(), name, H5P_DEFAULT);
-
- // If the datatype's opening failed, throw an exception
- if( type_id < 0 )
- throwException("openEnumType", "H5Topen2 failed");
-
- // No failure, create and return the EnumType object
- EnumType enum_type;
- f_DataType_setId(&enum_type, type_id);
- return(enum_type);
-}
-
-//--------------------------------------------------------------------------
-// Function: H5Location::openEnumType
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function in that it takes an
-/// \c H5std_string for \a name.
-// Programmer Binh-Minh Ribler - 2000
-//--------------------------------------------------------------------------
-EnumType H5Location::openEnumType( const H5std_string& name ) const
-{
- return( openEnumType( name.c_str()) );
-}
-
-//--------------------------------------------------------------------------
-// Function: H5Location::openIntType
-///\brief Opens the named integer datatype at this location.
-///\param name - IN: Name of the integer datatype to open
-///\return IntType instance
-///\exception H5::FileIException or H5::GroupIException
-// Programmer Binh-Minh Ribler - 2000
-//--------------------------------------------------------------------------
-IntType H5Location::openIntType( const char* name ) const
-{
- // Call C function H5Topen2 to open the named datatype in this group,
- // given either the file or group id
- hid_t type_id = H5Topen2(getId(), name, H5P_DEFAULT);
-
- // If the datatype's opening failed, throw an exception
- if( type_id < 0 )
- throwException("openIntType", "H5Topen2 failed");
-
- // No failure, create and return the IntType object
- IntType int_type;
- f_DataType_setId(&int_type, type_id);
- return(int_type);
-}
-
-//--------------------------------------------------------------------------
-// Function: H5Location::openIntType
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function in that it takes an
-/// \c H5std_string for \a name.
-// Programmer Binh-Minh Ribler - 2000
-//--------------------------------------------------------------------------
-IntType H5Location::openIntType( const H5std_string& name ) const
-{
- return( openIntType( name.c_str()) );
-}
-
-//--------------------------------------------------------------------------
-// Function: H5Location::openFloatType
-///\brief Opens the named floating-point datatype at this location.
-///\param name - IN: Name of the floating-point datatype to open
-///\return FloatType instance
-///\exception H5::FileIException or H5::GroupIException
-// Programmer Binh-Minh Ribler - 2000
-//--------------------------------------------------------------------------
-FloatType H5Location::openFloatType( const char* name ) const
-{
- // Call C function H5Topen2 to open the named datatype in this group,
- // given either the file or group id
- hid_t type_id = H5Topen2(getId(), name, H5P_DEFAULT);
-
- // If the datatype's opening failed, throw an exception
- if( type_id < 0 )
- throwException("openFloatType", "H5Topen2 failed");
-
- // No failure, create and return the FloatType object
- FloatType float_type;
- f_DataType_setId(&float_type, type_id);
- return(float_type);
-}
-
-//--------------------------------------------------------------------------
-// Function: H5Location::openFloatType
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function in that it takes an
-/// \c H5std_string for \a name.
-// Programmer Binh-Minh Ribler - 2000
-//--------------------------------------------------------------------------
-FloatType H5Location::openFloatType( const H5std_string& name ) const
-{
- return( openFloatType( name.c_str()) );
-}
-
-//--------------------------------------------------------------------------
-// Function: H5Location::openStrType
-///\brief Opens the named string datatype at this location.
-///\param name - IN: Name of the string datatype to open
-///\return StrType instance
-///\exception H5::FileIException or H5::GroupIException
-// Programmer Binh-Minh Ribler - 2000
-//--------------------------------------------------------------------------
-StrType H5Location::openStrType( const char* name ) const
-{
- // Call C function H5Topen2 to open the named datatype in this group,
- // given either the file or group id
- hid_t type_id = H5Topen2(getId(), name, H5P_DEFAULT);
-
- // If the datatype's opening failed, throw an exception
- if( type_id < 0 )
- throwException("openStrType", "H5Topen2 failed");
-
- // No failure, create and return the StrType object
- StrType str_type;
- f_DataType_setId(&str_type, type_id);
- return(str_type);
-}
-
-//--------------------------------------------------------------------------
-// Function: H5Location::openStrType
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function in that it takes an
-/// \c H5std_string for \a name.
-// Programmer Binh-Minh Ribler - 2000
-//--------------------------------------------------------------------------
-StrType H5Location::openStrType( const H5std_string& name ) const
-{
- return( openStrType( name.c_str()) );
-}
-
-//--------------------------------------------------------------------------
-// Function: H5Location::openVarLenType
-///\brief Opens the named variable length datatype at this location.
-///\param name - IN: Name of the variable length datatype to open
-///\return VarLenType instance
-///\exception H5::FileIException or H5::GroupIException
-// Programmer Binh-Minh Ribler - Jul, 2005
+// Function: H5Location::unmount
+///\brief Unmounts the specified file.
+///\param name - IN: Name of the file to unmount
+///\exception H5::FileIException or H5::GroupIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-VarLenType H5Location::openVarLenType( const char* name ) const
+void H5Location::unmount(const char* name) const
{
- // Call C function H5Topen2 to open the named datatype in this group,
- // given either the file or group id
- hid_t type_id = H5Topen2(getId(), name, H5P_DEFAULT);
-
- // If the datatype's opening failed, throw an exception
- if( type_id < 0 )
- throwException("openVarLenType", "H5Topen2 failed");
+ // Call C routine H5Funmount to do the unmounting
+ herr_t ret_value = H5Funmount(getId(), name);
- // No failure, create and return the VarLenType object
- VarLenType varlen_type;
- f_DataType_setId(&varlen_type, type_id);
- return(varlen_type);
+ // Raise exception if H5Funmount returns negative value
+ if (ret_value < 0)
+ throwException("unmount", "H5Funmount failed");
}
//--------------------------------------------------------------------------
-// Function: H5Location::openVarLenType
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function in that it takes an
-/// \c H5std_string for \a name.
-// Programmer Binh-Minh Ribler - Jul, 2005
+// Function: H5Location::unmount
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function in that it takes an
+/// \c H5std_string for \a name.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-VarLenType H5Location::openVarLenType( const H5std_string& name ) const
+void H5Location::unmount(const H5std_string& name) const
{
- return( openVarLenType( name.c_str()) );
+ unmount(name.c_str());
}
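A minimal usage sketch for the mount/unmount pair documented above; the file names and the mount point "/mnt" (which must be an existing group in the parent file) are hypothetical, and failures raise the exceptions noted in the comments.

#include "H5Cpp.h"
using namespace H5;

static void mount_unmount_sketch()
{
    H5File parent("parent.h5", H5F_ACC_RDWR);      // hypothetical parent file
    H5File child("child.h5", H5F_ACC_RDONLY);      // hypothetical child file

    // Mount the child file on an existing group of the parent ...
    parent.mount("/mnt", child, PropList::DEFAULT);
    // ... access the child's objects through "/mnt/..." here ...

    // ... and unmount it again when done.
    parent.unmount("/mnt");
}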
#ifndef H5_NO_DEPRECATED_SYMBOLS
//--------------------------------------------------------------------------
-// Function: H5Location::iterateElems
-///\brief Iterates a user's function over the entries of a group.
-///\param name - IN : Name of group to iterate over
-///\param idx - IN/OUT: Starting (IN) and ending (OUT) entry indices
-///\param op - IN : User's function to operate on each entry
-///\param op_data - IN/OUT: Data associated with the operation
-///\return The return value of the first operator that returns non-zero,
-/// or zero if all members were processed with no operator
-/// returning non-zero.
-///\exception H5::FileIException or H5::GroupIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5Location::iterateElems
+///\brief Iterates a user's function over the entries of a group.
+///\param name - IN : Name of group to iterate over
+///\param idx - IN/OUT: Starting (IN) and ending (OUT) entry indices
+///\param op - IN : User's function to operate on each entry
+///\param op_data - IN/OUT: Data associated with the operation
+///\return The return value of the first operator that returns non-zero,
+/// or zero if all members were processed with no operator
+/// returning non-zero.
+///\exception H5::FileIException or H5::GroupIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-int H5Location::iterateElems( const char* name, int *idx, H5G_iterate_t op , void* op_data )
+int H5Location::iterateElems(const char* name, int *idx, H5G_iterate_t op , void* op_data)
{
- int ret_value = H5Giterate(getId(), name, idx, op, op_data );
- if( ret_value < 0 )
- {
- throwException("iterateElems", "H5Giterate failed");
- }
- return( ret_value );
+ int ret_value = H5Giterate(getId(), name, idx, op, op_data);
+ if (ret_value < 0)
+ {
+ throwException("iterateElems", "H5Giterate failed");
+ }
+ return(ret_value);
}
//--------------------------------------------------------------------------
-// Function: H5Location::iterateElems
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function in that it takes an
-/// \c H5std_string for \a name.
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5Location::iterateElems
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function in that it takes an
+/// \c H5std_string for \a name.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-int H5Location::iterateElems( const H5std_string& name, int *idx, H5G_iterate_t op , void* op_data )
+int H5Location::iterateElems(const H5std_string& name, int *idx, H5G_iterate_t op , void* op_data)
{
- return( iterateElems( name.c_str(), idx, op, op_data ));
+ return(iterateElems( name.c_str(), idx, op, op_data));
}
#endif /* H5_NO_DEPRECATED_SYMBOLS */
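For the deprecated iterateElems() above, a minimal sketch of a callback matching the C H5G_iterate_t signature (group id, link name, user data); the callback and function names are illustrative, and the Group reference is non-const because iterateElems() is not a const member.

#include "H5Cpp.h"
#include <iostream>
using namespace H5;

extern "C" herr_t print_entry(hid_t /*group*/, const char* name, void* /*op_data*/)
{
    std::cout << name << std::endl;
    return 0;            // 0 continues the iteration; non-zero stops it
}

static void iterate_sketch(Group& group)
{
    int idx = 0;         // start at the first entry
    group.iterateElems(".", &idx, print_entry, NULL);
}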
//--------------------------------------------------------------------------
-// Function: H5Location::getNumObjs
-///\brief Returns the number of objects in this group.
-///\return Number of objects
-///\exception H5::FileIException or H5::GroupIException
-// Programmer Binh-Minh Ribler - January, 2003
+// Function: H5Location::getNumObjs
+///\brief Returns the number of objects in this group.
+///\return Number of objects
+///\exception H5::FileIException or H5::GroupIException
+// Programmer Binh-Minh Ribler - January, 2003
//--------------------------------------------------------------------------
hsize_t H5Location::getNumObjs() const
{
- H5G_info_t ginfo; /* Group information */
+ H5G_info_t ginfo; // Group information
- herr_t ret_value = H5Gget_info(getId(), &ginfo);
- if(ret_value < 0)
- throwException("getNumObjs", "H5Gget_info failed");
- return (ginfo.nlinks);
+ herr_t ret_value = H5Gget_info(getId(), &ginfo);
+ if(ret_value < 0)
+ throwException("getNumObjs", "H5Gget_info failed");
+ return (ginfo.nlinks);
}
//--------------------------------------------------------------------------
-// Function: H5Location::getObjnameByIdx
-///\brief Returns the name of an object in this group, given the
-/// object's index.
-///\param idx - IN: Transient index of the object
-///\return Object name
-///\exception H5::FileIException or H5::GroupIException
+// Function: H5Location::getObjnameByIdx
+///\brief Returns the name of an object in this group, given the
+/// object's index.
+///\param idx - IN: Transient index of the object
+///\return Object name
+///\exception H5::FileIException or H5::GroupIException
///\par Description
-/// The value of idx can be any nonnegative number less than the
-/// total number of objects in the group, which is returned by
-/// the function \c H5Location::getNumObjs. Note that this is a
-/// transient index; thus, an object may have a different index
-/// each time the group is opened.
-// Programmer Binh-Minh Ribler - Mar, 2005
+/// The value of idx can be any nonnegative number less than the
+/// total number of objects in the group, which is returned by
+/// the function \c H5Location::getNumObjs. Note that this is a
+/// transient index; thus, an object may have a different index
+/// each time the group is opened.
+// Programmer Binh-Minh Ribler - Mar, 2005
//--------------------------------------------------------------------------
H5std_string H5Location::getObjnameByIdx(hsize_t idx) const
{
// call H5Lget_name_by_idx with name as NULL to get its length
ssize_t name_len = H5Lget_name_by_idx(getId(), ".", H5_INDEX_NAME, H5_ITER_INC, idx, NULL, 0, H5P_DEFAULT);
if(name_len < 0)
- throwException("getObjnameByIdx", "H5Lget_name_by_idx failed");
+ throwException("getObjnameByIdx", "H5Lget_name_by_idx failed");
// now, allocate C buffer to get the name
char* name_C = new char[name_len+1];
@@ -1578,8 +1328,8 @@ H5std_string H5Location::getObjnameByIdx(hsize_t idx) const
if (name_len < 0)
{
- delete []name_C;
- throwException("getObjnameByIdx", "H5Lget_name_by_idx failed");
+ delete []name_C;
+ throwException("getObjnameByIdx", "H5Lget_name_by_idx failed");
}
// clean up and return the string
@@ -1589,74 +1339,74 @@ H5std_string H5Location::getObjnameByIdx(hsize_t idx) const
}
//--------------------------------------------------------------------------
-// Function: H5Location::getObjnameByIdx
-///\brief Retrieves the name of an object in this group, given the
-/// object's index.
-///\param idx - IN: Transient index of the object
-///\param name - IN/OUT: Retrieved name of the object
-///\param size - IN: Length to retrieve
-///\return Actual size of the object name or 0, if object has no name
-///\exception H5::FileIException or H5::GroupIException
+// Function: H5Location::getObjnameByIdx
+///\brief Retrieves the name of an object in this group, given the
+/// object's index.
+///\param idx - IN: Transient index of the object
+///\param name - IN/OUT: Retrieved name of the object
+///\param size - IN: Length to retrieve
+///\return Actual size of the object name or 0, if object has no name
+///\exception H5::FileIException or H5::GroupIException
///\par Description
-/// The value of idx can be any nonnegative number less than the
-/// total number of objects in the group, which is returned by
-/// the function \c H5Location::getNumObjs. Note that this is a
-/// transient index; thus, an object may have a different index
-/// each time the group is opened.
-// Programmer Binh-Minh Ribler - January, 2003
+/// The value of idx can be any nonnegative number less than the
+/// total number of objects in the group, which is returned by
+/// the function \c H5Location::getNumObjs. Note that this is a
+/// transient index; thus, an object may have a different index
+/// each time the group is opened.
+// Programmer Binh-Minh Ribler - January, 2003
//--------------------------------------------------------------------------
ssize_t H5Location::getObjnameByIdx(hsize_t idx, char* name, size_t size) const
{
- ssize_t name_len = H5Lget_name_by_idx(getId(), ".", H5_INDEX_NAME, H5_ITER_INC, idx, name, size, H5P_DEFAULT);
- if(name_len < 0)
- throwException("getObjnameByIdx", "H5Lget_name_by_idx failed");
+ ssize_t name_len = H5Lget_name_by_idx(getId(), ".", H5_INDEX_NAME, H5_ITER_INC, idx, name, size, H5P_DEFAULT);
+ if(name_len < 0)
+ throwException("getObjnameByIdx", "H5Lget_name_by_idx failed");
- return (name_len);
+ return (name_len);
}
//--------------------------------------------------------------------------
-// Function: H5Location::getObjnameByIdx
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function in that it takes an
-/// \c H5std_string for \a name.
-// Programmer Binh-Minh Ribler - January, 2003
+// Function: H5Location::getObjnameByIdx
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function in that it takes an
+/// \c H5std_string for \a name.
+// Programmer Binh-Minh Ribler - January, 2003
//--------------------------------------------------------------------------
ssize_t H5Location::getObjnameByIdx(hsize_t idx, H5std_string& name, size_t size) const
{
- char* name_C = new char[size+1]; // temporary C-string for object name
- HDmemset(name_C, 0, size+1); // clear buffer
+ char* name_C = new char[size+1]; // temporary C-string for object name
+ HDmemset(name_C, 0, size+1); // clear buffer
- // call overloaded function to get the name
- ssize_t name_len = getObjnameByIdx(idx, name_C, size+1);
- if(name_len < 0)
- {
+ // call overloaded function to get the name
+ ssize_t name_len = getObjnameByIdx(idx, name_C, size+1);
+ if(name_len < 0)
+ {
delete []name_C;
- throwException("getObjnameByIdx", "H5Lget_name_by_idx failed");
- }
-
- // clean up and return the string
- name = H5std_string(name_C);
- delete []name_C;
- return (name_len);
-}
-
-//--------------------------------------------------------------------------
-// Function: H5Location::childObjType
-///\brief Returns the type of an object in this file/group, given the
-/// object's name.
-///\param objname - IN: Name of the object
-///\return Object type, which can have the following values for group,
-/// dataset, and named datatype
-/// \li \c H5O_TYPE_GROUP
-/// \li \c H5O_TYPE_DATASET
-/// \li \c H5O_TYPE_NAMED_DATATYPE
-/// Refer to the C API documentation for more details:
-/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5O.html#Object-GetInfo
-///\exception H5::FileIException or H5::GroupIException
-/// Exception will be thrown when:
-/// - an error returned by the C API
-/// - object type is not one of the valid values above
-// Programmer Binh-Minh Ribler - April, 2014
+ throwException("getObjnameByIdx", "H5Lget_name_by_idx failed");
+ }
+
+ // clean up and return the string
+ name = H5std_string(name_C);
+ delete []name_C;
+ return (name_len);
+}
+
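A short sketch tying getNumObjs() to getObjnameByIdx(): it lists the names of all objects in a group by transient index, with the loop bound taken from getNumObjs() as the documentation above specifies. The helper name is illustrative.

#include "H5Cpp.h"
#include <iostream>
using namespace H5;

static void list_children(const Group& group)
{
    hsize_t num_objs = group.getNumObjs();          // upper bound for the transient index
    for (hsize_t i = 0; i < num_objs; i++)
        std::cout << i << ": " << group.getObjnameByIdx(i) << std::endl;
}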
+//--------------------------------------------------------------------------
+// Function: H5Location::childObjType
+///\brief Returns the type of an object in this file/group, given the
+/// object's name.
+///\param objname - IN: Name of the object
+///\return Object type, which can have the following values for group,
+/// dataset, and named datatype
+/// \li \c H5O_TYPE_GROUP
+/// \li \c H5O_TYPE_DATASET
+/// \li \c H5O_TYPE_NAMED_DATATYPE
+/// Refer to the C API documentation for more details:
+/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5O.html#Object-GetInfo
+///\exception H5::FileIException or H5::GroupIException
+/// Exception will be thrown when:
+/// - an error returned by the C API
+/// - object type is not one of the valid values above
+// Programmer Binh-Minh Ribler - April, 2014
//--------------------------------------------------------------------------
H5O_type_t H5Location::childObjType(const char* objname) const
{
@@ -1668,33 +1418,33 @@ H5O_type_t H5Location::childObjType(const char* objname) const
// Throw exception if C API returns failure
if (ret_value < 0)
- throwException("childObjType", "H5Oget_info_by_name failed");
+ throwException("childObjType", "H5Oget_info_by_name failed");
// Return a valid type or throw an exception for unknown type
else
- switch (objinfo.type)
- {
- case H5O_TYPE_GROUP:
- case H5O_TYPE_DATASET:
- case H5O_TYPE_NAMED_DATATYPE:
- objtype = objinfo.type;
- break;
- case H5O_TYPE_UNKNOWN:
- case H5O_TYPE_NTYPES:
- default:
- throwException("childObjType", "Unknown type of object");
- }
+ switch (objinfo.type)
+ {
+ case H5O_TYPE_GROUP:
+ case H5O_TYPE_DATASET:
+ case H5O_TYPE_NAMED_DATATYPE:
+ objtype = objinfo.type;
+ break;
+ case H5O_TYPE_UNKNOWN:
+ case H5O_TYPE_NTYPES:
+ default:
+ throwException("childObjType", "Unknown type of object");
+ }
return(objtype);
}
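As an illustration of childObjType(), a sketch that opens a child object differently depending on the reported type; only the group and dataset branches actually open anything, the object name is supplied by the caller, and the helper name is illustrative.

#include "H5Cpp.h"
#include <iostream>
using namespace H5;

static void open_child_sketch(const Group& parent, const char* name)
{
    switch (parent.childObjType(name))
    {
        case H5O_TYPE_GROUP:
        {
            Group child = parent.openGroup(name);       // use the sub-group ...
            break;
        }
        case H5O_TYPE_DATASET:
        {
            DataSet dset = parent.openDataSet(name);    // ... or the dataset
            break;
        }
        case H5O_TYPE_NAMED_DATATYPE:
            std::cout << name << " is a named datatype" << std::endl;
            break;
        default:
            break;  // childObjType() throws for any other type before reaching here
    }
}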
//--------------------------------------------------------------------------
-// Function: H5Location::childObjType
-///\brief This is an overloaded member function, provided for convenience.
-/// It takes an \a H5std_string for the object's name.
-///\brief Returns the type of an object in this group, given the
-/// object's name.
-///\param objname - IN: Name of the object (H5std_string&)
-///\exception H5::FileIException or H5::GroupIException
-// Programmer Binh-Minh Ribler - April, 2014
+// Function: H5Location::childObjType
+///\brief This is an overloaded member function, provided for convenience.
+/// It takes an \a H5std_string for the object's name.
+///\brief Returns the type of an object in this group, given the
+/// object's name.
+///\param objname - IN: Name of the object (H5std_string&)
+///\exception H5::FileIException or H5::GroupIException
+// Programmer Binh-Minh Ribler - April, 2014
//--------------------------------------------------------------------------
H5O_type_t H5Location::childObjType(const H5std_string& objname) const
{
@@ -1704,33 +1454,33 @@ H5O_type_t H5Location::childObjType(const H5std_string& objname) const
}
//--------------------------------------------------------------------------
-// Function: H5Location::childObjType
-///\brief Returns the type of an object in this file/group, given the
-/// object's index and its type and order.
-///\param index - IN: Position of the object
-///\param index_type - IN: Type of the index, default to H5_INDEX_NAME
-///\param order - IN: Traversing order, default to H5_ITER_INC
-///\param objname - IN: Name of the object, default to "."
-///\return Object type, which can have the following values for group,
-/// dataset, and named datatype
-/// \li \c H5O_TYPE_GROUP
-/// \li \c H5O_TYPE_DATASET
-/// \li \c H5O_TYPE_NAMED_DATATYPE
-/// Refer to the C API documentation for more details:
-/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5O.html#Object-GetInfo
-///\exception H5::FileIException or H5::GroupIException
-/// Exception will be thrown when:
-/// - an error returned by the C API
-/// - object type is not one of the valid values above
+// Function: H5Location::childObjType
+///\brief Returns the type of an object in this file/group, given the
+/// object's index and its type and order.
+///\param index - IN: Position of the object
+///\param index_type - IN: Type of the index, default to H5_INDEX_NAME
+///\param order - IN: Traversing order, default to H5_ITER_INC
+///\param objname - IN: Name of the object, default to "."
+///\return Object type, which can have the following values for group,
+/// dataset, and named datatype
+/// \li \c H5O_TYPE_GROUP
+/// \li \c H5O_TYPE_DATASET
+/// \li \c H5O_TYPE_NAMED_DATATYPE
+/// Refer to the C API documentation for more details:
+/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5O.html#Object-GetInfo
+///\exception H5::FileIException or H5::GroupIException
+/// Exception will be thrown when:
+/// - an error returned by the C API
+/// - object type is not one of the valid values above
// Developer's Notes:
-// - this overload uses H5Oget_info_by_idx instead of H5Oget_info_by_name
-// like the previous childObjType()
-// - index is the required argument so, first
-// - objname is last because it's more likely the location is already
-// fully specified
-// - Leave property list out for now because C API is not using it, it
-// can be added later when needed.
-// Programmer Binh-Minh Ribler - April, 2014
+// - this overload uses H5Oget_info_by_idx instead of H5Oget_info_by_name
+// like the previous childObjType()
+// - index is the required argument, so it comes first
+// - objname is last because it's more likely the location is already
+// fully specified
+// - Leave property list out for now because C API is not using it, it
+// can be added later when needed.
+// Programmer Binh-Minh Ribler - April, 2014
//--------------------------------------------------------------------------
H5O_type_t H5Location::childObjType(hsize_t index, H5_index_t index_type, H5_iter_order_t order, const char* objname) const
{
@@ -1743,37 +1493,37 @@ H5O_type_t H5Location::childObjType(hsize_t index, H5_index_t index_type, H5_ite
// Throw exception if C API returns failure
if (ret_value < 0)
- throwException("childObjType", "H5Oget_info_by_idx failed");
+ throwException("childObjType", "H5Oget_info_by_idx failed");
// Return a valid type or throw an exception for unknown type
else
- switch (objinfo.type)
- {
- case H5O_TYPE_GROUP:
- case H5O_TYPE_DATASET:
- case H5O_TYPE_NAMED_DATATYPE:
- objtype = objinfo.type;
- break;
- case H5O_TYPE_UNKNOWN:
- case H5O_TYPE_NTYPES:
- default:
- throwException("childObjType", "Unknown type of object");
- }
+ switch (objinfo.type)
+ {
+ case H5O_TYPE_GROUP:
+ case H5O_TYPE_DATASET:
+ case H5O_TYPE_NAMED_DATATYPE:
+ objtype = objinfo.type;
+ break;
+ case H5O_TYPE_UNKNOWN:
+ case H5O_TYPE_NTYPES:
+ default:
+ throwException("childObjType", "Unknown type of object");
+ }
return(objtype);
}
//--------------------------------------------------------------------------
-// Function: H5Location::childObjVersion
-///\brief Returns the object header version of an object in this file/group,
-/// given the object's name.
-///\param objname - IN: Name of the object
-///\return Object version, which can have the following values:
-/// \li \c H5O_VERSION_1
-/// \li \c H5O_VERSION_2
-///\exception H5::FileIException or H5::GroupIException
-/// Exception will be thrown when:
-/// - an error returned by the C API
-/// - version number is not one of the valid values above
-// Programmer Binh-Minh Ribler - April, 2014
+// Function: H5Location::childObjVersion
+///\brief Returns the object header version of an object in this file/group,
+/// given the object's name.
+///\param objname - IN: Name of the object
+///\return Object version, which can have the following values:
+/// \li \c H5O_VERSION_1
+/// \li \c H5O_VERSION_2
+///\exception H5::FileIException or H5::GroupIException
+/// Exception will be thrown when:
+/// - an error returned by the C API
+/// - version number is not one of the valid values above
+// Programmer Binh-Minh Ribler - April, 2014
//--------------------------------------------------------------------------
unsigned H5Location::childObjVersion(const char* objname) const
{
@@ -1785,26 +1535,26 @@ unsigned H5Location::childObjVersion(const char* objname) const
// Throw exception if C API returns failure
if (ret_value < 0)
- throwException("childObjVersion", "H5Oget_info_by_name failed");
+ throwException("childObjVersion", "H5Oget_info_by_name failed");
// Return a valid version or throw an exception for invalid value
else
{
- version = objinfo.hdr.version;
- if (version != H5O_VERSION_1 && version != H5O_VERSION_2)
- throwException("childObjVersion", "Invalid version for object");
+ version = objinfo.hdr.version;
+ if (version != H5O_VERSION_1 && version != H5O_VERSION_2)
+ throwException("childObjVersion", "Invalid version for object");
}
return(version);
}
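A small sketch for childObjVersion(); the child name is hypothetical and the returned value is one of H5O_VERSION_1 or H5O_VERSION_2, as documented above.

#include "H5Cpp.h"
#include <iostream>
using namespace H5;

static void print_header_version(const Group& group)
{
    unsigned version = group.childObjVersion("dset1");   // hypothetical child name
    std::cout << "object header version: " << version << std::endl;
}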
//--------------------------------------------------------------------------
-// Function: H5Location::childObjVersion
-///\brief This is an overloaded member function, provided for convenience.
-/// It takes an \a H5std_string for the object's name.
-///\brief Returns the type of an object in this group, given the
-/// object's name.
-///\param objname - IN: Name of the object (H5std_string&)
-///\exception H5::FileIException or H5::GroupIException
-// Programmer Binh-Minh Ribler - April, 2014
+// Function: H5Location::childObjVersion
+///\brief This is an overloaded member function, provided for convenience.
+/// It takes an \a H5std_string for the object's name.
+///\brief Returns the type of an object in this group, given the
+/// object's name.
+///\param objname - IN: Name of the object (H5std_string&)
+///\exception H5::FileIException or H5::GroupIException
+// Programmer Binh-Minh Ribler - April, 2014
//--------------------------------------------------------------------------
unsigned H5Location::childObjVersion(const H5std_string& objname) const
{
@@ -1816,35 +1566,35 @@ unsigned H5Location::childObjVersion(const H5std_string& objname) const
#ifndef H5_NO_DEPRECATED_SYMBOLS
#ifndef DOXYGEN_SHOULD_SKIP_THIS
//--------------------------------------------------------------------------
-// Function: H5Location::getObjTypeByIdx
-///\brief Returns the type of an object in this group, given the
-/// object's index.
-///\param idx - IN: Transient index of the object
-///\return Object type
-///\exception H5::FileIException or H5::GroupIException
-// Programmer Binh-Minh Ribler - January, 2003
+// Function: H5Location::getObjTypeByIdx
+///\brief Returns the type of an object in this group, given the
+/// object's index.
+///\param idx - IN: Transient index of the object
+///\return Object type
+///\exception H5::FileIException or H5::GroupIException
+// Programmer Binh-Minh Ribler - January, 2003
//--------------------------------------------------------------------------
H5G_obj_t H5Location::getObjTypeByIdx(hsize_t idx) const
{
H5G_obj_t obj_type = H5Gget_objtype_by_idx(getId(), idx);
if (obj_type == H5G_UNKNOWN)
- throwException("getObjTypeByIdx", "H5Gget_objtype_by_idx failed");
+ throwException("getObjTypeByIdx", "H5Gget_objtype_by_idx failed");
return (obj_type);
}
//--------------------------------------------------------------------------
-// Function: H5Location::getObjTypeByIdx
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function because it also provides
-/// the returned object type in text (char*)
-///\param idx - IN: Transient index of the object
-///\param type_name - OUT: Object type in text
-///\return Object type
-///\exception H5::FileIException or H5::GroupIException
-// Programmer Binh-Minh Ribler - May, 2010
+// Function: H5Location::getObjTypeByIdx
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function because it also provides
+/// the returned object type in text (char*)
+///\param idx - IN: Transient index of the object
+///\param type_name - OUT: Object type in text
+///\return Object type
+///\exception H5::FileIException or H5::GroupIException
+// Programmer Binh-Minh Ribler - May, 2010
// Modification
-// Modified to use the other function. -BMR, 2016/03/07
+// Modified to use the other function. -BMR, 2016/03/07
//--------------------------------------------------------------------------
H5G_obj_t H5Location::getObjTypeByIdx(hsize_t idx, char* type_name) const
{
@@ -1852,32 +1602,32 @@ H5G_obj_t H5Location::getObjTypeByIdx(hsize_t idx, char* type_name) const
return(getObjTypeByIdx(idx, stype_name));
}
//--------------------------------------------------------------------------
-// Function: H5Location::getObjTypeByIdx
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function because it also provides
-/// the returned object type in text (H5std_string&)
-///\param idx - IN: Transient index of the object
-///\param type_name - OUT: Object type in text
-///\return Object type
-///\exception H5::FileIException or H5::GroupIException
-// Programmer Binh-Minh Ribler - January, 2003
+// Function: H5Location::getObjTypeByIdx
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function because it also provides
+/// the returned object type in text (H5std_string&)
+///\param idx - IN: Transient index of the object
+///\param type_name - OUT: Object type in text
+///\return Object type
+///\exception H5::FileIException or H5::GroupIException
+// Programmer Binh-Minh Ribler - January, 2003
//--------------------------------------------------------------------------
H5G_obj_t H5Location::getObjTypeByIdx(hsize_t idx, H5std_string& type_name) const
{
H5G_obj_t obj_type = H5Gget_objtype_by_idx(getId(), idx);
switch (obj_type)
{
- case H5G_LINK: type_name = H5std_string("symbolic link"); break;
- case H5G_GROUP: type_name = H5std_string("group"); break;
- case H5G_DATASET: type_name = H5std_string("dataset"); break;
- case H5G_TYPE: type_name = H5std_string("datatype"); break;
- case H5G_UNKNOWN:
- case H5G_UDLINK:
- case H5G_RESERVED_5:
- case H5G_RESERVED_6:
- case H5G_RESERVED_7:
- default:
- throwException("getObjTypeByIdx", "H5Gget_objtype_by_idx failed");
+ case H5G_LINK: type_name = H5std_string("symbolic link"); break;
+ case H5G_GROUP: type_name = H5std_string("group"); break;
+ case H5G_DATASET: type_name = H5std_string("dataset"); break;
+ case H5G_TYPE: type_name = H5std_string("datatype"); break;
+ case H5G_UNKNOWN:
+ case H5G_UDLINK:
+ case H5G_RESERVED_5:
+ case H5G_RESERVED_6:
+ case H5G_RESERVED_7:
+ default:
+ throwException("getObjTypeByIdx", "H5Gget_objtype_by_idx failed");
}
return (obj_type);
}
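Combining the deprecated getObjTypeByIdx() overload above with getObjnameByIdx(), a sketch that prints each child's name and textual type; it is only available when H5_NO_DEPRECATED_SYMBOLS is not defined, and the helper name is illustrative.

#include "H5Cpp.h"
#include <iostream>
using namespace H5;

static void list_children_with_types(const Group& group)
{
    for (hsize_t i = 0; i < group.getNumObjs(); i++)
    {
        H5std_string type_name;
        group.getObjTypeByIdx(i, type_name);            // "group", "dataset", "datatype", ...
        std::cout << group.getObjnameByIdx(i) << " (" << type_name << ")" << std::endl;
    }
}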
@@ -1895,65 +1645,34 @@ H5G_obj_t H5Location::getObjTypeByIdx(hsize_t idx, H5std_string& type_name) cons
//--------------------------------------------------------------------------
void H5Location::throwException(const H5std_string& func_name, const H5std_string& msg) const
{
- throwException(func_name, msg);
-}
-
-//--------------------------------------------------------------------------
-// Function: f_DataType_setId - friend
-// Purpose: This function is friend to class H5::DataType so that it
-// can set DataType::id in order to work around a problem
-// described in the JIRA issue HDFFV-7947.
-// Applications shouldn't need to use it.
-// param dtype - IN/OUT: DataType object to be changed
-// param new_id - IN: New id to set
-// Programmer Binh-Minh Ribler - 2015
-//--------------------------------------------------------------------------
-void f_DataType_setId(DataType* dtype, hid_t new_id)
-{
- dtype->p_setId(new_id);
+ throwException(func_name, msg);
}
//--------------------------------------------------------------------------
// Function: f_DataSet_setId - friend
-// Purpose: This function is friend to class H5::DataSet so that it
-// can set DataSet::id in order to work around a problem
-// described in the JIRA issue HDFFV-7947.
-// Applications shouldn't need to use it.
-// param dset - IN/OUT: DataSet object to be changed
-// param new_id - IN: New id to set
-// Programmer Binh-Minh Ribler - 2015
+// Modification:
+// Moved to H5CommonFG.cpp after the rearrangement of classes
+// -BMR, Dec 2016
//--------------------------------------------------------------------------
-void f_DataSet_setId(DataSet* dset, hid_t new_id)
-{
- dset->p_setId(new_id);
-}
// end of From H5CommonFG.cpp
//--------------------------------------------------------------------------
-// Function: f_Attribute_setId - friend
-// Purpose: This function is friend to class H5::Attribute so that it
-// can set Attribute::id in order to work around a problem
-// described in the JIRA issue HDFFV-7947.
-// Applications shouldn't need to use it.
-// param attr - IN/OUT: Attribute object to be changed
-// param new_id - IN: New id to set
-// Programmer Binh-Minh Ribler - 2015
+// Function: f_Attribute_setId - friend
+// Modification:
+// Moved to H5Object.cpp after the rearrangement of classes
+// -BMR, Dec 2016
//--------------------------------------------------------------------------
-void f_Attribute_setId(Attribute* attr, hid_t new_id)
-{
- attr->p_setId(new_id);
-}
//--------------------------------------------------------------------------
-// Function: f_DataSpace_setId - friend
-// Purpose: This function is friend to class H5::DataSpace so that it can
-// can set DataSpace::id in order to work around a problem
-// described in the JIRA issue HDFFV-7947.
-// Applications shouldn't need to use it.
-// param dspace - IN/OUT: DataSpace object to be changed
-// param new_id - IN: New id to set
-// Programmer Binh-Minh Ribler - 2015
+// Function: f_DataSpace_setId - friend
+// Purpose This function is friend to class H5::DataSpace so that it
+// can set DataSpace::id in order to work around a problem
+// described in the JIRA issue HDFFV-7947.
+// Applications shouldn't need to use it.
+// param dspace - IN/OUT: DataSpace object to be changed
+// param new_id - IN: New id to set
+// Programmer Binh-Minh Ribler - 2015
//--------------------------------------------------------------------------
void f_DataSpace_setId(DataSpace* dspace, hid_t new_id)
{
@@ -1961,9 +1680,9 @@ void f_DataSpace_setId(DataSpace* dspace, hid_t new_id)
}
//--------------------------------------------------------------------------
-// Function: H5Location destructor
-///\brief Noop destructor.
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5Location destructor
+///\brief Noop destructor.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
H5Location::~H5Location() {}
diff --git a/c++/src/H5Location.h b/c++/src/H5Location.h
index 647904b..a57d3ed 100644
--- a/c++/src/H5Location.h
+++ b/c++/src/H5Location.h
@@ -6,18 +6,16 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef __H5Location_H
#define __H5Location_H
-#include "H5Classes.h" // constains forward class declarations
+#include "H5Classes.h" // constains forward class declarations
namespace H5 {
@@ -33,221 +31,195 @@ namespace H5 {
*/
// Class forwarding
class H5_DLLCPP ArrayType;
+class H5_DLLCPP LinkAccPropList;
class H5_DLLCPP VarLenType;
class H5_DLLCPP H5Location : public IdComponent {
public:
- // Flushes all buffers associated with this location to disk.
- void flush( H5F_scope_t scope ) const;
+ // Checks if a link of a given name exists in a location
+ bool exists(const char* name, const LinkAccPropList& lapl = LinkAccPropList::DEFAULT) const;
+ bool exists(const H5std_string& name, const LinkAccPropList& lapl = LinkAccPropList::DEFAULT) const;
- // Gets the name of the file, specified by this location.
- H5std_string getFileName() const;
+ // Flushes all buffers associated with this location to disk.
+ void flush(H5F_scope_t scope) const;
+
+ // Gets the name of the file, specified by this location.
+ H5std_string getFileName() const;
#ifndef H5_NO_DEPRECATED_SYMBOLS
- // Retrieves the type of object that an object reference points to.
- H5G_obj_t getObjType(void *ref, H5R_type_t ref_type = H5R_OBJECT) const;
+ // Retrieves the type of object that an object reference points to.
+ H5G_obj_t getObjType(void *ref, H5R_type_t ref_type = H5R_OBJECT) const;
#endif /* H5_NO_DEPRECATED_SYMBOLS */
- // Retrieves the type of object that an object reference points to.
- H5O_type_t getRefObjType(void *ref, H5R_type_t ref_type = H5R_OBJECT) const;
- // Note: getRefObjType deprecates getObjType, but getObjType's name is
- // misleading, so getRefObjType is used in the new function instead.
-
- // Sets the comment for an HDF5 object specified by its name.
- void setComment(const char* name, const char* comment) const;
- void setComment(const H5std_string& name, const H5std_string& comment) const;
- void setComment(const char* comment) const;
- void setComment(const H5std_string& comment) const;
-
- // Retrieves comment for the HDF5 object specified by its name.
- ssize_t getComment(const char* name, size_t buf_size, char* comment) const;
- H5std_string getComment(const char* name, size_t buf_size=0) const;
- H5std_string getComment(const H5std_string& name, size_t buf_size=0) const;
-
- // Removes the comment for the HDF5 object specified by its name.
- void removeComment(const char* name) const;
- void removeComment(const H5std_string& name) const;
-
- // Creates a reference to a named object or to a dataset region
- // in this object.
- void reference(void* ref, const char* name,
- H5R_type_t ref_type = H5R_OBJECT) const;
- void reference(void* ref, const H5std_string& name,
- H5R_type_t ref_type = H5R_OBJECT) const;
- void reference(void* ref, const char* name, const DataSpace& dataspace,
- H5R_type_t ref_type = H5R_DATASET_REGION) const;
- void reference(void* ref, const H5std_string& name, const DataSpace& dataspace,
- H5R_type_t ref_type = H5R_DATASET_REGION) const;
-
- // Open a referenced object whose location is specified by either
- // a file, an HDF5 object, or an attribute.
- void dereference(const H5Location& loc, const void* ref, H5R_type_t ref_type = H5R_OBJECT, const PropList& plist = PropList::DEFAULT);
- void dereference(const Attribute& attr, const void* ref, H5R_type_t ref_type = H5R_OBJECT, const PropList& plist = PropList::DEFAULT);
-
- // Retrieves a dataspace with the region pointed to selected.
- DataSpace getRegion(void *ref, H5R_type_t ref_type = H5R_DATASET_REGION) const;
+ // Retrieves the type of object that an object reference points to.
+ H5O_type_t getRefObjType(void *ref, H5R_type_t ref_type = H5R_OBJECT) const;
+ // Note: getRefObjType deprecates getObjType, but getObjType's name is
+ // misleading, so getRefObjType is used in the new function instead.
+
+ // Sets the comment for an HDF5 object specified by its name.
+ void setComment(const char* name, const char* comment) const;
+ void setComment(const H5std_string& name, const H5std_string& comment) const;
+ void setComment(const char* comment) const;
+ void setComment(const H5std_string& comment) const;
+
+ // Retrieves comment for the HDF5 object specified by its name.
+ ssize_t getComment(const char* name, size_t buf_size, char* comment) const;
+ H5std_string getComment(const char* name, size_t buf_size=0) const;
+ H5std_string getComment(const H5std_string& name, size_t buf_size=0) const;
+
+ // Removes the comment for the HDF5 object specified by its name.
+ void removeComment(const char* name) const;
+ void removeComment(const H5std_string& name) const;
+
+ // Creates a reference to a named object or to a dataset region
+ // in this object.
+ void reference(void* ref, const char* name,
+ H5R_type_t ref_type = H5R_OBJECT) const;
+ void reference(void* ref, const H5std_string& name,
+ H5R_type_t ref_type = H5R_OBJECT) const;
+ void reference(void* ref, const char* name, const DataSpace& dataspace,
+ H5R_type_t ref_type = H5R_DATASET_REGION) const;
+ void reference(void* ref, const H5std_string& name, const DataSpace& dataspace,
+ H5R_type_t ref_type = H5R_DATASET_REGION) const;
+
+ // Open a referenced object whose location is specified by either
+ // a file, an HDF5 object, or an attribute.
+ void dereference(const H5Location& loc, const void* ref, H5R_type_t ref_type = H5R_OBJECT, const PropList& plist = PropList::DEFAULT);
+ // Removed in 1.10.1, because H5Location is a base class
+ //void dereference(const Attribute& attr, const void* ref, H5R_type_t ref_type = H5R_OBJECT, const PropList& plist = PropList::DEFAULT);
+
+ // Retrieves a dataspace with the region pointed to selected.
+ DataSpace getRegion(void *ref, H5R_type_t ref_type = H5R_DATASET_REGION) const;
// From CommonFG
- // Creates a new group at this location which can be a file
- // or another group.
- Group createGroup(const char* name, size_t size_hint = 0) const;
- Group createGroup(const H5std_string& name, size_t size_hint = 0) const;
-
- // Opens an existing group in a location which can be a file
- // or another group.
- Group openGroup(const char* name) const;
- Group openGroup(const H5std_string& name) const;
-
- // Creates a new dataset in this group.
- DataSet createDataSet(const char* name, const DataType& data_type, const DataSpace& data_space, const DSetCreatPropList& create_plist = DSetCreatPropList::DEFAULT) const;
- DataSet createDataSet(const H5std_string& name, const DataType& data_type, const DataSpace& data_space, const DSetCreatPropList& create_plist = DSetCreatPropList::DEFAULT) const;
-
- // Opens an existing dataset at this location.
- DataSet openDataSet(const char* name) const;
- DataSet openDataSet(const H5std_string& name) const;
-
- // Returns the value of a symbolic link.
- H5std_string getLinkval(const char* link_name, size_t size=0) const;
- H5std_string getLinkval(const H5std_string& link_name, size_t size=0) const;
-
- // Returns the number of objects in this group.
- hsize_t getNumObjs() const;
-
- // Retrieves the name of an object in this group, given the
- // object's index.
- H5std_string getObjnameByIdx(hsize_t idx) const;
- ssize_t getObjnameByIdx(hsize_t idx, char* name, size_t size) const;
- ssize_t getObjnameByIdx(hsize_t idx, H5std_string& name, size_t size) const;
-
- // Retrieves the type of an object in this file or group, given the
- // object's name
- H5O_type_t childObjType(const H5std_string& objname) const;
- H5O_type_t childObjType(const char* objname) const;
- H5O_type_t childObjType(hsize_t index, H5_index_t index_type=H5_INDEX_NAME, H5_iter_order_t order=H5_ITER_INC, const char* objname=".") const;
-
- // Returns the object header version of an object in this file or group,
- // given the object's name.
- unsigned childObjVersion(const char* objname) const;
- unsigned childObjVersion(const H5std_string& objname) const;
+ // Creates a new group at this location which can be a file
+ // or another group.
+ Group createGroup(const char* name, size_t size_hint = 0) const;
+ Group createGroup(const H5std_string& name, size_t size_hint = 0) const;
+
+ // Opens an existing group in a location which can be a file
+ // or another group.
+ Group openGroup(const char* name) const;
+ Group openGroup(const H5std_string& name) const;
+
+ // Creates a new dataset in this group.
+ DataSet createDataSet(const char* name, const DataType& data_type, const DataSpace& data_space, const DSetCreatPropList& create_plist = DSetCreatPropList::DEFAULT) const;
+ DataSet createDataSet(const H5std_string& name, const DataType& data_type, const DataSpace& data_space, const DSetCreatPropList& create_plist = DSetCreatPropList::DEFAULT) const;
+
+ // Opens an existing dataset at this location.
+ DataSet openDataSet(const char* name) const;
+ DataSet openDataSet(const H5std_string& name) const;
+
+ // Returns the value of a symbolic link.
+ H5std_string getLinkval(const char* link_name, size_t size=0) const;
+ H5std_string getLinkval(const H5std_string& link_name, size_t size=0) const;
+
+ // Returns the number of objects in this group.
+ hsize_t getNumObjs() const;
+
+ // Retrieves the name of an object in this group, given the
+ // object's index.
+ H5std_string getObjnameByIdx(hsize_t idx) const;
+ ssize_t getObjnameByIdx(hsize_t idx, char* name, size_t size) const;
+ ssize_t getObjnameByIdx(hsize_t idx, H5std_string& name, size_t size) const;
+
+ // Retrieves the type of an object in this file or group, given the
+ // object's name
+ H5O_type_t childObjType(const H5std_string& objname) const;
+ H5O_type_t childObjType(const char* objname) const;
+ H5O_type_t childObjType(hsize_t index, H5_index_t index_type=H5_INDEX_NAME, H5_iter_order_t order=H5_ITER_INC, const char* objname=".") const;
+
+ // Returns the object header version of an object in this file or group,
+ // given the object's name.
+ unsigned childObjVersion(const char* objname) const;
+ unsigned childObjVersion(const H5std_string& objname) const;
#ifndef H5_NO_DEPRECATED_SYMBOLS
- // Returns the type of an object in this group, given the
- // object's index.
- H5G_obj_t getObjTypeByIdx(hsize_t idx) const;
- H5G_obj_t getObjTypeByIdx(hsize_t idx, char* type_name) const;
- H5G_obj_t getObjTypeByIdx(hsize_t idx, H5std_string& type_name) const;
-
- // Returns information about an HDF5 object, given by its name,
- // at this location.
- void getObjinfo(const char* name, hbool_t follow_link, H5G_stat_t& statbuf) const;
- void getObjinfo(const H5std_string& name, hbool_t follow_link, H5G_stat_t& statbuf) const;
- void getObjinfo(const char* name, H5G_stat_t& statbuf) const;
- void getObjinfo(const H5std_string& name, H5G_stat_t& statbuf) const;
-
- // Iterates over the elements of this group - not implemented in
- // C++ style yet.
- int iterateElems(const char* name, int *idx, H5G_iterate_t op, void *op_data);
- int iterateElems(const H5std_string& name, int *idx, H5G_iterate_t op, void *op_data);
+ // Returns the type of an object in this group, given the
+ // object's index.
+ H5G_obj_t getObjTypeByIdx(hsize_t idx) const;
+ H5G_obj_t getObjTypeByIdx(hsize_t idx, char* type_name) const;
+ H5G_obj_t getObjTypeByIdx(hsize_t idx, H5std_string& type_name) const;
+
+ // Returns information about an HDF5 object, given by its name,
+ // at this location.
+ void getObjinfo(const char* name, hbool_t follow_link, H5G_stat_t& statbuf) const;
+ void getObjinfo(const H5std_string& name, hbool_t follow_link, H5G_stat_t& statbuf) const;
+ void getObjinfo(const char* name, H5G_stat_t& statbuf) const;
+ void getObjinfo(const H5std_string& name, H5G_stat_t& statbuf) const;
+
+ // Iterates over the elements of this group - not implemented in
+ // C++ style yet.
+ int iterateElems(const char* name, int *idx, H5G_iterate_t op, void *op_data);
+ int iterateElems(const H5std_string& name, int *idx, H5G_iterate_t op, void *op_data);
#endif /* H5_NO_DEPRECATED_SYMBOLS */
- // Creates a link of the specified type from new_name to current_name;
- // both names are interpreted relative to the specified location id.
- void link(H5L_type_t link_type, const char* curr_name, const char* new_name) const;
- void link(H5L_type_t link_type, const H5std_string& curr_name, const H5std_string& new_name) const;
-
- // Removes the specified name at this location.
- void unlink(const char* name) const;
- void unlink(const H5std_string& name) const;
-
- // Mounts the file 'child' onto this location.
- void mount(const char* name, const H5File& child, const PropList& plist) const;
- //void mount(const char* name, H5File& child, PropList& plist) const; // removed from 1.8.18 and 1.10.1
- void mount(const H5std_string& name, const H5File& child, const PropList& plist) const;
- //void mount(const H5std_string& name, H5File& child, PropList& plist) const; // removed from 1.8.18 and 1.10.1
-
- // Unmounts the file named 'name' from this parent location.
- void unmount(const char* name) const;
- void unmount(const H5std_string& name) const;
-
- // Renames an object at this location.
- void move(const char* src, const char* dst) const;
- void move(const H5std_string& src, const H5std_string& dst) const;
-
- // Opens a generic named datatype in this location.
- DataType openDataType(const char* name) const;
- DataType openDataType(const H5std_string& name) const;
-
- // Opens a named array datatype in this location.
- ArrayType openArrayType(const char* name) const;
- ArrayType openArrayType(const H5std_string& name) const;
-
- // Opens a named compound datatype in this location.
- CompType openCompType(const char* name) const;
- CompType openCompType(const H5std_string& name) const;
-
- // Opens a named enumeration datatype in this location.
- EnumType openEnumType(const char* name) const;
- EnumType openEnumType(const H5std_string& name) const;
+ // Creates a link of the specified type from new_name to current_name;
+ // both names are interpreted relative to the specified location id.
+ void link(H5L_type_t link_type, const char* curr_name, const char* new_name) const;
+ void link(H5L_type_t link_type, const H5std_string& curr_name, const H5std_string& new_name) const;
- // Opens a named integer datatype in this location.
- IntType openIntType(const char* name) const;
- IntType openIntType(const H5std_string& name) const;
+ // Removes the specified name at this location.
+ void unlink(const char* name) const;
+ void unlink(const H5std_string& name) const;
- // Opens a named floating-point datatype in this location.
- FloatType openFloatType(const char* name) const;
- FloatType openFloatType(const H5std_string& name) const;
+ // Mounts the file 'child' onto this location.
+ void mount(const char* name, const H5File& child, const PropList& plist) const;
+ //void mount(const char* name, H5File& child, PropList& plist) const; // removed from 1.8.18 and 1.10.1
+ void mount(const H5std_string& name, const H5File& child, const PropList& plist) const;
+ //void mount(const H5std_string& name, H5File& child, PropList& plist) const; // removed from 1.8.18 and 1.10.1
- // Opens a named string datatype in this location.
- StrType openStrType(const char* name) const;
- StrType openStrType(const H5std_string& name) const;
+ // Unmounts the file named 'name' from this parent location.
+ void unmount(const char* name) const;
+ void unmount(const H5std_string& name) const;
- // Opens a named variable length datatype in this location.
- VarLenType openVarLenType(const char* name) const;
- VarLenType openVarLenType(const H5std_string& name) const;
+ // Renames an object at this location.
+ void move(const char* src, const char* dst) const;
+ void move(const H5std_string& src, const H5std_string& dst) const;
// end From CommonFG
- /// For subclasses, H5File and Group, to throw appropriate exception.
- virtual void throwException(const H5std_string& func_name, const H5std_string& msg) const;
+ /// For subclasses, H5File and Group, to throw appropriate exception.
+ virtual void throwException(const H5std_string& func_name, const H5std_string& msg) const;
- // Default constructor
- H5Location();
+ // Default constructor
+ H5Location();
protected:
#ifndef DOXYGEN_SHOULD_SKIP_THIS
- // *** Deprecation warning ***
- // The following two constructors are no longer appropriate after the
- // data member "id" had been moved to the sub-classes.
- // The copy constructor is a noop and is removed in 1.8.15 and the
- // other will be removed from 1.10 release, and then from 1.8 if its
- // removal does not raise any problems in two 1.10 releases.
+ // *** Deprecation warning ***
+ // The following two constructors are no longer appropriate after the
+ // data member "id" had been moved to the sub-classes.
+ // The copy constructor is a noop and is removed in 1.8.15 and the
+ // other will be removed from 1.10 release, and then from 1.8 if its
+ // removal does not raise any problems in two 1.10 releases.
- // Creates a copy of an existing object giving the location id.
- H5Location(const hid_t loc_id);
+ // Creates a copy of an existing object giving the location id.
+ H5Location(const hid_t loc_id);
- // Creates a reference to an HDF5 object or a dataset region.
- void p_reference(void* ref, const char* name, hid_t space_id, H5R_type_t ref_type) const;
+ // Creates a reference to an HDF5 object or a dataset region.
+ void p_reference(void* ref, const char* name, hid_t space_id, H5R_type_t ref_type) const;
- // Dereferences a ref into an HDF5 id.
- hid_t p_dereference(hid_t loc_id, const void* ref, H5R_type_t ref_type, const PropList& plist, const char* from_func);
+ // Dereferences a ref into an HDF5 id.
+ hid_t p_dereference(hid_t loc_id, const void* ref, H5R_type_t ref_type, const PropList& plist, const char* from_func);
#ifndef H5_NO_DEPRECATED_SYMBOLS
- // Retrieves the type of object that an object reference points to.
- H5G_obj_t p_get_obj_type(void *ref, H5R_type_t ref_type) const;
+ // Retrieves the type of object that an object reference points to.
+ H5G_obj_t p_get_obj_type(void *ref, H5R_type_t ref_type) const;
#endif /* H5_NO_DEPRECATED_SYMBOLS */
- // Retrieves the type of object that an object reference points to.
- H5O_type_t p_get_ref_obj_type(void *ref, H5R_type_t ref_type) const;
+ // Retrieves the type of object that an object reference points to.
+ H5O_type_t p_get_ref_obj_type(void *ref, H5R_type_t ref_type) const;
// Sets the identifier of this object to a new value. - this one
// doesn't increment reference count
- virtual void p_setId(const hid_t new_id) = 0;
+ //virtual void p_setId(const hid_t new_id);
#endif // DOXYGEN_SHOULD_SKIP_THIS
- // Noop destructor.
- virtual ~H5Location();
+ // Noop destructor.
+ virtual ~H5Location();
-}; /* end class H5Location */
+}; // end of H5Location
+} // namespace H5
-}
#endif // __H5Location_H
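Since this header now declares the exists() overloads with a LinkAccPropList default argument, a brief sketch of their use; the file name and link path are hypothetical, and the default link-access property list is taken implicitly.

#include "H5Cpp.h"
#include <iostream>
using namespace H5;

static void exists_sketch()
{
    H5File file("sample.h5", H5F_ACC_RDONLY);           // hypothetical file
    if (file.exists("/group1/dset1"))                   // hypothetical link path
        std::cout << "/group1/dset1 exists" << std::endl;
    else
        std::cout << "/group1/dset1 is missing" << std::endl;
}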
diff --git a/c++/src/H5Object.cpp b/c++/src/H5Object.cpp
index ab79d9d..865d04f 100644
--- a/c++/src/H5Object.cpp
+++ b/c++/src/H5Object.cpp
@@ -5,16 +5,15 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <string>
+#include "H5private.h" // for HDmemset
#include "H5Include.h"
#include "H5Exception.h"
#include "H5IdComponent.h"
@@ -24,16 +23,17 @@
#include "H5OcreatProp.h"
#include "H5DcreatProp.h"
#include "H5DxferProp.h"
+#include "H5LaccProp.h"
#include "H5Location.h"
#include "H5Object.h"
#include "H5DataType.h"
#include "H5DataSpace.h"
#include "H5AbstractDs.h"
+#include "H5CommonFG.h"
#include "H5Group.h"
#include "H5File.h"
#include "H5DataSet.h"
#include "H5Attribute.h"
-#include "H5private.h" // for HDmemset
namespace H5 {
@@ -44,25 +44,24 @@ namespace H5 {
extern "C" herr_t userAttrOpWrpr(hid_t loc_id, const char *attr_name,
const H5A_info_t *ainfo, void *op_data)
{
- H5std_string s_attr_name = H5std_string( attr_name );
+ H5std_string s_attr_name = H5std_string(attr_name);
UserData4Aiterate* myData = reinterpret_cast<UserData4Aiterate *> (op_data);
- myData->op( *myData->location, s_attr_name, myData->opData );
+ myData->op(*myData->location, s_attr_name, myData->opData);
return 0;
}
-#endif
//--------------------------------------------------------------------------
-// Function: H5Object default constructor (protected)
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5Object default constructor (protected)
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
H5Object::H5Object() : H5Location() {}
//--------------------------------------------------------------------------
-// Function: H5Object overloaded constructor (protected)
-// Purpose Creates an H5Object object using the id of an existing HDF5
-// object.
-// Parameters object_id - IN: Id of an existing HDF5 object
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5Object overloaded constructor (protected)
+// Purpose Creates an H5Object object using the id of an existing HDF5
+// object.
+// Parameters object_id - IN: Id of an existing HDF5 object
+// Programmer Binh-Minh Ribler - 2000
// *** Deprecation warning ***
// This constructor is no longer appropriate because the data member "id" had
// been moved to the sub-classes. It will be removed in 1.10 release. If its
@@ -73,11 +72,11 @@ H5Object::H5Object() : H5Location() {}
//H5Object::H5Object(const hid_t object_id) : H5Location() {}
//--------------------------------------------------------------------------
-// Function: H5Object copy constructor
-///\brief Copy constructor: makes a copy of the original H5Object
-/// instance.
-///\param original - IN: H5Object instance to copy
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5Object copy constructor
+///\brief Copy constructor: makes a copy of the original H5Object
+/// instance.
+///\param original - IN: H5Object instance to copy
+// Programmer Binh-Minh Ribler - 2000
// *** Deprecation warning ***
// This constructor is no longer appropriate because the data member "id" had
// been moved to the sub-classes. It is removed from 1.8.15 because it is
@@ -86,262 +85,312 @@ H5Object::H5Object() : H5Location() {}
// H5Object::H5Object(const H5Object& original) : H5Location() {}
//--------------------------------------------------------------------------
-// Function: H5Object::createAttribute
-///\brief Creates an attribute for a group, dataset, or named datatype.
-///\param name - IN: Name of the attribute
-///\param data_type - IN: Datatype for the attribute
-///\param data_space - IN: Dataspace for the attribute - only simple
-/// dataspaces are allowed at this time
-///\param create_plist - IN: Creation property list - default to
-/// PropList::DEFAULT
-///\return Attribute instance
-///\exception H5::AttributeIException
+// Function: f_Attribute_setId - friend
+// Purpose:     This function is a friend of class H5::Attribute so that it
+// can set Attribute::id in order to work around a problem
+// described in the JIRA issue HDFFV-7947.
+// Applications shouldn't need to use it.
+// param attr - IN/OUT: Attribute object to be changed
+// param new_id - IN: New id to set
+// Programmer Binh-Minh Ribler - 2015
+//--------------------------------------------------------------------------
+void f_Attribute_setId(Attribute* attr, hid_t new_id)
+{
+ attr->p_setId(new_id);
+}
+#endif
+
+//--------------------------------------------------------------------------
+// Function: H5Object::createAttribute
+///\brief Creates an attribute for a group, dataset, or named datatype.
+///\param name - IN: Name of the attribute
+///\param data_type - IN: Datatype for the attribute
+///\param data_space - IN: Dataspace for the attribute - only simple
+/// dataspaces are allowed at this time
+///\param create_plist - IN: Creation property list - default to
+/// PropList::DEFAULT
+///\return Attribute instance
+///\exception H5::AttributeIException
///\par Description
-/// The attribute name specified in \a name must be unique.
-/// Attempting to create an attribute with the same name as an
-/// existing attribute will raise an exception, leaving the
-/// pre-existing attribute intact. To overwrite an existing
-/// attribute with a new attribute of the same name, first
-/// delete the existing one with \c H5Object::removeAttr, then
-/// recreate it with this function.
-// Programmer Binh-Minh Ribler - 2000
-//--------------------------------------------------------------------------
-Attribute H5Object::createAttribute( const char* name, const DataType& data_type, const DataSpace& data_space, const PropList& create_plist ) const
+/// The attribute name specified in \a name must be unique.
+/// Attempting to create an attribute with the same name as an
+/// existing attribute will raise an exception, leaving the
+/// pre-existing attribute intact. To overwrite an existing
+/// attribute with a new attribute of the same name, first
+/// delete the existing one with \c H5Object::removeAttr, then
+/// recreate it with this function.
+// Programmer Binh-Minh Ribler - 2000
+//--------------------------------------------------------------------------
+Attribute H5Object::createAttribute(const char* name, const DataType& data_type, const DataSpace& data_space, const PropList& create_plist) const
{
- hid_t type_id = data_type.getId();
- hid_t space_id = data_space.getId();
- hid_t plist_id = create_plist.getId();
- hid_t attr_id = H5Acreate2(getId(), name, type_id, space_id, plist_id, H5P_DEFAULT );
-
- // If the attribute id is valid, create and return the Attribute object
- if( attr_id > 0 )
- {
- Attribute attr;
- f_Attribute_setId(&attr, attr_id);
- return( attr );
- }
- else
- throw AttributeIException(inMemFunc("createAttribute"), "H5Acreate2 failed");
+ hid_t type_id = data_type.getId();
+ hid_t space_id = data_space.getId();
+ hid_t plist_id = create_plist.getId();
+ hid_t attr_id = H5Acreate2(getId(), name, type_id, space_id, plist_id, H5P_DEFAULT);
+
+ // If the attribute id is valid, create and return the Attribute object
+ if (attr_id > 0)
+ {
+ Attribute attr;
+ f_Attribute_setId(&attr, attr_id);
+ return(attr);
+ }
+ else
+ throw AttributeIException(inMemFunc("createAttribute"), "H5Acreate2 failed");
}
//--------------------------------------------------------------------------
-// Function: H5Object::createAttribute
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function in that it takes
-/// a reference to an \c H5std_string for \a name.
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5Object::createAttribute
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function in that it takes
+/// a reference to an \c H5std_string for \a name.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-Attribute H5Object::createAttribute( const H5std_string& name, const DataType& data_type, const DataSpace& data_space, const PropList& create_plist ) const
+Attribute H5Object::createAttribute(const H5std_string& name, const DataType& data_type, const DataSpace& data_space, const PropList& create_plist) const
{
- return( createAttribute( name.c_str(), data_type, data_space, create_plist ));
+ return(createAttribute( name.c_str(), data_type, data_space, create_plist));
}
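
For orientation, a minimal sketch of creating and writing an attribute through the wrappers above; the file name "example.h5", the dataset name "dset", and the attribute name "version" are hypothetical, and the PropList::DEFAULT creation property list is taken implicitly.

#include "H5Cpp.h"
using namespace H5;

int main()
{
    // Create a file and a small integer dataset to attach the attribute to
    H5File file("example.h5", H5F_ACC_TRUNC);
    hsize_t dims[1] = {10};
    DataSpace dspace(1, dims);
    DataSet dset = file.createDataSet("dset", PredType::NATIVE_INT, dspace);

    // Attach a scalar integer attribute and write a value into it
    DataSpace attr_space(H5S_SCALAR);
    Attribute attr = dset.createAttribute("version", PredType::NATIVE_INT, attr_space);
    int value = 1;
    attr.write(PredType::NATIVE_INT, &value);
    return 0;
}
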
//--------------------------------------------------------------------------
-// Function: H5Object::openAttribute
-///\brief Opens an attribute given its name.
-///\param name - IN: Name of the attribute
-///\return Attribute instance
-///\exception H5::AttributeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5Object::openAttribute
+///\brief Opens an attribute given its name.
+///\param name - IN: Name of the attribute
+///\return Attribute instance
+///\exception H5::AttributeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-Attribute H5Object::openAttribute( const char* name ) const
+Attribute H5Object::openAttribute(const char* name) const
{
- hid_t attr_id = H5Aopen(getId(), name, H5P_DEFAULT);
- if( attr_id > 0 )
- {
- Attribute attr;
- f_Attribute_setId(&attr, attr_id);
- return( attr );
- }
- else
- {
- throw AttributeIException(inMemFunc("openAttribute"), "H5Aopen failed");
- }
+ hid_t attr_id = H5Aopen(getId(), name, H5P_DEFAULT);
+ if (attr_id > 0)
+ {
+ Attribute attr;
+ f_Attribute_setId(&attr, attr_id);
+ return(attr);
+ }
+ else
+ {
+ throw AttributeIException(inMemFunc("openAttribute"), "H5Aopen failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: H5Object::openAttribute
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function in that it takes
-/// a reference to an \c H5std_string for \a name.
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5Object::openAttribute
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function in that it takes
+/// a reference to an \c H5std_string for \a name.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-Attribute H5Object::openAttribute( const H5std_string& name ) const
+Attribute H5Object::openAttribute(const H5std_string& name) const
{
- return( openAttribute( name.c_str()) );
+ return(openAttribute( name.c_str()));
}
//--------------------------------------------------------------------------
-// Function: H5Object::openAttribute
-///\brief Opens an attribute given its index.
-///\param idx - IN: Index of the attribute, a 0-based, non-negative integer
-///\return Attribute instance
-///\exception H5::AttributeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5Object::openAttribute
+///\brief Opens an attribute given its index.
+///\param idx - IN: Index of the attribute, a 0-based, non-negative integer
+///\return Attribute instance
+///\exception H5::AttributeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-Attribute H5Object::openAttribute( const unsigned int idx ) const
+Attribute H5Object::openAttribute(const unsigned int idx) const
{
- hid_t attr_id = H5Aopen_by_idx(getId(), ".", H5_INDEX_CRT_ORDER,
- H5_ITER_INC, static_cast<hsize_t>(idx), H5P_DEFAULT, H5P_DEFAULT);
- if( attr_id > 0 )
- {
- Attribute attr;
- f_Attribute_setId(&attr, attr_id);
- return(attr);
- }
- else
- {
- throw AttributeIException(inMemFunc("openAttribute"), "H5Aopen_by_idx failed");
- }
+ hid_t attr_id = H5Aopen_by_idx(getId(), ".", H5_INDEX_CRT_ORDER,
+ H5_ITER_INC, static_cast<hsize_t>(idx), H5P_DEFAULT, H5P_DEFAULT);
+ if (attr_id > 0)
+ {
+ Attribute attr;
+ f_Attribute_setId(&attr, attr_id);
+ return(attr);
+ }
+ else
+ {
+ throw AttributeIException(inMemFunc("openAttribute"), "H5Aopen_by_idx failed");
+ }
}
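
A companion sketch that reopens the attribute from the previous example by name and reads it back; read_version is a made-up helper, and the includes from the first sketch are assumed. The index-based overload above walks the creation-order index, which generally requires creation-order tracking to have been enabled on the object (see ObjCreatPropList::setAttrCrtOrder).

// Reopen the "version" attribute and return its value
int read_version(const DataSet& dset)
{
    Attribute attr = dset.openAttribute("version");
    int readback = 0;
    attr.read(PredType::NATIVE_INT, &readback);
    return readback;
}
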
//--------------------------------------------------------------------------
-// Function: H5Object::iterateAttrs
-///\brief Iterates a user's function over all the attributes of an H5
-/// object, which may be a group, dataset or named datatype.
-///\param user_op - IN: User's function to operate on each attribute
-///\param _idx - IN/OUT: Starting (IN) and ending (OUT) attribute indices
-///\param op_data - IN: User's data to pass to user's operator function
-///\return Returned value of the last operator if it was non-zero, or
-/// zero if all attributes were processed
-///\exception H5::AttributeIException
+// Function: H5Object::iterateAttrs
+///\brief Iterates a user's function over all the attributes of an H5
+/// object, which may be a group, dataset or named datatype.
+///\param user_op - IN: User's function to operate on each attribute
+///\param _idx - IN/OUT: Starting (IN) and ending (OUT) attribute indices
+///\param op_data - IN: User's data to pass to user's operator function
+///\return Returned value of the last operator if it was non-zero, or
+/// zero if all attributes were processed
+///\exception H5::AttributeIException
///\par Description
-/// The signature of user_op is
-/// void (*)(H5::H5Location&, H5std_string, void*).
-/// For information, please refer to the C layer Reference Manual
-/// at:
+/// The signature of user_op is
+///             void (*)(H5::H5Object&, H5std_string, void*).
+/// For information, please refer to the C layer Reference Manual
+/// at:
/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5A.html#Annot-Iterate
-// Programmer Binh-Minh Ribler - 2000
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-int H5Object::iterateAttrs( attr_operator_t user_op, unsigned *_idx, void *op_data )
+int H5Object::iterateAttrs(attr_operator_t user_op, unsigned *_idx, void *op_data)
{
- // store the user's function and data
- UserData4Aiterate* userData = new UserData4Aiterate;
- userData->opData = op_data;
- userData->op = user_op;
- userData->location = this;
-
- // call the C library routine H5Aiterate2 to iterate the attributes
- hsize_t idx = _idx ? static_cast<hsize_t>(*_idx) : 0;
- int ret_value = H5Aiterate2(getId(), H5_INDEX_NAME, H5_ITER_INC, &idx,
- userAttrOpWrpr, reinterpret_cast<void *>(userData));
-
- // release memory
- delete userData;
-
- if( ret_value >= 0 ) {
- /* Pass back update index value to calling code */
- if (_idx)
- *_idx = static_cast<unsigned>(idx);
-
- return( ret_value );
- }
- else // raise exception when H5Aiterate returns a negative value
- throw AttributeIException(inMemFunc("iterateAttrs"), "H5Aiterate2 failed");
+ // store the user's function and data
+ UserData4Aiterate* userData = new UserData4Aiterate;
+ userData->opData = op_data;
+ userData->op = user_op;
+ userData->location = this;
+
+ // call the C library routine H5Aiterate2 to iterate the attributes
+ hsize_t idx = _idx ? static_cast<hsize_t>(*_idx) : 0;
+ int ret_value = H5Aiterate2(getId(), H5_INDEX_NAME, H5_ITER_INC, &idx,
+ userAttrOpWrpr, reinterpret_cast<void *>(userData));
+
+ // release memory
+ delete userData;
+
+ if (ret_value >= 0) {
+ /* Pass back update index value to calling code */
+ if (_idx)
+ *_idx = static_cast<unsigned>(idx);
+ return(ret_value);
+ }
+ else // raise exception when H5Aiterate returns a negative value
+ throw AttributeIException(inMemFunc("iterateAttrs"), "H5Aiterate2 failed");
}
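
A sketch of a callback with the shape required by the attr_operator_t typedef in H5Object.h, plus a helper that drives the iteration; print_attr_name and list_attributes are made-up names.

#include <iostream>

// Invoked once per attribute; op_data is forwarded unchanged from iterateAttrs()
void print_attr_name(H5Object& loc, const H5std_string attr_name, void* op_data)
{
    std::cout << "attribute: " << attr_name << std::endl;
}

// Visit every attribute of dset, starting from index 0
void list_attributes(DataSet& dset)
{
    unsigned idx = 0;
    dset.iterateAttrs(print_attr_name, &idx);
}
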
//--------------------------------------------------------------------------
-// Function: H5Object::getNumAttrs
-///\brief Returns the number of attributes attached to this HDF5 object.
-///\return Number of attributes
-///\exception H5::AttributeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5Object::objVersion
+///\brief Returns the header version of this HDF5 object.
+///\return Object version, which can have the following values:
+/// \li \c H5O_VERSION_1
+/// \li \c H5O_VERSION_2
+///\exception H5::ObjHeaderIException
+/// Exception will be thrown when:
+///             - an error is returned by the C API
+///             - the version number is not one of the valid values above
+// Programmer Binh-Minh Ribler - December, 2016
+//--------------------------------------------------------------------------
+unsigned H5Object::objVersion() const
+{
+ H5O_info_t objinfo;
+ unsigned version = 0;
+
+ // Use C API to get information of the object
+ herr_t ret_value = H5Oget_info(getId(), &objinfo);
+
+ // Throw exception if C API returns failure
+ if (ret_value < 0)
+ throw Exception(inMemFunc("objVersion"), "H5Oget_info failed");
+ // Return a valid version or throw an exception for invalid value
+ else
+ {
+ version = objinfo.hdr.version;
+ if (version != H5O_VERSION_1 && version != H5O_VERSION_2)
+ throw ObjHeaderIException("objVersion", "Invalid version for object");
+ }
+ return(version);
+}
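+
A brief sketch of the new wrapper in use; header_version is a made-up helper name.

// Query the object header version; the result is H5O_VERSION_1 or H5O_VERSION_2
unsigned header_version(const DataSet& dset)
{
    return dset.objVersion();
}
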
+
+//--------------------------------------------------------------------------
+// Function: H5Object::getNumAttrs
+///\brief Returns the number of attributes attached to this HDF5 object.
+///\return Number of attributes
+///\exception H5::AttributeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
int H5Object::getNumAttrs() const
{
- H5O_info_t oinfo; /* Object info */
+ H5O_info_t oinfo; /* Object info */
- if(H5Oget_info(getId(), &oinfo) < 0)
- throw AttributeIException(inMemFunc("getNumAttrs"), "H5Oget_info failed");
- else
- return(static_cast<int>(oinfo.num_attrs));
+ if(H5Oget_info(getId(), &oinfo) < 0)
+ throw AttributeIException(inMemFunc("getNumAttrs"), "H5Oget_info failed");
+ else
+ return(static_cast<int>(oinfo.num_attrs));
}
//--------------------------------------------------------------------------
-// Function: H5Object::attrExists
-///\brief Checks whether the named attribute exists at this location.
-///\param name - IN: Name of the attribute to be queried
-///\exception H5::AttributeIException
-// Programmer Binh-Minh Ribler - 2013
+// Function: H5Object::attrExists
+///\brief Checks whether the named attribute exists at this location.
+///\param name - IN: Name of the attribute to be queried
+///\exception H5::AttributeIException
+// Programmer Binh-Minh Ribler - 2013
//--------------------------------------------------------------------------
bool H5Object::attrExists(const char* name) const
{
- // Call C routine H5Aexists to determine whether an attribute exists
- // at this location, which could be specified by a file, group, dataset,
- // or named datatype.
- herr_t ret_value = H5Aexists(getId(), name);
- if( ret_value > 0 )
- return true;
- else if(ret_value == 0)
- return false;
- else // Raise exception when H5Aexists returns a negative value
- throw AttributeIException(inMemFunc("attrExists"), "H5Aexists failed");
+ // Call C routine H5Aexists to determine whether an attribute exists
+ // at this location, which could be specified by a file, group, dataset,
+ // or named datatype.
+ herr_t ret_value = H5Aexists(getId(), name);
+ if (ret_value > 0)
+ return true;
+ else if(ret_value == 0)
+ return false;
+ else // Raise exception when H5Aexists returns a negative value
+ throw AttributeIException(inMemFunc("attrExists"), "H5Aexists failed");
}
//--------------------------------------------------------------------------
-// Function: H5Object::attrExists
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function in that it takes
-/// a reference to an \c H5std_string for \a name.
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5Object::attrExists
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function in that it takes
+/// a reference to an \c H5std_string for \a name.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
bool H5Object::attrExists(const H5std_string& name) const
{
- return(attrExists(name.c_str()));
+ return(attrExists(name.c_str()));
}
//--------------------------------------------------------------------------
-// Function: H5Object::removeAttr
-///\brief Removes the named attribute from this object.
-///\param name - IN: Name of the attribute to be removed
-///\exception H5::AttributeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5Object::removeAttr
+///\brief Removes the named attribute from this object.
+///\param name - IN: Name of the attribute to be removed
+///\exception H5::AttributeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void H5Object::removeAttr( const char* name ) const
+void H5Object::removeAttr(const char* name) const
{
- herr_t ret_value = H5Adelete(getId(), name);
- if( ret_value < 0 )
- throw AttributeIException(inMemFunc("removeAttr"), "H5Adelete failed");
+ herr_t ret_value = H5Adelete(getId(), name);
+ if (ret_value < 0)
+ throw AttributeIException(inMemFunc("removeAttr"), "H5Adelete failed");
}
//--------------------------------------------------------------------------
-// Function: H5Object::removeAttr
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function in that it takes
-/// a reference to an \c H5std_string for \a name.
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5Object::removeAttr
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function in that it takes
+/// a reference to an \c H5std_string for \a name.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void H5Object::removeAttr( const H5std_string& name ) const
+void H5Object::removeAttr(const H5std_string& name) const
{
- removeAttr( name.c_str() );
+ removeAttr(name.c_str());
}
//--------------------------------------------------------------------------
-// Function: H5Object::renameAttr
-///\brief Renames the named attribute from this object.
-///\param oldname - IN: Name of the attribute to be renamed
-///\param newname - IN: New name ame of the attribute
-///\exception H5::AttributeIException
-// Programmer Binh-Minh Ribler - Mar, 2005
+// Function: H5Object::renameAttr
+///\brief       Renames the named attribute of this object.
+///\param oldname - IN: Name of the attribute to be renamed
+///\param       newname - IN: New name of the attribute
+///\exception H5::AttributeIException
+// Programmer Binh-Minh Ribler - Mar, 2005
//--------------------------------------------------------------------------
void H5Object::renameAttr(const char* oldname, const char* newname) const
{
- herr_t ret_value = H5Arename(getId(), oldname, newname);
- if (ret_value < 0)
- throw AttributeIException(inMemFunc("renameAttr"), "H5Arename failed");
+ herr_t ret_value = H5Arename(getId(), oldname, newname);
+ if (ret_value < 0)
+ throw AttributeIException(inMemFunc("renameAttr"), "H5Arename failed");
}
//--------------------------------------------------------------------------
-// Function: H5Object::renameAttr
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function in that it takes
-/// a reference to an \c H5std_string for the names.
-// Programmer Binh-Minh Ribler - Mar, 2005
+// Function: H5Object::renameAttr
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function in that it takes
+/// a reference to an \c H5std_string for the names.
+// Programmer Binh-Minh Ribler - Mar, 2005
//--------------------------------------------------------------------------
void H5Object::renameAttr(const H5std_string& oldname, const H5std_string& newname) const
{
- renameAttr (oldname.c_str(), newname.c_str());
+ renameAttr (oldname.c_str(), newname.c_str());
}
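
A sketch combining the existence check, rename, and removal wrappers; tidy_attributes and the attribute names are hypothetical.

// Rename "version" to "revision" if present, then drop an obsolete attribute
void tidy_attributes(const DataSet& dset)
{
    if (dset.attrExists("version"))
        dset.renameAttr("version", "revision");
    if (dset.attrExists("obsolete"))
        dset.removeAttr("obsolete");
}
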
+
+#ifndef DOXYGEN_SHOULD_SKIP_THIS
//--------------------------------------------------------------------------
// Function: getObjName
///\brief Given an id, returns the type of the object.
@@ -355,11 +404,11 @@ ssize_t H5Object::getObjName(char *obj_name, size_t buf_size) const
// If H5Iget_name returns a negative value, raise an exception
if (name_size < 0)
- {
+ {
throw Exception(inMemFunc("getObjName"), "H5Iget_name failed");
}
else if (name_size == 0)
- {
+ {
throw Exception(inMemFunc("getObjName"), "Object must have a name, but name length is 0");
}
// Return length of the name
@@ -383,16 +432,16 @@ H5std_string H5Object::getObjName() const
// If H5Iget_name failed, throw exception
if (name_size < 0)
- {
+ {
throw Exception(inMemFunc("getObjName"), "H5Iget_name failed");
}
else if (name_size == 0)
- {
+ {
throw Exception(inMemFunc("getObjName"), "Object must have a name, but name length is 0");
}
// Object's name exists, retrieve it
else if (name_size > 0)
- {
+ {
char* name_C = new char[name_size+1]; // temporary C-string
HDmemset(name_C, 0, name_size+1); // clear buffer
@@ -428,13 +477,13 @@ ssize_t H5Object::getObjName(H5std_string& obj_name, size_t len) const
// If no length is provided, get the entire object name
if (len == 0)
- {
+ {
obj_name = getObjName();
name_size = obj_name.length();
}
// If length is provided, get that number of characters in name
else
- {
+ {
char* name_C = new char[len+1]; // temporary C-string
HDmemset(name_C, 0, len+1); // clear buffer
@@ -453,11 +502,10 @@ ssize_t H5Object::getObjName(H5std_string& obj_name, size_t len) const
return(name_size);
}
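
For reference, a sketch of the getObjName() overloads; show_names is a made-up helper name.

// Retrieve the object's full pathname, e.g. "/dset", and a truncated form
void show_names(const DataSet& dset)
{
    H5std_string full_name = dset.getObjName();

    H5std_string partial;
    ssize_t name_len = dset.getObjName(partial, 4);   // only the first 4 characters
}
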
-#ifndef DOXYGEN_SHOULD_SKIP_THIS
//--------------------------------------------------------------------------
-// Function: H5Object destructor
-///\brief Noop destructor.
-// Programmer Binh-Minh Ribler - 2000
+// Function: H5Object destructor
+///\brief Noop destructor.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
H5Object::~H5Object() {}
#endif // DOXYGEN_SHOULD_SKIP_THIS
diff --git a/c++/src/H5Object.h b/c++/src/H5Object.h
index 72a9f50..fdaead2 100644
--- a/c++/src/H5Object.h
+++ b/c++/src/H5Object.h
@@ -6,12 +6,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef __H5Object_H
@@ -24,20 +22,20 @@ namespace H5 {
and Group.
Modification:
- Sept 18, 2012: Added class H5Location in between IdComponent and
- H5Object. An H5File now inherits from H5Location. All HDF5
- wrappers in H5Object are moved up to H5Location. H5Object
- is left mostly empty for future wrappers that are only for
- group, dataset, and named datatype. Note that the reason for
- adding H5Location instead of simply moving H5File to be under
- H5Object is H5File is not an HDF5 object, and renaming H5Object
- to H5Location will risk breaking user applications.
- -BMR
- Apr 2, 2014: Added wrapper getObjName for H5Iget_name
- Sep 21, 2016: Rearranging classes (HDFFV-9920) moved H5A wrappers back
- into H5Object. This way, C functions that takes attribute id
- can be in H5Location and those that cannot take attribute id
- can be in H5Object.
+ Sept 18, 2012: Added class H5Location in between IdComponent and
+ H5Object. An H5File now inherits from H5Location. All HDF5
+ wrappers in H5Object are moved up to H5Location. H5Object
+ is left mostly empty for future wrappers that are only for
+ group, dataset, and named datatype. Note that the reason for
+ adding H5Location instead of simply moving H5File to be under
+ H5Object is H5File is not an HDF5 object, and renaming H5Object
+ to H5Location will risk breaking user applications.
+ -BMR
+ Apr 2, 2014: Added wrapper getObjName for H5Iget_name
+ Sep 21, 2016: Rearranging classes (HDFFV-9920) moved H5A wrappers back
+ into H5Object. This way, C functions that take an attribute id
+ can be in H5Location and those that cannot take an attribute id
+ can be in H5Object.
Inheritance: H5Location -> IdComponent
*/
@@ -46,65 +44,68 @@ class H5_DLLCPP H5Object;
class H5_DLLCPP Attribute;
// Define the operator function pointer for H5Aiterate().
-typedef void (*attr_operator_t)( H5Object& loc/*in*/,
+typedef void (*attr_operator_t)(H5Object& loc/*in*/,
const H5std_string attr_name/*in*/,
void *operator_data/*in,out*/);
// User data for attribute iteration
class UserData4Aiterate {
public:
- attr_operator_t op;
- void* opData;
- H5Object* location;
+ attr_operator_t op;
+ void* opData;
+ H5Object* location;
};
class H5_DLLCPP H5Object : public H5Location {
public:
- // Creates an attribute for the specified object
- // PropList is currently not used, so always be default.
- Attribute createAttribute( const char* name, const DataType& type, const DataSpace& space, const PropList& create_plist = PropList::DEFAULT ) const;
- Attribute createAttribute( const H5std_string& name, const DataType& type, const DataSpace& space, const PropList& create_plist = PropList::DEFAULT ) const;
+ // Creates an attribute for the specified object
+ // PropList is currently not used, so it should always be left as the default.
+ Attribute createAttribute(const char* name, const DataType& type, const DataSpace& space, const PropList& create_plist = PropList::DEFAULT) const;
+ Attribute createAttribute(const H5std_string& name, const DataType& type, const DataSpace& space, const PropList& create_plist = PropList::DEFAULT) const;
- // Given its name, opens the attribute that belongs to an object at
- // this location.
- Attribute openAttribute( const char* name ) const;
- Attribute openAttribute( const H5std_string& name ) const;
+ // Given its name, opens the attribute that belongs to an object at
+ // this location.
+ Attribute openAttribute(const char* name) const;
+ Attribute openAttribute(const H5std_string& name) const;
- // Given its index, opens the attribute that belongs to an object at
- // this location.
- Attribute openAttribute( const unsigned int idx ) const;
+ // Given its index, opens the attribute that belongs to an object at
+ // this location.
+ Attribute openAttribute(const unsigned int idx) const;
- // Iterate user's function over the attributes of this object.
- int iterateAttrs(attr_operator_t user_op, unsigned* idx = NULL, void* op_data = NULL);
+ // Iterate user's function over the attributes of this object.
+ int iterateAttrs(attr_operator_t user_op, unsigned* idx = NULL, void* op_data = NULL);
- // Determines the number of attributes belong to this object.
- int getNumAttrs() const;
+ // Returns the object header version of an object
+ unsigned objVersion() const;
- // Checks whether the named attribute exists for this object.
- bool attrExists(const char* name) const;
- bool attrExists(const H5std_string& name) const;
+ // Determines the number of attributes belonging to this object.
+ int getNumAttrs() const;
- // Renames the named attribute to a new name.
- void renameAttr(const char* oldname, const char* newname) const;
- void renameAttr(const H5std_string& oldname, const H5std_string& newname) const;
+ // Checks whether the named attribute exists for this object.
+ bool attrExists(const char* name) const;
+ bool attrExists(const H5std_string& name) const;
- // Removes the named attribute from this object.
- void removeAttr(const char* name) const;
- void removeAttr(const H5std_string& name) const;
+ // Renames the named attribute to a new name.
+ void renameAttr(const char* oldname, const char* newname) const;
+ void renameAttr(const H5std_string& oldname, const H5std_string& newname) const;
- // Returns an identifier.
- virtual hid_t getId() const = 0;
+ // Removes the named attribute from this object.
+ void removeAttr(const char* name) const;
+ void removeAttr(const H5std_string& name) const;
+
+ // Returns an identifier.
+ virtual hid_t getId() const = 0;
#ifndef DOXYGEN_SHOULD_SKIP_THIS
- // Gets the name of this HDF5 object, i.e., Group, DataSet, or
- // DataType. These should have const but are retiring anyway.
- ssize_t getObjName(char *obj_name, size_t buf_size = 0) const;
- ssize_t getObjName(H5std_string& obj_name, size_t len = 0) const;
- H5std_string getObjName() const;
+ // Gets the name of this HDF5 object, i.e., Group, DataSet, or
+ // DataType. These should be const but are being retired anyway.
+ ssize_t getObjName(char *obj_name, size_t buf_size = 0) const;
+ ssize_t getObjName(H5std_string& obj_name, size_t len = 0) const;
+ H5std_string getObjName() const;
protected:
- // Default constructor
- H5Object();
+ // Default constructor
+ H5Object();
// *** Deprecation warning ***
// The following two constructors are no longer appropriate after the
@@ -113,22 +114,22 @@ class H5_DLLCPP H5Object : public H5Location {
// other will be removed from 1.10 release, and then from 1.8 if its
// removal does not raise any problems in two 1.10 releases.
- // Creates a copy of an existing object giving the object id
- H5Object( const hid_t object_id );
+ // Creates a copy of an existing object giving the object id
+ H5Object(const hid_t object_id);
- // Copy constructor: makes copy of an H5Object object.
- // H5Object(const H5Object& original);
+ // Copy constructor: makes copy of an H5Object object.
+ // H5Object(const H5Object& original);
// Sets the identifier of this object to a new value. - this one
// doesn't increment reference count
virtual void p_setId(const hid_t new_id) = 0;
- // Noop destructor.
- virtual ~H5Object();
+ // Noop destructor.
+ virtual ~H5Object();
#endif // DOXYGEN_SHOULD_SKIP_THIS
-}; /* end class H5Object */
+}; // end of H5Object
+} // namespace H5
-}
#endif // __H5Object_H
diff --git a/c++/src/H5OcreatProp.cpp b/c++/src/H5OcreatProp.cpp
index 3cda945..ec6c08e 100644
--- a/c++/src/H5OcreatProp.cpp
+++ b/c++/src/H5OcreatProp.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <string>
@@ -20,7 +18,6 @@
#include "H5IdComponent.h"
#include "H5PropList.h"
#include "H5OcreatProp.h"
-#include "H5FaccProp.h"
namespace H5 {
@@ -76,49 +73,49 @@ void ObjCreatPropList::deleteConstants()
}
//--------------------------------------------------------------------------
-// Purpose: Constant for default property
+// Purpose: Constant for default property
//--------------------------------------------------------------------------
const ObjCreatPropList& ObjCreatPropList::DEFAULT = *getConstant();
#endif // DOXYGEN_SHOULD_SKIP_THIS
//--------------------------------------------------------------------------
-// Function: Default Constructor
-///\brief Creates a file access property list
-// Programmer: Binh-Minh Ribler - 2000
+// Function: Default Constructor
+///\brief       Creates an object creation property list
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
ObjCreatPropList::ObjCreatPropList() : PropList(H5P_OBJECT_CREATE) {}
//--------------------------------------------------------------------------
-// Function: ObjCreatPropList copy constructor
-///\brief Copy Constructor: makes a copy of the original
-///\param original - IN: ObjCreatPropList instance to copy
-// Programmer: Binh-Minh Ribler - 2000
+// Function: ObjCreatPropList copy constructor
+///\brief Copy Constructor: makes a copy of the original
+///\param original - IN: ObjCreatPropList instance to copy
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
ObjCreatPropList::ObjCreatPropList(const ObjCreatPropList& original) : PropList(original) {}
//--------------------------------------------------------------------------
-// Function: ObjCreatPropList overloaded constructor
-///\brief Creates a file access property list using the id of an
-/// existing one.
-// Programmer: Binh-Minh Ribler - 2000
+// Function: ObjCreatPropList overloaded constructor
+///\brief       Creates an object creation property list using the id of an
+/// existing one.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
ObjCreatPropList::ObjCreatPropList(const hid_t plist_id) : PropList(plist_id) {}
//--------------------------------------------------------------------------
-// Function: ObjCreatPropList::setAttrPhaseChange
-///\brief Sets attribute storage phase change thresholds.
-///\param max_compact - IN: Maximum number of attributes to be stored in
-/// compact storage. Default to 8
-///\param min_dense - IN: Minimum number of attributes to be stored in
-/// dense storage. Default to 6
-///\exception H5::PropListIException
+// Function: ObjCreatPropList::setAttrPhaseChange
+///\brief Sets attribute storage phase change thresholds.
+///\param max_compact - IN: Maximum number of attributes to be stored in
+///             compact storage. Defaults to 8
+///\param min_dense - IN: Minimum number of attributes to be stored in
+///             dense storage. Defaults to 6
+///\exception H5::PropListIException
///\par Description
-/// If \c max_compact is set to 0, dense storage will be used.
-/// For more detail about on attribute storage, please refer to the
-/// C layer Reference Manual at:
+/// If \c max_compact is set to 0, dense storage will be used.
+///             For more detail about attribute storage, please refer to the
+/// C layer Reference Manual at:
/// https://www.hdfgroup.org/HDF5/doc/RM/RM_H5P.html#Property-SetAttrPhaseChange
-// Programmer: Binh-Minh Ribler - September 2015
+// Programmer Binh-Minh Ribler - September 2015
//--------------------------------------------------------------------------
void ObjCreatPropList::setAttrPhaseChange(unsigned max_compact, unsigned min_dense) const
{
@@ -130,19 +127,19 @@ void ObjCreatPropList::setAttrPhaseChange(unsigned max_compact, unsigned min_den
}
//--------------------------------------------------------------------------
-// Function: ObjCreatPropList::getAttrPhaseChange
-///\brief Gets attribute storage phase change thresholds.
-///\param max_compact - OUT: Maximum number of attributes to be stored in
-/// compact storage.
-///\param min_dense - OUT: Minimum number of attributes to be stored in
-/// dense storage.
-///\exception H5::PropListIException
+// Function: ObjCreatPropList::getAttrPhaseChange
+///\brief Gets attribute storage phase change thresholds.
+///\param max_compact - OUT: Maximum number of attributes to be stored in
+/// compact storage.
+///\param min_dense - OUT: Minimum number of attributes to be stored in
+/// dense storage.
+///\exception H5::PropListIException
///\par Description
-/// If \c max_compact is set to 0, dense storage will be used.
-/// For more detail about on attribute storage, please refer to the
-/// C layer Reference Manual at:
+/// If \c max_compact is set to 0, dense storage will be used.
+///             For more detail about attribute storage, please refer to the
+/// C layer Reference Manual at:
/// https://www.hdfgroup.org/HDF5/doc/RM/RM_H5P.html#Property-GetAttrPhaseChange
-// Programmer: Binh-Minh Ribler - September 2015
+// Programmer Binh-Minh Ribler - September 2015
//--------------------------------------------------------------------------
void ObjCreatPropList::getAttrPhaseChange(unsigned& max_compact, unsigned& min_dense) const
{
@@ -155,23 +152,23 @@ void ObjCreatPropList::getAttrPhaseChange(unsigned& max_compact, unsigned& min_d
}
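
A sketch of the phase-change wrappers; configure_phase_change is a made-up helper name and the threshold values are arbitrary. The property list takes effect only when it is supplied at object creation time.

void configure_phase_change(ObjCreatPropList& ocpl)
{
    // Switch to dense attribute storage above 10 attributes, back to compact below 5
    ocpl.setAttrPhaseChange(10, 5);

    unsigned max_compact = 0, min_dense = 0;
    ocpl.getAttrPhaseChange(max_compact, min_dense);   // reads back 10 and 5
}
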
//--------------------------------------------------------------------------
-// Function: ObjCreatPropList::setAttrCrtOrder
-///\brief Sets tracking and indexing of attribute creation order.
-///\param crt_order_flags - IN: Flags specifying whether to track and
-/// index attribute creation order. Default: No flag set
-///\exception H5::PropListIException
+// Function: ObjCreatPropList::setAttrCrtOrder
+///\brief       Sets the flags for creation order of attributes on an object
+///\param crt_order_flags - IN: Flags specifying whether to track and
+/// index attribute creation order. Default: No flag set
+///\exception H5::PropListIException
///\par Description
-/// Valid flags are:
-/// \li \c H5P_CRT_ORDER_TRACKED - Attribute creation order is tracked
-/// \li \c H5P_CRT_ORDER_INDEXED - Attribute creation order is
-/// indexed (requires H5P_CRT_ORDER_TRACKED).
-/// When no flag is set, attribute creation order is neither
-/// tracked not indexed. Note that HDF5 currently provides no
-/// mechanism to turn on attribute creation order tracking at object
-/// creation time and to build the index later.
-/// The C layer Reference Manual at can be found at:
+/// Valid flags are:
+/// \li \c H5P_CRT_ORDER_TRACKED - Attribute creation order is tracked
+/// \li \c H5P_CRT_ORDER_INDEXED - Attribute creation order is
+/// indexed (requires H5P_CRT_ORDER_TRACKED).
+/// When no flag is set, attribute creation order is neither
+///             tracked nor indexed. Note that HDF5 currently provides no
+/// mechanism to turn on attribute creation order tracking at object
+/// creation time and to build the index later.
+///             The C layer Reference Manual entry can be found at:
/// https://www.hdfgroup.org/HDF5/doc/RM/RM_H5P.html#Property-SetAttrCreationOrder
-// Programmer: Binh-Minh Ribler - September 2015
+// Programmer Binh-Minh Ribler - September 2015
//--------------------------------------------------------------------------
void ObjCreatPropList::setAttrCrtOrder(unsigned crt_order_flags) const
{
@@ -183,18 +180,17 @@ void ObjCreatPropList::setAttrCrtOrder(unsigned crt_order_flags) const
}
//--------------------------------------------------------------------------
-// Function: ObjCreatPropList::getAttrCrtOrder
-///\brief Gets tracking and indexing settings for attribute
-/// creation order.
-///\param crt_order_flags - OUT: Flags specifying whether to track and
-/// index attribute creation order
-///\exception H5::PropListIException
+// Function: ObjCreatPropList::getAttrCrtOrder
+///\brief       Returns the flags indicating whether creation order is tracked/indexed
+/// for attributes on an object.
+///\return The flags
+///\exception H5::PropListIException
///\par Description
-/// When no flag is set, i.e. crt_order_flags = 0, attribute
-/// creation order is neither tracked not indexed.
-/// The C layer Reference Manual at can be found at:
+/// When no flag is set, i.e. crt_order_flags = 0, attribute
+///             creation order is neither tracked nor indexed.
+///             The C layer Reference Manual entry can be found at:
/// https://www.hdfgroup.org/HDF5/doc/RM/RM_H5P.html#Property-GetAttrCreationOrder
-// Programmer: Binh-Minh Ribler - September 2015
+// Programmer Binh-Minh Ribler - September 2015
//--------------------------------------------------------------------------
unsigned ObjCreatPropList::getAttrCrtOrder() const
{
@@ -209,9 +205,9 @@ unsigned ObjCreatPropList::getAttrCrtOrder() const
}
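
A sketch of the creation-order wrappers; track_attr_creation_order is a made-up helper name, and the H5P_CRT_ORDER_* flags come from the C library. As with the phase-change thresholds, the list must be supplied when the object is created for the setting to take effect.

void track_attr_creation_order(ObjCreatPropList& ocpl)
{
    // Track and index attribute creation order on objects created with this list
    ocpl.setAttrCrtOrder(H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED);

    unsigned flags = ocpl.getAttrCrtOrder();   // both bits set above are reported back
}
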
//--------------------------------------------------------------------------
-// Function: ObjCreatPropList destructor
-///\brief Noop destructor
-// Programmer Binh-Minh Ribler - 2000
+// Function: ObjCreatPropList destructor
+///\brief Noop destructor
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
ObjCreatPropList::~ObjCreatPropList() {}
diff --git a/c++/src/H5OcreatProp.h b/c++/src/H5OcreatProp.h
index bfba1c4..878c67a 100644
--- a/c++/src/H5OcreatProp.h
+++ b/c++/src/H5OcreatProp.h
@@ -6,12 +6,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef __H5ObjCreatPropList_H
@@ -27,37 +25,37 @@ namespace H5 {
*/
class H5_DLLCPP ObjCreatPropList : public PropList {
public:
- ///\brief Default object creation property list.
- static const ObjCreatPropList& DEFAULT;
+ ///\brief Default object creation property list.
+ static const ObjCreatPropList& DEFAULT;
- // Creates a object creation property list.
- ObjCreatPropList();
+ // Creates an object creation property list.
+ ObjCreatPropList();
- // Sets attribute storage phase change thresholds.
- void setAttrPhaseChange(unsigned max_compact = 8, unsigned min_dense = 6) const;
+ // Sets attribute storage phase change thresholds.
+ void setAttrPhaseChange(unsigned max_compact = 8, unsigned min_dense = 6) const;
- // Gets attribute storage phase change thresholds.
- void getAttrPhaseChange(unsigned& max_compact, unsigned& min_dense) const;
+ // Gets attribute storage phase change thresholds.
+ void getAttrPhaseChange(unsigned& max_compact, unsigned& min_dense) const;
- // Sets tracking and indexing of attribute creation order.
- void setAttrCrtOrder(unsigned crt_order_flags) const;
+ // Sets tracking and indexing of attribute creation order.
+ void setAttrCrtOrder(unsigned crt_order_flags) const;
- // Gets tracking and indexing settings for attribute creation order.
- unsigned getAttrCrtOrder() const;
+ // Gets tracking and indexing settings for attribute creation order.
+ unsigned getAttrCrtOrder() const;
- ///\brief Returns this class name.
- virtual H5std_string fromClass () const { return("ObjCreatPropList"); }
+ ///\brief Returns this class name.
+ virtual H5std_string fromClass () const { return("ObjCreatPropList"); }
- // Copy constructor: creates a copy of a ObjCreatPropList object.
- ObjCreatPropList( const ObjCreatPropList& original );
+ // Copy constructor: creates a copy of an ObjCreatPropList object.
+ ObjCreatPropList(const ObjCreatPropList& original);
- // Creates a copy of an existing object creation property list
- // using the property list id.
- ObjCreatPropList (const hid_t plist_id);
+ // Creates a copy of an existing object creation property list
+ // using the property list id.
+ ObjCreatPropList (const hid_t plist_id);
- // Noop destructor
- virtual ~ObjCreatPropList();
+ // Noop destructor
+ virtual ~ObjCreatPropList();
#ifndef DOXYGEN_SHOULD_SKIP_THIS
@@ -72,6 +70,7 @@ class H5_DLLCPP ObjCreatPropList : public PropList {
#endif // DOXYGEN_SHOULD_SKIP_THIS
-};
-}
+}; // end of ObjCreatPropList
+} // namespace H5
+
#endif // __H5ObjCreatPropList_H
diff --git a/c++/src/H5PredType.cpp b/c++/src/H5PredType.cpp
index 0711020..53d525c 100644
--- a/c++/src/H5PredType.cpp
+++ b/c++/src/H5PredType.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <string>
@@ -21,6 +19,7 @@
#include "H5PropList.h"
#include "H5OcreatProp.h"
#include "H5DcreatProp.h"
+#include "H5LaccProp.h"
#include "H5Location.h"
#include "H5Object.h"
#include "H5DataType.h"
@@ -31,14 +30,14 @@ namespace H5 {
#ifndef DOXYGEN_SHOULD_SKIP_THIS
//--------------------------------------------------------------------------
-// Function: PredType overloaded constructor
-///\brief Creates a PredType object using the id of an existing
-/// predefined datatype.
-///\param predtype_id - IN: Id of a predefined datatype
+// Function: PredType overloaded constructor
+///\brief Creates a PredType object using the id of an existing
+/// predefined datatype.
+///\param predtype_id - IN: Id of a predefined datatype
// Description
-// This constructor creates a PredType object by copying
-// the provided HDF5 predefined datatype.
-// Programmer Binh-Minh Ribler - 2000
+// This constructor creates a PredType object by copying
+// the provided HDF5 predefined datatype.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
PredType::PredType(const hid_t predtype_id) : AtomType(predtype_id)
{
@@ -46,69 +45,69 @@ PredType::PredType(const hid_t predtype_id) : AtomType(predtype_id)
}
//--------------------------------------------------------------------------
-// Function: PredType default constructor
-///\brief Default constructor: Creates a stub predefined datatype
-// Programmer Binh-Minh Ribler - 2000
+// Function: PredType default constructor
+///\brief Default constructor: Creates a stub predefined datatype
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
PredType::PredType() : AtomType() {}
#endif // DOXYGEN_SHOULD_SKIP_THIS
//--------------------------------------------------------------------------
-// Function: PredType copy constructor
-///\brief Copy constructor: makes a copy of the original PredType object.
-///\param original - IN: PredType instance to copy
-// Programmer Binh-Minh Ribler - 2000
+// Function: PredType copy constructor
+///\brief Copy constructor: makes a copy of the original PredType object.
+///\param original - IN: PredType instance to copy
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
PredType::PredType(const PredType& original) : AtomType(original) {}
//--------------------------------------------------------------------------
-// Function: PredType::operator=
-///\brief Assignment operator.
-///\param rhs - IN: Reference to the predefined datatype
-///\return Reference to PredType instance
-///\exception H5::DataTypeIException
+// Function: PredType::operator=
+///\brief Assignment operator.
+///\param rhs - IN: Reference to the predefined datatype
+///\return Reference to PredType instance
+///\exception H5::DataTypeIException
// Description
-// Makes a copy of the type on the right hand side and stores
-// the new id in the left hand side object.
-// Programmer Binh-Minh Ribler - 2000
+// Makes a copy of the type on the right hand side and stores
+// the new id in the left hand side object.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-PredType& PredType::operator=( const PredType& rhs )
+PredType& PredType::operator=(const PredType& rhs)
{
if (this != &rhs)
- copy(rhs);
+ copy(rhs);
return(*this);
}
#ifndef DOXYGEN_SHOULD_SKIP_THIS
// These dummy functions do not inherit from DataType - they'll
// throw a DataTypeIException if invoked.
-void PredType::commit(H5Location& loc, const char* name )
+void PredType::commit(H5Location& loc, const char* name)
{
- throw DataTypeIException("PredType::commit", "Error: Attempted to commit a predefined datatype. Invalid operation!" );
+ throw DataTypeIException("PredType::commit", "Error: Attempted to commit a predefined datatype. Invalid operation!");
}
-void PredType::commit(H5Location& loc, const H5std_string& name )
+void PredType::commit(H5Location& loc, const H5std_string& name)
{
- commit( loc, name.c_str());
+ commit(loc, name.c_str());
}
bool PredType::committed()
{
- throw DataTypeIException("PredType::committed", "Error: Attempting to check for commit status on a predefined datatype." );
+ throw DataTypeIException("PredType::committed", "Error: Attempting to check for commit status on a predefined datatype.");
}
#endif // DOXYGEN_SHOULD_SKIP_THIS
// Default destructor
//--------------------------------------------------------------------------
-// Function: PredType destructor
-///\brief Noop destructor.
-// Programmer Binh-Minh Ribler - 2000
+// Function: PredType destructor
+///\brief Noop destructor.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
PredType::~PredType() {}
/*****************************************************************************
- The following section is regarding the global constants PredType,
- DataSpace, and PropList.
+ The following section concerns the global constants PredType,
+ DataSpace, and PropList.
*****************************************************************************/
@@ -291,18 +290,18 @@ PredType* PredType::NATIVE_UINT_FAST64_;
#endif /* H5_SIZEOF_UINT_FAST64_T */
//--------------------------------------------------------------------------
-// Function: PredType::getPredTypes
-// Purpose: Returns the dummy PredType constant object pointer
-// Return: PredType object pointer
+// Function: PredType::getPredTypes
+// Purpose Returns the dummy PredType constant object pointer
+// Return: PredType object pointer
// Description
-// If the dummy constant PREDTYPE_CONST_ is not allocated yet,
-// call makePredTypes() to allocate all of the PredType constants.
-// Otherwise, just simply return the object pointer PREDTYPE_CONST_.
+// If the dummy constant PREDTYPE_CONST_ is not allocated yet,
+// call makePredTypes() to allocate all of the PredType constants.
+// Otherwise, just simply return the object pointer PREDTYPE_CONST_.
//
-// Note that, there is a similar function to getPredTypes() in
-// other classes, that have global constants, is called getConstant().
+//              Note that the other classes that have global constants provide a
+//              function similar to getPredTypes(); it is called getConstant().
//
-// Programmer Binh-Minh Ribler - September 2015
+// Programmer Binh-Minh Ribler - September 2015
//--------------------------------------------------------------------------
PredType* PredType::getPredTypes()
{
@@ -317,16 +316,16 @@ PredType* PredType::getPredTypes()
// If the dummy constant pointer is not allocated, allocate all PredType
// constant pointers. Otherwise, throw because it shouldn't be.
if (PREDTYPE_CONST_ == 0)
- makePredTypes();
+ makePredTypes();
else
- throw H5::DataTypeIException("PredType::getPredTypes", "PredType::getPredTypes is being invoked on an allocated PREDTYPE_CONST_");
+ throw H5::DataTypeIException("PredType::getPredTypes", "PredType::getPredTypes is being invoked on an allocated PREDTYPE_CONST_");
return PREDTYPE_CONST_;
}
//--------------------------------------------------------------------------
-// Function: PredType::makePredTypes
-// Purpose: Allocate all PredType constants.
-// Programmer Binh-Minh Ribler - September 2015
+// Function: PredType::makePredTypes
+// Purpose Allocate all PredType constants.
+// Programmer Binh-Minh Ribler - September 2015
//--------------------------------------------------------------------------
void PredType::makePredTypes()
{
@@ -514,9 +513,9 @@ void PredType::makePredTypes()
//--------------------------------------------------------------------------
-// Function: PredType::deleteConstants
-// Purpose: Deletes all PredType constant pointers.
-// Programmer Binh-Minh Ribler - September 2015
+// Function: PredType::deleteConstants
+// Purpose Deletes all PredType constant pointers.
+// Programmer Binh-Minh Ribler - September 2015
//--------------------------------------------------------------------------
void PredType::deleteConstants()
{
@@ -890,206 +889,205 @@ const PredType& PredType::NATIVE_UINT_FAST64 = *NATIVE_UINT_FAST64_;
} // end namespace
/***************************************************************************
- Design Note
- ===========
+ Design Note
+ ===========
September 2015:
- The C++ library has several types of global constants from different
- classes, such as PropList, PredType, DataSpace, etc... Previously,
- these global constants were declared statically and the C++ library used
- a constant, called PredType::AtExit, to detect when all the global
- contants are destroyed then close the C library (H5close). This method
- relied on the order of the constants being created and destroyed and
- that PredType constants be the last to be destroyed. In September
- 2015, it was recognized that the order in which the global constants were
- created and destroyed was actually undefined, thus can be different
- between different compilers. This resulted in failure when compilers
- destroy PredType constants before others because when PredType::AtExit
- was destroyed, the C library was closed, so when the constants of other
- classes such as PropList or DataSpace were being deleted, the C library
- would not be available.
-
- These are the classes that have global constants:
- + PredType
- + DataSpace
- + PropList (and its subclasses below)
- + FileAccPropList
- + FileCreatPropList
- + DSetMemXferPropList
- + DSetCreatPropList
-
-
- The new method includes these main points:
-
- - The C++ library uses dynamically allocated constants to have the
- control in which order the global constants are created/destroyed.
-
- - The previous static constants are changed to be the references to
- the dynamically allocated constants to avoid impact on applications.
-
- - The first time an IdComponent default constructor is invoked, it
- will call the function H5Library::initH5cpp which registers the
- terminating functions from each class that has the global constants
- so that these functions can destroy those constants at the exit of the
- application. IdComponent is a baseclass of any object class that has
- an identifier, such as Group, DataSet, DataType,... The classes which
- have the global constants are all derived from IdComponent.
-
- - At the normal termination of the application, each registered function
- for each constant type will delete all the allocated constants in
- that type class, then a different terminating function, which was also
- registered with atexit() by initH5cpp, will call H5close to close the
- C library.
-
- The following list presents the differences between the old and new
- methods and the changes implemented for the new method.
-
- 1. The following items are added to class H5Library:
- // Private instance to be created by H5Library only
- static H5Library* instance;
-
- // Returns a singleton H5Library to initialize the global
- // constants, invoked in IdComponent default constructor
- static H5Library* getInstance(); // public
-
- // Registers cleanup and terminating functions with atexit(),
- // called in IdComponent default constructor
- static void initH5cpp(void); // public
-
- // Calls H5close to terminate the library, registered with
- // atexit(), as the last thing to be done.
- static void termH5cpp(void); // public
-
- 2. The following shows the differences between the old and new methods
- for allocating the PredType constants. There are more than 100
- constants, but only one is shown here for examples.
-
- Old Method:
- ----------
- // Declaration of the constant - in "H5PredType.h"
- static const PredType NATIVE_INT;
-
- // Definition of the constant - in "H5PredType.cpp"
- const PredType PredType::NATIVE_INT(H5T_NATIVE_INT);
-
- New Method:
- ----------
- // Declare pointer for a constant - in "H5PredType.h"
- static PredType* NATIVE_INT_; // "H5PredType.h"
-
- // Change previous constant to reference - in "H5PredType.h"
- static const PredType& NATIVE_INT;
-
- // The assignment of the first static constant, named
- // PREDTYPE_CONST, calls makePredTypes() which allocates the
- // dynamic memory for every PredType constant.
-
- // Creates a dynamic PredType object representing a C constant
- // - in makePredTypes()
- NATIVE_INT_ = new PredType(H5T_NATIVE_INT);
-
- // Assign the constant reference to the dynamic object
- // - in "H5PredType.cpp"
- const PredType& PredType::NATIVE_INT = *NATIVE_INT_;
-
- Functions added to class PredType:
-
- // Creates the constants
- static void makePredTypes(); // private
-
- // Calls makePredTypes to create the constants and returns
- // the dummy constant PREDTYPE_CONST;
- static PredType* getPredTypes(); // private
-
- // Deletes the constants
- static void deleteConstants(); // public
-
- 3. This section shows the differences between the old and new methods
- for allocating the DataSpace constant, DataSpace::ALL.
+ The C++ library has several types of global constants from different
+ classes, such as PropList, PredType, DataSpace, etc. Previously,
+ these global constants were declared statically, and the C++ library used
+ a constant, called PredType::AtExit, to detect when all the global
+ constants had been destroyed and then close the C library (H5close). This
+ method relied on the order in which the constants were created and
+ destroyed, and required that the PredType constants be the last to be
+ destroyed. In September 2015, it was recognized that the order in which
+ the global constants were created and destroyed was actually undefined,
+ and thus could differ between compilers. This resulted in failures when
+ a compiler destroyed the PredType constants before the others: once
+ PredType::AtExit was destroyed, the C library was closed, so when the
+ constants of other classes such as PropList or DataSpace were being
+ deleted, the C library was no longer available.
+
+ These are the classes that have global constants:
+ + PredType
+ + DataSpace
+ + PropList (and its subclasses below)
+ + FileAccPropList
+ + FileCreatPropList
+ + DSetMemXferPropList
+ + DSetCreatPropList
+
+ The new method includes these main points:
+
+ - The C++ library uses dynamically allocated constants to control the
+ order in which the global constants are created/destroyed.
+
+ - The previous static constants are changed to references to the
+ dynamically allocated constants, to avoid impact on applications.
+
+ - The first time an IdComponent default constructor is invoked, it
+ will call the function H5Library::initH5cpp which registers the
+ terminating functions from each class that has the global constants
+ so that these functions can destroy those constants at the exit of the
+ application. IdComponent is a base class of any object class that has
+ an identifier, such as Group, DataSet, DataType,... The classes which
+ have the global constants are all derived from IdComponent.
+
+ - At the normal termination of the application, each registered function
+ for each constant type will delete all the allocated constants of
+ that class; then a different terminating function, which was also
+ registered with atexit() by initH5cpp, will call H5close to close the
+ C library. (A standalone sketch of this ordering follows this note.)
+
+ The following list presents the differences between the old and new
+ methods and the changes implemented for the new method.
+
+ 1. The following items are added to class H5Library:
+ // Private instance to be created by H5Library only
+ static H5Library* instance;
+
+ // Returns a singleton H5Library to initialize the global
+ // constants, invoked in IdComponent default constructor
+ static H5Library* getInstance(); // public
+
+ // Registers cleanup and terminating functions with atexit(),
+ // called in IdComponent default constructor
+ static void initH5cpp(void); // public
+
+ // Calls H5close to terminate the library, registered with
+ // atexit(), as the last thing to be done.
+ static void termH5cpp(void); // public
+
+ 2. The following shows the differences between the old and new methods
+ for allocating the PredType constants. There are more than 100
+ constants, but only one is shown here as an example.
+
+ Old Method:
+ ----------
+ // Declaration of the constant - in "H5PredType.h"
+ static const PredType NATIVE_INT;
+
+ // Definition of the constant - in "H5PredType.cpp"
+ const PredType PredType::NATIVE_INT(H5T_NATIVE_INT);
+
+ New Method:
+ ----------
+ // Declare pointer for a constant - in "H5PredType.h"
+ static PredType* NATIVE_INT_; // "H5PredType.h"
+
+ // Change previous constant to reference - in "H5PredType.h"
+ static const PredType& NATIVE_INT;
+
+ // The assignment of the first static constant, named
+ // PREDTYPE_CONST, calls makePredTypes() which allocates the
+ // dynamic memory for every PredType constant.
+
+ // Creates a dynamic PredType object representing a C constant
+ // - in makePredTypes()
+ NATIVE_INT_ = new PredType(H5T_NATIVE_INT);
+
+ // Assign the constant reference to the dynamic object
+ // - in "H5PredType.cpp"
+ const PredType& PredType::NATIVE_INT = *NATIVE_INT_;
+
+ Functions added to class PredType:
+
+ // Creates the constants
+ static void makePredTypes(); // private
+
+ // Calls makePredTypes to create the constants and returns
+ // the dummy constant PREDTYPE_CONST;
+ static PredType* getPredTypes(); // private
+
+ // Deletes the constants
+ static void deleteConstants(); // public
+
+ 3. This section shows the differences between the old and new methods
+ for allocating the DataSpace constant, DataSpace::ALL.
- Old Method:
- ----------
- // Declaration of the constant - in "H5DataSpace.h"
- static const DataSpace ALL;
+ Old Method:
+ ----------
+ // Declaration of the constant - in "H5DataSpace.h"
+ static const DataSpace ALL;
- // Definition of the constant - in "H5DataSpace.cpp"
- const DataSpace DataSpace::ALL(H5S_ALL);
+ // Definition of the constant - in "H5DataSpace.cpp"
+ const DataSpace DataSpace::ALL(H5S_ALL);
- New Method:
- ----------
- // Declare pointer for a constant - in "H5DataSpace.h"
- static DataSpace* ALL_; // "H5DataSpace.h"
+ New Method:
+ ----------
+ // Declare pointer for a constant - in "H5DataSpace.h"
+ static DataSpace* ALL_; // "H5DataSpace.h"
- // Change previous constant to reference - in "H5DataSpace.h"
- static const DataSpace& ALL;
+ // Change previous constant to reference - in "H5DataSpace.h"
+ static const DataSpace& ALL;
- // Creates a dynamic DataSpace object representing the C constant
- // - in "H5DataSpace.cpp"
- ALL_ = new DataSpace(H5S_ALL);
+ // Creates a dynamic DataSpace object representing the C constant
+ // - in "H5DataSpace.cpp"
+ ALL_ = new DataSpace(H5S_ALL);
- // Assign the constant reference to the dynamic object
- // - in "H5DataSpace.cpp"
- const DataSpace& DataSpace::ALL = *ALL_;
+ // Assign the constant reference to the dynamic object
+ // - in "H5DataSpace.cpp"
+ const DataSpace& DataSpace::ALL = *ALL_;
- Functions added to class DataSpace:
+ Functions added to class DataSpace:
- // Creates the constant
- static DataSpace* getConstant(); // private
+ // Creates the constant
+ static DataSpace* getConstant(); // private
- // Deletes the constant
- static void deleteConstants(); // public
+ // Deletes the constant
+ static void deleteConstants(); // public
- 4. This section shows the differences between the old and new methods
- for allocating the following constants
- - PropList constant, PropList::DEFAULT.
- - DSetCreatPropList constant, DSetCreatPropList::DEFAULT.
- - DSetMemXferPropList constant, DSetMemXferPropList::DEFAULT.
- - FileCreatPropList constant, FileCreatPropList::DEFAULT.
- - FileAccPropList constant, FileAccPropList::DEFAULT.
+ 4. This section shows the differences between the old and new methods
+ for allocating the following constants
+ - PropList constant, PropList::DEFAULT.
+ - DSetCreatPropList constant, DSetCreatPropList::DEFAULT.
+ - DSetMemXferPropList constant, DSetMemXferPropList::DEFAULT.
+ - FileCreatPropList constant, FileCreatPropList::DEFAULT.
+ - FileAccPropList constant, FileAccPropList::DEFAULT.
- For these constants, the library has the same changes, except the
- class names and the HDF5 corresponding constants. Only the items
- of PropList are listed, and "PropList" can be replaced by any of
- DSetCreatPropList, DSetMemXferPropList, FileCreatPropList,
- FileAccPropList for those classes. The HDF5 C constant "H5P_DEFAULT"
- can be replaced by any of these respectively: H5P_DATASET_CREATE,
- H5P_DATASET_XFER, H5P_FILE_CREATE, and H5P_FILE_ACCESS.
+ For these constants, the library has the same changes, except for the
+ class names and the corresponding HDF5 constants. Only the items
+ of PropList are listed, and "PropList" can be replaced by any of
+ DSetCreatPropList, DSetMemXferPropList, FileCreatPropList,
+ FileAccPropList for those classes. The HDF5 C constant "H5P_DEFAULT"
+ can be replaced by any of these respectively: H5P_DATASET_CREATE,
+ H5P_DATASET_XFER, H5P_FILE_CREATE, and H5P_FILE_ACCESS.
- Old Method:
- ----------
- // Declaration of the constant - in "H5PropList.h"
- static const PropList DEFAULT;
+ Old Method:
+ ----------
+ // Declaration of the constant - in "H5PropList.h"
+ static const PropList DEFAULT;
- // Definition of the constant - in "H5PropList.cpp"
- const PropList PropList::DEFAULT(H5P_DEFAULT);
+ // Definition of the constant - in "H5PropList.cpp"
+ const PropList PropList::DEFAULT(H5P_DEFAULT);
- New Method:
- ----------
- // Declare pointer for a constant - in "H5PropList.h"
- static PropList* DEFAULT_; // "H5PropList.h"
+ New Method:
+ ----------
+ // Declare pointer for a constant - in "H5PropList.h"
+ static PropList* DEFAULT_; // "H5PropList.h"
- // Change previous constant to reference - in "H5PropList.h"
- static const PropList& DEFAULT;
+ // Change previous constant to reference - in "H5PropList.h"
+ static const PropList& DEFAULT;
- // Creates a dynamic PropList object representing the C constant
- // - in "H5PropList.cpp"
- DEFAULT_ = new PropList(H5P_DEFAULT);
+ // Creates a dynamic PropList object representing the C constant
+ // - in "H5PropList.cpp"
+ DEFAULT_ = new PropList(H5P_DEFAULT);
- // Assign the constant reference to the dynamic object
- // - in "H5PropList.cpp"
- const PropList& PropList::DEFAULT = *DEFAULT_;
+ // Assign the constant reference to the dynamic object
+ // - in "H5PropList.cpp"
+ const PropList& PropList::DEFAULT = *DEFAULT_;
- Functions added to class PropList:
+ Functions added to class PropList:
- // Creates the constant
- static PropList* getConstant(); // private
+ // Creates the constant
+ static PropList* getConstant(); // private
- // Deletes the constants
- static void deleteConstants(); // public
+ // Deletes the constants
+ static void deleteConstants(); // public
- The same functions are added to the subclasses of PropList instead of
- using PropList's because of the class types and in favor of clarity.
+ The same functions are added to each subclass of PropList, rather than
+ reusing PropList's, because of the differing class types and for clarity.
****************************************************************************/
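[Illustration] The atexit() ordering that the Design Note relies on can be shown in isolation. The sketch below is not HDF5 code; Constants, Library, and fake_H5close are hypothetical stand-ins for PredType, H5Library, and H5close. Because atexit() runs handlers in reverse order of registration, the final close must be registered first so that it runs after every per-class cleanup.

    #include <cstdlib>
    #include <iostream>

    static void fake_H5close() { std::cout << "library closed\n"; }  // not the real H5close

    struct Constants {                          // stands in for PredType, DataSpace, ...
        static int* VALUE_;                     // dynamically allocated "constant"
        static void makeConstants()   { VALUE_ = new int(42); }
        static void deleteConstants() { delete VALUE_; VALUE_ = 0; }
    };
    int* Constants::VALUE_ = 0;

    struct Library {                            // stands in for H5Library
        static void termLib() { fake_H5close(); }     // like termH5cpp
        static void initLib()                         // like initH5cpp
        {
            std::atexit(termLib);                     // registered first -> runs last
            std::atexit(Constants::deleteConstants);  // registered last  -> runs first
            Constants::makeConstants();
        }
    };

    int main()
    {
        Library::initLib();
        std::cout << *Constants::VALUE_ << "\n";      // constants usable until exit
        return 0;  // at exit: deleteConstants runs first, then termLib closes the library
    }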
diff --git a/c++/src/H5PredType.h b/c++/src/H5PredType.h
index 750902e..2ffc87f 100644
--- a/c++/src/H5PredType.h
+++ b/c++/src/H5PredType.h
@@ -6,12 +6,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef __H5PredType_H
@@ -30,207 +28,207 @@ namespace H5 {
*/
class H5_DLLCPP PredType : public AtomType {
public:
- ///\brief Returns this class name.
- virtual H5std_string fromClass () const { return("PredType"); }
-
- // Makes a copy of the predefined type and stores the new
- // id in the left hand side object.
- PredType& operator=( const PredType& rhs );
-
- // Copy constructor - makes copy of the original object
- PredType( const PredType& original );
-
- // Noop destructor
- virtual ~PredType();
-
- /*! \brief This dummy function do not inherit from DataType - it will
- throw a DataTypeIException if invoked.
- */
- void commit(H5Location& loc, const H5std_string& name );
- /*! \brief This dummy function do not inherit from DataType - it will
- throw a DataTypeIException if invoked.
- */
- void commit(H5Location& loc, const char* name );
- /*! \brief This dummy function do not inherit from DataType - it will
- throw a DataTypeIException if invoked.
- */
- bool committed();
-
- ///\brief PredType constants
- static const PredType& STD_I8BE;
- static const PredType& STD_I8LE;
- static const PredType& STD_I16BE;
- static const PredType& STD_I16LE;
- static const PredType& STD_I32BE;
- static const PredType& STD_I32LE;
- static const PredType& STD_I64BE;
- static const PredType& STD_I64LE;
- static const PredType& STD_U8BE;
- static const PredType& STD_U8LE;
- static const PredType& STD_U16BE;
- static const PredType& STD_U16LE;
- static const PredType& STD_U32BE;
- static const PredType& STD_U32LE;
- static const PredType& STD_U64BE;
- static const PredType& STD_U64LE;
- static const PredType& STD_B8BE;
- static const PredType& STD_B8LE;
- static const PredType& STD_B16BE;
- static const PredType& STD_B16LE;
- static const PredType& STD_B32BE;
- static const PredType& STD_B32LE;
- static const PredType& STD_B64BE;
- static const PredType& STD_B64LE;
- static const PredType& STD_REF_OBJ;
- static const PredType& STD_REF_DSETREG;
-
- static const PredType& C_S1;
- static const PredType& FORTRAN_S1;
-
- static const PredType& IEEE_F32BE;
- static const PredType& IEEE_F32LE;
- static const PredType& IEEE_F64BE;
- static const PredType& IEEE_F64LE;
-
- static const PredType& UNIX_D32BE;
- static const PredType& UNIX_D32LE;
- static const PredType& UNIX_D64BE;
- static const PredType& UNIX_D64LE;
-
- static const PredType& INTEL_I8;
- static const PredType& INTEL_I16;
- static const PredType& INTEL_I32;
- static const PredType& INTEL_I64;
- static const PredType& INTEL_U8;
- static const PredType& INTEL_U16;
- static const PredType& INTEL_U32;
- static const PredType& INTEL_U64;
- static const PredType& INTEL_B8;
- static const PredType& INTEL_B16;
- static const PredType& INTEL_B32;
- static const PredType& INTEL_B64;
- static const PredType& INTEL_F32;
- static const PredType& INTEL_F64;
-
- static const PredType& ALPHA_I8;
- static const PredType& ALPHA_I16;
- static const PredType& ALPHA_I32;
- static const PredType& ALPHA_I64;
- static const PredType& ALPHA_U8;
- static const PredType& ALPHA_U16;
- static const PredType& ALPHA_U32;
- static const PredType& ALPHA_U64;
- static const PredType& ALPHA_B8;
- static const PredType& ALPHA_B16;
- static const PredType& ALPHA_B32;
- static const PredType& ALPHA_B64;
- static const PredType& ALPHA_F32;
- static const PredType& ALPHA_F64;
-
- static const PredType& MIPS_I8;
- static const PredType& MIPS_I16;
- static const PredType& MIPS_I32;
- static const PredType& MIPS_I64;
- static const PredType& MIPS_U8;
- static const PredType& MIPS_U16;
- static const PredType& MIPS_U32;
- static const PredType& MIPS_U64;
- static const PredType& MIPS_B8;
- static const PredType& MIPS_B16;
- static const PredType& MIPS_B32;
- static const PredType& MIPS_B64;
- static const PredType& MIPS_F32;
- static const PredType& MIPS_F64;
-
- static const PredType& NATIVE_CHAR;
- static const PredType& NATIVE_SCHAR;
- static const PredType& NATIVE_UCHAR;
- static const PredType& NATIVE_SHORT;
- static const PredType& NATIVE_USHORT;
- static const PredType& NATIVE_INT;
- static const PredType& NATIVE_UINT;
- static const PredType& NATIVE_LONG;
- static const PredType& NATIVE_ULONG;
- static const PredType& NATIVE_LLONG;
- static const PredType& NATIVE_ULLONG;
- static const PredType& NATIVE_FLOAT;
- static const PredType& NATIVE_DOUBLE;
- static const PredType& NATIVE_LDOUBLE;
- static const PredType& NATIVE_B8;
- static const PredType& NATIVE_B16;
- static const PredType& NATIVE_B32;
- static const PredType& NATIVE_B64;
- static const PredType& NATIVE_OPAQUE;
- static const PredType& NATIVE_HSIZE;
- static const PredType& NATIVE_HSSIZE;
- static const PredType& NATIVE_HERR;
- static const PredType& NATIVE_HBOOL;
-
- static const PredType& NATIVE_INT8;
- static const PredType& NATIVE_UINT8;
- static const PredType& NATIVE_INT16;
- static const PredType& NATIVE_UINT16;
- static const PredType& NATIVE_INT32;
- static const PredType& NATIVE_UINT32;
- static const PredType& NATIVE_INT64;
- static const PredType& NATIVE_UINT64;
+ ///\brief Returns this class name.
+ virtual H5std_string fromClass () const { return("PredType"); }
+
+ // Makes a copy of the predefined type and stores the new
+ // id in the left hand side object.
+ PredType& operator=(const PredType& rhs);
+
+ // Copy constructor - makes copy of the original object
+ PredType(const PredType& original);
+
+ // Noop destructor
+ virtual ~PredType();
+
+ /*! \brief This dummy function does not inherit from DataType - it will
+ throw a DataTypeIException if invoked.
+ */
+ void commit(H5Location& loc, const H5std_string& name);
+ /*! \brief This dummy function does not inherit from DataType - it will
+ throw a DataTypeIException if invoked.
+ */
+ void commit(H5Location& loc, const char* name);
+ /*! \brief This dummy function does not inherit from DataType - it will
+ throw a DataTypeIException if invoked.
+ */
+ bool committed();
+
+ ///\brief PredType constants
+ static const PredType& STD_I8BE;
+ static const PredType& STD_I8LE;
+ static const PredType& STD_I16BE;
+ static const PredType& STD_I16LE;
+ static const PredType& STD_I32BE;
+ static const PredType& STD_I32LE;
+ static const PredType& STD_I64BE;
+ static const PredType& STD_I64LE;
+ static const PredType& STD_U8BE;
+ static const PredType& STD_U8LE;
+ static const PredType& STD_U16BE;
+ static const PredType& STD_U16LE;
+ static const PredType& STD_U32BE;
+ static const PredType& STD_U32LE;
+ static const PredType& STD_U64BE;
+ static const PredType& STD_U64LE;
+ static const PredType& STD_B8BE;
+ static const PredType& STD_B8LE;
+ static const PredType& STD_B16BE;
+ static const PredType& STD_B16LE;
+ static const PredType& STD_B32BE;
+ static const PredType& STD_B32LE;
+ static const PredType& STD_B64BE;
+ static const PredType& STD_B64LE;
+ static const PredType& STD_REF_OBJ;
+ static const PredType& STD_REF_DSETREG;
+
+ static const PredType& C_S1;
+ static const PredType& FORTRAN_S1;
+
+ static const PredType& IEEE_F32BE;
+ static const PredType& IEEE_F32LE;
+ static const PredType& IEEE_F64BE;
+ static const PredType& IEEE_F64LE;
+
+ static const PredType& UNIX_D32BE;
+ static const PredType& UNIX_D32LE;
+ static const PredType& UNIX_D64BE;
+ static const PredType& UNIX_D64LE;
+
+ static const PredType& INTEL_I8;
+ static const PredType& INTEL_I16;
+ static const PredType& INTEL_I32;
+ static const PredType& INTEL_I64;
+ static const PredType& INTEL_U8;
+ static const PredType& INTEL_U16;
+ static const PredType& INTEL_U32;
+ static const PredType& INTEL_U64;
+ static const PredType& INTEL_B8;
+ static const PredType& INTEL_B16;
+ static const PredType& INTEL_B32;
+ static const PredType& INTEL_B64;
+ static const PredType& INTEL_F32;
+ static const PredType& INTEL_F64;
+
+ static const PredType& ALPHA_I8;
+ static const PredType& ALPHA_I16;
+ static const PredType& ALPHA_I32;
+ static const PredType& ALPHA_I64;
+ static const PredType& ALPHA_U8;
+ static const PredType& ALPHA_U16;
+ static const PredType& ALPHA_U32;
+ static const PredType& ALPHA_U64;
+ static const PredType& ALPHA_B8;
+ static const PredType& ALPHA_B16;
+ static const PredType& ALPHA_B32;
+ static const PredType& ALPHA_B64;
+ static const PredType& ALPHA_F32;
+ static const PredType& ALPHA_F64;
+
+ static const PredType& MIPS_I8;
+ static const PredType& MIPS_I16;
+ static const PredType& MIPS_I32;
+ static const PredType& MIPS_I64;
+ static const PredType& MIPS_U8;
+ static const PredType& MIPS_U16;
+ static const PredType& MIPS_U32;
+ static const PredType& MIPS_U64;
+ static const PredType& MIPS_B8;
+ static const PredType& MIPS_B16;
+ static const PredType& MIPS_B32;
+ static const PredType& MIPS_B64;
+ static const PredType& MIPS_F32;
+ static const PredType& MIPS_F64;
+
+ static const PredType& NATIVE_CHAR;
+ static const PredType& NATIVE_SCHAR;
+ static const PredType& NATIVE_UCHAR;
+ static const PredType& NATIVE_SHORT;
+ static const PredType& NATIVE_USHORT;
+ static const PredType& NATIVE_INT;
+ static const PredType& NATIVE_UINT;
+ static const PredType& NATIVE_LONG;
+ static const PredType& NATIVE_ULONG;
+ static const PredType& NATIVE_LLONG;
+ static const PredType& NATIVE_ULLONG;
+ static const PredType& NATIVE_FLOAT;
+ static const PredType& NATIVE_DOUBLE;
+ static const PredType& NATIVE_LDOUBLE;
+ static const PredType& NATIVE_B8;
+ static const PredType& NATIVE_B16;
+ static const PredType& NATIVE_B32;
+ static const PredType& NATIVE_B64;
+ static const PredType& NATIVE_OPAQUE;
+ static const PredType& NATIVE_HSIZE;
+ static const PredType& NATIVE_HSSIZE;
+ static const PredType& NATIVE_HERR;
+ static const PredType& NATIVE_HBOOL;
+
+ static const PredType& NATIVE_INT8;
+ static const PredType& NATIVE_UINT8;
+ static const PredType& NATIVE_INT16;
+ static const PredType& NATIVE_UINT16;
+ static const PredType& NATIVE_INT32;
+ static const PredType& NATIVE_UINT32;
+ static const PredType& NATIVE_INT64;
+ static const PredType& NATIVE_UINT64;
// LEAST types
#if H5_SIZEOF_INT_LEAST8_T != 0
- static const PredType& NATIVE_INT_LEAST8;
+ static const PredType& NATIVE_INT_LEAST8;
#endif /* H5_SIZEOF_INT_LEAST8_T */
#if H5_SIZEOF_UINT_LEAST8_T != 0
- static const PredType& NATIVE_UINT_LEAST8;
+ static const PredType& NATIVE_UINT_LEAST8;
#endif /* H5_SIZEOF_UINT_LEAST8_T */
#if H5_SIZEOF_INT_LEAST16_T != 0
- static const PredType& NATIVE_INT_LEAST16;
+ static const PredType& NATIVE_INT_LEAST16;
#endif /* H5_SIZEOF_INT_LEAST16_T */
#if H5_SIZEOF_UINT_LEAST16_T != 0
- static const PredType& NATIVE_UINT_LEAST16;
+ static const PredType& NATIVE_UINT_LEAST16;
#endif /* H5_SIZEOF_UINT_LEAST16_T */
#if H5_SIZEOF_INT_LEAST32_T != 0
- static const PredType& NATIVE_INT_LEAST32;
+ static const PredType& NATIVE_INT_LEAST32;
#endif /* H5_SIZEOF_INT_LEAST32_T */
#if H5_SIZEOF_UINT_LEAST32_T != 0
- static const PredType& NATIVE_UINT_LEAST32;
+ static const PredType& NATIVE_UINT_LEAST32;
#endif /* H5_SIZEOF_UINT_LEAST32_T */
#if H5_SIZEOF_INT_LEAST64_T != 0
- static const PredType& NATIVE_INT_LEAST64;
+ static const PredType& NATIVE_INT_LEAST64;
#endif /* H5_SIZEOF_INT_LEAST64_T */
#if H5_SIZEOF_UINT_LEAST64_T != 0
- static const PredType& NATIVE_UINT_LEAST64;
+ static const PredType& NATIVE_UINT_LEAST64;
#endif /* H5_SIZEOF_UINT_LEAST64_T */
// FAST types
#if H5_SIZEOF_INT_FAST8_T != 0
- static const PredType& NATIVE_INT_FAST8;
+ static const PredType& NATIVE_INT_FAST8;
#endif /* H5_SIZEOF_INT_FAST8_T */
#if H5_SIZEOF_UINT_FAST8_T != 0
- static const PredType& NATIVE_UINT_FAST8;
+ static const PredType& NATIVE_UINT_FAST8;
#endif /* H5_SIZEOF_UINT_FAST8_T */
#if H5_SIZEOF_INT_FAST16_T != 0
- static const PredType& NATIVE_INT_FAST16;
+ static const PredType& NATIVE_INT_FAST16;
#endif /* H5_SIZEOF_INT_FAST16_T */
#if H5_SIZEOF_UINT_FAST16_T != 0
- static const PredType& NATIVE_UINT_FAST16;
+ static const PredType& NATIVE_UINT_FAST16;
#endif /* H5_SIZEOF_UINT_FAST16_T */
#if H5_SIZEOF_INT_FAST32_T != 0
- static const PredType& NATIVE_INT_FAST32;
+ static const PredType& NATIVE_INT_FAST32;
#endif /* H5_SIZEOF_INT_FAST32_T */
#if H5_SIZEOF_UINT_FAST32_T != 0
- static const PredType& NATIVE_UINT_FAST32;
+ static const PredType& NATIVE_UINT_FAST32;
#endif /* H5_SIZEOF_UINT_FAST32_T */
#if H5_SIZEOF_INT_FAST64_T != 0
- static const PredType& NATIVE_INT_FAST64;
+ static const PredType& NATIVE_INT_FAST64;
#endif /* H5_SIZEOF_INT_FAST64_T */
#if H5_SIZEOF_UINT_FAST64_T != 0
- static const PredType& NATIVE_UINT_FAST64;
+ static const PredType& NATIVE_UINT_FAST64;
#endif /* H5_SIZEOF_UINT_FAST64_T */
#ifndef DOXYGEN_SHOULD_SKIP_THIS
@@ -242,11 +240,11 @@ class H5_DLLCPP PredType : public AtomType {
static const PredType& PREDTYPE_CONST; // dummy constant
protected:
- // Default constructor
- PredType();
+ // Default constructor
+ PredType();
- // Creates a pre-defined type using an HDF5 pre-defined constant
- PredType( const hid_t predtype_id ); // used by the library only
+ // Creates a pre-defined type using an HDF5 pre-defined constant
+ PredType(const hid_t predtype_id); // used by the library only
private:
// Activates the creation of the PredType global constants
@@ -438,6 +436,7 @@ class H5_DLLCPP PredType : public AtomType {
#endif // DOXYGEN_SHOULD_SKIP_THIS
-};
-}
+}; // end of PredType
+} // namespace H5
+
#endif // __H5PredType_H
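[Illustration] The header above only swaps each static PredType object for a static const reference, so application code is unchanged by the new allocation scheme. A minimal usage sketch (file and dataset names are arbitrary, error handling omitted):

    #include "H5Cpp.h"

    int main()
    {
        int     data[4] = {1, 2, 3, 4};
        hsize_t dims[1] = {4};

        H5::H5File    file("example.h5", H5F_ACC_TRUNC);
        H5::DataSpace space(1, dims);

        // NATIVE_INT is now a reference bound to a dynamically allocated
        // object, but it is still passed wherever a DataType is expected.
        H5::DataSet dset = file.createDataSet("values",
                                              H5::PredType::NATIVE_INT, space);
        dset.write(data, H5::PredType::NATIVE_INT);
        return 0;
    }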
diff --git a/c++/src/H5PropList.cpp b/c++/src/H5PropList.cpp
index b6f7c15..dd7b21a 100644
--- a/c++/src/H5PropList.cpp
+++ b/c++/src/H5PropList.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifdef OLD_HEADER_FILENAME
@@ -21,11 +19,11 @@
#include <string>
+#include "H5private.h" // for HDmemset
#include "H5Include.h"
#include "H5Exception.h"
#include "H5IdComponent.h"
#include "H5PropList.h"
-#include "H5private.h" // for HDfree
namespace H5 {
@@ -42,7 +40,7 @@ PropList* PropList::DEFAULT_ = 0;
//--------------------------------------------------------------------------
// Function: PropList::getConstant
-// Purpose: Creates a PropList object representing the HDF5 constant
+// Purpose Creates a PropList object representing the HDF5 constant
// H5P_DEFAULT, pointed to by PropList::DEFAULT_.
// Exception H5::PropListIException
// Description
@@ -71,7 +69,7 @@ PropList* PropList::getConstant()
//--------------------------------------------------------------------------
// Function: PropList::deleteConstants
-// Purpose: Deletes the constant object that PropList::DEFAULT_ points to.
+// Purpose Deletes the constant object that PropList::DEFAULT_ points to.
// Programmer Binh-Minh Ribler - 2015
//--------------------------------------------------------------------------
void PropList::deleteConstants()
@@ -81,24 +79,24 @@ void PropList::deleteConstants()
}
//--------------------------------------------------------------------------
-// Purpose Constant for default property.
+// Purpose Constant for default property.
//--------------------------------------------------------------------------
const PropList& PropList::DEFAULT = *getConstant();
#endif // DOXYGEN_SHOULD_SKIP_THIS
//--------------------------------------------------------------------------
-// Function Default constructor
-///\brief Default constructor: creates a stub property list object.
-// Programmer Binh-Minh Ribler - 2000
+// Function: Default constructor
+///\brief Default constructor: creates a stub property list object.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
PropList::PropList() : IdComponent(), id(H5P_DEFAULT) {}
//--------------------------------------------------------------------------
-// Function: PropList copy constructor
-///\brief Copy constructor
-///\param original - IN: The original property list to copy
-// Programmer Binh-Minh Ribler - 2000
+// Function: PropList copy constructor
+///\brief Copy constructor
+///\param original - IN: The original property list to copy
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
PropList::PropList(const PropList& original) : IdComponent(), id(original.id)
{
@@ -106,166 +104,166 @@ PropList::PropList(const PropList& original) : IdComponent(), id(original.id)
}
//--------------------------------------------------------------------------
-// Function: PropList overloaded constructor
-///\brief Creates a property list using the id of an existing property.
-///\param plist_id - IN: Id of the existing property list
-///\exception H5::PropListIException
+// Function: PropList overloaded constructor
+///\brief Creates a property list using the id of an existing property list or class.
+///\param plist_id - IN: Id of the existing property list
+///\exception H5::PropListIException
// Description
-// This function creates a new property list if a property
-// class is provided or makes a copy of a property list if one
-// is given. If the given id is anything else, then set this
-// property's id to H5P_DEFAULT.
-// Programmer Binh-Minh Ribler - 2000
+// This function creates a new property list if a property
+// class is provided or makes a copy of a property list if one
+// is given. If the given id is anything else, then set this
+// property's id to H5P_DEFAULT.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-PropList::PropList( const hid_t plist_id ) : IdComponent()
+PropList::PropList(const hid_t plist_id) : IdComponent()
{
if (plist_id <= 0)
- id = H5P_DEFAULT;
+ id = H5P_DEFAULT;
H5I_type_t id_type = H5Iget_type(plist_id);
switch (id_type) {
- case H5I_GENPROP_CLS:
- // call C routine to create a new property from the given prop class
- id = H5Pcreate(plist_id);
- if( id < 0 )
- {
- throw PropListIException("PropList constructor", "H5Pcreate failed");
- }
- break;
- case H5I_GENPROP_LST:
- // call C routine to make a copy of the given property list
- id = H5Pcopy(plist_id);
- if( id < 0 )
- {
- throw PropListIException("PropList constructor", "H5Pcopy failed");
- }
- break;
- default:
- id = H5P_DEFAULT;
- break;
+ case H5I_GENPROP_CLS:
+ // call C routine to create a new property from the given prop class
+ id = H5Pcreate(plist_id);
+ if (id < 0)
+ {
+ throw PropListIException("PropList constructor", "H5Pcreate failed");
+ }
+ break;
+ case H5I_GENPROP_LST:
+ // call C routine to make a copy of the given property list
+ id = H5Pcopy(plist_id);
+ if (id < 0)
+ {
+ throw PropListIException("PropList constructor", "H5Pcopy failed");
+ }
+ break;
+ default:
+ id = H5P_DEFAULT;
+ break;
}
}
//--------------------------------------------------------------------------
-// Function: PropList::copy
-///\brief Makes a copy of an existing property list.
-///\param like_plist - IN: Reference to the existing property list
-///\exception H5::PropListIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: PropList::copy
+///\brief Makes a copy of an existing property list.
+///\param like_plist - IN: Reference to the existing property list
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - 2000
// Modification
-// - Replaced resetIdComponent() with decRefCount() to use C
-// library ID reference counting mechanism - BMR, Jun 1, 2004
-// - Replaced decRefCount with close() to let the C library
-// handle the reference counting - BMR, Jun 1, 2006
+// - Replaced resetIdComponent() with decRefCount() to use C
+// library ID reference counting mechanism - BMR, Jun 1, 2004
+// - Replaced decRefCount with close() to let the C library
+// handle the reference counting - BMR, Jun 1, 2006
//--------------------------------------------------------------------------
-void PropList::copy( const PropList& like_plist )
+void PropList::copy(const PropList& like_plist)
{
// If this object is representing an hdf5 object, close it before
// copying like_plist to it
try {
- close();
+ close();
}
catch (Exception& close_error) {
- throw PropListIException(inMemFunc("copy"), close_error.getDetailMsg());
+ throw PropListIException(inMemFunc("copy"), close_error.getDetailMsg());
}
// call C routine to copy the property list
- id = H5Pcopy( like_plist.getId() );
- if( id < 0 )
- throw PropListIException(inMemFunc("copy"), "H5Pcopy failed");
+ id = H5Pcopy(like_plist.getId());
+ if (id < 0)
+ throw PropListIException(inMemFunc("copy"), "H5Pcopy failed");
}
//--------------------------------------------------------------------------
-// Function: PropList::operator=
-///\brief Assignment operator.
-///\param rhs - IN: Reference to the existing property list
-///\return Reference to PropList instance
-///\exception H5::PropListIException
+// Function: PropList::operator=
+///\brief Assignment operator.
+///\param rhs - IN: Reference to the existing property list
+///\return Reference to PropList instance
+///\exception H5::PropListIException
// Description
-// Makes a copy of the property list on the right hand side
-// and stores the new id in the left hand side object.
-// Programmer Binh-Minh Ribler - 2000
+// Makes a copy of the property list on the right hand side
+// and stores the new id in the left hand side object.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-PropList& PropList::operator=( const PropList& rhs )
+PropList& PropList::operator=(const PropList& rhs)
{
if (this != &rhs)
- copy(rhs);
+ copy(rhs);
return(*this);
}
//--------------------------------------------------------------------------
-// Function: PropList::copyProp
-///\brief Copies a property from this property list or class to another
-///\param dest - IN: Destination property list or class
-///\param name - IN: Name of the property to copy - \c char pointer
-///\exception H5::PropListIException
-// Programmer Binh-Minh Ribler - Jul, 2005
+// Function: PropList::copyProp
+///\brief Copies a property from this property list or class to another
+///\param dest - IN: Destination property list or class
+///\param name - IN: Name of the property to copy - \c char pointer
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - Jul, 2005
//--------------------------------------------------------------------------
void PropList::copyProp(PropList& dest, const char *name) const
{
- hid_t dst_id = dest.getId();
- herr_t ret_value = H5Pcopy_prop(dst_id, id, name);
- if( ret_value < 0 )
- {
- throw PropListIException(inMemFunc("copyProp"), "H5Pcopy_prop failed");
- }
+ hid_t dst_id = dest.getId();
+ herr_t ret_value = H5Pcopy_prop(dst_id, id, name);
+ if (ret_value < 0)
+ {
+ throw PropListIException(inMemFunc("copyProp"), "H5Pcopy_prop failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: PropList::copyProp
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function only in what arguments it
-/// accepts.
-///\param dest - IN: Destination property list or class
-///\param name - IN: Name of the property to copy - \c H5std_string
-// Programmer Binh-Minh Ribler - Jul, 2005
+// Function: PropList::copyProp
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function only in what arguments it
+/// accepts.
+///\param dest - IN: Destination property list or class
+///\param name - IN: Name of the property to copy - \c H5std_string
+// Programmer Binh-Minh Ribler - Jul, 2005
//--------------------------------------------------------------------------
-void PropList::copyProp( PropList& dest, const H5std_string& name ) const
+void PropList::copyProp(PropList& dest, const H5std_string& name) const
{
- copyProp( dest, name.c_str());
+ copyProp(dest, name.c_str());
}
//--------------------------------------------------------------------------
-// Function: PropList::copyProp
-///\brief Copies a property from one list or class to another - Obsolete
-///\param dest - IN: Destination property list or class
-///\param src - IN: Source property list or class
-///\param name - IN: Name of the property to copy - \c char pointer
-///\note This member function will be removed in the next release
-///\exception H5::PropListIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: PropList::copyProp
+///\brief Copies a property from one list or class to another - Obsolete
+///\param dest - IN: Destination property list or class
+///\param src - IN: Source property list or class
+///\param name - IN: Name of the property to copy - \c char pointer
+///\note This member function will be removed in the next release
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void PropList::copyProp( PropList& dest, PropList& src, const char *name ) const
+void PropList::copyProp(PropList& dest, PropList& src, const char *name) const
{
- hid_t dst_id = dest.getId();
- hid_t src_id = src.getId();
- herr_t ret_value = H5Pcopy_prop(dst_id, src_id, name);
- if( ret_value < 0 )
- {
- throw PropListIException(inMemFunc("copyProp"), "H5Pcopy_prop failed");
- }
+ hid_t dst_id = dest.getId();
+ hid_t src_id = src.getId();
+ herr_t ret_value = H5Pcopy_prop(dst_id, src_id, name);
+ if (ret_value < 0)
+ {
+ throw PropListIException(inMemFunc("copyProp"), "H5Pcopy_prop failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: PropList::copyProp
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function only in what arguments it
-/// accepts. - Obsolete
-///\param dest - IN: Destination property list or class
-///\param src - IN: Source property list or class
-///\param name - IN: Name of the property to copy - \c H5std_string
-// Programmer Binh-Minh Ribler - 2000
+// Function: PropList::copyProp
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function only in what arguments it
+/// accepts. - Obsolete
+///\param dest - IN: Destination property list or class
+///\param src - IN: Source property list or class
+///\param name - IN: Name of the property to copy - \c H5std_string
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void PropList::copyProp( PropList& dest, PropList& src, const H5std_string& name ) const
+void PropList::copyProp(PropList& dest, PropList& src, const H5std_string& name) const
{
- copyProp( dest, src, name.c_str());
+ copyProp(dest, src, name.c_str());
}
//--------------------------------------------------------------------------
// Function: PropList::getId
-///\brief Get the id of this property list
-///\return Property list identifier
+///\brief Get the id of this property list
+///\return Property list identifier
// Description:
// Class hierarchy is revised to address bugzilla 1068. Class
// AbstractDS and Attribute are moved out of H5Object. In
@@ -275,7 +273,7 @@ void PropList::copyProp( PropList& dest, PropList& src, const H5std_string& name
//--------------------------------------------------------------------------
hid_t PropList::getId() const
{
- return(id);
+ return(id);
}
#ifndef DOXYGEN_SHOULD_SKIP_THIS
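[Illustration] The overloaded constructor and copy code reindented above behave as follows: a property class id leads to H5Pcreate, an existing property list id leads to H5Pcopy, and anything else falls back to H5P_DEFAULT; assignment goes through copy(), which also calls H5Pcopy. A minimal sketch (the file-access class is just an example, error handling omitted):

    #include "H5Cpp.h"

    int main()
    {
        H5::PropList from_class(H5P_FILE_ACCESS);    // class id -> H5Pcreate
        H5::PropList from_list(from_class.getId());  // list id  -> H5Pcopy

        H5::PropList assigned;                       // H5P_DEFAULT stub
        assigned = from_list;                        // operator= -> copy() -> H5Pcopy

        assigned.close();                            // each object owns its own id
        from_list.close();
        from_class.close();
        return 0;
    }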
@@ -307,444 +305,500 @@ void PropList::p_setId(const hid_t new_id)
#endif // DOXYGEN_SHOULD_SKIP_THIS
//--------------------------------------------------------------------------
-// Function: PropList::close
-///\brief Closes the property list if it is not a default one.
+// Function: PropList::close
+///\brief Closes the property list if it is not a default one.
///
-///\exception H5::PropListIException
-// Programmer Binh-Minh Ribler - Mar 9, 2005
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - Mar 9, 2005
//--------------------------------------------------------------------------
void PropList::close()
{
if (p_valid_id(id))
{
- herr_t ret_value = H5Pclose( id );
- if( ret_value < 0 )
- {
- throw PropListIException(inMemFunc("close"), "H5Pclose failed");
- }
- // reset the id
- id = H5I_INVALID_HID;
+ herr_t ret_value = H5Pclose(id);
+ if (ret_value < 0)
+ {
+ throw PropListIException(inMemFunc("close"), "H5Pclose failed");
+ }
+ // reset the id
+ id = H5I_INVALID_HID;
}
}
//--------------------------------------------------------------------------
-// Function: PropList::getClass
-///\brief Returns the class of this property list, i.e. \c H5P_FILE_CREATE...
-///\return The property list class if it is not equal to \c H5P_ROOT
-///\exception H5::PropListIException
-// Programmer Binh-Minh Ribler - April, 2004
+// Function: PropList::getClass
+///\brief Returns the class of this property list, i.e. \c H5P_FILE_CREATE...
+///\return The property list class if it is not equal to \c H5P_ROOT
+///\exception H5::PropListIException
+// Programmer Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
hid_t PropList::getClass() const
{
- hid_t plist_class = H5Pget_class( id );
- if( plist_class == H5P_ROOT )
- {
- throw PropListIException(inMemFunc("getClass"),
- "H5Pget_class failed - returned H5P_ROOT");
- }
- return( plist_class );
+ hid_t plist_class = H5Pget_class(id);
+ if (plist_class == H5P_ROOT)
+ {
+ throw PropListIException(inMemFunc("getClass"),
+ "H5Pget_class failed - returned H5P_ROOT");
+ }
+ return(plist_class);
}
//--------------------------------------------------------------------------
-// Function: PropList::propExist
-///\brief Query the existance of a property in a property object.
-///\param name - IN: Name of property to check for - \c char pointer
-///\return true if the property exists in the property object, and
-/// false, otherwise.
-///\exception H5::PropListIException
+// Function: PropList::propExist
+///\brief Queries the existence of a property in a property object.
+///\param name - IN: Name of property to check for - \c char pointer
+///\return true if the property exists in the property object, and
+/// false, otherwise.
+///\exception H5::PropListIException
///\par Description
-/// This routine checks if a property exists within a property
-/// list or class.
+/// This routine checks if a property exists within a property
+/// list or class.
// Programmer: Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
-bool PropList::propExist(const char* name ) const
-{
- // Calls C routine H5Pexist to determine whether a property exists
- // within a property list or class. It returns a positive value, 0,
- // or a negative value
- htri_t ret_value = H5Pexist(id, name);
- if( ret_value > 0 )
- return true;
- else if( ret_value == 0 )
- return false;
- else // Raise exception when H5Pexist returns a negative value
- {
- throw PropListIException(inMemFunc("propExist"), "H5Pexist failed");
- }
-}
-//--------------------------------------------------------------------------
-// Function: PropList::propExist
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function only in what arguments it
-/// accepts.
-///\param name - IN: Name of property to check for - \c H5std_string
+bool PropList::propExist(const char* name) const
+{
+ // Calls C routine H5Pexist to determine whether a property exists
+ // within a property list or class. It returns a positive value, 0,
+ // or a negative value
+ htri_t ret_value = H5Pexist(id, name);
+ if (ret_value > 0)
+ return true;
+ else if (ret_value == 0)
+ return false;
+ else // Raise exception when H5Pexist returns a negative value
+ {
+ throw PropListIException(inMemFunc("propExist"), "H5Pexist failed");
+ }
+}
+//--------------------------------------------------------------------------
+// Function: PropList::propExist
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function only in what arguments it
+/// accepts.
+///\param name - IN: Name of property to check for - \c H5std_string
// Programmer: Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
-bool PropList::propExist(const H5std_string& name ) const
+bool PropList::propExist(const H5std_string& name) const
{
- return( propExist( name.c_str()) );
+ return(propExist(name.c_str()));
}
//--------------------------------------------------------------------------
-// Function: PropList::closeClass
-///\brief Close a property list class.
+// Function: PropList::closeClass
+///\brief Close a property list class.
///
-///\exception H5::PropListIException
+///\exception H5::PropListIException
///\par Description
-/// Releases memory and detaches a class from the property
-/// list class hierarchy.
+/// Releases memory and detaches a class from the property
+/// list class hierarchy.
// Programmer: Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
void PropList::closeClass() const
{
- herr_t ret_value = H5Pclose_class(id);
- if( ret_value < 0 )
- {
- throw PropListIException(inMemFunc("closeClass"), "H5Pclose_class failed");
- }
+ herr_t ret_value = H5Pclose_class(id);
+ if (ret_value < 0)
+ {
+ throw PropListIException(inMemFunc("closeClass"), "H5Pclose_class failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: PropList::getProperty
-///\brief Query the value of a property in a property list.
-///\param name - IN: Name of property to query - \c char pointer
-///\param value - OUT: Pointer to the buffer for the property value
-///\exception H5::PropListIException
+// Function: PropList::getProperty
+///\brief Query the value of a property in a property list.
+///\param name - IN: Name of property to query - \c char pointer
+///\param value - OUT: Pointer to the buffer for the property value
+///\exception H5::PropListIException
///\par Description
-/// Retrieves a copy of the value for a property in a property
-/// list. The property name must exist or this routine will
-/// throw an exception.
+/// Retrieves a copy of the value for a property in a property
+/// list. The property name must exist or this routine will
+/// throw an exception.
// Programmer: Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
void PropList::getProperty(const char* name, void* value) const
{
- herr_t ret_value = H5Pget(id, name, value);
- if (ret_value < 0)
- {
- throw PropListIException(inMemFunc("getProperty"), "H5Pget failed");
- }
+ herr_t ret_value = H5Pget(id, name, value);
+ if (ret_value < 0)
+ {
+ throw PropListIException(inMemFunc("getProperty"), "H5Pget failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: PropList::getProperty
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function only in what arguments it
-/// accepts.
-///\param name - IN: Name of property to query - \c char pointer
-///\return The property that is a \c H5std_string.
-///\exception H5::PropListIException
+// Function: PropList::getProperty
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function only in what arguments it
+/// accepts.
+///\param name - IN: Name of property to query - \c char pointer
+///\return The property that is a \c H5std_string.
+///\exception H5::PropListIException
// Programmer: Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
H5std_string PropList::getProperty(const char* name) const
{
- // Get property size first
- size_t size = getPropSize(name);
+ // Get property size first
+ size_t size = getPropSize(name);
- // Allocate buffer then get the property
- char* prop_strg_C = new char[size+1]; // temporary C-string for C API
- HDmemset(prop_strg_C, 0, size+1); // clear buffer
+ // Allocate buffer then get the property
+ char* prop_strg_C = new char[size+1]; // temporary C-string for C API
+ HDmemset(prop_strg_C, 0, size+1); // clear buffer
- herr_t ret_value = H5Pget(id, name, prop_strg_C); // call C API
+ herr_t ret_value = H5Pget(id, name, prop_strg_C); // call C API
- // Throw exception if H5Pget returns failure
- if (ret_value < 0)
- {
+ // Throw exception if H5Pget returns failure
+ if (ret_value < 0)
+ {
delete []prop_strg_C;
- throw PropListIException(inMemFunc("getProperty"), "H5Pget failed");
- }
+ throw PropListIException(inMemFunc("getProperty"), "H5Pget failed");
+ }
- // Return propety value as a string after deleting temp C-string
- H5std_string prop_strg(prop_strg_C);
- delete []prop_strg_C;
- return (prop_strg);
+ // Return property value as a string after deleting temp C-string
+ H5std_string prop_strg(prop_strg_C);
+ delete []prop_strg_C;
+ return (prop_strg);
}
//--------------------------------------------------------------------------
-// Function: PropList::getProperty
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function only in what arguments it
-/// accepts.
-///\param name - IN: Name of property to query - \c H5std_string
-///\param value - OUT: Pointer to the buffer for the property value
+// Function: PropList::getProperty
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function only in what arguments it
+/// accepts.
+///\param name - IN: Name of property to query - \c H5std_string
+///\param value - OUT: Pointer to the buffer for the property value
// Programmer: Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
void PropList::getProperty(const H5std_string& name, void* value) const
{
- getProperty(name.c_str(), value);
+ getProperty(name.c_str(), value);
}
//--------------------------------------------------------------------------
-// Function: PropList::getProperty
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function only in what arguments it
-/// accepts.
-///\param name - IN: Name of property to query - \c H5std_string
-///\return The property that is a \c H5std_string.
+// Function: PropList::getProperty
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function only in what arguments it
+/// accepts.
+///\param name - IN: Name of property to query - \c H5std_string
+///\return The property that is a \c H5std_string.
// Programmer: Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
H5std_string PropList::getProperty(const H5std_string& name) const
{
- return (getProperty(name.c_str()));
+ return (getProperty(name.c_str()));
}
//--------------------------------------------------------------------------
-// Function: PropList::getPropSize
-///\brief Query the size of a property in a property list or class.
-///\param name - IN: Name of property to query
-///\return Size of the property
-///\exception H5::PropListIException
+// Function: PropList::getPropSize
+///\brief Query the size of a property in a property list or class.
+///\param name - IN: Name of property to query
+///\return Size of the property
+///\exception H5::PropListIException
///\par Description
-/// This routine retrieves the size of a property's value
-/// in bytes. Zero-sized properties are allowed and the return
-/// value will be of 0. This function works for both property
-/// lists and classes.
+/// This routine retrieves the size of a property's value
+/// in bytes. Zero-sized properties are allowed and the return
+/// value will be 0. This function works for both property
+/// lists and classes.
// Programmer: Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
size_t PropList::getPropSize(const char *name) const
{
- size_t prop_size;
- herr_t ret_value = H5Pget_size(id, name, &prop_size);
- if (ret_value < 0)
- {
- throw PropListIException(inMemFunc("getPropSize"), "H5Pget_size failed");
- }
- return(prop_size);
+ size_t prop_size;
+ herr_t ret_value = H5Pget_size(id, name, &prop_size);
+ if (ret_value < 0)
+ {
+ throw PropListIException(inMemFunc("getPropSize"), "H5Pget_size failed");
+ }
+ return(prop_size);
}
//--------------------------------------------------------------------------
-// Function: PropList::getPropSize
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function only in what arguments it
-/// accepts.
-///\param name - IN: Name of property to query - \c H5std_string
+// Function: PropList::getPropSize
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function only in what arguments it
+/// accepts.
+///\param name - IN: Name of property to query - \c H5std_string
///
// Programmer: Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
size_t PropList::getPropSize(const H5std_string& name) const
{
- return (getPropSize(name.c_str()));
+ return (getPropSize(name.c_str()));
}
//--------------------------------------------------------------------------
-// Function: PropList::getClassName
-///\brief Return the name of a generic property list class.
-///\return A string containing the class name, if success, otherwise,
-/// a NULL string.
+// Function: PropList::getClassName
+///\brief Return the name of a generic property list class.
+///\return A string containing the class name if successful; otherwise,
+/// a NULL string.
// Programmer: Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
H5std_string PropList::getClassName() const
{
- char* temp_str;
- temp_str = H5Pget_class_name(id); // this API specified that temp_str must
- // be freed.
-
- if (temp_str != NULL)
- {
- H5std_string class_name(temp_str);
- H5free_memory(temp_str);
- return(class_name);
- }
- else
- return 0;
+ char* temp_str;
+ temp_str = H5Pget_class_name(id);
+ if (temp_str != NULL)
+ {
+ H5std_string class_name(temp_str);
+ H5free_memory(temp_str);
+ return(class_name);
+ }
+ else
+        return(""); // empty string; constructing an H5std_string from 0 (a null pointer) is undefined
}
//--------------------------------------------------------------------------
-// Function: PropList::getNumProps
-///\brief Returns the number of properties in this property list or class.
-///\return Size of the property.
-///\exception H5::PropListIException
+// Function: PropList::getNumProps
+///\brief Returns the number of properties in this property list or class.
+///\return     Number of properties in this property list or class.
+///\exception H5::PropListIException
// Programmer: Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
size_t PropList::getNumProps() const
{
- size_t nprops;
- herr_t ret_value = H5Pget_nprops (id, &nprops);
- if (ret_value < 0)
- {
- throw PropListIException(inMemFunc("getNumProps"), "H5Pget_nprops failed");
- }
- return (nprops);
+ size_t nprops;
+ herr_t ret_value = H5Pget_nprops (id, &nprops);
+ if (ret_value < 0)
+ {
+ throw PropListIException(inMemFunc("getNumProps"), "H5Pget_nprops failed");
+ }
+ return (nprops);
}
//--------------------------------------------------------------------------
-// Function: PropList::setProperty
-///\brief Set a property's value in a property list.
-///\param name - IN: Name of property to set - \c char pointer
-///\param value - IN: Void pointer to the value for the property
-///\exception H5::PropListIException
+// Function: PropList::setProperty
+///\brief Set a property's value in a property list.
+///\param name - IN: Name of property to set - \c char pointer
+///\param value - IN: Void pointer to the value for the property
+///\exception H5::PropListIException
+// Description
+//             Revision svn r29815 changed 'value' to const; hence, the
+//             non-const setProperty is deprecated.
// Programmer: Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
+void PropList::setProperty(const char* name, const void* value) const
+{
+ herr_t ret_value = H5Pset(id, name, value);
+ if (ret_value < 0)
+ {
+ throw PropListIException(inMemFunc("setProperty"), "H5Pset failed");
+ }
+}
+
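A minimal sketch of the new const-correct overloads (editorial, not part of the patch); "threshold" and "label" are hypothetical properties assumed to have been registered on the list's class beforehand, e.g. with the C API H5Pregister2:

    #include "H5Cpp.h"
    using namespace H5;

    // "threshold" and "label" are hypothetical properties assumed to be
    // registered on plist's class (e.g. via the C API H5Pregister2).
    void update(const PropList& plist)
    {
        const int value = 42;
        plist.setProperty("threshold", &value);    // new const void* overload
        plist.setProperty("label", "calibrated");  // const char* overload
    }
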
+//--------------------------------------------------------------------------
+// Function: PropList::setProperty
+///\brief Deprecated due to missing const in prototype. (1.10.1)
+// Programmer: Binh-Minh Ribler - March, 2017
+// Modification
+// Planned for removal. -BMR, 2017/03/17 1.10.1
+//--------------------------------------------------------------------------
void PropList::setProperty(const char* name, void* value) const
{
- herr_t ret_value = H5Pset(id, name, value);
- if (ret_value < 0)
- {
- throw PropListIException(inMemFunc("setProperty"), "H5Pset failed");
- }
+ herr_t ret_value = H5Pset(id, name, value);
+ if (ret_value < 0)
+ {
+ throw PropListIException(inMemFunc("setProperty"), "H5Pset failed");
+ }
}
+
//--------------------------------------------------------------------------
-// Function: PropList::setProperty
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function only in what arguments it
-/// accepts.
-///\param name - IN: Name of property to set - \c char pointer
-///\param charptr - IN: Char pointer to the value for the property
+// Function: PropList::setProperty
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function only in what arguments it
+/// accepts.
+///\param name - IN: Name of property to set - \c char pointer
+///\param charptr - IN: Char pointer to the value for the property
+// Description
+//             Revision svn r29815 changed 'value' to const; hence, the
+//             non-const setProperty is deprecated.
// Programmer: Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
void PropList::setProperty(const char* name, const char* charptr) const
{
- herr_t ret_value = H5Pset(id, name, (void*)charptr);
- if (ret_value < 0)
- {
- throw PropListIException(inMemFunc("setProperty"), "H5Pset failed");
- }
+ herr_t ret_value = H5Pset(id, name, (const void*)charptr);
+ if (ret_value < 0)
+ {
+ throw PropListIException(inMemFunc("setProperty"), "H5Pset failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: PropList::setProperty
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function only in what arguments it
-/// accepts.
-///\param name - IN: Name of property to set - \c char pointer
-///\param strg - IN: Value for the property is a \c H5std_string
+// Function: PropList::setProperty
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function only in what arguments it
+/// accepts.
+///\param name - IN: Name of property to set - \c char pointer
+///\param strg - IN: Value for the property is a \c H5std_string
// Programmer: Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
+void PropList::setProperty(const char* name, const H5std_string& strg) const
+{
+ setProperty(name, strg.c_str());
+}
+
+//--------------------------------------------------------------------------
+// Function: PropList::setProperty
+///\brief Deprecated due to missing const in prototype. (1.10.1)
+// Programmer: Binh-Minh Ribler - March, 2017
+// Modification
+// Planned for removal. -BMR, 2017/03/17 1.10.1
+//--------------------------------------------------------------------------
void PropList::setProperty(const char* name, H5std_string& strg) const
{
- setProperty(name, strg.c_str());
+ setProperty(name, strg.c_str());
}
//--------------------------------------------------------------------------
-// Function: PropList::setProperty
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function only in what arguments it
-/// accepts.
-///\param name - IN: Name of property to set - \c H5std_string
-///\param value - IN: Void pointer to the value for the property
+// Function: PropList::setProperty
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function only in what arguments it
+/// accepts.
+///\param name - IN: Name of property to set - \c H5std_string
+///\param value - IN: Void pointer to the value for the property
// Programmer: Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
+void PropList::setProperty(const H5std_string& name, const void* value) const
+{
+ setProperty(name.c_str(), value);
+}
+
+//--------------------------------------------------------------------------
+// Function: PropList::setProperty
+///\brief Deprecated due to missing const in prototype. (1.10.1)
+// Programmer: Binh-Minh Ribler - March, 2017
+// Modification
+// Planned for removal. -BMR, 2017/03/17 1.10.1
+//--------------------------------------------------------------------------
void PropList::setProperty(const H5std_string& name, void* value) const
{
- setProperty(name.c_str(), value);
+ setProperty(name.c_str(), value);
}
//--------------------------------------------------------------------------
-// Function: PropList::setProperty
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function only in what arguments it
-/// accepts.
-///\param name - IN: Name of property to set - \c H5std_string
-///\param strg - IN: Value for the property is a \c H5std_string
+// Function: PropList::setProperty
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function only in what arguments it
+/// accepts.
+///\param name - IN: Name of property to set - \c H5std_string
+///\param strg - IN: Value for the property is a \c H5std_string
// Programmer: Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
+void PropList::setProperty(const H5std_string& name, const H5std_string& strg) const
+{
+ setProperty(name.c_str(), strg.c_str());
+}
+
+//--------------------------------------------------------------------------
+// Function: PropList::setProperty
+///\brief Deprecated due to missing const in prototype. (1.10.1)
+// Programmer: Binh-Minh Ribler - March, 2017
+// Modification
+// Planned for removal. -BMR, 2017/03/17 1.10.1
+//--------------------------------------------------------------------------
void PropList::setProperty(const H5std_string& name, H5std_string& strg) const
{
- setProperty(name.c_str(), strg.c_str());
+ setProperty(name.c_str(), strg.c_str());
}
//--------------------------------------------------------------------------
-// Function: PropList::isAClass
-///\brief Determines whether a property list is a certain class.
-///\param prop_class - IN: Property class to query
-///\return true if the property list is a member of the property list
-/// class, and false, otherwise.
-///\exception H5::PropListIException
+// Function: PropList::isAClass
+///\brief Determines whether a property list is a certain class.
+///\param prop_class - IN: Property class to query
+///\return true if the property list is a member of the property list
+///            class, and false otherwise.
+///\exception H5::PropListIException
// Programmer: Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
bool PropList::isAClass(const PropList& prop_class) const
{
- htri_t ret_value = H5Pisa_class(id, prop_class.getId());
- if( ret_value > 0 )
- return true;
- else if( ret_value == 0 )
- return false;
- else // Raise exception when H5Pisa_class returns a negative value
- {
- throw PropListIException(inMemFunc("isAClass"), "H5Pisa_class failed");
- }
-
+ htri_t ret_value = H5Pisa_class(id, prop_class.getId());
+ if (ret_value > 0)
+ return true;
+ else if (ret_value == 0)
+ return false;
+ else // Raise exception when H5Pisa_class returns a negative value
+ {
+ throw PropListIException(inMemFunc("isAClass"), "H5Pisa_class failed");
+ }
}
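
A hedged usage sketch (not part of the patch): H5Pisa_class compares a property list against a property-list *class*, so 'prop_class' below is assumed to wrap a class identifier rather than another list:

    #include "H5Cpp.h"
    using namespace H5;

    // 'prop_class' is assumed to wrap a property-list class identifier.
    bool belongs_to(const PropList& plist, const PropList& prop_class)
    {
        return plist.isAClass(prop_class);
    }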
//--------------------------------------------------------------------------
-// Function: PropList::removeProp
-///\brief Removes a property from a property list.
-///\param name - IN: Name of property to remove - \c char pointer
-///\exception H5::PropListIException
+// Function: PropList::removeProp
+///\brief Removes a property from a property list.
+///\param name - IN: Name of property to remove - \c char pointer
+///\exception H5::PropListIException
// Programmer: Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
void PropList::removeProp(const char *name) const
{
- herr_t ret_value = H5Premove(id, name);
- if (ret_value < 0)
- {
- throw PropListIException(inMemFunc("removeProp"), "H5Premove failed");
- }
+ herr_t ret_value = H5Premove(id, name);
+ if (ret_value < 0)
+ {
+ throw PropListIException(inMemFunc("removeProp"), "H5Premove failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: PropList::removeProp
-///\brief This is an overloaded member function, provided for convenience.
-/// It differs from the above function only in what arguments it
-/// accepts.
-///\param name - IN: Name of property to remove - \c H5std_string
+// Function: PropList::removeProp
+///\brief This is an overloaded member function, provided for convenience.
+/// It differs from the above function only in what arguments it
+/// accepts.
+///\param name - IN: Name of property to remove - \c H5std_string
// Programmer: Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
void PropList::removeProp(const H5std_string& name) const
{
- removeProp(name.c_str());
+ removeProp(name.c_str());
}
//--------------------------------------------------------------------------
-// Function: PropList::operator==
-///\brief Compares this property list or class against the given list or class.
-///\param rhs - IN: Reference to the property list to compare
-///\return true if the property lists or classes are equal, and
-/// false, otherwise.
-///\exception H5::PropListIException
+// Function: PropList::operator==
+///\brief Compares this property list or class against the given list or class.
+///\param rhs - IN: Reference to the property list to compare
+///\return true if the property lists or classes are equal, and
+///            false otherwise.
+///\exception H5::PropListIException
// Programmer: Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
bool PropList::operator==(const PropList& rhs) const
{
- htri_t ret_value = H5Pequal(id, rhs.getId());
- if( ret_value > 0 )
- return true;
- else if( ret_value == 0 )
- return false;
- else // Raise exception when H5Pequal returns a negative value
- {
- throw PropListIException(inMemFunc("operator=="), "H5Pequal failed");
- }
+ htri_t ret_value = H5Pequal(id, rhs.getId());
+ if (ret_value > 0)
+ return true;
+ else if (ret_value == 0)
+ return false;
+ else // Raise exception when H5Pequal returns a negative value
+ {
+ throw PropListIException(inMemFunc("operator=="), "H5Pequal failed");
+ }
}
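
A small sketch of the equality operator (editorial, not part of the patch), using DSetCreatPropList purely as a convenient concrete subclass:

    #include "H5Cpp.h"
    using namespace H5;

    void compare_lists()
    {
        DSetCreatPropList a;
        a.setDeflate(6);               // enable gzip compression on a
        DSetCreatPropList b(a);        // copy constructor: identical settings
        bool same = (a == b);          // expected true; wraps H5Pequal
        (void)same;
    }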
//--------------------------------------------------------------------------
-// Function: PropList::getClassParent
-///\brief Returns the parent class of a generic property class
-///\return The parent class of a property class
-///\exception H5::PropListIException
+// Function: PropList::getClassParent
+///\brief Returns the parent class of a generic property class
+///\return The parent class of a property class
+///\exception H5::PropListIException
// Programmer: Binh-Minh Ribler - April, 2004
//--------------------------------------------------------------------------
PropList PropList::getClassParent() const
{
- hid_t class_id = H5Pget_class_parent(id);
- if (class_id < 0)
- {
- throw PropListIException(inMemFunc("getClassParent"), "H5Pget_class_parent failed");
- }
- PropList pclass(class_id);
- return(pclass);
+ hid_t class_id = H5Pget_class_parent(id);
+ if (class_id < 0)
+ {
+ throw PropListIException(inMemFunc("getClassParent"), "H5Pget_class_parent failed");
+ }
+ PropList pclass(class_id);
+ return(pclass);
}
//--------------------------------------------------------------------------
-// Function: PropList destructor
-///\brief Properly terminates access to this property list.
-// Programmer Binh-Minh Ribler - 2000
+// Function: PropList destructor
+///\brief Properly terminates access to this property list.
+// Programmer Binh-Minh Ribler - 2000
// Modification
-// - Replaced resetIdComponent() with decRefCount() to use C
-// library ID reference counting mechanism - BMR, Jun 1, 2004
-// - Replaced decRefCount with close() to let the C library
-// handle the reference counting - BMR, Jun 1, 2006
+// - Replaced resetIdComponent() with decRefCount() to use C
+// library ID reference counting mechanism - BMR, Jun 1, 2004
+// - Replaced decRefCount with close() to let the C library
+// handle the reference counting - BMR, Jun 1, 2006
//--------------------------------------------------------------------------
PropList::~PropList()
{
try {
- close();
+ close();
}
catch (Exception& close_error) {
- cerr << "PropList::~PropList - " << close_error.getDetailMsg() << endl;
+ cerr << "PropList::~PropList - " << close_error.getDetailMsg() << endl;
}
}
diff --git a/c++/src/H5PropList.h b/c++/src/H5PropList.h
index faaf68d..154d7b2 100644
--- a/c++/src/H5PropList.h
+++ b/c++/src/H5PropList.h
@@ -6,12 +6,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef __H5PropList_H
@@ -28,114 +26,120 @@ namespace H5 {
*/
class H5_DLLCPP PropList : public IdComponent {
public:
- ///\brief Default property list
+ ///\brief Default property list
static const PropList& DEFAULT;
- // Creates a property list of a given type or creates a copy of an
- // existing property list giving the property list id.
- PropList(const hid_t plist_id);
+ // Creates a property list of a given type or creates a copy of an
+ // existing property list giving the property list id.
+ PropList(const hid_t plist_id);
- // Make a copy of the given property list using assignment statement
- PropList& operator=( const PropList& rhs );
+ // Make a copy of the given property list using assignment statement
+ PropList& operator=(const PropList& rhs);
- // Compares this property list or class against the given list or class.
- bool operator==(const PropList& rhs) const;
+ // Compares this property list or class against the given list or class.
+ bool operator==(const PropList& rhs) const;
- // Close this property list.
- virtual void close();
+ // Close this property list.
+ virtual void close();
- // Close a property list class.
- void closeClass() const;
+ // Close a property list class.
+ void closeClass() const;
- // Makes a copy of the given property list.
- void copy( const PropList& like_plist );
+ // Makes a copy of the given property list.
+ void copy(const PropList& like_plist);
- // Copies a property from this property list or class to another
- void copyProp( PropList& dest, const char* name) const;
- void copyProp( PropList& dest, const H5std_string& name) const;
+ // Copies a property from this property list or class to another
+ void copyProp(PropList& dest, const char* name) const;
+ void copyProp(PropList& dest, const H5std_string& name) const;
- // Copies a property from one property list or property class to another
- void copyProp( PropList& dest, PropList& src, const char* name) const;
- void copyProp( PropList& dest, PropList& src, const H5std_string& name) const;
+ // Copies a property from one property list or property class to another
+ void copyProp(PropList& dest, PropList& src, const char* name) const;
+ void copyProp(PropList& dest, PropList& src, const H5std_string& name) const;
- // Gets the class of this property list, i.e. H5P_FILE_CREATE,
- // H5P_FILE_ACCESS, ...
- hid_t getClass() const;
+ // Gets the class of this property list, i.e. H5P_FILE_CREATE,
+ // H5P_FILE_ACCESS, ...
+ hid_t getClass() const;
- // Return the name of a generic property list class.
- H5std_string getClassName() const;
+ // Return the name of a generic property list class.
+ H5std_string getClassName() const;
- // Returns the parent class of a generic property class.
- PropList getClassParent() const;
+ // Returns the parent class of a generic property class.
+ PropList getClassParent() const;
- // Returns the number of properties in this property list or class.
- size_t getNumProps() const;
+ // Returns the number of properties in this property list or class.
+ size_t getNumProps() const;
- // Query the value of a property in a property list.
- void getProperty(const char* name, void* value) const;
- void getProperty(const H5std_string& name, void* value) const;
- H5std_string getProperty(const char* name) const;
- H5std_string getProperty(const H5std_string& name) const;
+ // Query the value of a property in a property list.
+ void getProperty(const char* name, void* value) const;
+ void getProperty(const H5std_string& name, void* value) const;
+ H5std_string getProperty(const char* name) const;
+ H5std_string getProperty(const H5std_string& name) const;
- // Set a property's value in a property list.
- void setProperty(const char* name, void* value) const;
- void setProperty(const char* name, const char* charptr) const;
- void setProperty(const char* name, H5std_string& strg) const;
- void setProperty(const H5std_string& name, void* value) const;
- void setProperty(const H5std_string& name, H5std_string& strg) const;
+ // Set a property's value in a property list.
+ void setProperty(const char* name, const char* charptr) const;
+ void setProperty(const char* name, const void* value) const;
+ void setProperty(const char* name, const H5std_string& strg) const;
+ void setProperty(const H5std_string& name, const void* value) const;
+ void setProperty(const H5std_string& name, const H5std_string& strg) const;
+ // Deprecated after 1.10.1, missing const
+ void setProperty(const char* name, void* value) const;
+ void setProperty(const char* name, H5std_string& strg) const;
+ void setProperty(const H5std_string& name, void* value) const;
+ void setProperty(const H5std_string& name, H5std_string& strg) const;
- // Query the size of a property in a property list or class.
- size_t getPropSize(const char *name) const;
- size_t getPropSize(const H5std_string& name) const;
+ // Query the size of a property in a property list or class.
+ size_t getPropSize(const char *name) const;
+ size_t getPropSize(const H5std_string& name) const;
- // Determines whether a property list is a certain class.
- bool isAClass(const PropList& prop_class) const;
+ // Determines whether a property list is a certain class.
+ bool isAClass(const PropList& prop_class) const;
- /// Query the existance of a property in a property object.
- bool propExist(const char* name) const;
- bool propExist(const H5std_string& name) const;
+        /// Query the existence of a property in a property object.
+ bool propExist(const char* name) const;
+ bool propExist(const H5std_string& name) const;
- // Removes a property from a property list.
- void removeProp(const char *name) const;
- void removeProp(const H5std_string& name) const;
+ // Removes a property from a property list.
+ void removeProp(const char *name) const;
+ void removeProp(const H5std_string& name) const;
- ///\brief Returns this class name.
- virtual H5std_string fromClass () const { return("PropList"); }
+ ///\brief Returns this class name.
+ virtual H5std_string fromClass () const { return("PropList"); }
- // Default constructor: creates a stub PropList object.
- PropList();
+ // Default constructor: creates a stub PropList object.
+ PropList();
- // Copy constructor: creates a copy of a PropList object.
- PropList(const PropList& original);
+ // Copy constructor: creates a copy of a PropList object.
+ PropList(const PropList& original);
- // Gets the property list id.
- virtual hid_t getId() const;
+ // Gets the property list id.
+ virtual hid_t getId() const;
- // Destructor: properly terminates access to this property list.
- virtual ~PropList();
+ // Destructor: properly terminates access to this property list.
+ virtual ~PropList();
#ifndef DOXYGEN_SHOULD_SKIP_THIS
- // Deletes the PropList global constant
- static void deleteConstants();
+ // Deletes the PropList global constant
+ static void deleteConstants();
protected:
- hid_t id; // HDF5 property list id
+ hid_t id; // HDF5 property list id
- // Sets the property list id.
- virtual void p_setId(const hid_t new_id);
+ // Sets the property list id.
+ virtual void p_setId(const hid_t new_id);
private:
- static PropList* DEFAULT_;
+ static PropList* DEFAULT_;
- // Dynamically allocates the PropList global constant
- static PropList* getConstant();
+ // Dynamically allocates the PropList global constant
+ static PropList* getConstant();
- // Friend function to set PropList id. For library use only.
- friend void f_PropList_setId(PropList* plist, hid_t new_id);
+ // Friend function to set PropList id. For library use only.
+ friend void f_PropList_setId(PropList* plist, hid_t new_id);
#endif // DOXYGEN_SHOULD_SKIP_THIS
-};
-}
+}; // end of PropList
+} // namespace H5
+
#endif // __H5PropList_H
diff --git a/c++/src/H5StrType.cpp b/c++/src/H5StrType.cpp
index 962a5dc..d5fb744 100644
--- a/c++/src/H5StrType.cpp
+++ b/c++/src/H5StrType.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <string>
@@ -22,6 +20,7 @@
#include "H5OcreatProp.h"
#include "H5DcreatProp.h"
#include "H5DxferProp.h"
+#include "H5LaccProp.h"
#include "H5Location.h"
#include "H5Object.h"
#include "H5DataType.h"
@@ -35,213 +34,249 @@
namespace H5 {
//--------------------------------------------------------------------------
-// Function: StrType default constructor
-///\brief Default constructor: Creates a stub string datatype
-// Programmer Binh-Minh Ribler - 2000
+// Function: StrType default constructor
+///\brief Default constructor: Creates a stub string datatype
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
StrType::StrType() : AtomType() {}
//--------------------------------------------------------------------------
-// Function: StrType overloaded constructor
-///\brief Creates a string datatype using a predefined type.
-///\param pred_type - IN: Predefined datatype
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: StrType overloaded constructor
+///\brief Creates a string datatype using a predefined type.
+///\param pred_type - IN: Predefined datatype
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-StrType::StrType( const PredType& pred_type ) : AtomType()
+StrType::StrType(const PredType& pred_type) : AtomType()
{
- // use DataType::copy to make a copy of this predefined type
- copy( pred_type );
+ // use DataType::copy to make a copy of this predefined type
+ copy(pred_type);
}
//--------------------------------------------------------------------------
-// Function: StrType overloaded constructor
-// Purpose Creates a string datatype with a specified length
-// Param pred_type - IN: String predefined type to replicate.
-// Param size - IN: Length of the new string type
-// Exception H5::DataTypeIException
+// Function: StrType overloaded constructor
+// Purpose Creates a string datatype with a specified length
+// Param pred_type - IN: String predefined type to replicate.
+// Param size - IN: Length of the new string type
+// Exception H5::DataTypeIException
// Description
-// The 1st argument could have been skipped, but this
-// constructor will collide with the one that takes an
-// existing id.
+// The 1st argument could have been skipped, but this
+// constructor will collide with the one that takes an
+// existing id.
//
-// Update: replacing the 1st argument with a dummy 0 to
-// avoid the clashing problem, that doesn't eliminate the
-// the 1st argument but it's simpler for the user to type
-// a '0' than PredType::C_S1. - Dec 2, 2005
+// Update: replacing the 1st argument with a dummy 0 to
+//             avoid the clashing problem; it doesn't eliminate the
+//             1st argument, but it's simpler for the user to type
+//             a '0' than PredType::C_S1. - Dec 2, 2005
// Note
-// The use of this constructor can be shortened by using
-// its overloaded below as StrType(0, size).
-// Programmer Binh-Minh Ribler - 2000
+// The use of this constructor can be shortened by using
+// its overloaded below as StrType(0, size).
+// Programmer Binh-Minh Ribler - 2000
// Modification
-// Planned for removal. -BMR, 2005/12/02
-// Removed from documentation. -BMR, 2016/03/07
+// Planned for removal. -BMR, 2005/12/02
+// Removed from documentation. -BMR, 2016/03/07
//--------------------------------------------------------------------------
-StrType::StrType( const PredType& pred_type, const size_t& size ) : AtomType()
+StrType::StrType(const PredType& pred_type, const size_t& size) : AtomType()
{
- // use DataType::copy to make a copy of the string predefined type
- // then set its length
- copy(pred_type);
- setSize(size);
+ // use DataType::copy to make a copy of the string predefined type
+ // then set its length
+ copy(pred_type);
+ setSize(size);
}
//--------------------------------------------------------------------------
-// Function: StrType overloaded constructor
-///\brief Creates a string datatype with a specified length
-///\param dummy - IN: To simplify calling the previous constructor
-/// and avoid prototype clash with another constructor
-///\param size - IN: Length of the new string type
-///\exception H5::DataTypeIException
+// Function: StrType overloaded constructor
+///\brief Creates a string datatype with a specified length
+///\param dummy - IN: To simplify calling the previous constructor
+/// and avoid prototype clash with another constructor
+///\param size - IN: Length of the new string type
+///\exception H5::DataTypeIException
///\par Description
-/// The 1st argument is just a dummy to simplify calling the
-/// previous constructor, such as:
-/// StrType atype(0, size) instead of
-/// StrType atype(PredType::C_S1, size)
+/// The 1st argument is just a dummy to simplify calling the
+/// previous constructor, such as:
+/// StrType atype(0, size) instead of
+/// StrType atype(PredType::C_S1, size)
// Note
-// This constructor replaced the previous one.
-// Programmer Binh-Minh Ribler - Nov 28, 2005
+// This constructor replaced the previous one.
+// Programmer Binh-Minh Ribler - Nov 28, 2005
//--------------------------------------------------------------------------
-StrType::StrType( const int dummy, const size_t& size ) : AtomType()
+StrType::StrType(const int dummy, const size_t& size) : AtomType()
{
- // use DataType::copy to make a copy of the string predefined type
- // then set its length
- copy(PredType::C_S1);
- setSize(size);
+ // use DataType::copy to make a copy of the string predefined type
+ // then set its length
+ copy(PredType::C_S1);
+ setSize(size);
}
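
An editorial usage sketch (not part of the patch) of the shorthand constructor above; the 32-byte length and the choice of null termination are arbitrary:

    #include "H5Cpp.h"
    using namespace H5;

    // Build a 32-byte, null-terminated fixed-length string type.
    StrType make_label_type()
    {
        StrType str32(0, 32);              // shorthand for StrType(PredType::C_S1, 32)
        str32.setStrpad(H5T_STR_NULLTERM);
        return str32;
    }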
//--------------------------------------------------------------------------
-// Function: StrType overloaded constructor
-///\brief Creates an StrType object using the id of an existing datatype.
-///\param existing_id - IN: Id of an existing datatype
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: StrType overloaded constructor
+///\brief Creates an StrType object using the id of an existing datatype.
+///\param existing_id - IN: Id of an existing datatype
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-StrType::StrType( const hid_t existing_id ) : AtomType( existing_id ) {}
+StrType::StrType(const hid_t existing_id) : AtomType( existing_id ) {}
//--------------------------------------------------------------------------
-// Function: StrType copy constructor
-///\brief Copy constructor: makes a copy of the original StrType object.
-// Programmer Binh-Minh Ribler - 2000
+// Function: StrType copy constructor
+///\brief Copy constructor: makes a copy of the original StrType object.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-StrType::StrType( const StrType& original ) : AtomType ( original ) {}
+StrType::StrType(const StrType& original) : AtomType ( original ) {}
//--------------------------------------------------------------------------
-// Function: StrType overloaded constructor
-///\brief Gets the string datatype of the specified dataset
-///\param dataset - IN: Dataset that this string datatype associates with
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: StrType overloaded constructor
+///\brief Gets the string datatype of the specified dataset
+///\param dataset - IN: Dataset that this string datatype associates with
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-StrType::StrType( const DataSet& dataset ) : AtomType ()
+StrType::StrType(const DataSet& dataset) : AtomType ()
{
- // Calls C function H5Dget_type to get the id of the datatype
- id = H5Dget_type( dataset.getId() );
+ // Calls C function H5Dget_type to get the id of the datatype
+ id = H5Dget_type(dataset.getId());
- if( id < 0 )
- {
- throw DataSetIException("StrType constructor", "H5Dget_type failed");
- }
+ if (id < 0)
+ {
+ throw DataSetIException("StrType constructor", "H5Dget_type failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: StrType::getCset
-///\brief Retrieves the character set type of this string datatype.
-///\return Character set type, which can be:
-/// \li \c H5T_CSET_ASCII (0) - Character set is US ASCII.
+// Function: StrType overloaded constructor
+///\brief Creates an StrType instance by opening an HDF5 string datatype
+/// given its name, provided as a C character string.
+///\param loc - IN: Location of the type
+///\param dtype_name - IN: String type name
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - Dec 2016
+// Description
+// In 1.10.1, this constructor was introduced and may replace the
+// existing function CommonFG::openStrType(const char*) to
+// improve usability.
+// -BMR, Dec 2016
+//--------------------------------------------------------------------------
+StrType::StrType(const H5Location& loc, const char *dtype_name) : AtomType()
+{
+ id = p_opentype(loc, dtype_name);
+}
+
+//--------------------------------------------------------------------------
+// Function: StrType overloaded constructor
+///\brief Creates an StrType instance by opening an HDF5 string datatype
+/// given its name, provided as an \c H5std_string.
+///\param loc - IN: Location of the type
+///\param dtype_name - IN: String type name
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - Dec 2016
+// Description
+// In 1.10.1, this constructor was introduced and may replace the
+// existing function CommonFG::openStrType(const H5std_string&)
+// to improve usability.
+// -BMR, Dec 2016
+//--------------------------------------------------------------------------
+StrType::StrType(const H5Location& loc, const H5std_string& dtype_name) : AtomType()
+{
+ id = p_opentype(loc, dtype_name.c_str());
+}
+
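A sketch of the new open-by-name constructors (editorial, not part of the patch); the file name "sample.h5" and the type name "label_t" are illustrative, and the named string type is assumed to have been committed to the file earlier:

    #include "H5Cpp.h"
    using namespace H5;

    // "sample.h5" and "label_t" are illustrative; the named string type is
    // assumed to have been committed to the file earlier.
    StrType open_label_type()
    {
        H5File file("sample.h5", H5F_ACC_RDONLY);
        return StrType(file, "label_t");
    }
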
+//--------------------------------------------------------------------------
+// Function: StrType::getCset
+///\brief Retrieves the character set type of this string datatype.
+///\return Character set type, which can be:
+/// \li \c H5T_CSET_ASCII (0) - Character set is US ASCII.
///\note
-/// ASCII and UTF-8 Unicode are the only currently supported character
-/// encodings. Extended ASCII encodings (for example, ISO 8859) are not
-/// supported. This encoding policy is not enforced by the HDF5 Library.
-/// Using encodings other than ASCII and UTF-8 can lead to compatibility
-/// and usability problems. See the C API entry H5Pset_char_encoding for
-/// more information.
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+/// ASCII and UTF-8 Unicode are the only currently supported character
+/// encodings. Extended ASCII encodings (for example, ISO 8859) are not
+/// supported. This encoding policy is not enforced by the HDF5 Library.
+/// Using encodings other than ASCII and UTF-8 can lead to compatibility
+/// and usability problems. See the C API entry H5Pset_char_encoding for
+/// more information.
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
H5T_cset_t StrType::getCset() const
{
- H5T_cset_t cset = H5Tget_cset( id );
-
- // Returns a valid character set type if successful
- if( cset == H5T_CSET_ERROR )
- {
- throw DataTypeIException("StrType::getCset", "H5Tget_cset failed");
- }
- return( cset );
+ H5T_cset_t cset = H5Tget_cset(id);
+
+ // Returns a valid character set type if successful
+ if (cset == H5T_CSET_ERROR)
+ {
+ throw DataTypeIException("StrType::getCset", "H5Tget_cset failed");
+ }
+ return(cset);
}
//--------------------------------------------------------------------------
-// Function: StrType::setCset
-///\brief Sets character set to be used.
-///\param cset - IN: character set type, which can be:
-/// \li \c H5T_CSET_ASCII (0) - Character set is US ASCII.
+// Function: StrType::setCset
+///\brief Sets character set to be used.
+///\param cset - IN: character set type, which can be:
+/// \li \c H5T_CSET_ASCII (0) - Character set is US ASCII.
///\note
-/// ASCII and UTF-8 Unicode are the only currently supported character
-/// encodings. Extended ASCII encodings (for example, ISO 8859) are not
-/// supported. This encoding policy is not enforced by the HDF5 Library.
-/// Using encodings other than ASCII and UTF-8 can lead to compatibility
-/// and usability problems. See the C API entry H5Pset_char_encoding for
-/// more information.
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
-//--------------------------------------------------------------------------
-void StrType::setCset( H5T_cset_t cset ) const
+/// ASCII and UTF-8 Unicode are the only currently supported character
+/// encodings. Extended ASCII encodings (for example, ISO 8859) are not
+/// supported. This encoding policy is not enforced by the HDF5 Library.
+/// Using encodings other than ASCII and UTF-8 can lead to compatibility
+/// and usability problems. See the C API entry H5Pset_char_encoding for
+/// more information.
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
+//--------------------------------------------------------------------------
+void StrType::setCset(H5T_cset_t cset) const
{
- herr_t ret_value = H5Tset_cset( id, cset );
-
- if( ret_value < 0 )
- {
- throw DataTypeIException("StrType::setCset", "H5Tset_cset failed");
- }
+ herr_t ret_value = H5Tset_cset(id, cset);
+ if (ret_value < 0)
+ {
+ throw DataTypeIException("StrType::setCset", "H5Tset_cset failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: StrType::getStrpad
-///\brief Retrieves the storage mechanism for of this string datatype.
-///\return String storage mechanism, which can be:
-/// \li \c H5T_STR_NULLTERM (0) - Null terminate (as C does)
-/// \li \c H5T_STR_NULLPAD (0) - Pad with zeros
-/// \li \c H5T_STR_SPACEPAD (0) - pad with spaces (as FORTRAN does)
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - 2000
+// Function: StrType::getStrpad
+///\brief      Retrieves the storage mechanism of this string datatype.
+///\return String storage mechanism, which can be:
+/// \li \c H5T_STR_NULLTERM (0) - Null terminate (as C does)
+///            \li \c H5T_STR_NULLPAD (1) - Pad with zeros
+///            \li \c H5T_STR_SPACEPAD (2) - Pad with spaces (as FORTRAN does)
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
H5T_str_t StrType::getStrpad() const
{
- H5T_str_t strpad = H5Tget_strpad( id );
-
- // Returns a valid string padding type if successful
- if( strpad == H5T_STR_ERROR )
- {
- throw DataTypeIException("StrType::getStrpad",
- "H5Tget_strpad failed - returned H5T_STR_ERROR");
- }
- return( strpad );
+ H5T_str_t strpad = H5Tget_strpad(id);
+
+ // Returns a valid string padding type if successful
+ if (strpad == H5T_STR_ERROR)
+ {
+ throw DataTypeIException("StrType::getStrpad",
+ "H5Tget_strpad failed - returned H5T_STR_ERROR");
+ }
+ return(strpad);
}
//--------------------------------------------------------------------------
-// Function: StrType::setStrpad
-///\brief Defines the storage mechanism for this string datatype.
-///\param strpad - IN: String padding type
-///\exception H5::DataTypeIException
+// Function: StrType::setStrpad
+///\brief Defines the storage mechanism for this string datatype.
+///\param strpad - IN: String padding type
+///\exception H5::DataTypeIException
///\par Description
-/// For detail, please refer to the C layer Reference Manual at:
+///            For details, please refer to the C layer Reference Manual at:
/// http://www.hdfgroup.org/HDF5/doc/RM/RM_H5T.html#Datatype-SetStrpad
-// Programmer Binh-Minh Ribler - 2000
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
-void StrType::setStrpad( H5T_str_t strpad ) const
+void StrType::setStrpad(H5T_str_t strpad) const
{
- herr_t ret_value = H5Tset_strpad( id, strpad );
-
- if( ret_value < 0 )
- {
- throw DataTypeIException("StrType::setStrpad", "H5Tset_strpad failed");
- }
+ herr_t ret_value = H5Tset_strpad(id, strpad);
+ if (ret_value < 0)
+ {
+ throw DataTypeIException("StrType::setStrpad", "H5Tset_strpad failed");
+ }
}
//--------------------------------------------------------------------------
-// Function: StrType destructor
-///\brief Properly terminates access to this string datatype.
-// Programmer Binh-Minh Ribler - 2000
+// Function: StrType destructor
+///\brief Properly terminates access to this string datatype.
+// Programmer Binh-Minh Ribler - 2000
//--------------------------------------------------------------------------
StrType::~StrType() {}
diff --git a/c++/src/H5StrType.h b/c++/src/H5StrType.h
index 5d223b3..abac8de 100644
--- a/c++/src/H5StrType.h
+++ b/c++/src/H5StrType.h
@@ -6,12 +6,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef __H5StrType_H
@@ -27,44 +25,50 @@ namespace H5 {
*/
class H5_DLLCPP StrType : public AtomType {
public:
- // Creates a string type using a predefined type
- StrType(const PredType& pred_type);
+ // Creates a string type using a predefined type
+ StrType(const PredType& pred_type);
- // Creates a string type with specified length - may be obsolete
- StrType(const PredType& pred_type, const size_t& size);
+ // Creates a string type with specified length - may be obsolete
+ StrType(const PredType& pred_type, const size_t& size);
- // Creates a string type with specified length
- StrType(const int dummy, const size_t& size);
+ // Creates a string type with specified length
+ StrType(const int dummy, const size_t& size);
// Gets the string datatype of the specified dataset
- StrType(const DataSet& dataset);
+ StrType(const DataSet& dataset);
- // Retrieves the character set type of this string datatype.
- H5T_cset_t getCset() const;
+ // Constructors that open an HDF5 string datatype, given a location.
+ StrType(const H5Location& loc, const char* name);
+ StrType(const H5Location& loc, const H5std_string& name);
- // Sets character set to be used.
- void setCset(H5T_cset_t cset) const;
+ // Retrieves the character set type of this string datatype.
+ H5T_cset_t getCset() const;
- // Retrieves the string padding method for this string datatype.
- H5T_str_t getStrpad() const;
+ // Sets character set to be used.
+ void setCset(H5T_cset_t cset) const;
- // Defines the storage mechanism for character strings.
- void setStrpad(H5T_str_t strpad) const;
+ // Retrieves the string padding method for this string datatype.
+ H5T_str_t getStrpad() const;
- ///\brief Returns this class name.
- virtual H5std_string fromClass () const { return("StrType"); }
+ // Defines the storage mechanism for character strings.
+ void setStrpad(H5T_str_t strpad) const;
- // default constructor
- StrType();
+ ///\brief Returns this class name.
+ virtual H5std_string fromClass () const { return("StrType"); }
- // Creates a string datatype using an existing id
- StrType(const hid_t existing_id);
+ // default constructor
+ StrType();
- // Copy constructor - makes a copy of the original object
- StrType(const StrType& original);
+ // Creates a string datatype using an existing id
+ StrType(const hid_t existing_id);
+
+ // Copy constructor - makes a copy of the original object
+ StrType(const StrType& original);
+
+ // Noop destructor.
+ virtual ~StrType();
+
+}; // end of StrType
+} // namespace H5
- // Noop destructor.
- virtual ~StrType();
-};
-}
#endif // __H5StrType_H
diff --git a/c++/src/H5VarLenType.cpp b/c++/src/H5VarLenType.cpp
index fa3a18f..22e1a66 100644
--- a/c++/src/H5VarLenType.cpp
+++ b/c++/src/H5VarLenType.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <string>
@@ -21,6 +19,7 @@
#include "H5PropList.h"
#include "H5OcreatProp.h"
#include "H5DcreatProp.h"
+#include "H5LaccProp.h"
#include "H5Location.h"
#include "H5Object.h"
#include "H5DataType.h"
@@ -29,52 +28,90 @@
namespace H5 {
//--------------------------------------------------------------------------
-// Function: VarLenType default constructor
-///\brief Default constructor: Creates a stub variable-length datatype.
+// Function: VarLenType default constructor
+///\brief Default constructor: Creates a stub variable-length datatype.
//--------------------------------------------------------------------------
VarLenType::VarLenType() : DataType() {}
//--------------------------------------------------------------------------
-// Function: VarLenType overloaded constructor
-///\brief Creates an VarLenType object using an existing id.
-///\param existing_id - IN: Id of an existing datatype
-///\exception H5::DataTypeIException
-// Programmer Binh-Minh Ribler - May, 2004
+// Function: VarLenType overloaded constructor
+///\brief      Creates a VarLenType object using an existing id.
+///\param existing_id - IN: Id of an existing datatype
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - May, 2004
//--------------------------------------------------------------------------
VarLenType::VarLenType(const hid_t existing_id) : DataType(existing_id) {}
//--------------------------------------------------------------------------
-// Function: VarLenType copy constructor
-///\brief Copy constructor: makes a copy of the original VarLenType object.
-// Programmer Binh-Minh Ribler - May, 2004
+// Function: VarLenType copy constructor
+///\brief Copy constructor: makes a copy of the original VarLenType object.
+// Programmer Binh-Minh Ribler - May, 2004
//--------------------------------------------------------------------------
VarLenType::VarLenType(const VarLenType& original) : DataType(original) {}
//--------------------------------------------------------------------------
-// Function: VarLenType overloaded constructor
-///\brief Creates a new variable-length datatype based on the specified
-/// \a base_type.
-///\param base_type - IN: Pointer to existing datatype
-///\exception H5::DataTypeIException
+// Function: VarLenType overloaded constructor
+///\brief Creates a new variable-length datatype based on the specified
+/// \a base_type.
+///\param base_type - IN: Pointer to existing datatype
+///\exception H5::DataTypeIException
// Description
-// DataType passed by pointer to avoid clashing with copy
-// constructor.
-// Programmer Binh-Minh Ribler - May, 2004
+// DataType passed by pointer to avoid clashing with copy
+// constructor.
+// Programmer Binh-Minh Ribler - May, 2004
//--------------------------------------------------------------------------
VarLenType::VarLenType(const DataType* base_type) : DataType()
{
- id = H5Tvlen_create(base_type->getId());
- if (id < 0)
- {
- throw DataTypeIException("VarLenType constructor",
+ id = H5Tvlen_create(base_type->getId());
+ if (id < 0)
+ {
+ throw DataTypeIException("VarLenType constructor",
"H5Tvlen_create returns negative value");
- }
+ }
}
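
An editorial usage sketch (not part of the patch): building a variable-length sequence of native doubles, with the base type passed by pointer as described above:

    #include "H5Cpp.h"
    using namespace H5;

    // A variable-length sequence of native doubles (e.g. ragged rows).
    VarLenType make_vlen_double()
    {
        return VarLenType(&PredType::NATIVE_DOUBLE);
    }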
//--------------------------------------------------------------------------
-// Function: VarLenType destructor
-///\brief Properly terminates access to this datatype.
-// Programmer Binh-Minh Ribler - May, 2004
+// Function: VarLenType overloaded constructor
+///\brief      Creates a VarLenType instance by opening an HDF5 variable
+/// length datatype given its name, provided as a C char*.
+///\param loc - IN: Location of the type
+///\param dtype_name - IN: Variable length type name
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - Dec 2016
+// Description
+// In 1.10.1, this constructor was introduced and may replace the
+// existing function CommonFG::openVarLenType(const char*) to
+// improve usability.
+// -BMR, Dec 2016
+//--------------------------------------------------------------------------
+VarLenType::VarLenType(const H5Location& loc, const char *dtype_name) : DataType()
+{
+ id = p_opentype(loc, dtype_name);
+}
+
+//--------------------------------------------------------------------------
+// Function: VarLenType overloaded constructor
+///\brief      Creates a VarLenType instance by opening an HDF5 variable
+/// length datatype given its name, provided as an \c H5std_string.
+///\param loc - IN: Location of the type
+///\param dtype_name - IN: Variable length type name
+///\exception H5::DataTypeIException
+// Programmer Binh-Minh Ribler - Dec 2016
+// Description
+// In 1.10.1, this constructor was introduced and may replace the
+// existing function CommonFG::openVarLenType(const H5std_string&)
+// to improve usability.
+// -BMR, Dec 2016
+//--------------------------------------------------------------------------
+VarLenType::VarLenType(const H5Location& loc, const H5std_string& dtype_name) : DataType()
+{
+ id = p_opentype(loc, dtype_name.c_str());
+}
+
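A sketch of the new open-by-name constructor (editorial, not part of the patch); "ragged_t" is an illustrative name for a variable-length type assumed to have been committed to 'file' earlier:

    #include "H5Cpp.h"
    using namespace H5;

    // "ragged_t" is an illustrative name for a previously committed
    // variable-length datatype in 'file'.
    VarLenType open_ragged_type(const H5File& file)
    {
        return VarLenType(file, "ragged_t");
    }
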
+//--------------------------------------------------------------------------
+// Function: VarLenType destructor
+///\brief Properly terminates access to this datatype.
+// Programmer Binh-Minh Ribler - May, 2004
//--------------------------------------------------------------------------
VarLenType::~VarLenType() {}
diff --git a/c++/src/H5VarLenType.h b/c++/src/H5VarLenType.h
index d7141b0..4048a4e 100644
--- a/c++/src/H5VarLenType.h
+++ b/c++/src/H5VarLenType.h
@@ -6,12 +6,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef __H5VarLenType_H
@@ -27,24 +25,30 @@ namespace H5 {
*/
class H5_DLLCPP VarLenType : public DataType {
public:
- // Constructor that creates a variable-length datatype based
- // on the specified base type.
- VarLenType(const DataType* base_type);
+ // Constructor that creates a variable-length datatype based
+ // on the specified base type.
+ VarLenType(const DataType* base_type);
- ///\brief Returns this class name.
- virtual H5std_string fromClass () const { return("VarLenType"); }
+ ///\brief Returns this class name.
+ virtual H5std_string fromClass () const { return("VarLenType"); }
- // Copy constructor: makes copy of the original object.
- VarLenType( const VarLenType& original );
+ // Copy constructor: makes copy of the original object.
+ VarLenType(const VarLenType& original);
- // Constructor that takes an existing id
- VarLenType( const hid_t existing_id );
+ // Constructor that takes an existing id
+ VarLenType(const hid_t existing_id);
- // Noop destructor
- virtual ~VarLenType();
+ // Constructors that open a variable-length datatype, given a location.
+ VarLenType(const H5Location& loc, const char* name);
+ VarLenType(const H5Location& loc, const H5std_string& name);
+
+ // Noop destructor
+ virtual ~VarLenType();
+
+ // Default constructor
+ VarLenType();
+
+}; // end of VarLenType
+} // namespace H5
- // Default constructor
- VarLenType();
-};
-}
#endif // __H5VarLenType_H
diff --git a/c++/src/Makefile.am b/c++/src/Makefile.am
index 6cd4768..efe17dc 100644
--- a/c++/src/Makefile.am
+++ b/c++/src/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
@@ -33,25 +31,28 @@ libhdf5_cpp_la_LDFLAGS= -version-info $(LT_CXX_VERS_INTERFACE):$(LT_CXX_VERS_REV
bin_SCRIPTS=h5c++
# Source files for the library
-libhdf5_cpp_la_SOURCES=H5Exception.cpp H5IdComponent.cpp H5Library.cpp \
- H5Attribute.cpp H5Location.cpp H5Object.cpp H5PropList.cpp \
- H5FaccProp.cpp H5FcreatProp.cpp H5DcreatProp.cpp H5DxferProp.cpp \
- H5OcreatProp.cpp H5DataType.cpp H5DataSpace.cpp H5AbstractDs.cpp \
- H5AtomType.cpp H5PredType.cpp H5EnumType.cpp H5IntType.cpp \
- H5FloatType.cpp H5StrType.cpp H5ArrayType.cpp H5VarLenType.cpp \
- H5CompType.cpp H5DataSet.cpp H5CommonFG.cpp H5Group.cpp H5File.cpp
+libhdf5_cpp_la_SOURCES=H5Exception.cpp H5IdComponent.cpp \
+ H5DataSpace.cpp H5PropList.cpp H5Library.cpp \
+ H5FaccProp.cpp H5FcreatProp.cpp H5LaccProp.cpp \
+ H5DxferProp.cpp H5DcreatProp.cpp H5Location.cpp \
+ H5AbstractDs.cpp H5Attribute.cpp H5Object.cpp \
+ H5OcreatProp.cpp H5DataType.cpp H5AtomType.cpp \
+ H5PredType.cpp H5EnumType.cpp H5IntType.cpp \
+ H5FloatType.cpp H5StrType.cpp H5ArrayType.cpp \
+ H5VarLenType.cpp H5CompType.cpp H5DataSet.cpp \
+ H5CommonFG.cpp H5Group.cpp H5File.cpp
# HDF5 C++ library depends on HDF5 Library.
libhdf5_cpp_la_LIBADD=$(LIBHDF5)
# Public headers
include_HEADERS=H5Cpp.h H5AbstractDs.h H5AtomType.h H5Attribute.h H5Classes.h \
- H5CommonFG.h H5CompType.h H5DataSet.h H5DataSpace.h H5DataType.h \
- H5OcreatProp.h H5DcreatProp.h H5DxferProp.h H5EnumType.h \
+ H5CommonFG.h H5CompType.h H5DataSet.h H5DataSpace.h H5DataType.h \
+ H5OcreatProp.h H5DcreatProp.h H5DxferProp.h H5EnumType.h \
H5Exception.h H5FaccProp.h H5FcreatProp.h H5File.h H5FloatType.h \
- H5Group.h H5IdComponent.h H5Include.h H5IntType.h H5Library.h \
- H5Location.h H5Object.h H5PredType.h H5PropList.h H5StrType.h \
- H5CppDoc.h H5ArrayType.h H5VarLenType.h
+ H5Group.h H5IdComponent.h H5Include.h H5IntType.h H5LaccProp.h \
+ H5Library.h H5Location.h H5Object.h H5PredType.h H5PropList.h \
+ H5StrType.h H5CppDoc.h H5ArrayType.h H5VarLenType.h
# h5c++ and libhdf5.settings are generated during configure. Remove only when
# distclean.
diff --git a/c++/src/cpp_doc_config b/c++/src/cpp_doc_config
index 5986eaa..ce23041 100644
--- a/c++/src/cpp_doc_config
+++ b/c++/src/cpp_doc_config
@@ -38,7 +38,7 @@ PROJECT_NAME = "HDF5 C++ API"
# could be handy for archiving the generated documentation or if some version
# control system is used.
-PROJECT_NUMBER = "1.9.236 currently under development"
+PROJECT_NUMBER = "1.11.0"
# Using the PROJECT_BRIEF tag one can provide an optional one line description
# for a project that appears at the top of each page and should give viewer a
diff --git a/c++/src/h5c++.in b/c++/src/h5c++.in
index 7f3c3ce..00502d9 100644
--- a/c++/src/h5c++.in
+++ b/c++/src/h5c++.in
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
############################################################################
diff --git a/c++/src/header.html b/c++/src/header.html
index c3018d7..cb42565 100644
--- a/c++/src/header.html
+++ b/c++/src/header.html
@@ -12,12 +12,10 @@ xmlns="http://www.w3.org/TR/REC-html40">
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
-->
diff --git a/c++/test/CMakeLists.txt b/c++/test/CMakeLists.txt
index 75ea500..65815f9 100644
--- a/c++/test/CMakeLists.txt
+++ b/c++/test/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_CPP_TEST)
# --------------------------------------------------------------------
# Notes: When creating unit test executables they should be prefixed
diff --git a/c++/test/CMakeTests.cmake b/c++/test/CMakeTests.cmake
index 9bcc706..6de801e 100644
--- a/c++/test/CMakeTests.cmake
+++ b/c++/test/CMakeTests.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
HDFTEST_COPY_FILE("${HDF5_CPP_TEST_SOURCE_DIR}/th5s.h5" "${PROJECT_BINARY_DIR}/th5s.h5" "cpp_testhdf5_files")
add_custom_target(cpp_testhdf5_files ALL COMMENT "Copying files needed by cpp_testhdf5 tests" DEPENDS ${cpp_testhdf5_files_list})
@@ -20,7 +31,20 @@ add_test (
titerate.h5
)
-add_test (NAME CPP_testhdf5 COMMAND $<TARGET_FILE:cpp_testhdf5>)
+if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME CPP_testhdf5 COMMAND $<TARGET_FILE:cpp_testhdf5>)
+else ()
+ add_test (NAME CPP_testhdf5 COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:cpp_testhdf5>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=cpp_testhdf5.txt"
+ #-D "TEST_REFERENCE=cpp_testhdf5.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+endif ()
set_tests_properties (CPP_testhdf5 PROPERTIES DEPENDS CPP_testhdf5-clear-objects)
if (HDF5_TEST_VFD)
@@ -36,9 +60,9 @@ if (HDF5_TEST_VFD)
if (DIRECT_VFD)
set (VFD_LIST ${VFD_LIST} direct)
- endif (DIRECT_VFD)
+ endif ()
- MACRO (ADD_VFD_TEST vfdname resultcode)
+ macro (ADD_VFD_TEST vfdname resultcode)
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
file (MAKE_DIRECTORY "${PROJECT_BINARY_DIR}/${vfdname}")
add_test (
@@ -66,12 +90,12 @@ if (HDF5_TEST_VFD)
)
set_tests_properties (CPP_VFD-${vfdname}-cpp_testhdf5 PROPERTIES DEPENDS CPP_VFD-${vfdname}-cpp_testhdf5-clear-objects)
set_tests_properties (CPP_VFD-${vfdname}-cpp_testhdf5 PROPERTIES TIMEOUT 30)
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO (ADD_VFD_TEST)
+ endif ()
+ endmacro ()
# Run test with different Virtual File Driver
foreach (vfd ${VFD_LIST})
ADD_VFD_TEST (${vfd} 0)
- endforeach (vfd ${VFD_LIST})
+ endforeach ()
-endif (HDF5_TEST_VFD)
+endif ()
diff --git a/c++/test/H5srcdir_str.h.in b/c++/test/H5srcdir_str.h.in
index d472124..bab1df3 100644
--- a/c++/test/H5srcdir_str.h.in
+++ b/c++/test/H5srcdir_str.h.in
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* If you are reading this file and it has a '.h' suffix, it was automatically
diff --git a/c++/test/Makefile.am b/c++/test/Makefile.am
index da0a864..0fc17fc 100644
--- a/c++/test/Makefile.am
+++ b/c++/test/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
diff --git a/c++/test/dsets.cpp b/c++/test/dsets.cpp
index 8752744..a765fb1 100644
--- a/c++/test/dsets.cpp
+++ b/c++/test/dsets.cpp
@@ -5,23 +5,21 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*****************************************************************************
FILE
dsets.cpp - HDF5 C++ testing the functionalities associated with the
- C dataset interface (H5D)
+ C dataset interface (H5D)
EXTERNAL ROUTINES/VARIABLES:
These routines are in the test directory of the C library:
- h5_reset() -- in h5test.c, resets the library by closing it
- h5_fileaccess() -- in h5test.c, returns a file access template
+ h5_reset() -- in h5test.c, resets the library by closing it
+ h5_fileaccess() -- in h5test.c, returns a file access template
***************************************************************************/
@@ -34,20 +32,20 @@ using std::cerr;
using std::endl;
#include <string>
-#include "H5Cpp.h" // C++ API header file
+#include "H5Cpp.h" // C++ API header file
using namespace H5;
#include "h5test.h"
-#include "h5cpputil.h" // C++ utilility header file
+#include "h5cpputil.h" // C++ utilility header file
-const H5std_string FILE1("dataset.h5");
-const H5std_string DSET_DEFAULT_NAME("default");
-const H5std_string DSET_DEFAULT_NAME_PATH("/default");
-const H5std_string DSET_CHUNKED_NAME("chunked");
-const H5std_string DSET_SIMPLE_IO_NAME("simple_io");
-const H5std_string DSET_TCONV_NAME ("tconv");
-const H5std_string DSET_COMPRESS_NAME("compressed");
-const H5std_string DSET_BOGUS_NAME ("bogus");
+const H5std_string FILE1("dataset.h5");
+const H5std_string DSET_DEFAULT_NAME("default");
+const H5std_string DSET_DEFAULT_NAME_PATH("/default");
+const H5std_string DSET_CHUNKED_NAME("chunked");
+const H5std_string DSET_SIMPLE_IO_NAME("simple_io");
+const H5std_string DSET_TCONV_NAME("tconv");
+const H5std_string DSET_COMPRESS_NAME("compressed");
+const H5std_string DSET_BOGUS_NAME("bogus");
/* Temporary filter IDs used for testing */
const int H5Z_FILTER_BOGUS = 305;
@@ -58,16 +56,16 @@ static size_t filter_bogus(unsigned int flags, size_t cd_nelmts,
/*-------------------------------------------------------------------------
- * Function: test_create
+ * Function: test_create
*
- * Purpose: Attempts to create a dataset.
+ * Purpose Attempts to create a dataset.
*
- * Return: Success: 0
+ * Return Success: 0
*
- * Failure: -1
+ * Failure: -1
*
- * Programmer: Binh-Minh Ribler (using C version)
- * Friday, January 5, 2001
+ * Programmer Binh-Minh Ribler (using C version)
+ * Friday, January 5, 2001
*
* Modifications:
*
@@ -81,125 +79,125 @@ test_create( H5File& file)
// Setting this to NULL for cleaning up in failure situations
DataSet *dataset = NULL;
try {
- // Create a data space
- hsize_t dims[2];
- dims[0] = 256;
- dims[1] = 512;
- DataSpace space (2, dims, NULL);
-
- // Create a dataset using the default dataset creation properties.
- // We're not sure what they are, so we won't check.
- dataset = new DataSet (file.createDataSet
- (DSET_DEFAULT_NAME, PredType::NATIVE_DOUBLE, space));
-
-
- // Add a comment to the dataset
- file.setComment (DSET_DEFAULT_NAME, "This is a dataset");
-
- // Close the dataset
- delete dataset;
- dataset = NULL;
-
- // Try creating a dataset that already exists. This should fail since a
- // dataset can only be created once. If an exception is not thrown for
- // this action by createDataSet, then throw an invalid action exception.
- try {
- dataset = new DataSet (file.createDataSet
- (DSET_DEFAULT_NAME, PredType::NATIVE_DOUBLE, space));
-
- // continuation here, that means no exception has been thrown
- throw InvalidActionException("H5File::createDataSet", "Library allowed overwrite of existing dataset");
- }
- catch (FileIException& E) // catching invalid creating dataset
- {} // do nothing, exception expected
-
- // Open the dataset we created above and then close it. This is one
- // way to open an existing dataset for accessing.
- dataset = new DataSet (file.openDataSet (DSET_DEFAULT_NAME));
-
- // Get and verify the name of this dataset, using
- // H5std_string getObjName()
- H5std_string ds_name = dataset->getObjName();
- verify_val(ds_name, DSET_DEFAULT_NAME_PATH, "DataSet::getObjName", __LINE__, __FILE__);
-
- // Get and verify the comment from this dataset, using
- // H5std_string getComment(const H5std_string& name, <buf_size=0, by default>)
- H5std_string comment = file.getComment(DSET_DEFAULT_NAME);
- verify_val(comment, "This is a dataset", "DataSet::getComment", __LINE__, __FILE__);
-
- // Close the dataset when accessing is completed
- delete dataset;
-
- // This is another way to open an existing dataset for accessing.
- DataSet another_dataset(file.openDataSet (DSET_DEFAULT_NAME));
-
- // Try opening a non-existent dataset. This should fail so if an
- // exception is not thrown for this action by openDataSet, then
- // display failure information and throw an exception.
- try {
- dataset = new DataSet (file.openDataSet( "does_not_exist" ));
-
- // continuation here, that means no exception has been thrown
- throw InvalidActionException("H5File::openDataSet", "Attempted to open a non-existent dataset");
- }
- catch (FileIException& E ) // catching creating non-existent dataset
- {} // do nothing, exception expected
-
- // Create a new dataset that uses chunked storage instead of the default
- // layout.
- DSetCreatPropList create_parms;
- hsize_t csize[2];
- csize[0] = 5;
- csize[1] = 100;
- create_parms.setChunk( 2, csize );
-
- dataset = new DataSet (file.createDataSet
- (DSET_CHUNKED_NAME, PredType::NATIVE_DOUBLE, space, create_parms));
- // Note: this one has no error message in C when failure occurs?
-
- // clean up and return with success
- delete dataset;
-
- PASSED();
- return 0;
- } // outer most try block
+ // Create a data space
+ hsize_t dims[2];
+ dims[0] = 256;
+ dims[1] = 512;
+ DataSpace space (2, dims, NULL);
+
+ // Create a dataset using the default dataset creation properties.
+ // We're not sure what they are, so we won't check.
+ dataset = new DataSet (file.createDataSet
+ (DSET_DEFAULT_NAME, PredType::NATIVE_DOUBLE, space));
+
+
+ // Add a comment to the dataset
+ file.setComment (DSET_DEFAULT_NAME, "This is a dataset");
+
+ // Close the dataset
+ delete dataset;
+ dataset = NULL;
+
+ // Try creating a dataset that already exists. This should fail since a
+ // dataset can only be created once. If an exception is not thrown for
+ // this action by createDataSet, then throw an invalid action exception.
+ try {
+ dataset = new DataSet (file.createDataSet
+ (DSET_DEFAULT_NAME, PredType::NATIVE_DOUBLE, space));
+
+ // continuation here, that means no exception has been thrown
+ throw InvalidActionException("H5File::createDataSet", "Library allowed overwrite of existing dataset");
+ }
+ catch (FileIException& E) // catching invalid creating dataset
+ {} // do nothing, exception expected
+
+ // Open the dataset we created above and then close it. This is one
+ // way to open an existing dataset for accessing.
+ dataset = new DataSet (file.openDataSet (DSET_DEFAULT_NAME));
+
+ // Get and verify the name of this dataset, using
+ // H5std_string getObjName()
+ H5std_string ds_name = dataset->getObjName();
+ verify_val(ds_name, DSET_DEFAULT_NAME_PATH, "DataSet::getObjName", __LINE__, __FILE__);
+
+ // Get and verify the comment from this dataset, using
+ // H5std_string getComment(const H5std_string& name, <buf_size=0, by default>)
+ H5std_string comment = file.getComment(DSET_DEFAULT_NAME);
+ verify_val(comment, "This is a dataset", "DataSet::getComment", __LINE__, __FILE__);
+
+ // Close the dataset when accessing is completed
+ delete dataset;
+
+ // This is another way to open an existing dataset for accessing.
+ DataSet another_dataset(file.openDataSet (DSET_DEFAULT_NAME));
+
+ // Try opening a non-existent dataset. This should fail so if an
+ // exception is not thrown for this action by openDataSet, then
+ // display failure information and throw an exception.
+ try {
+ dataset = new DataSet (file.openDataSet( "does_not_exist" ));
+
+ // continuation here, that means no exception has been thrown
+ throw InvalidActionException("H5File::openDataSet", "Attempted to open a non-existent dataset");
+ }
+ catch (FileIException& E ) // catching creating non-existent dataset
+ {} // do nothing, exception expected
+
+ // Create a new dataset that uses chunked storage instead of the default
+ // layout.
+ DSetCreatPropList create_parms;
+ hsize_t csize[2];
+ csize[0] = 5;
+ csize[1] = 100;
+ create_parms.setChunk( 2, csize );
+
+ dataset = new DataSet (file.createDataSet(DSET_CHUNKED_NAME,
+ PredType::NATIVE_DOUBLE, space, create_parms));
+ // Note: this one has no error message in C when failure occurs?
+
+ // clean up and return with success
+ delete dataset;
+
+ PASSED();
+ return 0;
+ } // outermost try block
catch (InvalidActionException& E)
{
- cerr << " FAILED" << endl;
- cerr << " <<< " << E.getDetailMsg() << " >>>" << endl << endl;
+ cerr << " FAILED" << endl;
+ cerr << " <<< " << E.getDetailMsg() << " >>>" << endl << endl;
- // clean up and return with failure
- if (dataset != NULL)
- delete dataset;
- return -1;
+ // clean up and return with failure
+ if (dataset != NULL)
+ delete dataset;
+ return -1;
}
// catch all other exceptions
catch (Exception& E)
{
- issue_fail_msg("test_create", __LINE__, __FILE__);
+ issue_fail_msg("test_create", __LINE__, __FILE__);
- // clean up and return with failure
- if (dataset != NULL)
- delete dataset;
- return -1;
+ // clean up and return with failure
+ if (dataset != NULL)
+ delete dataset;
+ return -1;
}
} // test_create
/*-------------------------------------------------------------------------
- * Function: test_simple_io
+ * Function: test_simple_io
*
- * Purpose: Tests simple I/O. That is, reading and writing a complete
- * multi-dimensional array without data type or data space
- * conversions, without compression, and stored contiguously.
+ * Purpose Tests simple I/O. That is, reading and writing a complete
+ * multi-dimensional array without data type or data space
+ * conversions, without compression, and stored contiguously.
*
- * Return: Success: 0
+ * Return Success: 0
*
- * Failure: -1
+ * Failure: -1
*
- * Programmer: Binh-Minh Ribler (using C version)
- * Friday, January 5, 2001
+ * Programmer Binh-Minh Ribler (using C version)
+ * Friday, January 5, 2001
*
* Modifications:
*
@@ -211,81 +209,81 @@ test_simple_io( H5File& file)
SUBTEST("Simple I/O");
- int points[100][200];
- int check[100][200];
- int i, j, n;
+ int points[100][200];
+ int check[100][200];
+ int i, j, n;
// Initialize the dataset
for (i = n = 0; i < 100; i++)
{
- for (j = 0; j < 200; j++) {
- points[i][j] = n++;
- }
+ for (j = 0; j < 200; j++) {
+ points[i][j] = n++;
+ }
}
char* tconv_buf = new char [1000];
try
{
- // Create the data space
- hsize_t dims[2];
- dims[0] = 100;
- dims[1] = 200;
- DataSpace space (2, dims, NULL);
-
- // Create a small conversion buffer to test strip mining
- DSetMemXferPropList xfer;
-
- xfer.setBuffer (1000, tconv_buf, NULL);
-
- // Create the dataset
- DataSet dataset (file.createDataSet (DSET_SIMPLE_IO_NAME, PredType::NATIVE_INT, space));
-
- // Write the data to the dataset
- dataset.write(static_cast<void*>(points), PredType::NATIVE_INT, DataSpace::ALL, DataSpace::ALL, xfer);
-
- // Read the dataset back
- dataset.read (static_cast<void*>(check), PredType::NATIVE_INT, DataSpace::ALL, DataSpace::ALL, xfer);
-
- // Check that the values read are the same as the values written
- for (i = 0; i < 100; i++)
- for (j = 0; j < 200; j++)
- {
- int status = check_values (i, j, points[i][j], check[i][j]);
- if (status == -1)
- throw Exception("DataSet::read");
- }
-
- // clean up and return with success
- delete [] tconv_buf;
- PASSED();
- return 0;
+ // Create the data space
+ hsize_t dims[2];
+ dims[0] = 100;
+ dims[1] = 200;
+ DataSpace space (2, dims, NULL);
+
+ // Create a small conversion buffer to test strip mining
+ DSetMemXferPropList xfer;
+
+ xfer.setBuffer (1000, tconv_buf, NULL);
+
+ // Create the dataset
+ DataSet dataset (file.createDataSet (DSET_SIMPLE_IO_NAME, PredType::NATIVE_INT, space));
+
+ // Write the data to the dataset
+ dataset.write(static_cast<void*>(points), PredType::NATIVE_INT, DataSpace::ALL, DataSpace::ALL, xfer);
+
+ // Read the dataset back
+ dataset.read (static_cast<void*>(check), PredType::NATIVE_INT, DataSpace::ALL, DataSpace::ALL, xfer);
+
+ // Check that the values read are the same as the values written
+ for (i = 0; i < 100; i++)
+ for (j = 0; j < 200; j++)
+ {
+ int status = check_values (i, j, points[i][j], check[i][j]);
+ if (status == -1)
+ throw Exception("DataSet::read");
+ }
+
+ // clean up and return with success
+ delete [] tconv_buf;
+ PASSED();
+ return 0;
} // end try
// catch all dataset, space, plist exceptions
catch (Exception& E)
{
- cerr << " FAILED" << endl;
- cerr << " <<< " << E.getDetailMsg() << " >>>" << endl << endl;
+ cerr << " FAILED" << endl;
+ cerr << " <<< " << E.getDetailMsg() << " >>>" << endl << endl;
- // clean up and return with failure
- if (tconv_buf)
- delete [] tconv_buf;
- return -1;
+ // clean up and return with failure
+ if (tconv_buf)
+ delete [] tconv_buf;
+ return -1;
}
} // test_simple_io
/*-------------------------------------------------------------------------
- * Function: test_datasize
+ * Function: test_datasize
*
- * Purpose: Tests DataSet::getInMemDataSize().
+ * Purpose Tests DataSet::getInMemDataSize().
*
- * Return: Success: 0
+ * Return Success: 0
*
- * Failure: -1
+ * Failure: -1
*
- * Programmer: Binh-Minh Ribler
- * Thursday, March 22, 2012
+ * Programmer Binh-Minh Ribler
+ * Thursday, March 22, 2012
*
* Modifications:
*
@@ -297,65 +295,65 @@ test_datasize(FileAccPropList &fapl)
SUBTEST("DataSet::getInMemDataSize()");
try
{
- // Open FILE1.
- H5File file(FILE1, H5F_ACC_RDWR, FileCreatPropList::DEFAULT, fapl);
-
- // Open dataset DSET_SIMPLE_IO_NAME.
- DataSet dset = file.openDataSet (DSET_SIMPLE_IO_NAME);
-
- // Get the dataset's dataspace to calculate the size for verification.
- DataSpace space(dset.getSpace());
-
- // Get the dimension sizes.
- hsize_t dims[2];
- int n_dims = space.getSimpleExtentDims(dims);
- if (n_dims < 0)
- {
- throw Exception("test_compression", "DataSpace::getSimpleExtentDims() failed");
- }
-
- // Calculate the supposed size. Size of each value is int (4), from
- // test_simple_io.
- size_t expected_size = 4 * dims[0] * dims[1];
-
- // getInMemDataSize() returns the in memory size of the data.
- size_t ds_size = dset.getInMemDataSize();
-
- // Verify the data size.
- if (ds_size != expected_size)
- {
- H5_FAILED();
- cerr << " Expected data size = " << expected_size;
- cerr << " but dset.getInMemDataSize() returned " << ds_size << endl;
- throw Exception("test_compression", "Failed in testing DataSet::getInMemDataSize()");
- }
-
- PASSED();
- return 0;
+ // Open FILE1.
+ H5File file(FILE1, H5F_ACC_RDWR, FileCreatPropList::DEFAULT, fapl);
+
+ // Open dataset DSET_SIMPLE_IO_NAME.
+ DataSet dset = file.openDataSet (DSET_SIMPLE_IO_NAME);
+
+ // Get the dataset's dataspace to calculate the size for verification.
+ DataSpace space(dset.getSpace());
+
+ // Get the dimension sizes.
+ hsize_t dims[2];
+ int n_dims = space.getSimpleExtentDims(dims);
+ if (n_dims < 0)
+ {
+ throw Exception("test_compression", "DataSpace::getSimpleExtentDims() failed");
+ }
+
+ // Calculate the supposed size. Size of each value is int (4), from
+ // test_simple_io.
+ size_t expected_size = 4 * dims[0] * dims[1];
+
+ // getInMemDataSize() returns the in memory size of the data.
+ size_t ds_size = dset.getInMemDataSize();
+
+ // Verify the data size.
+ if (ds_size != expected_size)
+ {
+ H5_FAILED();
+ cerr << " Expected data size = " << expected_size;
+ cerr << " but dset.getInMemDataSize() returned " << ds_size << endl;
+ throw Exception("test_compression", "Failed in testing DataSet::getInMemDataSize()");
+ }
+
+ PASSED();
+ return 0;
} // end try
// catch all dataset, space, plist exceptions
catch (Exception& E)
{
- cerr << " FAILED" << endl;
- cerr << " <<< " << E.getDetailMsg() << " >>>" << endl << endl;
+ cerr << " FAILED" << endl;
+ cerr << " <<< " << E.getDetailMsg() << " >>>" << endl << endl;
- return -1;
+ return -1;
}
} // test_datasize
/*-------------------------------------------------------------------------
- * Function: test_tconv
+ * Function: test_tconv
*
- * Purpose: Test some simple data type conversion stuff.
+ * Purpose Test some simple data type conversion stuff.
*
- * Return: Success: 0
+ * Return Success: 0
*
- * Failure: -1
+ * Failure: -1
*
- * Programmer: Binh-Minh Ribler (using C version)
- * Friday, January 5, 2001
+ * Programmer Binh-Minh Ribler (using C version)
+ * Friday, January 5, 2001
*
* Modifications:
*
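As a quick check on the size computation in test_datasize above: test_simple_io writes a 100 x 200 dataset of NATIVE_INT, so with the 4-byte int size the test assumes, getInMemDataSize() is expected to return 4 * 100 * 200 = 80,000 bytes.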
@@ -365,7 +363,7 @@ static herr_t
test_tconv(H5File& file)
{
// Prepare buffers for input/output
- char *out=NULL, *in=NULL;
+ char *out=NULL, *in=NULL;
out = new char [4*1000000];
// assert (out); - should use exception handler for new - BMR
in = new char [4*1000000];
@@ -375,81 +373,81 @@ test_tconv(H5File& file)
// Initialize the dataset
for (int i = 0; i < 1000000; i++) {
- out[i*4+0] = 0x11;
- out[i*4+1] = 0x22;
- out[i*4+2] = 0x33;
- out[i*4+3] = 0x44;
+ out[i*4+0] = 0x11;
+ out[i*4+1] = 0x22;
+ out[i*4+2] = 0x33;
+ out[i*4+3] = 0x44;
}
try
{
- // Create the data space
- hsize_t dims[1];
- dims[0] = 1000000;
- DataSpace space (1, dims, NULL);
-
- // Create the data set
- DataSet dataset (file.createDataSet (DSET_TCONV_NAME, PredType::STD_I32LE, space));
-
- // Write the data to the dataset
- dataset.write (static_cast<void*>(out), PredType::STD_I32LE);
-
- // Read data with byte order conversion
- dataset.read (static_cast<void*>(in), PredType::STD_I32BE);
-
- // Check
- for (int i = 0; i < 1000000; i++) {
- if (in[4*i+0]!=out[4*i+3] ||
- in[4*i+1]!=out[4*i+2] ||
- in[4*i+2]!=out[4*i+1] ||
- in[4*i+3]!=out[4*i+0])
- {
- throw Exception("DataSet::read", "Read with byte order conversion failed");
- }
- }
-
- // clean up and return with success
- delete [] out;
- delete [] in;
- PASSED();
- return 0;
+ // Create the data space
+ hsize_t dims[1];
+ dims[0] = 1000000;
+ DataSpace space (1, dims, NULL);
+
+ // Create the data set
+ DataSet dataset (file.createDataSet (DSET_TCONV_NAME, PredType::STD_I32LE, space));
+
+ // Write the data to the dataset
+ dataset.write (static_cast<void*>(out), PredType::STD_I32LE);
+
+ // Read data with byte order conversion
+ dataset.read (static_cast<void*>(in), PredType::STD_I32BE);
+
+ // Check
+ for (int i = 0; i < 1000000; i++) {
+ if (in[4*i+0]!=out[4*i+3] ||
+ in[4*i+1]!=out[4*i+2] ||
+ in[4*i+2]!=out[4*i+1] ||
+ in[4*i+3]!=out[4*i+0])
+ {
+ throw Exception("DataSet::read", "Read with byte order conversion failed");
+ }
+ }
+
+ // clean up and return with success
+ delete [] out;
+ delete [] in;
+ PASSED();
+ return 0;
} // end try
// catch all dataset and space exceptions
catch (Exception& E)
{
- cerr << " FAILED" << endl;
- cerr << " <<< " << E.getDetailMsg() << " >>>" << endl << endl;
+ cerr << " FAILED" << endl;
+ cerr << " <<< " << E.getDetailMsg() << " >>>" << endl << endl;
- // clean up and return with failure
- delete [] out;
- delete [] in;
- return -1;
+ // clean up and return with failure
+ delete [] out;
+ delete [] in;
+ return -1;
}
} // test_tconv
/* This message derives from H5Z */
const H5Z_class2_t H5Z_BOGUS[1] = {{
- H5Z_CLASS_T_VERS, /* H5Z_class_t version number */
- H5Z_FILTER_BOGUS, /* Filter id number */
- 1, 1, /* Encode and decode enabled */
- "bogus", /* Filter name for debugging */
+ H5Z_CLASS_T_VERS, /* H5Z_class_t version number */
+ H5Z_FILTER_BOGUS, /* Filter id number */
+ 1, 1, /* Encode and decode enabled */
+ "bogus", /* Filter name for debugging */
NULL, /* The "can apply" callback */
NULL, /* The "set local" callback */
- (H5Z_func_t)filter_bogus, /* The actual filter function */
+ (H5Z_func_t)filter_bogus, /* The actual filter function */
}};
/*-------------------------------------------------------------------------
- * Function: bogus
+ * Function: bogus
*
- * Purpose: A bogus compression method that doesn't do anything.
+ * Purpose A bogus compression method that doesn't do anything.
*
- * Return: Success: Data chunk size
+ * Return Success: Data chunk size
*
- * Failure: 0
+ * Failure: 0
*
- * Programmer: Robb Matzke
- * Tuesday, April 21, 1998
+ * Programmer Robb Matzke
+ * Tuesday, April 21, 1998
*
* Modifications:
*
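The H5Z_BOGUS table above registers filter_bogus as its H5Z_func_t callback, and the docblock describes it as a compression method that does nothing and returns the data chunk size. Its body lies outside this hunk, so the following is only a sketch of such a pass-through callback under that description:

    #include "H5Zpublic.h"
    #include <cstddef>

    static size_t
    filter_bogus(unsigned int flags, size_t cd_nelmts,
                 const unsigned int cd_values[], size_t nbytes,
                 size_t *buf_size, void **buf)
    {
        // Pass-through: leave the chunk untouched and report its size.
        (void)flags; (void)cd_nelmts; (void)cd_values;
        (void)buf_size; (void)buf;
        return nbytes;
    }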
@@ -466,19 +464,19 @@ filter_bogus(unsigned int flags, size_t cd_nelmts,
/*-------------------------------------------------------------------------
- * Function: test_compression
+ * Function: test_compression
*
- * Purpose: Tests dataset compression. If compression is requested when
- * it hasn't been compiled into the library (such as when
- * updating an existing compressed dataset) then data is sent to
- * the file uncompressed but no errors are returned.
+ * Purpose Tests dataset compression. If compression is requested when
+ * it hasn't been compiled into the library (such as when
+ * updating an existing compressed dataset) then data is sent to
+ * the file uncompressed but no errors are returned.
*
- * Return: Success: 0
+ * Return Success: 0
*
- * Failure: -1
+ * Failure: -1
*
- * Programmer: Binh-Minh Ribler (using C version)
- * Friday, January 5, 2001
+ * Programmer Binh-Minh Ribler (using C version)
+ * Friday, January 5, 2001
*
* Modifications:
*
@@ -488,266 +486,266 @@ static herr_t
test_compression(H5File& file)
{
#ifndef H5_HAVE_FILTER_DEFLATE
- const char *not_supported;
+ const char *not_supported;
not_supported = " Deflate compression is not enabled.";
#endif /* H5_HAVE_FILTER_DEFLATE */
- int points[100][200];
- int check[100][200];
- hsize_t i, j, n;
+ int points[100][200];
+ int check[100][200];
+ hsize_t i, j, n;
// Initialize the dataset
for (i = n = 0; i < 100; i++)
{
- for (j = 0; j < 200; j++) {
- points[i][j] = static_cast<int>(n++);
- }
+ for (j = 0; j < 200; j++) {
+ points[i][j] = static_cast<int>(n++);
+ }
}
char* tconv_buf = new char [1000];
DataSet* dataset = NULL;
try
{
- const hsize_t size[2] = {100, 200};
- // Create the data space
- DataSpace space1(2, size, NULL);
+ const hsize_t size[2] = {100, 200};
+ // Create the data space
+ DataSpace space1(2, size, NULL);
- // Create a small conversion buffer to test strip mining
- DSetMemXferPropList xfer;
+ // Create a small conversion buffer to test strip mining
+ DSetMemXferPropList xfer;
- xfer.setBuffer (1000, tconv_buf, NULL);
+ xfer.setBuffer (1000, tconv_buf, NULL);
- // Use chunked storage with compression
- DSetCreatPropList dscreatplist;
+ // Use chunked storage with compression
+ DSetCreatPropList dscreatplist;
- const hsize_t chunk_size[2] = {2, 25};
- dscreatplist.setChunk (2, chunk_size);
- dscreatplist.setDeflate (6);
+ const hsize_t chunk_size[2] = {2, 25};
+ dscreatplist.setChunk (2, chunk_size);
+ dscreatplist.setDeflate (6);
#ifdef H5_HAVE_FILTER_DEFLATE
- SUBTEST("Compression (setup)");
-
- // Create the dataset
- dataset = new DataSet (file.createDataSet
- (DSET_COMPRESS_NAME, PredType::NATIVE_INT, space1, dscreatplist));
-
- PASSED();
-
- /*----------------------------------------------------------------------
- * STEP 1: Read uninitialized data. It should be zero.
- *----------------------------------------------------------------------
- */
- SUBTEST("Compression (uninitialized read)");
-
- dataset->read (static_cast<void*>(check), PredType::NATIVE_INT, DataSpace::ALL, DataSpace::ALL, xfer);
-
- for (i=0; i<size[0]; i++) {
- for (j=0; j<size[1]; j++) {
- if (0!=check[i][j]) {
- H5_FAILED();
- cerr << " Read a non-zero value." << endl;
- cerr << " At index " << static_cast<unsigned long>(i) << "," <<
- static_cast<unsigned long>(j) << endl;
- throw Exception("test_compression", "Failed in uninitialized read");
- }
- }
- }
- PASSED();
-
- /*----------------------------------------------------------------------
- * STEP 2: Test compression by setting up a chunked dataset and writing
- * to it.
- *----------------------------------------------------------------------
- */
- SUBTEST("Compression (write)");
-
- for (i=n=0; i<size[0]; i++)
- {
- for (j=0; j<size[1]; j++)
- {
- points[i][j] = static_cast<int>(n++);
- }
- }
-
- dataset->write (static_cast<void*>(points), PredType::NATIVE_INT, DataSpace::ALL, DataSpace::ALL, xfer);
-
- PASSED();
-
- /*----------------------------------------------------------------------
- * STEP 3: Try to read the data we just wrote.
- *----------------------------------------------------------------------
- */
- SUBTEST("Compression (read)");
-
- // Read the dataset back
- dataset->read (static_cast<void*>(check), PredType::NATIVE_INT, DataSpace::ALL, DataSpace::ALL, xfer);
-
- // Check that the values read are the same as the values written
- for (i = 0; i < size[0]; i++)
- for (j = 0; j < size[1]; j++)
- {
- int status = check_values (i, j, points[i][j], check[i][j]);
- if (status == -1)
- throw Exception("test_compression", "Failed in read");
- }
-
- PASSED();
-
- /*----------------------------------------------------------------------
- * STEP 4: Write new data over the top of the old data. The new data is
- * random thus not very compressible, and will cause the chunks to move
- * around as they grow. We only change values for the left half of the
- * dataset although we rewrite the whole thing.
- *----------------------------------------------------------------------
- */
- SUBTEST("Compression (modify)");
-
- for (i=0; i<size[0]; i++)
- {
- for (j=0; j<size[1]/2; j++)
- {
- points[i][j] = rand ();
- }
- }
- dataset->write (static_cast<void*>(points), PredType::NATIVE_INT, DataSpace::ALL, DataSpace::ALL, xfer);
-
- // Read the dataset back and check it
- dataset->read (static_cast<void*>(check), PredType::NATIVE_INT, DataSpace::ALL, DataSpace::ALL, xfer);
-
- // Check that the values read are the same as the values written
- for (i = 0; i < size[0]; i++)
- for (j = 0; j < size[1]; j++)
- {
- int status = check_values (i, j, points[i][j], check[i][j]);
- if (status == -1)
- throw Exception("test_compression", "Failed in modify");
- }
-
- PASSED();
-
- /*----------------------------------------------------------------------
- * STEP 5: Close the dataset and then open it and read it again. This
- * insures that the compression message is picked up properly from the
- * object header.
- *----------------------------------------------------------------------
- */
- SUBTEST("Compression (re-open)");
-
- // close this dataset to reuse the var
- delete dataset;
-
- dataset = new DataSet (file.openDataSet (DSET_COMPRESS_NAME));
- dataset->read (static_cast<void*>(check), PredType::NATIVE_INT, DataSpace::ALL, DataSpace::ALL, xfer);
-
- // Check that the values read are the same as the values written
- for (i = 0; i < size[0]; i++)
- for (j = 0; j < size[1]; j++)
- {
- int status = check_values (i, j, points[i][j], check[i][j]);
- if (status == -1)
- throw Exception("test_compression", "Failed in re-open");
- }
-
- PASSED();
-
-
- /*----------------------------------------------------------------------
- * STEP 6: Test partial I/O by writing to and then reading from a
- * hyperslab of the dataset. The hyperslab does not line up on chunk
- * boundaries (we know that case already works from above tests).
- *----------------------------------------------------------------------
- */
- SUBTEST("Compression (partial I/O)");
-
- const hsize_t hs_size[2] = {4, 50};
- const hsize_t hs_offset[2] = {7, 30};
- for (i = 0; i < hs_size[0]; i++) {
- for (j = 0; j < hs_size[1]; j++) {
- points[hs_offset[0]+i][hs_offset[1]+j] = rand ();
- }
- }
- space1.selectHyperslab( H5S_SELECT_SET, hs_size, hs_offset );
- dataset->write (static_cast<void*>(points), PredType::NATIVE_INT, space1, space1, xfer);
- dataset->read (static_cast<void*>(check), PredType::NATIVE_INT, space1, space1, xfer);
-
- // Check that the values read are the same as the values written
- for (i=0; i<hs_size[0]; i++) {
- for (j=0; j<hs_size[1]; j++) {
- if (points[hs_offset[0]+i][hs_offset[1]+j] !=
- check[hs_offset[0]+i][hs_offset[1]+j]) {
- H5_FAILED();
- cerr << " Read different values than written.\n" << endl;
- cerr << " At index " << static_cast<unsigned long>((hs_offset[0]+i)) <<
- "," << static_cast<unsigned long>((hs_offset[1]+j)) << endl;
-
- cerr << " At original: " << static_cast<int>(points[hs_offset[0]+i][hs_offset[1]+j]) << endl;
- cerr << " At returned: " << static_cast<int>(check[hs_offset[0]+i][hs_offset[1]+j]) << endl;
- throw Exception("test_compression", "Failed in partial I/O");
- }
- } // for j
- } // for i
-
- delete dataset;
- dataset = NULL;
-
- PASSED();
+ SUBTEST("Compression (setup)");
+
+ // Create the dataset
+ dataset = new DataSet (file.createDataSet
+ (DSET_COMPRESS_NAME, PredType::NATIVE_INT, space1, dscreatplist));
+
+ PASSED();
+
+ /*----------------------------------------------------------------------
+ * STEP 1: Read uninitialized data. It should be zero.
+ *----------------------------------------------------------------------
+ */
+ SUBTEST("Compression (uninitialized read)");
+
+ dataset->read (static_cast<void*>(check), PredType::NATIVE_INT, DataSpace::ALL, DataSpace::ALL, xfer);
+
+ for (i=0; i<size[0]; i++) {
+ for (j=0; j<size[1]; j++) {
+ if (0!=check[i][j]) {
+ H5_FAILED();
+ cerr << " Read a non-zero value." << endl;
+ cerr << " At index " << static_cast<unsigned long>(i)
+ << "," << static_cast<unsigned long>(j) << endl;
+ throw Exception("test_compression", "Failed in uninitialized read");
+ }
+ }
+ }
+ PASSED();
+
+ /*----------------------------------------------------------------------
+ * STEP 2: Test compression by setting up a chunked dataset and writing
+ * to it.
+ *----------------------------------------------------------------------
+ */
+ SUBTEST("Compression (write)");
+
+ for (i=n=0; i<size[0]; i++)
+ {
+ for (j=0; j<size[1]; j++)
+ {
+ points[i][j] = static_cast<int>(n++);
+ }
+ }
+
+ dataset->write (static_cast<void*>(points), PredType::NATIVE_INT, DataSpace::ALL, DataSpace::ALL, xfer);
+
+ PASSED();
+
+ /*----------------------------------------------------------------------
+ * STEP 3: Try to read the data we just wrote.
+ *----------------------------------------------------------------------
+ */
+ SUBTEST("Compression (read)");
+
+ // Read the dataset back
+ dataset->read (static_cast<void*>(check), PredType::NATIVE_INT, DataSpace::ALL, DataSpace::ALL, xfer);
+
+ // Check that the values read are the same as the values written
+ for (i = 0; i < size[0]; i++)
+ for (j = 0; j < size[1]; j++)
+ {
+ int status = check_values (i, j, points[i][j], check[i][j]);
+ if (status == -1)
+ throw Exception("test_compression", "Failed in read");
+ }
+
+ PASSED();
+
+ /*----------------------------------------------------------------------
+ * STEP 4: Write new data over the top of the old data. The new data is
+ * random thus not very compressible, and will cause the chunks to move
+ * around as they grow. We only change values for the left half of the
+ * dataset although we rewrite the whole thing.
+ *----------------------------------------------------------------------
+ */
+ SUBTEST("Compression (modify)");
+
+ for (i=0; i<size[0]; i++)
+ {
+ for (j=0; j<size[1]/2; j++)
+ {
+ points[i][j] = rand ();
+ }
+ }
+ dataset->write (static_cast<void*>(points), PredType::NATIVE_INT, DataSpace::ALL, DataSpace::ALL, xfer);
+
+ // Read the dataset back and check it
+ dataset->read (static_cast<void*>(check), PredType::NATIVE_INT, DataSpace::ALL, DataSpace::ALL, xfer);
+
+ // Check that the values read are the same as the values written
+ for (i = 0; i < size[0]; i++)
+ for (j = 0; j < size[1]; j++)
+ {
+ int status = check_values (i, j, points[i][j], check[i][j]);
+ if (status == -1)
+ throw Exception("test_compression", "Failed in modify");
+ }
+
+ PASSED();
+
+ /*----------------------------------------------------------------------
+ * STEP 5: Close the dataset and then open it and read it again. This
+ * insures that the compression message is picked up properly from the
+ * object header.
+ *----------------------------------------------------------------------
+ */
+ SUBTEST("Compression (re-open)");
+
+ // close this dataset to reuse the var
+ delete dataset;
+
+ dataset = new DataSet (file.openDataSet (DSET_COMPRESS_NAME));
+ dataset->read (static_cast<void*>(check), PredType::NATIVE_INT, DataSpace::ALL, DataSpace::ALL, xfer);
+
+ // Check that the values read are the same as the values written
+ for (i = 0; i < size[0]; i++)
+ for (j = 0; j < size[1]; j++)
+ {
+ int status = check_values (i, j, points[i][j], check[i][j]);
+ if (status == -1)
+ throw Exception("test_compression", "Failed in re-open");
+ }
+
+ PASSED();
+
+
+ /*----------------------------------------------------------------------
+ * STEP 6: Test partial I/O by writing to and then reading from a
+ * hyperslab of the dataset. The hyperslab does not line up on chunk
+ * boundaries (we know that case already works from above tests).
+ *----------------------------------------------------------------------
+ */
+ SUBTEST("Compression (partial I/O)");
+
+ const hsize_t hs_size[2] = {4, 50};
+ const hsize_t hs_offset[2] = {7, 30};
+ for (i = 0; i < hs_size[0]; i++) {
+ for (j = 0; j < hs_size[1]; j++) {
+ points[hs_offset[0]+i][hs_offset[1]+j] = rand ();
+ }
+ }
+ space1.selectHyperslab( H5S_SELECT_SET, hs_size, hs_offset );
+ dataset->write (static_cast<void*>(points), PredType::NATIVE_INT, space1, space1, xfer);
+ dataset->read (static_cast<void*>(check), PredType::NATIVE_INT, space1, space1, xfer);
+
+ // Check that the values read are the same as the values written
+ for (i=0; i<hs_size[0]; i++) {
+ for (j=0; j<hs_size[1]; j++) {
+ if (points[hs_offset[0]+i][hs_offset[1]+j] !=
+ check[hs_offset[0]+i][hs_offset[1]+j]) {
+ H5_FAILED();
+ cerr << " Read different values than written.\n" << endl;
+ cerr << " At index " << static_cast<unsigned long>((hs_offset[0]+i)) <<
+ "," << static_cast<unsigned long>((hs_offset[1]+j)) << endl;
+
+ cerr << " At original: " << static_cast<int>(points[hs_offset[0]+i][hs_offset[1]+j]) << endl;
+ cerr << " At returned: " << static_cast<int>(check[hs_offset[0]+i][hs_offset[1]+j]) << endl;
+ throw Exception("test_compression", "Failed in partial I/O");
+ }
+ } // for j
+ } // for i
+
+ delete dataset;
+ dataset = NULL;
+
+ PASSED();
#else
- SUBTEST("deflate filter");
- SKIPPED();
- cerr << not_supported << endl;
+ SUBTEST("deflate filter");
+ SKIPPED();
+ cerr << not_supported << endl;
#endif
- /*----------------------------------------------------------------------
- * STEP 7: Register an application-defined compression method and use it
- * to write and then read the dataset.
- *----------------------------------------------------------------------
- */
- SUBTEST("Compression (app-defined method)");
+ /*----------------------------------------------------------------------
+ * STEP 7: Register an application-defined compression method and use it
+ * to write and then read the dataset.
+ *----------------------------------------------------------------------
+ */
+ SUBTEST("Compression (app-defined method)");
if (H5Zregister (H5Z_BOGUS)<0)
- throw Exception("test_compression", "Failed in app-defined method");
- if (H5Pset_filter (dscreatplist.getId(), H5Z_FILTER_BOGUS, 0, 0, NULL)<0)
- throw Exception("test_compression", "Failed in app-defined method");
- dscreatplist.setFilter (H5Z_FILTER_BOGUS, 0, 0, NULL);
-
- DataSpace space2 (2, size, NULL);
- dataset = new DataSet (file.createDataSet (DSET_BOGUS_NAME, PredType::NATIVE_INT, space2, dscreatplist));
-
- dataset->write (static_cast<void*>(points), PredType::NATIVE_INT, DataSpace::ALL, DataSpace::ALL, xfer);
- dataset->read (static_cast<void*>(check), PredType::NATIVE_INT, DataSpace::ALL, DataSpace::ALL, xfer);
-
- // Check that the values read are the same as the values written
- for (i = 0; i < size[0]; i++)
- for (j = 0; j < size[1]; j++)
- {
- int status = check_values (i, j, points[i][j], check[i][j]);
- if (status == -1)
- throw Exception("test_compression", "Failed in app-defined method");
- }
-
- PASSED();
-
- /*----------------------------------------------------------------------
- * Cleanup
- *----------------------------------------------------------------------
- */
- delete dataset;
- delete [] tconv_buf;
- return 0;
+ throw Exception("test_compression", "Failed in app-defined method");
+ if (H5Pset_filter (dscreatplist.getId(), H5Z_FILTER_BOGUS, 0, 0, NULL)<0)
+ throw Exception("test_compression", "Failed in app-defined method");
+ dscreatplist.setFilter (H5Z_FILTER_BOGUS, 0, 0, NULL);
+
+ DataSpace space2 (2, size, NULL);
+ dataset = new DataSet (file.createDataSet (DSET_BOGUS_NAME, PredType::NATIVE_INT, space2, dscreatplist));
+
+ dataset->write (static_cast<void*>(points), PredType::NATIVE_INT, DataSpace::ALL, DataSpace::ALL, xfer);
+ dataset->read (static_cast<void*>(check), PredType::NATIVE_INT, DataSpace::ALL, DataSpace::ALL, xfer);
+
+ // Check that the values read are the same as the values written
+ for (i = 0; i < size[0]; i++)
+ for (j = 0; j < size[1]; j++)
+ {
+ int status = check_values (i, j, points[i][j], check[i][j]);
+ if (status == -1)
+ throw Exception("test_compression", "Failed in app-defined method");
+ }
+
+ PASSED();
+
+ /*----------------------------------------------------------------------
+ * Cleanup
+ *----------------------------------------------------------------------
+ */
+ delete dataset;
+ delete [] tconv_buf;
+ return 0;
} // end try
// catch all dataset, file, space, and plist exceptions
catch (Exception& E)
{
- cerr << " FAILED" << endl;
- cerr << " <<< " << E.getDetailMsg() << " >>>" << endl << endl;
-
- // clean up and return with failure
- if (dataset != NULL)
- delete dataset;
- if (tconv_buf)
- delete [] tconv_buf;
- return -1;
+ cerr << " FAILED" << endl;
+ cerr << " <<< " << E.getDetailMsg() << " >>>" << endl << endl;
+
+ // clean up and return with failure
+ if (dataset != NULL)
+ delete dataset;
+ if (tconv_buf)
+ delete [] tconv_buf;
+ return -1;
}
} // test_compression
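Stripped of the read-back verification, the deflate setup that test_compression exercises above reduces to the property-list plumbing below. This is only a condensed sketch: chunking must be enabled before deflate can be requested, and the dataset name mirrors DSET_COMPRESS_NAME from the test.

    #include "H5Cpp.h"
    using namespace H5;

    void create_deflate_dataset(H5File& file)
    {
        const hsize_t dims[2]  = {100, 200};
        const hsize_t chunk[2] = {2, 25};

        DataSpace space(2, dims);

        DSetCreatPropList dcpl;
        dcpl.setChunk(2, chunk);   // chunked layout is required for filters
        dcpl.setDeflate(6);        // gzip level 6, as in the test above

        DataSet dset = file.createDataSet("compressed", PredType::NATIVE_INT,
                                          space, dcpl);
    }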
@@ -755,18 +753,18 @@ test_compression(H5File& file)
/*-------------------------------------------------------------------------
* Function: test_nbit_methods
*
- * Purpose: Tests setting nbit compression methods.
+ * Purpose Tests setting nbit compression methods.
*
- * Return: Success: 0
+ * Return Success: 0
*
- * Failure: -1
+ * Failure: -1
*
- * Programmer: Binh-Minh Ribler
- * Friday, April 22, 2016
+ * Programmer Binh-Minh Ribler
+ * Friday, April 22, 2016
*
*-------------------------------------------------------------------------
*/
-const H5std_string DSET_NBIT_NAME("nbit_dataset");
+const H5std_string DSET_NBIT_NAME("nbit_dataset");
const hsize_t DIM1 = 2;
const hsize_t DIM2 = 5;
static herr_t test_nbit_compression(H5File& file)
@@ -787,101 +785,101 @@ static herr_t test_nbit_compression(H5File& file)
try
{
- // Define datatypes of members of compound datatype
- IntType i_type(PredType::NATIVE_INT);
- IntType c_type(PredType::NATIVE_CHAR);
- IntType s_type(PredType::NATIVE_SHORT);
-
- // Create a dataset compound datatype
- CompType cmpd(sizeof(s1_t));
- cmpd.insertMember("i", HOFFSET(s1_t, i), i_type);
- cmpd.insertMember("c", HOFFSET(s1_t, c), c_type);
- cmpd.insertMember("s", HOFFSET(s1_t, s), s_type);
-
- // Create a memory compound datatype
- CompType mem_cmpd(sizeof(s1_t));
- mem_cmpd.insertMember("i", HOFFSET(s1_t, i), i_type);
- mem_cmpd.insertMember("c", HOFFSET(s1_t, c), c_type);
- mem_cmpd.insertMember("s", HOFFSET(s1_t, s), s_type);
-
- // Set order of dataset compound datatype
- //cmpd.setOrder(H5T_ORDER_BE); only for atomic type?
-
- // Create the data space
- DataSpace space(2, size);
-
- // Use nbit filter
- DSetCreatPropList dscreat;
- dscreat.setChunk(2, chunk_size);
- dscreat.setNbit();
-
- // Create the dataset
- DataSet dataset(file.createDataSet(DSET_NBIT_NAME, cmpd, space, dscreat));
-
- // Initialize data, assuming size of long long >= size of member datatypes
- for (i = 0; i < size[0]; i++)
- for (j = 0; j < size[1]; j++)
- {
- orig_data[i][j].i = static_cast<int>(i * j);
- orig_data[i][j].c = static_cast<char>('a' + i);
- orig_data[i][j].s = static_cast<short>(i + j);
-
- // Some even-numbered integer values are negative
- if ((i*size[1]+j+1)%2 == 0) {
- orig_data[i][j].i = -orig_data[i][j].i;
- orig_data[i][j].s = static_cast<short>(-orig_data[i][j].s);
- }
- }
-
- // Write to the dataset
- dataset.write(static_cast<void*>(orig_data), mem_cmpd);
-
- // Read the dataset back */
- dataset.read(static_cast<void*>(new_data), mem_cmpd);
-
- // Check that the values read are the same as the values written.
- for (i = 0; i < size[0]; i++)
- for (j = 0; j < size[1]; j++)
- {
- if((new_data[i][j].i != orig_data[i][j].i) ||
- (new_data[i][j].c != orig_data[i][j].c) ||
- (new_data[i][j].s != orig_data[i][j].s))
- {
- H5_FAILED();
- printf(" Read different values than written.\n");
- printf(" At index %lu,%lu\n", static_cast<unsigned long>(i), static_cast<unsigned long>(j));
- }
+ // Define datatypes of members of compound datatype
+ IntType i_type(PredType::NATIVE_INT);
+ IntType c_type(PredType::NATIVE_CHAR);
+ IntType s_type(PredType::NATIVE_SHORT);
+
+ // Create a dataset compound datatype
+ CompType cmpd(sizeof(s1_t));
+ cmpd.insertMember("i", HOFFSET(s1_t, i), i_type);
+ cmpd.insertMember("c", HOFFSET(s1_t, c), c_type);
+ cmpd.insertMember("s", HOFFSET(s1_t, s), s_type);
+
+ // Create a memory compound datatype
+ CompType mem_cmpd(sizeof(s1_t));
+ mem_cmpd.insertMember("i", HOFFSET(s1_t, i), i_type);
+ mem_cmpd.insertMember("c", HOFFSET(s1_t, c), c_type);
+ mem_cmpd.insertMember("s", HOFFSET(s1_t, s), s_type);
+
+ // Set order of dataset compound datatype
+ //cmpd.setOrder(H5T_ORDER_BE); only for atomic type?
+
+ // Create the data space
+ DataSpace space(2, size);
+
+ // Use nbit filter
+ DSetCreatPropList dscreat;
+ dscreat.setChunk(2, chunk_size);
+ dscreat.setNbit();
+
+ // Create the dataset
+ DataSet dataset(file.createDataSet(DSET_NBIT_NAME, cmpd, space, dscreat));
+
+ // Initialize data, assuming size of long long >= size of member datatypes
+ for (i = 0; i < size[0]; i++)
+ for (j = 0; j < size[1]; j++)
+ {
+ orig_data[i][j].i = static_cast<int>(i * j);
+ orig_data[i][j].c = static_cast<char>('a' + i);
+ orig_data[i][j].s = static_cast<short>(i + j);
+
+ // Some even-numbered integer values are negative
+ if ((i*size[1]+j+1)%2 == 0) {
+ orig_data[i][j].i = -orig_data[i][j].i;
+ orig_data[i][j].s = static_cast<short>(-orig_data[i][j].s);
+ }
+ }
+
+ // Write to the dataset
+ dataset.write(static_cast<void*>(orig_data), mem_cmpd);
+
+ // Read the dataset back
+ dataset.read(static_cast<void*>(new_data), mem_cmpd);
+
+ // Check that the values read are the same as the values written.
+ for (i = 0; i < size[0]; i++)
+ for (j = 0; j < size[1]; j++)
+ {
+ if((new_data[i][j].i != orig_data[i][j].i) ||
+ (new_data[i][j].c != orig_data[i][j].c) ||
+ (new_data[i][j].s != orig_data[i][j].s))
+ {
+ H5_FAILED();
+ printf(" Read different values than written.\n");
+ printf(" At index %lu,%lu\n", static_cast<unsigned long>(i), static_cast<unsigned long>(j));
+ }
}
- PASSED();
- return 0;
+ PASSED();
+ return 0;
} // end try block
// catch all dataset, file, space, and plist exceptions
catch (Exception& E)
{
- cerr << " FAILED" << endl;
- cerr << " <<< " << E.getDetailMsg() << " >>>" << endl << endl;
+ cerr << " FAILED" << endl;
+ cerr << " <<< " << E.getDetailMsg() << " >>>" << endl << endl;
- return -1;
+ return -1;
}
} // test_nbit_compression
/*-------------------------------------------------------------------------
- * Function: test_multiopen
+ * Function: test_multiopen
*
- * Purpose: Tests that a bug no longer exists. If a dataset is opened
- * twice and one of the handles is used to extend the dataset,
- * then the other handle should return the new size when
- * queried.
+ * Purpose Tests that a bug no longer exists. If a dataset is opened
+ * twice and one of the handles is used to extend the dataset,
+ * then the other handle should return the new size when
+ * queried.
*
- * Return: Success: 0
+ * Return Success: 0
*
- * Failure: -1
+ * Failure: -1
*
- * Programmer: Binh-Minh Ribler (using C version)
- * Saturday, February 17, 2001
+ * Programmer Binh-Minh Ribler (using C version)
+ * Saturday, February 17, 2001
*
* Modifications:
*
@@ -896,74 +894,75 @@ test_multiopen (H5File& file)
DataSpace* space = NULL;
try {
- // Create a dataset creation property list
- DSetCreatPropList dcpl;
+ // Create a dataset creation property list
+ DSetCreatPropList dcpl;
- // Set chunk size to given size
- hsize_t cur_size[1] = {10};
- dcpl.setChunk (1, cur_size);
+ // Set chunk size to given size
+ hsize_t cur_size[1] = {10};
+ dcpl.setChunk (1, cur_size);
- // Create a simple data space with unlimited size
- hsize_t max_size[1] = {H5S_UNLIMITED};
- space = new DataSpace (1, cur_size, max_size);
+ // Create a simple data space with unlimited size
+ hsize_t max_size[1] = {H5S_UNLIMITED};
+ space = new DataSpace (1, cur_size, max_size);
- // Create first dataset
- DataSet dset1 = file.createDataSet ("multiopen", PredType::NATIVE_INT, *space, dcpl);
+ // Create first dataset
+ DataSet dset1 = file.createDataSet ("multiopen", PredType::NATIVE_INT, *space, dcpl);
- // Open again the first dataset from the file to another DataSet object.
- DataSet dset2 = file.openDataSet ("multiopen");
+ // Open again the first dataset from the file to another DataSet object.
+ DataSet dset2 = file.openDataSet ("multiopen");
- // Relieve the dataspace
- delete space;
- space = NULL;
+ // Relieve the dataspace
+ delete space;
+ space = NULL;
- // Extend the dimensionality of the first dataset
- cur_size[0] = 20;
- dset1.extend (cur_size);
+ // Extend the dimensionality of the first dataset
+ cur_size[0] = 20;
+ dset1.extend (cur_size);
- // Get the size from the second handle
- space = new DataSpace (dset2.getSpace());
+ // Get the size from the second handle
+ space = new DataSpace (dset2.getSpace());
- hsize_t tmp_size[1];
- space->getSimpleExtentDims (tmp_size);
- if (cur_size[0]!=tmp_size[0])
- {
- cerr << " Got " << static_cast<int>(tmp_size[0]) << " instead of "
- << static_cast<int>(cur_size[0]) << "!" << endl;
- throw Exception("test_multiopen", "Failed in multi-open with extending");
- }
+ hsize_t tmp_size[1];
+ space->getSimpleExtentDims (tmp_size);
+ if (cur_size[0]!=tmp_size[0])
+ {
+ cerr << " Got " << static_cast<int>(tmp_size[0])
+ << " instead of " << static_cast<int>(cur_size[0])
+ << "!" << endl;
+ throw Exception("test_multiopen", "Failed in multi-open with extending");
+ }
- // clean up and return with success
- delete space;
- PASSED();
- return 0;
+ // clean up and return with success
+ delete space;
+ PASSED();
+ return 0;
} // end try block
// catch all dataset, file, space, and plist exceptions
catch (Exception& E)
{
- cerr << " FAILED" << endl;
- cerr << " <<< " << E.getDetailMsg() << " >>>" << endl << endl;
+ cerr << " FAILED" << endl;
+ cerr << " <<< " << E.getDetailMsg() << " >>>" << endl << endl;
- // clean up and return with failure
- if (space != NULL)
- delete space;
- return -1;
+ // clean up and return with failure
+ if (space != NULL)
+ delete space;
+ return -1;
}
} // test_multiopen
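The behavior test_multiopen guards against reduces to a small pattern: open two DataSet handles on the same on-disk dataset, extend through one, and expect the other to report the new extent. A minimal sketch of that pattern, using an illustrative file name and sizes rather than the test's own:

    H5File file("multiopen_sketch.h5", H5F_ACC_TRUNC);

    // Chunked, extendible 1-D dataset
    hsize_t cur[1]     = {10};
    hsize_t maxdims[1] = {H5S_UNLIMITED};
    DSetCreatPropList dcpl;
    dcpl.setChunk(1, cur);
    DataSpace space(1, cur, maxdims);
    DataSet d1 = file.createDataSet("multiopen", PredType::NATIVE_INT, space, dcpl);

    // Second handle to the same dataset
    DataSet d2 = file.openDataSet("multiopen");

    // Extend through the first handle
    cur[0] = 20;
    d1.extend(cur);

    // The second handle must see the new size
    hsize_t seen[1];
    d2.getSpace().getSimpleExtentDims(seen);
    // expect seen[0] == cur[0]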
/*-------------------------------------------------------------------------
- * Function: test_types
+ * Function: test_types
*
- * Purpose: Test various types - should be moved to dtypes.cpp
+ * Purpose Test various types - should be moved to dtypes.cpp
*
- * Return: Success: 0
+ * Return Success: 0
*
- * Failure: -1
+ * Failure: -1
*
- * Programmer: Binh-Minh Ribler (using C version)
- * February 17, 2001
+ * Programmer Binh-Minh Ribler (using C version)
+ * February 17, 2001
*
* Modifications:
*
@@ -974,239 +973,320 @@ test_types(H5File& file)
{
SUBTEST("Various datatypes");
- size_t i;
+ size_t i;
DataSet* dset = NULL;
try {
- // Create a group in the file that was passed in from the caller
- Group grp = file.createGroup ("typetests");
-
- /* bitfield_1 */
- unsigned char buf[32];
- hsize_t nelmts = sizeof(buf);
- DataType type;
- try { // block of bitfield_1
- // test copying a predefined type
- type.copy (PredType::STD_B8LE);
-
- // Test copying a user-defined type using DataType::copy
- DataType copied_type;
- copied_type.copy(type);
-
- // Test copying a user-defined type using DataType::operator=
- DataType another_copied_type;
- another_copied_type = type;
-
- // Test copying a user-defined int type using DataType::operator=
- IntType orig_int(PredType::STD_B8LE);
- DataType generic_type;
- generic_type = orig_int;
-
- // Test copying an integer predefined type
- IntType new_int_type(PredType::STD_B8LE);
-
- // Test copying an int predefined type using DataType::operator=
- IntType another_int_type;
- another_int_type = new_int_type;
-
- DataSpace space (1, &nelmts);
- dset = new DataSet(grp.createDataSet("bitfield_1", type, space));
-
- // Fill buffer
- for (i=0; i<sizeof buf; i++)
- buf[i] = static_cast<unsigned char>(0xff) ^ static_cast<unsigned char>(i);
-
- // Write data from buf using all default dataspaces and property list
- dset->write (buf, type);
-
- // no failure in bitfield_1, close this dataset
- delete dset;
- } // end try block of bitfield_1
-
- // catch exceptions thrown in try block of bitfield_1
- catch (Exception& E)
- {
- cerr << " FAILED" << endl;
- cerr << " <<< " << "bitfield_1: " << E.getFuncName()
- << " - " << E.getDetailMsg() << " >>>" << endl << endl;
- if (dset != NULL)
- delete dset;
- return -1;
- }
-
- /* bitfield_2 */
- nelmts = sizeof(buf)/2;
- try { // bitfield_2 block
- type.copy (PredType::STD_B16LE);
- DataSpace space (1, &nelmts);
- dset = new DataSet(grp.createDataSet("bitfield_2", type, space));
-
- // Fill buffer
- for (i=0; i<sizeof(buf); i++)
- buf[i] = static_cast<unsigned char>(0xff) ^ static_cast<unsigned char>(i);
-
- // Write data from buf using all default dataspaces and property
- // list; if writing fails, deallocate dset and return.
- dset->write (buf, type);
-
- // no failure in bitfield_2, close this dataset and reset for
- // variable reuse
- delete dset;
- dset = NULL;
- } // end try block of bitfield_2
-
- // catch exceptions thrown in try block of bitfield_2
- catch (Exception& E)
- {
- cerr << " FAILED" << endl;
- cerr << " <<< " << "bitfield_2: " << E.getFuncName()
- << " - " << E.getDetailMsg() << " >>>" << endl << endl;
- if (dset != NULL)
- delete dset;
- throw E; // propagate the exception
- }
-
- /* opaque_1 */
- DataType* optype = NULL;
- try { // opaque_1 block
- optype = new DataType(H5T_OPAQUE, 1);
- nelmts = sizeof(buf);
- DataSpace space (1, &nelmts);
- optype->setTag ("testing 1-byte opaque type");
- dset = new DataSet(grp.createDataSet("opaque_1", *optype, space));
-
- // Fill buffer
- for (i=0; i<sizeof buf; i++)
- buf[i] = static_cast<unsigned char>(0xff) ^ static_cast<unsigned char>(i);
-
- // Write data from buf using all default dataspaces and property
- // list; if writing fails, deallocate dset and return.
- dset->write (buf, *optype);
-
- // no failure in opaque_1
- delete dset; dset = NULL;
- delete optype; optype = NULL;
- } // end try block of opaque_1
-
- // catch exceptions thrown in try block of opaque_1
- catch (Exception& E)
- {
- cerr << " FAILED" << endl;
- cerr << " <<< " << "opaque_1: " << E.getFuncName()
- << " - " << E.getDetailMsg() << " >>>" << endl << endl;
- if (dset != NULL)
- delete dset;
- if (optype != NULL)
- delete optype;
- throw E; // propagate the exception
- }
-
- /* opaque_2 */
- try { // block opaque_2
- nelmts = sizeof(buf)/4;
- DataSpace space (1, &nelmts);
- optype = new DataType(H5T_OPAQUE, 4);
- optype->setTag ("testing 4-byte opaque type");
- dset = new DataSet(grp.createDataSet("opaque_2", *optype, space));
-
- // Fill buffer
- for (i=0; i<sizeof(buf); i++)
- buf[i] = static_cast<unsigned char>(0xff) ^ static_cast<unsigned char>(i);
-
- // Write data from buf using all default dataspaces and property
- // list; if writing fails, deallocate dset and return.
- dset->write (buf, *optype);
-
- // no failure in opaque_1
- delete dset; dset = NULL;
- delete optype; optype = NULL;
- } //end try block of opaque_2
-
- // catch exceptions thrown in try block of opaque_2
- catch (Exception& E)
- {
- cerr << " FAILED" << endl;
- cerr << " <<< " << "opaque_2: " << E.getFuncName()
- << " - " << E.getDetailMsg() << " >>>" << endl << endl;
- if (dset != NULL)
- delete dset;
- if (optype != NULL)
- delete optype;
- throw E; // propagate the exception
- }
-
- PASSED();
- return 0;
+ // Create a group in the file that was passed in from the caller
+ Group grp = file.createGroup ("typetests");
+
+ /* bitfield_1 */
+ unsigned char buf[32];
+ hsize_t nelmts = sizeof(buf);
+ DataType type;
+ try { // block of bitfield_1
+ // test copying a predefined type
+ type.copy (PredType::STD_B8LE);
+
+ // Test copying a user-defined type using DataType::copy
+ DataType copied_type;
+ copied_type.copy(type);
+
+ // Test copying a user-defined type using DataType::operator=
+ DataType another_copied_type;
+ another_copied_type = type;
+
+ // Test copying a user-defined int type using DataType::operator=
+ IntType orig_int(PredType::STD_B8LE);
+ DataType generic_type;
+ generic_type = orig_int;
+
+ // Test copying an integer predefined type
+ IntType new_int_type(PredType::STD_B8LE);
+
+ // Test copying an int predefined type using DataType::operator=
+ IntType another_int_type;
+ another_int_type = new_int_type;
+
+ DataSpace space (1, &nelmts);
+ dset = new DataSet(grp.createDataSet("bitfield_1", type, space));
+
+ // Fill buffer
+ for (i=0; i<sizeof buf; i++)
+ buf[i] = static_cast<unsigned char>(0xff) ^ static_cast<unsigned char>(i);
+
+ // Write data from buf using all default dataspaces and property list
+ dset->write (buf, type);
+
+ // no failure in bitfield_1, close this dataset
+ delete dset;
+ } // end try block of bitfield_1
+
+ // catch exceptions thrown in try block of bitfield_1
+ catch (Exception& E)
+ {
+ cerr << " FAILED" << endl;
+ cerr << " <<< " << "bitfield_1: " << E.getFuncName()
+ << " - " << E.getDetailMsg() << " >>>" << endl << endl;
+ if (dset != NULL)
+ delete dset;
+ return -1;
+ }
+
+ /* bitfield_2 */
+ nelmts = sizeof(buf)/2;
+ try { // bitfield_2 block
+ type.copy (PredType::STD_B16LE);
+ DataSpace space (1, &nelmts);
+ dset = new DataSet(grp.createDataSet("bitfield_2", type, space));
+
+ // Fill buffer
+ for (i=0; i<sizeof(buf); i++)
+ buf[i] = static_cast<unsigned char>(0xff) ^ static_cast<unsigned char>(i);
+
+ // Write data from buf using all default dataspaces and property
+ // list; if writing fails, deallocate dset and return.
+ dset->write (buf, type);
+
+ // no failure in bitfield_2, close this dataset and reset for
+ // variable reuse
+ delete dset;
+ dset = NULL;
+ } // end try block of bitfield_2
+
+ // catch exceptions thrown in try block of bitfield_2
+ catch (Exception& E)
+ {
+ cerr << " FAILED" << endl;
+ cerr << " <<< " << "bitfield_2: " << E.getFuncName()
+ << " - " << E.getDetailMsg() << " >>>" << endl << endl;
+ if (dset != NULL)
+ delete dset;
+ throw E; // propagate the exception
+ }
+
+ /* opaque_1 */
+ DataType* optype = NULL;
+ try { // opaque_1 block
+ optype = new DataType(H5T_OPAQUE, 1);
+ nelmts = sizeof(buf);
+ DataSpace space (1, &nelmts);
+ optype->setTag ("testing 1-byte opaque type");
+ dset = new DataSet(grp.createDataSet("opaque_1", *optype, space));
+
+ // Fill buffer
+ for (i=0; i<sizeof buf; i++)
+ buf[i] = static_cast<unsigned char>(0xff) ^ static_cast<unsigned char>(i);
+
+ // Write data from buf using all default dataspaces and property
+ // list; if writing fails, deallocate dset and return.
+ dset->write (buf, *optype);
+
+ // no failure in opaque_1
+ delete dset; dset = NULL;
+ delete optype; optype = NULL;
+ } // end try block of opaque_1
+
+ // catch exceptions thrown in try block of opaque_1
+ catch (Exception& E)
+ {
+ cerr << " FAILED" << endl;
+ cerr << " <<< " << "opaque_1: " << E.getFuncName()
+ << " - " << E.getDetailMsg() << " >>>" << endl << endl;
+ if (dset != NULL)
+ delete dset;
+ if (optype != NULL)
+ delete optype;
+ throw E; // propagate the exception
+ }
+
+ /* opaque_2 */
+ try { // block opaque_2
+ nelmts = sizeof(buf)/4;
+ DataSpace space (1, &nelmts);
+ optype = new DataType(H5T_OPAQUE, 4);
+ optype->setTag ("testing 4-byte opaque type");
+ dset = new DataSet(grp.createDataSet("opaque_2", *optype, space));
+
+ // Fill buffer
+ for (i=0; i<sizeof(buf); i++)
+ buf[i] = static_cast<unsigned char>(0xff) ^ static_cast<unsigned char>(i);
+
+ // Write data from buf using all default dataspaces and property
+ // list; if writing fails, deallocate dset and return.
+ dset->write (buf, *optype);
+
+            // no failure in opaque_2
+ delete dset; dset = NULL;
+ delete optype; optype = NULL;
+ } //end try block of opaque_2
+
+ // catch exceptions thrown in try block of opaque_2
+ catch (Exception& E)
+ {
+ cerr << " FAILED" << endl;
+ cerr << " <<< " << "opaque_2: " << E.getFuncName()
+ << " - " << E.getDetailMsg() << " >>>" << endl << endl;
+ if (dset != NULL)
+ delete dset;
+ if (optype != NULL)
+ delete optype;
+ throw E; // propagate the exception
+ }
+
+ PASSED();
+ return 0;
} // end top try block
catch (Exception& E)
{
- return -1;
+ return -1;
}
} // test_types
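The datatype work in test_types comes down to copying predefined types (via DataType::copy and via operator=) and tagging opaque types. Condensed, with illustrative variable names:

    // Copy a predefined bitfield type, then duplicate it two ways
    DataType type;
    type.copy(PredType::STD_B8LE);

    DataType copied_type;
    copied_type.copy(type);        // DataType::copy from a user-defined type

    DataType assigned_type;
    assigned_type = type;          // DataType::operator= also yields an independent copy

    // Opaque type with a descriptive tag
    DataType opaque(H5T_OPAQUE, 4);
    opaque.setTag("testing 4-byte opaque type");

    opaque.close();
    assigned_type.close();
    copied_type.close();
    type.close();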
/*-------------------------------------------------------------------------
- * Function: test_dset
+ * Function: test_virtual
+ *
+ * Purpose: Tests fixed, unlimited, and printf selections in the same
+ * VDS
+ *
+ * Return: Success: 0
+ *
+ * Failure: number of errors
+ *
+ * Programmer: Binh-Minh Ribler
+ * Friday, March 10, 2017
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+const int RANK = 2;
+static herr_t
+test_virtual()
+{
+ SUBTEST("DSetCreatPropList::setVirtual");
+
+ try {
+ // Create DCPLs
+ DSetCreatPropList dcpl;
+ DSetCreatPropList srcdcpl;
+
+ // Set fill value
+ char *fill = NULL;
+ dcpl.setFillValue(PredType::NATIVE_INT, &fill);
+
+ // Set chunk dimensions
+ hsize_t cdims[RANK];
+ cdims[0] = 2;
+ cdims[1] = 2;
+ srcdcpl.setChunk(RANK, cdims);
+
+ // Create memory space
+ hsize_t mdims[RANK];
+ mdims[0] = 10;
+ mdims[1] = 10;
+ DataSpace memspace(RANK, mdims);
+
+ // Get the current layout, should be default, H5D_CONTIGUOUS
+ H5D_layout_t layout = dcpl.getLayout();
+ verify_val(layout, H5D_CONTIGUOUS, "DSetCreatPropList::getLayout", __LINE__, __FILE__);
+
+ // Create fixed mapping
+ hsize_t dims[RANK];
+ dims[0] = 6;
+ dims[1] = 6;
+ DataSpace vspace(RANK, dims, mdims);
+
+ hsize_t start[RANK]; // Hyperslab start
+ hsize_t count[RANK]; // Hyperslab count
+ start[0] = start[1] = 3;
+ count[0] = count[1] = 3;
+ vspace.selectHyperslab(H5S_SELECT_SET, count, start);
+
+ DataSpace srcspace(RANK, count);
+
+ H5std_string src_file = "src_file_map.h5";
+ H5std_string src_dset = "src_dset_fixed";
+ dcpl.setVirtual(vspace, src_file, src_dset, srcspace);
+
+ // Get and verify the new layout
+ layout = dcpl.getLayout();
+ verify_val(layout, H5D_VIRTUAL, "DSetCreatPropList::getLayout", __LINE__, __FILE__);
+
+ PASSED();
+ return 0;
+ } // end top try block
+
+ catch (Exception& E)
+ {
+ return -1;
+ }
+} // test_virtual
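The fixed mapping in test_virtual follows the usual VDS recipe: select a hyperslab in the virtual dataspace, describe the source extent, and register the mapping on the dataset creation property list. Reduced to the essential calls (dimensions mirror the test; the source file and dataset only need to exist when the VDS is actually read):

    hsize_t vdims[2] = {6, 6};        // virtual dataset extent
    hsize_t start[2] = {3, 3};        // region of the VDS being mapped
    hsize_t count[2] = {3, 3};
    hsize_t sdims[2] = {3, 3};        // source dataset extent

    DataSpace vspace(2, vdims);
    vspace.selectHyperslab(H5S_SELECT_SET, count, start);
    DataSpace srcspace(2, sdims);

    DSetCreatPropList dcpl;
    dcpl.setVirtual(vspace, "src_file_map.h5", "src_dset_fixed", srcspace);
    // dcpl.getLayout() now reports H5D_VIRTUAL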
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_dset
*
- * Purpose: Tests the dataset interface (H5D)
+ * Purpose Tests the dataset interface (H5D)
*
- * Return: Success: 0
+ * Return Success: 0
*
- * Failure: -1
+ * Failure: -1
*
- * Programmer: Binh-Minh Ribler (using C version)
- * Friday, January 5, 2001
+ * Programmer Binh-Minh Ribler (using C version)
+ * Friday, January 5, 2001
*
* Modifications:
- * Nov 12, 01:
- * - moved h5_cleanup to outside of try block because
- * dataset.h5 cannot be removed until "file" is out of
- * scope and dataset.h5 is closed.
- * Feb 20, 05:
- * - cleanup_dsets took care of the cleanup now.
+ * Nov 12, 01:
+ * - moved h5_cleanup to outside of try block because
+ * dataset.h5 cannot be removed until "file" is out of
+ * scope and dataset.h5 is closed.
+ * Feb 20, 05:
+ *        - cleanup_dsets takes care of the cleanup now.
*
*-------------------------------------------------------------------------
*/
extern "C"
void test_dset()
{
- hid_t fapl_id;
+ hid_t fapl_id;
fapl_id = h5_fileaccess(); // in h5test.c, returns a file access template
-
- int nerrors=0; // keep track of number of failures occurr
+    int nerrors=0;   // keep track of the number of failures that occur
try
{
- // Use the file access template id to create a file access prop.
- // list object to pass in H5File::H5File
- FileAccPropList fapl(fapl_id);
+ // Use the file access template id to create a file access prop.
+ // list object to pass in H5File::H5File
+ FileAccPropList fapl(fapl_id);
- H5File file(FILE1, H5F_ACC_TRUNC, FileCreatPropList::DEFAULT, fapl);
+ H5File file(FILE1, H5F_ACC_TRUNC, FileCreatPropList::DEFAULT, fapl);
- // Cause the library to emit initial messages
- Group grp = file.createGroup( "emit diagnostics", 0);
- grp.setComment("Causes diagnostic messages to be emitted");
+ // Cause the library to emit initial messages
+ Group grp = file.createGroup( "emit diagnostics", 0);
+ grp.setComment("Causes diagnostic messages to be emitted");
- nerrors += test_create(file) < 0 ? 1:0;
- nerrors += test_simple_io(file) < 0 ? 1:0;
- nerrors += test_tconv(file) < 0 ? 1:0;
- nerrors += test_compression(file) < 0 ? 1:0;
- nerrors += test_nbit_compression(file) < 0 ? 1:0;
- nerrors += test_multiopen (file) < 0 ? 1:0;
- nerrors += test_types(file) < 0 ? 1:0;
+ nerrors += test_create(file) < 0 ? 1:0;
+ nerrors += test_simple_io(file) < 0 ? 1:0;
+ nerrors += test_tconv(file) < 0 ? 1:0;
+ nerrors += test_compression(file) < 0 ? 1:0;
+ nerrors += test_nbit_compression(file) < 0 ? 1:0;
+ nerrors += test_multiopen (file) < 0 ? 1:0;
+ nerrors += test_types(file) < 0 ? 1:0;
+ nerrors += test_virtual() < 0 ? 1:0;
- // Close group "emit diagnostics".
- grp.close();
+ // Close group "emit diagnostics".
+ grp.close();
- // Close the file before testing data size.
- file.close();
+ // Close the file before testing data size.
+ file.close();
- nerrors += test_datasize(fapl) <0 ? 1:0;
+ nerrors += test_datasize(fapl) <0 ? 1:0;
}
catch (Exception& E)
{
- test_report(nerrors, H5std_string(" Dataset"));
+ test_report(nerrors, H5std_string(" Dataset"));
}
// Clean up data file
@@ -1216,11 +1296,11 @@ void test_dset()
/*-------------------------------------------------------------------------
* Function: cleanup_dsets
*
- * Purpose: Cleanup temporary test files
+ * Purpose Cleanup temporary test files
*
- * Return: none
+ * Return none
*
- * Programmer: (use C version)
+ * Programmer (use C version)
*
* Modifications:
*
diff --git a/c++/test/h5cpputil.cpp b/c++/test/h5cpputil.cpp
index 95d29a8..f0c403c 100644
--- a/c++/test/h5cpputil.cpp
+++ b/c++/test/h5cpputil.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*****************************************************************************
@@ -37,18 +35,18 @@ using namespace H5;
#include "h5cpputil.h" // C++ utilility header file
/*-------------------------------------------------------------------------
- * Function: test_report
+ * Function: test_report
*
- * Purpose: Prints out the number of errors for the tests indicated
- * by 'testname,' if there were any failures occurred. If
- * no failure, test_report prints out the tests passed message.
+ * Purpose Prints out the number of errors for the tests indicated
+ *              by 'testname,' if any failures occurred. If
+ * no failure, test_report prints out the tests passed message.
*
- * Return: if any failure has occurred: 1
+ * Return if any failure has occurred: 1
*
- * if no failure occurs: 0
+ * if no failure occurs: 0
*
- * Programmer: Binh-Minh Ribler (using C code segment for reporting tests)
- * Friday, February 6, 2001
+ * Programmer Binh-Minh Ribler (using C code segment for reporting tests)
+ * Friday, February 6, 2001
*
* Modifications:
*
@@ -59,12 +57,12 @@ int test_report( int nerrors, const H5std_string& testname )
if (nerrors)
{
nerrors = MAX(1, nerrors);
- if (1 == nerrors)
- cerr << "***** " << nerrors << testname
- << " TEST FAILED! *****" << endl;
- else
- cerr << "***** " << nerrors << testname
- << " TESTS FAILED! *****" << endl;
+ if (1 == nerrors)
+ cerr << "***** " << nerrors << testname
+ << " TEST FAILED! *****" << endl;
+ else
+ cerr << "***** " << nerrors << testname
+ << " TESTS FAILED! *****" << endl;
return 1;
}
else
@@ -75,68 +73,68 @@ int test_report( int nerrors, const H5std_string& testname )
}
/*-------------------------------------------------------------------------
- * Function: issue_fail_msg
+ * Function: issue_fail_msg
*
- * Purpose: Displays that a function has failed with its location.
+ * Purpose Displays that a function has failed with its location.
*
- * Return: None
+ * Return None
*
- * Programmer: Binh-Minh Ribler (copied and modified macro CHECK from C)
- * Monday, December 20, 2004
+ * Programmer Binh-Minh Ribler (copied and modified macro CHECK from C)
+ * Monday, December 20, 2004
*
*-------------------------------------------------------------------------
*/
void issue_fail_msg(const char* where, int line, const char* file_name,
- const char* message)
+ const char* message)
{
if (GetTestVerbosity()>=VERBO_HI)
{
- cerr << endl;
+ cerr << endl;
cerr << ">>> FAILED in " << where << " at line " << line
<< " in " << file_name << " - " << message << endl << endl;
}
}
/*-------------------------------------------------------------------------
- * Function: issue_fail_msg
+ * Function: issue_fail_msg
*
- * Purpose: Displays that a function has failed with its location.
+ * Purpose Displays that a function has failed with its location.
*
- * Return: None
+ * Return None
*
- * Programmer: Binh-Minh Ribler (copied and modified macro CHECK from C)
- * Monday, December 20, 2004
+ * Programmer Binh-Minh Ribler (copied and modified macro CHECK from C)
+ * Monday, December 20, 2004
*
*-------------------------------------------------------------------------
*/
void issue_fail_msg(const char* where, int line, const char* file_name,
- const char* func_name, const char* message)
+ const char* func_name, const char* message)
{
if (GetTestVerbosity()>=VERBO_HI)
{
- cerr << endl;
+ cerr << endl;
cerr << ">>> FAILED in " << where << ": " << func_name << endl <<
- " at line " << line << " in " << file_name << endl <<
- " C library detail: " << message << endl << endl;
+ " at line " << line << " in " << file_name << endl <<
+ " C library detail: " << message << endl << endl;
}
}
/*-------------------------------------------------------------------------
- * Function: check_values
+ * Function: check_values
*
- * Purpose: Checks a read value against the written value. If they are
- * different, the function will print out a message and the
- * different values. This function is made to reuse the code
- * segment that is used in various places throughout
- * the test code. Where the C version of this code segment
- * "goto error," this function will return -1.
+ * Purpose Checks a read value against the written value. If they are
+ * different, the function will print out a message and the
+ * different values. This function is made to reuse the code
+ * segment that is used in various places throughout
+ * the test code. Where the C version of this code segment
+ *              uses "goto error," this function will return -1.
*
- * Return: Success: 0
+ * Return Success: 0
*
- * Failure: -1
+ * Failure: -1
*
- * Programmer: Binh-Minh Ribler (using C code segment for checking values)
- * Friday, February 6, 2001
+ * Programmer Binh-Minh Ribler (using C code segment for checking values)
+ * Friday, February 6, 2001
*
* Modifications:
*
@@ -146,26 +144,26 @@ int check_values (hsize_t i, hsize_t j, int apoint, int acheck)
{
if (apoint != acheck)
{
- cerr << " Read different values than written.\n" << endl;
- cerr << " At index " << static_cast<unsigned long>(i) << "," <<
- static_cast<unsigned long>(j) << endl;
- return -1;
+ cerr << " Read different values than written.\n" << endl;
+ cerr << " At index " << static_cast<unsigned long>(i) << "," <<
+ static_cast<unsigned long>(j) << endl;
+ return -1;
}
return 0;
} // check_values
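check_values is the C++ counterpart of the C tests' "goto error" comparison: compare each element read back against what was written and bail out at the first mismatch. A typical call site looks like this, where DIM0, DIM1, points, and check are placeholders for the caller's own extents and buffers:

    for (hsize_t i = 0; i < DIM0; i++)
        for (hsize_t j = 0; j < DIM1; j++)
            if (check_values(i, j, points[i][j], check[i][j]) < 0)
                return -1;    // message already printed by check_values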
/*-------------------------------------------------------------------------
- * Function: check_values
+ * Function: check_values
*
- * Purpose: Checks a char string pointer for NULL. If it is NULL,
- * the function will print out a message
+ * Purpose Checks a char string pointer for NULL. If it is NULL,
+ * the function will print out a message
*
- * Return: Success: 0
+ * Return Success: 0
*
- * Failure: -1
+ * Failure: -1
*
- * Programmer: Binh-Minh Ribler (using C code segment for checking values)
- * Friday, September 16, 2016
+ * Programmer Binh-Minh Ribler (using C code segment for checking values)
+ * Friday, September 16, 2016
*
*-------------------------------------------------------------------------
*/
@@ -173,26 +171,26 @@ void check_values(const char *value, const char* msg, int line, const char* file
{
if (value == NULL)
{
- cerr << endl;
+ cerr << endl;
cerr << "*** ERROR: " << msg << ", at line " << line << endl;
- IncTestNumErrs();
- throw TestFailedException(file_name, msg);
+ IncTestNumErrs();
+ throw TestFailedException(file_name, msg);
}
}
/*-------------------------------------------------------------------------
- * Function: verify_val (const char*, const char*,...)
+ * Function: verify_val (const char*, const char*,...)
*
- * Purpose: Compares two character strings. If they are
- * different, the function will print out a message and the
- * different values.
+ * Purpose Compares two character strings. If they are
+ * different, the function will print out a message and the
+ * different values.
*
- * Return: Success: 0
+ * Return Success: 0
*
- * Failure: -1
+ * Failure: -1
*
- * Programmer: Binh-Minh Ribler
- * May 2, 2010
+ * Programmer Binh-Minh Ribler
+ * May 2, 2010
*
* Modifications:
*
@@ -225,12 +223,12 @@ InvalidActionException::InvalidActionException():Exception(){}
//--------------------------------------------------------------------------
// Function: InvalidActionException overloaded constructor
//
-// Purpose: Creates an InvalidActionException with the name of the function,
+// Purpose Creates an InvalidActionException with the name of the function,
// which the failure should have occurred but didn't, and a
-// message explaining why it should fail.
+// message explaining why it should fail.
// Parameters
-// func - IN: Name of the function where failure should occur
-// message - IN: Message
+// func - IN: Name of the function where failure should occur
+// message - IN: Message
//--------------------------------------------------------------------------
InvalidActionException::InvalidActionException(const H5std_string func, const H5std_string message) : Exception(func, message) {}
@@ -247,12 +245,12 @@ TestFailedException::TestFailedException():Exception(){}
//--------------------------------------------------------------------------
// Function: TestFailedException overloaded constructor
//
-// Purpose: Creates an TestFailedException with the name of the function,
+// Purpose      Creates a TestFailedException with the name of the function,
// which the failure should have occurred but didn't, and a
-// message explaining why it should fail.
+// message explaining why it should fail.
// Parameters
-// func - IN: Name of the function where failure should occur
-// message - IN: Message
+// func - IN: Name of the function where failure should occur
+// message - IN: Message
//--------------------------------------------------------------------------
TestFailedException::TestFailedException(const H5std_string func, const H5std_string message) : Exception(func, message) {}
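InvalidActionException is how these tests flag a call that should have failed but did not: attempt the call in a nested try, throw InvalidActionException if control reaches the next statement, and swallow the expected library exception in the catch. A sketch of the idiom, using the duplicate-attribute case from tattr.cpp below (the identifiers are placeholders for the caller's objects):

    try {
        // Creating an attribute that already exists must fail
        Attribute dup = dataset.createAttribute(ATTR1_NAME, PredType::NATIVE_INT, att_space);

        // Still here: the library accepted the duplicate, so fail the test
        throw InvalidActionException("DataSet::createAttribute",
                                     "Library allowed overwrite of existing attribute");
    }
    catch (AttributeIException& E) {
        // expected: duplicate attribute names are rejected
    }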
diff --git a/c++/test/h5cpputil.h b/c++/test/h5cpputil.h
index f8aaec7..18fd44f 100644
--- a/c++/test/h5cpputil.h
+++ b/c++/test/h5cpputil.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*****************************************************************************
@@ -37,22 +35,22 @@ int check_values (hsize_t i, hsize_t j, int apoint, int acheck);
void check_values(const char *value, const char* msg, int line, const char* file_name);
int test_report (int, const H5std_string&);
void issue_fail_msg(const char* where, int line, const char* file_name,
- const char* message="");
+ const char* message="");
void issue_fail_msg(const char* where, int line, const char* file_name,
- const char* func_name, const char* message);
+ const char* func_name, const char* message);
class InvalidActionException : public Exception {
public:
- InvalidActionException(const H5std_string func_name, const H5std_string message = DEFAULT_MSG);
- InvalidActionException();
- virtual ~InvalidActionException() throw();
+ InvalidActionException(const H5std_string func_name, const H5std_string message = DEFAULT_MSG);
+ InvalidActionException();
+ virtual ~InvalidActionException() throw();
};
class TestFailedException : public Exception {
public:
- TestFailedException(const H5std_string func_name, const H5std_string message = DEFAULT_MSG);
- TestFailedException();
- virtual ~TestFailedException() throw();
+ TestFailedException(const H5std_string func_name, const H5std_string message = DEFAULT_MSG);
+ TestFailedException();
+ virtual ~TestFailedException() throw();
};
// Overloaded/Template functions to verify values and display proper info
@@ -65,18 +63,18 @@ template <class Type1, class Type2>
{
if (GetTestVerbosity()>=VERBO_HI)
{
- cerr << endl;
+ cerr << endl;
cerr << " Call to routine: " << where << " at line " << line
- << " in " << file_name << " had value " << x << endl;
+ << " in " << file_name << " had value " << x << endl;
}
if (x != value)
{
- cerr << endl;
+ cerr << endl;
cerr << "*** UNEXPECTED VALUE from " << where << " should be "
- << value << ", but is " << x << " at line " << line
- << " in " << file_name << endl;
- IncTestNumErrs();
- throw TestFailedException(where, "");
+ << value << ", but is " << x << " at line " << line
+ << " in " << file_name << endl;
+ IncTestNumErrs();
+ throw TestFailedException(where, "");
}
}
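verify_val is called the same way throughout the tests: pass the value obtained, the value expected, and the location; on mismatch it bumps the error count and throws TestFailedException so the enclosing try block records one failure. For example, the layout check in test_virtual above:

    H5D_layout_t layout = dcpl.getLayout();
    verify_val(layout, H5D_VIRTUAL, "DSetCreatPropList::getLayout", __LINE__, __FILE__);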
@@ -85,12 +83,12 @@ template <class Type1, class Type2>
{
if (x != value)
{
- cerr << endl;
+ cerr << endl;
cerr << "*** UNEXPECTED VALUE: " << file_name << ":line " << line
- << ": " << msg << " different: " << x << ", should be " << value
- << endl;
- IncTestNumErrs();
- throw TestFailedException(file_name, msg);
+ << ": " << msg << " different: " << x << ", should be " << value
+ << endl;
+ IncTestNumErrs();
+ throw TestFailedException(file_name, msg);
}
}
@@ -99,17 +97,17 @@ template <class Type1, class Type2>
{
if (GetTestVerbosity()>=VERBO_HI)
{
- cerr << endl;
+ cerr << endl;
cerr << " Call to routine: " << where << " at line " << line
- << " in " << file_name << " had value " << x << endl;
+ << " in " << file_name << " had value " << x << endl;
}
if (x == value)
{
- cerr << endl;
+ cerr << endl;
cerr << "*** UNEXPECTED VALUE from " << where << " should not be "
- << value << " at line " << line << " in " << file_name << endl;
- IncTestNumErrs();
- throw TestFailedException(where, "");
+ << value << " at line " << line << " in " << file_name << endl;
+ IncTestNumErrs();
+ throw TestFailedException(where, "");
}
}
@@ -118,10 +116,10 @@ template <class Type1, class Type2>
{
if (x == value)
{
- cerr << endl;
+ cerr << endl;
cerr << "*** Function " << msg << " FAILED at line " << line << endl;
- IncTestNumErrs();
- throw TestFailedException(file_name, msg);
+ IncTestNumErrs();
+ throw TestFailedException(file_name, msg);
}
}
@@ -130,12 +128,12 @@ template <class Type1, class Type2>
{
if (x == value)
{
- cerr << endl;
- cerr << "*** UNEXPECTED FLOAT VALUE: " << file_name << ":line " << line
- << ": " << msg << " different: " << x << ", should be " << value
- << " (epsilon=" << epsilon << ")" << endl;
- IncTestNumErrs();
- throw TestFailedException(file_name, msg);
+ cerr << endl;
+ cerr << "*** UNEXPECTED FLOAT VALUE: " << file_name << ":line " << line
+ << ": " << msg << " different: " << x << ", should be " << value
+ << " (epsilon=" << epsilon << ")" << endl;
+ IncTestNumErrs();
+ throw TestFailedException(file_name, msg);
}
}
diff --git a/c++/test/tarray.cpp b/c++/test/tarray.cpp
index 7fe2e6b..a1de9c5 100644
--- a/c++/test/tarray.cpp
+++ b/c++/test/tarray.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*****************************************************************************
@@ -34,6 +32,7 @@ using namespace H5;
#include "h5cpputil.h" // C++ utilility header file
const H5std_string FILENAME("tarray.h5");
+const H5std_string ARRAYTYPE_NAME("/Array type 1");
const int SPACE1_RANK = 1;
const hsize_t SPACE1_DIM1 = 4;
const int ARRAY1_RANK = 1;
@@ -52,12 +51,12 @@ typedef enum int_t {
/*-------------------------------------------------------------------------
* Function: test_array_compound_array
*
- * Purpose: Tests 1-D array of compound datatypes (with array fields)
+ * Purpose Tests 1-D array of compound datatypes (with array fields)
*
- * Return: None.
+ * Return None.
*
- * Programmer: Binh-Minh Ribler (using C version)
- * January, 2016
+ * Programmer Binh-Minh Ribler (using C version)
+ * January, 2016
*
* Modifications:
*
@@ -75,7 +74,7 @@ static void test_array_compound_array()
hsize_t sdims1[] = {SPACE1_DIM1};
hsize_t tdims1[] = {ARRAY1_DIM1};
int nmemb; // Number of compound members
- int ii; // counting variables
+ int ii; // counting variables
hsize_t idxi, idxj, idxk; // dimension indicing variables
H5T_class_t mclass; // Datatype class for field
@@ -84,180 +83,193 @@ static void test_array_compound_array()
for (idxj = 0; idxj < ARRAY1_DIM1; idxj++) {
wdata[idxi][idxj].i = idxi * 10 + idxj;
for(idxk = 0; idxk < ARRAY1_DIM1; idxk++)
- {
+ {
float temp = idxi * 10.0 + idxj * 2.5 + idxk;
wdata[idxi][idxj].f[idxk] = temp;
- }
+ }
} // end for
try {
- // Create File
- H5File file1(FILENAME, H5F_ACC_TRUNC);
+ // Create File
+ H5File file1(FILENAME, H5F_ACC_TRUNC);
- // Create dataspace for datasets
- DataSpace space(SPACE1_RANK, sdims1, NULL);
+ // Create dataspace for datasets
+ DataSpace space(SPACE1_RANK, sdims1, NULL);
- /*
- * Create an array datatype of compounds, arrtype. Each compound
- * datatype, comptype, contains an integer and an array of floats,
- * arrfltype.
- */
+ /*
+ * Create an array datatype of compounds, arrtype. Each compound
+ * datatype, comptype, contains an integer and an array of floats,
+ * arrfltype.
+ */
- // Create a compound datatype
- CompType comptype(sizeof(s1_t));
+ // Create a compound datatype
+ CompType comptype(sizeof(s1_t));
- // Insert integer field
- comptype.insertMember("i", HOFFSET(s1_t, i), PredType::NATIVE_INT);
+ // Insert integer field
+ comptype.insertMember("i", HOFFSET(s1_t, i), PredType::NATIVE_INT);
- // Create an array of floats datatype
- ArrayType arrfltype(PredType::NATIVE_FLOAT, ARRAY1_RANK, tdims1);
+ // Create an array of floats datatype
+ ArrayType arrfltype(PredType::NATIVE_FLOAT, ARRAY1_RANK, tdims1);
- // Insert float array field
- comptype.insertMember("f", HOFFSET(s1_t, f), arrfltype);
+ // Insert float array field
+ comptype.insertMember("f", HOFFSET(s1_t, f), arrfltype);
- // Close array of floats field datatype
- arrfltype.close();
+ // Close array of floats field datatype
+ arrfltype.close();
- // Create an array datatype of the compound datatype
- ArrayType arrtype(comptype, ARRAY1_RANK, tdims1);
+ // Create an array datatype of the compound datatype
+ ArrayType arrtype(comptype, ARRAY1_RANK, tdims1);
- // Close compound datatype comptype
- comptype.close();
+ // Close compound datatype comptype
+ comptype.close();
- // Create a dataset
- DataSet dataset = file1.createDataSet("Dataset1", arrtype, space);
+ // Create a dataset
+ DataSet dataset = file1.createDataSet("Dataset1", arrtype, space);
- // Write dataset to disk
- dataset.write(wdata, arrtype);
+ // Write dataset to disk
+ dataset.write(wdata, arrtype);
- // Close all
- dataset.close();
- arrtype.close();
- space.close();
- file1.close();
+ // Test opening ArrayType with opening constructor (Dec 2016)
- // Re-open file
- file1.openFile(FILENAME, H5F_ACC_RDONLY);
+ // Commit the arrtype to give it a name
+ arrtype.commit(file1, ARRAYTYPE_NAME);
- // Open the dataset
- dataset = file1.openDataSet("Dataset1");
+ // Close it, then re-open with the opening constructor
+ arrtype.close();
+ ArrayType named_type(file1, ARRAYTYPE_NAME);
- /*
- * Check the datatype array of compounds
- */
+ // Get and verify the type's name
+ H5std_string type_name = named_type.getObjName();
+ verify_val(type_name, ARRAYTYPE_NAME, "DataType::getObjName tests constructor", __LINE__, __FILE__);
+ named_type.close();
- // Verify that it is an array of compounds
- DataType dstype = dataset.getDataType();
- mclass = dstype.getClass();
- verify_val(mclass==H5T_ARRAY, true, "f2_type.getClass", __LINE__, __FILE__);
+ // Close all
+ dataset.close();
+ space.close();
+ file1.close();
- dstype.close();
+ // Re-open file
+ file1.openFile(FILENAME, H5F_ACC_RDONLY);
- // Get the array datatype to check
- ArrayType atype_check = dataset.getArrayType();
+ // Open the dataset
+ dataset = file1.openDataSet("Dataset1");
- // Check the array rank
- int ndims = atype_check.getArrayNDims();
- verify_val(ndims, ARRAY1_RANK, "atype_check.getArrayNDims", __LINE__, __FILE__);
+ /*
+ * Check the datatype array of compounds
+ */
- // Get the array dimensions
- hsize_t rdims1[H5S_MAX_RANK];
- atype_check.getArrayDims(rdims1);
+ // Verify that it is an array of compounds
+ DataType dstype = dataset.getDataType();
+ mclass = dstype.getClass();
+ verify_val(mclass==H5T_ARRAY, true, "f2_type.getClass", __LINE__, __FILE__);
- // Check the array dimensions
- for (ii =0; ii <ndims; ii++)
- if (rdims1[ii]!=tdims1[ii]) {
- TestErrPrintf("Array dimension information doesn't match!, rdims1[%d]=%zd, tdims1[%d]=z%d\n", ii, rdims1[ii], ii, tdims1[ii]);
+ dstype.close();
+
+ // Get the array datatype to check
+ ArrayType atype_check = dataset.getArrayType();
+
+ // Check the array rank
+ int ndims = atype_check.getArrayNDims();
+ verify_val(ndims, ARRAY1_RANK, "atype_check.getArrayNDims", __LINE__, __FILE__);
+
+ // Get the array dimensions
+ hsize_t rdims1[H5S_MAX_RANK];
+ atype_check.getArrayDims(rdims1);
+
+ // Check the array dimensions
+ for (ii =0; ii <ndims; ii++)
+ if (rdims1[ii]!=tdims1[ii]) {
+                TestErrPrintf("Array dimension information doesn't match!, rdims1[%d]=%zd, tdims1[%d]=%zd\n", ii, rdims1[ii], ii, tdims1[ii]);
continue;
} // end if
- // Test ArrayType::ArrayType(const hid_t existing_id)
- ArrayType new_arrtype(atype_check.getId());
+ // Test ArrayType::ArrayType(const hid_t existing_id)
+ ArrayType new_arrtype(atype_check.getId());
- // Check the array rank
- ndims = new_arrtype.getArrayNDims();
- verify_val(ndims, ARRAY1_RANK, "new_arrtype.getArrayNDims", __LINE__, __FILE__);
+ // Check the array rank
+ ndims = new_arrtype.getArrayNDims();
+ verify_val(ndims, ARRAY1_RANK, "new_arrtype.getArrayNDims", __LINE__, __FILE__);
- // Get the array dimensions
- new_arrtype.getArrayDims(rdims1);
+ // Get the array dimensions
+ new_arrtype.getArrayDims(rdims1);
- // Check the array dimensions
- for (ii = 0; ii < ndims; ii++)
- if (rdims1[ii] != tdims1[ii]) {
- TestErrPrintf("Array dimension information doesn't match!, rdims1[%d]=%zd, tdims1[%d]=%zd\n", ii, rdims1[ii], ii, tdims1[ii]);
+ // Check the array dimensions
+ for (ii = 0; ii < ndims; ii++)
+ if (rdims1[ii] != tdims1[ii]) {
+ TestErrPrintf("Array dimension information doesn't match!, rdims1[%d]=%zd, tdims1[%d]=%zd\n", ii, rdims1[ii], ii, tdims1[ii]);
continue;
} // end if
- /*
- * Check the compound datatype and the array of floats datatype
- * in the compound.
- */
- // Get the compound datatype, which is the base datatype of the
- // array datatype atype_check.
- DataType base_type = atype_check.getSuper();
- mclass = base_type.getClass();
- verify_val(mclass==H5T_COMPOUND, true, "atype_check.getClass", __LINE__, __FILE__);
-
- // Verify the compound datatype info
- CompType ctype_check(base_type.getId());
- base_type.close();
-
- // Check the number of members
- nmemb = ctype_check.getNmembers();
- verify_val(nmemb, 2, "ctype_check.getNmembers", __LINE__, __FILE__);
-
- // Check the 2nd field's name
- H5std_string field2_name = ctype_check.getMemberName(1);
- if (HDstrcmp(field2_name.c_str(),"f") != 0)
- TestErrPrintf("Compound field name doesn't match!, field2_name=%s\n",field2_name.c_str());
-
- // Get the 2nd field's datatype
- DataType f2_type = ctype_check.getMemberDataType(1);
-
- // Get the 2nd field's class, this 2nd field should have an array type
- mclass = f2_type.getClass();
- verify_val(mclass==H5T_ARRAY, true, "f2_type.getClass", __LINE__, __FILE__);
- f2_type.close();
-
- // Get the 2nd field, array of floats datatype, to check
- ArrayType f2_atype_check = ctype_check.getMemberArrayType(1);
-
- // Check the array rank
- ndims = f2_atype_check.getArrayNDims();
- verify_val(ndims, ARRAY1_RANK, "f2_atype_check.getArrayNDims", __LINE__, __FILE__);
-
- // Get the array dimensions
- HDmemset(rdims1, 0, H5S_MAX_RANK);
- f2_atype_check.getArrayDims(rdims1);
-
- // Check the array dimensions
- for (ii = 0; ii < ndims; ii++)
- if (rdims1[ii] != tdims1[ii]) {
- TestErrPrintf("Array dimension information doesn't match!, rdims1[%d]=%zd, tdims1[%d]=%zd\n", ii, rdims1[ii], ii, tdims1[ii]);
- continue;
- } // end if
-
- // Close done datatypes
- f2_atype_check.close();
- ctype_check.close();
-
- // Read dataset from disk
- dataset.read(rdata, atype_check);
-
- // Compare data read in
- for (idxi = 0; idxi < SPACE1_DIM1; idxi++) {
- for (idxj = 0; idxj < ARRAY1_DIM1; idxj++) {
- if (wdata[idxi][idxj].i != rdata[idxi][idxj].i) {
- TestErrPrintf("Array data information doesn't match!, wdata[%d][%d].i=%d, rdata[%d][%d].i=%d\n",idxi,idxj,wdata[idxi][idxj].i,idxi,idxj,rdata[idxi][idxj].i);
- continue;
- } // end if
- } // end for
- } // end for
-
- // Close all
- atype_check.close();
- dataset.close();
- file1.close();
+ /*
+ * Check the compound datatype and the array of floats datatype
+ * in the compound.
+ */
+ // Get the compound datatype, which is the base datatype of the
+ // array datatype atype_check.
+ DataType base_type = atype_check.getSuper();
+ mclass = base_type.getClass();
+ verify_val(mclass==H5T_COMPOUND, true, "atype_check.getClass", __LINE__, __FILE__);
+
+ // Verify the compound datatype info
+ CompType ctype_check(base_type.getId());
+ base_type.close();
+
+ // Check the number of members
+ nmemb = ctype_check.getNmembers();
+ verify_val(nmemb, 2, "ctype_check.getNmembers", __LINE__, __FILE__);
+
+ // Check the 2nd field's name
+ H5std_string field2_name = ctype_check.getMemberName(1);
+ if (HDstrcmp(field2_name.c_str(),"f") != 0)
+ TestErrPrintf("Compound field name doesn't match!, field2_name=%s\n",field2_name.c_str());
+
+ // Get the 2nd field's datatype
+ DataType f2_type = ctype_check.getMemberDataType(1);
+
+ // Get the 2nd field's class, this 2nd field should have an array type
+ mclass = f2_type.getClass();
+ verify_val(mclass==H5T_ARRAY, true, "f2_type.getClass", __LINE__, __FILE__);
+ f2_type.close();
+
+ // Get the 2nd field, array of floats datatype, to check
+ ArrayType f2_atype_check = ctype_check.getMemberArrayType(1);
+
+ // Check the array rank
+ ndims = f2_atype_check.getArrayNDims();
+ verify_val(ndims, ARRAY1_RANK, "f2_atype_check.getArrayNDims", __LINE__, __FILE__);
+
+ // Get the array dimensions
+ HDmemset(rdims1, 0, H5S_MAX_RANK);
+ f2_atype_check.getArrayDims(rdims1);
+
+ // Check the array dimensions
+ for (ii = 0; ii < ndims; ii++)
+ if (rdims1[ii] != tdims1[ii]) {
+ TestErrPrintf("Array dimension information doesn't match!, rdims1[%d]=%zd, tdims1[%d]=%zd\n", ii, rdims1[ii], ii, tdims1[ii]);
+ continue;
+ } // end if
+
+ // Close done datatypes
+ f2_atype_check.close();
+ ctype_check.close();
+
+ // Read dataset from disk
+ dataset.read(rdata, atype_check);
+
+ // Compare data read in
+ for (idxi = 0; idxi < SPACE1_DIM1; idxi++) {
+ for (idxj = 0; idxj < ARRAY1_DIM1; idxj++) {
+ if (wdata[idxi][idxj].i != rdata[idxi][idxj].i) {
+ TestErrPrintf("Array data information doesn't match!, wdata[%d][%d].i=%d, rdata[%d][%d].i=%d\n",idxi,idxj,wdata[idxi][idxj].i,idxi,idxj,rdata[idxi][idxj].i);
+ continue;
+ } // end if
+ } // end for
+ } // end for
+
+ // Close all
+ atype_check.close();
+ dataset.close();
+ file1.close();
PASSED();
} // end of try block
catch (Exception& E)
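The new code in this hunk commits the array datatype under a name and then reopens it with the ArrayType constructor that takes a location and a name, which is what the added ARRAYTYPE_NAME constant is for. The round trip in isolation:

    ArrayType arrtype(comptype, ARRAY1_RANK, tdims1);

    // Commit the type so it becomes a named datatype in the file
    arrtype.commit(file1, "/Array type 1");
    arrtype.close();

    // Reopen it by name with the opening constructor and verify the name
    ArrayType named_type(file1, "/Array type 1");
    H5std_string type_name = named_type.getObjName();   // "/Array type 1"
    named_type.close();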
@@ -271,15 +283,15 @@ static void test_array_compound_array()
/*-------------------------------------------------------------------------
* Function: test_array_assignment
*
- * Purpose: Tests the operator=
+ * Purpose Tests the operator=
*
- * Return: None.
+ * Return None.
*
- * Programmer: Binh-Minh Ribler (using C version)
- * March, 2016
+ * Programmer Binh-Minh Ribler (using C version)
+ * March, 2016
*
* Description:
- * Used user's sample code in HDFFV-9562
+ * Used user's sample code in HDFFV-9562
*
* Modifications:
*
@@ -303,35 +315,35 @@ static void test_array_assignment()
SUBTEST("ArrayType::operator=");
try {
- // Create File
- H5File file1(FILENAME, H5F_ACC_TRUNC);
+ // Create File
+ H5File file1(FILENAME, H5F_ACC_TRUNC);
- // Create dataspace for datasets
- DataSpace space(SPACE1_RANK, sdims1, NULL);
+ // Create dataspace for datasets
+ DataSpace space(SPACE1_RANK, sdims1, NULL);
- /*
- * Create an array datatype of compounds, arrtype. Each compound
- * datatype, comptype, contains an integer and an array of floats,
- * arrfltype.
- */
+ /*
+ * Create an array datatype of compounds, arrtype. Each compound
+ * datatype, comptype, contains an integer and an array of floats,
+ * arrfltype.
+ */
- // Create a compound datatype
- CompType comptype(static_cast<size_t>(24));
+ // Create a compound datatype
+ CompType comptype(static_cast<size_t>(24));
- // Insert integer field
- comptype.insertMember("i", 0, PredType::NATIVE_INT);
+ // Insert integer field
+ comptype.insertMember("i", 0, PredType::NATIVE_INT);
- // Insert float array field
- comptype.insertMember("a", 4, getArr());
+ // Insert float array field
+ comptype.insertMember("a", 4, getArr());
- // Create a dataset
- DataSet dataset = file1.createDataSet("Dataset1", comptype, space);
+ // Create a dataset
+ DataSet dataset = file1.createDataSet("Dataset1", comptype, space);
- // Close all
- dataset.close();
- comptype.close();
- space.close();
- file1.close();
+ // Close all
+ dataset.close();
+ comptype.close();
+ space.close();
+ file1.close();
PASSED();
} // end of try block
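test_array_assignment follows the user code from HDFFV-9562: an ArrayType is built inside a helper, returned by value, and passed straight to CompType::insertMember, exercising ArrayType's copy/assignment path. The getArr used above is not shown in this hunk; a stand-in consistent with the 24-byte compound would be:

    static ArrayType getArr()
    {
        hsize_t dims[1] = {5};    // five floats -> 20 bytes at offset 4
        return ArrayType(PredType::NATIVE_FLOAT, 1, dims);
    }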
@@ -345,12 +357,12 @@ static void test_array_assignment()
/*-------------------------------------------------------------------------
* Function: test_array_info
*
- * Purpose: Tests getting array information using the const methods.
+ * Purpose Tests getting array information using the const methods.
*
- * Return: None.
+ * Return None.
*
- * Programmer: Binh-Minh Ribler
- * April, 2016
+ * Programmer Binh-Minh Ribler
+ * April, 2016
*
* Modifications:
*
@@ -368,7 +380,7 @@ static void test_array_info()
hsize_t sdims1[] = {SPACE1_DIM1};
hsize_t tdims1[] = {ARRAY1_DIM1};
int nmemb; // Number of compound members
- int ii; // counting variables
+ int ii; // counting variables
hsize_t idxi, idxj, idxk; // dimension indicing variables
H5T_class_t mclass; // Datatype class for field
@@ -377,84 +389,84 @@ static void test_array_info()
for (idxj = 0; idxj < ARRAY1_DIM1; idxj++) {
wdata[idxi][idxj].i = idxi * 10 + idxj;
for(idxk = 0; idxk < ARRAY1_DIM1; idxk++)
- {
+ {
float temp = idxi * 10.0 + idxj * 2.5 + idxk;
wdata[idxi][idxj].f[idxk] = temp;
- }
+ }
} // end for
try {
- // Create File
- H5File file1(FILENAME, H5F_ACC_TRUNC);
+ // Create File
+ H5File file1(FILENAME, H5F_ACC_TRUNC);
- // Create dataspace for datasets
- DataSpace space(SPACE1_RANK, sdims1, NULL);
+ // Create dataspace for datasets
+ DataSpace space(SPACE1_RANK, sdims1, NULL);
- /*
- * Create some array datatypes, then close the file.
- */
+ /*
+ * Create some array datatypes, then close the file.
+ */
- // Create an array of floats datatype
- ArrayType arrfltype(PredType::NATIVE_FLOAT, ARRAY1_RANK, tdims1);
+ // Create an array of floats datatype
+ ArrayType arrfltype(PredType::NATIVE_FLOAT, ARRAY1_RANK, tdims1);
- // Create an array datatype of the compound datatype
- ArrayType arrtype(PredType::NATIVE_UINT, ARRAY1_RANK, tdims1);
+ // Create an array datatype of the compound datatype
+ ArrayType arrtype(PredType::NATIVE_UINT, ARRAY1_RANK, tdims1);
- // Create a dataset
- DataSet dataset = file1.createDataSet("Dataset1", arrtype, space);
+ // Create a dataset
+ DataSet dataset = file1.createDataSet("Dataset1", arrtype, space);
- // Write dataset to disk
- dataset.write(wdata, arrtype);
+ // Write dataset to disk
+ dataset.write(wdata, arrtype);
- // Close array of floats field datatype
- arrfltype.close();
+ // Close array of floats field datatype
+ arrfltype.close();
- // Close all
- dataset.close();
- arrtype.close();
- space.close();
- file1.close();
+ // Close all
+ dataset.close();
+ arrtype.close();
+ space.close();
+ file1.close();
- // Re-open file
- file1.openFile(FILENAME, H5F_ACC_RDONLY);
+ // Re-open file
+ file1.openFile(FILENAME, H5F_ACC_RDONLY);
- // Open the dataset
- dataset = file1.openDataSet("Dataset1");
+ // Open the dataset
+ dataset = file1.openDataSet("Dataset1");
- /*
- * Check the datatype array of compounds
- */
+ /*
+ * Check the datatype array of compounds
+ */
- // Verify that it is an array of compounds
- DataType dstype = dataset.getDataType();
- mclass = dstype.getClass();
- verify_val(mclass==H5T_ARRAY, true, "f2_type.getClass", __LINE__, __FILE__);
+ // Verify that it is an array of compounds
+ DataType dstype = dataset.getDataType();
+ mclass = dstype.getClass();
+ verify_val(mclass==H5T_ARRAY, true, "f2_type.getClass", __LINE__, __FILE__);
- dstype.close();
+ dstype.close();
- { // Let atype_check go out of scope
- // Get the array datatype, declared as const
- const ArrayType atype_check = dataset.getArrayType();
+ { // Let atype_check go out of scope
+ // Get the array datatype, declared as const
+ const ArrayType atype_check = dataset.getArrayType();
- // Check the array rank with the const method
- int ndims = atype_check.getArrayNDims();
- verify_val(ndims, ARRAY1_RANK, "atype_check.getArrayNDims", __LINE__, __FILE__);
+ // Check the array rank with the const method
+ int ndims = atype_check.getArrayNDims();
+ verify_val(ndims, ARRAY1_RANK, "atype_check.getArrayNDims", __LINE__, __FILE__);
- // Get the array dimensions with the const method
- hsize_t rdims1[H5S_MAX_RANK];
- atype_check.getArrayDims(rdims1);
+ // Get the array dimensions with the const method
+ hsize_t rdims1[H5S_MAX_RANK];
+ atype_check.getArrayDims(rdims1);
- // Check the array dimensions
- for (ii =0; ii <ndims; ii++)
- if (rdims1[ii]!=tdims1[ii]) {
- TestErrPrintf("Array dimension information doesn't match!, rdims1[%d]=%zd, tdims1[%d]=z%d\n", ii, rdims1[ii], ii, tdims1[ii]);
+ // Check the array dimensions
+ for (ii =0; ii <ndims; ii++)
+ if (rdims1[ii]!=tdims1[ii]) {
+                TestErrPrintf("Array dimension information doesn't match!, rdims1[%d]=%zd, tdims1[%d]=%zd\n", ii, rdims1[ii], ii, tdims1[ii]);
continue;
} // end if
- }
+ }
- // Close all
- dataset.close();
- file1.close();
+ // Close all
+ dataset.close();
+ file1.close();
PASSED();
} // end of try block
catch (Exception& E)
@@ -489,14 +501,14 @@ void test_array()
/*-------------------------------------------------------------------------
- * Function: cleanup_array
+ * Function: cleanup_array
*
- * Purpose: Cleanup temporary test files
+ * Purpose Cleanup temporary test files
*
- * Return: none
+ * Return none
*
- * Programmer: Binh-Minh Ribler (using C version)
- * January, 2016
+ * Programmer Binh-Minh Ribler (using C version)
+ * January, 2016
*
* Modifications:
*
diff --git a/c++/test/tattr.cpp b/c++/test/tattr.cpp
index bc46d0f..d1a0d67 100644
--- a/c++/test/tattr.cpp
+++ b/c++/test/tattr.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*****************************************************************************
@@ -34,14 +32,14 @@ using namespace H5;
#include "h5test.h"
#include "h5cpputil.h" // C++ utilility header file
-const H5std_string FILE_BASIC("tattr_basic.h5");
-const H5std_string FILE_COMPOUND("tattr_compound.h5");
-const H5std_string FILE_SCALAR("tattr_scalar.h5");
-const H5std_string FILE_MULTI("tattr_multi.h5");
-const H5std_string FILE_DTYPE("tattr_dtype.h5");
-const H5std_string ATTR_TMP_NAME("temp_attr_name");
-const H5std_string FATTR_TMP_NAME("temp_fattr_name");
-const size_t ATTR_MAX_DIMS = 7;
+const H5std_string FILE_BASIC("tattr_basic.h5");
+const H5std_string FILE_COMPOUND("tattr_compound.h5");
+const H5std_string FILE_SCALAR("tattr_scalar.h5");
+const H5std_string FILE_MULTI("tattr_multi.h5");
+const H5std_string FILE_DTYPE("tattr_dtype.h5");
+const H5std_string ATTR_TMP_NAME("temp_attr_name");
+const H5std_string FATTR_TMP_NAME("temp_fattr_name");
+const size_t ATTR_MAX_DIMS = 7;
/* 3-D dataset with fixed dimensions */
const int SPACE1_RANK = 3;
@@ -97,7 +95,7 @@ struct attr4_struct {
const H5std_string ATTR5_NAME("Attr5");
const int ATTR5_RANK = 0;
-float attr_data5 = (float)-5.123; // Test data for 5th attribute
+float attr_data5 = (float)-5.123; // Test data for 5th attribute
/* Info for another attribute */
const H5std_string ATTR1A_NAME("Attr1_a");
@@ -121,117 +119,117 @@ static void test_attr_basic_write()
SUBTEST("Basic Attribute Writing Functions");
try {
- // Create file
- H5File fid1 (FILE_BASIC, H5F_ACC_TRUNC);
+ // Create file
+ H5File fid1 (FILE_BASIC, H5F_ACC_TRUNC);
- // Create dataspace for dataset
- DataSpace ds_space (SPACE1_RANK, dims1);
+ // Create dataspace for dataset
+ DataSpace ds_space (SPACE1_RANK, dims1);
- /*
- * Test attribute with dataset
- */
+ /*
+ * Test attribute with dataset
+ */
- // Create a dataset
- DataSet dataset = fid1.createDataSet(DSET1_NAME, PredType::NATIVE_UCHAR, ds_space);
+ // Create a dataset
+ DataSet dataset = fid1.createDataSet(DSET1_NAME, PredType::NATIVE_UCHAR, ds_space);
- // Create dataspace for attribute
- DataSpace att_space (ATTR1_RANK, dims2);
+ // Create dataspace for attribute
+ DataSpace att_space (ATTR1_RANK, dims2);
- // Create a file attribute
- Attribute file_attr2 = fid1.createAttribute (FATTR1_NAME, PredType::NATIVE_INT, att_space);
+ // Create a file attribute
+ Attribute file_attr2 = fid1.createAttribute (FATTR1_NAME, PredType::NATIVE_INT, att_space);
- // Create a file attribute
- Attribute file_attr1 = fid1.createAttribute (FATTR2_NAME, PredType::NATIVE_INT, att_space);
+ // Create a file attribute
+ Attribute file_attr1 = fid1.createAttribute (FATTR2_NAME, PredType::NATIVE_INT, att_space);
- // Create an attribute for the dataset
- Attribute ds_attr1 = dataset.createAttribute (ATTR1_NAME, PredType::NATIVE_INT, att_space);
+ // Create an attribute for the dataset
+ Attribute ds_attr1 = dataset.createAttribute (ATTR1_NAME, PredType::NATIVE_INT, att_space);
- // Try creating an attribute that already exists. This should fail
- // since two attributes cannot have the same name. If an exception
- // is not thrown for this action by createAttribute, then throw an
- // invalid action exception.
- try {
- Attribute invalid_attr = dataset.createAttribute (ATTR1_NAME, PredType::NATIVE_INT, att_space);
+ // Try creating an attribute that already exists. This should fail
+ // since two attributes cannot have the same name. If an exception
+ // is not thrown for this action by createAttribute, then throw an
+ // invalid action exception.
+ try {
+ Attribute invalid_attr = dataset.createAttribute (ATTR1_NAME, PredType::NATIVE_INT, att_space);
- // continuation here, that means no exception has been thrown
- throw InvalidActionException("H5File::createDataSet", "Library allowed overwrite of existing dataset");
- }
- catch (AttributeIException& E) // catching invalid creating attribute
+            // continuation here means no exception has been thrown
+            throw InvalidActionException("DataSet::createAttribute", "Library allowed overwrite of existing attribute");
+ }
+ catch (AttributeIException& E) // catching invalid creating attribute
{} // do nothing, exception expected
- // Write attribute information
- ds_attr1.write (PredType::NATIVE_INT, attr_data1);
+ // Write attribute information
+ ds_attr1.write (PredType::NATIVE_INT, attr_data1);
- // Read attribute information immediately, without closing attribute
- ds_attr1.read (PredType::NATIVE_INT, read_data1);
+ // Read attribute information immediately, without closing attribute
+ ds_attr1.read (PredType::NATIVE_INT, read_data1);
- // Verify values read in
- for(i=0; i<ATTR1_DIM1; i++)
- if(attr_data1[i]!=read_data1[i])
- TestErrPrintf("%d: attribute data different: attr_data1[%d]=%d,read_data1[%d]=%d\n",__LINE__,i,attr_data1[i],i,read_data1[i]);
+ // Verify values read in
+ for(i=0; i<ATTR1_DIM1; i++)
+ if(attr_data1[i]!=read_data1[i])
+ TestErrPrintf("%d: attribute data different: attr_data1[%d]=%d,read_data1[%d]=%d\n",__LINE__,i,attr_data1[i],i,read_data1[i]);
- // Create two more attributes for this dataset, but only write to one.
- Attribute ds_attr2 = dataset.createAttribute (ATTR2_NAME, PredType::NATIVE_INT, att_space);
- Attribute ds_attr3 = dataset.createAttribute (ATTR3_NAME, PredType::NATIVE_INT, att_space);
+ // Create two more attributes for this dataset, but only write to one.
+ Attribute ds_attr2 = dataset.createAttribute (ATTR2_NAME, PredType::NATIVE_INT, att_space);
+ Attribute ds_attr3 = dataset.createAttribute (ATTR3_NAME, PredType::NATIVE_INT, att_space);
- // Write attribute information
- ds_attr2.write (PredType::NATIVE_INT, attr_data1a);
+ // Write attribute information
+ ds_attr2.write (PredType::NATIVE_INT, attr_data1a);
- // Read attribute information immediately, without closing attribute
- ds_attr2.read (PredType::NATIVE_INT, read_data1);
+ // Read attribute information immediately, without closing attribute
+ ds_attr2.read (PredType::NATIVE_INT, read_data1);
- // Verify values read in
- for(i=0; i<ATTR1_DIM1; i++)
- if(attr_data1a[i]!=read_data1[i])
- TestErrPrintf("%d: attribute data different: attr_data1a[%d]=%d,read_data1[%d]=%d\n",__LINE__,i,attr_data1a[i],i,read_data1[i]);
+ // Verify values read in
+ for(i=0; i<ATTR1_DIM1; i++)
+ if(attr_data1a[i]!=read_data1[i])
+ TestErrPrintf("%d: attribute data different: attr_data1a[%d]=%d,read_data1[%d]=%d\n",__LINE__,i,attr_data1a[i],i,read_data1[i]);
- // Close both attributes
- ds_attr1.close();
- ds_attr2.close();
- ds_attr3.close();
+        // Close all three attributes
+ ds_attr1.close();
+ ds_attr2.close();
+ ds_attr3.close();
- /*
- * Test attribute with group
- */
+ /*
+ * Test attribute with group
+ */
- // Create group in file fid1
- Group group = fid1.createGroup (GROUP1_NAME);
+ // Create group in file fid1
+ Group group = fid1.createGroup (GROUP1_NAME);
- // Create dataspace for attribute
- DataSpace sid3(ATTR2_RANK, dims3);
+ // Create dataspace for attribute
+ DataSpace sid3(ATTR2_RANK, dims3);
- // Create an attribute for the group
- Attribute gr_attr = group.createAttribute (ATTR2_NAME, PredType::NATIVE_INT, sid3);
+ // Create an attribute for the group
+ Attribute gr_attr = group.createAttribute (ATTR2_NAME, PredType::NATIVE_INT, sid3);
- // Check storage size for attribute
- hsize_t attr_size = gr_attr.getStorageSize();
- verify_val((long)attr_size, (long)(ATTR2_DIM1*ATTR2_DIM2*sizeof(int)),
- "Attribute::getStorageSize",__LINE__,__FILE__);
+ // Check storage size for attribute
+ hsize_t attr_size = gr_attr.getStorageSize();
+ verify_val((long)attr_size, (long)(ATTR2_DIM1*ATTR2_DIM2*sizeof(int)),
+ "Attribute::getStorageSize",__LINE__,__FILE__);
- // Try to create the same attribute again (should fail)
- try {
- Attribute invalid_attr = group.createAttribute (ATTR2_NAME, PredType::NATIVE_INT, sid3);
+ // Try to create the same attribute again (should fail)
+ try {
+ Attribute invalid_attr = group.createAttribute (ATTR2_NAME, PredType::NATIVE_INT, sid3);
// continuation here, that means no exception has been thrown
throw InvalidActionException("H5Group::createAttribute",
- "Attempting to create an existing attribute");
- }
+ "Attempting to create an existing attribute");
+ }
catch (AttributeIException& E) // catching invalid creating attribute
{} // do nothing, exception expected
- // Write attribute information
- gr_attr.write (PredType::NATIVE_INT, attr_data2);
+ // Write attribute information
+ gr_attr.write (PredType::NATIVE_INT, attr_data2);
- // Check storage size for attribute
- attr_size = gr_attr.getStorageSize();
- verify_val((long)attr_size, (long)(ATTR2_DIM1*ATTR2_DIM2*sizeof(int)),
- "Attribute::getStorageSize", __LINE__, __FILE__);
+ // Check storage size for attribute
+ attr_size = gr_attr.getStorageSize();
+ verify_val((long)attr_size, (long)(ATTR2_DIM1*ATTR2_DIM2*sizeof(int)),
+ "Attribute::getStorageSize", __LINE__, __FILE__);
- PASSED();
+ PASSED();
} // end try block
catch (Exception& E)
{
- issue_fail_msg("test_attr_basic_write()", __LINE__, __FILE__, E.getCDetailMsg());
+ issue_fail_msg("test_attr_basic_write()", __LINE__, __FILE__, E.getCDetailMsg());
}
} // test_attr_basic_write()
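The create/write/read pattern exercised by test_attr_basic_write above, reduced to a minimal standalone sketch; the file name "attr_example.h5", dataset name "dset", and attribute name "counts" are hypothetical, and error handling is omitted:

    #include "H5Cpp.h"
    using namespace H5;

    // Create a dataset, attach a 1-D integer attribute, write it, read it back.
    void attr_roundtrip()
    {
        int wdata[3] = {1, 2, 3};
        int rdata[3] = {0, 0, 0};

        H5File file("attr_example.h5", H5F_ACC_TRUNC);

        hsize_t dset_dims[1] = {10};
        DataSpace dset_space(1, dset_dims);
        DataSet dset = file.createDataSet("dset", PredType::NATIVE_INT, dset_space);

        hsize_t attr_dims[1] = {3};
        DataSpace attr_space(1, attr_dims);
        Attribute attr = dset.createAttribute("counts", PredType::NATIVE_INT, attr_space);

        attr.write(PredType::NATIVE_INT, wdata);   // write attribute data
        attr.read(PredType::NATIVE_INT, rdata);    // read back without closing first
    }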
@@ -248,14 +246,14 @@ static void test_attr_basic_write()
** B. ssize_t Attribute::getName(H5std_string& attr_name, size_t buf_size)
** 1. With buffer smaller than the actual name
** 2. Same test but with retiring overloaded function
-** ssize_t Attribute::getName(size_t buf_size, H5std_string& attr_name)
+** ssize_t Attribute::getName(size_t buf_size, H5std_string& attr_name)
**
** C. H5std_string Attribute::getName()
**
** D. H5std_string Attribute::getName(size_t len)
**
** E. ssize_t Attribute::getName(H5std_string& attr_name, size_t buf_size)
-** With buffer size equals the name's length, i.e., buf_size=0
+** With buffer size equals the name's length, i.e., buf_size=0
**
****************************************************************/
static void test_attr_getname()
@@ -264,109 +262,109 @@ static void test_attr_getname()
SUBTEST("Testing all overloads of Attribute::getName");
try {
- //
- // Open the file FILE_BASIC and test getName with its attribute
- //
-
- // Open file
- H5File fid1(FILE_BASIC, H5F_ACC_RDWR);
-
- // Check for existence of attribute FATTR1_NAME
- bool attr_exists = fid1.attrExists(FATTR1_NAME);
- if (attr_exists == false)
- throw InvalidActionException("H5File::attrExists", "Attribute should exist but does not");
-
- // Open attribute
- Attribute fattr1(fid1.openAttribute(FATTR1_NAME));
-
- // A. Get attribute name with
- // ssize_t Attribute::getName(char* attr_name, size_t buf_size)
- // using different buffer sizes and verify against FATTR1_NAME (3 cases)
-
- // 1. With arbitrary buf_size that is larger than the name size
- size_t buf_size = FATTR1_NAME.length() + 10;
- char* fattr1_name = new char[buf_size+1];
- HDmemset(fattr1_name, 0, buf_size+1);
- ssize_t name_size = 0; // actual length of attribute name
- name_size = fattr1.getName(fattr1_name, buf_size+1);
- CHECK(name_size, FAIL, "Attribute::getName", __LINE__, __FILE__);
- verify_val((size_t)name_size, FATTR1_NAME.length(), "Attribute::getName", __LINE__, __FILE__);
- verify_val((const char*)fattr1_name, FATTR1_NAME, "Attribute::getName", __LINE__, __FILE__);
- delete []fattr1_name;
-
- // 2. With arbitrary buf_size that is smaller than the name's length.
- // Let's try 4 first characters in the name.
- buf_size = 4;
- char short_name[5] = "File"; // to verify the read name
- fattr1_name = new char[buf_size+1];
- HDmemset(fattr1_name, 0, buf_size+1);
- name_size = fattr1.getName(fattr1_name, buf_size+1);
- CHECK(name_size, FAIL, "Attribute::getName", __LINE__, __FILE__);
- verify_val((size_t)name_size, FATTR1_NAME.size(), "Attribute::getName", __LINE__, __FILE__);
- verify_val((const char*)fattr1_name, (const char*)short_name, "Attribute::getName", __LINE__, __FILE__);
- delete []fattr1_name;
-
- // 3. With a buf_size that equals the name's length.
- buf_size = FATTR1_NAME.length();
- fattr1_name = new char[buf_size+1];
- HDmemset(fattr1_name, 0, buf_size+1);
- name_size = fattr1.getName(fattr1_name, buf_size+1);
- CHECK(name_size, FAIL, "Attribute::getName", __LINE__, __FILE__);
- verify_val(fattr1_name, FATTR1_NAME, "Attribute::getName", __LINE__, __FILE__);
- delete []fattr1_name;
-
- // B. Get attribute name with
- // ssize_t Attribute::getName(H5std_string& attr_name, size_t buf_size)
- // using buffer smaller than the actual name
- buf_size = 4;
- H5std_string fattr1_name2;
- name_size = fattr1.getName(fattr1_name2, buf_size);
- verify_val(fattr1_name2, "File", "Attribute::getName", __LINE__, __FILE__);
-
- // Same test as above, but with deprecated overloaded function
- // ssize_t Attribute::getName(size_t buf_size, H5std_string& attr_name)
- // using buffer smaller than the actual name
- H5std_string fattr1_name2a;
- name_size = fattr1.getName(fattr1_name2a, buf_size);
- verify_val(fattr1_name2a, "File", "Attribute::getName", __LINE__, __FILE__);
-
- // C. Get file attribute's name with
- // H5std_string Attribute::getName()
- H5std_string fattr1_name3 = fattr1.getName();
- verify_val(fattr1_name3, FATTR1_NAME, "Attribute::getName", __LINE__, __FILE__);
-
- //
- // D. Test getName getting part of an attribute's name using
- // H5std_string Attribute::getName(len)
- //
-
- // Open dataset DSET1_NAME
- DataSet dataset = fid1.openDataSet(DSET1_NAME);
-
- // Check for existence of attribute
- attr_exists = dataset.attrExists(ATTR1_NAME);
- if (attr_exists == false)
- throw InvalidActionException("H5File::attrExists", "Attribute should exist but does not");
-
- // Open attribute
- Attribute attr1(dataset.openAttribute(ATTR1_NAME));
-
- size_t len = 4;
- H5std_string dattr_name1 = attr1.getName(len);
- verify_val(dattr_name1, "Attr", "Attribute::getName", __LINE__, __FILE__);
-
- // E. Get dataset's attribute name with
- // H5std_string Attribute::getName(H5std_string attr_name, buf_size=0)
- H5std_string dattr_name2;
- name_size = attr1.getName(dattr_name2);
- verify_val(dattr_name2, ATTR1_NAME, "Attribute::getName", __LINE__, __FILE__);
-
- PASSED();
+ //
+ // Open the file FILE_BASIC and test getName with its attribute
+ //
+
+ // Open file
+ H5File fid1(FILE_BASIC, H5F_ACC_RDWR);
+
+ // Check for existence of attribute FATTR1_NAME
+ bool attr_exists = fid1.attrExists(FATTR1_NAME);
+ if (attr_exists == false)
+ throw InvalidActionException("H5File::attrExists", "Attribute should exist but does not");
+
+ // Open attribute
+ Attribute fattr1(fid1.openAttribute(FATTR1_NAME));
+
+ // A. Get attribute name with
+ // ssize_t Attribute::getName(char* attr_name, size_t buf_size)
+ // using different buffer sizes and verify against FATTR1_NAME (3 cases)
+
+ // 1. With arbitrary buf_size that is larger than the name size
+ size_t buf_size = FATTR1_NAME.length() + 10;
+ char* fattr1_name = new char[buf_size+1];
+ HDmemset(fattr1_name, 0, buf_size+1);
+ ssize_t name_size = 0; // actual length of attribute name
+ name_size = fattr1.getName(fattr1_name, buf_size+1);
+ CHECK(name_size, FAIL, "Attribute::getName", __LINE__, __FILE__);
+ verify_val((size_t)name_size, FATTR1_NAME.length(), "Attribute::getName", __LINE__, __FILE__);
+ verify_val((const char*)fattr1_name, FATTR1_NAME, "Attribute::getName", __LINE__, __FILE__);
+ delete []fattr1_name;
+
+ // 2. With arbitrary buf_size that is smaller than the name's length.
+ // Let's try 4 first characters in the name.
+ buf_size = 4;
+ char short_name[5] = "File"; // to verify the read name
+ fattr1_name = new char[buf_size+1];
+ HDmemset(fattr1_name, 0, buf_size+1);
+ name_size = fattr1.getName(fattr1_name, buf_size+1);
+ CHECK(name_size, FAIL, "Attribute::getName", __LINE__, __FILE__);
+ verify_val((size_t)name_size, FATTR1_NAME.size(), "Attribute::getName", __LINE__, __FILE__);
+ verify_val((const char*)fattr1_name, (const char*)short_name, "Attribute::getName", __LINE__, __FILE__);
+ delete []fattr1_name;
+
+ // 3. With a buf_size that equals the name's length.
+ buf_size = FATTR1_NAME.length();
+ fattr1_name = new char[buf_size+1];
+ HDmemset(fattr1_name, 0, buf_size+1);
+ name_size = fattr1.getName(fattr1_name, buf_size+1);
+ CHECK(name_size, FAIL, "Attribute::getName", __LINE__, __FILE__);
+ verify_val(fattr1_name, FATTR1_NAME, "Attribute::getName", __LINE__, __FILE__);
+ delete []fattr1_name;
+
+ // B. Get attribute name with
+ // ssize_t Attribute::getName(H5std_string& attr_name, size_t buf_size)
+ // using buffer smaller than the actual name
+ buf_size = 4;
+ H5std_string fattr1_name2;
+ name_size = fattr1.getName(fattr1_name2, buf_size);
+ verify_val(fattr1_name2, "File", "Attribute::getName", __LINE__, __FILE__);
+
+ // Same test as above, but with deprecated overloaded function
+ // ssize_t Attribute::getName(size_t buf_size, H5std_string& attr_name)
+ // using buffer smaller than the actual name
+ H5std_string fattr1_name2a;
+ name_size = fattr1.getName(fattr1_name2a, buf_size);
+ verify_val(fattr1_name2a, "File", "Attribute::getName", __LINE__, __FILE__);
+
+ // C. Get file attribute's name with
+ // H5std_string Attribute::getName()
+ H5std_string fattr1_name3 = fattr1.getName();
+ verify_val(fattr1_name3, FATTR1_NAME, "Attribute::getName", __LINE__, __FILE__);
+
+ //
+ // D. Test getName getting part of an attribute's name using
+ // H5std_string Attribute::getName(len)
+ //
+
+ // Open dataset DSET1_NAME
+ DataSet dataset = fid1.openDataSet(DSET1_NAME);
+
+ // Check for existence of attribute
+ attr_exists = dataset.attrExists(ATTR1_NAME);
+ if (attr_exists == false)
+            throw InvalidActionException("DataSet::attrExists", "Attribute should exist but does not");
+
+ // Open attribute
+ Attribute attr1(dataset.openAttribute(ATTR1_NAME));
+
+ size_t len = 4;
+ H5std_string dattr_name1 = attr1.getName(len);
+ verify_val(dattr_name1, "Attr", "Attribute::getName", __LINE__, __FILE__);
+
+ // E. Get dataset's attribute name with
+ // H5std_string Attribute::getName(H5std_string attr_name, buf_size=0)
+ H5std_string dattr_name2;
+ name_size = attr1.getName(dattr_name2);
+ verify_val(dattr_name2, ATTR1_NAME, "Attribute::getName", __LINE__, __FILE__);
+
+ PASSED();
} // end try block
catch (Exception& E)
{
- issue_fail_msg("test_attr_getname()", __LINE__, __FILE__, E.getCDetailMsg());
+ issue_fail_msg("test_attr_getname()", __LINE__, __FILE__, E.getCDetailMsg());
}
} // test_attr_getname()
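For reference, the Attribute::getName overloads covered by the test above, applied to an attribute that is assumed to be already open; the buffer size and variable names are arbitrary:

    #include "H5Cpp.h"
    using namespace H5;

    // Query an attribute's name through each overload covered by the test.
    void show_getname(Attribute& attr)
    {
        // Full name returned as an H5std_string
        H5std_string full = attr.getName();

        // Only the first 4 characters of the name
        H5std_string partial = attr.getName(4);

        // Copy into a caller-supplied char buffer; the return value is the
        // actual length of the name, independent of the buffer size passed in
        char buf[16];
        ssize_t len = attr.getName(buf, sizeof(buf));

        // H5std_string output parameter; buf_size defaults to 0, meaning whole name
        H5std_string via_ref;
        len = attr.getName(via_ref);
    }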
@@ -384,100 +382,100 @@ static void test_attr_rename()
SUBTEST("Checking for Existence and Renaming Attribute");
try {
- // Open file
- H5File fid1(FILE_BASIC, H5F_ACC_RDWR);
+ // Open file
+ H5File fid1(FILE_BASIC, H5F_ACC_RDWR);
- // Check and rename attribute belonging to a file
+ // Check and rename attribute belonging to a file
- // Check for existence of attribute
- bool attr_exists = fid1.attrExists(FATTR1_NAME);
- if (attr_exists == false)
- throw InvalidActionException("H5File::attrExists", "Attribute should exist but does not");
+ // Check for existence of attribute
+ bool attr_exists = fid1.attrExists(FATTR1_NAME);
+ if (attr_exists == false)
+ throw InvalidActionException("H5File::attrExists", "Attribute should exist but does not");
- // Change attribute name
- fid1.renameAttr(FATTR1_NAME, FATTR_TMP_NAME);
+ // Change attribute name
+ fid1.renameAttr(FATTR1_NAME, FATTR_TMP_NAME);
- // Open attribute again
- Attribute fattr1(fid1.openAttribute(FATTR_TMP_NAME));
+ // Open attribute again
+ Attribute fattr1(fid1.openAttribute(FATTR_TMP_NAME));
- // Verify new attribute name
- H5std_string fattr_name = fattr1.getName();
- verify_val(fattr_name, FATTR_TMP_NAME, "Attribute::getName", __LINE__, __FILE__);
+ // Verify new attribute name
+ H5std_string fattr_name = fattr1.getName();
+ verify_val(fattr_name, FATTR_TMP_NAME, "Attribute::getName", __LINE__, __FILE__);
- int num_attrs = fid1.getNumAttrs();
- verify_val(num_attrs, 2, "Attribute::getNumAttrs", __LINE__, __FILE__);
+ int num_attrs = fid1.getNumAttrs();
+ verify_val(num_attrs, 2, "Attribute::getNumAttrs", __LINE__, __FILE__);
- // Change first file attribute back to the original name
- fid1.renameAttr(FATTR_TMP_NAME, FATTR1_NAME);
+ // Change first file attribute back to the original name
+ fid1.renameAttr(FATTR_TMP_NAME, FATTR1_NAME);
- // Open the dataset
- DataSet dataset = fid1.openDataSet(DSET1_NAME);
+ // Open the dataset
+ DataSet dataset = fid1.openDataSet(DSET1_NAME);
- // Check and rename attribute belonging to a dataset
+ // Check and rename attribute belonging to a dataset
- // Check for existence of attribute
- attr_exists = dataset.attrExists(ATTR1_NAME);
- if (attr_exists == false)
- throw InvalidActionException("H5File::attrExists", "Attribute should exist but does not");
+ // Check for existence of attribute
+ attr_exists = dataset.attrExists(ATTR1_NAME);
+ if (attr_exists == false)
+            throw InvalidActionException("DataSet::attrExists", "Attribute should exist but does not");
- // Change attribute name
- dataset.renameAttr(ATTR1_NAME, ATTR_TMP_NAME);
+ // Change attribute name
+ dataset.renameAttr(ATTR1_NAME, ATTR_TMP_NAME);
- // Open attribute again
- Attribute attr1(dataset.openAttribute(ATTR_TMP_NAME));
+ // Open attribute again
+ Attribute attr1(dataset.openAttribute(ATTR_TMP_NAME));
- // Verify new attribute name
- H5std_string attr_name = attr1.getName();
- verify_val(attr_name, ATTR_TMP_NAME, "Attribute::getName", __LINE__, __FILE__);
+ // Verify new attribute name
+ H5std_string attr_name = attr1.getName();
+ verify_val(attr_name, ATTR_TMP_NAME, "Attribute::getName", __LINE__, __FILE__);
- // Read attribute information immediately, without closing attribute
- attr1.read (PredType::NATIVE_INT, read_data1);
+ // Read attribute information immediately, without closing attribute
+ attr1.read (PredType::NATIVE_INT, read_data1);
- // Verify values read in
- for(i=0; i<ATTR1_DIM1; i++)
- if(attr_data1[i]!=read_data1[i])
- TestErrPrintf("%d: attribute data different: attr_data1[%d]=%d,read_data1[%d]=%d\n",__LINE__,i,attr_data1[i],i,read_data1[i]);
+ // Verify values read in
+ for(i=0; i<ATTR1_DIM1; i++)
+ if(attr_data1[i]!=read_data1[i])
+ TestErrPrintf("%d: attribute data different: attr_data1[%d]=%d,read_data1[%d]=%d\n",__LINE__,i,attr_data1[i],i,read_data1[i]);
- // Close attribute
- attr1.close();
+ // Close attribute
+ attr1.close();
- // Check for existence of second attribute
- attr_exists = dataset.attrExists(ATTR2_NAME);
- if (attr_exists == false)
- throw InvalidActionException("H5File::attrExists", "Attribute should exist but does not");
+ // Check for existence of second attribute
+ attr_exists = dataset.attrExists(ATTR2_NAME);
+ if (attr_exists == false)
+            throw InvalidActionException("DataSet::attrExists", "Attribute should exist but does not");
- // Open the second attribute
- Attribute attr2(dataset.openAttribute(ATTR2_NAME));
+ // Open the second attribute
+ Attribute attr2(dataset.openAttribute(ATTR2_NAME));
- // Verify second attribute name
- H5std_string attr2_name = attr2.getName();
- verify_val(attr2_name, ATTR2_NAME, "Attribute::getName", __LINE__, __FILE__);
+ // Verify second attribute name
+ H5std_string attr2_name = attr2.getName();
+ verify_val(attr2_name, ATTR2_NAME, "Attribute::getName", __LINE__, __FILE__);
- // Read attribute information immediately, without closing attribute
- attr2.read (PredType::NATIVE_INT, read_data1);
+ // Read attribute information immediately, without closing attribute
+ attr2.read (PredType::NATIVE_INT, read_data1);
- // Verify values read in
- for(i=0; i<ATTR1_DIM1; i++)
- if(attr_data1a[i]!=read_data1[i])
- TestErrPrintf("%d: attribute data different: attr_data1a[%d]=%d,read_data1[%d]=%d\n",__LINE__,i,attr_data1a[i],i,read_data1[i]);
+ // Verify values read in
+ for(i=0; i<ATTR1_DIM1; i++)
+ if(attr_data1a[i]!=read_data1[i])
+ TestErrPrintf("%d: attribute data different: attr_data1a[%d]=%d,read_data1[%d]=%d\n",__LINE__,i,attr_data1a[i],i,read_data1[i]);
- // Close attribute
- attr2.close();
+ // Close attribute
+ attr2.close();
- // Change first attribute back to the original name
- dataset.renameAttr(ATTR_TMP_NAME, ATTR1_NAME);
+ // Change first attribute back to the original name
+ dataset.renameAttr(ATTR_TMP_NAME, ATTR1_NAME);
- // Check for existence of attribute after renaming
- attr_exists = dataset.attrExists(ATTR1_NAME);
- if (attr_exists == false)
- throw InvalidActionException("H5File::attrExists", "Attribute should exist but does not");
+ // Check for existence of attribute after renaming
+ attr_exists = dataset.attrExists(ATTR1_NAME);
+ if (attr_exists == false)
+            throw InvalidActionException("DataSet::attrExists", "Attribute should exist but does not");
- PASSED();
+ PASSED();
} // end try block
catch (Exception& E)
{
- issue_fail_msg("test_attr_rename()", __LINE__, __FILE__, E.getCDetailMsg());
+ issue_fail_msg("test_attr_rename()", __LINE__, __FILE__, E.getCDetailMsg());
}
} // test_attr_rename()
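The existence-check/rename/re-open sequence used above, as a small sketch on an open dataset; the attribute names "old_name" and "new_name" are hypothetical:

    #include "H5Cpp.h"
    using namespace H5;

    // Rename an attribute on a dataset and confirm the new name is visible.
    void rename_attribute(DataSet& dset)
    {
        if (dset.attrExists("old_name"))
        {
            dset.renameAttr("old_name", "new_name");

            Attribute attr = dset.openAttribute("new_name");
            H5std_string name = attr.getName();    // reports "new_name"
        }
    }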
@@ -494,59 +492,59 @@ static void test_attr_basic_read()
SUBTEST("Basic Attribute Reading Functions");
try {
- // Open file
- H5File fid1(FILE_BASIC, H5F_ACC_RDWR);
+ // Open file
+ H5File fid1(FILE_BASIC, H5F_ACC_RDWR);
- // Open the dataset
- DataSet dataset = fid1.openDataSet(DSET1_NAME);
+ // Open the dataset
+ DataSet dataset = fid1.openDataSet(DSET1_NAME);
- // Verify the correct number of attributes
- int num_attrs = dataset.getNumAttrs();
- verify_val(num_attrs, 3, "DataSet::getNumAttrs", __LINE__, __FILE__);
+ // Verify the correct number of attributes
+ int num_attrs = dataset.getNumAttrs();
+ verify_val(num_attrs, 3, "DataSet::getNumAttrs", __LINE__, __FILE__);
- // Open an attribute for the dataset
- Attribute ds_attr=dataset.openAttribute(ATTR1_NAME);
+ // Open an attribute for the dataset
+ Attribute ds_attr=dataset.openAttribute(ATTR1_NAME);
- // Read attribute information
- int read_data1[ATTR1_DIM1]={0}; // Buffer for reading 1st attribute
- ds_attr.read(PredType::NATIVE_INT, &read_data1);
+ // Read attribute information
+ int read_data1[ATTR1_DIM1]={0}; // Buffer for reading 1st attribute
+ ds_attr.read(PredType::NATIVE_INT, &read_data1);
- // Verify values read in
- for(i=0; i<ATTR1_DIM1; i++)
+ // Verify values read in
+ for(i=0; i<ATTR1_DIM1; i++)
if(attr_data1[i]!=read_data1[i])
- TestErrPrintf("%d: attribute data different: attr_data1[%d]=%d, read_data1[%d]=%d\n",__LINE__,i,attr_data1[i],i,read_data1[i]);
+ TestErrPrintf("%d: attribute data different: attr_data1[%d]=%d, read_data1[%d]=%d\n",__LINE__,i,attr_data1[i],i,read_data1[i]);
- /*
- * Test attribute with group
- */
- // Open the group
- Group group = fid1.openGroup(GROUP1_NAME);
+ /*
+ * Test attribute with group
+ */
+ // Open the group
+ Group group = fid1.openGroup(GROUP1_NAME);
- // Verify the correct number of attributes
- num_attrs = group.getNumAttrs();
- verify_val(num_attrs, 1, "H5Group::getNumAttrs", __LINE__, __FILE__);
+ // Verify the correct number of attributes
+ num_attrs = group.getNumAttrs();
+ verify_val(num_attrs, 1, "H5Group::getNumAttrs", __LINE__, __FILE__);
- // Open an attribute for the group
- Attribute gr_attr = group.openAttribute(ATTR2_NAME);
+ // Open an attribute for the group
+ Attribute gr_attr = group.openAttribute(ATTR2_NAME);
- // Buffer for reading 2nd attribute
- int read_data2[ATTR2_DIM1][ATTR2_DIM2]={{0}};
+ // Buffer for reading 2nd attribute
+ int read_data2[ATTR2_DIM1][ATTR2_DIM2]={{0}};
- // Read attribute information
- gr_attr.read(PredType::NATIVE_INT, read_data2);
+ // Read attribute information
+ gr_attr.read(PredType::NATIVE_INT, read_data2);
- // Verify values read in
- for(i=0; i<ATTR2_DIM1; i++)
+ // Verify values read in
+ for(i=0; i<ATTR2_DIM1; i++)
for(j=0; j<ATTR2_DIM2; j++)
- if(attr_data2[i][j]!=read_data2[i][j]) {
- TestErrPrintf("%d: attribute data different: attr_data2[%d][%d]=%d, read_data2[%d][%d]=%d\n",__LINE__, i,j,attr_data2[i][j],i,j,read_data2[i][j]);
- }
- PASSED();
+ if(attr_data2[i][j]!=read_data2[i][j]) {
+ TestErrPrintf("%d: attribute data different: attr_data2[%d][%d]=%d, read_data2[%d][%d]=%d\n",__LINE__, i,j,attr_data2[i][j],i,j,read_data2[i][j]);
+ }
+ PASSED();
} // end try block
catch (Exception& E)
{
- issue_fail_msg("test_attr_basic_read()", __LINE__, __FILE__, E.getCDetailMsg());
+ issue_fail_msg("test_attr_basic_read()", __LINE__, __FILE__, E.getCDetailMsg());
}
} // test_attr_basic_read()
@@ -558,55 +556,55 @@ static void test_attr_basic_read()
static void test_attr_compound_write()
{
- // Output message about test being performed
+ // Output message about test being performed
SUBTEST("Multiple Attribute Functions");
try {
- // Create file
- H5File fid1(FILE_COMPOUND.c_str(), H5F_ACC_TRUNC);
+ // Create file
+ H5File fid1(FILE_COMPOUND.c_str(), H5F_ACC_TRUNC);
- // Create dataspace for dataset
- hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3};
- DataSpace sid1(SPACE1_RANK, dims1);
+ // Create dataspace for dataset
+ hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3};
+ DataSpace sid1(SPACE1_RANK, dims1);
- // Create a dataset
- DataSet dataset = fid1.createDataSet(DSET1_NAME, PredType::NATIVE_UCHAR,sid1);
+ // Create a dataset
+ DataSet dataset = fid1.createDataSet(DSET1_NAME, PredType::NATIVE_UCHAR,sid1);
- // Create the attribute datatype.
- CompType comp_type(sizeof(struct attr4_struct));
+ // Create the attribute datatype.
+ CompType comp_type(sizeof(struct attr4_struct));
- attr4_field1_off = HOFFSET(struct attr4_struct, i);
- comp_type.insertMember(ATTR4_FIELDNAME1, attr4_field1_off, PredType::NATIVE_INT);
+ attr4_field1_off = HOFFSET(struct attr4_struct, i);
+ comp_type.insertMember(ATTR4_FIELDNAME1, attr4_field1_off, PredType::NATIVE_INT);
- attr4_field2_off = HOFFSET(struct attr4_struct, d);
- comp_type.insertMember(ATTR4_FIELDNAME2, attr4_field2_off, PredType::NATIVE_DOUBLE);
+ attr4_field2_off = HOFFSET(struct attr4_struct, d);
+ comp_type.insertMember(ATTR4_FIELDNAME2, attr4_field2_off, PredType::NATIVE_DOUBLE);
- attr4_field3_off = HOFFSET(struct attr4_struct, c);
- comp_type.insertMember(ATTR4_FIELDNAME3, attr4_field3_off, PredType::NATIVE_SCHAR);
+ attr4_field3_off = HOFFSET(struct attr4_struct, c);
+ comp_type.insertMember(ATTR4_FIELDNAME3, attr4_field3_off, PredType::NATIVE_SCHAR);
- // Create dataspace for 1st attribute
- hsize_t dims2[] = {ATTR4_DIM1,ATTR4_DIM2};
- DataSpace sid2(ATTR4_RANK, dims2);
+ // Create dataspace for 1st attribute
+ hsize_t dims2[] = {ATTR4_DIM1,ATTR4_DIM2};
+ DataSpace sid2(ATTR4_RANK, dims2);
- // Create complex attribute for the dataset
- Attribute attr = dataset.createAttribute(ATTR4_NAME, comp_type, sid2);
+ // Create complex attribute for the dataset
+ Attribute attr = dataset.createAttribute(ATTR4_NAME, comp_type, sid2);
- // Try to create the same attribute again (should fail)
- try {
- Attribute invalid_attr = dataset.createAttribute (ATTR4_NAME, comp_type, sid2);
- }
+ // Try to create the same attribute again (should fail)
+ try {
+ Attribute invalid_attr = dataset.createAttribute (ATTR4_NAME, comp_type, sid2);
+ }
catch (AttributeIException& E) // catching invalid creating attribute
{} // do nothing, exception expected
- // Write complex attribute data
- attr.write(comp_type, attr_data4);
+ // Write complex attribute data
+ attr.write(comp_type, attr_data4);
- PASSED();
+ PASSED();
} // end try block
catch (Exception& E)
{
- issue_fail_msg("test_attr_compound_write()", __LINE__, __FILE__, E.getCDetailMsg());
+ issue_fail_msg("test_attr_compound_write()", __LINE__, __FILE__, E.getCDetailMsg());
}
} // test_attr_compound_write()
@@ -617,7 +615,7 @@ static void test_attr_compound_write()
****************************************************************/
static void test_attr_compound_read()
{
- hsize_t dims[ATTR_MAX_DIMS]; // Attribute dimensions
+ hsize_t dims[ATTR_MAX_DIMS]; // Attribute dimensions
size_t size; // Attribute datatype size as stored in file
size_t offset; // Attribute datatype field offset
struct attr4_struct read_data4[ATTR4_DIM1][ATTR4_DIM2]; // Buffer for reading 4th attribute
@@ -626,139 +624,139 @@ static void test_attr_compound_read()
SUBTEST("Basic Attribute Functions");
try {
- // Open file
- H5File fid1(FILE_COMPOUND, H5F_ACC_RDWR);
+ // Open file
+ H5File fid1(FILE_COMPOUND, H5F_ACC_RDWR);
- // Open the dataset
- DataSet dataset = fid1.openDataSet(DSET1_NAME);
+ // Open the dataset
+ DataSet dataset = fid1.openDataSet(DSET1_NAME);
- // Verify the correct number of attributes
- int num_attrs = dataset.getNumAttrs();
- verify_val(num_attrs, 1, "DataSet::getNumAttrs", __LINE__, __FILE__);
+ // Verify the correct number of attributes
+ int num_attrs = dataset.getNumAttrs();
+ verify_val(num_attrs, 1, "DataSet::getNumAttrs", __LINE__, __FILE__);
- // Open 1st attribute for the dataset
- Attribute attr = dataset.openAttribute((unsigned)0);
+ // Open 1st attribute for the dataset
+ Attribute attr = dataset.openAttribute((unsigned)0);
- /* Verify Dataspace */
+ /* Verify Dataspace */
- // Get the dataspace of the attribute
- DataSpace space = attr.getSpace();
+ // Get the dataspace of the attribute
+ DataSpace space = attr.getSpace();
- // Get the rank of the dataspace and verify it
- int rank = space.getSimpleExtentNdims();
- verify_val(rank, ATTR4_RANK, "DataSpace::getSimpleExtentNdims", __LINE__, __FILE__);
+ // Get the rank of the dataspace and verify it
+ int rank = space.getSimpleExtentNdims();
+ verify_val(rank, ATTR4_RANK, "DataSpace::getSimpleExtentNdims", __LINE__, __FILE__);
- // Get the dims of the dataspace and verify them
- int ndims = space.getSimpleExtentDims(dims);
- verify_val(ndims, ATTR4_RANK, "DataSpace::getSimpleExtentDims", __LINE__, __FILE__);
+ // Get the dims of the dataspace and verify them
+ int ndims = space.getSimpleExtentDims(dims);
+ verify_val(ndims, ATTR4_RANK, "DataSpace::getSimpleExtentDims", __LINE__, __FILE__);
verify_val((long)dims[0], (long)ATTR4_DIM1, "DataSpace::getSimpleExtentDims",__LINE__, __FILE__);
verify_val((long)dims[1], (long)ATTR4_DIM2, "DataSpace::getSimpleExtentDims",__LINE__, __FILE__);
- // Get the class of the datatype that is used by attr
- H5T_class_t type_class = attr.getTypeClass();
+ // Get the class of the datatype that is used by attr
+ H5T_class_t type_class = attr.getTypeClass();
- // Verify that the type is of compound datatype
- verify_val(type_class, H5T_COMPOUND, "Attribute::getTypeClass", __LINE__, __FILE__);
+ // Verify that the type is of compound datatype
+ verify_val(type_class, H5T_COMPOUND, "Attribute::getTypeClass", __LINE__, __FILE__);
- // Get the compound datatype
- CompType datatype = attr.getCompType();
+ // Get the compound datatype
+ CompType datatype = attr.getCompType();
- // Verify the number of fields in the datatype, which must be 3
- int fields = datatype.getNmembers();
- verify_val(fields, 3, "CompType::getNmembers", __LINE__, __FILE__);
+ // Verify the number of fields in the datatype, which must be 3
+ int fields = datatype.getNmembers();
+ verify_val(fields, 3, "CompType::getNmembers", __LINE__, __FILE__);
- // Verify that the fields have the same names as when the type
- // was created
- int j;
- for(j=0; j<fields; j++)
- {
- H5std_string fieldname = datatype.getMemberName(j);
- if(!((fieldname == ATTR4_FIELDNAME1) ||
- (fieldname == ATTR4_FIELDNAME2) ||
- (fieldname == ATTR4_FIELDNAME3)))
+ // Verify that the fields have the same names as when the type
+ // was created
+ int j;
+ for(j=0; j<fields; j++)
+ {
+ H5std_string fieldname = datatype.getMemberName(j);
+ if(!((fieldname == ATTR4_FIELDNAME1) ||
+ (fieldname == ATTR4_FIELDNAME2) ||
+ (fieldname == ATTR4_FIELDNAME3)))
TestErrPrintf("%d:invalid field name for field #%d: %s\n",__LINE__,j,fieldname.c_str());
- } /* end for */
-
- offset = datatype.getMemberOffset(0);
- verify_val(offset, attr4_field1_off, "DataType::getMemberOffset", __LINE__, __FILE__);
-
- offset = datatype.getMemberOffset(1);
- verify_val(offset, attr4_field2_off, "DataType::getMemberOffset", __LINE__, __FILE__);
-
- offset = datatype.getMemberOffset(2);
- verify_val(offset, attr4_field3_off, "DataType::getMemberOffset", __LINE__, __FILE__);
-
- /* Verify each field's type, class & size */
-
- // Get and verify the type class of the first member
- type_class = datatype.getMemberClass(0);
- verify_val(type_class, H5T_INTEGER, "DataType::getMemberClass", __LINE__, __FILE__);
- // Get and verify the order of this member's type
- IntType i_type = datatype.getMemberIntType(0);
- H5T_order_t order = i_type.getOrder();
- verify_val(order, PredType::NATIVE_INT.getOrder(), "DataType::getOrder", __LINE__, __FILE__);
-
- // Get and verify the size of this member's type
- size = i_type.getSize();
- verify_val(size, PredType::NATIVE_INT.getSize(), "DataType::getSize", __LINE__, __FILE__);
-
- // Get and verify class, order, and size of the second member's type
- type_class = datatype.getMemberClass(1);
- verify_val(type_class, H5T_FLOAT, "DataType::getMemberClass", __LINE__, __FILE__);
- FloatType f_type = datatype.getMemberFloatType(1);
- order = f_type.getOrder();
- verify_val(order, PredType::NATIVE_DOUBLE.getOrder(), "DataType::getOrder", __LINE__, __FILE__);
- size = f_type.getSize();
- verify_val(size, PredType::NATIVE_DOUBLE.getSize(), "DataType::getSize", __LINE__, __FILE__);
-
- // Get and verify class, order, and size of the third member's type
- type_class = datatype.getMemberClass(2);
- verify_val(type_class, H5T_INTEGER, "DataType::getMemberClass", __LINE__, __FILE__);
- // Note: H5T_INTEGER is correct here!
-
- StrType s_type = datatype.getMemberStrType(2);
- order = s_type.getOrder();
- verify_val(order, PredType::NATIVE_SCHAR.getOrder(), "DataType::getOrder", __LINE__, __FILE__);
- size = s_type.getSize();
- verify_val(size, PredType::NATIVE_SCHAR.getSize(), "DataType::getSize", __LINE__, __FILE__);
-
- // Read attribute information
- attr.read(datatype, read_data4);
-
- // Verify values read in
- hsize_t ii, jj;
- for(ii=0; ii<ATTR4_DIM1; ii++)
- for(jj=0; jj<ATTR4_DIM2; jj++)
- if(HDmemcmp(&attr_data4[ii][jj],&read_data4[ii][jj],sizeof(struct attr4_struct))) {
- TestErrPrintf("%d:attribute data different: attr_data4[%d][%d].i=%d, read_data4[%d][%d].i=%d\n",__LINE__,ii,jj,attr_data4[ii][jj].i,ii,jj,read_data4[ii][jj].i);
- TestErrPrintf("%d:attribute data different: attr_data4[%d][%d].d=%f, read_data4[%d][%d].d=%f\n",__LINE__,ii,jj,attr_data4[ii][jj].d,ii,jj,read_data4[ii][jj].d);
- TestErrPrintf("%d:attribute data different: attr_data4[%d][%d].c=%c, read_data4[%d][%d].c=%c\n",__LINE__,ii,jj,attr_data4[ii][jj].c,ii,jj,read_data4[ii][jj].c);
+ } /* end for */
+
+ offset = datatype.getMemberOffset(0);
+ verify_val(offset, attr4_field1_off, "DataType::getMemberOffset", __LINE__, __FILE__);
+
+ offset = datatype.getMemberOffset(1);
+ verify_val(offset, attr4_field2_off, "DataType::getMemberOffset", __LINE__, __FILE__);
+
+ offset = datatype.getMemberOffset(2);
+ verify_val(offset, attr4_field3_off, "DataType::getMemberOffset", __LINE__, __FILE__);
+
+ /* Verify each field's type, class & size */
+
+ // Get and verify the type class of the first member
+ type_class = datatype.getMemberClass(0);
+ verify_val(type_class, H5T_INTEGER, "DataType::getMemberClass", __LINE__, __FILE__);
+ // Get and verify the order of this member's type
+ IntType i_type = datatype.getMemberIntType(0);
+ H5T_order_t order = i_type.getOrder();
+ verify_val(order, PredType::NATIVE_INT.getOrder(), "DataType::getOrder", __LINE__, __FILE__);
+
+ // Get and verify the size of this member's type
+ size = i_type.getSize();
+ verify_val(size, PredType::NATIVE_INT.getSize(), "DataType::getSize", __LINE__, __FILE__);
+
+ // Get and verify class, order, and size of the second member's type
+ type_class = datatype.getMemberClass(1);
+ verify_val(type_class, H5T_FLOAT, "DataType::getMemberClass", __LINE__, __FILE__);
+ FloatType f_type = datatype.getMemberFloatType(1);
+ order = f_type.getOrder();
+ verify_val(order, PredType::NATIVE_DOUBLE.getOrder(), "DataType::getOrder", __LINE__, __FILE__);
+ size = f_type.getSize();
+ verify_val(size, PredType::NATIVE_DOUBLE.getSize(), "DataType::getSize", __LINE__, __FILE__);
+
+ // Get and verify class, order, and size of the third member's type
+ type_class = datatype.getMemberClass(2);
+ verify_val(type_class, H5T_INTEGER, "DataType::getMemberClass", __LINE__, __FILE__);
+ // Note: H5T_INTEGER is correct here!
+
+ StrType s_type = datatype.getMemberStrType(2);
+ order = s_type.getOrder();
+ verify_val(order, PredType::NATIVE_SCHAR.getOrder(), "DataType::getOrder", __LINE__, __FILE__);
+ size = s_type.getSize();
+ verify_val(size, PredType::NATIVE_SCHAR.getSize(), "DataType::getSize", __LINE__, __FILE__);
+
+ // Read attribute information
+ attr.read(datatype, read_data4);
+
+ // Verify values read in
+ hsize_t ii, jj;
+ for(ii=0; ii<ATTR4_DIM1; ii++)
+ for(jj=0; jj<ATTR4_DIM2; jj++)
+ if(HDmemcmp(&attr_data4[ii][jj],&read_data4[ii][jj],sizeof(struct attr4_struct))) {
+ TestErrPrintf("%d:attribute data different: attr_data4[%d][%d].i=%d, read_data4[%d][%d].i=%d\n",__LINE__,ii,jj,attr_data4[ii][jj].i,ii,jj,read_data4[ii][jj].i);
+ TestErrPrintf("%d:attribute data different: attr_data4[%d][%d].d=%f, read_data4[%d][%d].d=%f\n",__LINE__,ii,jj,attr_data4[ii][jj].d,ii,jj,read_data4[ii][jj].d);
+ TestErrPrintf("%d:attribute data different: attr_data4[%d][%d].c=%c, read_data4[%d][%d].c=%c\n",__LINE__,ii,jj,attr_data4[ii][jj].c,ii,jj,read_data4[ii][jj].c);
} /* end if */
- // Verify name
- H5std_string attr_name = attr.getName();
- verify_val(attr_name, ATTR4_NAME, "Attribute::getName", __LINE__, __FILE__);
+ // Verify name
+ H5std_string attr_name = attr.getName();
+ verify_val(attr_name, ATTR4_NAME, "Attribute::getName", __LINE__, __FILE__);
} // end try block
catch (Exception& E)
{
- issue_fail_msg("test_attr_compound_read()", __LINE__, __FILE__, E.getCDetailMsg());
+ issue_fail_msg("test_attr_compound_read()", __LINE__, __FILE__, E.getCDetailMsg());
}
try
{
- // Now, try truncating the file to make sure reference counting is good.
- // If any references to ids in the previous block are left unterminated,
- // the truncating will fail, because the file will not be closed in
- // the file.close() above.
- H5File file1(FILE_COMPOUND, H5F_ACC_TRUNC);
+        // Now, try truncating the file to make sure reference counting is good.
+        // If any ids from the previous block were left open, the truncation
+        // will fail, because the file cannot actually be closed when fid1
+        // goes out of scope at the end of that block.
+ H5File file1(FILE_COMPOUND, H5F_ACC_TRUNC);
- PASSED();
+ PASSED();
} // end try block
catch (FileIException& E)
{
- issue_fail_msg("test_attr_compound_read()", __LINE__, __FILE__, "Unable to truncate file, possibly because some objects are left opened");
+ issue_fail_msg("test_attr_compound_read()", __LINE__, __FILE__, "Unable to truncate file, possibly because some objects are left opened");
}
} // test_attr_compound_read()
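A minimal compound-attribute sketch along the same lines as test_attr_compound_write/read above; the struct layout, member names, and attribute name "records" are hypothetical:

    #include "H5Cpp.h"
    #include <cstddef>      // offsetof
    using namespace H5;

    struct rec_t { int i; double d; };

    // Build a compound datatype, attach it to a dataset as an attribute,
    // then write and read the records using the same memory type.
    void compound_attr(DataSet& dset)
    {
        CompType ctype(sizeof(rec_t));
        ctype.insertMember("i", offsetof(rec_t, i), PredType::NATIVE_INT);
        ctype.insertMember("d", offsetof(rec_t, d), PredType::NATIVE_DOUBLE);

        hsize_t dims[1] = {2};
        DataSpace space(1, dims);
        Attribute attr = dset.createAttribute("records", ctype, space);

        rec_t wdata[2] = {{1, 1.5}, {2, 2.5}};
        attr.write(ctype, wdata);

        rec_t rdata[2];
        attr.read(ctype, rdata);
    }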
@@ -773,47 +771,47 @@ static void test_attr_scalar_write()
SUBTEST("Basic Scalar Attribute Writing Functions");
try {
- // Create file
- H5File fid1(FILE_SCALAR, H5F_ACC_TRUNC);
+ // Create file
+ H5File fid1(FILE_SCALAR, H5F_ACC_TRUNC);
- // Create dataspace for dataset
- hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3};
- DataSpace sid1(SPACE1_RANK, dims1);
+ // Create dataspace for dataset
+ hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3};
+ DataSpace sid1(SPACE1_RANK, dims1);
- // Create a dataset
- DataSet dataset = fid1.createDataSet(DSET1_NAME, PredType::NATIVE_UCHAR,sid1);
+ // Create a dataset
+ DataSet dataset = fid1.createDataSet(DSET1_NAME, PredType::NATIVE_UCHAR,sid1);
- // Close dataset's dataspace
- sid1.close();
+ // Close dataset's dataspace
+ sid1.close();
- // Create dataspace for attribute
- DataSpace att_space(ATTR5_RANK, NULL);
+ // Create dataspace for attribute
+ DataSpace att_space(ATTR5_RANK, NULL);
- // Create an attribute for the dataset
- Attribute ds_attr = dataset.createAttribute (ATTR5_NAME, PredType::NATIVE_FLOAT, att_space);
+ // Create an attribute for the dataset
+ Attribute ds_attr = dataset.createAttribute (ATTR5_NAME, PredType::NATIVE_FLOAT, att_space);
- // Try creating an attribute that already exists. This should fail
- // since two attributes cannot have the same name. If an exception
- // is not thrown for this action by createAttribute, then throw an
- // invalid action exception.
- try {
- Attribute invalid_attr = dataset.createAttribute (ATTR5_NAME, PredType::NATIVE_FLOAT, att_space);
+ // Try creating an attribute that already exists. This should fail
+ // since two attributes cannot have the same name. If an exception
+ // is not thrown for this action by createAttribute, then throw an
+ // invalid action exception.
+ try {
+ Attribute invalid_attr = dataset.createAttribute (ATTR5_NAME, PredType::NATIVE_FLOAT, att_space);
- // continuation here, that means no exception has been thrown
- throw InvalidActionException("H5File::createDataSet", "Library allowed overwrite of existing dataset");
- }
- catch (AttributeIException& E) // catching invalid creating attribute
+            // continuation here means no exception has been thrown
+            throw InvalidActionException("DataSet::createAttribute", "Library allowed overwrite of existing attribute");
+ }
+ catch (AttributeIException& E) // catching invalid creating attribute
{} // do nothing, exception expected
- // Write attribute information
- ds_attr.write (PredType::NATIVE_FLOAT, &attr_data5);
+ // Write attribute information
+ ds_attr.write (PredType::NATIVE_FLOAT, &attr_data5);
- PASSED();
+ PASSED();
} // end try block
catch (Exception& E)
{
- issue_fail_msg("test_attr_scalar_write()", __LINE__, __FILE__, E.getCDetailMsg());
+ issue_fail_msg("test_attr_scalar_write()", __LINE__, __FILE__, E.getCDetailMsg());
}
} // test_attr_scalar_write()
@@ -831,38 +829,38 @@ static void test_attr_scalar_read()
SUBTEST("Basic Scalar Attribute Reading Functions");
try {
- // Open file
- H5File fid1(FILE_SCALAR, H5F_ACC_RDWR);
+ // Open file
+ H5File fid1(FILE_SCALAR, H5F_ACC_RDWR);
- // Open the dataset
- DataSet dataset = fid1.openDataSet(DSET1_NAME);
+ // Open the dataset
+ DataSet dataset = fid1.openDataSet(DSET1_NAME);
- // Verify the correct number of attributes
- int num_attrs = dataset.getNumAttrs();
- verify_val(num_attrs, 1, "DataSet::getNumAttrs", __LINE__, __FILE__);
+ // Verify the correct number of attributes
+ int num_attrs = dataset.getNumAttrs();
+ verify_val(num_attrs, 1, "DataSet::getNumAttrs", __LINE__, __FILE__);
- // Open an attribute for the dataset
- Attribute ds_attr=dataset.openAttribute(ATTR5_NAME);
+ // Open an attribute for the dataset
+ Attribute ds_attr=dataset.openAttribute(ATTR5_NAME);
- // Read attribute information
- float read_data2=0.0; // Buffer for reading 1st attribute
- ds_attr.read(PredType::NATIVE_FLOAT,&read_data2);
- if (HDfabs(read_data2 - attr_data5) > FP_EPSILON)
- verify_val(read_data2, attr_data5, FP_EPSILON, "Attribute::read", __LINE__, __FILE__);
+ // Read attribute information
+ float read_data2=0.0; // Buffer for reading 1st attribute
+ ds_attr.read(PredType::NATIVE_FLOAT,&read_data2);
+ if (HDfabs(read_data2 - attr_data5) > FP_EPSILON)
+ verify_val(read_data2, attr_data5, FP_EPSILON, "Attribute::read", __LINE__, __FILE__);
- // Get the dataspace of the attribute
- DataSpace att_space = ds_attr.getSpace();
+ // Get the dataspace of the attribute
+ DataSpace att_space = ds_attr.getSpace();
- // Make certain the dataspace is scalar
- H5S_class_t space_type = att_space.getSimpleExtentType();
- verify_val(space_type, H5S_SCALAR, "DataSpace::getSimpleExtentType", __LINE__, __FILE__);
+ // Make certain the dataspace is scalar
+ H5S_class_t space_type = att_space.getSimpleExtentType();
+ verify_val(space_type, H5S_SCALAR, "DataSpace::getSimpleExtentType", __LINE__, __FILE__);
- PASSED();
+ PASSED();
} // end try block
catch (Exception& E)
{
- issue_fail_msg("test_attr_scalar_read()", __LINE__, __FILE__, E.getCDetailMsg());
+ issue_fail_msg("test_attr_scalar_read()", __LINE__, __FILE__, E.getCDetailMsg());
}
} // test_attr_scalar_read()
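The scalar (rank-0) attribute case from the two tests above, in isolation; the attribute name "temperature" and the value are hypothetical:

    #include "H5Cpp.h"
    using namespace H5;

    // A scalar attribute uses a rank-0 dataspace, i.e. DataSpace(0, NULL).
    void scalar_attr(DataSet& dset)
    {
        DataSpace scalar_space(0, NULL);
        Attribute attr = dset.createAttribute("temperature", PredType::NATIVE_FLOAT, scalar_space);

        float value = 21.5f;
        attr.write(PredType::NATIVE_FLOAT, &value);

        float readback = 0.0f;
        attr.read(PredType::NATIVE_FLOAT, &readback);

        // The attribute's extent class comes back as H5S_SCALAR
        H5S_class_t cls = attr.getSpace().getSimpleExtentType();
    }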
@@ -877,65 +875,65 @@ static void test_attr_mult_write()
SUBTEST("Multiple Attribute Writing Functions");
try {
- // Create file
- H5File fid1 (FILE_MULTI, H5F_ACC_TRUNC);
+ // Create file
+ H5File fid1 (FILE_MULTI, H5F_ACC_TRUNC);
- // Create dataspace for dataset
- hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3};
- DataSpace ds_space (SPACE1_RANK, dims1);
+ // Create dataspace for dataset
+ hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3};
+ DataSpace ds_space (SPACE1_RANK, dims1);
- // Create a dataset
- DataSet dataset = fid1.createDataSet(DSET1_NAME, PredType::NATIVE_UCHAR, ds_space);
+ // Create a dataset
+ DataSet dataset = fid1.createDataSet(DSET1_NAME, PredType::NATIVE_UCHAR, ds_space);
- // Create dataspace for 1st attribute
- hsize_t dims2[] = {ATTR1_DIM1};
- DataSpace att_space (ATTR1_RANK, dims2);
+ // Create dataspace for 1st attribute
+ hsize_t dims2[] = {ATTR1_DIM1};
+ DataSpace att_space (ATTR1_RANK, dims2);
- // Create 1st attribute for the dataset
- Attribute ds_attr = dataset.createAttribute (ATTR1_NAME, PredType::NATIVE_INT, att_space);
+ // Create 1st attribute for the dataset
+ Attribute ds_attr = dataset.createAttribute (ATTR1_NAME, PredType::NATIVE_INT, att_space);
- // Write attribute information
- ds_attr.write (PredType::NATIVE_INT, attr_data1);
+ // Write attribute information
+ ds_attr.write (PredType::NATIVE_INT, attr_data1);
- // Create dataspace for 2nd attribute
- hsize_t dims3[] = {ATTR2_DIM1,ATTR2_DIM2};
- DataSpace att2_space (ATTR2_RANK, dims3);
+ // Create dataspace for 2nd attribute
+ hsize_t dims3[] = {ATTR2_DIM1,ATTR2_DIM2};
+ DataSpace att2_space (ATTR2_RANK, dims3);
- // Create 2nd attribute for the dataset
- Attribute ds_attr2 = dataset.createAttribute (ATTR2_NAME, PredType::NATIVE_INT, att2_space);
+ // Create 2nd attribute for the dataset
+ Attribute ds_attr2 = dataset.createAttribute (ATTR2_NAME, PredType::NATIVE_INT, att2_space);
- // Write 2nd attribute information
- ds_attr2.write (PredType::NATIVE_INT, attr_data2);
+ // Write 2nd attribute information
+ ds_attr2.write (PredType::NATIVE_INT, attr_data2);
- // Create dataspace for 3rd attribute
- hsize_t dims4[] = {ATTR3_DIM1,ATTR3_DIM2,ATTR3_DIM3};
- DataSpace att3_space (ATTR3_RANK, dims4);
+ // Create dataspace for 3rd attribute
+ hsize_t dims4[] = {ATTR3_DIM1,ATTR3_DIM2,ATTR3_DIM3};
+ DataSpace att3_space (ATTR3_RANK, dims4);
- // Create 3rd attribute for the dataset
- Attribute ds_attr3 = dataset.createAttribute (ATTR3_NAME, PredType::NATIVE_DOUBLE, att3_space);
+ // Create 3rd attribute for the dataset
+ Attribute ds_attr3 = dataset.createAttribute (ATTR3_NAME, PredType::NATIVE_DOUBLE, att3_space);
- // Try creating an attribute that already exists. This should fail
- // since two attributes cannot have the same name. If an exception
- // is not thrown for this action by createAttribute, then throw an
- // invalid action exception.
- try {
- Attribute invalid_attr = dataset.createAttribute (ATTR3_NAME, PredType::NATIVE_DOUBLE, att3_space);
+ // Try creating an attribute that already exists. This should fail
+ // since two attributes cannot have the same name. If an exception
+ // is not thrown for this action by createAttribute, then throw an
+ // invalid action exception.
+ try {
+ Attribute invalid_attr = dataset.createAttribute (ATTR3_NAME, PredType::NATIVE_DOUBLE, att3_space);
- // continuation here, that means no exception has been thrown
- throw InvalidActionException("DataSet::createAttribute", "Attempting to create a duplicate attribute");
- }
- catch (AttributeIException& E) // catching invalid creating attribute
+            // continuation here means no exception has been thrown
+ throw InvalidActionException("DataSet::createAttribute", "Attempting to create a duplicate attribute");
+ }
+ catch (AttributeIException& E) // catching invalid creating attribute
{} // do nothing, exception expected
- // Write 3rd attribute information
- ds_attr3.write (PredType::NATIVE_DOUBLE, attr_data3);
+ // Write 3rd attribute information
+ ds_attr3.write (PredType::NATIVE_DOUBLE, attr_data3);
- PASSED();
+ PASSED();
} // end try block
catch (Exception& E)
{
- issue_fail_msg("test_attr_mult_write()", __LINE__, __FILE__, E.getCDetailMsg());
+ issue_fail_msg("test_attr_mult_write()", __LINE__, __FILE__, E.getCDetailMsg());
}
} // test_attr_mult_write()
@@ -951,39 +949,39 @@ static void test_attr_mult_read()
double read_data3[ATTR3_DIM1][ATTR3_DIM2][ATTR3_DIM3]={{{0}}}; // Buffer for reading 3rd attribute
hsize_t i,j,k;
- // Output message about test being performed
+ // Output message about test being performed
SUBTEST("Multiple Attribute Reading Functions");
try {
- // Open file
- H5File fid1(FILE_MULTI, H5F_ACC_RDWR);
+ // Open file
+ H5File fid1(FILE_MULTI, H5F_ACC_RDWR);
- // Open the dataset
- DataSet dataset = fid1.openDataSet(DSET1_NAME);
+ // Open the dataset
+ DataSet dataset = fid1.openDataSet(DSET1_NAME);
- // Verify the correct number of attributes
- int num_attrs = dataset.getNumAttrs();
- verify_val(num_attrs, 3, "DataSet::getNumAttrs", __LINE__, __FILE__);
+ // Verify the correct number of attributes
+ int num_attrs = dataset.getNumAttrs();
+ verify_val(num_attrs, 3, "DataSet::getNumAttrs", __LINE__, __FILE__);
- // Open 1st attribute for the dataset
- Attribute attr = dataset.openAttribute((unsigned)0);
+ // Open 1st attribute for the dataset
+ Attribute attr = dataset.openAttribute((unsigned)0);
- /* Verify Dataspace */
+ /* Verify Dataspace */
- // Get the dataspace of the attribute
- DataSpace space = attr.getSpace();
+ // Get the dataspace of the attribute
+ DataSpace space = attr.getSpace();
- // Get the rank of the dataspace and verify it
- int rank = space.getSimpleExtentNdims();
- verify_val(rank, ATTR1_RANK, "DataSpace::getSimpleExtentNdims", __LINE__, __FILE__);
+ // Get the rank of the dataspace and verify it
+ int rank = space.getSimpleExtentNdims();
+ verify_val(rank, ATTR1_RANK, "DataSpace::getSimpleExtentNdims", __LINE__, __FILE__);
- // Get the dims of the dataspace and verify them
- hsize_t dims[ATTR_MAX_DIMS]; // Attribute dimensions
- int ndims = space.getSimpleExtentDims(dims);
- if ((long)dims[0] != (long)ATTR1_DIM1)
- TestErrPrintf("%d:attribute dimensions different: dims[0]=%d, should be %d\n",__LINE__,(int)dims[0],ATTR1_DIM1);
+ // Get the dims of the dataspace and verify them
+ hsize_t dims[ATTR_MAX_DIMS]; // Attribute dimensions
+ int ndims = space.getSimpleExtentDims(dims);
+ if ((long)dims[0] != (long)ATTR1_DIM1)
+ TestErrPrintf("%d:attribute dimensions different: dims[0]=%d, should be %d\n",__LINE__,(int)dims[0],ATTR1_DIM1);
- /* Verify Datatype */
+ /* Verify Datatype */
// Get the class of the datatype that is used by attr
H5T_class_t type_class = attr.getTypeClass();
@@ -991,51 +989,51 @@ static void test_attr_mult_read()
// Verify that the type is of integer datatype
verify_val(type_class, H5T_INTEGER, "Attribute::getTypeClass", __LINE__, __FILE__);
- // Get the integer datatype
+ // Get the integer datatype
IntType i_type1 = attr.getIntType();
- // Get and verify the order of this type
- H5T_order_t order = i_type1.getOrder();
- verify_val(order, PredType::NATIVE_INT.getOrder(), "DataType::getOrder", __LINE__, __FILE__);
+ // Get and verify the order of this type
+ H5T_order_t order = i_type1.getOrder();
+ verify_val(order, PredType::NATIVE_INT.getOrder(), "DataType::getOrder", __LINE__, __FILE__);
- // Get and verify the size of this type
- size_t size = i_type1.getSize();
- verify_val(size, PredType::NATIVE_INT.getSize(), "DataType::getSize", __LINE__, __FILE__);
+ // Get and verify the size of this type
+ size_t size = i_type1.getSize();
+ verify_val(size, PredType::NATIVE_INT.getSize(), "DataType::getSize", __LINE__, __FILE__);
- // Read attribute information
- attr.read(PredType::NATIVE_INT, read_data1);
+ // Read attribute information
+ attr.read(PredType::NATIVE_INT, read_data1);
- // Verify values read in
- for(i=0; i<ATTR1_DIM1; i++)
- if(attr_data1[i]!=read_data1[i])
- TestErrPrintf("%d: attribute data different: attr_data1[%d]=%d,read_data1[%d]=%d\n",__LINE__,i,attr_data1[i],i,read_data1[i]);
+ // Verify values read in
+ for(i=0; i<ATTR1_DIM1; i++)
+ if(attr_data1[i]!=read_data1[i])
+ TestErrPrintf("%d: attribute data different: attr_data1[%d]=%d,read_data1[%d]=%d\n",__LINE__,i,attr_data1[i],i,read_data1[i]);
- // Verify Name
- H5std_string attr_name = attr.getName();
- verify_val(attr_name, ATTR1_NAME, "DataType::getName", __LINE__, __FILE__);
+ // Verify Name
+ H5std_string attr_name = attr.getName();
+ verify_val(attr_name, ATTR1_NAME, "DataType::getName", __LINE__, __FILE__);
- attr.close();
- space.close();
+ attr.close();
+ space.close();
- // Open 2nd attribute for the dataset
- attr = dataset.openAttribute((unsigned)1);
+ // Open 2nd attribute for the dataset
+ attr = dataset.openAttribute((unsigned)1);
- /* Verify Dataspace */
+ /* Verify Dataspace */
- // Get the dataspace of the attribute
- space = attr.getSpace();
+ // Get the dataspace of the attribute
+ space = attr.getSpace();
- // Get the rank of the dataspace and verify it
- rank = space.getSimpleExtentNdims();
- verify_val(rank, ATTR2_RANK, "DataSpace::getSimpleExtentNdims", __LINE__, __FILE__);
+ // Get the rank of the dataspace and verify it
+ rank = space.getSimpleExtentNdims();
+ verify_val(rank, ATTR2_RANK, "DataSpace::getSimpleExtentNdims", __LINE__, __FILE__);
- // Get the dims of the dataspace and verify them
- ndims = space.getSimpleExtentDims(dims);
+ // Get the dims of the dataspace and verify them
+ ndims = space.getSimpleExtentDims(dims);
verify_val((long)dims[0], (long)ATTR2_DIM1, "DataSpace::getSimpleExtentDims",__LINE__, __FILE__);
verify_val((long)dims[1], (long)ATTR2_DIM2, "DataSpace::getSimpleExtentDims",__LINE__, __FILE__);
- /* Verify Datatype */
+ /* Verify Datatype */
// Get the class of the datatype that is used by attr
type_class = attr.getTypeClass();
@@ -1043,52 +1041,52 @@ static void test_attr_mult_read()
// Verify that the type is of integer datatype
verify_val(type_class, H5T_INTEGER, "Attribute::getTypeClass", __LINE__, __FILE__);
- // Get the integer datatype
+ // Get the integer datatype
IntType i_type2 = attr.getIntType();
- // Get and verify the order of this type
- order = i_type2.getOrder();
- verify_val(order, PredType::NATIVE_INT.getOrder(), "DataType::getOrder", __LINE__, __FILE__);
+ // Get and verify the order of this type
+ order = i_type2.getOrder();
+ verify_val(order, PredType::NATIVE_INT.getOrder(), "DataType::getOrder", __LINE__, __FILE__);
- // Get and verify the size of this type
- size = i_type2.getSize();
- verify_val(size, PredType::NATIVE_INT.getSize(), "DataType::getSize", __LINE__, __FILE__);
+ // Get and verify the size of this type
+ size = i_type2.getSize();
+ verify_val(size, PredType::NATIVE_INT.getSize(), "DataType::getSize", __LINE__, __FILE__);
- // Read attribute information
- attr.read(PredType::NATIVE_INT, read_data2);
- //attr.read(i_type, read_data2);
+ // Read attribute information
+ attr.read(PredType::NATIVE_INT, read_data2);
+ //attr.read(i_type, read_data2);
- // Verify values read in
- for(i=0; i<ATTR2_DIM1; i++)
- for(j=0; j<ATTR2_DIM2; j++)
+ // Verify values read in
+ for(i=0; i<ATTR2_DIM1; i++)
+ for(j=0; j<ATTR2_DIM2; j++)
if(attr_data2[i][j]!=read_data2[i][j])
TestErrPrintf("%d: attribute data different: attr_data2[%d][%d]=%d, read_data2[%d][%d]=%d\n",__LINE__,i,j,attr_data2[i][j],i,j,read_data2[i][j]);
- // Verify Name
- attr_name = attr.getName();
- verify_val(attr_name, ATTR2_NAME, "DataType::getName", __LINE__, __FILE__);
- attr.close();
- space.close();
+ // Verify Name
+ attr_name = attr.getName();
+ verify_val(attr_name, ATTR2_NAME, "DataType::getName", __LINE__, __FILE__);
+ attr.close();
+ space.close();
- // Open 3rd attribute for the dataset
- attr = dataset.openAttribute((unsigned)2);
+ // Open 3rd attribute for the dataset
+ attr = dataset.openAttribute((unsigned)2);
- /* Verify Dataspace */
+ /* Verify Dataspace */
- // Get the dataspace of the attribute
- space = attr.getSpace();
+ // Get the dataspace of the attribute
+ space = attr.getSpace();
- // Get the rank of the dataspace and verify it
- rank = space.getSimpleExtentNdims();
- verify_val(rank, ATTR3_RANK, "DataSpace::getSimpleExtentNdims", __LINE__, __FILE__);
+ // Get the rank of the dataspace and verify it
+ rank = space.getSimpleExtentNdims();
+ verify_val(rank, ATTR3_RANK, "DataSpace::getSimpleExtentNdims", __LINE__, __FILE__);
- // Get the dims of the dataspace and verify them
- ndims = space.getSimpleExtentDims(dims);
- verify_val((long)dims[0],(long)ATTR3_DIM1,"attribute dimensions",__FILE__,__LINE__);
- verify_val((long)dims[1],(long)ATTR3_DIM2,"attribute dimensions",__FILE__,__LINE__);
- verify_val((long)dims[2],(long)ATTR3_DIM3,"attribute dimensions",__FILE__,__LINE__);
+ // Get the dims of the dataspace and verify them
+ ndims = space.getSimpleExtentDims(dims);
+    verify_val((long)dims[0],(long)ATTR3_DIM1,"attribute dimensions",__LINE__,__FILE__);
+    verify_val((long)dims[1],(long)ATTR3_DIM2,"attribute dimensions",__LINE__,__FILE__);
+    verify_val((long)dims[2],(long)ATTR3_DIM3,"attribute dimensions",__LINE__,__FILE__);
- /* Verify Datatype */
+ /* Verify Datatype */
// Get the class of the datatype that is used by attr
type_class = attr.getTypeClass();
@@ -1096,44 +1094,44 @@ static void test_attr_mult_read()
    // Verify that the type is of float datatype
verify_val(type_class, H5T_FLOAT, "Attribute::getTypeClass", __LINE__, __FILE__);
- // Get the double datatype
+ // Get the double datatype
FloatType f_type = attr.getFloatType();
- // Get and verify the order of this type
- order = f_type.getOrder();
- verify_val(order, PredType::NATIVE_DOUBLE.getOrder(), "DataType::getOrder", __LINE__, __FILE__);
+ // Get and verify the order of this type
+ order = f_type.getOrder();
+ verify_val(order, PredType::NATIVE_DOUBLE.getOrder(), "DataType::getOrder", __LINE__, __FILE__);
- // Get and verify the size of this type
- size = f_type.getSize();
- verify_val(size, PredType::NATIVE_DOUBLE.getSize(), "DataType::getSize", __LINE__, __FILE__);
+ // Get and verify the size of this type
+ size = f_type.getSize();
+ verify_val(size, PredType::NATIVE_DOUBLE.getSize(), "DataType::getSize", __LINE__, __FILE__);
- // Read attribute information
- attr.read(PredType::NATIVE_DOUBLE, read_data3);
+ // Read attribute information
+ attr.read(PredType::NATIVE_DOUBLE, read_data3);
- // Verify values read in
- for(i=0; i<ATTR3_DIM1; i++)
- for(j=0; j<ATTR3_DIM2; j++)
- for(k=0; k<ATTR3_DIM3; k++)
- if(attr_data3[i][j][k]!=read_data3[i][j][k])
- TestErrPrintf("%d: attribute data different: attr_data3[%d][%d][%d]=%f, read_data3[%d][%d][%d]=%f\n",__LINE__,i,j,k,attr_data3[i][j][k],i,j,k,read_data3[i][j][k]);
+ // Verify values read in
+ for(i=0; i<ATTR3_DIM1; i++)
+ for(j=0; j<ATTR3_DIM2; j++)
+ for(k=0; k<ATTR3_DIM3; k++)
+ if(attr_data3[i][j][k]!=read_data3[i][j][k])
+ TestErrPrintf("%d: attribute data different: attr_data3[%d][%d][%d]=%f, read_data3[%d][%d][%d]=%f\n",__LINE__,i,j,k,attr_data3[i][j][k],i,j,k,read_data3[i][j][k]);
- // Verify Name
- attr_name = attr.getName();
- verify_val(attr_name, ATTR3_NAME, "DataType::getName", __LINE__, __FILE__);
+ // Verify Name
+ attr_name = attr.getName();
+    verify_val(attr_name, ATTR3_NAME, "Attribute::getName", __LINE__, __FILE__);
- PASSED();
+ PASSED();
} // end try block
catch (Exception& E)
{
- issue_fail_msg("test_attr_mult_read()", __LINE__, __FILE__, E.getCDetailMsg());
+ issue_fail_msg("test_attr_mult_read()", __LINE__, __FILE__, E.getCDetailMsg());
}
} // test_attr_mult_read()
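Stripped of the verification macros, the read path these hunks exercise is a short sequence of C++ API calls. A minimal sketch, assuming an existing file whose dataset carries a 2-D integer attribute; the file, dataset, and buffer names below are illustrative, not taken from the test:

    #include "H5Cpp.h"
    using namespace H5;

    static void read_int_attr()
    {
        H5File file("example.h5", H5F_ACC_RDONLY);         // illustrative file name
        DataSet dset = file.openDataSet("Dataset1");        // illustrative dataset name
        Attribute attr = dset.openAttribute((unsigned)1);   // open the 2nd attribute by index

        // Inspect the attribute's dataspace
        DataSpace space = attr.getSpace();
        hsize_t dims[2];
        int rank = space.getSimpleExtentNdims();
        space.getSimpleExtentDims(dims);

        // Check the datatype class, then read the values
        if (rank == 2 && attr.getTypeClass() == H5T_INTEGER)
        {
            int buf[100];                                   // assumes dims[0]*dims[1] <= 100
            attr.read(PredType::NATIVE_INT, buf);
        }
    }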
/****************************************************************
**
** test_attr_delete(): Test deleting attribute from different
-** hdf5 objects.
+** hdf5 objects.
**
****************************************************************/
static void test_attr_delete()
@@ -1144,113 +1142,113 @@ static void test_attr_delete()
SUBTEST("Removing Attribute Function");
try {
- // Open file.
- H5File fid1(FILE_BASIC, H5F_ACC_RDWR);
-
- // Get the number of file attributes
- int num_attrs = fid1.getNumAttrs();
- verify_val(num_attrs, 2, "H5File::getNumAttrs", __LINE__, __FILE__);
-
- // Delete the second file attribute
- fid1.removeAttr(FATTR2_NAME);
-
- // Get the number of file attributes
- num_attrs = fid1.getNumAttrs();
- verify_val(num_attrs, 1, "H5File::getNumAttrs", __LINE__, __FILE__);
-
- // Verify the name of the only file attribute left
- Attribute fattr = fid1.openAttribute((unsigned)0);
- attr_name = fattr.getName();
- verify_val(attr_name, FATTR1_NAME, "Attribute::getName", __LINE__, __FILE__);
- fattr.close();
-
- // Test deleting non-existing attribute
-
- // Open the dataset
- DataSet dataset = fid1.openDataSet(DSET1_NAME);
-
- // Verify the correct number of attributes
- num_attrs = dataset.getNumAttrs();
- verify_val(num_attrs, 3, "DataSet::getNumAttrs", __LINE__, __FILE__);
-
- // Try to delete bogus attribute, should fail
- try {
- dataset.removeAttr("Bogus");
-
- // continuation here, that means no exception has been thrown
- throw InvalidActionException("DataSet::removeAttr", "Attempting to remove non-existing attribute");
- }
- catch (AttributeIException& E) // catching invalid removing attribute
+ // Open file.
+ H5File fid1(FILE_BASIC, H5F_ACC_RDWR);
+
+ // Get the number of file attributes
+ int num_attrs = fid1.getNumAttrs();
+ verify_val(num_attrs, 2, "H5File::getNumAttrs", __LINE__, __FILE__);
+
+ // Delete the second file attribute
+ fid1.removeAttr(FATTR2_NAME);
+
+ // Get the number of file attributes
+ num_attrs = fid1.getNumAttrs();
+ verify_val(num_attrs, 1, "H5File::getNumAttrs", __LINE__, __FILE__);
+
+ // Verify the name of the only file attribute left
+ Attribute fattr = fid1.openAttribute((unsigned)0);
+ attr_name = fattr.getName();
+ verify_val(attr_name, FATTR1_NAME, "Attribute::getName", __LINE__, __FILE__);
+ fattr.close();
+
+ // Test deleting non-existing attribute
+
+ // Open the dataset
+ DataSet dataset = fid1.openDataSet(DSET1_NAME);
+
+ // Verify the correct number of attributes
+ num_attrs = dataset.getNumAttrs();
+ verify_val(num_attrs, 3, "DataSet::getNumAttrs", __LINE__, __FILE__);
+
+ // Try to delete bogus attribute, should fail
+ try {
+ dataset.removeAttr("Bogus");
+
+        // Reaching this point means no exception has been thrown
+ throw InvalidActionException("DataSet::removeAttr", "Attempting to remove non-existing attribute");
+ }
+ catch (AttributeIException& E) // catching invalid removing attribute
{} // do nothing, exception expected
- // Test deleting dataset's attributes
+ // Test deleting dataset's attributes
- // Verify the correct number of attributes
- num_attrs = dataset.getNumAttrs();
- verify_val(num_attrs, 3, "DataSet::getNumAttrs", __LINE__, __FILE__);
+ // Verify the correct number of attributes
+ num_attrs = dataset.getNumAttrs();
+ verify_val(num_attrs, 3, "DataSet::getNumAttrs", __LINE__, __FILE__);
- // Delete middle (2nd) attribute
- dataset.removeAttr(ATTR2_NAME);
+ // Delete middle (2nd) attribute
+ dataset.removeAttr(ATTR2_NAME);
- // Verify the correct number of attributes
- num_attrs = dataset.getNumAttrs();
- verify_val(num_attrs, 2, "DataSet::getNumAttrs", __LINE__, __FILE__);
+ // Verify the correct number of attributes
+ num_attrs = dataset.getNumAttrs();
+ verify_val(num_attrs, 2, "DataSet::getNumAttrs", __LINE__, __FILE__);
- // Open 1st attribute for the dataset
- Attribute attr = dataset.openAttribute((unsigned)0);
+ // Open 1st attribute for the dataset
+ Attribute attr = dataset.openAttribute((unsigned)0);
- // Verify Name
- attr_name = attr.getName();
- verify_val(attr_name, ATTR1_NAME, "Attribute::getName", __LINE__, __FILE__);
+ // Verify Name
+ attr_name = attr.getName();
+ verify_val(attr_name, ATTR1_NAME, "Attribute::getName", __LINE__, __FILE__);
- // Close attribute
- attr.close();
+ // Close attribute
+ attr.close();
- // Open last (formally 3rd) attribute for the dataset
- attr = dataset.openAttribute((unsigned)1);
+    // Open last (formerly 3rd) attribute for the dataset
+ attr = dataset.openAttribute((unsigned)1);
- // Verify Name
- attr_name = attr.getName();
- verify_val(attr_name, ATTR3_NAME, "Attribute::getName", __LINE__, __FILE__);
+ // Verify Name
+ attr_name = attr.getName();
+ verify_val(attr_name, ATTR3_NAME, "Attribute::getName", __LINE__, __FILE__);
- attr.close();
+ attr.close();
- // Delete first attribute
- dataset.removeAttr(ATTR1_NAME);
+ // Delete first attribute
+ dataset.removeAttr(ATTR1_NAME);
- // Verify the correct number of attributes
- num_attrs = dataset.getNumAttrs();
- verify_val(num_attrs, 1, "DataSet::getNumAttrs", __LINE__, __FILE__);
+ // Verify the correct number of attributes
+ num_attrs = dataset.getNumAttrs();
+ verify_val(num_attrs, 1, "DataSet::getNumAttrs", __LINE__, __FILE__);
- // Open the only attribute for the dataset (formally 3rd)
- attr = dataset.openAttribute((unsigned)0);
+    // Open the only attribute for the dataset (formerly 3rd)
+ attr = dataset.openAttribute((unsigned)0);
- // Verify Name
- attr_name = attr.getName();
- verify_val(attr_name, ATTR3_NAME, "Attribute::getName", __LINE__, __FILE__);
- // Close attribute
- attr.close();
+ // Verify Name
+ attr_name = attr.getName();
+ verify_val(attr_name, ATTR3_NAME, "Attribute::getName", __LINE__, __FILE__);
+ // Close attribute
+ attr.close();
- // Delete first attribute
- dataset.removeAttr(ATTR3_NAME);
+    // Delete the remaining attribute
+ dataset.removeAttr(ATTR3_NAME);
- // Verify the correct number of attributes
- num_attrs = dataset.getNumAttrs();
- verify_val(num_attrs, 0, "DataSet::getNumAttrs", __LINE__, __FILE__);
+ // Verify the correct number of attributes
+ num_attrs = dataset.getNumAttrs();
+ verify_val(num_attrs, 0, "DataSet::getNumAttrs", __LINE__, __FILE__);
- PASSED();
+ PASSED();
} // end try block
catch (Exception& E)
{
- issue_fail_msg("test_attr_delete()", __LINE__, __FILE__, E.getCDetailMsg());
+ issue_fail_msg("test_attr_delete()", __LINE__, __FILE__, E.getCDetailMsg());
}
} // test_attr_delete()
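The deletion pattern tested above reduces to: count, remove by name, re-count, and re-open the survivors by index (indices shift down after each removal). A minimal sketch, assuming the same H5Cpp.h include and namespace as the test file; the attribute name is hypothetical:

    static void drop_attr(DataSet& dset)
    {
        int num_attrs = dset.getNumAttrs();

        // Throws AttributeIException if the named attribute does not exist
        dset.removeAttr("obsolete_attr");                  // hypothetical attribute name

        num_attrs = dset.getNumAttrs();                    // count drops by one

        // Remaining attributes shift down, so index 0 is the first survivor
        Attribute first = dset.openAttribute((unsigned)0);
        H5std_string name = first.getName();
        first.close();
    }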
/****************************************************************
**
** test_attr_dtype_shared(): Test code for using shared datatypes
-** in attributes.
+** in attributes.
**
****************************************************************/
static void test_attr_dtype_shared()
@@ -1266,142 +1264,142 @@ static void test_attr_dtype_shared()
SUBTEST("Shared Datatypes with Attributes");
try {
- // Create a file
- H5File fid1(FILE_DTYPE, H5F_ACC_TRUNC);
+ // Create a file
+ H5File fid1(FILE_DTYPE, H5F_ACC_TRUNC);
- // Close file
- fid1.close();
+ // Close file
+ fid1.close();
- // Get size of file
- h5_stat_size_t empty_filesize; // Size of empty file
- empty_filesize = h5_get_file_size(FILE_DTYPE.c_str(), H5P_DEFAULT);
- if (empty_filesize < 0)
+ // Get size of file
+ h5_stat_size_t empty_filesize; // Size of empty file
+ empty_filesize = h5_get_file_size(FILE_DTYPE.c_str(), H5P_DEFAULT);
+ if (empty_filesize < 0)
TestErrPrintf("Line %d: file size wrong!\n", __LINE__);
- // Open the file again
- fid1.openFile(FILE_DTYPE, H5F_ACC_RDWR);
+ // Open the file again
+ fid1.openFile(FILE_DTYPE, H5F_ACC_RDWR);
- // Enclosing to work around the issue of unused variables and/or
- // objects created by copy constructors stay around until end of
- // scope, causing incorrect number of ref counts.
- { // First enclosed block
+    // Enclosing block to work around the issue of unused variables and/or
+    // objects created by copy constructors staying around until end of
+    // scope, causing an incorrect number of ref counts.
+ { // First enclosed block
- // Create a datatype to commit and use
- IntType dtype(PredType::NATIVE_INT);
+ // Create a datatype to commit and use
+ IntType dtype(PredType::NATIVE_INT);
- // Commit datatype to file
- dtype.commit(fid1, TYPE1_NAME);
+ // Commit datatype to file
+ dtype.commit(fid1, TYPE1_NAME);
#ifndef H5_NO_DEPRECATED_SYMBOLS
- // Check reference count on named datatype
- fid1.getObjinfo(TYPE1_NAME, statbuf);
- verify_val((int)statbuf.nlink, 1, "DataType::getObjinfo", __LINE__, __FILE__);
+ // Check reference count on named datatype
+ fid1.getObjinfo(TYPE1_NAME, statbuf);
+ verify_val((int)statbuf.nlink, 1, "DataType::getObjinfo", __LINE__, __FILE__);
#endif /* H5_NO_DEPRECATED_SYMBOLS */
- // Create dataspace for dataset
- DataSpace dspace;
+ // Create dataspace for dataset
+ DataSpace dspace;
- DataSet dset = fid1.createDataSet(DSET1_NAME, dtype, dspace);
+ DataSet dset = fid1.createDataSet(DSET1_NAME, dtype, dspace);
#ifndef H5_NO_DEPRECATED_SYMBOLS
- // Check reference count on named datatype
- fid1.getObjinfo(TYPE1_NAME, statbuf);
- verify_val((int)statbuf.nlink, 2, "H5File::getObjinfo", __LINE__, __FILE__);
+ // Check reference count on named datatype
+ fid1.getObjinfo(TYPE1_NAME, statbuf);
+ verify_val((int)statbuf.nlink, 2, "H5File::getObjinfo", __LINE__, __FILE__);
#endif /* H5_NO_DEPRECATED_SYMBOLS */
- // Create attribute on dataset
- Attribute attr = dset.createAttribute(ATTR1_NAME,dtype,dspace);
+ // Create attribute on dataset
+ Attribute attr = dset.createAttribute(ATTR1_NAME,dtype,dspace);
#ifndef H5_NO_DEPRECATED_SYMBOLS
- // Check reference count on named datatype
- fid1.getObjinfo(TYPE1_NAME, statbuf);
- verify_val((int)statbuf.nlink, 3, "DataSet::getObjinfo", __LINE__, __FILE__);
+ // Check reference count on named datatype
+ fid1.getObjinfo(TYPE1_NAME, statbuf);
+ verify_val((int)statbuf.nlink, 3, "DataSet::getObjinfo", __LINE__, __FILE__);
#endif /* H5_NO_DEPRECATED_SYMBOLS */
// Close attribute
attr.close();
- // Delete attribute
- dset.removeAttr(ATTR1_NAME);
+ // Delete attribute
+ dset.removeAttr(ATTR1_NAME);
#ifndef H5_NO_DEPRECATED_SYMBOLS
- // Check reference count on named datatype
- fid1.getObjinfo(TYPE1_NAME, statbuf);
- verify_val((int)statbuf.nlink, 2, "DataSet::getObjinfo after DataSet::removeAttr", __LINE__, __FILE__);
+ // Check reference count on named datatype
+ fid1.getObjinfo(TYPE1_NAME, statbuf);
+ verify_val((int)statbuf.nlink, 2, "DataSet::getObjinfo after DataSet::removeAttr", __LINE__, __FILE__);
#endif /* H5_NO_DEPRECATED_SYMBOLS */
// Create attribute on dataset
attr = dset.createAttribute(ATTR1_NAME,dtype,dspace);
#ifndef H5_NO_DEPRECATED_SYMBOLS
- // Check reference count on named datatype
- fid1.getObjinfo(TYPE1_NAME, statbuf);
- verify_val((int)statbuf.nlink, 3, "DataSet::createAttribute", __LINE__, __FILE__);
+ // Check reference count on named datatype
+ fid1.getObjinfo(TYPE1_NAME, statbuf);
+ verify_val((int)statbuf.nlink, 3, "DataSet::createAttribute", __LINE__, __FILE__);
#endif /* H5_NO_DEPRECATED_SYMBOLS */
- // Write data into the attribute
- attr.write(PredType::NATIVE_INT,&data);
+ // Write data into the attribute
+ attr.write(PredType::NATIVE_INT,&data);
- // Close attribute, dataset, dataspace, datatype, and file
- attr.close();
- dset.close();
- dspace.close();
- dtype.close();
- } // end of first enclosing
+ // Close attribute, dataset, dataspace, datatype, and file
+ attr.close();
+ dset.close();
+ dspace.close();
+ dtype.close();
+ } // end of first enclosing
- fid1.close();
+ fid1.close();
- // Open the file again
- fid1.openFile(FILE_DTYPE, H5F_ACC_RDWR);
+ // Open the file again
+ fid1.openFile(FILE_DTYPE, H5F_ACC_RDWR);
- { // Second enclosed block...
+ { // Second enclosed block...
- // Open dataset
- DataSet *dset2 = new DataSet (fid1.openDataSet(DSET1_NAME));
+ // Open dataset
+ DataSet *dset2 = new DataSet (fid1.openDataSet(DSET1_NAME));
- // Open attribute
- Attribute *attr2 = new Attribute (dset2->openAttribute(ATTR1_NAME));
+ // Open attribute
+ Attribute *attr2 = new Attribute (dset2->openAttribute(ATTR1_NAME));
- // Read data from the attribute
- attr2->read(PredType::NATIVE_INT, &rdata);
- verify_val(data, rdata, "Attribute::read", __LINE__, __FILE__);
+ // Read data from the attribute
+ attr2->read(PredType::NATIVE_INT, &rdata);
+ verify_val(data, rdata, "Attribute::read", __LINE__, __FILE__);
- // Close attribute and dataset
- delete attr2;
- delete dset2;
+ // Close attribute and dataset
+ delete attr2;
+ delete dset2;
#ifndef H5_NO_DEPRECATED_SYMBOLS
- // Check reference count on named datatype
- fid1.getObjinfo(TYPE1_NAME, statbuf);
- verify_val((int)statbuf.nlink, 3, "DataSet::openAttribute", __LINE__, __FILE__);
+ // Check reference count on named datatype
+ fid1.getObjinfo(TYPE1_NAME, statbuf);
+ verify_val((int)statbuf.nlink, 3, "DataSet::openAttribute", __LINE__, __FILE__);
#endif /* H5_NO_DEPRECATED_SYMBOLS */
- } // end of second enclosing
+ } // end of second enclosing
- // Unlink the dataset
- fid1.unlink(DSET1_NAME);
+ // Unlink the dataset
+ fid1.unlink(DSET1_NAME);
#ifndef H5_NO_DEPRECATED_SYMBOLS
- // Check reference count on named datatype
- fid1.getObjinfo(TYPE1_NAME, statbuf);
- verify_val((int)statbuf.nlink, 1, "H5File::unlink", __LINE__, __FILE__);
+ // Check reference count on named datatype
+ fid1.getObjinfo(TYPE1_NAME, statbuf);
+ verify_val((int)statbuf.nlink, 1, "H5File::unlink", __LINE__, __FILE__);
#endif /* H5_NO_DEPRECATED_SYMBOLS */
- // Unlink the named datatype
- fid1.unlink(TYPE1_NAME);
+ // Unlink the named datatype
+ fid1.unlink(TYPE1_NAME);
- // Close file
- fid1.close();
+ // Close file
+ fid1.close();
- // Check size of file
- filesize = h5_get_file_size(FILE_DTYPE.c_str(), H5P_DEFAULT);
- verify_val((long)filesize, (long)empty_filesize, "Checking file size", __LINE__, __FILE__);
+ // Check size of file
+ filesize = h5_get_file_size(FILE_DTYPE.c_str(), H5P_DEFAULT);
+ verify_val((long)filesize, (long)empty_filesize, "Checking file size", __LINE__, __FILE__);
- PASSED();
+ PASSED();
} // end try block
catch (Exception& E)
{
- issue_fail_msg("test_attr_dtype_shared()", __LINE__, __FILE__, E.getCDetailMsg());
+ issue_fail_msg("test_attr_dtype_shared()", __LINE__, __FILE__, E.getCDetailMsg());
}
} // test_attr_dtype_shared()
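The reference-count bookkeeping above rests on one idiom: commit a datatype to the file, then reuse it for a dataset and an attribute so all of them point at the same named type. A minimal sketch of that idiom, using only calls that appear in the test; the file, type, dataset, and attribute names are illustrative:

    H5File file("shared_type.h5", H5F_ACC_TRUNC);       // illustrative file name
    DataSpace scalar(H5S_SCALAR);

    IntType itype(PredType::NATIVE_INT);
    itype.commit(file, "/SharedInt");                   // type becomes a named (shared) datatype

    // Both users below reference the committed type; each one bumps the link
    // count that the getObjinfo()/nlink checks in the test are counting.
    DataSet dset = file.createDataSet("Dataset1", itype, scalar);
    Attribute attr = dset.createAttribute("Attr1", itype, scalar);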
@@ -1424,116 +1422,116 @@ static void test_string_attr()
SUBTEST("I/O on FL and VL String Attributes");
try {
- // Create file
- H5File fid1(FILE_BASIC, H5F_ACC_RDWR);
-
- //
- // Fixed-lenth string attributes
- //
- // Create a fixed-length string datatype to refer to.
- StrType fls_type(0, ATTR_LEN);
-
- // Open the root group.
- Group root = fid1.openGroup("/");
-
- // Create dataspace for the attribute.
- DataSpace att_space (H5S_SCALAR);
-
- /* Test Attribute::write(...,const void *buf) with Fixed len string */
-
- // Create an attribute for the root group.
- Attribute gr_flattr1 = root.createAttribute(ATTR1_FL_STR_NAME, fls_type, att_space);
-
- // Write data to the attribute.
- gr_flattr1.write(fls_type, ATTRSTR_DATA.c_str());
-
- /* Test Attribute::write(...,const H5std_string& strg) with FL string */
-
- // Create an attribute for the root group.
- Attribute gr_flattr2 = root.createAttribute(ATTR2_FL_STR_NAME, fls_type, att_space);
-
- // Write data to the attribute.
- gr_flattr2.write(fls_type, ATTRSTR_DATA);
-
- /* Test Attribute::read(...,void *buf) with FL string */
-
- // Read and verify the attribute string as a string of chars.
- char flstring_att_check[ATTR_LEN];
- gr_flattr1.read(fls_type, flstring_att_check);
- if(HDstrcmp(flstring_att_check, ATTRSTR_DATA.c_str())!=0)
- TestErrPrintf("Line %d: Attribute data different: ATTRSTR_DATA=%s,flstring_att_check=%s\n",__LINE__, ATTRSTR_DATA.c_str(), flstring_att_check);
-
- // Read and verify the attribute string as a string of chars; buffer
- // is dynamically allocated.
- size_t attr_size = gr_flattr1.getInMemDataSize();
- char *fl_dyn_string_att_check;
- fl_dyn_string_att_check = new char[attr_size+1];
- gr_flattr1.read(fls_type, fl_dyn_string_att_check);
- if(HDstrcmp(fl_dyn_string_att_check, ATTRSTR_DATA.c_str())!=0)
- TestErrPrintf("Line %d: Attribute data different: ATTRSTR_DATA=%s,flstring_att_check=%s\n",__LINE__, ATTRSTR_DATA.c_str(), fl_dyn_string_att_check);
- delete []fl_dyn_string_att_check;
-
- /* Test Attribute::read(...,H5std_string& strg) with FL string */
-
- // Read and verify the attribute string as an std::string.
- H5std_string read_flstr1;
- gr_flattr1.read(fls_type, read_flstr1);
- if (read_flstr1 != ATTRSTR_DATA)
- TestErrPrintf("Line %d: Attribute data different: ATTRSTR_DATA=%s,read_flstr1=%s\n",__LINE__, ATTRSTR_DATA.c_str(), read_flstr1.c_str());
-
- // Read and verify the attribute string as a string of chars.
- HDstrcpy(flstring_att_check, "");
- gr_flattr2.read(fls_type, flstring_att_check);
- if(HDstrcmp(flstring_att_check, ATTRSTR_DATA.c_str())!=0)
- TestErrPrintf("Line %d: Attribute data different: ATTRSTR_DATA=%s,flstring_att_check=%s\n",__LINE__, ATTRSTR_DATA.c_str(), flstring_att_check);
-
- /* Test Attribute::read(...,H5std_string& strg) with FL string */
-
- // Read and verify the attribute string as an std::string.
- H5std_string read_flstr2;
- gr_flattr2.read(fls_type, read_flstr2);
- if (read_flstr2 != ATTRSTR_DATA)
- TestErrPrintf("Line %d: Attribute data different: ATTRSTR_DATA=%s,read_flstr2=%s\n",__LINE__, ATTRSTR_DATA.c_str(), read_flstr2.c_str());
-
- //
- // Variable-lenth string attributes
- //
- // Create a variable length string datatype to refer to.
- StrType vls_type(0, H5T_VARIABLE);
-
- // Create an attribute for the root group.
- Attribute gr_vlattr = root.createAttribute(ATTR_VL_STR_NAME, vls_type, att_space);
-
- // Write data to the attribute.
- gr_vlattr.write(vls_type, ATTRSTR_DATA);
-
- /* Test Attribute::read(...,void *buf) with Variable len string */
- // Read and verify the attribute string as a string of chars.
- char *string_att_check;
- gr_vlattr.read(vls_type, &string_att_check);
- if(HDstrcmp(string_att_check, ATTRSTR_DATA.c_str())!=0)
- TestErrPrintf("Line %d: Attribute data different: ATTRSTR_DATA=%s,string_att_check=%s\n",__LINE__, ATTRSTR_DATA.c_str(), string_att_check);
- HDfree(string_att_check);
-
- /* Test Attribute::read(...,H5std_string& strg) with VL string */
- // Read and verify the attribute string as an std::string.
- H5std_string read_str;
- gr_vlattr.read(vls_type, read_str);
- if (read_str != ATTRSTR_DATA)
- TestErrPrintf("Line %d: Attribute data different: ATTRSTR_DATA=%s,read_str=%s\n",__LINE__, ATTRSTR_DATA.c_str(), read_str.c_str());
- PASSED();
+ // Create file
+ H5File fid1(FILE_BASIC, H5F_ACC_RDWR);
+
+ //
+    // Fixed-length string attributes
+ //
+ // Create a fixed-length string datatype to refer to.
+ StrType fls_type(0, ATTR_LEN);
+
+ // Open the root group.
+ Group root = fid1.openGroup("/");
+
+ // Create dataspace for the attribute.
+ DataSpace att_space (H5S_SCALAR);
+
+ /* Test Attribute::write(...,const void *buf) with Fixed len string */
+
+ // Create an attribute for the root group.
+ Attribute gr_flattr1 = root.createAttribute(ATTR1_FL_STR_NAME, fls_type, att_space);
+
+ // Write data to the attribute.
+ gr_flattr1.write(fls_type, ATTRSTR_DATA.c_str());
+
+ /* Test Attribute::write(...,const H5std_string& strg) with FL string */
+
+ // Create an attribute for the root group.
+ Attribute gr_flattr2 = root.createAttribute(ATTR2_FL_STR_NAME, fls_type, att_space);
+
+ // Write data to the attribute.
+ gr_flattr2.write(fls_type, ATTRSTR_DATA);
+
+ /* Test Attribute::read(...,void *buf) with FL string */
+
+ // Read and verify the attribute string as a string of chars.
+ char flstring_att_check[ATTR_LEN];
+ gr_flattr1.read(fls_type, flstring_att_check);
+ if(HDstrcmp(flstring_att_check, ATTRSTR_DATA.c_str())!=0)
+ TestErrPrintf("Line %d: Attribute data different: ATTRSTR_DATA=%s,flstring_att_check=%s\n",__LINE__, ATTRSTR_DATA.c_str(), flstring_att_check);
+
+ // Read and verify the attribute string as a string of chars; buffer
+ // is dynamically allocated.
+ size_t attr_size = gr_flattr1.getInMemDataSize();
+ char *fl_dyn_string_att_check;
+ fl_dyn_string_att_check = new char[attr_size+1];
+ gr_flattr1.read(fls_type, fl_dyn_string_att_check);
+ if(HDstrcmp(fl_dyn_string_att_check, ATTRSTR_DATA.c_str())!=0)
+ TestErrPrintf("Line %d: Attribute data different: ATTRSTR_DATA=%s,flstring_att_check=%s\n",__LINE__, ATTRSTR_DATA.c_str(), fl_dyn_string_att_check);
+ delete []fl_dyn_string_att_check;
+
+ /* Test Attribute::read(...,H5std_string& strg) with FL string */
+
+ // Read and verify the attribute string as an std::string.
+ H5std_string read_flstr1;
+ gr_flattr1.read(fls_type, read_flstr1);
+ if (read_flstr1 != ATTRSTR_DATA)
+ TestErrPrintf("Line %d: Attribute data different: ATTRSTR_DATA=%s,read_flstr1=%s\n",__LINE__, ATTRSTR_DATA.c_str(), read_flstr1.c_str());
+
+ // Read and verify the attribute string as a string of chars.
+ HDstrcpy(flstring_att_check, "");
+ gr_flattr2.read(fls_type, flstring_att_check);
+ if(HDstrcmp(flstring_att_check, ATTRSTR_DATA.c_str())!=0)
+ TestErrPrintf("Line %d: Attribute data different: ATTRSTR_DATA=%s,flstring_att_check=%s\n",__LINE__, ATTRSTR_DATA.c_str(), flstring_att_check);
+
+ /* Test Attribute::read(...,H5std_string& strg) with FL string */
+
+ // Read and verify the attribute string as an std::string.
+ H5std_string read_flstr2;
+ gr_flattr2.read(fls_type, read_flstr2);
+ if (read_flstr2 != ATTRSTR_DATA)
+ TestErrPrintf("Line %d: Attribute data different: ATTRSTR_DATA=%s,read_flstr2=%s\n",__LINE__, ATTRSTR_DATA.c_str(), read_flstr2.c_str());
+
+ //
+    // Variable-length string attributes
+ //
+ // Create a variable length string datatype to refer to.
+ StrType vls_type(0, H5T_VARIABLE);
+
+ // Create an attribute for the root group.
+ Attribute gr_vlattr = root.createAttribute(ATTR_VL_STR_NAME, vls_type, att_space);
+
+ // Write data to the attribute.
+ gr_vlattr.write(vls_type, ATTRSTR_DATA);
+
+ /* Test Attribute::read(...,void *buf) with Variable len string */
+ // Read and verify the attribute string as a string of chars.
+ char *string_att_check;
+ gr_vlattr.read(vls_type, &string_att_check);
+ if(HDstrcmp(string_att_check, ATTRSTR_DATA.c_str())!=0)
+ TestErrPrintf("Line %d: Attribute data different: ATTRSTR_DATA=%s,string_att_check=%s\n",__LINE__, ATTRSTR_DATA.c_str(), string_att_check);
+ HDfree(string_att_check);
+
+ /* Test Attribute::read(...,H5std_string& strg) with VL string */
+ // Read and verify the attribute string as an std::string.
+ H5std_string read_str;
+ gr_vlattr.read(vls_type, read_str);
+ if (read_str != ATTRSTR_DATA)
+ TestErrPrintf("Line %d: Attribute data different: ATTRSTR_DATA=%s,read_str=%s\n",__LINE__, ATTRSTR_DATA.c_str(), read_str.c_str());
+ PASSED();
} // end try block
catch (Exception& E)
{
- issue_fail_msg("test_string_attr()", __LINE__, __FILE__, E.getCDetailMsg());
+ issue_fail_msg("test_string_attr()", __LINE__, __FILE__, E.getCDetailMsg());
}
} // test_string_attr()
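The string hunks cover two distinct datatypes: a fixed-length StrType whose size is set when the type is created, and a variable-length StrType (H5T_VARIABLE) whose storage is sized per value. A minimal sketch of both, assuming an open group and the same includes as the test file; attribute names and the payload are illustrative:

    static void write_string_attrs(Group& grp)
    {
        DataSpace scalar(H5S_SCALAR);
        H5std_string data("sample value");              // illustrative payload

        // Fixed-length string attribute (here 32 bytes)
        StrType fls_type(0, 32);
        Attribute a1 = grp.createAttribute("fixed_str", fls_type, scalar);
        a1.write(fls_type, data);

        // Variable-length string attribute
        StrType vls_type(0, H5T_VARIABLE);
        Attribute a2 = grp.createAttribute("var_str", vls_type, scalar);
        a2.write(vls_type, data);

        // Either kind can be read back into an H5std_string
        H5std_string check;
        a2.read(vls_type, check);
    }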
/****************************************************************
**
** test_attr_exists(): Test checking for attribute existence.
-** (additional attrExists tests are in test_attr_rename())
+** (additional attrExists tests are in test_attr_rename())
**
****************************************************************/
static void test_attr_exists()
@@ -1542,40 +1540,40 @@ static void test_attr_exists()
SUBTEST("Check Attribute Existence");
try {
- // Open file.
- H5File fid1(FILE_BASIC, H5F_ACC_RDWR);
+ // Open file.
+ H5File fid1(FILE_BASIC, H5F_ACC_RDWR);
- // Open the root group.
- Group root = fid1.openGroup("/");
+ // Open the root group.
+ Group root = fid1.openGroup("/");
- // Check for existence of attribute
- bool attr_exists = fid1.attrExists(ATTR1_FL_STR_NAME);
- if (attr_exists == false)
- throw InvalidActionException("H5File::attrExists", "fid1, ATTR1_FL_STR_NAMEAttribute should exist but does not");
+ // Check for existence of attribute
+ bool attr_exists = fid1.attrExists(ATTR1_FL_STR_NAME);
+ if (attr_exists == false)
+        throw InvalidActionException("H5File::attrExists", "fid1, ATTR1_FL_STR_NAME attribute should exist but does not");
- // Check for existence of attribute
- attr_exists = fid1.attrExists(FATTR1_NAME);
- if (attr_exists == false)
- throw InvalidActionException("H5File::attrExists", "fid1,FATTR2_NAMEAttribute should exist but does not");
+ // Check for existence of attribute
+ attr_exists = fid1.attrExists(FATTR1_NAME);
+ if (attr_exists == false)
+        throw InvalidActionException("H5File::attrExists", "fid1, FATTR1_NAME attribute should exist but does not");
- // Open a group.
- Group group = fid1.openGroup(GROUP1_NAME);
+ // Open a group.
+ Group group = fid1.openGroup(GROUP1_NAME);
- // Check for existence of attribute
- attr_exists = group.attrExists(ATTR2_NAME);
- if (attr_exists == false)
- throw InvalidActionException("H5File::attrExists", "group, ATTR2_NAMEAttribute should exist but does not");
+ // Check for existence of attribute
+ attr_exists = group.attrExists(ATTR2_NAME);
+ if (attr_exists == false)
+        throw InvalidActionException("Group::attrExists", "group, ATTR2_NAME attribute should exist but does not");
- PASSED();
+ PASSED();
} // end try block
catch (InvalidActionException& E)
{
- issue_fail_msg("test_attr_exists()", __LINE__, __FILE__, E.getCDetailMsg());
+ issue_fail_msg("test_attr_exists()", __LINE__, __FILE__, E.getCDetailMsg());
}
catch (Exception& E)
{
- issue_fail_msg("test_attr_exists()", __LINE__, __FILE__, E.getCDetailMsg());
+ issue_fail_msg("test_attr_exists()", __LINE__, __FILE__, E.getCDetailMsg());
}
} // test_attr_exists()
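attrExists() is the probe this subtest is built around: it reports a missing attribute through its return value instead of throwing, which is why the test has to raise its own InvalidActionException when the result is false. A minimal sketch, with illustrative names:

    H5File file("example.h5", H5F_ACC_RDONLY);          // illustrative file name
    Group root = file.openGroup("/");

    bool found = root.attrExists("expected_attr");      // hypothetical attribute name
    if (!found)
        TestErrPrintf("attribute is missing\n");        // or handle it however the caller prefers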
@@ -1591,100 +1589,100 @@ const unsigned MAX_COMPACT_DEF = 8;
const unsigned MIN_DENSE_DEF = 6;
static void test_attr_dense_create(FileCreatPropList& fcpl,
- FileAccPropList& fapl)
+ FileAccPropList& fapl)
{
// Output message about test being performed
SUBTEST("Dense Attribute Storage Creation");
try {
- // Create file
- H5File fid1 (FILE_CRTPROPS, H5F_ACC_TRUNC, fcpl, fapl);
+ // Create file
+ H5File fid1 (FILE_CRTPROPS, H5F_ACC_TRUNC, fcpl, fapl);
- // Close file
- fid1.close();
+ // Close file
+ fid1.close();
- // Get size of file
- h5_stat_size_t empty_filesize; // Size of empty file
- empty_filesize = h5_get_file_size(FILE_CRTPROPS.c_str(), fapl.getId());
- if (empty_filesize < 0)
+ // Get size of file
+ h5_stat_size_t empty_filesize; // Size of empty file
+ empty_filesize = h5_get_file_size(FILE_CRTPROPS.c_str(), fapl.getId());
+ if (empty_filesize < 0)
TestErrPrintf("Line %d: file size wrong!\n", __LINE__);
- // Re-open file
- fid1.openFile(FILE_CRTPROPS, H5F_ACC_RDWR, fapl);
+ // Re-open file
+ fid1.openFile(FILE_CRTPROPS, H5F_ACC_RDWR, fapl);
- // Create dataspace for dataset
- DataSpace ds_space(H5S_SCALAR);
+ // Create dataspace for dataset
+ DataSpace ds_space(H5S_SCALAR);
- // Create dataset creation property list.
- DSetCreatPropList dcpl;
+ // Create dataset creation property list.
+ DSetCreatPropList dcpl;
- // Create a dataset
- DataSet dataset = fid1.createDataSet(DSET1_NAME, PredType::NATIVE_UCHAR, ds_space, dcpl);
+ // Create a dataset
+ DataSet dataset = fid1.createDataSet(DSET1_NAME, PredType::NATIVE_UCHAR, ds_space, dcpl);
- unsigned max_compact = 0, min_dense = 0;
+ unsigned max_compact = 0, min_dense = 0;
- // Retrieve limits for compact/dense attribute storage
- dcpl.getAttrPhaseChange(max_compact, min_dense);
- verify_val(max_compact, MAX_COMPACT_DEF, "DSetCreatPropList::getAttrPhaseChange",__LINE__,__FILE__);
- verify_val(min_dense, MIN_DENSE_DEF, "DSetCreatPropList::getAttrPhaseChange",__LINE__,__FILE__);
+ // Retrieve limits for compact/dense attribute storage
+ dcpl.getAttrPhaseChange(max_compact, min_dense);
+ verify_val(max_compact, MAX_COMPACT_DEF, "DSetCreatPropList::getAttrPhaseChange",__LINE__,__FILE__);
+ verify_val(min_dense, MIN_DENSE_DEF, "DSetCreatPropList::getAttrPhaseChange",__LINE__,__FILE__);
- // Set new compact/dense attribute storage limits to some random numbers
- dcpl.setAttrPhaseChange(7, 5);
+ // Set new compact/dense attribute storage limits to some random numbers
+ dcpl.setAttrPhaseChange(7, 5);
- // Retrieve limits for compact/dense attribute storage and verify them
- dcpl.getAttrPhaseChange(max_compact, min_dense);
- verify_val(max_compact, static_cast<unsigned>(7), "DSetCreatPropList::getAttrPhaseChange",__LINE__,__FILE__);
- verify_val(min_dense, static_cast<unsigned>(5), "DSetCreatPropList::getAttrPhaseChange",__LINE__,__FILE__);
+ // Retrieve limits for compact/dense attribute storage and verify them
+ dcpl.getAttrPhaseChange(max_compact, min_dense);
+ verify_val(max_compact, static_cast<unsigned>(7), "DSetCreatPropList::getAttrPhaseChange",__LINE__,__FILE__);
+ verify_val(min_dense, static_cast<unsigned>(5), "DSetCreatPropList::getAttrPhaseChange",__LINE__,__FILE__);
- // Close property list
- dcpl.close();
+ // Close property list
+ dcpl.close();
- // H5O_is_attr_dense_test - un-usable
+ // H5O_is_attr_dense_test - un-usable
- // Add attributes, until just before converting to dense storage
- char attr_name[NAME_BUF_SIZE];
- unsigned attr_num;
- for (attr_num = 0; attr_num < max_compact; attr_num++)
- {
- // Create attribute
- sprintf(attr_name, "attr %02u", attr_num);
- Attribute attr = dataset.createAttribute(attr_name, PredType::NATIVE_UINT, ds_space);
+ // Add attributes, until just before converting to dense storage
+ char attr_name[NAME_BUF_SIZE];
+ unsigned attr_num;
+ for (attr_num = 0; attr_num < max_compact; attr_num++)
+ {
+ // Create attribute
+ sprintf(attr_name, "attr %02u", attr_num);
+ Attribute attr = dataset.createAttribute(attr_name, PredType::NATIVE_UINT, ds_space);
- // Write data to the attribute
- attr.write(PredType::NATIVE_UINT, &attr_num);
- } // end for
+ // Write data to the attribute
+ attr.write(PredType::NATIVE_UINT, &attr_num);
+ } // end for
- // H5O_is_attr_dense_test - un-usable
+ // H5O_is_attr_dense_test - un-usable
- { // Add one more attribute, to push into "dense" storage
+ { // Add one more attribute, to push into "dense" storage
- // Create another attribute
- sprintf(attr_name, "attr %02u", attr_num);
- Attribute attr = dataset.createAttribute(attr_name, PredType::NATIVE_UINT, ds_space);
+ // Create another attribute
+ sprintf(attr_name, "attr %02u", attr_num);
+ Attribute attr = dataset.createAttribute(attr_name, PredType::NATIVE_UINT, ds_space);
- // Write data to the attribute
- attr.write(PredType::NATIVE_UINT, &attr_num);
- }
+ // Write data to the attribute
+ attr.write(PredType::NATIVE_UINT, &attr_num);
+ }
- // Attempt to add attribute again, which should fail
- try
- {
- // Create another attribute
- sprintf(attr_name, "attr %02u", attr_num);
- Attribute attr = dataset.createAttribute(attr_name, PredType::NATIVE_UINT, ds_space);
+ // Attempt to add attribute again, which should fail
+ try
+ {
+ // Create another attribute
+ sprintf(attr_name, "attr %02u", attr_num);
+ Attribute attr = dataset.createAttribute(attr_name, PredType::NATIVE_UINT, ds_space);
- // continuation here, that means no exception has been thrown
- throw InvalidActionException("DataSet::createAttribute", "Maximum number of attributes has been reached");
- }
- catch (AttributeIException& E) // catching invalid action
+        // Reaching this point means no exception has been thrown
+ throw InvalidActionException("DataSet::createAttribute", "Maximum number of attributes has been reached");
+ }
+ catch (AttributeIException& E) // catching invalid action
{} // do nothing, exception expected
- PASSED();
+ PASSED();
} // end try block
catch (Exception& E)
{
- issue_fail_msg("test_attr_dense_create()", __LINE__, __FILE__, E.getCDetailMsg());
+ issue_fail_msg("test_attr_dense_create()", __LINE__, __FILE__, E.getCDetailMsg());
}
} // test_attr_dense_create()
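The compact-to-dense switch is controlled by two thresholds on the dataset creation property list, and the loop above simply adds attributes until the ceiling is crossed. A minimal sketch of the property-list half, using the defaults shown earlier in this hunk (MAX_COMPACT_DEF = 8, MIN_DENSE_DEF = 6):

    DSetCreatPropList dcpl;
    unsigned max_compact = 0, min_dense = 0;

    // Library defaults: up to 8 attributes are stored compactly
    dcpl.getAttrPhaseChange(max_compact, min_dense);    // yields 8, 6

    // Lower the ceiling so the 8th attribute pushes the object into dense storage
    dcpl.setAttrPhaseChange(7, 5);
    dcpl.getAttrPhaseChange(max_compact, min_dense);    // now 7, 5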
@@ -1695,81 +1693,81 @@ static void test_attr_dense_create(FileCreatPropList& fcpl,
**
****************************************************************/
static void test_attr_corder_create_basic(FileCreatPropList& fcpl,
- FileAccPropList& fapl)
+ FileAccPropList& fapl)
{
// Output message about test being performed
SUBTEST("Basic Code for Attributes with Creation Order Info");
try {
- // Create file
- H5File fid1 (FILE_CRTPROPS, H5F_ACC_TRUNC, fcpl, fapl);
-
- // Create dataset creation property list.
- DSetCreatPropList dcpl;
-
- // Get creation order indexing on object
- unsigned crt_order_flags = 0;
- crt_order_flags = dcpl.getAttrCrtOrder();
- verify_val(crt_order_flags, (unsigned)0, "DSetCreatPropList::getAttrCrtOrder",__LINE__,__FILE__);
-
- // Setting invalid combination of a attribute order creation order
- // indexing on should fail
- try {
- dcpl.setAttrCrtOrder(H5P_CRT_ORDER_INDEXED);
-
- // continuation here, that means no exception has been thrown
- throw InvalidActionException("DSetCreatPropList::getAttrCrtOrder", "Indexing cannot be set alone, order tracking is required");
- }
- catch (PropListIException& E) // catching invalid action
+ // Create file
+ H5File fid1 (FILE_CRTPROPS, H5F_ACC_TRUNC, fcpl, fapl);
+
+ // Create dataset creation property list.
+ DSetCreatPropList dcpl;
+
+ // Get creation order indexing on object
+ unsigned crt_order_flags = 0;
+ crt_order_flags = dcpl.getAttrCrtOrder();
+ verify_val(crt_order_flags, (unsigned)0, "DSetCreatPropList::getAttrCrtOrder",__LINE__,__FILE__);
+
+        // Setting an invalid combination of attribute creation order flags
+        // (indexing without tracking) should fail
+ try {
+ dcpl.setAttrCrtOrder(H5P_CRT_ORDER_INDEXED);
+
+        // Reaching this point means no exception has been thrown
+ throw InvalidActionException("DSetCreatPropList::getAttrCrtOrder", "Indexing cannot be set alone, order tracking is required");
+ }
+ catch (PropListIException& E) // catching invalid action
{} // do nothing, exception expected
- // Set attribute creation order tracking & indexing for object then
- // verify them
- dcpl.setAttrCrtOrder(H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED);
- crt_order_flags = dcpl.getAttrCrtOrder();
- verify_val(crt_order_flags, (unsigned)(H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED), "DSetCreatPropList::getAttrCrtOrder",__LINE__,__FILE__);
+ // Set attribute creation order tracking & indexing for object then
+ // verify them
+ dcpl.setAttrCrtOrder(H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED);
+ crt_order_flags = dcpl.getAttrCrtOrder();
+ verify_val(crt_order_flags, (unsigned)(H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED), "DSetCreatPropList::getAttrCrtOrder",__LINE__,__FILE__);
- // Create dataspace for dataset
- DataSpace ds_space(H5S_SCALAR);
+ // Create dataspace for dataset
+ DataSpace ds_space(H5S_SCALAR);
- // Create a dataset
- DataSet dataset = fid1.createDataSet(DSET1_NAME, PredType::NATIVE_UCHAR, ds_space, dcpl);
+ // Create a dataset
+ DataSet dataset = fid1.createDataSet(DSET1_NAME, PredType::NATIVE_UCHAR, ds_space, dcpl);
- // Close dataspace
- ds_space.close();
+ // Close dataspace
+ ds_space.close();
- // Check on dataset's attribute storage status.
- // NOTE: Wrappers not available yet (H5O_is_attr_empty_test
- // and H5O_is_attr_dense_test)
+ // Check on dataset's attribute storage status.
+ // NOTE: Wrappers not available yet (H5O_is_attr_empty_test
+ // and H5O_is_attr_dense_test)
- // Close dataset
- dataset.close();
+ // Close dataset
+ dataset.close();
- // Close property list
- dcpl.close();
+ // Close property list
+ dcpl.close();
- // Close file
- fid1.close();
+ // Close file
+ fid1.close();
- // Re-open file
- fid1.openFile(FILE_CRTPROPS, H5F_ACC_RDWR, fapl);
+ // Re-open file
+ fid1.openFile(FILE_CRTPROPS, H5F_ACC_RDWR, fapl);
- // Open dataset created previously
- dataset = fid1.openDataSet(DSET1_NAME);
+ // Open dataset created previously
+ dataset = fid1.openDataSet(DSET1_NAME);
- // Retrieve dataset creation property list for the dataset
- dcpl = dataset.getCreatePlist();
+ // Retrieve dataset creation property list for the dataset
+ dcpl = dataset.getCreatePlist();
- // Query the attribute creation properties
- crt_order_flags = dcpl.getAttrCrtOrder();
- verify_val(crt_order_flags, (unsigned)(H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED), "DSetCreatPropList::getAttrCrtOrder",__LINE__,__FILE__);
+ // Query the attribute creation properties
+ crt_order_flags = dcpl.getAttrCrtOrder();
+ verify_val(crt_order_flags, (unsigned)(H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED), "DSetCreatPropList::getAttrCrtOrder",__LINE__,__FILE__);
- PASSED();
+ PASSED();
} // end try block
catch (Exception& E)
{
- issue_fail_msg("test_attr_corder_create_basic()", __LINE__, __FILE__, E.getCDetailMsg());
+ issue_fail_msg("test_attr_corder_create_basic()", __LINE__, __FILE__, E.getCDetailMsg());
}
} // test_attr_corder_create_basic()
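The expected-to-fail setAttrCrtOrder call above encodes the one rule worth remembering: creation-order indexing is only valid together with creation-order tracking. A minimal sketch of the valid setting and the round-trip the test verifies:

    DSetCreatPropList dcpl;

    // H5P_CRT_ORDER_INDEXED alone throws a PropListIException (as the test expects);
    // pair it with tracking instead.
    dcpl.setAttrCrtOrder(H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED);

    unsigned flags = dcpl.getAttrCrtOrder();
    // flags == (H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED), and the same value
    // comes back from the dataset's creation property list after the file is reopened.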
@@ -1814,42 +1812,42 @@ void test_attr()
// Set the file access proplist for the type of format
if (new_format)
{
- MESSAGE(7, ("testing with new file format\n"));
- curr_fapl = fapl_new;
+ MESSAGE(7, ("testing with new file format\n"));
+ curr_fapl = fapl_new;
}
else
{
- MESSAGE(7, ("testing with old file format\n"));
- curr_fapl = fapl;
+ MESSAGE(7, ("testing with old file format\n"));
+ curr_fapl = fapl;
}
- test_attr_basic_write(); // Test basic H5A writing code
- test_attr_getname(); // Test overloads of Attribute::getName
- test_attr_rename(); // Test renaming attribute
- test_attr_basic_read(); // Test basic H5A reading code
+ test_attr_basic_write(); // Test basic H5A writing code
+ test_attr_getname(); // Test overloads of Attribute::getName
+ test_attr_rename(); // Test renaming attribute
+ test_attr_basic_read(); // Test basic H5A reading code
- test_attr_compound_write(); // Test complex datatype H5A writing code
- test_attr_compound_read(); // Test complex datatype H5A reading code
+ test_attr_compound_write(); // Test complex datatype H5A writing code
+ test_attr_compound_read(); // Test complex datatype H5A reading code
- test_attr_scalar_write(); // Test scalar dataspace H5A writing code
- test_attr_scalar_read(); // Test scalar dataspace H5A reading code
+ test_attr_scalar_write(); // Test scalar dataspace H5A writing code
+ test_attr_scalar_read(); // Test scalar dataspace H5A reading code
- test_attr_mult_write(); // Test writing multiple attributes
- test_attr_mult_read(); // Test reading multiple attributes
- test_attr_delete(); // Test deleting attributes
+ test_attr_mult_write(); // Test writing multiple attributes
+ test_attr_mult_read(); // Test reading multiple attributes
+ test_attr_delete(); // Test deleting attributes
- test_attr_dtype_shared(); // Test using shared datatypes in attributes
+ test_attr_dtype_shared(); // Test using shared datatypes in attributes
- test_string_attr(); // Test read/write string attribute
- test_attr_exists(); // Test H5Location::attrExists
+ test_string_attr(); // Test read/write string attribute
+ test_attr_exists(); // Test H5Location::attrExists
// Test with new format
if (new_format)
{
- // Test dense attribute storage creation
+ // Test dense attribute storage creation
test_attr_dense_create(fcpl, curr_fapl);
- // Test create objects with attribute creation info
+ // Test create objects with attribute creation info
test_attr_corder_create_basic(fcpl, curr_fapl);
}
} // end for
@@ -1857,18 +1855,18 @@ void test_attr()
catch (Exception& E)
{
- issue_fail_msg("test_attr()", __LINE__, __FILE__, E.getCDetailMsg());
+ issue_fail_msg("test_attr()", __LINE__, __FILE__, E.getCDetailMsg());
}
} // test_attr()
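The driver's structure is the only subtle part: every subtest runs twice, and the only thing that changes between passes is which file access property list is handed to the H5File constructors. A minimal sketch of that outer loop, assuming the property lists are configured elsewhere as in the test; the file name is illustrative:

    static void run_both_formats(FileCreatPropList& fcpl,
                                 FileAccPropList& fapl_old,
                                 FileAccPropList& fapl_new)
    {
        for (int pass = 0; pass < 2; pass++)
        {
            FileAccPropList& curr_fapl = (pass == 1) ? fapl_new : fapl_old;

            // Every pass recreates the file with the chosen property lists
            H5File file("format_check.h5", H5F_ACC_TRUNC, fcpl, curr_fapl);

            // ... run the attribute subtests against 'file' here ...
        }
    }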
/*-------------------------------------------------------------------------
- * Function: cleanup_attr
+ * Function cleanup_attr
*
- * Purpose: Cleanup temporary test files
+ * Purpose Cleanup temporary test files
*
- * Return: none
+ * Return none
*
- * Programmer: Albert Cheng
+ * Programmer Albert Cheng
* July 2, 1998
*
* Modifications:
diff --git a/c++/test/tcompound.cpp b/c++/test/tcompound.cpp
index f49ebb2..3af78c5 100644
--- a/c++/test/tcompound.cpp
+++ b/c++/test/tcompound.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*****************************************************************************
@@ -34,7 +32,7 @@ using namespace H5;
#include "h5cpputil.h" // C++ utilility header file
/* Number of elements in each test */
-#define NTESTELEM 100000
+#define NTESTELEM 100000
typedef struct complex_t {
double re;
@@ -45,11 +43,11 @@ typedef struct complex_t {
/*-------------------------------------------------------------------------
* Function: test_compound_1
*
- * Purpose: Tests various things about compound data types.
+ * Purpose Tests various things about compound data types.
*
- * Return: None
+ * Return None
*
- * Programmer: Binh-Minh Ribler (using C version)
+ * Programmer Binh-Minh Ribler (using C version)
* January, 2007
*
* Modifications:
@@ -61,13 +59,13 @@ static void test_compound_1()
// Output message about test being performed
SUBTEST("Compound Data Types");
try {
- // Create an empty compound datatype
- CompType complex_type(sizeof(complex_t));
+ // Create an empty compound datatype
+ CompType complex_type(sizeof(complex_t));
- // Add a couple of fields
- complex_type.insertMember("real", HOFFSET(complex_t, re), PredType::NATIVE_DOUBLE);
- complex_type.insertMember("imaginary", HOFFSET(complex_t, im), PredType::NATIVE_DOUBLE);
- PASSED();
+ // Add a couple of fields
+ complex_type.insertMember("real", HOFFSET(complex_t, re), PredType::NATIVE_DOUBLE);
+ complex_type.insertMember("imaginary", HOFFSET(complex_t, im), PredType::NATIVE_DOUBLE);
+ PASSED();
} // end of try block
catch (Exception& E)
@@ -78,15 +76,15 @@ static void test_compound_1()
/*-------------------------------------------------------------------------
- * Function: test_compound_2
+ * Function: test_compound_2
*
- * Purpose: Tests a compound type conversion where the source and
- * destination are the same except for the order of the
- * elements.
+ * Purpose Tests a compound type conversion where the source and
+ * destination are the same except for the order of the
+ * elements.
*
- * Return: None
+ * Return None
*
- * Programmer: Binh-Minh Ribler (use C version)
+ * Programmer Binh-Minh Ribler (use C version)
* January, 2007
*
* Modifications:
@@ -96,100 +94,100 @@ static void test_compound_1()
static void test_compound_2()
{
typedef struct {
- int a, b, c[4], d, e;
+ int a, b, c[4], d, e;
} src_typ_t;
typedef struct {
- int e, d, c[4], b, a;
+ int e, d, c[4], b, a;
} dst_typ_t;
- src_typ_t *s_ptr;
- dst_typ_t *d_ptr;
- const int nelmts = NTESTELEM;
+ src_typ_t *s_ptr;
+ dst_typ_t *d_ptr;
+ const int nelmts = NTESTELEM;
const hsize_t four = 4;
- int i;
+ int i;
unsigned char *buf = NULL, *orig = NULL, *bkg = NULL;
ArrayType *array_dt = NULL;
// Output message about test being performed
SUBTEST("Compound Element Reordering");
try {
- // Sizes should be the same, but be careful just in case
- buf = (unsigned char*)HDmalloc(nelmts * MAX(sizeof(src_typ_t), sizeof(dst_typ_t)));
- bkg = (unsigned char*)HDmalloc(nelmts * sizeof(dst_typ_t));
- orig = (unsigned char*)HDmalloc(nelmts * sizeof(src_typ_t));
- for (i=0; i<nelmts; i++) {
- s_ptr = ((src_typ_t*)orig) + i;
- s_ptr->a = i*8+0;
- s_ptr->b = i*8+1;
- s_ptr->c[0] = i*8+2;
- s_ptr->c[1] = i*8+3;
- s_ptr->c[2] = i*8+4;
- s_ptr->c[3] = i*8+5;
- s_ptr->d = i*8+6;
- s_ptr->e = i*8+7;
- }
- memcpy(buf, orig, nelmts*sizeof(src_typ_t));
-
- // Build hdf5 datatypes
- array_dt = new ArrayType(PredType::NATIVE_INT, 1, &four);
-
- // Create an empty compound datatype
- CompType st(sizeof(src_typ_t));
- st.insertMember("a", HOFFSET(src_typ_t, a), PredType::NATIVE_INT);
- st.insertMember("b", HOFFSET(src_typ_t, b), PredType::NATIVE_INT);
- st.insertMember("c", HOFFSET(src_typ_t, c), *array_dt);
- st.insertMember("d", HOFFSET(src_typ_t, d), PredType::NATIVE_INT);
- st.insertMember("e", HOFFSET(src_typ_t, e), PredType::NATIVE_INT);
- array_dt->close();
+ // Sizes should be the same, but be careful just in case
+ buf = (unsigned char*)HDmalloc(nelmts * MAX(sizeof(src_typ_t), sizeof(dst_typ_t)));
+ bkg = (unsigned char*)HDmalloc(nelmts * sizeof(dst_typ_t));
+ orig = (unsigned char*)HDmalloc(nelmts * sizeof(src_typ_t));
+ for (i=0; i<nelmts; i++) {
+ s_ptr = ((src_typ_t*)orig) + i;
+ s_ptr->a = i*8+0;
+ s_ptr->b = i*8+1;
+ s_ptr->c[0] = i*8+2;
+ s_ptr->c[1] = i*8+3;
+ s_ptr->c[2] = i*8+4;
+ s_ptr->c[3] = i*8+5;
+ s_ptr->d = i*8+6;
+ s_ptr->e = i*8+7;
+ }
+ memcpy(buf, orig, nelmts*sizeof(src_typ_t));
+
+ // Build hdf5 datatypes
+ array_dt = new ArrayType(PredType::NATIVE_INT, 1, &four);
+
+ // Create an empty compound datatype
+ CompType st(sizeof(src_typ_t));
+ st.insertMember("a", HOFFSET(src_typ_t, a), PredType::NATIVE_INT);
+ st.insertMember("b", HOFFSET(src_typ_t, b), PredType::NATIVE_INT);
+ st.insertMember("c", HOFFSET(src_typ_t, c), *array_dt);
+ st.insertMember("d", HOFFSET(src_typ_t, d), PredType::NATIVE_INT);
+ st.insertMember("e", HOFFSET(src_typ_t, e), PredType::NATIVE_INT);
+ array_dt->close();
delete array_dt;
- array_dt = new ArrayType(PredType::NATIVE_INT, 1, &four);
-
- // Create an empty compound datatype
- CompType dt(sizeof(dst_typ_t));
- dt.insertMember("a", HOFFSET(dst_typ_t, a), PredType::NATIVE_INT);
- dt.insertMember("b", HOFFSET(dst_typ_t, b), PredType::NATIVE_INT);
- dt.insertMember("c", HOFFSET(dst_typ_t, c), *array_dt);
- dt.insertMember("d", HOFFSET(dst_typ_t, d), PredType::NATIVE_INT);
- dt.insertMember("e", HOFFSET(dst_typ_t, e), PredType::NATIVE_INT);
- array_dt->close();
-
- // Perform the conversion
- st.convert(dt, (size_t)nelmts, buf, bkg);
-
- // Compare results
- for (i=0; i<nelmts; i++) {
- s_ptr = ((src_typ_t*)orig) + i;
- d_ptr = ((dst_typ_t*)buf) + i;
- if (s_ptr->a != d_ptr->a ||
- s_ptr->b != d_ptr->b ||
- s_ptr->c[0] != d_ptr->c[0] ||
- s_ptr->c[1] != d_ptr->c[1] ||
- s_ptr->c[2] != d_ptr->c[2] ||
- s_ptr->c[3] != d_ptr->c[3] ||
- s_ptr->d != d_ptr->d ||
- s_ptr->e != d_ptr->e) {
- H5_FAILED();
- cerr << " i=" << i << endl;
- cerr << " src={a=" << s_ptr->a << ", b=" << s_ptr->b
- << "c=[" << s_ptr->c[0] << "," << s_ptr->c[1] << ","
- << s_ptr->c[2] << "," << s_ptr->c[3] << ", d="
- << s_ptr->d << ", e=" << s_ptr->e << "}" << endl;
- cerr << " dst={a=" << s_ptr->a << ", b=" << s_ptr->b
- << "c=[" << s_ptr->c[0] << "," << s_ptr->c[1] << ","
- << s_ptr->c[2] << "," << s_ptr->c[3] << ", d="
- << s_ptr->d << ", e=" << s_ptr->e << "}" << endl;
- }
- }
- // Release resources
- HDfree(buf);
- HDfree(bkg);
- HDfree(orig);
- s_ptr = NULL;
- d_ptr = NULL;
- st.close();
- dt.close();
- PASSED();
+ array_dt = new ArrayType(PredType::NATIVE_INT, 1, &four);
+
+ // Create an empty compound datatype
+ CompType dt(sizeof(dst_typ_t));
+ dt.insertMember("a", HOFFSET(dst_typ_t, a), PredType::NATIVE_INT);
+ dt.insertMember("b", HOFFSET(dst_typ_t, b), PredType::NATIVE_INT);
+ dt.insertMember("c", HOFFSET(dst_typ_t, c), *array_dt);
+ dt.insertMember("d", HOFFSET(dst_typ_t, d), PredType::NATIVE_INT);
+ dt.insertMember("e", HOFFSET(dst_typ_t, e), PredType::NATIVE_INT);
+ array_dt->close();
+
+ // Perform the conversion
+ st.convert(dt, (size_t)nelmts, buf, bkg);
+
+ // Compare results
+ for (i=0; i<nelmts; i++) {
+ s_ptr = ((src_typ_t*)orig) + i;
+ d_ptr = ((dst_typ_t*)buf) + i;
+ if (s_ptr->a != d_ptr->a ||
+ s_ptr->b != d_ptr->b ||
+ s_ptr->c[0] != d_ptr->c[0] ||
+ s_ptr->c[1] != d_ptr->c[1] ||
+ s_ptr->c[2] != d_ptr->c[2] ||
+ s_ptr->c[3] != d_ptr->c[3] ||
+ s_ptr->d != d_ptr->d ||
+ s_ptr->e != d_ptr->e) {
+ H5_FAILED();
+ cerr << " i=" << i << endl;
+                cerr << "    src={a=" << s_ptr->a << ", b=" << s_ptr->b
+                     << ", c=[" << s_ptr->c[0] << "," << s_ptr->c[1] << ","
+                     << s_ptr->c[2] << "," << s_ptr->c[3] << "], d="
+                     << s_ptr->d << ", e=" << s_ptr->e << "}" << endl;
+                cerr << "    dst={a=" << d_ptr->a << ", b=" << d_ptr->b
+                     << ", c=[" << d_ptr->c[0] << "," << d_ptr->c[1] << ","
+                     << d_ptr->c[2] << "," << d_ptr->c[3] << "], d="
+                     << d_ptr->d << ", e=" << d_ptr->e << "}" << endl;
+ }
+ }
+ // Release resources
+ HDfree(buf);
+ HDfree(bkg);
+ HDfree(orig);
+ s_ptr = NULL;
+ d_ptr = NULL;
+ st.close();
+ dt.close();
+ PASSED();
} // end of try block
catch (Exception& E)
@@ -203,15 +201,15 @@ static void test_compound_2()
/*-------------------------------------------------------------------------
- * Function: test_compound_3
+ * Function: test_compound_3
*
- * Purpose: Tests compound conversions where the source and destination
- * are the same except the destination is missing a couple
- * members which appear in the source.
+ * Purpose Tests compound conversions where the source and destination
+ * are the same except the destination is missing a couple
+ * members which appear in the source.
*
- * Return: None
+ * Return None
*
- * Programmer: Binh-Minh Ribler (use C version)
+ * Programmer Binh-Minh Ribler (use C version)
* January, 2007
*
* Modifications:
@@ -221,16 +219,16 @@ static void test_compound_2()
static void test_compound_3()
{
typedef struct {
- int a, b, c[4], d, e;
+ int a, b, c[4], d, e;
} src_typ_t;
typedef struct {
- int a, c[4], e;
+ int a, c[4], e;
} dst_typ_t;
- src_typ_t *s_ptr;
- dst_typ_t *d_ptr;
- int i;
- const int nelmts = NTESTELEM;
+ src_typ_t *s_ptr;
+ dst_typ_t *d_ptr;
+ int i;
+ const int nelmts = NTESTELEM;
const hsize_t four = 4;
unsigned char *buf = NULL, *orig = NULL, *bkg = NULL;
ArrayType* array_dt = NULL;
@@ -238,85 +236,85 @@ static void test_compound_3()
// Output message about test being performed
SUBTEST("Compound Datatype Subset Conversions");
try {
- /* Initialize */
- buf = (unsigned char*)HDmalloc(nelmts * MAX(sizeof(src_typ_t), sizeof(dst_typ_t)));
- bkg = (unsigned char*)HDmalloc(nelmts * sizeof(dst_typ_t));
- orig = (unsigned char*)HDmalloc(nelmts * sizeof(src_typ_t));
- for (i=0; i<nelmts; i++) {
- s_ptr = ((src_typ_t*)orig) + i;
- s_ptr->a = i*8+0;
- s_ptr->b = i*8+1;
- s_ptr->c[0] = i*8+2;
- s_ptr->c[1] = i*8+3;
- s_ptr->c[2] = i*8+4;
- s_ptr->c[3] = i*8+5;
- s_ptr->d = i*8+6;
- s_ptr->e = i*8+7;
- }
- memcpy(buf, orig, nelmts*sizeof(src_typ_t));
-
- /* Build hdf5 datatypes */
- array_dt = new ArrayType(PredType::NATIVE_INT, 1, &four);
-
- // Create an empty compound datatype
- CompType st(sizeof(src_typ_t));
- st.insertMember("a", HOFFSET(src_typ_t, a), PredType::NATIVE_INT);
- st.insertMember("b", HOFFSET(src_typ_t, b), PredType::NATIVE_INT);
- st.insertMember("c", HOFFSET(src_typ_t, c), *array_dt);
- st.insertMember("d", HOFFSET(src_typ_t, d), PredType::NATIVE_INT);
- st.insertMember("e", HOFFSET(src_typ_t, e), PredType::NATIVE_INT);
- array_dt->close();
+ /* Initialize */
+ buf = (unsigned char*)HDmalloc(nelmts * MAX(sizeof(src_typ_t), sizeof(dst_typ_t)));
+ bkg = (unsigned char*)HDmalloc(nelmts * sizeof(dst_typ_t));
+ orig = (unsigned char*)HDmalloc(nelmts * sizeof(src_typ_t));
+ for (i=0; i<nelmts; i++) {
+ s_ptr = ((src_typ_t*)orig) + i;
+ s_ptr->a = i*8+0;
+ s_ptr->b = i*8+1;
+ s_ptr->c[0] = i*8+2;
+ s_ptr->c[1] = i*8+3;
+ s_ptr->c[2] = i*8+4;
+ s_ptr->c[3] = i*8+5;
+ s_ptr->d = i*8+6;
+ s_ptr->e = i*8+7;
+ }
+ memcpy(buf, orig, nelmts*sizeof(src_typ_t));
+
+ /* Build hdf5 datatypes */
+ array_dt = new ArrayType(PredType::NATIVE_INT, 1, &four);
+
+ // Create an empty compound datatype
+ CompType st(sizeof(src_typ_t));
+ st.insertMember("a", HOFFSET(src_typ_t, a), PredType::NATIVE_INT);
+ st.insertMember("b", HOFFSET(src_typ_t, b), PredType::NATIVE_INT);
+ st.insertMember("c", HOFFSET(src_typ_t, c), *array_dt);
+ st.insertMember("d", HOFFSET(src_typ_t, d), PredType::NATIVE_INT);
+ st.insertMember("e", HOFFSET(src_typ_t, e), PredType::NATIVE_INT);
+ array_dt->close();
delete array_dt;
- array_dt = new ArrayType(PredType::NATIVE_INT, 1, &four);
-
- // Create an empty compound datatype
- CompType dt(sizeof(dst_typ_t));
- dt.insertMember("a", HOFFSET(dst_typ_t, a), PredType::NATIVE_INT);
- dt.insertMember("c", HOFFSET(dst_typ_t, c), *array_dt);
- dt.insertMember("e", HOFFSET(dst_typ_t, e), PredType::NATIVE_INT);
- array_dt->close();
-
- /* Perform the conversion */
- st.convert(dt, (size_t)nelmts, buf, bkg);
-
- /* Compare results */
- for (i=0; i<nelmts; i++) {
- s_ptr = ((src_typ_t*)orig) + i;
- d_ptr = ((dst_typ_t*)buf) + i;
- if (s_ptr->a != d_ptr->a ||
- s_ptr->c[0] != d_ptr->c[0] ||
- s_ptr->c[1] != d_ptr->c[1] ||
- s_ptr->c[2] != d_ptr->c[2] ||
- s_ptr->c[3] != d_ptr->c[3] ||
- s_ptr->e != d_ptr->e) {
- H5_FAILED();
- cerr << " i=" << i << endl;
- cerr << " src={a=" << s_ptr->a << ", b=" << s_ptr->b
- << ", c=[" << s_ptr->c[0] << "," << s_ptr->c[1] << ","
- << s_ptr->c[2] << "," << s_ptr->c[3] << "], d="
- << s_ptr->d << ", e=" << s_ptr->e << "}" << endl;
- cerr << " dst={a=" << d_ptr->a
- << ", c=[" << d_ptr->c[0] << "," << d_ptr->c[1] << ","
- << d_ptr->c[2] << "," << d_ptr->c[3] << "], e="
- << d_ptr->e << "}" << endl;
- } // if
- } // for
-
- /* Release resources */
- HDfree(buf);
- HDfree(bkg);
- HDfree(orig);
- s_ptr = NULL;
- d_ptr = NULL;
- st.close();
- dt.close();
- PASSED();
+ array_dt = new ArrayType(PredType::NATIVE_INT, 1, &four);
+
+ // Create an empty compound datatype
+ CompType dt(sizeof(dst_typ_t));
+ dt.insertMember("a", HOFFSET(dst_typ_t, a), PredType::NATIVE_INT);
+ dt.insertMember("c", HOFFSET(dst_typ_t, c), *array_dt);
+ dt.insertMember("e", HOFFSET(dst_typ_t, e), PredType::NATIVE_INT);
+ array_dt->close();
+
+ /* Perform the conversion */
+ st.convert(dt, (size_t)nelmts, buf, bkg);
+
+ /* Compare results */
+ for (i=0; i<nelmts; i++) {
+ s_ptr = ((src_typ_t*)orig) + i;
+ d_ptr = ((dst_typ_t*)buf) + i;
+ if (s_ptr->a != d_ptr->a ||
+ s_ptr->c[0] != d_ptr->c[0] ||
+ s_ptr->c[1] != d_ptr->c[1] ||
+ s_ptr->c[2] != d_ptr->c[2] ||
+ s_ptr->c[3] != d_ptr->c[3] ||
+ s_ptr->e != d_ptr->e) {
+ H5_FAILED();
+ cerr << " i=" << i << endl;
+ cerr << " src={a=" << s_ptr->a << ", b=" << s_ptr->b
+ << ", c=[" << s_ptr->c[0] << "," << s_ptr->c[1] << ","
+ << s_ptr->c[2] << "," << s_ptr->c[3] << "], d="
+ << s_ptr->d << ", e=" << s_ptr->e << "}" << endl;
+ cerr << " dst={a=" << d_ptr->a
+ << ", c=[" << d_ptr->c[0] << "," << d_ptr->c[1] << ","
+ << d_ptr->c[2] << "," << d_ptr->c[3] << "], e="
+ << d_ptr->e << "}" << endl;
+ } // if
+ } // for
+
+ /* Release resources */
+ HDfree(buf);
+ HDfree(bkg);
+ HDfree(orig);
+ s_ptr = NULL;
+ d_ptr = NULL;
+ st.close();
+ dt.close();
+ PASSED();
} // end of try block
catch (Exception& E)
{
- issue_fail_msg(E.getCFuncName(), __LINE__, __FILE__, E.getCDetailMsg());
+ issue_fail_msg(E.getCFuncName(), __LINE__, __FILE__, E.getCDetailMsg());
}
if(array_dt)
@@ -325,16 +323,16 @@ static void test_compound_3()
/*-------------------------------------------------------------------------
- * Function: test_compound_4
+ * Function: test_compound_4
*
- * Purpose: Tests compound conversions when the destination has the same
- * fields as the source but one or more of the fields are
- * smaller.
+ * Purpose Tests compound conversions when the destination has the same
+ * fields as the source but one or more of the fields are
+ * smaller.
*
- * Return: None
+ * Return None
*
- * Programmer: Binh-Minh Ribler (use C version)
- * January, 2007
+ * Programmer Binh-Minh Ribler (use C version)
+ * January, 2007
*
* Modifications:
*
@@ -344,20 +342,20 @@ static void test_compound_4()
{
typedef struct {
- int a, b, c[4], d, e;
+ int a, b, c[4], d, e;
} src_typ_t;
typedef struct {
- short b;
- int a, c[4];
- short d;
- int e;
+ short b;
+ int a, c[4];
+ short d;
+ int e;
} dst_typ_t;
- src_typ_t *s_ptr;
- dst_typ_t *d_ptr;
- int i;
- const int nelmts = NTESTELEM;
+ src_typ_t *s_ptr;
+ dst_typ_t *d_ptr;
+ int i;
+ const int nelmts = NTESTELEM;
const hsize_t four = 4;
unsigned char *buf = NULL, *orig = NULL, *bkg = NULL;
ArrayType* array_dt = NULL;
@@ -365,85 +363,85 @@ static void test_compound_4()
// Output message about test being performed
SUBTEST("Compound Element Shrinking & Reordering");
try {
- /* Sizes should be the same, but be careful just in case */
- buf = (unsigned char*)HDmalloc(nelmts * MAX(sizeof(src_typ_t), sizeof(dst_typ_t)));
- bkg = (unsigned char*)HDmalloc(nelmts * sizeof(dst_typ_t));
- orig = (unsigned char*)HDmalloc(nelmts * sizeof(src_typ_t));
- for (i=0; i<nelmts; i++) {
- s_ptr = ((src_typ_t*)orig) + i;
- s_ptr->a = i*8+0;
- s_ptr->b = (i*8+1) & 0x7fff;
- s_ptr->c[0] = i*8+2;
- s_ptr->c[1] = i*8+3;
- s_ptr->c[2] = i*8+4;
- s_ptr->c[3] = i*8+5;
- s_ptr->d = (i*8+6) & 0x7fff;
- s_ptr->e = i*8+7;
- }
- memcpy(buf, orig, nelmts*sizeof(src_typ_t));
-
- /* Build hdf5 datatypes */
- array_dt = new ArrayType(PredType::NATIVE_INT, 1, &four);
-
- // Create an empty compound datatype
- CompType st(sizeof(src_typ_t));
- st.insertMember("a", HOFFSET(src_typ_t, a), PredType::NATIVE_INT);
- st.insertMember("b", HOFFSET(src_typ_t, b), PredType::NATIVE_INT);
- st.insertMember("c", HOFFSET(src_typ_t, c), *array_dt);
- st.insertMember("d", HOFFSET(src_typ_t, d), PredType::NATIVE_INT);
- st.insertMember("e", HOFFSET(src_typ_t, e), PredType::NATIVE_INT);
- array_dt->close();
+ /* Sizes should be the same, but be careful just in case */
+ buf = (unsigned char*)HDmalloc(nelmts * MAX(sizeof(src_typ_t), sizeof(dst_typ_t)));
+ bkg = (unsigned char*)HDmalloc(nelmts * sizeof(dst_typ_t));
+ orig = (unsigned char*)HDmalloc(nelmts * sizeof(src_typ_t));
+ for (i=0; i<nelmts; i++) {
+ s_ptr = ((src_typ_t*)orig) + i;
+ s_ptr->a = i*8+0;
+ s_ptr->b = (i*8+1) & 0x7fff;
+ s_ptr->c[0] = i*8+2;
+ s_ptr->c[1] = i*8+3;
+ s_ptr->c[2] = i*8+4;
+ s_ptr->c[3] = i*8+5;
+ s_ptr->d = (i*8+6) & 0x7fff;
+ s_ptr->e = i*8+7;
+ }
+ memcpy(buf, orig, nelmts*sizeof(src_typ_t));
+
+ /* Build hdf5 datatypes */
+ array_dt = new ArrayType(PredType::NATIVE_INT, 1, &four);
+
+ // Create an empty compound datatype
+ CompType st(sizeof(src_typ_t));
+ st.insertMember("a", HOFFSET(src_typ_t, a), PredType::NATIVE_INT);
+ st.insertMember("b", HOFFSET(src_typ_t, b), PredType::NATIVE_INT);
+ st.insertMember("c", HOFFSET(src_typ_t, c), *array_dt);
+ st.insertMember("d", HOFFSET(src_typ_t, d), PredType::NATIVE_INT);
+ st.insertMember("e", HOFFSET(src_typ_t, e), PredType::NATIVE_INT);
+ array_dt->close();
delete array_dt;
- array_dt = new ArrayType(PredType::NATIVE_INT, 1, &four);
-
- // Create an empty compound datatype
- CompType dt(sizeof(dst_typ_t));
- dt.insertMember("a", HOFFSET(dst_typ_t, a), PredType::NATIVE_INT);
- dt.insertMember("b", HOFFSET(dst_typ_t, b), PredType::NATIVE_SHORT);
- dt.insertMember("c", HOFFSET(dst_typ_t, c), *array_dt);
- dt.insertMember("d", HOFFSET(dst_typ_t, d), PredType::NATIVE_SHORT);
- dt.insertMember("e", HOFFSET(dst_typ_t, e), PredType::NATIVE_INT);
- array_dt->close();
-
- /* Perform the conversion */
- st.convert(dt, (size_t)nelmts, buf, bkg);
-
- /* Compare results */
- for (i=0; i<nelmts; i++) {
- s_ptr = ((src_typ_t*)orig) + i;
- d_ptr = ((dst_typ_t*)buf) + i;
- if (s_ptr->a != d_ptr->a ||
- s_ptr->b != d_ptr->b ||
- s_ptr->c[0] != d_ptr->c[0] ||
- s_ptr->c[1] != d_ptr->c[1] ||
- s_ptr->c[2] != d_ptr->c[2] ||
- s_ptr->c[3] != d_ptr->c[3] ||
- s_ptr->d != d_ptr->d ||
- s_ptr->e != d_ptr->e)
- {
- H5_FAILED();
- cerr << " i=" << i << endl;
- cerr << " src={a=" << s_ptr->a << ", b=" << s_ptr->b
- << "c=[" << s_ptr->c[0] << "," << s_ptr->c[1] << ","
- << s_ptr->c[2] << "," << s_ptr->c[3] << ", d="
- << s_ptr->d << ", e=" << s_ptr->e << "}" << endl;
- cerr << " dst={a=" << d_ptr->a << ", b=" << d_ptr->b
- << "c=[" << d_ptr->c[0] << "," << d_ptr->c[1] << ","
- << d_ptr->c[2] << "," << d_ptr->c[3] << ", d="
- << d_ptr->d << ", e=" << d_ptr->e << "}" << endl;
- } // if
- } // for
-
- /* Release resources */
- HDfree(buf);
- HDfree(bkg);
- HDfree(orig);
- s_ptr = NULL;
- d_ptr = NULL;
- st.close();
- dt.close();
- PASSED();
+ array_dt = new ArrayType(PredType::NATIVE_INT, 1, &four);
+
+ // Create an empty compound datatype
+ CompType dt(sizeof(dst_typ_t));
+ dt.insertMember("a", HOFFSET(dst_typ_t, a), PredType::NATIVE_INT);
+ dt.insertMember("b", HOFFSET(dst_typ_t, b), PredType::NATIVE_SHORT);
+ dt.insertMember("c", HOFFSET(dst_typ_t, c), *array_dt);
+ dt.insertMember("d", HOFFSET(dst_typ_t, d), PredType::NATIVE_SHORT);
+ dt.insertMember("e", HOFFSET(dst_typ_t, e), PredType::NATIVE_INT);
+ array_dt->close();
+
+ /* Perform the conversion */
+ st.convert(dt, (size_t)nelmts, buf, bkg);
+
+ /* Compare results */
+ for (i=0; i<nelmts; i++) {
+ s_ptr = ((src_typ_t*)orig) + i;
+ d_ptr = ((dst_typ_t*)buf) + i;
+ if (s_ptr->a != d_ptr->a ||
+ s_ptr->b != d_ptr->b ||
+ s_ptr->c[0] != d_ptr->c[0] ||
+ s_ptr->c[1] != d_ptr->c[1] ||
+ s_ptr->c[2] != d_ptr->c[2] ||
+ s_ptr->c[3] != d_ptr->c[3] ||
+ s_ptr->d != d_ptr->d ||
+ s_ptr->e != d_ptr->e)
+ {
+ H5_FAILED();
+ cerr << " i=" << i << endl;
+                cerr << "    src={a=" << s_ptr->a << ", b=" << s_ptr->b
+                    << ", c=[" << s_ptr->c[0] << "," << s_ptr->c[1] << ","
+                    << s_ptr->c[2] << "," << s_ptr->c[3] << "], d="
+                    << s_ptr->d << ", e=" << s_ptr->e << "}" << endl;
+                cerr << "    dst={a=" << d_ptr->a << ", b=" << d_ptr->b
+                    << ", c=[" << d_ptr->c[0] << "," << d_ptr->c[1] << ","
+                    << d_ptr->c[2] << "," << d_ptr->c[3] << "], d="
+                    << d_ptr->d << ", e=" << d_ptr->e << "}" << endl;
+ } // if
+ } // for
+
+ /* Release resources */
+ HDfree(buf);
+ HDfree(bkg);
+ HDfree(orig);
+ s_ptr = NULL;
+ d_ptr = NULL;
+ st.close();
+ dt.close();
+ PASSED();
} // end of try block
catch (Exception& E)
@@ -457,17 +455,17 @@ static void test_compound_4()
/*-------------------------------------------------------------------------
- * Function: test_compound_5
+ * Function: test_compound_5
*
- * Purpose: Many versions of HDF5 have a bug in the optimized compound
+ * Purpose Many versions of HDF5 have a bug in the optimized compound
* datatype conversion function, H5T_conv_struct_opt(), which
* is triggered when the top-level type contains a struct
* which must undergo a conversion.
*
- * Return: None
+ * Return None
*
- * Programmer: Binh-Minh Ribler (use C version)
- * January, 2007
+ * Programmer Binh-Minh Ribler (use C version)
+ * January, 2007
*
* Modifications:
*
@@ -499,55 +497,55 @@ static void test_compound_5()
SUBTEST("Optimized Struct Converter");
try {
- /* Build datatypes */
- array_dt = new ArrayType(PredType::NATIVE_SHORT, 1, dims);
- CompType short_array(4*sizeof(short));
- short_array.insertMember("_", 0, *array_dt);
- array_dt->close();
+ /* Build datatypes */
+ array_dt = new ArrayType(PredType::NATIVE_SHORT, 1, dims);
+ CompType short_array(4*sizeof(short));
+ short_array.insertMember("_", 0, *array_dt);
+ array_dt->close();
delete array_dt;
- CompType int_array(4*sizeof(int));
- array_dt = new ArrayType(PredType::NATIVE_INT, 1, dims);
- int_array.insertMember("_", 0, *array_dt);
- array_dt->close();
-
- StrType strg(PredType::C_S1, 16);
- CompType src_type(sizeof(src_typ_t));
- src_type.insertMember("name", HOFFSET(src_typ_t, name), strg);
- src_type.insertMember("tdim", HOFFSET(src_typ_t, tdim), PredType::NATIVE_SHORT);
- src_type.insertMember("coll_ids", HOFFSET(src_typ_t, coll_ids), short_array);
-
- CompType dst_type(sizeof(dst_typ_t));
- dst_type.insertMember("name", HOFFSET(dst_typ_t, name), strg);
- dst_type.insertMember("tdim", HOFFSET(dst_typ_t, tdim), PredType::NATIVE_SHORT);
- dst_type.insertMember("coll_ids", HOFFSET(dst_typ_t, coll_ids), int_array);
-
- /* Convert data */
- memcpy(buf, src, sizeof(src));
- src_type.convert(dst_type, (size_t)2, buf, bkg);
- dst = (dst_typ_t*)buf;
-
- /* Cleanup */
- src_type.close();
- dst_type.close();
- strg.close();
- short_array.close();
- int_array.close();
-
- /* Check results */
- if (memcmp(src[1].name, dst[1].name, sizeof(src[1].name)) ||
- src[1].tdim!=dst[1].tdim ||
- src[1].coll_ids[0]!=dst[1].coll_ids[0] ||
- src[1].coll_ids[1]!=dst[1].coll_ids[1] ||
- src[1].coll_ids[2]!=dst[1].coll_ids[2] ||
- src[1].coll_ids[3]!=dst[1].coll_ids[3])
- { H5_FAILED(); }
-
- /* Free memory buffers */
- HDfree(buf);
- HDfree(bkg);
- dst = NULL;
- PASSED();
+ CompType int_array(4*sizeof(int));
+ array_dt = new ArrayType(PredType::NATIVE_INT, 1, dims);
+ int_array.insertMember("_", 0, *array_dt);
+ array_dt->close();
+
+ StrType strg(PredType::C_S1, 16);
+ CompType src_type(sizeof(src_typ_t));
+ src_type.insertMember("name", HOFFSET(src_typ_t, name), strg);
+ src_type.insertMember("tdim", HOFFSET(src_typ_t, tdim), PredType::NATIVE_SHORT);
+ src_type.insertMember("coll_ids", HOFFSET(src_typ_t, coll_ids), short_array);
+
+ CompType dst_type(sizeof(dst_typ_t));
+ dst_type.insertMember("name", HOFFSET(dst_typ_t, name), strg);
+ dst_type.insertMember("tdim", HOFFSET(dst_typ_t, tdim), PredType::NATIVE_SHORT);
+ dst_type.insertMember("coll_ids", HOFFSET(dst_typ_t, coll_ids), int_array);
+
+ /* Convert data */
+ memcpy(buf, src, sizeof(src));
+ src_type.convert(dst_type, (size_t)2, buf, bkg);
+ dst = (dst_typ_t*)buf;
+
+ /* Cleanup */
+ src_type.close();
+ dst_type.close();
+ strg.close();
+ short_array.close();
+ int_array.close();
+
+ /* Check results */
+ if (memcmp(src[1].name, dst[1].name, sizeof(src[1].name)) ||
+ src[1].tdim!=dst[1].tdim ||
+ src[1].coll_ids[0]!=dst[1].coll_ids[0] ||
+ src[1].coll_ids[1]!=dst[1].coll_ids[1] ||
+ src[1].coll_ids[2]!=dst[1].coll_ids[2] ||
+ src[1].coll_ids[3]!=dst[1].coll_ids[3])
+ { H5_FAILED(); }
+
+ /* Free memory buffers */
+ HDfree(buf);
+ HDfree(bkg);
+ dst = NULL;
+ PASSED();
} // end of try block
catch (Exception& E)
@@ -561,16 +559,16 @@ static void test_compound_5()
/*-------------------------------------------------------------------------
- * Function: test_compound_6
+ * Function: test_compound_6
*
- * Purpose: Tests compound conversions when the destination has the same
- * fields as the source but one or more of the fields are
- * larger.
+ * Purpose Tests compound conversions when the destination has the same
+ * fields as the source but one or more of the fields are
+ * larger.
*
- * Return: None
+ * Return None
*
- * Programmer: Binh-Minh Ribler (use C version)
- * January, 2007
+ * Programmer Binh-Minh Ribler (use C version)
+ * January, 2007
*
* Modifications:
*
@@ -579,72 +577,72 @@ static void test_compound_5()
static void test_compound_6()
{
typedef struct {
- short b;
- short d;
+ short b;
+ short d;
} src_typ_t;
typedef struct {
- long b;
- long d;
+ long b;
+ long d;
} dst_typ_t;
- src_typ_t *s_ptr;
- dst_typ_t *d_ptr;
- int i;
- const int nelmts = NTESTELEM;
+ src_typ_t *s_ptr;
+ dst_typ_t *d_ptr;
+ int i;
+ const int nelmts = NTESTELEM;
unsigned char *buf=NULL, *orig=NULL, *bkg=NULL;
// Output message about test being performed
SUBTEST("Compound Element Growing");
try {
- /* Sizes should be the same, but be careful just in case */
- buf = (unsigned char*)HDmalloc(nelmts * MAX(sizeof(src_typ_t), sizeof(dst_typ_t)));
- bkg = (unsigned char*)HDmalloc(nelmts * sizeof(dst_typ_t));
- orig = (unsigned char*)HDmalloc(nelmts * sizeof(src_typ_t));
- for (i=0; i<nelmts; i++) {
- s_ptr = ((src_typ_t*)orig) + i;
- s_ptr->b = (i*8+1) & 0x7fff;
- s_ptr->d = (i*8+6) & 0x7fff;
- }
- memcpy(buf, orig, nelmts*sizeof(src_typ_t));
-
- /* Build hdf5 datatypes */
- CompType st(sizeof(src_typ_t));
- st.insertMember("b", HOFFSET(src_typ_t, b), PredType::NATIVE_SHORT);
- st.insertMember("d", HOFFSET(src_typ_t, d), PredType::NATIVE_SHORT);
-
- CompType dt(sizeof(dst_typ_t));
- dt.insertMember("b", HOFFSET(dst_typ_t, b), PredType::NATIVE_LONG);
- dt.insertMember("d", HOFFSET(dst_typ_t, d), PredType::NATIVE_LONG);
-
- /* Perform the conversion */
- st.convert(dt, (size_t)nelmts, buf, bkg);
-
- /* Compare results */
- for (i=0; i<nelmts; i++) {
- s_ptr = ((src_typ_t*)orig) + i;
- d_ptr = ((dst_typ_t*)buf) + i;
- if (s_ptr->b != d_ptr->b ||
- s_ptr->d != d_ptr->d)
- {
- H5_FAILED();
- cerr << " i=" << i << endl;
- cerr << " src={b=" << s_ptr->b << ", d=" << s_ptr->d
- << "}" << endl;
- cerr << " dst={b=" << d_ptr->b << ", d=" << d_ptr->d
- << "}" << endl;
- } // if
- } // for
-
- /* Release resources */
- HDfree(buf);
- HDfree(bkg);
- HDfree(orig);
- s_ptr = NULL;
- d_ptr = NULL;
- st.close();
- dt.close();
- PASSED();
+ /* Sizes should be the same, but be careful just in case */
+ buf = (unsigned char*)HDmalloc(nelmts * MAX(sizeof(src_typ_t), sizeof(dst_typ_t)));
+ bkg = (unsigned char*)HDmalloc(nelmts * sizeof(dst_typ_t));
+ orig = (unsigned char*)HDmalloc(nelmts * sizeof(src_typ_t));
+ for (i=0; i<nelmts; i++) {
+ s_ptr = ((src_typ_t*)orig) + i;
+ s_ptr->b = (i*8+1) & 0x7fff;
+ s_ptr->d = (i*8+6) & 0x7fff;
+ }
+ memcpy(buf, orig, nelmts*sizeof(src_typ_t));
+
+ /* Build hdf5 datatypes */
+ CompType st(sizeof(src_typ_t));
+ st.insertMember("b", HOFFSET(src_typ_t, b), PredType::NATIVE_SHORT);
+ st.insertMember("d", HOFFSET(src_typ_t, d), PredType::NATIVE_SHORT);
+
+ CompType dt(sizeof(dst_typ_t));
+ dt.insertMember("b", HOFFSET(dst_typ_t, b), PredType::NATIVE_LONG);
+ dt.insertMember("d", HOFFSET(dst_typ_t, d), PredType::NATIVE_LONG);
+
+ /* Perform the conversion */
+ st.convert(dt, (size_t)nelmts, buf, bkg);
+
+ /* Compare results */
+ for (i=0; i<nelmts; i++) {
+ s_ptr = ((src_typ_t*)orig) + i;
+ d_ptr = ((dst_typ_t*)buf) + i;
+ if (s_ptr->b != d_ptr->b ||
+ s_ptr->d != d_ptr->d)
+ {
+ H5_FAILED();
+ cerr << " i=" << i << endl;
+ cerr << " src={b=" << s_ptr->b << ", d=" << s_ptr->d
+ << "}" << endl;
+ cerr << " dst={b=" << d_ptr->b << ", d=" << d_ptr->d
+ << "}" << endl;
+ } // if
+ } // for
+
+ /* Release resources */
+ HDfree(buf);
+ HDfree(bkg);
+ HDfree(orig);
+ s_ptr = NULL;
+ d_ptr = NULL;
+ st.close();
+ dt.close();
+ PASSED();
} // end of try block
catch (Exception& E)
@@ -654,15 +652,15 @@ static void test_compound_6()
} // test_compound_6()
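
The pattern that test_compound_4 and test_compound_6 exercise (describe two in-memory compound layouts, then convert a packed buffer from one layout to the other with DataType::convert()) can be reduced to the sketch below. The Src/Dst structs and the function name are illustrative only; CompType, insertMember(), HOFFSET and convert() are the calls actually under test above.

    #include <cstring>
    #include "H5Cpp.h"
    using namespace H5;

    struct Src { short b; short d; };
    struct Dst { long  b; long  d; };

    void widen_members()
    {
        // Describe the source and destination memory layouts.
        CompType st(sizeof(Src));
        st.insertMember("b", HOFFSET(Src, b), PredType::NATIVE_SHORT);
        st.insertMember("d", HOFFSET(Src, d), PredType::NATIVE_SHORT);

        CompType dt(sizeof(Dst));
        dt.insertMember("b", HOFFSET(Dst, b), PredType::NATIVE_LONG);
        dt.insertMember("d", HOFFSET(Dst, d), PredType::NATIVE_LONG);

        // convert() works in place, so the buffer must fit the larger of
        // the two layouts; bkg is the destination-sized background buffer
        // the conversion routine may use as scratch space.
        const size_t nelmts = 10;
        unsigned char buf[nelmts * sizeof(Dst)] = {0};
        unsigned char bkg[nelmts * sizeof(Dst)] = {0};

        Src src[nelmts];
        for (size_t i = 0; i < nelmts; i++) {
            src[i].b = (short)(2 * i);
            src[i].d = (short)(2 * i + 1);
        }
        std::memcpy(buf, src, sizeof(src));

        st.convert(dt, nelmts, buf, bkg);   // buf now holds Dst records

        st.close();
        dt.close();
    }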
/*-------------------------------------------------------------------------
- * Function: test_compound_7
+ * Function: test_compound_7
*
- * Purpose: Tests inserting fields into compound datatypes when the field
- * overlaps the end of the compound datatype.
+ * Purpose Tests inserting fields into compound datatypes when the field
+ * overlaps the end of the compound datatype.
*
- * Return: None
+ * Return None
*
- * Programmer: Binh-Minh Ribler (use C version)
- * January, 2007
+ * Programmer Binh-Minh Ribler (use C version)
+ * January, 2007
*
* Modifications:
*
@@ -671,47 +669,47 @@ static void test_compound_6()
static void test_compound_7()
{
typedef struct {
- int a;
- float b;
- long c;
+ int a;
+ float b;
+ long c;
} s1_typ_t;
typedef struct {
- int a;
- float b;
- long c;
- double d;
+ int a;
+ float b;
+ long c;
+ double d;
} s2_typ_t;
// Output message about test being performed
SUBTEST("Compound Element Insertion");
try {
- CompType tid1(sizeof(s1_typ_t));
+ CompType tid1(sizeof(s1_typ_t));
- tid1.insertMember("a", HOFFSET(s1_typ_t,a),PredType::NATIVE_INT);
- tid1.insertMember("b", HOFFSET(s1_typ_t,b),PredType::NATIVE_FLOAT);
- tid1.insertMember("c", HOFFSET(s1_typ_t,c),PredType::NATIVE_LONG);
+ tid1.insertMember("a", HOFFSET(s1_typ_t,a),PredType::NATIVE_INT);
+ tid1.insertMember("b", HOFFSET(s1_typ_t,b),PredType::NATIVE_FLOAT);
+ tid1.insertMember("c", HOFFSET(s1_typ_t,c),PredType::NATIVE_LONG);
- size_t type_size = tid1.getSize();
- verify_val(type_size, sizeof(s1_typ_t), "DataType::getSize", __LINE__, __FILE__);
+ size_t type_size = tid1.getSize();
+ verify_val(type_size, sizeof(s1_typ_t), "DataType::getSize", __LINE__, __FILE__);
- CompType tid2;
- tid2.copy(tid1);
+ CompType tid2;
+ tid2.copy(tid1);
- type_size = tid2.getSize();
- verify_val_noteq(type_size, sizeof(s2_typ_t), "DataType::getSize", __LINE__, __FILE__);
+ type_size = tid2.getSize();
+ verify_val_noteq(type_size, sizeof(s2_typ_t), "DataType::getSize", __LINE__, __FILE__);
- /* Should not be able to insert field past end of compound datatype */
- try {
- tid2.insertMember("d", HOFFSET(s2_typ_t, d), PredType::NATIVE_DOUBLE);
- // Should FAIL but didn't, so throw an invalid action exception
- throw InvalidActionException("CompType::insertMember", "Attempted to insert field past end of compound data type.");
- } catch (DataTypeIException& err) {}
+ /* Should not be able to insert field past end of compound datatype */
+ try {
+ tid2.insertMember("d", HOFFSET(s2_typ_t, d), PredType::NATIVE_DOUBLE);
+ // Should FAIL but didn't, so throw an invalid action exception
+ throw InvalidActionException("CompType::insertMember", "Attempted to insert field past end of compound data type.");
+ } catch (DataTypeIException& err) {}
- /* Release resources */
- tid1.close();
- tid2.close();
- PASSED();
+ /* Release resources */
+ tid1.close();
+ tid2.close();
+ PASSED();
} // end of try block
catch (Exception& E)
@@ -721,13 +719,13 @@ static void test_compound_7()
} // test_compound_7()
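
What test_compound_7 verifies, namely that CompType::insertMember() rejects a member whose extent runs past the end of the compound type and surfaces the failure as a DataTypeIException, looks like this in isolation. The S1/S2 structs and the function name are hypothetical; the exception type and the expected-failure idiom come straight from the test above.

    #include <iostream>
    #include "H5Cpp.h"
    using namespace H5;

    struct S1 { int a; float b; long c; };
    struct S2 { int a; float b; long c; double d; };

    void expect_insert_failure()
    {
        CompType tid(sizeof(S1));               // sized for S1 only
        tid.insertMember("a", HOFFSET(S1, a), PredType::NATIVE_INT);
        tid.insertMember("b", HOFFSET(S1, b), PredType::NATIVE_FLOAT);
        tid.insertMember("c", HOFFSET(S1, c), PredType::NATIVE_LONG);

        try {
            // "d" would extend past the end of a sizeof(S1)-byte
            // compound, so this call must fail.
            tid.insertMember("d", HOFFSET(S2, d), PredType::NATIVE_DOUBLE);
            std::cerr << "insertMember unexpectedly succeeded" << std::endl;
        }
        catch (DataTypeIException&) {
            // expected path: the C++ API reports the C-level error here
        }
        tid.close();
    }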
/*-------------------------------------------------------------------------
- * Function: test_compound_set_size
+ * Function: test_compound_set_size
*
- * Purpose: Tests member function setSize() on compound datatype
+ * Purpose Tests member function setSize() on compound datatype
*
- * Return: None
+ * Return None
*
- * Programmer: Binh-Minh Ribler (use partial C version test_ooo_order)
+ * Programmer Binh-Minh Ribler (use partial C version test_ooo_order)
* March, 2014
*
* Modifications:
@@ -738,7 +736,7 @@ const H5std_string COMPFILE("tcompound_types.h5");
static void test_compound_set_size()
{
typedef struct {
- int a, b, c[4], d, e;
+ int a, b, c[4], d, e;
} src_typ_t;
// Output message about test being performed
@@ -755,53 +753,53 @@ static void test_compound_set_size()
dtype.insertMember("c", HOFFSET(src_typ_t, c), PredType::NATIVE_LONG);
dtype.insertMember("d", HOFFSET(src_typ_t, d), PredType::NATIVE_DOUBLE);
- // Verify that the compound is not packed
- // bool packed = dtype.packed(); // not until C library provides API
- // verify_val(packed, FALSE, "DataType::packed", __LINE__, __FILE__);
+ // Verify that the compound is not packed
+ // bool packed = dtype.packed(); // not until C library provides API
+ // verify_val(packed, FALSE, "DataType::packed", __LINE__, __FILE__);
- dtype.commit(file, "dtype");
+ dtype.commit(file, "dtype");
- // Close the type and file
- dtype.close();
- file.close();
+ // Close the type and file
+ dtype.close();
+ file.close();
- // Open the file for read/write
- file.openFile(COMPFILE, H5F_ACC_RDWR);
+ // Open the file for read/write
+ file.openFile(COMPFILE, H5F_ACC_RDWR);
- // Open the data type "dtype"
- CompType dtype_tmp = file.openCompType("dtype");
+ // Open the data type "dtype"
+ CompType dtype_tmp = file.openCompType("dtype");
- // Make a copy of the data type
- dtype.copy(dtype_tmp);
+ // Make a copy of the data type
+ dtype.copy(dtype_tmp);
- // Verify that the compound is not packed
- // packed = dtype_tmp.packed(); // not until C library provides API
- // verify_val(packed, FALSE, "DataType::packed", __LINE__, __FILE__);
+ // Verify that the compound is not packed
+ // packed = dtype_tmp.packed(); // not until C library provides API
+ // verify_val(packed, FALSE, "DataType::packed", __LINE__, __FILE__);
- // Expand the type, and verify that it became unpacked
- dtype.setSize((size_t)33);
- // packed = dtype.packed(); // not until C library provides API
- // verify_val(packed, FALSE, "DataType::packed", __LINE__, __FILE__);
+ // Expand the type, and verify that it became unpacked
+ dtype.setSize((size_t)33);
+ // packed = dtype.packed(); // not until C library provides API
+ // verify_val(packed, FALSE, "DataType::packed", __LINE__, __FILE__);
- // Verify setSize() actually set size
- size_t new_size = dtype.getSize();
- verify_val(new_size, (size_t)33, "DataType::getSize", __LINE__, __FILE__);
+ // Verify setSize() actually set size
+ size_t new_size = dtype.getSize();
+ verify_val(new_size, (size_t)33, "DataType::getSize", __LINE__, __FILE__);
- // Shrink the type, and verify that it became packed
- dtype.setSize((size_t)32);
- // packed = dtype.packed(); // not until C library provides API
- // verify_val(packed, TRUE, "DataType::packed", __LINE__, __FILE__);
+ // Shrink the type, and verify that it became packed
+ dtype.setSize((size_t)32);
+ // packed = dtype.packed(); // not until C library provides API
+ // verify_val(packed, TRUE, "DataType::packed", __LINE__, __FILE__);
- // Verify setSize() actually set size again
- new_size = dtype.getSize();
- verify_val(new_size, (size_t)32, "DataType::getSize", __LINE__, __FILE__);
+ // Verify setSize() actually set size again
+ new_size = dtype.getSize();
+ verify_val(new_size, (size_t)32, "DataType::getSize", __LINE__, __FILE__);
- /* Close types and file */
- dtype_tmp.close();
- dtype.close();
- file.close();
+ /* Close types and file */
+ dtype_tmp.close();
+ dtype.close();
+ file.close();
- PASSED();
+ PASSED();
} // end of try block
catch (Exception& E)
@@ -811,14 +809,14 @@ static void test_compound_set_size()
} // test_compound_set_size()
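
For readers who only need the in-memory behaviour checked by test_compound_set_size(), the setSize()/getSize() round trip without the file commit and reopen steps is sketched below. The Rec struct, the sizes 48 and 36, and the function name are illustrative choices, not values required by the API.

    #include "H5Cpp.h"
    using namespace H5;

    struct Rec { int a, b, c[4], d, e; };

    void resize_compound()
    {
        CompType dtype(sizeof(Rec));
        dtype.insertMember("a", HOFFSET(Rec, a), PredType::NATIVE_INT);
        dtype.insertMember("b", HOFFSET(Rec, b), PredType::NATIVE_INT);

        // Grow the type past its current size, then shrink it again;
        // getSize() should report whatever setSize() last set, as long
        // as the new size still covers every inserted member.
        dtype.setSize((size_t)48);
        size_t grown = dtype.getSize();     // expected: 48

        dtype.setSize((size_t)36);
        size_t shrunk = dtype.getSize();    // expected: 36

        (void)grown; (void)shrunk;
        dtype.close();
    }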
/*-------------------------------------------------------------------------
- * Function: test_compound
+ * Function: test_compound
*
- * Purpose: Main compound datatype testing routine
+ * Purpose Main compound datatype testing routine
*
- * Return: None
+ * Return None
*
- * Programmer: Binh-Minh Ribler
- * January 2007
+ * Programmer Binh-Minh Ribler
+ * January 2007
*
* Modifications:
*
@@ -830,23 +828,23 @@ void test_compound()
// Output message about test being performed
MESSAGE(5, ("Testing Compound Data Type operations\n"));
- test_compound_1(); // various things about compound data types
- test_compound_2(); // compound element reordering
- test_compound_3(); // compound datatype subset conversions
- test_compound_4(); // compound element shrinking & reordering
- test_compound_5(); // optimized struct converter
- test_compound_6(); // compound element growing
- test_compound_7(); // compound element insertion
- test_compound_set_size(); // set size on compound data types
+ test_compound_1(); // various things about compound data types
+ test_compound_2(); // compound element reordering
+ test_compound_3(); // compound datatype subset conversions
+ test_compound_4(); // compound element shrinking & reordering
+ test_compound_5(); // optimized struct converter
+ test_compound_6(); // compound element growing
+ test_compound_7(); // compound element insertion
+ test_compound_set_size(); // set size on compound data types
} // test_compound()
/*-------------------------------------------------------------------------
- * Function: cleanup_compound
+ * Function: cleanup_compound
*
- * Purpose: Cleanup temporary test files - nothing at this time.
+ * Purpose Cleanup temporary test files - nothing at this time.
*
- * Return: none
+ * Return none
*
* Modifications:
*
diff --git a/c++/test/tdspl.cpp b/c++/test/tdspl.cpp
index d733ffe..4aaa93a 100644
--- a/c++/test/tdspl.cpp
+++ b/c++/test/tdspl.cpp
@@ -5,18 +5,16 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*****************************************************************************
FILE
tdspl.cpp - HDF5 C++ testing the dataset memory and transfer property
- list functionality
+ list functionality
***************************************************************************/
#ifdef OLD_HEADER_FILENAME
@@ -45,70 +43,70 @@ static void test_transfplist()
SUBTEST("DSetMemXferPropList::set/getDataTransform()");
try {
- // Create various data set prop lists and set data transform expression.
- DSetMemXferPropList dxpl_c_to_f(c_to_f);
-
- DSetMemXferPropList dxpl_simple;
- dxpl_simple.setDataTransform(simple);
-
- DSetMemXferPropList dxpl_utrans_inv;
- dxpl_utrans_inv.setDataTransform(utrans_inv);
-
- //
- // Make a copy of one of those prop lists then read the data transform
- // expression and verify that it's the same as the original.
- //
-
- // Copy the prop list.
- DSetMemXferPropList dxpl_c_to_f_copy;
- dxpl_c_to_f_copy.copy(dxpl_c_to_f);
-
- // Find out the length of the transform expression, allocate the buffer
- // for it, then read and verify the expression from the copied plist
- ssize_t tran_len = dxpl_c_to_f_copy.getDataTransform(NULL);
- char *c_to_f_read = (char *)HDmalloc(tran_len+1);
- HDmemset(c_to_f_read, 0, tran_len+1);
- dxpl_c_to_f_copy.getDataTransform(c_to_f_read, tran_len+1);
- verify_val((const char*)c_to_f_read, (const char*)c_to_f,
- "DSetMemXferPropList::getDataTransform", __LINE__, __FILE__);
- HDfree(c_to_f_read);
-
- //
- // Read the expression of each of the prop lists and verify the read
- // expression
- //
-
- // Get and verify the expression with:
- // ssize_t getDataTransform(char* exp, const size_t buf_size [default=0])
- tran_len = dxpl_c_to_f.getDataTransform(NULL);
- c_to_f_read = (char *)HDmalloc(tran_len+1);
- HDmemset(c_to_f_read, 0, tran_len+1);
- dxpl_c_to_f.getDataTransform(c_to_f_read, tran_len+1);
- verify_val((const char*)c_to_f_read, (const char*)c_to_f,
- "DSetMemXferPropList::getDataTransform", __LINE__, __FILE__);
- HDfree(c_to_f_read);
-
- // Get and verify the expression with:
- // H5std_string DSetMemXferPropList::getDataTransform()
- H5std_string simple_read = dxpl_simple.getDataTransform();
- verify_val((const char*)simple_read.c_str(), (const char*)simple,
- "DSetMemXferPropList::getDataTransform", __LINE__, __FILE__);
-
- // Get and verify the expression with:
- // ssize_t getDataTransform(char* exp, const size_t buf_size)
- tran_len = dxpl_utrans_inv.getDataTransform(NULL, 0);
- char *utrans_inv_read = (char *)HDmalloc(tran_len+1);
- HDmemset(utrans_inv_read, 0, tran_len+1);
- dxpl_utrans_inv.getDataTransform(utrans_inv_read, tran_len+1);
- verify_val((const char*)utrans_inv_read, (const char*)utrans_inv,
- "DSetMemXferPropList::getDataTransform", __LINE__, __FILE__);
- HDfree(utrans_inv_read);
-
- PASSED();
+ // Create various data set prop lists and set data transform expression.
+ DSetMemXferPropList dxpl_c_to_f(c_to_f);
+
+ DSetMemXferPropList dxpl_simple;
+ dxpl_simple.setDataTransform(simple);
+
+ DSetMemXferPropList dxpl_utrans_inv;
+ dxpl_utrans_inv.setDataTransform(utrans_inv);
+
+ //
+ // Make a copy of one of those prop lists then read the data transform
+ // expression and verify that it's the same as the original.
+ //
+
+ // Copy the prop list.
+ DSetMemXferPropList dxpl_c_to_f_copy;
+ dxpl_c_to_f_copy.copy(dxpl_c_to_f);
+
+ // Find out the length of the transform expression, allocate the buffer
+ // for it, then read and verify the expression from the copied plist
+ ssize_t tran_len = dxpl_c_to_f_copy.getDataTransform(NULL);
+ char *c_to_f_read = (char *)HDmalloc(tran_len+1);
+ HDmemset(c_to_f_read, 0, tran_len+1);
+ dxpl_c_to_f_copy.getDataTransform(c_to_f_read, tran_len+1);
+ verify_val((const char*)c_to_f_read, (const char*)c_to_f,
+ "DSetMemXferPropList::getDataTransform", __LINE__, __FILE__);
+ HDfree(c_to_f_read);
+
+ //
+ // Read the expression of each of the prop lists and verify the read
+ // expression
+ //
+
+ // Get and verify the expression with:
+ // ssize_t getDataTransform(char* exp, const size_t buf_size [default=0])
+ tran_len = dxpl_c_to_f.getDataTransform(NULL);
+ c_to_f_read = (char *)HDmalloc(tran_len+1);
+ HDmemset(c_to_f_read, 0, tran_len+1);
+ dxpl_c_to_f.getDataTransform(c_to_f_read, tran_len+1);
+ verify_val((const char*)c_to_f_read, (const char*)c_to_f,
+ "DSetMemXferPropList::getDataTransform", __LINE__, __FILE__);
+ HDfree(c_to_f_read);
+
+ // Get and verify the expression with:
+ // H5std_string DSetMemXferPropList::getDataTransform()
+ H5std_string simple_read = dxpl_simple.getDataTransform();
+ verify_val((const char*)simple_read.c_str(), (const char*)simple,
+ "DSetMemXferPropList::getDataTransform", __LINE__, __FILE__);
+
+ // Get and verify the expression with:
+ // ssize_t getDataTransform(char* exp, const size_t buf_size)
+ tran_len = dxpl_utrans_inv.getDataTransform(NULL, 0);
+ char *utrans_inv_read = (char *)HDmalloc(tran_len+1);
+ HDmemset(utrans_inv_read, 0, tran_len+1);
+ dxpl_utrans_inv.getDataTransform(utrans_inv_read, tran_len+1);
+ verify_val((const char*)utrans_inv_read, (const char*)utrans_inv,
+ "DSetMemXferPropList::getDataTransform", __LINE__, __FILE__);
+ HDfree(utrans_inv_read);
+
+ PASSED();
}
catch (Exception& E)
{
- issue_fail_msg("test_transfplist", __LINE__, __FILE__, E.getCDetailMsg());
+ issue_fail_msg("test_transfplist", __LINE__, __FILE__, E.getCDetailMsg());
}
}
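
The data-transform calls exercised above reduce to the round trip below: set an expression on a DSetMemXferPropList, then read it back either into a caller-allocated buffer (after querying the length with a NULL buffer) or directly as an H5std_string. The expression text and variable names are illustrative.

    #include <cstdlib>
    #include <cstring>
    #include "H5Cpp.h"
    using namespace H5;

    void transform_roundtrip()
    {
        const char *expr = "(5/9.0)*(x-32)";   // any valid transform expression

        DSetMemXferPropList dxpl;
        dxpl.setDataTransform(expr);

        // Variant 1: query the length first, then read into a buffer.
        ssize_t len = dxpl.getDataTransform(NULL);
        char *buf = (char *)std::malloc(len + 1);
        std::memset(buf, 0, len + 1);
        dxpl.getDataTransform(buf, len + 1);
        std::free(buf);

        // Variant 2: read it back directly as a string.
        H5std_string expr_read = dxpl.getDataTransform();

        (void)expr_read;
        dxpl.close();
    }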
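A note on the buffer-based variant: the length returned by getDataTransform(NULL) does not include a terminating NUL, which is why the test above (and the sketch) allocates and zeroes len + 1 bytes before reading the expression back.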
diff --git a/c++/test/testhdf5.cpp b/c++/test/testhdf5.cpp
index 28ede6b..a2a0867 100644
--- a/c++/test/testhdf5.cpp
+++ b/c++/test/testhdf5.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*****************************************************************************
@@ -28,16 +26,16 @@
base functionality testing.
EXTERNAL ROUTINES/VARIABLES:
- TestInit(...) -- Initialize testing framework
- TestInfo(...) -- Print test info
- AddTest(...) -- Setup a test function and add it to the list of tests
- TestParseCmdLine(...) -- Parse command line arguments
- PerformTests() -- Perform requested testing
- GetTestSummary() -- Retrieve Summary request value
- TestSummary() -- Display test summary
- GetTestCleanup() -- Retrieve Cleanup request value
- TestCleanup() -- Clean up files from testing
- GetTestNumErrs() -- Retrieve the number of testing errors
+ TestInit(...) -- Initialize testing framework
+ TestInfo(...) -- Print test info
+ AddTest(...) -- Setup a test function and add it to the list of tests
+ TestParseCmdLine(...) -- Parse command line arguments
+ PerformTests() -- Perform requested testing
+ GetTestSummary() -- Retrieve Summary request value
+ TestSummary() -- Display test summary
+ GetTestCleanup() -- Retrieve Cleanup request value
+ TestCleanup() -- Clean up files from testing
+ GetTestNumErrs() -- Retrieve the number of testing errors
***************************************************************************/
#ifdef OLD_HEADER_FILENAME
@@ -60,47 +58,47 @@ main(int argc, char *argv[])
{
try
{
- // Turn of the auto-printing when failure occurs so that we can
- // handle the errors appropriately since sometime failures are
- // caused deliberately and expected.
- Exception::dontPrint();
- /* Initialize testing framework */
- TestInit(argv[0], NULL, NULL);
-
- // testing file creation and opening in tfile.cpp
- AddTest("tfile", test_file, cleanup_file, "File I/O Operations", NULL);
- // testing dataset functionalities in dset.cpp
- AddTest("dsets", test_dset, cleanup_dsets, "Dataset I/O Operations", NULL);
- // testing dataspace functionalities in th5s.cpp
- AddTest("th5s", test_h5s, cleanup_h5s, "Dataspaces", NULL);
- // testing attribute functionalities in tattr.cpp
- AddTest("tattr", test_attr, cleanup_attr, "Attributes", NULL);
- // testing object functionalities in tobject.cpp
- AddTest("tobject", test_object, cleanup_object, "Objects", NULL);
- // testing reference functionalities in trefer.cpp
- AddTest("trefer", test_reference, cleanup_reference, "References", NULL);
- // testing variable-length strings in tvlstr.cpp
- AddTest("tvlstr", test_vlstrings, cleanup_vlstrings, "Variable-Length Strings", NULL);
- AddTest("ttypes", test_types, cleanup_types, "Generic Data Types", NULL);
- AddTest("tarray", test_array, cleanup_array, "Array Datatypes", NULL);
- AddTest("tcompound", test_compound, cleanup_compound, "Compound Data Types", NULL);
- AddTest("tdspl", test_dsproplist, cleanup_dsproplist, "Dataset Property List", NULL);
- AddTest("tfilter", test_filters, cleanup_filters, "Various Filters", NULL);
- AddTest("tlinks", test_links, cleanup_links, "Various Links", NULL);
+        // Turn off the auto-printing when failure occurs so that we can
+        // handle the errors appropriately, since sometimes failures are
+        // deliberately caused and expected.
+ Exception::dontPrint();
+ /* Initialize testing framework */
+ TestInit(argv[0], NULL, NULL);
+
+ // testing file creation and opening in tfile.cpp
+ AddTest("tfile", test_file, cleanup_file, "File I/O Operations", NULL);
+ // testing dataset functionalities in dset.cpp
+ AddTest("dsets", test_dset, cleanup_dsets, "Dataset I/O Operations", NULL);
+ // testing dataspace functionalities in th5s.cpp
+ AddTest("th5s", test_h5s, cleanup_h5s, "Dataspaces", NULL);
+ // testing attribute functionalities in tattr.cpp
+ AddTest("tattr", test_attr, cleanup_attr, "Attributes", NULL);
+ // testing object functionalities in tobject.cpp
+ AddTest("tobject", test_object, cleanup_object, "Objects", NULL);
+ // testing reference functionalities in trefer.cpp
+ AddTest("trefer", test_reference, cleanup_reference, "References", NULL);
+ // testing variable-length strings in tvlstr.cpp
+ AddTest("tvlstr", test_vlstrings, cleanup_vlstrings, "Variable-Length Strings", NULL);
+ AddTest("ttypes", test_types, cleanup_types, "Generic Data Types", NULL);
+ AddTest("tarray", test_array, cleanup_array, "Array Datatypes", NULL);
+ AddTest("tcompound", test_compound, cleanup_compound, "Compound Data Types", NULL);
+ AddTest("tdspl", test_dsproplist, cleanup_dsproplist, "Dataset Property List", NULL);
+ AddTest("tfilter", test_filters, cleanup_filters, "Various Filters", NULL);
+ AddTest("tlinks", test_links, cleanup_links, "Various Links", NULL);
/* Comment out tests that are not done yet. - BMR, Feb 2001
- AddTest("select", test_select, cleanup_select, "Selections", NULL);
- AddTest("time", test_time, cleanup_time, "Time Datatypes", NULL);
- AddTest("vltypes", test_vltypes, cleanup_vltypes, "Variable-Length Datatypes", NULL);
+ AddTest("select", test_select, cleanup_select, "Selections", NULL);
+ AddTest("time", test_time, cleanup_time, "Time Datatypes", NULL);
+ AddTest("vltypes", test_vltypes, cleanup_vltypes, "Variable-Length Datatypes", NULL);
*/
- AddTest("iterate", test_iterate, cleanup_iterate, "Group & Attribute Iteration", NULL);
+ AddTest("iterate", test_iterate, cleanup_iterate, "Group & Attribute Iteration", NULL);
/*
- AddTest("genprop", test_genprop, cleanup_genprop, "Generic Properties", NULL);
- AddTest("id", test_ids, NULL, "User-Created Identifiers", NULL);
+ AddTest("genprop", test_genprop, cleanup_genprop, "Generic Properties", NULL);
+ AddTest("id", test_ids, NULL, "User-Created Identifiers", NULL);
Comment out tests that are not done yet */
/* Tentative - BMR 2007/1/12
- AddTest("enum", test_enum, cleanup_enum, "Enum Data Types", NULL);
+ AddTest("enum", test_enum, cleanup_enum, "Enum Data Types", NULL);
*/
}
catch (Exception& E)
diff --git a/c++/test/tfile.cpp b/c++/test/tfile.cpp
index 47b9a85..51682a3 100644
--- a/c++/test/tfile.cpp
+++ b/c++/test/tfile.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*****************************************************************************
@@ -49,6 +47,7 @@ const size_t F2_OFFSET_SIZE = 8;
const size_t F2_LENGTH_SIZE = 8;
const unsigned F2_SYM_LEAF_K = 8;
const unsigned F2_SYM_INTERN_K = 32;
+const unsigned F2_ISTORE = 64;
const H5std_string FILE2("tfile2.h5");
const hsize_t F3_USERBLOCK_SIZE = (hsize_t)0;
@@ -65,20 +64,20 @@ const H5std_string FILE4("tfile4.h5");
/*-------------------------------------------------------------------------
* Function: test_file_create
*
- * Purpose: Test file and template creations
+ * Purpose Test file and template creations
*
- * Return: None
+ * Return None
*
- * Programmer: Binh-Minh Ribler (use C version)
+ * Programmer Binh-Minh Ribler (use C version)
* January, 2001
*
* Modifications:
- * January, 2005: C tests' macro VERIFY casts values to 'long' for all
- * cases. Since there are no operator<< for 'long long'
- * or int64 in VS C++ ostream, I casted the hsize_t values
- * passed to verify_val to 'long' as well. If problems
- * arises later, this will have to be specificly handled
- * with a special routine.
+ *      January, 2005: C tests' macro VERIFY casts values to 'long' for all
+ *                     cases. Since there is no operator<< for 'long long'
+ *                     or int64 in VS C++ ostream, I cast the hsize_t values
+ *                     passed to verify_val to 'long' as well. If problems
+ *                     arise later, this will have to be specifically handled
+ *                     with a special routine.
*
*-------------------------------------------------------------------------
*/
@@ -97,81 +96,81 @@ static void test_file_create()
// Setting this to NULL for cleaning up in failure situations
H5File* file1 = NULL;
try {
- // Create file FILE1
- file1 = new H5File (FILE1, H5F_ACC_EXCL);
-
- // Try to create the same file with H5F_ACC_TRUNC. This should fail
- // because file1 is the same file and is currently open.
- try {
- H5File file2 (FILE1, H5F_ACC_TRUNC); // should throw E
-
- // Should FAIL but didn't, so throw an invalid action exception
- throw InvalidActionException("H5File constructor", "Attempted to create an existing file.");
- }
- catch (FileIException& E) // catch truncating existing file
- {} // do nothing, FAIL expected
-
- // Close file1
- delete file1;
- file1 = NULL;
-
- // Try again with H5F_ACC_EXCL. This should fail because the file
- // already exists from the previous steps.
- try {
- H5File file2(FILE1, H5F_ACC_EXCL); // should throw E
-
- // Should FAIL but didn't, so throw an invalid action exception
- throw InvalidActionException("H5File constructor", "File already exists.");
- }
- catch (FileIException& E) // catching creating existing file
- {} // do nothing, FAIL expected
-
- // Test create with H5F_ACC_TRUNC. This will truncate the existing file.
- file1 = new H5File (FILE1, H5F_ACC_TRUNC);
-
- // Try to create first file again. This should fail because file1
- // is the same file and is currently open.
- try {
- H5File file2 (FILE1, H5F_ACC_TRUNC); // should throw E
-
- // Should FAIL but didn't, so throw an invalid action exception
- throw InvalidActionException("H5File constructor", "H5F_ACC_TRUNC attempt on an opened file.");
- }
- catch (FileIException& E) // catching truncating opened file
- {} // do nothing, FAIL expected
-
- // Try with H5F_ACC_EXCL. This should fail too because the file already
- // exists.
- try {
- H5File file3 (FILE1, H5F_ACC_EXCL); // should throw E
-
- // Should FAIL but didn't, so throw an invalid action exception
- throw InvalidActionException("H5File constructor", "H5F_ACC_EXCL attempt on an existing file.");
- }
- catch (FileIException& E) // catching H5F_ACC_EXCL on existing file
- {} // do nothing, FAIL expected
-
- // Get the file-creation template
- FileCreatPropList tmpl1 = file1->getCreatePlist();
-
- hsize_t ublock = tmpl1.getUserblock();
- verify_val((long)ublock, (long)F1_USERBLOCK_SIZE, "FileCreatPropList::getUserblock", __LINE__, __FILE__);
-
- size_t parm1, parm2; // file-creation parameters
- tmpl1.getSizes( parm1, parm2);
- verify_val(parm1, F1_OFFSET_SIZE, "FileCreatPropList::getSizes", __LINE__, __FILE__);
- verify_val(parm2, F1_LENGTH_SIZE, "FileCreatPropList::getSizes", __LINE__, __FILE__);
+ // Create file FILE1
+ file1 = new H5File (FILE1, H5F_ACC_EXCL);
+
+ // Try to create the same file with H5F_ACC_TRUNC. This should fail
+ // because file1 is the same file and is currently open.
+ try {
+ H5File file2 (FILE1, H5F_ACC_TRUNC); // should throw E
+
+ // Should FAIL but didn't, so throw an invalid action exception
+ throw InvalidActionException("H5File constructor", "Attempted to create an existing file.");
+ }
+ catch (FileIException& E) // catch truncating existing file
+ {} // do nothing, FAIL expected
+
+ // Close file1
+ delete file1;
+ file1 = NULL;
+
+ // Try again with H5F_ACC_EXCL. This should fail because the file
+ // already exists from the previous steps.
+ try {
+ H5File file2(FILE1, H5F_ACC_EXCL); // should throw E
+
+ // Should FAIL but didn't, so throw an invalid action exception
+ throw InvalidActionException("H5File constructor", "File already exists.");
+ }
+ catch (FileIException& E) // catching creating existing file
+ {} // do nothing, FAIL expected
+
+ // Test create with H5F_ACC_TRUNC. This will truncate the existing file.
+ file1 = new H5File (FILE1, H5F_ACC_TRUNC);
+
+ // Try to create first file again. This should fail because file1
+ // is the same file and is currently open.
+ try {
+ H5File file2 (FILE1, H5F_ACC_TRUNC); // should throw E
+
+ // Should FAIL but didn't, so throw an invalid action exception
+ throw InvalidActionException("H5File constructor", "H5F_ACC_TRUNC attempt on an opened file.");
+ }
+ catch (FileIException& E) // catching truncating opened file
+ {} // do nothing, FAIL expected
+
+ // Try with H5F_ACC_EXCL. This should fail too because the file already
+ // exists.
+ try {
+ H5File file3 (FILE1, H5F_ACC_EXCL); // should throw E
+
+ // Should FAIL but didn't, so throw an invalid action exception
+ throw InvalidActionException("H5File constructor", "H5F_ACC_EXCL attempt on an existing file.");
+ }
+ catch (FileIException& E) // catching H5F_ACC_EXCL on existing file
+ {} // do nothing, FAIL expected
+
+ // Get the file-creation template
+ FileCreatPropList tmpl1 = file1->getCreatePlist();
+
+ hsize_t ublock = tmpl1.getUserblock();
+ verify_val((long)ublock, (long)F1_USERBLOCK_SIZE, "FileCreatPropList::getUserblock", __LINE__, __FILE__);
+
+ size_t parm1, parm2; // file-creation parameters
+ tmpl1.getSizes( parm1, parm2);
+ verify_val(parm1, F1_OFFSET_SIZE, "FileCreatPropList::getSizes", __LINE__, __FILE__);
+ verify_val(parm2, F1_LENGTH_SIZE, "FileCreatPropList::getSizes", __LINE__, __FILE__);
unsigned iparm1,iparm2; // file-creation parameters
tmpl1.getSymk( iparm1, iparm2);
verify_val(iparm1, F1_SYM_INTERN_K, "FileCreatPropList::getSymk", __LINE__, __FILE__);
verify_val(iparm2, F1_SYM_LEAF_K, "FileCreatPropList::getSymk", __LINE__, __FILE__);
- // tmpl1 is automatically closed; if error occurs, it'll be
- // caught in the catch block
+ // tmpl1 is automatically closed; if error occurs, it'll be
+ // caught in the catch block
- // Close first file
- delete file1;
+ // Close first file
+ delete file1;
}
catch (InvalidActionException& E)
{
@@ -183,7 +182,7 @@ static void test_file_create()
// catch all other exceptions
catch (Exception& E)
{
- issue_fail_msg("test_file_create()", __LINE__, __FILE__, E.getCDetailMsg());
+ issue_fail_msg("test_file_create()", __LINE__, __FILE__, E.getCDetailMsg());
if (file1 != NULL) // clean up
delete file1;
}
@@ -192,79 +191,79 @@ static void test_file_create()
FileCreatPropList* tmpl1 = NULL;
try
{
- // Create a new file with a non-standard file-creation template
- tmpl1 = new FileCreatPropList;
+ // Create a new file with a non-standard file-creation template
+ tmpl1 = new FileCreatPropList;
- // Set the new file-creation parameters
- tmpl1->setUserblock (F2_USERBLOCK_SIZE);
- tmpl1->setSizes( F2_OFFSET_SIZE, F2_LENGTH_SIZE );
- tmpl1->setSymk( F2_SYM_INTERN_K, F2_SYM_LEAF_K );
+ // Set the new file-creation parameters
+ tmpl1->setUserblock (F2_USERBLOCK_SIZE);
+ tmpl1->setSizes( F2_OFFSET_SIZE, F2_LENGTH_SIZE );
+ tmpl1->setSymk( F2_SYM_INTERN_K, F2_SYM_LEAF_K );
- // Try to create second file, with non-standard file-creation template
- // params.
- H5File file2( FILE2, H5F_ACC_TRUNC, *tmpl1 );
+ // Try to create second file, with non-standard file-creation template
+ // params.
+ H5File file2( FILE2, H5F_ACC_TRUNC, *tmpl1 );
- // Release file-creation template
- delete tmpl1;
- tmpl1 = NULL;
+ // Release file-creation template
+ delete tmpl1;
+ tmpl1 = NULL;
- // Get the file-creation template
- tmpl1 = new FileCreatPropList (file2.getCreatePlist());
+ // Get the file-creation template
+ tmpl1 = new FileCreatPropList (file2.getCreatePlist());
- // Get the file-creation parameters
- hsize_t ublock = tmpl1->getUserblock();
- verify_val((long)ublock, (long)F2_USERBLOCK_SIZE, "FileCreatPropList::getUserblock", __LINE__, __FILE__);
+ // Get the file-creation parameters
+ hsize_t ublock = tmpl1->getUserblock();
+ verify_val((long)ublock, (long)F2_USERBLOCK_SIZE, "FileCreatPropList::getUserblock", __LINE__, __FILE__);
- size_t parm1, parm2; // file-creation parameters
- tmpl1->getSizes( parm1, parm2);
- verify_val(parm1, F2_OFFSET_SIZE, "FileCreatPropList::getSizes", __LINE__, __FILE__);
- verify_val(parm2, F2_LENGTH_SIZE, "FileCreatPropList::getSizes", __LINE__, __FILE__);
+ size_t parm1, parm2; // file-creation parameters
+ tmpl1->getSizes( parm1, parm2);
+ verify_val(parm1, F2_OFFSET_SIZE, "FileCreatPropList::getSizes", __LINE__, __FILE__);
+ verify_val(parm2, F2_LENGTH_SIZE, "FileCreatPropList::getSizes", __LINE__, __FILE__);
- unsigned iparm1,iparm2; // file-creation parameters
+ unsigned iparm1,iparm2; // file-creation parameters
tmpl1->getSymk( iparm1, iparm2);
verify_val(iparm1, F2_SYM_INTERN_K, "FileCreatPropList::getSymk", __LINE__, __FILE__);
verify_val(iparm2, F2_SYM_LEAF_K, "FileCreatPropList::getSymk", __LINE__, __FILE__);
- // Clone the file-creation template
- FileCreatPropList tmpl2;
- tmpl2.copy (*tmpl1);
+ // Clone the file-creation template
+ FileCreatPropList tmpl2;
+ tmpl2.copy (*tmpl1);
- // Release file-creation template
- delete tmpl1;
- tmpl1 = NULL;
+ // Release file-creation template
+ delete tmpl1;
+ tmpl1 = NULL;
- // Set the new file-creation parameter
- tmpl2.setUserblock( F3_USERBLOCK_SIZE );
+ // Set the new file-creation parameter
+ tmpl2.setUserblock( F3_USERBLOCK_SIZE );
- // Try to create second file, with non-standard file-creation template
- // params
- H5File file3( FILE3, H5F_ACC_TRUNC, tmpl2 );
+ // Try to create second file, with non-standard file-creation template
+ // params
+ H5File file3( FILE3, H5F_ACC_TRUNC, tmpl2 );
- // Get the file-creation template
- tmpl1 = new FileCreatPropList (file3.getCreatePlist());
+ // Get the file-creation template
+ tmpl1 = new FileCreatPropList (file3.getCreatePlist());
- // Get the file-creation parameters
- ublock = tmpl1->getUserblock();
- verify_val((long)ublock, (long)F3_USERBLOCK_SIZE, "FileCreatPropList::getUserblock", __LINE__, __FILE__);
+ // Get the file-creation parameters
+ ublock = tmpl1->getUserblock();
+ verify_val((long)ublock, (long)F3_USERBLOCK_SIZE, "FileCreatPropList::getUserblock", __LINE__, __FILE__);
- tmpl1->getSizes( parm1, parm2);
- verify_val(parm1, F3_OFFSET_SIZE, "FileCreatPropList::getSizes", __LINE__, __FILE__);
- verify_val(parm2, F3_LENGTH_SIZE, "FileCreatPropList::getSizes", __LINE__, __FILE__);
+ tmpl1->getSizes( parm1, parm2);
+ verify_val(parm1, F3_OFFSET_SIZE, "FileCreatPropList::getSizes", __LINE__, __FILE__);
+ verify_val(parm2, F3_LENGTH_SIZE, "FileCreatPropList::getSizes", __LINE__, __FILE__);
- tmpl1->getSymk( iparm1, iparm2);
- verify_val(iparm1, F3_SYM_INTERN_K, "FileCreatPropList::getSymk", __LINE__, __FILE__);
- verify_val(iparm2, F3_SYM_LEAF_K, "FileCreatPropList::getSymk", __LINE__, __FILE__);
+ tmpl1->getSymk( iparm1, iparm2);
+ verify_val(iparm1, F3_SYM_INTERN_K, "FileCreatPropList::getSymk", __LINE__, __FILE__);
+ verify_val(iparm2, F3_SYM_LEAF_K, "FileCreatPropList::getSymk", __LINE__, __FILE__);
- // Release file-creation template
- delete tmpl1;
- PASSED();
+ // Release file-creation template
+ delete tmpl1;
+ PASSED();
}
// catch all exceptions
catch (Exception& E)
{
- issue_fail_msg("test_file_create()", __LINE__, __FILE__, E.getCDetailMsg());
- if (tmpl1 != NULL) // clean up
- delete tmpl1;
+ issue_fail_msg("test_file_create()", __LINE__, __FILE__, E.getCDetailMsg());
+ if (tmpl1 != NULL) // clean up
+ delete tmpl1;
}
} // test_file_create()
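
The creation-flag semantics probed by test_file_create() can be summed up in a short sketch: H5F_ACC_TRUNC must fail while the same file is still open through another handle, and H5F_ACC_EXCL must fail once the file exists at all. The file name and function name here are hypothetical.

    #include "H5Cpp.h"
    using namespace H5;

    void creation_flags_demo()
    {
        Exception::dontPrint();            // the failures below are expected

        H5File *f1 = new H5File("demo_flags.h5", H5F_ACC_TRUNC);

        try {
            // Truncating a file that is currently open must fail.
            H5File f2("demo_flags.h5", H5F_ACC_TRUNC);
        }
        catch (FileIException&) { /* expected */ }

        delete f1;                         // close the first handle

        try {
            // Exclusive creation must fail because the file now exists.
            H5File f3("demo_flags.h5", H5F_ACC_EXCL);
        }
        catch (FileIException&) { /* expected */ }
    }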
@@ -272,20 +271,20 @@ static void test_file_create()
/*-------------------------------------------------------------------------
* Function: test_file_open
*
- * Purpose: Test file accesses
+ * Purpose Test file accesses
*
- * Return: None
+ * Return None
*
- * Programmer: Binh-Minh Ribler (use C version)
+ * Programmer Binh-Minh Ribler (use C version)
* January, 2001
*
* Modifications:
- * January, 2005: C tests' macro VERIFY casts values to 'long' for all
- * cases. Since there are no operator<< for 'long long'
- * or int64 in VS C++ ostream, I casted the hsize_t values
- * passed to verify_val to 'long' as well. If problems
- * arises later, this will have to be specificly handled
- * with a special routine.
+ *      January, 2005: C tests' macro VERIFY casts values to 'long' for all
+ *                     cases. Since there is no operator<< for 'long long'
+ *                     or int64 in VS C++ ostream, I cast the hsize_t values
+ *                     passed to verify_val to 'long' as well. If problems
+ *                     arise later, this will have to be specifically handled
+ *                     with a special routine.
*
*-------------------------------------------------------------------------
*/
@@ -296,53 +295,53 @@ static void test_file_open()
try {
- // Open first file
- H5File file1 (FILE2, H5F_ACC_RDWR );
+ // Open first file
+ H5File file1 (FILE2, H5F_ACC_RDWR );
- // Get the file-creation template
- FileCreatPropList tmpl1 = file1.getCreatePlist();
+ // Get the file-creation template
+ FileCreatPropList tmpl1 = file1.getCreatePlist();
- // Get the file-creation parameters
- hsize_t ublock = tmpl1.getUserblock();
- verify_val((long)ublock, (long)F2_USERBLOCK_SIZE, "FileCreatPropList::getUserblock", __LINE__, __FILE__);
+ // Get the file-creation parameters
+ hsize_t ublock = tmpl1.getUserblock();
+ verify_val((long)ublock, (long)F2_USERBLOCK_SIZE, "FileCreatPropList::getUserblock", __LINE__, __FILE__);
- size_t parm1, parm2; // file-creation parameters
- tmpl1.getSizes( parm1, parm2);
- verify_val(parm1, F2_OFFSET_SIZE, "FileCreatPropList::getSizes", __LINE__, __FILE__);
- verify_val(parm2, F2_LENGTH_SIZE, "FileCreatPropList::getSizes", __LINE__, __FILE__);
+ size_t parm1, parm2; // file-creation parameters
+ tmpl1.getSizes( parm1, parm2);
+ verify_val(parm1, F2_OFFSET_SIZE, "FileCreatPropList::getSizes", __LINE__, __FILE__);
+ verify_val(parm2, F2_LENGTH_SIZE, "FileCreatPropList::getSizes", __LINE__, __FILE__);
unsigned iparm1,iparm2; // file-creation parameters
tmpl1.getSymk( iparm1, iparm2);
verify_val(iparm1, F2_SYM_INTERN_K, "FileCreatPropList::getSymk", __LINE__, __FILE__);
verify_val(iparm2, F2_SYM_LEAF_K, "FileCreatPropList::getSymk", __LINE__, __FILE__);
- // Test H5File constructor with existing file id
- H5File file2(file1.getId());
- file1.close();
+ // Test H5File constructor with existing file id
+ H5File file2(file1.getId());
+ file1.close();
- // Try truncating the file, and it should fail because the file is
- // still opened with file2.
- try {
- H5File file3 (FILE2, H5F_ACC_TRUNC); // should throw E
+ // Try truncating the file, and it should fail because the file is
+ // still opened with file2.
+ try {
+ H5File file3 (FILE2, H5F_ACC_TRUNC); // should throw E
- // Should FAIL but didn't, so throw an invalid action exception
- throw InvalidActionException("H5File constructor", "Attempt truncating an opened file.");
- }
- catch (FileIException& E) // catching H5F_ACC_TRUNC on opened file
- {} // do nothing, FAIL expected
+ // Should FAIL but didn't, so throw an invalid action exception
+ throw InvalidActionException("H5File constructor", "Attempt truncating an opened file.");
+ }
+ catch (FileIException& E) // catching H5F_ACC_TRUNC on opened file
+ {} // do nothing, FAIL expected
- // Now, really close the file.
- file2.close();
+ // Now, really close the file.
+ file2.close();
- // Truncating should succeed now.
- H5File file3(FILE2, H5F_ACC_TRUNC);
+ // Truncating should succeed now.
+ H5File file3(FILE2, H5F_ACC_TRUNC);
- // Opening another file to file3 object, FILE2 should be closed, so
- // the next attempt to truncate FILE2 should succeed.
- file3.openFile(FILE1, H5F_ACC_RDONLY);
- H5File file4(FILE2, H5F_ACC_TRUNC);
+        // Opening another file on the file3 object should close FILE2, so
+        // the next attempt to truncate FILE2 should succeed.
+ file3.openFile(FILE1, H5F_ACC_RDONLY);
+ H5File file4(FILE2, H5F_ACC_TRUNC);
- PASSED();
+ PASSED();
} // end of try block
catch (Exception& E)
@@ -355,11 +354,11 @@ static void test_file_open()
/*-------------------------------------------------------------------------
* Function: test_file_size
*
- * Purpose: Test file size.
+ * Purpose Test file size.
*
- * Return: None
+ * Return None
*
- * Programmer: Raymond Lu
+ * Programmer Raymond Lu
* June, 2004
*
* Modifications:
@@ -371,7 +370,7 @@ static void test_file_size()
// Output message about test being performed
SUBTEST("File Size");
- hid_t fapl_id;
+ hid_t fapl_id;
fapl_id = h5_fileaccess(); // in h5test.c, returns a file access template
try {
@@ -379,13 +378,13 @@ static void test_file_size()
// list object to pass in H5File::H5File
FileAccPropList fapl(fapl_id);
- // Set to sec2 driver. Do we want to test other file drivers?
+ // Set to sec2 driver. Do we want to test other file drivers?
// They're not tested in C++.
// File drivers seem not implemented.
- // fapl.setSec2();
+ // fapl.setSec2();
// Create a file
- H5File file4( FILE4, H5F_ACC_TRUNC, FileCreatPropList::DEFAULT, fapl);
+ H5File file4( FILE4, H5F_ACC_TRUNC, FileCreatPropList::DEFAULT, fapl);
// Get file size
hsize_t file_size = file4.getFileSize();
@@ -394,14 +393,14 @@ static void test_file_size()
if (file_size < 1*KB || file_size > 4*KB)
issue_fail_msg("test_file_size()", __LINE__, __FILE__, "getFileSize() returned unreasonable value");
- // Get the amount of free space in the file
- hssize_t free_space = file4.getFreeSpace();
+ // Get the amount of free space in the file
+ hssize_t free_space = file4.getFreeSpace();
- // Check if it's reasonable. It's 0 now.
- if (free_space < 0 || free_space > 4*KB)
- issue_fail_msg("test_file_size()", __LINE__, __FILE__, "getFreeSpace returned unreasonable value");
+ // Check if it's reasonable. It's 0 now.
+ if (free_space < 0 || free_space > 4*KB)
+ issue_fail_msg("test_file_size()", __LINE__, __FILE__, "getFreeSpace returned unreasonable value");
- PASSED();
+ PASSED();
} // end of try block
catch (Exception& E)
@@ -410,7 +409,9 @@ static void test_file_size()
}
// use C test utility routine to close property list.
- H5Pclose(fapl_id);
+ herr_t ret = H5Pclose(fapl_id);
+ if (ret < 0)
+ issue_fail_msg("test_file_size()", __LINE__, __FILE__, "H5Pclose failed");
} // test_file_size()
@@ -418,25 +419,25 @@ static void test_file_size()
/*-------------------------------------------------------------------------
* Function: test_file_name
*
- * Purpose: Test getting file's name.
+ * Purpose Test getting file's name.
*
- * Return: None
+ * Return None
*
- * Programmer: Binh-Minh Ribler
+ * Programmer Binh-Minh Ribler
* July, 2004
*
* Modifications:
*
*-------------------------------------------------------------------------
*/
-const int RANK = 2;
-const int NX = 4;
-const int NY = 5;
-const H5std_string GROUPNAME ("group");
-const H5std_string DSETNAME ("dataset");
-const H5std_string DATTRNAME ("dataset attribute");
-const H5std_string FATTRNAME ("file attribute");
-const H5std_string DTYPENAME ("compound");
+const int RANK = 2;
+const int NX = 4;
+const int NY = 5;
+const H5std_string GROUPNAME ("group");
+const H5std_string DSETNAME ("dataset");
+const H5std_string DATTRNAME ("dataset attribute");
+const H5std_string FATTRNAME ("file attribute");
+const H5std_string DTYPENAME ("compound");
// Compound datatype
typedef struct s1_t {
@@ -452,68 +453,67 @@ static void test_file_name()
H5std_string file_name;
try {
// Create a file using default properties.
- H5File file4(FILE4, H5F_ACC_TRUNC);
+ H5File file4(FILE4, H5F_ACC_TRUNC);
// Get file name from the file instance.
file_name = file4.getFileName();
- verify_val(file_name, FILE4, "H5File::getFileName", __LINE__, __FILE__);
+ verify_val(file_name, FILE4, "H5File::getFileName", __LINE__, __FILE__);
- // Create a group in the root group.
- Group group(file4.createGroup(GROUPNAME, 0));
+ // Create a group in the root group.
+ Group group(file4.createGroup(GROUPNAME, 0));
- // Get and verify file name via a group.
- file_name = group.getFileName();
- verify_val(file_name, FILE4, "Group::getFileName", __LINE__, __FILE__);
+ // Get and verify file name via a group.
+ file_name = group.getFileName();
+ verify_val(file_name, FILE4, "Group::getFileName", __LINE__, __FILE__);
- // Create the data space.
- hsize_t dims[RANK] = {NX, NY};
- DataSpace space(RANK, dims);
+ // Create the data space.
+ hsize_t dims[RANK] = {NX, NY};
+ DataSpace space(RANK, dims);
- // Create a new dataset.
- DataSet dataset(file4.createDataSet (DSETNAME, PredType::NATIVE_INT, space));
+ // Create a new dataset.
+ DataSet dataset(file4.createDataSet (DSETNAME, PredType::NATIVE_INT, space));
- // Get and verify file name via a dataset.
- file_name = dataset.getFileName();
- verify_val(file_name, FILE4, "DataSet::getFileName", __LINE__, __FILE__);
+ // Get and verify file name via a dataset.
+ file_name = dataset.getFileName();
+ verify_val(file_name, FILE4, "DataSet::getFileName", __LINE__, __FILE__);
- // Create an attribute for the dataset.
- Attribute attr(dataset.createAttribute(DATTRNAME, PredType::NATIVE_INT, space));
+ // Create an attribute for the dataset.
+ Attribute attr(dataset.createAttribute(DATTRNAME, PredType::NATIVE_INT, space));
- // Get and verify file name via an attribute.
- file_name = attr.getFileName();
- verify_val(file_name, FILE4, "Attribute::getFileName", __LINE__, __FILE__);
+ // Get and verify file name via an attribute.
+ file_name = attr.getFileName();
+ verify_val(file_name, FILE4, "Attribute::getFileName", __LINE__, __FILE__);
- // Create a compound datatype.
- CompType comp_type (sizeof(s1_t));
+ // Create a compound datatype.
+ CompType comp_type (sizeof(s1_t));
- // Insert fields.
- comp_type.insertMember("a", HOFFSET(s1_t, a), PredType::NATIVE_INT);
- comp_type.insertMember("b", HOFFSET(s1_t, b), PredType::NATIVE_FLOAT);
+ // Insert fields.
+ comp_type.insertMember("a", HOFFSET(s1_t, a), PredType::NATIVE_INT);
+ comp_type.insertMember("b", HOFFSET(s1_t, b), PredType::NATIVE_FLOAT);
- // Save it on file.
- comp_type.commit(file4, DTYPENAME);
+ // Save it on file.
+ comp_type.commit(file4, DTYPENAME);
- // Get and verify file name via a committed datatype.
- comp_type.getFileName();
- verify_val(file_name, FILE4, "CompType::getFileName", __LINE__, __FILE__);
- PASSED();
+ // Get and verify file name via a committed datatype.
+ file_name = comp_type.getFileName();
+ verify_val(file_name, FILE4, "CompType::getFileName", __LINE__, __FILE__);
+ PASSED();
} // end of try block
catch (Exception& E)
{
issue_fail_msg("test_file_name()", __LINE__, __FILE__, E.getCDetailMsg());
}
-
} // test_file_name()
-const int RANK1 = 1;
-const int ATTR1_DIM1 = 3;
-const H5std_string FILE5("tfattrs.h5");
-const H5std_string FATTR1_NAME ("file attribute 1");
-const H5std_string FATTR2_NAME ("file attribute 2");
-int fattr_data[ATTR1_DIM1]={512,-234,98123}; /* Test data for file attribute */
-int dattr_data[ATTR1_DIM1]={256,-123,1000}; /* Test data for dataset attribute */
+const int RANK1 = 1;
+const int ATTR1_DIM1 = 3;
+const H5std_string FILE5("tfattrs.h5");
+const H5std_string FATTR1_NAME ("file attribute 1");
+const H5std_string FATTR2_NAME ("file attribute 2");
+int fattr_data[ATTR1_DIM1]={512,-234,98123}; // Test data for file attribute
+int dattr_data[ATTR1_DIM1]={256,-123,1000}; // Test data for dataset attribute
static void test_file_attribute()
{
@@ -526,89 +526,89 @@ static void test_file_attribute()
H5std_string file_name;
try {
// Create a file using default properties.
- H5File file5(FILE5, H5F_ACC_TRUNC);
+ H5File file5(FILE5, H5F_ACC_TRUNC);
- // Create the data space
- hsize_t dims[RANK1] = {ATTR1_DIM1};
- DataSpace space(RANK1, dims);
+ // Create the data space
+ hsize_t dims[RANK1] = {ATTR1_DIM1};
+ DataSpace space(RANK1, dims);
- // Create two attributes for the file
- Attribute fattr1(file5.createAttribute(FATTR1_NAME, PredType::NATIVE_FLOAT, space));
- Attribute fattr2(file5.createAttribute(FATTR2_NAME, PredType::NATIVE_INT, space));
+ // Create two attributes for the file
+ Attribute fattr1(file5.createAttribute(FATTR1_NAME, PredType::NATIVE_FLOAT, space));
+ Attribute fattr2(file5.createAttribute(FATTR2_NAME, PredType::NATIVE_INT, space));
- fattr2.write(PredType::NATIVE_INT, fattr_data);
+ fattr2.write(PredType::NATIVE_INT, fattr_data);
- try {
- // Try to create the same attribute again (should fail)
- Attribute fattr_dup(file5.createAttribute(FATTR2_NAME, PredType::NATIVE_INT, space));
- // Should FAIL but didn't, so throw an invalid action exception
- throw InvalidActionException("H5File createAttribute", "Attempted to create an existing attribute.");
- }
- catch (AttributeIException& E) // catch creating existing attribute
- {} // do nothing, FAIL expected
+ try {
+ // Try to create the same attribute again (should fail)
+ Attribute fattr_dup(file5.createAttribute(FATTR2_NAME, PredType::NATIVE_INT, space));
+ // Should FAIL but didn't, so throw an invalid action exception
+ throw InvalidActionException("H5File createAttribute", "Attempted to create an existing attribute.");
+ }
+ catch (AttributeIException& E) // catch creating existing attribute
+ {} // do nothing, FAIL expected
- // Create a new dataset
- DataSet dataset(file5.createDataSet (DSETNAME, PredType::NATIVE_INT, space));
+ // Create a new dataset
+ DataSet dataset(file5.createDataSet (DSETNAME, PredType::NATIVE_INT, space));
- // Create an attribute for the dataset
- Attribute dattr(dataset.createAttribute(DATTRNAME, PredType::NATIVE_INT, space));
+ // Create an attribute for the dataset
+ Attribute dattr(dataset.createAttribute(DATTRNAME, PredType::NATIVE_INT, space));
- // Write data to the second file attribute
- dattr.write(PredType::NATIVE_INT, dattr_data);
+ // Write data to the dataset attribute
+ dattr.write(PredType::NATIVE_INT, dattr_data);
- // Test flushing out the data from the attribute object
+ // Test flushing out the data from the attribute object
dattr.flush(H5F_SCOPE_GLOBAL);
- // Get and verify the number of all objects in the file
- // Current: 1 file, 2 file attr, 1 ds, and 1 ds attr.
- ssize_t num_objs = file5.getObjCount(H5F_OBJ_ALL);
- verify_val(num_objs, 5, "H5File::getObjCount", __LINE__, __FILE__);
-
- num_objs = file5.getObjCount(H5F_OBJ_GROUP);
- verify_val(num_objs, 0, "H5File::getObjCount(H5F_OBJ_GROUP)", __LINE__, __FILE__);
- num_objs = file5.getObjCount(H5F_OBJ_DATASET);
- verify_val(num_objs, 1, "H5File::getObjCount(H5F_OBJ_DATASET)", __LINE__, __FILE__);
- num_objs = file5.getObjCount(H5F_OBJ_ATTR);
- verify_val(num_objs, 3, "H5File::getObjCount(H5F_OBJ_ATTR)", __LINE__, __FILE__);
- num_objs = file5.getObjCount(H5F_OBJ_DATATYPE);
- verify_val(num_objs, 0, "H5File::getObjCount(H5F_OBJ_DATATYPE)", __LINE__, __FILE__);
- num_objs = file5.getObjCount(H5F_OBJ_FILE);
- verify_val(num_objs, 1, "H5File::getObjCount(H5F_OBJ_FILE)", __LINE__, __FILE__);
-
- // Get the file name using the attributes
- H5std_string fname = fattr1.getFileName();
- verify_val(fname, FILE5, "H5File::getFileName()", __LINE__, __FILE__);
-
- fname.clear();
- fname = dattr.getFileName();
- verify_val(fname, FILE5, "H5File::getFileName()", __LINE__, __FILE__);
-
- // Get the class of a file attribute's datatype
- H5T_class_t atclass = fattr1.getTypeClass();
- verify_val(atclass, H5T_FLOAT, "Attribute::getTypeClass()", __LINE__, __FILE__);
-
- // Get and verify the number of attributes attached to a file
- int n_attrs = file5.getNumAttrs();
- verify_val(n_attrs, 2, "H5File::getNumAttrs()", __LINE__, __FILE__);
-
- // Get and verify the number of attributes attached to a dataset
- n_attrs = 0;
- n_attrs = dataset.getNumAttrs();
- verify_val(n_attrs, 1, "DataSet::getNumAttrs()", __LINE__, __FILE__);
-
- // Read back attribute's data
- HDmemset(rdata, 0, sizeof(rdata));
+ // Get and verify the number of all objects in the file
+ // Current: 1 file, 2 file attr, 1 ds, and 1 ds attr.
+ ssize_t num_objs = file5.getObjCount(H5F_OBJ_ALL);
+ verify_val(num_objs, 5, "H5File::getObjCount", __LINE__, __FILE__);
+
+ num_objs = file5.getObjCount(H5F_OBJ_GROUP);
+ verify_val(num_objs, 0, "H5File::getObjCount(H5F_OBJ_GROUP)", __LINE__, __FILE__);
+ num_objs = file5.getObjCount(H5F_OBJ_DATASET);
+ verify_val(num_objs, 1, "H5File::getObjCount(H5F_OBJ_DATASET)", __LINE__, __FILE__);
+ num_objs = file5.getObjCount(H5F_OBJ_ATTR);
+ verify_val(num_objs, 3, "H5File::getObjCount(H5F_OBJ_ATTR)", __LINE__, __FILE__);
+ num_objs = file5.getObjCount(H5F_OBJ_DATATYPE);
+ verify_val(num_objs, 0, "H5File::getObjCount(H5F_OBJ_DATATYPE)", __LINE__, __FILE__);
+ num_objs = file5.getObjCount(H5F_OBJ_FILE);
+ verify_val(num_objs, 1, "H5File::getObjCount(H5F_OBJ_FILE)", __LINE__, __FILE__);
+
+ // Get the file name using the attributes
+ H5std_string fname = fattr1.getFileName();
+ verify_val(fname, FILE5, "H5File::getFileName()", __LINE__, __FILE__);
+
+ fname.clear();
+ fname = dattr.getFileName();
+ verify_val(fname, FILE5, "H5File::getFileName()", __LINE__, __FILE__);
+
+ // Get the class of a file attribute's datatype
+ H5T_class_t atclass = fattr1.getTypeClass();
+ verify_val(atclass, H5T_FLOAT, "Attribute::getTypeClass()", __LINE__, __FILE__);
+
+ // Get and verify the number of attributes attached to a file
+ int n_attrs = file5.getNumAttrs();
+ verify_val(n_attrs, 2, "H5File::getNumAttrs()", __LINE__, __FILE__);
+
+ // Get and verify the number of attributes attached to a dataset
+ n_attrs = 0;
+ n_attrs = dataset.getNumAttrs();
+ verify_val(n_attrs, 1, "DataSet::getNumAttrs()", __LINE__, __FILE__);
+
+ // Read back attribute's data
+ HDmemset(rdata, 0, sizeof(rdata));
dattr.read(PredType::NATIVE_INT, rdata);
/* Check results */
for (i = 0; i < ATTR1_DIM1; i++) {
if (rdata[i] != dattr_data[i]) {
H5_FAILED();
- cerr << endl;
+ cerr << endl;
cerr << "element [" << i << "] is " << rdata[i] <<
- "but should have been " << dattr_data[i] << endl;
+ "but should have been " << dattr_data[i] << endl;
}
}
- PASSED();
+ PASSED();
} // end of try block
catch (Exception& E)
@@ -616,29 +616,30 @@ static void test_file_attribute()
issue_fail_msg("test_file_attribute()", __LINE__, __FILE__, E.getCDetailMsg());
}
} // test_file_attribute()
+
-const H5std_string FILE6("tfile5.h5");
-const H5std_string ROOTGROUP("/");
-const H5std_string GROUP1("/G1");
-const H5std_string SUBGROUP3("/G1/G3");
+const H5std_string FILE6("tfile5.h5");
+const H5std_string ROOTGROUP("/");
+const H5std_string GROUP1("/G1");
+const H5std_string SUBGROUP3("/G1/G3");
/*-------------------------------------------------------------------------
- * Function: test_libver_bounds_real
+ * Function: test_libver_bounds_real
*
- * Purpose: Verify that a file created and modified with the
- * specified libver bounds has the specified object header
- * versions for the right objects.
+ * Purpose Verify that a file created and modified with the
+ * specified libver bounds has the specified object header
+ * versions for the right objects.
*
- * Return: None
+ * Return None
*
- * Programmer: Binh-Minh Ribler (use C version)
- * March, 2015
+ * Programmer Binh-Minh Ribler (use C version)
+ * March, 2015
*
*-------------------------------------------------------------------------
*/
static void test_libver_bounds_real(
- H5F_libver_t libver_create, unsigned oh_vers_create,
- H5F_libver_t libver_mod, unsigned oh_vers_mod)
+ H5F_libver_t libver_create, unsigned oh_vers_create,
+ H5F_libver_t libver_mod, unsigned oh_vers_mod)
{
try {
@@ -674,8 +675,8 @@ static void test_libver_bounds_real(
*/
Group group = file.createGroup(GROUP1);
- obj_version = file.childObjVersion(GROUP1);
- verify_val(obj_version, oh_vers_mod, "H5File::childObjVersion", __LINE__, __FILE__);
+ obj_version = group.objVersion();
+ verify_val(obj_version, oh_vers_mod, "Group::objVersion", __LINE__, __FILE__);
group.close(); // close "/G1"
@@ -685,8 +686,8 @@ static void test_libver_bounds_real(
*/
group = file.createGroup(SUBGROUP3);
- obj_version = group.childObjVersion(SUBGROUP3);
- verify_val(obj_version, oh_vers_mod, "H5File::childObjVersion", __LINE__, __FILE__);
+ obj_version = group.objVersion();
+ verify_val(obj_version, oh_vers_mod, "Group::objVersion", __LINE__, __FILE__);
group.close(); // close "/G1/G3"
@@ -710,12 +711,12 @@ static void test_libver_bounds_real(
*
* Function: test_libver_bounds
*
- * Purpose: Verify that a file created and modified with various
- * libver bounds is handled correctly.
+ * Purpose Verify that a file created and modified with various
+ * libver bounds is handled correctly.
*
- * Return: None
+ * Return None
*
- * Programmer: Binh-Minh Ribler (use C version)
+ * Programmer Binh-Minh Ribler (use C version)
* March 2015
*
*-------------------------------------------------------------------------
@@ -732,16 +733,14 @@ static void test_libver_bounds()
} /* end test_libver_bounds() */
/*-------------------------------------------------------------------------
- * Function: test_commonfg
+ * Function: test_commonfg
*
- * Purpose: Verify that a file created and modified with the
- * specified libver bounds has the specified object header
- * versions for the right objects.
+ * Purpose Verify that H5File works as a root group.
*
- * Return: None
+ * Return None
*
- * Programmer: Binh-Minh Ribler (use C version)
- * March, 2015
+ * Programmer Binh-Minh Ribler (use C version)
+ * March, 2015
*
*-------------------------------------------------------------------------
*/
@@ -752,40 +751,40 @@ static void test_commonfg()
try {
// Create a file using default properties.
- H5File file4(FILE4, H5F_ACC_TRUNC);
+ H5File file4(FILE4, H5F_ACC_TRUNC);
- // Try opening the root group.
- Group rootgroup(file4.openGroup(ROOTGROUP));
+ // Try opening the root group.
+ Group rootgroup(file4.openGroup(ROOTGROUP));
- // Create a group in the root group.
- Group group(rootgroup.createGroup(GROUPNAME, 0));
+ // Create a group in the root group.
+ Group group(rootgroup.createGroup(GROUPNAME, 0));
- // Create the data space.
- hsize_t dims[RANK] = {NX, NY};
- DataSpace space(RANK, dims);
+ // Create the data space.
+ hsize_t dims[RANK] = {NX, NY};
+ DataSpace space(RANK, dims);
- // Create a new dataset.
- DataSet dataset(group.createDataSet (DSETNAME, PredType::NATIVE_INT, space));
+ // Create a new dataset.
+ DataSet dataset(group.createDataSet (DSETNAME, PredType::NATIVE_INT, space));
- // Get and verify file name via a dataset.
- H5std_string file_name = dataset.getFileName();
- verify_val(file_name, FILE4, "DataSet::getFileName", __LINE__, __FILE__);
+ // Get and verify file name via a dataset.
+ H5std_string file_name = dataset.getFileName();
+ verify_val(file_name, FILE4, "DataSet::getFileName", __LINE__, __FILE__);
- // Create an attribute for the dataset.
- Attribute attr(dataset.createAttribute(DATTRNAME, PredType::NATIVE_INT, space));
+ // Create an attribute for the dataset.
+ Attribute attr(dataset.createAttribute(DATTRNAME, PredType::NATIVE_INT, space));
- // Get and verify file name via an attribute.
- file_name = attr.getFileName();
- verify_val(file_name, FILE4, "Attribute::getFileName", __LINE__, __FILE__);
+ // Get and verify file name via an attribute.
+ file_name = attr.getFileName();
+ verify_val(file_name, FILE4, "Attribute::getFileName", __LINE__, __FILE__);
- // Create an attribute for the file via root group.
- Attribute rootg_attr(rootgroup.createAttribute(FATTRNAME, PredType::NATIVE_INT, space));
+ // Create an attribute for the file via root group.
+ Attribute rootg_attr(rootgroup.createAttribute(FATTRNAME, PredType::NATIVE_INT, space));
- // Get and verify file name via an attribute.
- file_name = attr.getFileName();
- verify_val(file_name, FILE4, "Attribute::getFileName", __LINE__, __FILE__);
+ // Get and verify file name via the attribute on the root group.
+ file_name = rootg_attr.getFileName();
+ verify_val(file_name, FILE4, "Attribute::getFileName", __LINE__, __FILE__);
- PASSED();
+ PASSED();
} // end of try block
catch (Exception& E)
@@ -795,14 +794,153 @@ static void test_commonfg()
} /* end test_commonfg() */
+const H5std_string FILE7("tfile7.h5");
+
+/*-------------------------------------------------------------------------
+ * Function: test_file_info
+ *
+ * Purpose Verify that various properties in a file creation property
+ * list are stored correctly in the file and can be retrieved
+ * when the file is re-opened.
+ *
+ * Return None
+ *
+ * Programmer Binh-Minh Ribler
+ * February, 2017
+ *
+ *-------------------------------------------------------------------------
+ */
+const hsize_t FSP_SIZE_DEF = 4096;
+const hsize_t FSP_SIZE512 = 512;
+static void test_file_info()
+{
+ // Output message about test being performed
+ SUBTEST("File general information");
+
+ hsize_t out_threshold = 0; // Free space section threshold to get
+ hbool_t out_persist = FALSE; // Persist free-space read
+ // File space handling strategy
+ H5F_fspace_strategy_t out_strategy = H5F_FSPACE_STRATEGY_FSM_AGGR;
+
+ try {
+ // Create a file using default properties.
+ H5File tempfile(FILE7, H5F_ACC_TRUNC);
+
+ // Get the file's version information.
+ H5F_info2_t finfo;
+ tempfile.getFileInfo(finfo);
+ verify_val(finfo.super.version, 0, "H5File::getFileInfo", __LINE__, __FILE__);
+ verify_val(finfo.free.version, 0, "H5File::getFileInfo", __LINE__, __FILE__);
+ verify_val(finfo.sohm.version, 0, "H5File::getFileInfo", __LINE__, __FILE__);
+
+ // Close the file.
+ tempfile.close();
+
+ // Create file creation property list.
+ FileCreatPropList fcpl;
+
+ // Retrieve file space information.
+ fcpl.getFileSpaceStrategy(out_strategy, out_persist, out_threshold);
+
+ // Verify file space information.
+ verify_val(out_strategy, H5F_FSPACE_STRATEGY_FSM_AGGR, "FileCreatPropList::getFileSpaceStrategy", __LINE__, __FILE__);
+ verify_val(out_persist, FALSE, "FileCreatPropList::getFileSpaceStrategy", __LINE__, __FILE__);
+ verify_val(out_threshold, 1, "FileCreatPropList::getFileSpaceStrategy", __LINE__, __FILE__);
+
+ /* Retrieve file space page size */
+ hsize_t out_fsp_psize = fcpl.getFileSpacePagesize();
+ verify_val(out_fsp_psize, FSP_SIZE_DEF, "FileCreatPropList::getFileSpacePagesize", __LINE__, __FILE__);
+
+ // Set various file information.
+ fcpl.setUserblock(F2_USERBLOCK_SIZE);
+ fcpl.setSizes(F2_OFFSET_SIZE, F2_LENGTH_SIZE);
+ fcpl.setSymk(F2_SYM_INTERN_K, F2_SYM_LEAF_K);
+ fcpl.setIstorek(F2_ISTORE);
+
+ hsize_t threshold = 5; // Free space section threshold to set
+ hbool_t persist = TRUE; // Persist free-space to set
+ H5F_fspace_strategy_t strategy = H5F_FSPACE_STRATEGY_PAGE;
+
+ fcpl.setFileSpaceStrategy(strategy, persist, threshold);
+ fcpl.setFileSpacePagesize(FSP_SIZE512);
+
+ // Creating a file with the non-default file creation property list
+ // should create a version 2 superblock
+
+ // Create file with custom file creation property list.
+ H5File file7(FILE7, H5F_ACC_TRUNC, fcpl);
+
+ // Close the file creation property list.
+ fcpl.close();
+
+ // Get the file's version information.
+ file7.getFileInfo(finfo);
+ verify_val(finfo.super.version, 2, "H5File::getFileInfo", __LINE__, __FILE__);
+ verify_val(finfo.free.version, 0, "H5File::getFileInfo", __LINE__, __FILE__);
+ verify_val(finfo.sohm.version, 0, "H5File::getFileInfo", __LINE__, __FILE__);
+
+ // Close the file.
+ file7.close();
+
+ // Re-open the file.
+ file7.openFile(FILE7, H5F_ACC_RDONLY);
+
+ // Get the file's creation property list.
+ FileCreatPropList fcpl2 = file7.getCreatePlist();
+
+ // Get the file's version information.
+ file7.getFileInfo(finfo);
+ verify_val(finfo.super.version, 2, "H5File::getFileInfo", __LINE__, __FILE__);
+ verify_val(finfo.free.version, 0, "H5File::getFileInfo", __LINE__, __FILE__);
+ verify_val(finfo.sohm.version, 0, "H5File::getFileInfo", __LINE__, __FILE__);
+
+ // Retrieve the property values & check them.
+ hsize_t userblock = fcpl2.getUserblock();
+ verify_val(userblock, F2_USERBLOCK_SIZE, "FileCreatPropList::getUserblock", __LINE__, __FILE__);
+
+ size_t off_size = 0, len_size = 0;
+ fcpl2.getSizes(off_size, len_size);
+ verify_val(off_size, F2_OFFSET_SIZE, "FileCreatPropList::getSizes", __LINE__, __FILE__);
+ verify_val(len_size, F2_LENGTH_SIZE, "FileCreatPropList::getSizes", __LINE__, __FILE__);
+
+ unsigned sym_ik = 0, sym_lk = 0;
+ fcpl2.getSymk(sym_ik, sym_lk);
+ verify_val(sym_ik, F2_SYM_INTERN_K, "FileCreatPropList::getSymk", __LINE__, __FILE__);
+ verify_val(sym_lk, F2_SYM_LEAF_K, "FileCreatPropList::getSymk", __LINE__, __FILE__);
+
+ unsigned istore_ik = fcpl2.getIstorek();
+ verify_val(istore_ik, F2_ISTORE, "FileCreatPropList::getIstorek", __LINE__, __FILE__);
+
+ /* ret=H5Pget_shared_mesg_nindexes(fcpl2,&nindexes);
+ CHECK(ret, FAIL, "H5Pget_shared_mesg_nindexes");
+ VERIFY(nindexes, MISC11_NINDEXES, "H5Pget_shared_mesg_nindexes");
+ */
+
+ // Get and verify the file space info from the creation property list
+ fcpl2.getFileSpaceStrategy(out_strategy, out_persist, out_threshold);
+ verify_val(out_strategy, strategy, "FileCreatPropList::getFileSpaceStrategy", __LINE__, __FILE__);
+ verify_val(out_persist, persist, "FileCreatPropList::getFileSpaceStrategy", __LINE__, __FILE__);
+ verify_val(out_threshold, threshold, "FileCreatPropList::getFileSpaceStrategy", __LINE__, __FILE__);
+
+ out_fsp_psize = fcpl2.getFileSpacePagesize();
+ verify_val(out_fsp_psize, FSP_SIZE512, "FileCreatPropList::getFileSpacePagesize", __LINE__, __FILE__);
+
+ PASSED();
+ } // end of try block
+ catch (Exception& E)
+ {
+ issue_fail_msg("test_filespace_info()", __LINE__, __FILE__, E.getCDetailMsg());
+ }
+} /* test_file_info() */
+
/*-------------------------------------------------------------------------
* Function: test_file
*
- * Purpose: Main file testing routine
+ * Purpose Main file testing routine
*
- * Return: None
+ * Return None
*
- * Programmer: Binh-Minh Ribler (use C version)
+ * Programmer Binh-Minh Ribler (use C version)
* January 2001
*
* Modifications:
@@ -815,24 +953,25 @@ void test_file()
// Output message about test being performed
MESSAGE(5, ("Testing File I/O Operations\n"));
- test_file_create(); // Test file creation (also creation templates)
- test_file_open(); // Test file opening
- test_file_size(); // Test file size
- test_file_name(); // Test getting file's name
- test_file_attribute(); // Test file attribute feature
- test_libver_bounds(); // Test format version
- test_commonfg();
+ test_file_create(); // Test file creation (also creation templates)
+ test_file_open(); // Test file opening
+ test_file_size(); // Test file size
+ test_file_name(); // Test getting file's name
+ test_file_attribute(); // Test file attribute feature
+ test_libver_bounds(); // Test format version
+ test_commonfg(); // Test H5File as a root group
+ test_file_info(); // Test various file info
} // test_file()
/*-------------------------------------------------------------------------
- * Function: cleanup_file
+ * Function: cleanup_file
*
- * Purpose: Cleanup temporary test files
+ * Purpose Cleanup temporary test files
*
- * Return: none
+ * Return none
*
- * Programmer: (use C version)
+ * Programmer (use C version)
*
* Modifications:
*
@@ -846,7 +985,8 @@ void cleanup_file()
HDremove(FILE1.c_str());
HDremove(FILE2.c_str());
HDremove(FILE3.c_str());
-// HDremove(FILE4.c_str());
+ HDremove(FILE4.c_str());
HDremove(FILE5.c_str());
HDremove(FILE6.c_str());
+ HDremove(FILE7.c_str());
} // cleanup_file
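
The new test_file_info() above drives the C++ wrappers for the file-space creation
properties end to end. The following is a minimal standalone sketch of that same call
sequence, using only the FileCreatPropList and H5File methods that appear in the hunk;
the file name "fsinfo.h5" and the literal values 5 and 512 are illustrative stand-ins
for the test's own constants, not part of this diff.

    #include "H5Cpp.h"
    using namespace H5;

    int main()
    {
        // Build a creation property list with a paged file-space strategy,
        // persistent free-space tracking, a section threshold of 5, and a
        // 512-byte file-space page size.
        FileCreatPropList fcpl;
        fcpl.setFileSpaceStrategy(H5F_FSPACE_STRATEGY_PAGE, TRUE, 5);
        fcpl.setFileSpacePagesize(512);

        // Create the file with that list, then re-open it read-only.
        H5File file("fsinfo.h5", H5F_ACC_TRUNC, fcpl);
        fcpl.close();
        file.close();
        file.openFile("fsinfo.h5", H5F_ACC_RDONLY);

        // Read the stored settings back from the file's own creation list.
        FileCreatPropList fcpl2 = file.getCreatePlist();
        H5F_fspace_strategy_t strategy;
        hbool_t               persist;
        hsize_t               threshold;
        fcpl2.getFileSpaceStrategy(strategy, persist, threshold);
        hsize_t page = fcpl2.getFileSpacePagesize();

        return (strategy == H5F_FSPACE_STRATEGY_PAGE && persist &&
                threshold == 5 && page == 512) ? 0 : 1;
    }
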
diff --git a/c++/test/tfilter.cpp b/c++/test/tfilter.cpp
index ee78fe1..5de6590 100644
--- a/c++/test/tfilter.cpp
+++ b/c++/test/tfilter.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*****************************************************************************
@@ -33,15 +31,15 @@ using namespace H5;
#include "h5test.h"
#include "h5cpputil.h" // C++ utilility header file
-#define DSET_DIM1 100
-#define DSET_DIM2 200
+#define DSET_DIM1 100
+#define DSET_DIM2 200
#define FILTER_CHUNK_DIM1 2
#define FILTER_CHUNK_DIM2 25
// will do this function later or use it as guideline - BMR - 2007/01/26
#if 0
static herr_t test_filter_internal(hid_t fid, const char *name, hid_t dcpl,
- int if_fletcher32, int corrupted, hsize_t *dset_size)
+ int if_fletcher32, int corrupted, hsize_t *dset_size)
{
cerr << "do nothing right now" << endl;
return(0);
@@ -58,25 +56,25 @@ static size_t filter_bogus(unsigned int flags, size_t cd_nelmts,
static size_t filter_bogus(size_t nbytes);
/* This message derives from H5Z */
const H5Z_class2_t H5Z_BOGUS[1] = {{
- H5Z_CLASS_T_VERS, /* H5Z_class_t version */
- H5Z_FILTER_BOGUS, /* Filter id number */
- 1, 1, /* Encoding and decoding enabled */
- "bogus", /* Filter name for debugging */
- NULL, /* The "can apply" callback */
- NULL, /* The "set local" callback */
+ H5Z_CLASS_T_VERS, /* H5Z_class_t version */
+ H5Z_FILTER_BOGUS, /* Filter id number */
+ 1, 1, /* Encoding and decoding enabled */
+ "bogus", /* Filter name for debugging */
+ NULL, /* The "can apply" callback */
+ NULL, /* The "set local" callback */
(H5Z_func_t)filter_bogus, /* The actual filter function */
}};
/*-------------------------------------------------------------------------
* Function: filter_bogus
*
- * Purpose: A bogus compression method that doesn't do anything.
+ * Purpose A bogus compression method that doesn't do anything.
*
- * Return: Success: Data chunk size
+ * Return Success: Data chunk size
*
- * Failure: 0
+ * Failure: 0
*
- * Programmer: Robb Matzke
+ * Programmer Robb Matzke
* Tuesday, April 21, 1998
*
* Modifications:
@@ -95,17 +93,17 @@ filter_bogus(size_t nbytes)
}
/*-------------------------------------------------------------------------
- * Function: test_null_filter
+ * Function: test_null_filter
*
- * Purpose: Test null I/O filter by itself.
+ * Purpose Test null I/O filter by itself.
*
- * Return: None
+ * Return None
*
- * Programmer: Binh-Minh Ribler (use C version, from dsets.c/test_filters)
- * January, 2007
+ * Programmer Binh-Minh Ribler (use C version, from dsets.c/test_filters)
+ * January, 2007
*
* Modifications:
- * Note: H5Z interface is not implemented yet.
+ * Note: H5Z interface is not implemented yet.
*
*-------------------------------------------------------------------------
*/
@@ -118,25 +116,25 @@ static void test_null_filter()
// Output message about test being performed
SUBTEST("'Null' filter");
try {
- //hsize_t null_size; // Size of dataset with null filter
+ //hsize_t null_size; // Size of dataset with null filter
- // Prepare dataset create property list
- DSetCreatPropList dsplist;
- dsplist.setChunk(2, chunk_size);
+ // Prepare dataset create property list
+ DSetCreatPropList dsplist;
+ dsplist.setChunk(2, chunk_size);
- if (H5Zregister (H5Z_BOGUS)<0)
+ if (H5Zregister (H5Z_BOGUS)<0)
throw Exception("test_null_filter", "H5Zregister failed");
- // Set some pretent filter
- dsplist.setFilter(H5Z_FILTER_BOGUS);
+ // Set some pretend filter
+ dsplist.setFilter(H5Z_FILTER_BOGUS);
- // this function is just a stub right now; will work on it later - BMR
- //if(test_filter_internal(file,DSET_BOGUS_NAME,dc,DISABLE_FLETCHER32,DATA_NOT_CORRUPTED,&null_size)<0)
+ // this function is just a stub right now; will work on it later - BMR
+ //if(test_filter_internal(file,DSET_BOGUS_NAME,dc,DISABLE_FLETCHER32,DATA_NOT_CORRUPTED,&null_size)<0)
// throw Exception("test_null_filter", "test_filter_internal failed");
- // Close objects.
- dsplist.close();
- PASSED();
+ // Close objects.
+ dsplist.close();
+ PASSED();
} // end of try
// catch all other exceptions
@@ -147,17 +145,17 @@ static void test_null_filter()
} // test_null_filter
/*-------------------------------------------------------------------------
- * Function: test_szip_filter
+ * Function: test_szip_filter
*
- * Purpose: Test SZIP filter by itself.
+ * Purpose Test SZIP filter by itself.
*
- * Return: None
+ * Return None
*
- * Programmer: Binh-Minh Ribler (partly from dsets.c/test_filters)
- * January, 2007
+ * Programmer Binh-Minh Ribler (partly from dsets.c/test_filters)
+ * January, 2007
*
* Modifications:
- * Note: H5Z interface is not implemented yet.
+ * Note: H5Z interface is not implemented yet.
*
*-------------------------------------------------------------------------
*/
@@ -232,7 +230,7 @@ static void test_szip_filter(H5File& file1)
delete[] tconv_buf;
} // if szip presents
else {
- SKIPPED();
+ SKIPPED();
}
#else /* H5_HAVE_FILTER_SZIP */
@@ -266,9 +264,9 @@ void test_filters()
H5File file1(FILE1, H5F_ACC_TRUNC, FileCreatPropList::DEFAULT, fapl);
- // Test basic VL string datatype
- test_null_filter();
- test_szip_filter(file1);
+ // Test the null and szip I/O filters
+ test_null_filter();
+ test_szip_filter(file1);
}
catch (Exception& E)
{
@@ -277,13 +275,13 @@ void test_filters()
} // test_filters()
/*-------------------------------------------------------------------------
- * Function: cleanup_filters
+ * Function: cleanup_filters
*
- * Purpose: Cleanup temporary test files
+ * Purpose Cleanup temporary test files
*
- * Return: none
+ * Return none
*
- * Programmer: Quincey Koziol
+ * Programmer Quincey Koziol
* September 10, 1999
*
* Modifications:
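
For context on the H5Z_BOGUS pieces touched above: test_null_filter() registers a
pass-through filter and attaches it to a dataset creation property list. Below is a
compact sketch of that sequence, reusing the declarations shown in the hunks; the
filter id value 305 is an assumption (the test defines its own id elsewhere), and the
chunk dimensions 2 x 25 come from FILTER_CHUNK_DIM1/FILTER_CHUNK_DIM2 above.

    #include "hdf5.h"
    #include "H5Cpp.h"
    using namespace H5;

    #define H5Z_FILTER_BOGUS 305    /* assumed id; must be above the reserved range */

    // Pass-through filter: report the chunk as unchanged.
    static size_t filter_bogus(unsigned int, size_t, const unsigned int[],
                               size_t nbytes, size_t*, void**)
    {
        return nbytes;
    }

    const H5Z_class2_t H5Z_BOGUS[1] = {{
        H5Z_CLASS_T_VERS,           /* H5Z_class_t version        */
        H5Z_FILTER_BOGUS,           /* Filter id number           */
        1, 1,                       /* Encoding/decoding enabled  */
        "bogus",                    /* Filter name for debugging  */
        NULL,                       /* The "can apply" callback   */
        NULL,                       /* The "set local" callback   */
        (H5Z_func_t)filter_bogus,   /* The actual filter function */
    }};

    int main()
    {
        hsize_t chunk_size[2] = {2, 25};     // FILTER_CHUNK_DIM1 x FILTER_CHUNK_DIM2

        DSetCreatPropList dsplist;
        dsplist.setChunk(2, chunk_size);     // filters require a chunked layout

        if (H5Zregister(H5Z_BOGUS) < 0)      // make the filter known to the library
            return 1;

        dsplist.setFilter(H5Z_FILTER_BOGUS); // attach it to the creation list
        dsplist.close();
        return 0;
    }
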
diff --git a/c++/test/th5s.cpp b/c++/test/th5s.cpp
index c795c08..e99ce99 100644
--- a/c++/test/th5s.cpp
+++ b/c++/test/th5s.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*****************************************************************************
@@ -35,7 +33,7 @@ using namespace H5;
#include "h5test.h"
#include "h5cpputil.h" // C++ utilility header file
-#include "H5srcdir.h" // srcdir querying header file
+#include "H5srcdir.h" // srcdir querying header file
const H5std_string TESTFILE("th5s.h5");
const H5std_string DATAFILE("th5s1.h5");
@@ -86,124 +84,123 @@ int space5_data = 7;
/*-------------------------------------------------------------------------
*
- * Function: test_h5s_basic
+ * Function: test_h5s_basic
*
- * Purpose: Test basic H5S (dataspace) code
+ * Purpose Test basic H5S (dataspace) code
*
- * Return: none
+ * Return none
*
- * Programmer: Binh-Minh Ribler (using C version)
+ * Programmer Binh-Minh Ribler (using C version)
* Mar 2001
*
* Modifications:
* January, 2005: C tests' macro VERIFY casts values to 'long' for all
- * cases. Since there are no operator<< for 'long long'
- * or int64 in VS C++ ostream, I casted the hssize_t values
- * passed to verify_val to 'long' as well. If problems
- * arises later, this will have to be specificly handled
- * with a special routine.
+ * cases. Since there is no operator<< for 'long long'
+ * or int64 in VS C++ ostream, I cast the hssize_t values
+ * passed to verify_val to 'long' as well. If problems
+ * arise later, this will have to be specifically handled
+ * with a special routine.
* April 12, 2011: Raymond Lu
- * Starting from the 1.8.7 release, we allow dimension
- * size to be zero. So I took out the test against it.
+ * Starting from the 1.8.7 release, we allow dimension
+ * size to be zero. So I took out the test against it.
*-------------------------------------------------------------------------
*/
static void test_h5s_basic()
{
- hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3};
- hsize_t dims2[] = {SPACE2_DIM1, SPACE2_DIM2, SPACE2_DIM3,
- SPACE2_DIM4};
- hsize_t dims3[H5S_MAX_RANK+1];
- hsize_t tmax[4];
+ hsize_t dims1[] = {SPACE1_DIM1, SPACE1_DIM2, SPACE1_DIM3};
+ hsize_t dims2[] = {SPACE2_DIM1, SPACE2_DIM2, SPACE2_DIM3, SPACE2_DIM4};
+ hsize_t dims3[H5S_MAX_RANK+1];
+ hsize_t tmax[4];
// Output message about test being performed
SUBTEST("Dataspace Manipulation");
try {
- // Create simple dataspace sid1
- DataSpace sid1 (SPACE1_RANK, dims1 );
-
- // Get simple extent npoints of the dataspace sid1 and verify it
- hssize_t n; // Number of dataspace elements
- n = sid1.getSimpleExtentNpoints();
- verify_val((long)n, (long)(SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3),
- "DataSpace::getSimpleExtentNpoints", __LINE__, __FILE__);
-
- // Get the logical rank of dataspace sid1 and verify it
- int rank; // Logical rank of dataspace
- rank = sid1.getSimpleExtentNdims();
- verify_val(rank, SPACE1_RANK, "DataSpace::getSimpleExtentNdims", __LINE__, __FILE__);
-
- // Retrieves dimension size of dataspace sid1 and verify it
- int ndims; // Number of dimensions
- hsize_t tdims[4]; // Dimension array to test with
- ndims = sid1.getSimpleExtentDims( tdims );
- verify_val(ndims, SPACE1_RANK, "DataSpace::getSimpleExtentDims", __LINE__, __FILE__);
- verify_val(HDmemcmp(tdims, dims1, SPACE1_RANK * sizeof(unsigned)), 0,
- "DataSpace::getSimpleExtentDims", __LINE__, __FILE__);
-
- // Create simple dataspace sid2
- hsize_t max2[] = {SPACE2_MAX1, SPACE2_MAX2, SPACE2_MAX3, SPACE2_MAX4};
- DataSpace sid2 (SPACE2_RANK, dims2, max2);
-
- // Get simple extent npoints of dataspace sid2 and verify it
- n = sid2.getSimpleExtentNpoints();
- verify_val((long)n, (long)(SPACE2_DIM1 * SPACE2_DIM2 * SPACE2_DIM3 * SPACE2_DIM4),
- "DataSpace::getSimpleExtentNpoints", __LINE__, __FILE__);
-
- // Get the logical rank of dataspace sid2 and verify it
- rank = sid2.getSimpleExtentNdims();
- verify_val(rank, SPACE2_RANK, "DataSpace::getSimpleExtentNdims", __LINE__, __FILE__);
-
- // Retrieves dimension size and max size of dataspace sid2 and
- // verify them
- ndims = sid2.getSimpleExtentDims( tdims, tmax );
- verify_val(HDmemcmp(tdims, dims2, SPACE2_RANK * sizeof(unsigned)), 0,
- "DataSpace::getSimpleExtentDims", __LINE__, __FILE__);
- verify_val(HDmemcmp(tmax, max2, SPACE2_RANK * sizeof(unsigned)), 0,
- "DataSpace::getSimpleExtentDims", __LINE__, __FILE__);
-
- // Check to be sure we can't create a simple data space that has too
- // many dimensions.
- try {
- DataSpace manydims_ds(H5S_MAX_RANK+1, dims3, NULL);
-
- // Should FAIL but didn't, so throw an invalid action exception
- throw InvalidActionException("DataSpace constructor", "Library allowed overwrite of existing dataset");
- }
- catch (DataSpaceIException& E) // Simple data space with too many dims
- {} // do nothing, exception expected
+ // Create simple dataspace sid1
+ DataSpace sid1 (SPACE1_RANK, dims1 );
+
+ // Get simple extent npoints of the dataspace sid1 and verify it
+ hssize_t n; // Number of dataspace elements
+ n = sid1.getSimpleExtentNpoints();
+ verify_val((long)n, (long)(SPACE1_DIM1 * SPACE1_DIM2 * SPACE1_DIM3),
+ "DataSpace::getSimpleExtentNpoints", __LINE__, __FILE__);
+
+ // Get the logical rank of dataspace sid1 and verify it
+ int rank; // Logical rank of dataspace
+ rank = sid1.getSimpleExtentNdims();
+ verify_val(rank, SPACE1_RANK, "DataSpace::getSimpleExtentNdims", __LINE__, __FILE__);
+
+ // Retrieves dimension size of dataspace sid1 and verify it
+ int ndims; // Number of dimensions
+ hsize_t tdims[4]; // Dimension array to test with
+ ndims = sid1.getSimpleExtentDims( tdims );
+ verify_val(ndims, SPACE1_RANK, "DataSpace::getSimpleExtentDims", __LINE__, __FILE__);
+ verify_val(HDmemcmp(tdims, dims1, SPACE1_RANK * sizeof(unsigned)), 0,
+ "DataSpace::getSimpleExtentDims", __LINE__, __FILE__);
+
+ // Create simple dataspace sid2
+ hsize_t max2[] = {SPACE2_MAX1, SPACE2_MAX2, SPACE2_MAX3, SPACE2_MAX4};
+ DataSpace sid2 (SPACE2_RANK, dims2, max2);
+
+ // Get simple extent npoints of dataspace sid2 and verify it
+ n = sid2.getSimpleExtentNpoints();
+ verify_val((long)n, (long)(SPACE2_DIM1 * SPACE2_DIM2 * SPACE2_DIM3 * SPACE2_DIM4),
+ "DataSpace::getSimpleExtentNpoints", __LINE__, __FILE__);
+
+ // Get the logical rank of dataspace sid2 and verify it
+ rank = sid2.getSimpleExtentNdims();
+ verify_val(rank, SPACE2_RANK, "DataSpace::getSimpleExtentNdims", __LINE__, __FILE__);
+
+ // Retrieves dimension size and max size of dataspace sid2 and
+ // verify them
+ ndims = sid2.getSimpleExtentDims( tdims, tmax );
+ verify_val(HDmemcmp(tdims, dims2, SPACE2_RANK * sizeof(unsigned)), 0,
+ "DataSpace::getSimpleExtentDims", __LINE__, __FILE__);
+ verify_val(HDmemcmp(tmax, max2, SPACE2_RANK * sizeof(unsigned)), 0,
+ "DataSpace::getSimpleExtentDims", __LINE__, __FILE__);
+
+ // Check to be sure we can't create a simple data space that has too
+ // many dimensions.
+ try {
+ DataSpace manydims_ds(H5S_MAX_RANK+1, dims3, NULL);
+
+ // Should FAIL but didn't, so throw an invalid action exception
+ throw InvalidActionException("DataSpace constructor", "Library allowed overwrite of existing dataset");
+ }
+ catch (DataSpaceIException& E) // Simple data space with too many dims
+ {} // do nothing, exception expected
/*
- * Try reading a file that has been prepared that has a dataset with a
- * higher dimensionality than what the library can handle.
- *
- * If this test fails and the H5S_MAX_RANK variable has changed, follow
- * the instructions in space_overflow.c for regenating the th5s.h5 file.
- */
- char *tmp_str = new char[TESTFILE.length()+1];
- strcpy(tmp_str, TESTFILE.c_str());
- const char *testfile = H5_get_srcdir_filename(tmp_str);
- delete []tmp_str;
-
- // Create file
- H5File fid1(testfile, H5F_ACC_RDONLY);
-
- // Try to open the dataset that has higher dimensionality than
- // what the library can handle and this operation should fail.
- try {
- DataSet dset1 = fid1.openDataSet( "dset" );
-
- // Should FAIL but didn't, so throw an invalid action exception
- throw InvalidActionException("H5File::openDataSet", "Opening a dataset with higher dimensionality than what the library can handle");
- }
- catch (FileIException& E) // catching higher dimensionality dataset
- {} // do nothing, exception expected
+ * Try reading a file that has been prepared that has a dataset with a
+ * higher dimensionality than what the library can handle.
+ *
+ * If this test fails and the H5S_MAX_RANK variable has changed, follow
+ * the instructions in space_overflow.c for regenerating the th5s.h5 file.
+ */
+ char *tmp_str = new char[TESTFILE.length()+1];
+ strcpy(tmp_str, TESTFILE.c_str());
+ const char *testfile = H5_get_srcdir_filename(tmp_str);
+ delete []tmp_str;
+
+ // Create file
+ H5File fid1(testfile, H5F_ACC_RDONLY);
+
+ // Try to open the dataset that has higher dimensionality than
+ // what the library can handle and this operation should fail.
+ try {
+ DataSet dset1 = fid1.openDataSet( "dset" );
+
+ // Should FAIL but didn't, so throw an invalid action exception
+ throw InvalidActionException("H5File::openDataSet", "Opening a dataset with higher dimensionality than what the library can handle");
+ }
+ catch (FileIException& E) // catching higher dimensionality dataset
+ {} // do nothing, exception expected
// CHECK_I(ret, "H5Fclose"); // leave this here, later, fake a failure
- // in the p_close see how this will handle it. - BMR
+ // in the p_close see how this will handle it. - BMR
- PASSED();
- } // end of try block
+ PASSED();
+ } // end of try block
catch (InvalidActionException& E)
{
@@ -219,22 +216,22 @@ static void test_h5s_basic()
/*-------------------------------------------------------------------------
*
- * Function: test_h5s_scalar_write
+ * Function: test_h5s_scalar_write
*
- * Purpose: Test scalar H5S (dataspace) writing code
+ * Purpose Test scalar H5S (dataspace) writing code
*
- * Return: none
+ * Return none
*
- * Programmer: Binh-Minh Ribler (using C version)
+ * Programmer Binh-Minh Ribler (using C version)
* Mar 2001
*
* Modifications:
* January, 2005: C tests' macro VERIFY casts values to 'long' for all
- * cases. Since there are no operator<< for 'long long'
- * or int64 in VS C++ ostream, I casted the hssize_t values
- * passed to verify_val to 'long' as well. If problems
- * arises later, this will have to be specificly handled
- * with a special routine.
+ * cases. Since there is no operator<< for 'long long'
+ * or int64 in VS C++ ostream, I cast the hssize_t values
+ * passed to verify_val to 'long' as well. If problems
+ * arise later, this will have to be specifically handled
+ * with a special routine.
*-------------------------------------------------------------------------
*/
static void test_h5s_scalar_write()
@@ -243,124 +240,124 @@ static void test_h5s_scalar_write()
SUBTEST("Scalar Dataspace Writing");
try {
- // Create file
- H5File fid1(DATAFILE, H5F_ACC_TRUNC);
+ // Create file
+ H5File fid1(DATAFILE, H5F_ACC_TRUNC);
- // Create scalar dataspace
- DataSpace sid1(SPACE3_RANK, NULL);
+ // Create scalar dataspace
+ DataSpace sid1(SPACE3_RANK, NULL);
- //n = H5Sget_simple_extent_npoints(sid1);
- hssize_t n; // Number of dataspace elements
- n = sid1.getSimpleExtentNpoints();
- verify_val((long)n, 1, "DataSpace::getSimpleExtentNpoints", __LINE__, __FILE__);
+ //n = H5Sget_simple_extent_npoints(sid1);
+ hssize_t n; // Number of dataspace elements
+ n = sid1.getSimpleExtentNpoints();
+ verify_val((long)n, 1, "DataSpace::getSimpleExtentNpoints", __LINE__, __FILE__);
- int rank; // Logical rank of dataspace
- rank = sid1.getSimpleExtentNdims();
- verify_val(rank, SPACE3_RANK, "DataSpace::getSimpleExtentNdims", __LINE__, __FILE__);
+ int rank; // Logical rank of dataspace
+ rank = sid1.getSimpleExtentNdims();
+ verify_val(rank, SPACE3_RANK, "DataSpace::getSimpleExtentNdims", __LINE__, __FILE__);
- // Retrieves dimension size of dataspace sid1 and verify it
- int ndims; // Number of dimensions
- hsize_t tdims[4]; // Dimension array to test with
- ndims = sid1.getSimpleExtentDims( tdims );
- verify_val(ndims, 0, "DataSpace::getSimpleExtentDims", __LINE__, __FILE__);
+ // Retrieves dimension size of dataspace sid1 and verify it
+ int ndims; // Number of dimensions
+ hsize_t tdims[4]; // Dimension array to test with
+ ndims = sid1.getSimpleExtentDims( tdims );
+ verify_val(ndims, 0, "DataSpace::getSimpleExtentDims", __LINE__, __FILE__);
- // Verify extent type
- H5S_class_t ext_type; // Extent type
- ext_type = sid1.getSimpleExtentType();
- verify_val(ext_type, H5S_SCALAR, "DataSpace::getSimpleExtentType", __LINE__, __FILE__);
+ // Verify extent type
+ H5S_class_t ext_type; // Extent type
+ ext_type = sid1.getSimpleExtentType();
+ verify_val(ext_type, H5S_SCALAR, "DataSpace::getSimpleExtentType", __LINE__, __FILE__);
- // Create and write a dataset
- DataSet dataset = fid1.createDataSet("Dataset1", PredType::NATIVE_UINT,sid1);
- dataset.write(&space3_data, PredType::NATIVE_UINT);
+ // Create and write a dataset
+ DataSet dataset = fid1.createDataSet("Dataset1", PredType::NATIVE_UINT,sid1);
+ dataset.write(&space3_data, PredType::NATIVE_UINT);
- PASSED();
+ PASSED();
} // end of try block
catch (Exception& E)
{
- issue_fail_msg("test_h5s_scalar_write()", __LINE__, __FILE__, E.getCDetailMsg());
+ issue_fail_msg("test_h5s_scalar_write()", __LINE__, __FILE__, E.getCDetailMsg());
}
} // test_h5s_scalar_write()
/*-------------------------------------------------------------------------
*
- * Function: test_h5s_scalar_read
+ * Function: test_h5s_scalar_read
*
- * Purpose: Test scalar H5S (dataspace) reading code
+ * Purpose Test scalar H5S (dataspace) reading code
*
- * Return: none
+ * Return none
*
- * Programmer: Binh-Minh Ribler (using C version)
+ * Programmer Binh-Minh Ribler (using C version)
* Mar 2001
*
* Modifications:
* January, 2005: C tests' macro VERIFY casts values to 'long' for all
- * cases. Since there are no operator<< for 'long long'
- * or int64 in VS C++ ostream, I casted the hssize_t values
- * passed to verify_val to 'long' as well. If problems
- * arises later, this will have to be specificly handled
- * with a special routine.
+ * cases. Since there is no operator<< for 'long long'
+ * or int64 in VS C++ ostream, I cast the hssize_t values
+ * passed to verify_val to 'long' as well. If problems
+ * arise later, this will have to be specifically handled
+ * with a special routine.
*-------------------------------------------------------------------------
*/
static void test_h5s_scalar_read()
{
- hsize_t tdims[4]; // Dimension array to test with
+ hsize_t tdims[4]; // Dimension array to test with
// Output message about test being performed
SUBTEST("Scalar Dataspace Reading");
try {
- // Open file
- H5File fid1(DATAFILE, H5F_ACC_RDWR);
+ // Open file
+ H5File fid1(DATAFILE, H5F_ACC_RDWR);
- // Create a dataset
- DataSet dataset = fid1.openDataSet("Dataset1");
+ // Open the dataset
+ DataSet dataset = fid1.openDataSet("Dataset1");
- DataSpace sid1 = dataset.getSpace();
+ DataSpace sid1 = dataset.getSpace();
- // Get the number of dataspace elements
- hssize_t n = sid1.getSimpleExtentNpoints();
- verify_val((long)n, 1, "DataSpace::getSimpleExtentNpoints", __LINE__, __FILE__);
+ // Get the number of dataspace elements
+ hssize_t n = sid1.getSimpleExtentNpoints();
+ verify_val((long)n, 1, "DataSpace::getSimpleExtentNpoints", __LINE__, __FILE__);
- // Get the logical rank of the dataspace
- int ndims = sid1.getSimpleExtentNdims();
- verify_val(ndims, SPACE3_RANK, "DataSpace::getSimpleExtentNdims", __LINE__, __FILE__);
+ // Get the logical rank of the dataspace
+ int ndims = sid1.getSimpleExtentNdims();
+ verify_val(ndims, SPACE3_RANK, "DataSpace::getSimpleExtentNdims", __LINE__, __FILE__);
- ndims = sid1.getSimpleExtentDims(tdims);
- verify_val(ndims, 0, "DataSpace::getSimpleExtentDims", __LINE__, __FILE__);
+ ndims = sid1.getSimpleExtentDims(tdims);
+ verify_val(ndims, 0, "DataSpace::getSimpleExtentDims", __LINE__, __FILE__);
- // Read data back and verify it
- unsigned rdata; // Scalar data read in
- dataset.read(&rdata, PredType::NATIVE_UINT);
- verify_val(rdata, space3_data, "DataSet::read", __LINE__, __FILE__);
+ // Read data back and verify it
+ unsigned rdata; // Scalar data read in
+ dataset.read(&rdata, PredType::NATIVE_UINT);
+ verify_val(rdata, space3_data, "DataSet::read", __LINE__, __FILE__);
- PASSED();
+ PASSED();
} // end of try block
catch (Exception& E)
{
- // all the exceptions caused by negative returned values by C APIs
- issue_fail_msg("test_h5s_scalar_read()", __LINE__, __FILE__, E.getCDetailMsg());
+ // all the exceptions caused by negative returned values by C APIs
+ issue_fail_msg("test_h5s_scalar_read()", __LINE__, __FILE__, E.getCDetailMsg());
}
} // test_h5s_scalar_read()
/*-------------------------------------------------------------------------
*
- * Function: test_h5s_null
+ * Function: test_h5s_null
*
- * Purpose: Test null H5S (dataspace) code
+ * Purpose Test null H5S (dataspace) code
*
- * Return: none
+ * Return none
*
- * Programmer: Raymond Lu (using C version)
+ * Programmer Raymond Lu (using C version)
* May 18, 2004
*
* Modifications:
* January, 2005: C tests' macro VERIFY casts values to 'long' for all
- * cases. Since there are no operator<< for 'long long'
- * or int64 in VS C++ ostream, I casted the hssize_t values
- * passed to verify_val to 'long' as well. If problems
- * arises later, this will have to be specificly handled
- * with a special routine.
+ * cases. Since there is no operator<< for 'long long'
+ * or int64 in VS C++ ostream, I cast the hssize_t values
+ * passed to verify_val to 'long' as well. If problems
+ * arise later, this will have to be specifically handled
+ * with a special routine.
*-------------------------------------------------------------------------
*/
static void test_h5s_null()
@@ -369,53 +366,53 @@ static void test_h5s_null()
SUBTEST("Null Dataspace Writing");
try {
- // Create file
- H5File fid1(DATAFILE, H5F_ACC_TRUNC);
+ // Create file
+ H5File fid1(DATAFILE, H5F_ACC_TRUNC);
- // Create scalar dataspace
- DataSpace sid1(H5S_NULL);
+ // Create a null dataspace
+ DataSpace sid1(H5S_NULL);
- hssize_t n; // Number of dataspace elements
- n = sid1.getSimpleExtentNpoints();
- verify_val((long)n, 0, "DataSpace::getSimpleExtentNpoints", __LINE__, __FILE__);
+ hssize_t n; // Number of dataspace elements
+ n = sid1.getSimpleExtentNpoints();
+ verify_val((long)n, 0, "DataSpace::getSimpleExtentNpoints", __LINE__, __FILE__);
- // Create a dataset
- DataSet dataset = fid1.createDataSet("Dataset1", PredType::NATIVE_UINT,sid1);
+ // Create a dataset
+ DataSet dataset = fid1.createDataSet("Dataset1", PredType::NATIVE_UINT,sid1);
// Try to write nothing to the dataset
- dataset.write(&space5_data, PredType::NATIVE_INT);
+ dataset.write(&space5_data, PredType::NATIVE_INT);
// Read the data. Make sure no change to the buffer
- dataset.read(&space5_data, PredType::NATIVE_INT);
- verify_val(space5_data, 7, "DataSet::read", __LINE__, __FILE__);
+ dataset.read(&space5_data, PredType::NATIVE_INT);
+ verify_val(space5_data, 7, "DataSet::read", __LINE__, __FILE__);
- PASSED();
+ PASSED();
} // end of try block
catch (Exception& E)
{
- issue_fail_msg("test_h5s_null()", __LINE__, __FILE__, E.getCDetailMsg());
+ issue_fail_msg("test_h5s_null()", __LINE__, __FILE__, E.getCDetailMsg());
}
} // test_h5s_null()
/*-------------------------------------------------------------------------
*
- * Function: test_h5s_compound_scalar_write
+ * Function: test_h5s_compound_scalar_write
*
- * Purpose: Test scalar H5S (dataspace) writing for compound
- * datatypes
+ * Purpose Test scalar H5S (dataspace) writing for compound
+ * datatypes
*
- * Return: none
+ * Return none
*
- * Programmer: Binh-Minh Ribler (using C version)
+ * Programmer Binh-Minh Ribler (using C version)
* Mar 2001
*
* Modifications:
* January, 2005: C tests' macro VERIFY casts values to 'long' for all
- * cases. Since there are no operator<< for 'long long'
- * or int64 in VS C++ ostream, I casted the hssize_t values
- * passed to verify_val to 'long' as well. If problems
- * arises later, this will have to be specificly handled
- * with a special routine.
+ * cases. Since there is no operator<< for 'long long'
+ * or int64 in VS C++ ostream, I cast the hssize_t values
+ * passed to verify_val to 'long' as well. If problems
+ * arise later, this will have to be specifically handled
+ * with a special routine.
*-------------------------------------------------------------------------
*/
static void test_h5s_compound_scalar_write()
@@ -424,135 +421,135 @@ static void test_h5s_compound_scalar_write()
SUBTEST("Compound Dataspace Writing");
try {
- // Create file
- H5File fid1(DATAFILE, H5F_ACC_TRUNC);
-
- // Create the compound datatype.
- CompType tid1(sizeof(struct space4_struct));
- space4_field1_off=HOFFSET(struct space4_struct, c1);
- tid1.insertMember(SPACE4_FIELDNAME1, space4_field1_off,
- PredType::NATIVE_SCHAR);
- space4_field2_off=HOFFSET(struct space4_struct, u);
- tid1.insertMember(SPACE4_FIELDNAME2, space4_field2_off,
- PredType::NATIVE_UINT);
- space4_field3_off=HOFFSET(struct space4_struct, f);
- tid1.insertMember(SPACE4_FIELDNAME3, space4_field3_off,
- PredType::NATIVE_FLOAT);
- space4_field4_off=HOFFSET(struct space4_struct, c2);
- tid1.insertMember(SPACE4_FIELDNAME4, space4_field4_off,
- PredType::NATIVE_SCHAR);
-
- // Create scalar dataspace
- DataSpace sid1(SPACE3_RANK, NULL);
-
- // Get the number of dataspace elements
- hssize_t n = sid1.getSimpleExtentNpoints();
- verify_val((long)n, 1, "DataSpace::getSimpleExtentNpoints", __LINE__, __FILE__);
-
- // Get the logical rank of the dataspace
- int ndims = sid1.getSimpleExtentNdims();
- verify_val(ndims, SPACE3_RANK, "DataSpace::getSimpleExtentNdims", __LINE__, __FILE__);
-
- hsize_t tdims[4]; // Dimension array to test with
- ndims = sid1.getSimpleExtentDims(tdims);
- verify_val(ndims, 0, "DataSpace::getSimpleExtentDims", __LINE__, __FILE__);
-
- // Create and write a dataset
- DataSet dataset = fid1.createDataSet("Dataset1", tid1, sid1);
- dataset.write(&space4_data, tid1);
-
- PASSED();
- } // end of try block
+ // Create file
+ H5File fid1(DATAFILE, H5F_ACC_TRUNC);
+
+ // Create the compound datatype.
+ CompType tid1(sizeof(struct space4_struct));
+ space4_field1_off=HOFFSET(struct space4_struct, c1);
+ tid1.insertMember(SPACE4_FIELDNAME1, space4_field1_off,
+ PredType::NATIVE_SCHAR);
+ space4_field2_off=HOFFSET(struct space4_struct, u);
+ tid1.insertMember(SPACE4_FIELDNAME2, space4_field2_off,
+ PredType::NATIVE_UINT);
+ space4_field3_off=HOFFSET(struct space4_struct, f);
+ tid1.insertMember(SPACE4_FIELDNAME3, space4_field3_off,
+ PredType::NATIVE_FLOAT);
+ space4_field4_off=HOFFSET(struct space4_struct, c2);
+ tid1.insertMember(SPACE4_FIELDNAME4, space4_field4_off,
+ PredType::NATIVE_SCHAR);
+
+ // Create scalar dataspace
+ DataSpace sid1(SPACE3_RANK, NULL);
+
+ // Get the number of dataspace elements
+ hssize_t n = sid1.getSimpleExtentNpoints();
+ verify_val((long)n, 1, "DataSpace::getSimpleExtentNpoints", __LINE__, __FILE__);
+
+ // Get the logical rank of the dataspace
+ int ndims = sid1.getSimpleExtentNdims();
+ verify_val(ndims, SPACE3_RANK, "DataSpace::getSimpleExtentNdims", __LINE__, __FILE__);
+
+ hsize_t tdims[4]; // Dimension array to test with
+ ndims = sid1.getSimpleExtentDims(tdims);
+ verify_val(ndims, 0, "DataSpace::getSimpleExtentDims", __LINE__, __FILE__);
+
+ // Create and write a dataset
+ DataSet dataset = fid1.createDataSet("Dataset1", tid1, sid1);
+ dataset.write(&space4_data, tid1);
+
+ PASSED();
+ } // end of try block
catch (Exception& E)
{
- // all the exceptions caused by negative returned values by C APIs
- issue_fail_msg("test_h5s_compound_scalar_write()", __LINE__, __FILE__, E.getCDetailMsg());
+ // all the exceptions caused by negative returned values by C APIs
+ issue_fail_msg("test_h5s_compound_scalar_write()", __LINE__, __FILE__, E.getCDetailMsg());
}
} // test_h5s_compound_scalar_write()
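Aside from the reindentation, the hunk above is a complete recipe for writing a compound value into a scalar dataset. Condensed into a standalone program it reads as below; this is a sketch only, and the struct, field names, and file name are illustrative stand-ins rather than the test's own constants (those live in the elided part of th5s.cpp).

    #include <iostream>
    #include "H5Cpp.h"
    using namespace H5;

    struct record_t {          // illustrative compound payload
        signed char c1;
        unsigned    u;
        float       f;
        signed char c2;
    };

    int main()
    {
        try {
            H5File file("compound_scalar.h5", H5F_ACC_TRUNC);

            // Describe the in-memory struct layout member by member
            CompType rec_type(sizeof(record_t));
            rec_type.insertMember("c1", HOFFSET(record_t, c1), PredType::NATIVE_SCHAR);
            rec_type.insertMember("u",  HOFFSET(record_t, u),  PredType::NATIVE_UINT);
            rec_type.insertMember("f",  HOFFSET(record_t, f),  PredType::NATIVE_FLOAT);
            rec_type.insertMember("c2", HOFFSET(record_t, c2), PredType::NATIVE_SCHAR);

            // A scalar dataspace holds exactly one element and has rank 0
            DataSpace scalar_space(H5S_SCALAR);

            record_t rec = {'v', 987123, -3.14F, 'g'};
            DataSet dset = file.createDataSet("Dataset1", rec_type, scalar_space);
            dset.write(&rec, rec_type);
        }
        catch (Exception& e) {
            std::cerr << e.getDetailMsg() << std::endl;
            return 1;
        }
        return 0;
    }

HOFFSET keeps the member offsets in sync with whatever padding the compiler inserts, which is why the test computes them instead of hard-coding byte positions.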
/*-------------------------------------------------------------------------
*
- * Function: test_h5s_compound_scalar_read
+ * Function: test_h5s_compound_scalar_read
*
- * Purpose: Test scalar H5S (dataspace) reading for compound
- * datatypes
+ * Purpose Test scalar H5S (dataspace) reading for compound
+ * datatypes
*
- * Return: none
+ * Return none
*
- * Programmer: Binh-Minh Ribler (using C version)
+ * Programmer Binh-Minh Ribler (using C version)
* Mar 2001
*
* Modifications:
* January, 2005: C tests' macro VERIFY casts values to 'long' for all
- * cases. Since there are no operator<< for 'long long'
- * or int64 in VS C++ ostream, I casted the hssize_t values
- * passed to verify_val to 'long' as well. If problems
- * arises later, this will have to be specificly handled
- * with a special routine.
+ * cases. Since there is no operator<< for 'long long'
+ * or int64 in the VS C++ ostream, I cast the hssize_t values
+ * passed to verify_val to 'long' as well. If problems
+ * arise later, this will have to be specifically handled
+ * with a special routine.
*-------------------------------------------------------------------------
*/
static void test_h5s_compound_scalar_read()
{
- hsize_t tdims[4]; // Dimension array to test with
+ hsize_t tdims[4]; // Dimension array to test with
// Output message about test being performed
SUBTEST("Compound Dataspace Reading");
try {
- // Open file
- H5File fid1(DATAFILE, H5F_ACC_RDWR);
+ // Open file
+ H5File fid1(DATAFILE, H5F_ACC_RDWR);
- // Create a dataset
- DataSet dataset = fid1.openDataSet("Dataset1");
+ // Create a dataset
+ DataSet dataset = fid1.openDataSet("Dataset1");
- DataSpace sid1 = dataset.getSpace();
+ DataSpace sid1 = dataset.getSpace();
- // Get the number of dataspace elements
- hssize_t n = sid1.getSimpleExtentNpoints();
- verify_val((long)n, 1, "DataSpace::getSimpleExtentNpoints", __LINE__, __FILE__);
+ // Get the number of dataspace elements
+ hssize_t n = sid1.getSimpleExtentNpoints();
+ verify_val((long)n, 1, "DataSpace::getSimpleExtentNpoints", __LINE__, __FILE__);
- // Get the logical rank of the dataspace
- int ndims = sid1.getSimpleExtentNdims();
- verify_val(ndims, SPACE3_RANK, "DataSpace::getSimpleExtentNdims", __LINE__, __FILE__);
+ // Get the logical rank of the dataspace
+ int ndims = sid1.getSimpleExtentNdims();
+ verify_val(ndims, SPACE3_RANK, "DataSpace::getSimpleExtentNdims", __LINE__, __FILE__);
- ndims = sid1.getSimpleExtentDims(tdims);
- verify_val(ndims, 0, "DataSpace::getSimpleExtentDims", __LINE__, __FILE__);
+ ndims = sid1.getSimpleExtentDims(tdims);
+ verify_val(ndims, 0, "DataSpace::getSimpleExtentDims", __LINE__, __FILE__);
- // Get the datatype of this dataset.
- CompType type(dataset);
+ // Get the datatype of this dataset.
+ CompType type(dataset);
- struct space4_struct rdata; // Scalar data read in
- dataset.read(&rdata, type);
+ struct space4_struct rdata; // Scalar data read in
+ dataset.read(&rdata, type);
- // Verify read data
- if(HDmemcmp(&space4_data,&rdata,sizeof(struct space4_struct)))
- {
+ // Verify read data
+ if(HDmemcmp(&space4_data,&rdata,sizeof(struct space4_struct)))
+ {
cerr << "scalar data different: space4_data.c1="
- << space4_data.c1 << ", read_data4.c1=" << rdata.c1 << endl;
+ << space4_data.c1 << ", read_data4.c1=" << rdata.c1 << endl;
cerr << "scalar data different: space4_data.u="
- << space4_data.u << ", read_data4.u=" << rdata.u << endl;
+ << space4_data.u << ", read_data4.u=" << rdata.u << endl;
cerr << "scalar data different: space4_data.f="
- << space4_data.f << ", read_data4.f=" << rdata.f << endl;
+ << space4_data.f << ", read_data4.f=" << rdata.f << endl;
TestErrPrintf("scalar data different: space4_data.c1=%c, read_data4.c1=%c\n",
- space4_data.c1, rdata.c2);
- } // end if
- PASSED();
+ space4_data.c1, rdata.c2);
+ } // end if
+ PASSED();
} // end of try block
catch (Exception& E)
{
- // all the exceptions caused by negative returned values by C APIs
- issue_fail_msg("test_h5s_compound_scalar_read()", __LINE__, __FILE__, E.getCDetailMsg());
+ // all the exceptions caused by negative returned values by C APIs
+ issue_fail_msg("test_h5s_compound_scalar_read()", __LINE__, __FILE__, E.getCDetailMsg());
}
} // test_h5s_compound_scalar_read()
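The matching read path builds the memory datatype straight from the dataset rather than re-declaring the members, which is the detail worth copying. A sketch that reads back the file produced by the previous example (same illustrative struct and file name):

    #include <cstring>
    #include <iostream>
    #include "H5Cpp.h"
    using namespace H5;

    struct record_t {          // must match the layout used when writing
        signed char c1;
        unsigned    u;
        float       f;
        signed char c2;
    };

    int main()
    {
        try {
            H5File file("compound_scalar.h5", H5F_ACC_RDONLY);
            DataSet dset = file.openDataSet("Dataset1");

            // Sanity-check the dataspace: one element, rank 0
            DataSpace space = dset.getSpace();
            if (space.getSimpleExtentNpoints() != 1 || space.getSimpleExtentNdims() != 0)
                std::cerr << "unexpected dataspace extent" << std::endl;

            // Build the memory datatype straight from the dataset itself
            CompType rec_type(dset);

            record_t rec;
            std::memset(&rec, 0, sizeof(rec));
            dset.read(&rec, rec_type);
            std::cout << "c1=" << rec.c1 << " u=" << rec.u
                      << " f=" << rec.f << " c2=" << rec.c2 << std::endl;
        }
        catch (Exception& e) {
            std::cerr << e.getDetailMsg() << std::endl;
            return 1;
        }
        return 0;
    }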
/*-------------------------------------------------------------------------
*
- * Function: test_h5s
+ * Function: test_h5s
*
- * Purpose: Main dataspace testing routine
+ * Purpose Main dataspace testing routine
*
- * Return: none
+ * Return none
*
- * Programmer: Binh-Minh Ribler (using C version)
+ * Programmer Binh-Minh Ribler (using C version)
* Mar 2001
*
* Modifications:
@@ -564,23 +561,23 @@ void test_h5s()
// Output message about test being performed
MESSAGE(5, ("Testing Dataspaces\n"));
- test_h5s_basic(); // Test basic H5S code
- test_h5s_scalar_write(); // Test scalar H5S writing code
- test_h5s_scalar_read(); // Test scalar H5S reading code
- test_h5s_null(); // Test null H5S code
- test_h5s_compound_scalar_write(); // Test compound datatype scalar H5S writing code
- test_h5s_compound_scalar_read(); // Test compound datatype scalar H5S reading code
+ test_h5s_basic(); // Test basic H5S code
+ test_h5s_scalar_write(); // Test scalar H5S writing code
+ test_h5s_scalar_read(); // Test scalar H5S reading code
+ test_h5s_null(); // Test null H5S code
+ test_h5s_compound_scalar_write(); // Test compound datatype scalar H5S writing code
+ test_h5s_compound_scalar_read(); // Test compound datatype scalar H5S reading code
} // test_h5s()
/*-------------------------------------------------------------------------
- * Function: cleanup_h5s
+ * Function: cleanup_h5s
*
- * Purpose: Cleanup temporary test files
+ * Purpose Cleanup temporary test files
*
- * Return: none
+ * Return none
*
- * Programmer: Albert Cheng
+ * Programmer Albert Cheng
* July 2, 1998
*
* Modifications:
diff --git a/c++/test/titerate.cpp b/c++/test/titerate.cpp
index 7ee2b53..f75d92e 100644
--- a/c++/test/titerate.cpp
+++ b/c++/test/titerate.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*****************************************************************************
@@ -49,20 +47,20 @@ using namespace H5;
//#define SPACE1_RANK 1
//#define SPACE1_DIM1 4
-const H5std_string FILE_ITERATE("titerate.h5");
-const H5std_string GROUP1("Top Group");
-const H5std_string GROUP1_PATH("/Top Group");
-const H5std_string GROUP1_1("Sub-Group 1.1");
-const H5std_string GROUP1_1_PATH("/Top Group/Sub-Group 1.1");
-const H5std_string GROUP1_2("Sub-Group 1.2");
-const H5std_string GROUP1_2_PATH("/Top Group/Sub-Group 1.2");
-const H5std_string DSET_DEFAULT_NAME("default");
-const H5std_string DSET_IN_FILE("Dataset in File");
-const H5std_string DSET_IN_FILE_PATH("/Dataset in File");
-const H5std_string DSET_IN_GRP1("Dataset in Group 1");
-const H5std_string DSET_IN_GRP1_PATH("/Top Group/Dataset in Group 1");
-const H5std_string DSET_IN_GRP1_2("Dataset in Group 1.2");
-const H5std_string DSET_IN_GRP1_2_PATH("/Top Group/Sub-Group 1.2/Dataset in Group 1.2");
+const H5std_string FILE_ITERATE("titerate.h5");
+const H5std_string GROUP1("Top Group");
+const H5std_string GROUP1_PATH("/Top Group");
+const H5std_string GROUP1_1("Sub-Group 1.1");
+const H5std_string GROUP1_1_PATH("/Top Group/Sub-Group 1.1");
+const H5std_string GROUP1_2("Sub-Group 1.2");
+const H5std_string GROUP1_2_PATH("/Top Group/Sub-Group 1.2");
+const H5std_string DSET_DEFAULT_NAME("default");
+const H5std_string DSET_IN_FILE("Dataset in File");
+const H5std_string DSET_IN_FILE_PATH("/Dataset in File");
+const H5std_string DSET_IN_GRP1("Dataset in Group 1");
+const H5std_string DSET_IN_GRP1_PATH("/Top Group/Dataset in Group 1");
+const H5std_string DSET_IN_GRP1_2("Dataset in Group 1.2");
+const H5std_string DSET_IN_GRP1_2_PATH("/Top Group/Sub-Group 1.2/Dataset in Group 1.2");
typedef enum {
RET_ZERO,
@@ -127,15 +125,15 @@ liter_cb(hid_t H5_ATTR_UNUSED group, const char *name, const H5L_info_t H5_ATTR_
} /* end liter_cb() */
/*-------------------------------------------------------------------------
- * Function: test_iter_group
+ * Function: test_iter_group
*
- * Purpose: Tests group iteration
+ * Purpose Tests group iteration
*
- * Return: Success: 0
- * Failure: -1
+ * Return Success: 0
+ * Failure: -1
*
- * Programmer: Binh-Minh Ribler
- * Friday, September 9, 2016
+ * Programmer Binh-Minh Ribler
+ * Friday, September 9, 2016
*
* Modifications:
*
@@ -148,142 +146,142 @@ static void test_iter_group(FileAccPropList& fapl)
char name[NAMELEN]; /* temporary name buffer */
char *lnames[NDATASETS + 2];/* Names of the links created */
iter_info info; /* Custom iteration information */
- herr_t ret; /* Generic return value */
+ herr_t ret; /* Generic return value */
/* Output message about test being performed */
SUBTEST("Group Iteration");
/* Create the test file with the datasets */
try {
- // Create file
- H5File file(FILE_ITERATE, H5F_ACC_TRUNC, FileCreatPropList::DEFAULT, fapl);
+ // Create file
+ H5File file(FILE_ITERATE, H5F_ACC_TRUNC, FileCreatPropList::DEFAULT, fapl);
- /* Test iterating over empty group */
- info.command = RET_ZERO;
- idx = 0;
- ret = H5Literate(file.getId(), H5_INDEX_NAME, H5_ITER_INC, &idx, liter_cb, &info);
- verify_val(ret, SUCCEED, "H5Literate", __LINE__, __FILE__);
+ /* Test iterating over empty group */
+ info.command = RET_ZERO;
+ idx = 0;
+ ret = H5Literate(file.getId(), H5_INDEX_NAME, H5_ITER_INC, &idx, liter_cb, &info);
+ verify_val(ret, SUCCEED, "H5Literate", __LINE__, __FILE__);
- DataType datatype(PredType::NATIVE_INT);
+ DataType datatype(PredType::NATIVE_INT);
- // Create a scalar file space
- DataSpace filespace;
+ // Create a scalar file space
+ DataSpace filespace;
- for (i=0; i< NDATASETS; i++)
- {
+ for (i=0; i< NDATASETS; i++)
+ {
sprintf(name, "Dataset %d", i);
- // Create a dataset in the file
- DataSet dataset = file.createDataSet(name, datatype, filespace);
+ // Create a dataset in the file
+ DataSet dataset = file.createDataSet(name, datatype, filespace);
/* Keep a copy of the dataset names */
lnames[i] = HDstrdup(name);
check_values(lnames[i], "HDstrdup returns NULL", __LINE__, __FILE__);
- } /* end for */
+ } /* end for */
- /* Create a group and named datatype under root group for testing */
- Group grp(file.createGroup(GROUP1, 0));
- lnames[NDATASETS] = HDstrdup("grp");
- check_values(lnames[NDATASETS], "HDstrdup returns NULL", __LINE__, __FILE__);
+ /* Create a group and named datatype under root group for testing */
+ Group grp(file.createGroup(GROUP1, 0));
+ lnames[NDATASETS] = HDstrdup("grp");
+ check_values(lnames[NDATASETS], "HDstrdup returns NULL", __LINE__, __FILE__);
- datatype.commit(file, "dtype");
- lnames[NDATASETS + 1] = HDstrdup("dtype");
- check_values(lnames[NDATASETS], "HDstrdup returns NULL", __LINE__, __FILE__);
+ datatype.commit(file, "dtype");
+ lnames[NDATASETS + 1] = HDstrdup("dtype");
+ check_values(lnames[NDATASETS], "HDstrdup returns NULL", __LINE__, __FILE__);
- /* Sort the dataset names */
- HDqsort(lnames, (size_t)(NDATASETS + 2), sizeof(char *), iter_strcmp);
+ /* Sort the dataset names */
+ HDqsort(lnames, (size_t)(NDATASETS + 2), sizeof(char *), iter_strcmp);
- /* Iterate through the datasets in the root group in various ways */
+ /* Iterate through the datasets in the root group in various ways */
- // Open data file to read
- file.openFile(FILE_ITERATE, H5F_ACC_RDONLY, fapl);
+ // Open data file to read
+ file.openFile(FILE_ITERATE, H5F_ACC_RDONLY, fapl);
- // Open the root group
- Group root_group(file.openGroup("/"));
+ // Open the root group
+ Group root_group(file.openGroup("/"));
- // Get the number of object in the root group
- hsize_t nobjs = root_group.getNumObjs();
- verify_val(nobjs, (hsize_t)(NDATASETS + 2), "H5Gget_info", __LINE__, __FILE__);
+ // Get the number of object in the root group
+ hsize_t nobjs = root_group.getNumObjs();
+ verify_val(nobjs, (hsize_t)(NDATASETS + 2), "H5Gget_info", __LINE__, __FILE__);
- H5std_string obj_name;
- for (i = 0; i < nobjs; i++)
- {
- //H5O_info_t oinfo; /* Object info */
+ H5std_string obj_name;
+ for (i = 0; i < nobjs; i++)
+ {
+ //H5O_info_t oinfo; /* Object info */
- obj_name = root_group.getObjnameByIdx(i);
+ obj_name = root_group.getObjnameByIdx(i);
//ret = (herr_t)H5Lget_name_by_idx(root_group, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)i, dataset_name, (size_t)NAMELEN, H5P_DEFAULT);
- //oinfo = root_group.childObjType((hsize_t)i, H5_INDEX_NAME, H5_ITER_INC, ".");
+ //oinfo = root_group.childObjType((hsize_t)i, H5_INDEX_NAME, H5_ITER_INC, ".");
//ret = H5Oget_info_by_idx(root_group, ".", H5_INDEX_NAME, H5_ITER_INC, (hsize_t)i, &oinfo, H5P_DEFAULT);
- } /* end for */
-
- // Attempted to iterate with invalid index, should fail
- try {
- obj_name = root_group.getObjnameByIdx(NDATASETS + 3);
-
- // Should FAIL but didn't, so throw an invalid action exception
- throw InvalidActionException("Group::getObjnameByIdx", "Attempt to iterate with invalid index");
- }
- catch (GroupIException& invalid_action) // invalid index
- {} // do nothing, exception expected
-
- // Attempted to iterate with negative index, should fail
- try {
- info.command = RET_ZERO;
- idx = (hsize_t)-1;
- obj_name = root_group.getObjnameByIdx(idx);
-
- // Should FAIL but didn't, so throw an invalid action exception
- throw InvalidActionException("Group::getObjnameByIdx", "Attempt to iterate with negative index");
- }
- catch (FileIException& invalid_action) // invalid index
- {} // do nothing, exception expected
- catch (GroupIException& invalid_action) // invalid index
- {} // do nothing, exception expected
-
- /* Test skipping exactly as many entries as in the group */
- try {
- info.command = RET_ZERO;
- idx = NDATASETS + 2;
- obj_name = root_group.getObjnameByIdx(idx);
-
- // Should FAIL but didn't, so throw an invalid action exception
- throw InvalidActionException("Group::getObjnameByIdx", "Attempt to iterate with negative index");
- }
- catch (FileIException& invalid_action) // invalid index
- {} // do nothing, exception expected
- catch (GroupIException& invalid_action) // invalid index
- {} // do nothing, exception expected
-
- /* Test skipping more entries than are in the group */
- try {
- info.command = RET_ZERO;
- idx = NDATASETS + 3;
- obj_name = root_group.getObjnameByIdx(idx);
-
- // Should FAIL but didn't, so throw an invalid action exception
- throw InvalidActionException("Group::getObjnameByIdx", "Attempt to iterate with negative index");
- }
- catch (FileIException& invalid_action) // invalid index
- {} // do nothing, exception expected
- catch (GroupIException& invalid_action) // invalid index
- {} // do nothing, exception expected
-
- /* Free the dataset names */
- for(i = 0; i< (NDATASETS + 2); i++)
- HDfree(lnames[i]);
-
- // Everything will be closed as they go out of scope
-
- PASSED();
- } // try block
+ } /* end for */
+
+ // Attempted to iterate with invalid index, should fail
+ try {
+ obj_name = root_group.getObjnameByIdx(NDATASETS + 3);
+
+ // Should FAIL but didn't, so throw an invalid action exception
+ throw InvalidActionException("Group::getObjnameByIdx", "Attempt to iterate with invalid index");
+ }
+ catch (GroupIException& invalid_action) // invalid index
+ {} // do nothing, exception expected
+
+ // Attempted to iterate with negative index, should fail
+ try {
+ info.command = RET_ZERO;
+ idx = (hsize_t)-1;
+ obj_name = root_group.getObjnameByIdx(idx);
+
+ // Should FAIL but didn't, so throw an invalid action exception
+ throw InvalidActionException("Group::getObjnameByIdx", "Attempt to iterate with negative index");
+ }
+ catch (FileIException& invalid_action) // invalid index
+ {} // do nothing, exception expected
+ catch (GroupIException& invalid_action) // invalid index
+ {} // do nothing, exception expected
+
+ /* Test skipping exactly as many entries as in the group */
+ try {
+ info.command = RET_ZERO;
+ idx = NDATASETS + 2;
+ obj_name = root_group.getObjnameByIdx(idx);
+
+ // Should FAIL but didn't, so throw an invalid action exception
+ throw InvalidActionException("Group::getObjnameByIdx", "Attempt to iterate with negative index");
+ }
+ catch (FileIException& invalid_action) // invalid index
+ {} // do nothing, exception expected
+ catch (GroupIException& invalid_action) // invalid index
+ {} // do nothing, exception expected
+
+ /* Test skipping more entries than are in the group */
+ try {
+ info.command = RET_ZERO;
+ idx = NDATASETS + 3;
+ obj_name = root_group.getObjnameByIdx(idx);
+
+ // Should FAIL but didn't, so throw an invalid action exception
+ throw InvalidActionException("Group::getObjnameByIdx", "Attempt to iterate with negative index");
+ }
+ catch (FileIException& invalid_action) // invalid index
+ {} // do nothing, exception expected
+ catch (GroupIException& invalid_action) // invalid index
+ {} // do nothing, exception expected
+
+ /* Free the dataset names */
+ for(i = 0; i< (NDATASETS + 2); i++)
+ HDfree(lnames[i]);
+
+ // Everything will be closed as they go out of scope
+
+ PASSED();
+ } // try block
// catch all other exceptions
catch (Exception& E)
{
- issue_fail_msg("test_iter_group", __LINE__, __FILE__);
+ issue_fail_msg("test_iter_group", __LINE__, __FILE__);
}
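Two iteration styles appear in this test: the C-level H5Literate callback (no C++ wrapper exists for it here) and the index-based Group accessors getNumObjs/getObjnameByIdx. A minimal sketch of both, assuming an illustrative file with a few datasets; the callback follows the H5L_iterate_t prototype that liter_cb above uses:

    #include <cstdio>
    #include <iostream>
    #include "H5Cpp.h"
    using namespace H5;

    // H5Literate hands every link name in the group to this callback;
    // returning 0 keeps the iteration going.
    extern "C" herr_t print_link(hid_t, const char* name, const H5L_info_t*, void* opdata)
    {
        int* count = static_cast<int*>(opdata);
        std::cout << "link: " << name << std::endl;
        ++(*count);
        return 0;
    }

    int main()
    {
        try {
            H5File file("iterate_demo.h5", H5F_ACC_TRUNC);
            DataSpace scalar(H5S_SCALAR);
            for (int i = 0; i < 3; i++) {
                char name[32];
                sprintf(name, "Dataset %d", i);
                file.createDataSet(name, PredType::NATIVE_INT, scalar);
            }

            // Style 1: C-level callback iteration over the root group
            int count = 0;
            hsize_t idx = 0;
            H5Literate(file.getId(), H5_INDEX_NAME, H5_ITER_INC, &idx, print_link, &count);

            // Style 2: pure C++ index-based access
            Group root = file.openGroup("/");
            hsize_t nobjs = root.getNumObjs();
            for (hsize_t i = 0; i < nobjs; i++)
                std::cout << "object " << i << ": " << root.getObjnameByIdx(i) << std::endl;

            // Out-of-range indices are reported via exceptions, which is what
            // the negative cases in the test rely on
            try {
                root.getObjnameByIdx(nobjs + 1);
            }
            catch (Exception&) { /* expected */ }
        }
        catch (Exception& e) {
            std::cerr << e.getDetailMsg() << std::endl;
            return 1;
        }
        return 0;
    }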
#if 0
@@ -355,59 +353,59 @@ static void test_iter_group(FileAccPropList& fapl)
/****************************************************************
**
** printelems(): Open an attribute and verify that it has a
-** the correct name
+** the correct name
**
****************************************************************/
-const H5std_string FILE_NAME("titerate.h5");
-const H5std_string GRP_NAME("/Group_A");
-const H5std_string FDATASET_NAME( "file dset" );
-const H5std_string GDATASET_NAME( "group dset" );
-const H5std_string ATTR_NAME( "Units" );
-const H5std_string FATTR_NAME( "F attr" );
-const H5std_string GATTR_NAME( "G attr" );
-const int DIM1 = 2;
+const H5std_string FILE_NAME("titerate.h5");
+const H5std_string GRP_NAME("/Group_A");
+const H5std_string FDATASET_NAME("file dset");
+const H5std_string GDATASET_NAME("group dset");
+const H5std_string ATTR_NAME("Units");
+const H5std_string FATTR_NAME("F attr");
+const H5std_string GATTR_NAME("G attr");
+const int DIM1 = 2;
void printelems(const Group& group, const H5std_string& dsname, const H5std_string& atname)
{
try
{
- DataSet d1(group.openDataSet(dsname));
- DataSpace s1 = d1.getSpace();
- s1.close();
- d1.close();
-
- unsigned idx = 0;
- Attribute a1(group.openAttribute(idx));
- H5std_string aname = a1.getName();
+ DataSet d1(group.openDataSet(dsname));
+ DataSpace s1 = d1.getSpace();
+ s1.close();
+ d1.close();
+
+ unsigned idx = 0;
+ Attribute a1(group.openAttribute(idx));
+ H5std_string aname = a1.getName();
verify_val(aname, atname, "printelems", __LINE__, __FILE__);
- a1.close();
+ a1.close();
}
// catch failure caused by the DataSpace operations
catch( DataSpaceIException error )
{
- error.printError();
+ error.printError();
}
// catch failure caused by the Group operations
catch( GroupIException error )
{
- error.printError();
+ error.printError();
}
// catch failure caused by the DataSet operations
catch( DataSetIException error )
{
- error.printError();
+ error.printError();
}
}
/*-------------------------------------------------------------------------
- * Function: test_HDFFV_9920
+ * Function: test_HDFFV_9920
*
- * Purpose: Tests the fix for HDFFV-9920
+ * Purpose Tests the fix for HDFFV-9920
*
- * Programmer: Binh-Minh Ribler
- * Friday, September 9, 2016
+ * Programmer Binh-Minh Ribler
+ * Friday, September 9, 2016
*
* Modifications:
*
@@ -420,74 +418,74 @@ static void test_HDFFV_9920()
try
{
- // Create a new file and a group in it
- H5File file( FILE_NAME, H5F_ACC_TRUNC );
+ // Create a new file and a group in it
+ H5File file( FILE_NAME, H5F_ACC_TRUNC );
- Group gr1(file.createGroup(GRP_NAME));
+ Group gr1(file.createGroup(GRP_NAME));
- // Create the data space for the attribute.
- DataSpace dspace = DataSpace (1, dims );
+ // Create the data space for the attribute.
+ DataSpace dspace = DataSpace (1, dims );
- DataSet fds = file.createDataSet(FDATASET_NAME, PredType::STD_I32BE, dspace);
- DataSet gds = gr1.createDataSet(GDATASET_NAME, PredType::STD_I32BE, dspace);
+ DataSet fds = file.createDataSet(FDATASET_NAME, PredType::STD_I32BE, dspace);
+ DataSet gds = gr1.createDataSet(GDATASET_NAME, PredType::STD_I32BE, dspace);
- // Create a file attribute and a group attribute.
- Attribute fa1 = file.createAttribute(FATTR_NAME, PredType::STD_I32BE,
- dspace);
- Attribute ga1 = gr1.createAttribute(GATTR_NAME, PredType::STD_I32BE,
- dspace);
+ // Create a file attribute and a group attribute.
+ Attribute fa1 = file.createAttribute(FATTR_NAME, PredType::STD_I32BE,
+ dspace);
+ Attribute ga1 = gr1.createAttribute(GATTR_NAME, PredType::STD_I32BE,
+ dspace);
- // Write the attribute data.
- fa1.write( PredType::NATIVE_INT, attr_data);
- ga1.write( PredType::NATIVE_INT, attr_data);
+ // Write the attribute data.
+ fa1.write( PredType::NATIVE_INT, attr_data);
+ ga1.write( PredType::NATIVE_INT, attr_data);
- fa1.close();
- ga1.close();
- fds.close();
- gds.close();
+ fa1.close();
+ ga1.close();
+ fds.close();
+ gds.close();
- // Verify the attributes have correct names.
- printelems(file, FDATASET_NAME, FATTR_NAME);
- printelems(gr1, GDATASET_NAME, GATTR_NAME);
+ // Verify the attributes have correct names.
+ printelems(file, FDATASET_NAME, FATTR_NAME);
+ printelems(gr1, GDATASET_NAME, GATTR_NAME);
} // end of try block
// catch failure caused by the H5File operations
catch( DataSpaceIException error )
{
- error.printError();
+ error.printError();
}
// catch failure caused by the H5File operations
catch( AttributeIException error )
{
- error.printError();
+ error.printError();
}
// catch failure caused by the H5File operations
catch( FileIException error )
{
- error.printError();
+ error.printError();
}
// catch failure caused by the DataSet operations
catch( DataSetIException error )
{
- error.printError();
+ error.printError();
}
}
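Stripped of the test plumbing, the HDFFV-9920 scenario is: attach attributes to a file and to a group, close them, then re-open each attribute by index and ask for its name (the test routes this through printelems, which accepts the file as a Group). A sketch under those assumptions; the file and attribute names are placeholders, and the exact failure mode being guarded against is not restated in this diff:

    #include <iostream>
    #include "H5Cpp.h"
    using namespace H5;

    int main()
    {
        try {
            H5File file("attr_name_demo.h5", H5F_ACC_TRUNC);
            Group  grp = file.createGroup("/Group_A");

            hsize_t dims[1] = {2};
            DataSpace space(1, dims);
            int values[2] = {100, 200};

            // Attach one attribute to the file and one to the group
            Attribute fattr = file.createAttribute("F attr", PredType::STD_I32BE, space);
            Attribute gattr = grp.createAttribute("G attr", PredType::STD_I32BE, space);
            fattr.write(PredType::NATIVE_INT, values);
            gattr.write(PredType::NATIVE_INT, values);
            fattr.close();
            gattr.close();

            // Re-open each attribute by index and verify the reported names
            unsigned aidx = 0;
            Attribute a1 = file.openAttribute(aidx);
            Attribute a2 = grp.openAttribute(aidx);
            std::cout << "file attr:  " << a1.getName() << std::endl;   // expect "F attr"
            std::cout << "group attr: " << a2.getName() << std::endl;   // expect "G attr"
        }
        catch (Exception& e) {
            std::cerr << e.getDetailMsg() << std::endl;
            return 1;
        }
        return 0;
    }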
/*-------------------------------------------------------------------------
- * Function: test_iterate
+ * Function: test_iterate
*
- * Purpose: Tests iterate functionality
+ * Purpose Tests iterate functionality
*
- * Return: Success: 0
- * Failure: -1
+ * Return Success: 0
+ * Failure: -1
*
- * Programmer: Binh-Minh Ribler
- * Tuesday, September 6, 2016
+ * Programmer Binh-Minh Ribler
+ * Tuesday, September 6, 2016
*
* Modifications:
*
@@ -503,20 +501,20 @@ void test_iterate()
FileAccPropList fapl;
fapl.setLibverBounds(H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
- test_iter_group(fapl); // Test iterating groups
- test_HDFFV_9920(); // Test the fix of HDFFV-9920
- //test_iter_attr(fapl); // Test iterating attributes
+ test_iter_group(fapl); // Test iterating groups
+ test_HDFFV_9920(); // Test the fix of HDFFV-9920
+ //test_iter_attr(fapl); // Test iterating attributes
} // test_iterate
/*-------------------------------------------------------------------------
* Function: cleanup_iterate
*
- * Purpose: Cleanup temporary test files
+ * Purpose Cleanup temporary test files
*
- * Return: none
+ * Return none
*
- * Programmer: (use C version)
+ * Programmer (use C version)
*
* Modifications:
*
diff --git a/c++/test/tlinks.cpp b/c++/test/tlinks.cpp
index 1f7d14e..dc592b3 100644
--- a/c++/test/tlinks.cpp
+++ b/c++/test/tlinks.cpp
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*****************************************************************************
@@ -43,9 +41,9 @@ using namespace H5;
//#define H5G_TESTING
//#include "h5test.h"
-//#include "H5Gpkg.h" /* Groups */
-//#include "H5Iprivate.h" /* IDs */
-//#include "H5Lprivate.h" /* Links */
+//#include "H5Gpkg.h" /* Groups */
+//#include "H5Iprivate.h" /* IDs */
+//#include "H5Lprivate.h" /* Links */
/* File for external link test. Created with gen_udlinks.c */
#define LINKED_FILE "be_extlink2.h5"
@@ -64,39 +62,39 @@ const char *FILENAME[] = {
"links6", /* 9 */
"links7", /* 10 */
"links8", /* 11 */
- "extlinks0", /* 12: main files */
- "tmp/extlinks0", /* 13: */
- "extlinks1", /* 14: target files */
- "tmp/extlinks1", /* 15: */
- "extlinks2", /* 16: */
- "tmp/extlinks2", /* 17: */
- "extlinks3", /* 18: */
- "tmp/extlinks3", /* 19: */
- "extlinks4", /* 20: */
- "tmp/extlinks4", /* 21: */
- "extlinks5", /* 22: */
- "tmp/extlinks6", /* 23: */
- "extlinks7", /* 24: */
- "tmp/extlinks7", /* 25: */
- "tmp/extlinks8", /* 26: */
- "extlinks9", /* 27: */
- "tmp/extlinks9", /* 28: */
- "extlinks10", /* 29: */ /* TESTS for windows */
- "tmp/extlinks10", /* 30: */
- "tmp/extlinks11", /* 31: */
- "tmp/extlinks12", /* 32: */
- "extlinks13", /* 33: */
- "tmp/extlinks13", /* 34: */
- "tmp/extlinks14", /* 35: */
- "tmp/extlinks15", /* 36: */
- "extlinks16A", /* 37: */ /* TESTS for H5P_set_elink_fapl */
- "extlinks16B", /* 38: */
- "extlinks17", /* 39: */
- "extlinks18A", /* 40: */
- "extlinks18B", /* 41: */
- "extlinks19A", /* 42: */
- "extlinks19B", /* 43: */
- "extlinks20", /* 44: */
+ "extlinks0", /* 12: main files */
+ "tmp/extlinks0", /* 13: */
+ "extlinks1", /* 14: target files */
+ "tmp/extlinks1", /* 15: */
+ "extlinks2", /* 16: */
+ "tmp/extlinks2", /* 17: */
+ "extlinks3", /* 18: */
+ "tmp/extlinks3", /* 19: */
+ "extlinks4", /* 20: */
+ "tmp/extlinks4", /* 21: */
+ "extlinks5", /* 22: */
+ "tmp/extlinks6", /* 23: */
+ "extlinks7", /* 24: */
+ "tmp/extlinks7", /* 25: */
+ "tmp/extlinks8", /* 26: */
+ "extlinks9", /* 27: */
+ "tmp/extlinks9", /* 28: */
+ "extlinks10", /* 29: */ /* TESTS for windows */
+ "tmp/extlinks10", /* 30: */
+ "tmp/extlinks11", /* 31: */
+ "tmp/extlinks12", /* 32: */
+ "extlinks13", /* 33: */
+ "tmp/extlinks13", /* 34: */
+ "tmp/extlinks14", /* 35: */
+ "tmp/extlinks15", /* 36: */
+ "extlinks16A", /* 37: */ /* TESTS for H5P_set_elink_fapl */
+ "extlinks16B", /* 38: */
+ "extlinks17", /* 39: */
+ "extlinks18A", /* 40: */
+ "extlinks18B", /* 41: */
+ "extlinks19A", /* 42: */
+ "extlinks19B", /* 43: */
+ "extlinks20", /* 44: */
NULL
};
@@ -104,12 +102,12 @@ const char *FILENAME[] = {
#define TMPDIR "tmp"
-#define FAMILY_SIZE 1024
+#define FAMILY_SIZE 1024
#define CORE_INCREMENT 1024
-#define NUM400 400
+#define NUM400 400
/* do not do check_all_closed() for "ext*" files and "tmp/ext*" */
-#define EXTSTOP 12
+#define EXTSTOP 12
#define LINK_BUF_SIZE 1024
#define NAME_BUF_SIZE 1024
@@ -142,11 +140,8 @@ const char *FILENAME[] = {
#define H5L_DIM2 100
/* Creation order macros */
-#define CORDER_GROUP_NAME "corder_group"
#define CORDER_SOFT_GROUP_NAME "corder_soft_group"
#define CORDER_NLINKS 18
-#define CORDER_ITER_STOP 3
-#define CORDER_EST_ENTRY_LEN 9
/* Timestamp macros */
#define TIMESTAMP_GROUP_1 "timestamp1"
@@ -323,16 +318,16 @@ static const char *FILENAME[] = {
/*-------------------------------------------------------------------------
- * Function: test_basic_links
+ * Function: test_basic_links
*
- * Purpose: Test building a file with assorted links.
+ * Purpose Test building a file with assorted links.
*
- * Return: Success: 0
+ * Return Success: 0
*
- * Failure: -1
+ * Failure: -1
*
- * Programmer: Binh-Minh Ribler
- * October 16, 2009
+ * Programmer Binh-Minh Ribler
+ * October 16, 2009
*
* Modifications:
*
@@ -340,114 +335,163 @@ static const char *FILENAME[] = {
*/
static void test_basic_links(hid_t fapl_id, hbool_t new_format)
{
- hsize_t size[1] = {1};
- char filename[NAME_BUF_SIZE];
+ hsize_t size[1] = {1};
+ char filename[NAME_BUF_SIZE];
// Use the file access template id to create a file access prop. list.
FileAccPropList fapl(fapl_id);
try
{
- if(new_format)
- SUBTEST("Link creation (w/new group format)")
- else
- SUBTEST("Link creation")
+ if(new_format)
+ SUBTEST("Link creation (w/new group format)")
+ else
+ SUBTEST("Link creation")
- h5_fixname(FILENAME[0], fapl_id, filename, sizeof filename);
- H5File file(filename, H5F_ACC_TRUNC, FileCreatPropList::DEFAULT, fapl);
+ h5_fixname(FILENAME[0], fapl_id, filename, sizeof filename);
+ H5File file(filename, H5F_ACC_TRUNC, FileCreatPropList::DEFAULT, fapl);
- // Create simple dataspace
- DataSpace scalar (1, size, size);
+ // Create simple dataspace
+ DataSpace scalar (1, size, size);
- // Create a group then close it by letting the object go out of scope
- {
- Group group(file.createGroup("grp1", 0));
- }
+ // Create a group then close it by letting the object go out of scope
+ {
+ Group group(file.createGroup("grp1", 0));
+ }
- // Create a dataset then close it by letting the object go out of scope
- {
- DataSet dset1(file.createDataSet("dset1", PredType::NATIVE_INT, scalar));
- }
+ // Create a dataset then close it by letting the object go out of scope
+ {
+ DataSet dset1(file.createDataSet("dset1", PredType::NATIVE_INT, scalar));
+ }
- hid_t file_id = file.getId();
+ hid_t file_id = file.getId();
- // Because these are not implemented in the C++ API yet, they are
- // used so CommonFG::getLinkval can be tested.
- // Create a hard link
- if(H5Lcreate_hard(
- file_id, "dset1", H5L_SAME_LOC, "grp1/hard1",
- H5P_DEFAULT, H5P_DEFAULT) < 0)
- throw Exception("test_basic_links", "H5Lcreate_hard failed");
+ // Because these are not implemented in the C++ API yet, they are
+ // used so CommonFG::getLinkval can be tested.
+ // Create a hard link
+ if(H5Lcreate_hard(
+ file_id, "dset1", H5L_SAME_LOC, "grp1/hard1",
+ H5P_DEFAULT, H5P_DEFAULT) < 0)
+ throw Exception("test_basic_links", "H5Lcreate_hard failed");
- // Create a symbolic link
- if(H5Lcreate_soft(
- "/dset1", file_id, "grp1/soft", H5P_DEFAULT, H5P_DEFAULT) < 0)
- throw Exception("test_basic_links", "H5Lcreate_soft failed");
+ // Create a symbolic link
+ if(H5Lcreate_soft(
+ "/dset1", file_id, "grp1/soft", H5P_DEFAULT, H5P_DEFAULT) < 0)
+ throw Exception("test_basic_links", "H5Lcreate_soft failed");
- // Create a symbolic link to something that doesn't exist
- if(H5Lcreate_soft(
- "foobar", file_id, "grp1/dangle", H5P_DEFAULT, H5P_DEFAULT) < 0)
- throw Exception("test_basic_links", "H5Lcreate_soft failed");
+ // Create a symbolic link to something that doesn't exist
+ if(H5Lcreate_soft(
+ "foobar", file_id, "grp1/dangle", H5P_DEFAULT, H5P_DEFAULT) < 0)
+ throw Exception("test_basic_links", "H5Lcreate_soft failed");
- // Create a recursive symbolic link
- if(H5Lcreate_soft(
- "/grp1/recursive", file_id, "/grp1/recursive",
- H5P_DEFAULT, H5P_DEFAULT) < 0)
- throw Exception("test_basic_links", "H5Lcreate_soft failed");
+ // Create a recursive symbolic link
+ if(H5Lcreate_soft(
+ "/grp1/recursive", file_id, "/grp1/recursive",
+ H5P_DEFAULT, H5P_DEFAULT) < 0)
+ throw Exception("test_basic_links", "H5Lcreate_soft failed");
- // Verify link values before closing the file
+ // Verify link values before closing the file
- H5std_string softlink_val = file.getLinkval("grp1/soft");
- verify_val(softlink_val, "/dset1", "H5File::getLinkval grp1/soft", __LINE__, __FILE__);
+ H5std_string softlink_val = file.getLinkval("grp1/soft");
+ verify_val(softlink_val, "/dset1", "H5File::getLinkval grp1/soft", __LINE__, __FILE__);
- H5std_string dngllink_val = file.getLinkval("grp1/dangle");
- verify_val(dngllink_val, "foobar", "H5File::getLinkval grp1/dangle", __LINE__, __FILE__);
+ H5std_string dngllink_val = file.getLinkval("grp1/dangle");
+ verify_val(dngllink_val, "foobar", "H5File::getLinkval grp1/dangle", __LINE__, __FILE__);
- H5std_string reclink_val = file.getLinkval("grp1/recursive");
- verify_val(reclink_val, "/grp1/recursive", "H5File::getLinkval grp1/recursive", __LINE__, __FILE__);
+ H5std_string reclink_val = file.getLinkval("grp1/recursive");
+ verify_val(reclink_val, "/grp1/recursive", "H5File::getLinkval grp1/recursive", __LINE__, __FILE__);
} // end of try block
catch (Exception& E)
{
- issue_fail_msg("test_basic_links()", __LINE__, __FILE__, E.getCDetailMsg());
+ issue_fail_msg("test_basic_links()", __LINE__, __FILE__, E.getCDetailMsg());
}
// Open the file and check on the links in it
try
{
- // Open the file above
- H5File file(filename, H5F_ACC_RDWR, FileCreatPropList::DEFAULT, fapl);
+ // Open the file above
+ H5File file(filename, H5F_ACC_RDWR, FileCreatPropList::DEFAULT, fapl);
- // Verify link existence
- if(H5Lexists(file.getId(), "dset1", H5P_DEFAULT) != TRUE)
- throw InvalidActionException("H5Lexists", "dset1 doesn't exist");
- if(H5Lexists(file.getId(), "grp1/soft", H5P_DEFAULT) != TRUE)
- throw InvalidActionException("H5Lexists", "grp1/soft doesn't exist");
+ // Verify link existence
+ if(file.exists("dset1", LinkAccPropList::DEFAULT) != TRUE)
+ throw InvalidActionException("H5File::exists", "dset1 doesn't exist");
+ if(file.exists("grp1/soft", LinkAccPropList::DEFAULT) != TRUE)
+ throw InvalidActionException("H5File::exists", "grp1/soft doesn't exist");
- // Verify link values
- H5std_string softlink_val = file.getLinkval("grp1/soft");
- verify_val(softlink_val, "/dset1", "H5File::getLinkval grp1/soft", __LINE__, __FILE__);
+ // Verify link values
+ H5std_string softlink_val = file.getLinkval("grp1/soft");
+ verify_val(softlink_val, "/dset1", "H5File::getLinkval grp1/soft", __LINE__, __FILE__);
- H5std_string reclink_val = file.getLinkval("grp1/recursive");
- verify_val(reclink_val, "/grp1/recursive", "H5File::getLinkval grp1/recursive", __LINE__, __FILE__);
+ H5std_string reclink_val = file.getLinkval("grp1/recursive");
+ verify_val(reclink_val, "/grp1/recursive", "H5File::getLinkval grp1/recursive", __LINE__, __FILE__);
- PASSED();
+ PASSED();
} // end of try block
catch (Exception& E)
{
- issue_fail_msg("test_basic_links()", __LINE__, __FILE__, E.getCDetailMsg());
+ issue_fail_msg("test_basic_links()", __LINE__, __FILE__, E.getCDetailMsg());
}
}
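Link creation still has no C++ wrapper at this point, so the test mixes H5Lcreate_hard/H5Lcreate_soft with the C++ inspection calls, and the new H5File::exists overload replaces the raw H5Lexists checks. A condensed sketch of that round trip (file name illustrative; return-code checks on the C calls are omitted for brevity, though the test does check them):

    #include <iostream>
    #include "H5Cpp.h"
    using namespace H5;

    int main()
    {
        try {
            H5File file("links_demo.h5", H5F_ACC_TRUNC);

            hsize_t size[1] = {1};
            DataSpace space(1, size);
            file.createDataSet("dset1", PredType::NATIVE_INT, space);
            file.createGroup("grp1");

            // Link creation still goes through the C API
            hid_t fid = file.getId();
            H5Lcreate_hard(fid, "dset1", H5L_SAME_LOC, "grp1/hard1", H5P_DEFAULT, H5P_DEFAULT);
            H5Lcreate_soft("/dset1", fid, "grp1/soft", H5P_DEFAULT, H5P_DEFAULT);
            H5Lcreate_soft("foobar", fid, "grp1/dangle", H5P_DEFAULT, H5P_DEFAULT);

            // ...but existence checks and link values are available in C++
            std::cout << std::boolalpha
                      << "dset1 exists:     " << file.exists("dset1", LinkAccPropList::DEFAULT) << std::endl
                      << "grp1/soft exists: " << file.exists("grp1/soft", LinkAccPropList::DEFAULT) << std::endl
                      << "grp1/soft   -> "    << file.getLinkval("grp1/soft")   << std::endl
                      << "grp1/dangle -> "    << file.getLinkval("grp1/dangle") << std::endl;
        }
        catch (Exception& e) {
            std::cerr << e.getDetailMsg() << std::endl;
            return 1;
        }
        return 0;
    }

Note that getLinkval returns the stored target string even for the dangling link; resolving the target is a separate step.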
/*-------------------------------------------------------------------------
- * Function: test_links
+ * Function: test_num_links
+ *
+ * Purpose Test setting and getting limit of number of links
*
- * Purpose: Test links
+ * Return Success: 0
*
- * Return: None
+ * Failure: -1
*
- * Programmer: Binh-Minh Ribler
+ * Programmer Binh-Minh Ribler
+ * October 16, 2009
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static void test_num_links(hid_t fapl_id, hbool_t new_format)
+{
+ char filename[NAME_BUF_SIZE];
+
+ if(new_format)
+ SUBTEST("Setting number of links (w/new group format)")
+ else
+ SUBTEST("Setting number of links")
+
+ try
+ {
+ // Use the file access template id to create a file access prop. list.
+ FileAccPropList fapl(fapl_id);
+
+ h5_fixname(FILENAME[0], fapl_id, filename, sizeof filename);
+ H5File file(filename, H5F_ACC_RDWR, FileCreatPropList::DEFAULT, fapl);
+
+ LinkAccPropList lapl;
+ size_t nlinks = 5;
+ lapl.setNumLinks(nlinks);
+
+ // Read it back and verify
+ size_t read_nlinks = lapl.getNumLinks();
+ verify_val(read_nlinks, nlinks, "LinkAccPropList::setNumLinks", __LINE__, __FILE__);
+
+ PASSED();
+ } // end of try block
+ catch (Exception& E)
+ {
+ issue_fail_msg("test_num_links()", __LINE__, __FILE__, E.getCDetailMsg());
+ }
+} // test_num_links
+
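test_num_links above only sets the property and reads it back. In practice the same LinkAccPropList would be handed to a lookup such as the new exists() overload; the sketch below pairs the two. This is an illustrative combination, not taken from the test, and it presumes setNumLinks wraps the C-level H5Pset_nlinks limit on how many soft links a single lookup may traverse:

    #include <iostream>
    #include "H5Cpp.h"
    using namespace H5;

    int main()
    {
        try {
            H5File file("nlinks_demo.h5", H5F_ACC_TRUNC);
            hsize_t size[1] = {1};
            DataSpace space(1, size);
            file.createDataSet("dset1", PredType::NATIVE_INT, space);

            // Chain of soft links: soft2 -> soft1 -> /dset1
            H5Lcreate_soft("/dset1", file.getId(), "soft1", H5P_DEFAULT, H5P_DEFAULT);
            H5Lcreate_soft("/soft1", file.getId(), "soft2", H5P_DEFAULT, H5P_DEFAULT);

            // Allow at most 5 link traversals per lookup on this property list
            LinkAccPropList lapl;
            lapl.setNumLinks(5);
            std::cout << "limit read back: " << lapl.getNumLinks() << std::endl;

            // Pass the list to a lookup; soft links crossed while resolving the
            // path count against the configured limit
            std::cout << std::boolalpha
                      << "soft2 exists: " << file.exists("soft2", lapl) << std::endl;
        }
        catch (Exception& e) {
            std::cerr << e.getDetailMsg() << std::endl;
            return 1;
        }
        return 0;
    }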
+/*-------------------------------------------------------------------------
+ * Function: test_links
+ *
+ * Purpose Test links
+ *
+ * Return None
+ *
+ * Programmer Binh-Minh Ribler
* October 16, 2009
*
*-------------------------------------------------------------------------
@@ -455,7 +499,7 @@ static void test_basic_links(hid_t fapl_id, hbool_t new_format)
extern "C"
void test_links()
{
- hid_t fapl_id, fapl2_id; /* File access property lists */
+ hid_t fapl_id, fapl2_id; /* File access property lists */
unsigned new_format; /* Whether to use the new format or not */
const char *envval;
@@ -469,40 +513,40 @@ void test_links()
MESSAGE(5, ("Testing Various Links\n"));
try
{
- /* Copy the file access property list */
- if((fapl2_id = H5Pcopy(fapl_id)) < 0)
- throw Exception("test_links", "H5Pcopy failed");
-
- /* Set the "use the latest version of the format" bounds for creating objects in the file */
- if(H5Pset_libver_bounds(fapl2_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
- throw Exception("test_links", "H5Pset_libver_bounds failed");
-
- /* Loop over using new group format */
- for(new_format = FALSE; new_format <= TRUE; new_format++)
- {
- hid_t my_fapl_id;
-
- /* Check for FAPL to use */
- if(new_format)
- my_fapl_id = fapl2_id;
- else
- my_fapl_id = fapl_id;
-
- /* General tests... (on both old & new format groups */
- // FileAccPropList may be passed in instead of fapl id
- test_basic_links(my_fapl_id, new_format);
+ /* Copy the file access property list */
+ if((fapl2_id = H5Pcopy(fapl_id)) < 0)
+ throw Exception("test_links", "H5Pcopy failed");
+
+ /* Set the "use the latest version of the format" bounds for creating objects in the file */
+ if(H5Pset_libver_bounds(fapl2_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ throw Exception("test_links", "H5Pset_libver_bounds failed");
+
+ /* Loop over using new group format */
+ for(new_format = FALSE; new_format <= TRUE; new_format++)
+ {
+ hid_t my_fapl_id;
+
+ /* Check for FAPL to use */
+ if(new_format)
+ my_fapl_id = fapl2_id;
+ else
+ my_fapl_id = fapl_id;
+
+ /* General tests... (on both old & new format groups */
+ // FileAccPropList may be passed in instead of fapl id
+ test_basic_links(my_fapl_id, new_format);
#if 0
// these tests are from the C test links.c and left here for future
// implementation of H5L API
- nerrors += test_basic_links(fapl_id, new_format) < 0 ? 1 : 0;
- nerrors += cklinks(my_fapl, new_format) < 0 ? 1 : 0;
- nerrors += new_links(my_fapl, new_format) < 0 ? 1 : 0;
- nerrors += ck_new_links(my_fapl, new_format) < 0 ? 1 : 0;
- nerrors += long_links(my_fapl, new_format) < 0 ? 1 : 0;
- nerrors += toomany(my_fapl, new_format) < 0 ? 1 : 0;
-
- /* Test new H5L link creation routine */
- nerrors += test_lcpl(my_fapl, new_format);
+ nerrors += test_basic_links(fapl_id, new_format) < 0 ? 1 : 0;
+ nerrors += cklinks(my_fapl, new_format) < 0 ? 1 : 0;
+ nerrors += new_links(my_fapl, new_format) < 0 ? 1 : 0;
+ nerrors += ck_new_links(my_fapl, new_format) < 0 ? 1 : 0;
+ nerrors += long_links(my_fapl, new_format) < 0 ? 1 : 0;
+ nerrors += toomany(my_fapl, new_format) < 0 ? 1 : 0;
+
+ /* Test new H5L link creation routine */
+ nerrors += test_lcpl(my_fapl, new_format);
nerrors += test_move(my_fapl, new_format);
nerrors += test_copy(my_fapl, new_format);
nerrors += test_move_preserves(my_fapl, new_format);
@@ -579,7 +623,7 @@ void test_links()
/* do not do this for files used by external link tests */
nerrors += check_all_closed(my_fapl, new_format, EXTSTOP) < 0 ? 1 : 0;
#endif // 0
- } /* end for */
+ } /* end for */
#if 0
/* New group revision feature tests */
@@ -609,33 +653,33 @@ void test_links()
nerrors += group_info_old(fapl) < 0 ? 1 : 0;
#endif
- /* Close 2nd FAPL */
- H5Pclose(fapl2_id);
+ /* Close 2nd FAPL */
+ H5Pclose(fapl2_id);
- h5_clean_files(FILENAME, fapl_id);
+ h5_clean_files(FILENAME, fapl_id);
- /* Test that external links can be used after a library reset. MUST be
- * called last so the reset doesn't interfere with the property lists. This
- * routine will delete its own file. */
- /* nerrors += external_reset_register() < 0 ? 1 : 0;
+ /* Test that external links can be used after a library reset. MUST be
+ * called last so the reset doesn't interfere with the property lists. This
+ * routine will delete its own file. */
+ /* nerrors += external_reset_register() < 0 ? 1 : 0;
*/
}
catch (Exception& E)
{
- issue_fail_msg("test_links()", __LINE__, __FILE__, E.getCDetailMsg());
+ issue_fail_msg("test_links()", __LINE__, __FILE__, E.getCDetailMsg());
}
}
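The old-versus-new group-format loop above juggles raw fapl ids because the helpers take hid_t; on the pure C++ side the same "latest format" switch is a single setLibverBounds call, as the FileAccPropList setup in test_iterate already showed. A small sketch (file and group names illustrative):

    #include <iostream>
    #include "H5Cpp.h"
    using namespace H5;

    int main()
    {
        try {
            // Ask the library to use the latest version of the file format
            FileAccPropList fapl;
            fapl.setLibverBounds(H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);

            // Objects created through this access list use the new group format
            H5File file("newformat_demo.h5", H5F_ACC_TRUNC, FileCreatPropList::DEFAULT, fapl);
            Group grp = file.createGroup("grp_new_format");
            std::cout << "created: " << grp.getObjName() << std::endl;
        }
        catch (Exception& e) {
            std::cerr << e.getDetailMsg() << std::endl;
            return 1;
        }
        return 0;
    }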
/*-------------------------------------------------------------------------
- * Function: cleanup_links
+ * Function: cleanup_links
*
- * Purpose: Cleanup temporary test files
+ * Purpose Cleanup temporary test files
*
- * Return: none
+ * Return none
*
- * Programmer: Binh-Minh Ribler
- * October 16, 2009
+ * Programmer Binh-Minh Ribler
+ * October 16, 2009
*
* Modifications:
*
diff --git a/c++/test/tobject.cpp b/c++/test/tobject.cpp
index b8654c3..d9f4075 100644
--- a/c++/test/tobject.cpp
+++ b/c++/test/tobject.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*****************************************************************************
@@ -23,8 +21,6 @@
#else
#include <iostream>
#endif
-using std::cerr;
-using std::endl;
#include <string>
#include "H5Cpp.h" // C++ API header file
@@ -33,31 +29,42 @@ using namespace H5;
#include "h5test.h"
#include "h5cpputil.h" // C++ utilility header file
-const H5std_string FILE_OBJECTS("tobjects.h5");
-const H5std_string GROUP1("Top Group");
-const H5std_string GROUP1_PATH("/Top Group");
-const H5std_string GROUP1_1("Sub-Group 1.1");
-const H5std_string GROUP1_1_PATH("/Top Group/Sub-Group 1.1");
-const H5std_string GROUP1_2("Sub-Group 1.2");
-const H5std_string GROUP1_2_PATH("/Top Group/Sub-Group 1.2");
-const H5std_string DSET_DEFAULT_NAME("default");
-const H5std_string DSET_IN_FILE("Dataset in File");
-const H5std_string DSET_IN_FILE_PATH("/Dataset in File");
-const H5std_string DSET_IN_GRP1("Dataset in Group 1");
-const H5std_string DSET_IN_GRP1_PATH("/Top Group/Dataset in Group 1");
-const H5std_string DSET_IN_GRP1_2("Dataset in Group 1.2");
-const H5std_string DSET_IN_GRP1_2_PATH("/Top Group/Sub-Group 1.2/Dataset in Group 1.2");
+const H5std_string FILE_OBJECTS("tobjects.h5");
+const H5std_string FILE_OBJHDR("tobject_header.h5");
+const H5std_string GROUP1("Top Group");
+const H5std_string GROUP1_PATH("/Top Group");
+const H5std_string GROUP1_1("Sub-Group 1.1");
+const H5std_string GROUP1_1_PATH("/Top Group/Sub-Group 1.1");
+const H5std_string GROUP1_2("Sub-Group 1.2");
+const H5std_string GROUP1_2_PATH("/Top Group/Sub-Group 1.2");
+const H5std_string DSET_DEFAULT_NAME("default");
+const H5std_string DSET_IN_FILE("Dataset in File");
+const H5std_string DSET_IN_FILE_PATH("/Dataset in File");
+const H5std_string DSET_IN_GRP1("Dataset_in_Group_1");
+const H5std_string DSET_IN_GRP1_PATH("/Top Group/Dataset_in_Group_1");
+const H5std_string DSET_IN_GRP1_2("Dataset_in_Group_1.2");
+const H5std_string DSET_IN_GRP1_2_PATH("/Top Group/Sub-Group 1.2/Dataset_in_Group_1.2");
/*-------------------------------------------------------------------------
- * Function: test_get_objname
+ * Function: test_get_objname
*
- * Purpose: Tests getting object name of groups and datasets.
+ * Purpose Tests getting object name of groups and datasets.
*
- * Return: Success: 0
- * Failure: -1
+ * Description:
+ * File structure:
+ * GROUP1
+ * GROUP1_1
+ * GROUP1_2
+ * DSET_IN_GRP1_2
+ * DSET_IN_GRP1
+ * DSET_IN_FILE
*
- * Programmer: Binh-Minh Ribler
- * Friday, March 4, 2014
+ *
+ * Return Success: 0
+ * Failure: -1
+ *
+ * Programmer Binh-Minh Ribler
+ * Friday, March 4, 2014
*
* Modifications:
*
@@ -68,94 +75,170 @@ static void test_get_objname()
SUBTEST("H5Object::getObjName on Groups and Datasets");
try {
- // Create file
- H5File file(FILE_OBJECTS, H5F_ACC_TRUNC);
+ // Create file
+ H5File file(FILE_OBJECTS, H5F_ACC_TRUNC);
- // Create a top group and 2 subgroups
- Group grp1 = file.createGroup(GROUP1, 0);
- Group grp1_1 = grp1.createGroup(GROUP1_1, 0);
- Group grp1_2 = grp1.createGroup(GROUP1_2, 0);
+ // Create a top group and 2 subgroups
+ Group grp1 = file.createGroup(GROUP1, 0);
+ Group grp1_1 = grp1.createGroup(GROUP1_1, 0);
+ Group grp1_2 = grp1.createGroup(GROUP1_2, 0);
- // Get part of the group's name, random length using
- // ssize_t getObjName(char* comment, size_t buf_size)
+ // Get part of the group's name, random length using
+ // ssize_t getObjName(char* comment, size_t buf_size)
- // Get the length of the group's name first
- ssize_t name_len = grp1.getObjName(NULL);
+ // Get the length of the group's name first
+ ssize_t name_len = grp1.getObjName(NULL);
- // Random length is 4
- if (name_len > 4)
- {
- char* grp1_name = new char[5];
- name_len = grp1.getObjName(grp1_name, 5);
- verify_val((const char*)grp1_name, "/Top", "Group::getObjName", __LINE__, __FILE__);
- delete []grp1_name;
- }
+ // Random length is 4
+ if (name_len > 4)
+ {
+ char* grp1_name = new char[5];
+ name_len = grp1.getObjName(grp1_name, 5);
+ verify_val((const char*)grp1_name, "/Top", "Group::getObjName", __LINE__, __FILE__);
+ delete []grp1_name;
+ }
- // Create a data space
- hsize_t dims[2];
- dims[0] = 2;
- dims[1] = 5;
- DataSpace space (2, dims, NULL);
+ // Create a data space
+ hsize_t dims[2];
+ dims[0] = 2;
+ dims[1] = 5;
+ DataSpace space (2, dims, NULL);
- // Create a dataset in the file
- DataSet dsinfile = file.createDataSet(DSET_IN_FILE,
- PredType::NATIVE_DOUBLE, space);
+ // Create a dataset in the file
+ DataSet dsinfile = file.createDataSet(DSET_IN_FILE,
+ PredType::NATIVE_DOUBLE, space);
- // Create a dataset in the group
- DataSet dsingrp = grp1.createDataSet(DSET_IN_GRP1,
- PredType::NATIVE_INT, space);
+ // Create a dataset in the group
+ DataSet dsingrp = grp1.createDataSet(DSET_IN_GRP1,
+ PredType::NATIVE_INT, space);
- // Get and verify the name of each dataset, using
- // H5std_string getObjName() and
- // ssize_t getObjName(H5std_string& obj_name, size_t len = 0)
- H5std_string ds_name = dsinfile.getObjName();
- verify_val(ds_name, DSET_IN_FILE_PATH, "DataSet::getObjName", __LINE__, __FILE__);
+ // Get and verify the name of each dataset, using
+ // H5std_string getObjName() and
+ // ssize_t getObjName(H5std_string& obj_name, size_t len = 0)
+ H5std_string ds_name = dsinfile.getObjName();
+ verify_val(ds_name, DSET_IN_FILE_PATH, "DataSet::getObjName", __LINE__, __FILE__);
- name_len = dsingrp.getObjName(ds_name); // default len
- verify_val(ds_name, DSET_IN_GRP1_PATH, "DataSet::getObjName", __LINE__, __FILE__);
+ name_len = dsingrp.getObjName(ds_name); // default len
+ verify_val(ds_name, DSET_IN_GRP1_PATH, "DataSet::getObjName", __LINE__, __FILE__);
- // Close dataset
- dsingrp.close();
+ // Close dataset
+ dsingrp.close();
- // Create a dataset in sub-group 1.2
- dsingrp = grp1_2.createDataSet(DSET_IN_GRP1_2, PredType::NATIVE_INT, space);
+ // Create a dataset in sub-group 1.2
+ dsingrp = grp1_2.createDataSet(DSET_IN_GRP1_2, PredType::NATIVE_INT, space);
- // Get and verify the name of the dataset that belongs to subgroup
- // 1.2, using H5std_string getObjName()
- ds_name = dsingrp.getObjName();
- verify_val(ds_name, DSET_IN_GRP1_2_PATH, "DataSet::getObjName", __LINE__, __FILE__);
+ // Get and verify the name of the dataset that belongs to subgroup
+ // 1.2, using H5std_string getObjName()
+ ds_name = dsingrp.getObjName();
+ verify_val(ds_name, DSET_IN_GRP1_2_PATH, "DataSet::getObjName", __LINE__, __FILE__);
- // Close dataset
- dsingrp.close();
+ // Close dataset
+ dsingrp.close();
- // Reopen that same dataset then check the name again with another
- // overload: ssize_t getObjName(H5std_string& obj_name, size_t len = 0)
- dsingrp = grp1_2.openDataSet(DSET_IN_GRP1_2);
- name_len = dsingrp.getObjName(ds_name);
- verify_val(ds_name, DSET_IN_GRP1_2_PATH, "DataSet::getObjName", __LINE__, __FILE__);
+ // Reopen that same dataset then check the name again with another
+ // overload: ssize_t getObjName(H5std_string& obj_name, size_t len = 0)
+ dsingrp = grp1_2.openDataSet(DSET_IN_GRP1_2);
+ name_len = dsingrp.getObjName(ds_name);
+ verify_val(ds_name, DSET_IN_GRP1_2_PATH, "DataSet::getObjName", __LINE__, __FILE__);
- // Everything will be closed as they go out of scope
+ // Everything will be closed as they go out of scope
- PASSED();
- } // try block
+ PASSED();
+ } // try block
// catch all other exceptions
catch (Exception& E)
{
- issue_fail_msg("test_get_objname", __LINE__, __FILE__);
+ issue_fail_msg("test_get_objname", __LINE__, __FILE__);
}
} // test_get_objname
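Three getObjName overloads are exercised above; they differ only in how the caller supplies the buffer. A compact sketch of all three against a throwaway file (object names illustrative):

    #include <iostream>
    #include <string>
    #include "H5Cpp.h"
    using namespace H5;

    int main()
    {
        try {
            H5File file("objname_demo.h5", H5F_ACC_TRUNC);
            Group top = file.createGroup("Top Group");
            hsize_t dims[2] = {2, 5};
            DataSpace space(2, dims);
            DataSet dset = top.createDataSet("Dataset_in_Group_1", PredType::NATIVE_INT, space);

            // 1) No argument: returns the full path as an H5std_string
            H5std_string full = dset.getObjName();    // "/Top Group/Dataset_in_Group_1"

            // 2) H5std_string& plus optional length: fills the string, returns the length
            H5std_string name;
            ssize_t len = dset.getObjName(name);      // default len = whole name

            // 3) char* plus buffer size: C-style truncating variant;
            //    calling it with NULL first yields the required length
            ssize_t need = top.getObjName(NULL);
            char* buf = new char[5];
            top.getObjName(buf, 5);                   // "/Top" (4 chars + NUL)
            std::cout << full << "\n" << name << " (" << len << ")\n"
                      << buf << " of " << need << " chars" << std::endl;
            delete [] buf;
        }
        catch (Exception& e) {
            std::cerr << e.getDetailMsg() << std::endl;
            return 1;
        }
        return 0;
    }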
/*-------------------------------------------------------------------------
- * Function: test_get_objname_ontypes
+ * Function: test_existance
+ *
+ * Purpose Tests getting object name of groups and datasets.
+ *
+ * Description:
+ * File structure:
+ * GROUP1
+ * GROUP1_1
+ * GROUP1_2
+ * DSET_IN_GRP1_2
+ * DSET_IN_GRP1
+ * DSET_IN_FILE
*
- * Purpose: Test getting object name from various committed types.
*
- * Return: Success: 0
- * Failure: -1
+ * Return Success: 0
+ * Failure: -1
*
- * Programmer: Binh-Minh Ribler
- * March 4, 2014
+ * Programmer Binh-Minh Ribler
+ * Friday, March 4, 2014
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+static void test_existance()
+{
+ SUBTEST("H5File::exists and Group::exists");
+
+ try {
+ // Open file
+ H5File file(FILE_OBJECTS, H5F_ACC_RDONLY);
+
+ // Check if GROUP1 exists in the file
+ bool exists = file.exists(GROUP1);
+
+ // Open GROUP1
+ Group grp1 = file.openGroup(GROUP1);
+
+ // Check if GROUP1_1 and GROUP1_2 exist in GROUP1
+ exists = grp1.exists(GROUP1_1);
+ verify_val(exists, TRUE, "Group::exists GROUP1_1", __LINE__, __FILE__);
+ exists = grp1.exists(GROUP1_2);
+ verify_val(exists, TRUE, "Group::exists GROUP1_2", __LINE__, __FILE__);
+
+ // Check if DSET_IN_GRP1 exists in GROUP1
+ exists = grp1.exists(DSET_IN_GRP1);
+ verify_val(exists, TRUE, "Group::exists DSET_IN_GRP1", __LINE__, __FILE__);
+
+ // Open GROUP1_2
+ Group grp1_2 = grp1.openGroup(GROUP1_2);
+
+ // Check if DSET_IN_GRP1_2 exists in GROUP1_2
+ exists = grp1_2.exists(DSET_IN_GRP1_2);
+ verify_val(exists, TRUE, "Group::exists DSET_IN_GRP1_2", __LINE__, __FILE__);
+
+ // Check if a dataset exists given dataset as location with full path name
+ DataSet dset1 = file.openDataSet(DSET_IN_FILE);
+ exists = dset1.exists("/Top Group/Dataset_in_Group_1");
+ verify_val(exists, TRUE, "Group::exists given dataset with full path name", __LINE__, __FILE__);
+
+ exists = grp1_2.exists(DSET_IN_GRP1);
+ verify_val(exists, FALSE, "Group::exists DSET_IN_GRP1", __LINE__, __FILE__);
+
+ // Everything will be closed as they go out of scope
+
+ PASSED();
+ } // try block
+
+ // catch all other exceptions
+ catch (Exception& E)
+ {
+ issue_fail_msg("test_existance", __LINE__, __FILE__);
+ }
+} // test_existance
+
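The new exists() call works from whatever handle is convenient: the test invokes it on a file, on groups, and on a dataset with an absolute path, and a missing name simply yields false rather than an exception. A short sketch of the same checks (file layout illustrative):

    #include <iostream>
    #include "H5Cpp.h"
    using namespace H5;

    int main()
    {
        try {
            H5File file("exists_demo.h5", H5F_ACC_TRUNC);
            Group  top = file.createGroup("Top Group");
            Group  sub = top.createGroup("Sub-Group 1.2");
            hsize_t dims[1] = {4};
            DataSpace space(1, dims);
            DataSet dset = file.createDataSet("Dataset in File", PredType::NATIVE_INT, space);
            sub.createDataSet("Dataset_in_Group_1.2", PredType::NATIVE_INT, space);

            std::cout << std::boolalpha;
            // relative to the file (root group)
            std::cout << "Top Group in file:            " << file.exists("Top Group") << std::endl;
            // relative to a group
            std::cout << "Sub-Group 1.2 in Top Group:   " << top.exists("Sub-Group 1.2") << std::endl;
            std::cout << "dataset in Sub-Group 1.2:     " << sub.exists("Dataset_in_Group_1.2") << std::endl;
            // absolute path, using a dataset handle only as the location
            std::cout << "absolute path from a dataset: "
                      << dset.exists("/Top Group/Sub-Group 1.2/Dataset_in_Group_1.2") << std::endl;
            // a name that is not there just yields false
            std::cout << "missing name:                 " << sub.exists("no such thing") << std::endl;
        }
        catch (Exception& e) {
            std::cerr << e.getDetailMsg() << std::endl;
            return 1;
        }
        return 0;
    }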
+/*-------------------------------------------------------------------------
+ * Function: test_get_objname_ontypes
+ *
+ * Purpose Test getting object name from various committed types.
+ *
+ * Return Success: 0
+ * Failure: -1
+ *
+ * Programmer Binh-Minh Ribler
+ * March 4, 2014
*
* Modifications:
*
@@ -166,81 +249,97 @@ static void test_get_objname_ontypes()
SUBTEST("H5Object::getObjName on Committed Datatypes");
try {
- // Create a file with default prop lists
- H5File file(FILE_OBJECTS, H5F_ACC_RDWR);
-
- // Create a group
- Group grp = file.createGroup ("typetests");
-
- // Create a datatype and save it
- IntType inttype(PredType::STD_B8LE);
- inttype.commit(file, "INT type of STD_B8LE");
-
- // Close the type then open it again to test getting its name
- inttype.close();
- inttype = file.openIntType("INT type of STD_B8LE");
-
- // Get and verify its name
- H5std_string inttype_name = inttype.getObjName();
- verify_val(inttype_name, "/INT type of STD_B8LE", "DataType::getObjName", __LINE__, __FILE__);
-
- // Make copy of a predefined type and save it
- DataType dtype(PredType::STD_B8LE);
- dtype.commit(file, "STD_B8LE");
-
- // Close the data type and file
- dtype.close();
- file.close();
-
- // Re-open the file and the data type to test getting its name
- file.openFile(FILE_OBJECTS, H5F_ACC_RDWR);
- dtype = file.openDataType("STD_B8LE");
-
- // Get and verify its name
- H5std_string type_name = dtype.getObjName();
- verify_val(type_name, "/STD_B8LE", "DataType::getObjName", __LINE__, __FILE__);
-
- // Test getting type's name from copied type
- DataType copied_type;
- copied_type.copy(dtype);
- copied_type.commit(file, "copy of STD_B8LE");
- type_name = copied_type.getObjName();
- verify_val(type_name, "/copy of STD_B8LE", "DataType::getObjName", __LINE__, __FILE__);
-
- // Test copying an integer predefined type
- IntType new_int_type(PredType::NATIVE_INT);
-
- // Name this datatype
- new_int_type.commit(grp, "IntType NATIVE_INT");
- ssize_t name_len = new_int_type.getObjName(type_name); // default len
- verify_val(name_len, (ssize_t)HDstrlen("/typetests/IntType NATIVE_INT"), "DataType::getObjName", __LINE__, __FILE__);
- verify_val(type_name, "/typetests/IntType NATIVE_INT", "DataType::getObjName", __LINE__, __FILE__);
-
- // Close everything or they can be closed when objects go out of scope
- dtype.close();
- copied_type.close();
- new_int_type.close();
- grp.close();
-
- PASSED();
+ // Create a file with default prop lists
+ H5File file(FILE_OBJECTS, H5F_ACC_RDWR);
+
+ // Create a group
+ Group grp = file.createGroup ("typetests");
+
+ // Create a datatype and save it
+ IntType inttype(PredType::STD_B8LE);
+ inttype.commit(file, "INT type of STD_B8LE");
+
+ // Close the type then open it again to test getting its name
+ inttype.close();
+ inttype = file.openIntType("INT type of STD_B8LE"); // deprecated
+
+ // Get and verify its name
+ H5std_string inttype_name = inttype.getObjName();
+ verify_val(inttype_name, "/INT type of STD_B8LE", "DataType::getObjName", __LINE__, __FILE__);
+
+ // Close the type then open it again to test getting its name, but
+ // with the constructor this time
+ inttype.close();
+ IntType std_b8le(file, "INT type of STD_B8LE");
+
+ // Get and verify its name
+ H5std_string std_b8le_name = std_b8le.getObjName();
+ verify_val(std_b8le_name, "/INT type of STD_B8LE", "DataType::getObjName", __LINE__, __FILE__);
+
+ // Make copy of a predefined type and save it
+ DataType dtype(PredType::STD_B8LE);
+ dtype.commit(file, "STD_B8LE");
+
+ // Close the data type and file
+ dtype.close();
+ file.close();
+
+ // Re-open the file and the data type to test getting its name
+ file.openFile(FILE_OBJECTS, H5F_ACC_RDWR);
+ dtype = file.openDataType("STD_B8LE"); // deprecated
+
+ // Get and verify its name
+ H5std_string type_name = dtype.getObjName();
+ verify_val(type_name, "/STD_B8LE", "DataType::getObjName", __LINE__, __FILE__);
+
+ // Close the type and open it again with the constructor then test
+ // getting its name
+ dtype.close();
+ DataType dtype2(file, "STD_B8LE");
+ type_name = dtype2.getObjName();
+ verify_val(type_name, "/STD_B8LE", "DataType::getObjName", __LINE__, __FILE__);
+
+ // Test getting type's name from copied type
+ DataType copied_type;
+ copied_type.copy(dtype2);
+ copied_type.commit(file, "copy of STD_B8LE");
+ type_name = copied_type.getObjName();
+ verify_val(type_name, "/copy of STD_B8LE", "DataType::getObjName", __LINE__, __FILE__);
+
+ // Test copying an integer predefined type
+ IntType new_int_type(PredType::NATIVE_INT);
+
+ // Name this datatype
+ new_int_type.commit(grp, "IntType NATIVE_INT");
+ ssize_t name_len = new_int_type.getObjName(type_name); // default len
+ verify_val(name_len, (ssize_t)HDstrlen("/typetests/IntType NATIVE_INT"), "DataType::getObjName", __LINE__, __FILE__);
+ verify_val(type_name, "/typetests/IntType NATIVE_INT", "DataType::getObjName", __LINE__, __FILE__);
+
+ // Close everything explicitly; otherwise, they would be closed when the objects go out of scope

+ dtype2.close();
+ copied_type.close();
+ new_int_type.close();
+ grp.close();
+
+ PASSED();
} // end top try block
catch (Exception& E)
{
- issue_fail_msg("test_get_objname_ontypes", __LINE__, __FILE__);
+ issue_fail_msg("test_get_objname_ontypes", __LINE__, __FILE__);
}
} // test_get_objname_ontypes
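The getObjName calls above follow the usual HDF5 pattern of querying the name length first and then filling a buffer; a hedged sketch of the C-level equivalent (helper name is illustrative) built on H5Iget_name:

    #include <string>
    #include <vector>
    #include "H5Cpp.h"

    // Full path of any open object identifier (sketch only).
    static std::string object_path(hid_t obj_id)
    {
        ssize_t len = H5Iget_name(obj_id, NULL, 0);          // length, excluding the NUL
        if (len <= 0)
            return std::string();
        std::vector<char> buf(static_cast<size_t>(len) + 1);
        H5Iget_name(obj_id, buf.data(), buf.size());
        return std::string(buf.data());
    }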
/*-------------------------------------------------------------------------
- * Function: test_get_objtype
+ * Function: test_get_objtype
*
- * Purpose: Tests getting object type
+ * Purpose Tests getting object type
*
- * Return: Success: 0
- * Failure: -1
+ * Return Success: 0
+ * Failure: -1
*
- * Programmer: Binh-Minh Ribler
- * Friday, March 4, 2014
+ * Programmer Binh-Minh Ribler
+ * Friday, March 4, 2014
*
* Modifications:
*
@@ -251,61 +350,180 @@ static void test_get_objtype()
SUBTEST("H5File::childObjType and H5Group::childObjType");
try {
- // Open file
- H5File file(FILE_OBJECTS, H5F_ACC_RDWR);
+ // Open file
+ H5File file(FILE_OBJECTS, H5F_ACC_RDWR);
- // Open the top group
- Group grp1 = file.openGroup(GROUP1);
+ // Open the top group
+ Group grp1 = file.openGroup(GROUP1);
- // Create a datatype and save it
- DataType dtype(PredType::STD_I32LE);
- dtype.commit(grp1, "STD_I32LE");
+ // Create a datatype and save it
+ DataType dtype(PredType::STD_I32LE);
+ dtype.commit(grp1, "STD_I32LE");
- // Get and verify object type with
- // H5O_type_t childObjType(const H5std_string& objname)
- H5O_type_t objtype = file.childObjType(DSET_IN_FILE);
- verify_val(objtype, H5O_TYPE_DATASET, "DataSet::childObjType", __LINE__, __FILE__);
+ // Get and verify object type with
+ // H5O_type_t childObjType(const H5std_string& objname)
+ H5O_type_t objtype = file.childObjType(DSET_IN_FILE);
+ verify_val(objtype, H5O_TYPE_DATASET, "DataSet::childObjType", __LINE__, __FILE__);
- // Get and verify object type with
- // H5O_type_t childObjType(const char* objname)
- objtype = grp1.childObjType(GROUP1_1.c_str());
- verify_val(objtype, H5O_TYPE_GROUP, "DataSet::childObjType", __LINE__, __FILE__);
+ // Get and verify object type with
+ // H5O_type_t childObjType(const char* objname)
+ objtype = grp1.childObjType(GROUP1_1.c_str());
+ verify_val(objtype, H5O_TYPE_GROUP, "DataSet::childObjType", __LINE__, __FILE__);
- // Get and verify object type with
- // H5O_type_t childObjType(hsize_t index, H5_index_t index_type,
- // H5_iter_order_t order, const char* objname=".")
- objtype = grp1.childObjType((hsize_t)1, H5_INDEX_NAME, H5_ITER_INC);
- verify_val(objtype, H5O_TYPE_NAMED_DATATYPE, "DataSet::childObjType", __LINE__, __FILE__);
+ // Get and verify object type with
+ // H5O_type_t childObjType(hsize_t index, H5_index_t index_type,
+ // H5_iter_order_t order, const char* objname=".")
+ objtype = grp1.childObjType((hsize_t)1, H5_INDEX_NAME, H5_ITER_INC);
+ verify_val(objtype, H5O_TYPE_NAMED_DATATYPE, "DataSet::childObjType", __LINE__, __FILE__);
- // Get and verify object type with
- // H5O_type_t childObjType(hsize_t index,
- // H5_index_t index_type=H5_INDEX_NAME,
- // H5_iter_order_t order=H5_ITER_INC, const char* objname=".")
- objtype = grp1.childObjType((hsize_t)2);
- verify_val(objtype, H5O_TYPE_GROUP, "DataSet::childObjType", __LINE__, __FILE__);
+ // Get and verify object type with
+ // H5O_type_t childObjType(hsize_t index,
+ // H5_index_t index_type=H5_INDEX_NAME,
+ // H5_iter_order_t order=H5_ITER_INC, const char* objname=".")
+ objtype = grp1.childObjType((hsize_t)2);
+ verify_val(objtype, H5O_TYPE_GROUP, "DataSet::childObjType", __LINE__, __FILE__);
- // Everything will be closed as they go out of scope
+ // Everything will be closed as they go out of scope
- PASSED();
- } // try block
+ PASSED();
+ } // try block
// catch all other exceptions
catch (Exception& E)
{
- issue_fail_msg("test_get_objtype", __LINE__, __FILE__);
+ issue_fail_msg("test_get_objtype", __LINE__, __FILE__);
}
} // test_get_objtype
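childObjType corresponds to an object-info query at the C level; a sketch assuming the 1.8/1.10-era signature of H5Oget_info_by_name (HDF5 1.12 adds a fields argument), with the helper name being illustrative:

    #include "H5Cpp.h"

    // Type of the child object `name` relative to `grp` (sketch only).
    static H5O_type_t child_type(const H5::Group& grp, const char* name)
    {
        H5O_info_t oinfo;
        H5Oget_info_by_name(grp.getId(), name, &oinfo, H5P_DEFAULT);
        return oinfo.type;   // H5O_TYPE_GROUP, H5O_TYPE_DATASET, H5O_TYPE_NAMED_DATATYPE, ...
    }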
+
+/*-------------------------------------------------------------------------
+ * Function: test_open_object_header
+ *
+ * Purpose Test Group::getObjId function.
+ *
+ * Return None
+ *
+ * Programmer Binh-Minh Ribler (use C version)
+ * March, 2017
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+const H5std_string GROUPNAME("group");
+const H5std_string DTYPENAME("group/datatype");
+const H5std_string DTYPENAME_INGRP("datatype");
+const H5std_string DSETNAME("dataset");
+#define RANK 2
+#define DIM0 5
+#define DIM1 10
+static void test_open_object_header()
+{
+ hsize_t dims[2];
+ H5G_info_t ginfo; /* Group info struct */
+ // Output message about test being performed
+ SUBTEST("Group::getObjId");
+
+ try {
+ // Create file FILE1
+ H5File file1(FILE_OBJHDR, H5F_ACC_TRUNC);
+ /* Create a group, dataset, and committed datatype within the file */
+
+ // Create a group in the root group
+ Group grp(file1.createGroup(GROUPNAME));
+ grp.close();
+
+ // Commit the type inside the file
+ IntType dtype(PredType::NATIVE_INT);
+ dtype.commit(file1, DTYPENAME);
+ dtype.close();
+
+ // Create a new dataset
+ dims[0] = DIM0;
+ dims[1] = DIM1;
+ DataSpace dspace(RANK, dims);
+ DataSet dset(file1.createDataSet(DSETNAME, PredType::NATIVE_INT, dspace));
+
+ // Close dataset and dataspace
+ dset.close();
+ dspace.close();
+
+ // Now make sure that getObjId can open all three types of objects
+ hid_t obj_grp = file1.getObjId(GROUPNAME);
+ hid_t obj_dtype = file1.getObjId(DTYPENAME);
+ hid_t obj_dset = file1.getObjId(DSETNAME);
+
+ // Make sure that each is the right kind of ID
+ H5I_type_t id_type = IdComponent::getHDFObjType(obj_grp);
+ verify_val(id_type, H5I_GROUP, "H5Iget_type for group ID", __LINE__, __FILE__);
+ id_type = IdComponent::getHDFObjType(obj_dtype);
+ verify_val(id_type, H5I_DATATYPE, "H5Iget_type for datatype ID", __LINE__, __FILE__);
+ id_type = IdComponent::getHDFObjType(obj_dset);
+ verify_val(id_type, H5I_DATASET, "H5Iget_type for dataset ID", __LINE__, __FILE__);
+
+ /* Do something more complex with each of the IDs to make sure */
+
+ Group grp2(obj_grp);
+ hsize_t num_objs = grp2.getNumObjs();
+ verify_val(num_objs, 1, "H5Gget_info", __LINE__, __FILE__);
+ // There should be one object, the datatype
+
+ // Close datatype object opened from the file
+ file1.closeObjId(obj_dtype);
+
+ dset.setId(obj_dset);
+ dspace = dset.getSpace();
+ bool is_simple = dspace.isSimple();
+ dspace.close();
+
+ // Open datatype object from the group
+ obj_dtype = grp2.getObjId(DTYPENAME_INGRP);
+
+ dtype.setId(obj_dtype);
+ H5T_class_t type_class = dtype.getClass();
+ verify_val(type_class, H5T_INTEGER, "H5Tget_class", __LINE__, __FILE__);
+ dtype.close();
+
+ // Close datatype object
+ grp2.closeObjId(obj_dtype);
+
+ // Close the group object
+ file1.closeObjId(obj_grp);
+
+ // Try doing something with the group; its ID should still work
+ num_objs = grp2.getNumObjs();
+ verify_val(num_objs, 1, "H5Gget_info", __LINE__, __FILE__);
+
+ // Close the cloned group
+ grp2.close();
+
+ PASSED();
+ } // end of try block
+ // catch invalid action exception
+ catch (InvalidActionException& E)
+ {
+ cerr << " in InvalidActionException" << endl;
+ cerr << " *FAILED*" << endl;
+ cerr << " <<< " << E.getDetailMsg() << " >>>" << endl << endl;
+ }
+ // catch all other exceptions
+ catch (Exception& E)
+ {
+ cerr << " in Exception" << endl;
+ issue_fail_msg("test_file_name()", __LINE__, __FILE__, E.getCDetailMsg());
+ }
+} /* test_open_object_header() */
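The getObjId/closeObjId pair tested above corresponds roughly to opening an object by path and closing it through the object interface of the C API; a minimal sketch (function name illustrative):

    #include "H5Cpp.h"

    // Open `path`, report what kind of identifier came back, then close it (sketch only).
    static void probe_object(hid_t file_id, const char* path)
    {
        hid_t obj = H5Oopen(file_id, path, H5P_DEFAULT);
        if (obj >= 0) {
            H5I_type_t kind = H5Iget_type(obj);   // H5I_GROUP, H5I_DATATYPE, or H5I_DATASET
            (void)kind;
            H5Oclose(obj);
        }
    }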
+
/*-------------------------------------------------------------------------
- * Function: test_objects
+ * Function: test_objects
*
- * Purpose: Tests HDF5 object related functionality
+ * Purpose Tests HDF5 object related functionality
*
- * Return: Success: 0
- * Failure: -1
+ * Return Success: 0
+ * Failure: -1
*
- * Programmer: Binh-Minh Ribler
- * Friday, Mar 4, 2014
+ * Programmer Binh-Minh Ribler
+ * Friday, Mar 4, 2014
*
* Modifications:
*
@@ -318,19 +536,21 @@ void test_object()
MESSAGE(5, ("Testing Object Functions\n"));
test_get_objname(); // Test get object name from groups/datasets
- test_get_objname_ontypes(); // Test get object name from types
+ test_existance(); // Test checking for object existence
+ test_get_objname_ontypes(); // Test get object name from types
test_get_objtype(); // Test get object type
+ test_open_object_header(); // Test object header functions (H5O)
} // test_objects
/*-------------------------------------------------------------------------
* Function: cleanup_objects
*
- * Purpose: Cleanup temporary test files
+ * Purpose Cleanup temporary test files
*
- * Return: none
+ * Return none
*
- * Programmer: (use C version)
+ * Programmer (use C version)
*
* Modifications:
*
diff --git a/c++/test/trefer.cpp b/c++/test/trefer.cpp
index 9bc2eb0..9074154 100644
--- a/c++/test/trefer.cpp
+++ b/c++/test/trefer.cpp
@@ -5,18 +5,16 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*****************************************************************************
FILE
trefer.cpp - HDF5 C++ testing the functionalities associated with the C
- Reference interface (H5R)
+ Reference interface (H5R)
***************************************************************************/
#ifdef OLD_HEADER_FILENAME
@@ -79,100 +77,100 @@ test_reference_params(void)
H5File* file1 = NULL;
try {
- hobj_ref_t *wbuf, // buffer to write to disk
- *rbuf, // buffer read from disk
- *tbuf; // temp. buffer read from disk
+ hobj_ref_t *wbuf, // buffer to write to disk
+ *rbuf, // buffer read from disk
+ *tbuf; // temp. buffer read from disk
- // Allocate write & read buffers
- int temp_size = MAX(sizeof(unsigned),sizeof(hobj_ref_t));
- wbuf=(hobj_ref_t*)HDmalloc(temp_size*SPACE1_DIM1);
- rbuf=(hobj_ref_t*)HDmalloc(temp_size*SPACE1_DIM1);
- tbuf=(hobj_ref_t*)HDmalloc(temp_size*SPACE1_DIM1);
+ // Allocate write & read buffers
+ int temp_size = MAX(sizeof(unsigned),sizeof(hobj_ref_t));
+ wbuf=(hobj_ref_t*)HDmalloc(temp_size*SPACE1_DIM1);
+ rbuf=(hobj_ref_t*)HDmalloc(temp_size*SPACE1_DIM1);
+ tbuf=(hobj_ref_t*)HDmalloc(temp_size*SPACE1_DIM1);
// Create file FILE1
file1 = new H5File (FILE1, H5F_ACC_TRUNC);
- // Create dataspace for datasets
- hsize_t dims1[] = {SPACE1_DIM1};
- DataSpace sid1(SPACE1_RANK, dims1);
-
- // Create a group
- Group group = file1->createGroup("Group1");
-
- // Set group's comment
- group.setComment(".", write_comment);
-
- // Create a dataset (inside /Group1)
- DataSet dataset = group.createDataSet(DSET1_NAME, PredType::NATIVE_UINT, sid1);
-
- unsigned *tu32; // Temporary pointer to uint32 data
- int i;
- for (tu32=(unsigned *)wbuf, i=0; i<SPACE1_DIM1; i++)
- *tu32++=i*3; // from C test
-
- // Write selection to disk
- dataset.write(wbuf, PredType::NATIVE_UINT);
-
- // Close Dataset
- dataset.close();
-
- // Create another dataset (inside /Group1)
- dataset = group.createDataSet("Dataset2", PredType::NATIVE_UCHAR, sid1);
-
- // Close Dataset
- dataset.close();
-
- // Create a datatype to refer to
- CompType dtype1(sizeof(s1_t));
-
- // Insert fields
- dtype1.insertMember(MEMBER1, HOFFSET(s1_t, a), PredType::NATIVE_INT);
- dtype1.insertMember(MEMBER2, HOFFSET(s1_t, b), PredType::NATIVE_INT);
- dtype1.insertMember(MEMBER3, HOFFSET(s1_t, c), PredType::NATIVE_FLOAT);
-
- // Save datatype for later
- dtype1.commit(group, "Datatype1");
-
- // Close datatype and group
- dtype1.close();
- group.close();
-
- // Create a dataset
- dataset = file1->createDataSet("Dataset3", PredType::STD_REF_OBJ, sid1);
-
- /* Test parameters to H5Location::reference */
- try {
- file1->reference(NULL, "/Group1/Dataset1");
- } catch (ReferenceException& E) {} // We expect this to fail
- try {
- file1->reference(&wbuf[0], NULL);
- } catch (ReferenceException& E) {} // We expect this to fail
- try {
- file1->reference(&wbuf[0], "");
- } catch (ReferenceException& E) {} // We expect this to fail
- try {
- file1->reference(&wbuf[0], "/Group1/Dataset1", H5R_MAXTYPE);
- } catch (ReferenceException& E) {} // We expect this to fail
- try {
- file1->reference(&wbuf[0], "/Group1/Dataset1", H5R_DATASET_REGION);
- } catch (ReferenceException& E) {} // We expect this to fail
-
- // Close resources
- dataset.close();
- file1->close();
- // Let sid1 go out of scope
-
- // Free memory buffers
- HDfree(wbuf);
- HDfree(rbuf);
- HDfree(tbuf);
-
- PASSED();
+ // Create dataspace for datasets
+ hsize_t dims1[] = {SPACE1_DIM1};
+ DataSpace sid1(SPACE1_RANK, dims1);
+
+ // Create a group
+ Group group = file1->createGroup("Group1");
+
+ // Set group's comment
+ group.setComment(".", write_comment);
+
+ // Create a dataset (inside /Group1)
+ DataSet dataset = group.createDataSet(DSET1_NAME, PredType::NATIVE_UINT, sid1);
+
+ unsigned *tu32; // Temporary pointer to uint32 data
+ int i;
+ for (tu32=(unsigned *)wbuf, i=0; i<SPACE1_DIM1; i++)
+ *tu32++=i*3; // from C test
+
+ // Write selection to disk
+ dataset.write(wbuf, PredType::NATIVE_UINT);
+
+ // Close Dataset
+ dataset.close();
+
+ // Create another dataset (inside /Group1)
+ dataset = group.createDataSet("Dataset2", PredType::NATIVE_UCHAR, sid1);
+
+ // Close Dataset
+ dataset.close();
+
+ // Create a datatype to refer to
+ CompType dtype1(sizeof(s1_t));
+
+ // Insert fields
+ dtype1.insertMember(MEMBER1, HOFFSET(s1_t, a), PredType::NATIVE_INT);
+ dtype1.insertMember(MEMBER2, HOFFSET(s1_t, b), PredType::NATIVE_INT);
+ dtype1.insertMember(MEMBER3, HOFFSET(s1_t, c), PredType::NATIVE_FLOAT);
+
+ // Save datatype for later
+ dtype1.commit(group, "Datatype1");
+
+ // Close datatype and group
+ dtype1.close();
+ group.close();
+
+ // Create a dataset
+ dataset = file1->createDataSet("Dataset3", PredType::STD_REF_OBJ, sid1);
+
+ /* Test parameters to H5Location::reference */
+ try {
+ file1->reference(NULL, "/Group1/Dataset1");
+ } catch (ReferenceException& E) {} // We expect this to fail
+ try {
+ file1->reference(&wbuf[0], NULL);
+ } catch (ReferenceException& E) {} // We expect this to fail
+ try {
+ file1->reference(&wbuf[0], "");
+ } catch (ReferenceException& E) {} // We expect this to fail
+ try {
+ file1->reference(&wbuf[0], "/Group1/Dataset1", H5R_MAXTYPE);
+ } catch (ReferenceException& E) {} // We expect this to fail
+ try {
+ file1->reference(&wbuf[0], "/Group1/Dataset1", H5R_DATASET_REGION);
+ } catch (ReferenceException& E) {} // We expect this to fail
+
+ // Close resources
+ dataset.close();
+ file1->close();
+ // Let sid1 go out of scope
+
+ // Free memory buffers
+ HDfree(wbuf);
+ HDfree(rbuf);
+ HDfree(tbuf);
+
+ PASSED();
} // end try
catch (Exception& E)
{
- issue_fail_msg("test_reference_param()",__LINE__,__FILE__,
- E.getCFuncName(), E.getCDetailMsg());
+ issue_fail_msg("test_reference_param()",__LINE__,__FILE__,
+ E.getCFuncName(), E.getCDetailMsg());
}
if(file1)
@@ -182,7 +180,7 @@ test_reference_params(void)
/****************************************************************
**
** test_reference_obj(): Test basic object reference functions
-** to various kinds of objects
+** to various kinds of objects
**
****************************************************************/
static void test_reference_obj(void)
@@ -195,176 +193,176 @@ static void test_reference_obj(void)
H5File* file1 = NULL;
try {
- hobj_ref_t *wbuf, // buffer to write to disk
- *rbuf, // buffer read from disk
- *tbuf; // temp. buffer read from disk
+ hobj_ref_t *wbuf, // buffer to write to disk
+ *rbuf, // buffer read from disk
+ *tbuf; // temp. buffer read from disk
- // Allocate write & read buffers
- int temp_size = MAX(sizeof(unsigned),sizeof(hobj_ref_t));
- wbuf=(hobj_ref_t*)HDmalloc(temp_size*SPACE1_DIM1);
- rbuf=(hobj_ref_t*)HDmalloc(temp_size*SPACE1_DIM1);
- tbuf=(hobj_ref_t*)HDmalloc(temp_size*SPACE1_DIM1);
+ // Allocate write & read buffers
+ int temp_size = MAX(sizeof(unsigned),sizeof(hobj_ref_t));
+ wbuf=(hobj_ref_t*)HDmalloc(temp_size*SPACE1_DIM1);
+ rbuf=(hobj_ref_t*)HDmalloc(temp_size*SPACE1_DIM1);
+ tbuf=(hobj_ref_t*)HDmalloc(temp_size*SPACE1_DIM1);
// Create file FILE1
file1 = new H5File (FILE1, H5F_ACC_TRUNC);
- // Create dataspace for datasets
- hsize_t dims1[] = {SPACE1_DIM1};
- DataSpace sid1(SPACE1_RANK, dims1);
+ // Create dataspace for datasets
+ hsize_t dims1[] = {SPACE1_DIM1};
+ DataSpace sid1(SPACE1_RANK, dims1);
- // Create dataset access property list
- PropList dapl(H5P_DATASET_ACCESS);
+ // Create dataset access property list
+ PropList dapl(H5P_DATASET_ACCESS);
- // Create a group
- Group group = file1->createGroup("Group1");
+ // Create a group
+ Group group = file1->createGroup("Group1");
- // Set group's comment
- group.setComment(".", write_comment);
+ // Set group's comment
+ group.setComment(".", write_comment);
- // Create a dataset (inside /Group1)
- DataSet dataset = group.createDataSet(DSET1_NAME, PredType::NATIVE_UINT, sid1);
+ // Create a dataset (inside /Group1)
+ DataSet dataset = group.createDataSet(DSET1_NAME, PredType::NATIVE_UINT, sid1);
- unsigned *tu32; // Temporary pointer to uint32 data
- for (tu32 = (unsigned *)wbuf, i = 0; i < SPACE1_DIM1; i++)
- *tu32++=i*3; // from C test
+ unsigned *tu32; // Temporary pointer to uint32 data
+ for (tu32 = (unsigned *)wbuf, i = 0; i < SPACE1_DIM1; i++)
+ *tu32++=i*3; // from C test
- // Write selection to disk
- dataset.write(wbuf, PredType::NATIVE_UINT);
+ // Write selection to disk
+ dataset.write(wbuf, PredType::NATIVE_UINT);
- // Close Dataset
- dataset.close();
+ // Close Dataset
+ dataset.close();
- // Create another dataset (inside /Group1)
- dataset = group.createDataSet("Dataset2", PredType::NATIVE_UCHAR, sid1);
+ // Create another dataset (inside /Group1)
+ dataset = group.createDataSet("Dataset2", PredType::NATIVE_UCHAR, sid1);
- // Close Dataset
- dataset.close();
+ // Close Dataset
+ dataset.close();
- // Create a datatype to refer to
- CompType dtype1(sizeof(s1_t));
+ // Create a datatype to refer to
+ CompType dtype1(sizeof(s1_t));
- // Insert fields
- dtype1.insertMember(MEMBER1, HOFFSET(s1_t, a), PredType::NATIVE_INT);
- dtype1.insertMember(MEMBER2, HOFFSET(s1_t, b), PredType::NATIVE_INT);
- dtype1.insertMember(MEMBER3, HOFFSET(s1_t, c), PredType::NATIVE_FLOAT);
+ // Insert fields
+ dtype1.insertMember(MEMBER1, HOFFSET(s1_t, a), PredType::NATIVE_INT);
+ dtype1.insertMember(MEMBER2, HOFFSET(s1_t, b), PredType::NATIVE_INT);
+ dtype1.insertMember(MEMBER3, HOFFSET(s1_t, c), PredType::NATIVE_FLOAT);
- // Save datatype for later
- dtype1.commit(group, "Datatype1");
+ // Save datatype for later
+ dtype1.commit(group, "Datatype1");
- // Close datatype and group
- dtype1.close();
- group.close();
+ // Close datatype and group
+ dtype1.close();
+ group.close();
- // Create a dataset
- dataset = file1->createDataSet("Dataset3", PredType::STD_REF_OBJ, sid1);
+ // Create a dataset
+ dataset = file1->createDataSet("Dataset3", PredType::STD_REF_OBJ, sid1);
- // Create reference to dataset and test getRefObjType
- file1->reference(&wbuf[0], "/Group1/Dataset1");
- H5O_type_t refobj_type = dataset.getRefObjType(&wbuf[0], H5R_OBJECT);
- verify_val(refobj_type, H5O_TYPE_DATASET, "DataSet::getRefObjType",__LINE__,__FILE__);
+ // Create reference to dataset and test getRefObjType
+ file1->reference(&wbuf[0], "/Group1/Dataset1");
+ H5O_type_t refobj_type = dataset.getRefObjType(&wbuf[0], H5R_OBJECT);
+ verify_val(refobj_type, H5O_TYPE_DATASET, "DataSet::getRefObjType",__LINE__,__FILE__);
- // Create reference to dataset and test getRefObjType
- file1->reference(&wbuf[1], "/Group1/Dataset2");
- refobj_type = dataset.getRefObjType(&wbuf[1], H5R_OBJECT);
- verify_val(refobj_type, H5O_TYPE_DATASET, "DataSet::getRefObjType",__LINE__,__FILE__);
+ // Create reference to dataset and test getRefObjType
+ file1->reference(&wbuf[1], "/Group1/Dataset2");
+ refobj_type = dataset.getRefObjType(&wbuf[1], H5R_OBJECT);
+ verify_val(refobj_type, H5O_TYPE_DATASET, "DataSet::getRefObjType",__LINE__,__FILE__);
- // Create reference to group
- file1->reference(&wbuf[2], "/Group1");
- refobj_type = dataset.getRefObjType(&wbuf[2], H5R_OBJECT);
- verify_val(refobj_type, H5O_TYPE_GROUP, "DataSet::getRefObjType",__LINE__,__FILE__);
+ // Create reference to group
+ file1->reference(&wbuf[2], "/Group1");
+ refobj_type = dataset.getRefObjType(&wbuf[2], H5R_OBJECT);
+ verify_val(refobj_type, H5O_TYPE_GROUP, "DataSet::getRefObjType",__LINE__,__FILE__);
- // Create reference to named datatype
- file1->reference(&wbuf[3], "/Group1/Datatype1");
- refobj_type = dataset.getRefObjType(&wbuf[3], H5R_OBJECT);
- verify_val(refobj_type, H5O_TYPE_NAMED_DATATYPE, "DataSet::getRefObjType",__LINE__,__FILE__);
+ // Create reference to named datatype
+ file1->reference(&wbuf[3], "/Group1/Datatype1");
+ refobj_type = dataset.getRefObjType(&wbuf[3], H5R_OBJECT);
+ verify_val(refobj_type, H5O_TYPE_NAMED_DATATYPE, "DataSet::getRefObjType",__LINE__,__FILE__);
- // Write selection to disk
- dataset.write(wbuf, PredType::STD_REF_OBJ);
+ // Write selection to disk
+ dataset.write(wbuf, PredType::STD_REF_OBJ);
- // Close disk dataspace, dataset, and file
- sid1.close();
- dataset.close();
- delete file1;
+ // Close disk dataspace, dataset, and file
+ sid1.close();
+ dataset.close();
+ delete file1;
- // Re-open the file
- file1 = new H5File(FILE1, H5F_ACC_RDWR);
+ // Re-open the file
+ file1 = new H5File(FILE1, H5F_ACC_RDWR);
- // Open the dataset
- dataset = file1->openDataSet("/Dataset3");
+ // Open the dataset
+ dataset = file1->openDataSet("/Dataset3");
- // Read selection from disk
- dataset.read(rbuf, PredType::STD_REF_OBJ);
+ // Read selection from disk
+ dataset.read(rbuf, PredType::STD_REF_OBJ);
- // Dereference dataset object by ctor, from the location where
- // 'dataset' is located
- DataSet dset2(dataset, &rbuf[0], H5R_OBJECT, dapl);
+ // Dereference dataset object by ctor, from the location where
+ // 'dataset' is located
+ DataSet dset2(dataset, &rbuf[0], H5R_OBJECT, dapl);
- // Check information in the referenced dataset
- sid1 = dset2.getSpace();
- hssize_t n_elements = sid1.getSimpleExtentNpoints();
- verify_val((long)n_elements, 4, "DataSpace::getSimpleExtentNpoints",__LINE__,__FILE__);
+ // Check information in the referenced dataset
+ sid1 = dset2.getSpace();
+ hssize_t n_elements = sid1.getSimpleExtentNpoints();
+ verify_val((long)n_elements, 4, "DataSpace::getSimpleExtentNpoints",__LINE__,__FILE__);
- // Read from disk
- dset2.read(tbuf, PredType::NATIVE_UINT);
+ // Read from disk
+ dset2.read(tbuf, PredType::NATIVE_UINT);
- for(tu32 = (unsigned *)tbuf, i = 0; i < SPACE1_DIM1; i++, tu32++)
- verify_val(*tu32, (uint32_t)(i*3), "DataSpace::getSimpleExtentNpoints",__LINE__,__FILE__);
+ for(tu32 = (unsigned *)tbuf, i = 0; i < SPACE1_DIM1; i++, tu32++)
+ verify_val(*tu32, (uint32_t)(i*3), "DataSpace::getSimpleExtentNpoints",__LINE__,__FILE__);
- // Close dereferenced dataset
- dset2.close();
+ // Close dereferenced dataset
+ dset2.close();
- // Dereference group object from the location where 'dataset' is located
- group.dereference(dataset, &rbuf[2]);
+ // Dereference group object from the location where 'dataset' is located
+ group.dereference(dataset, &rbuf[2]);
- // Get group's comment using
- // H5std_string getComment(const char* name, <buf_size=0 by default>)
- H5std_string read_comment1 = group.getComment(".", 10);
- verify_val(read_comment1.c_str(), write_comment, "Group::getComment",__LINE__,__FILE__);
+ // Get group's comment using
+ // H5std_string getComment(const char* name, <buf_size=0 by default>)
+ H5std_string read_comment1 = group.getComment(".", 10);
+ verify_val(read_comment1.c_str(), write_comment, "Group::getComment",__LINE__,__FILE__);
- // Test with the old default value
- read_comment1 = group.getComment(".", 256);
- verify_val(read_comment1.c_str(), write_comment, "Group::getComment",__LINE__,__FILE__);
+ // Test with the old default value
+ read_comment1 = group.getComment(".", 256);
+ verify_val(read_comment1.c_str(), write_comment, "Group::getComment",__LINE__,__FILE__);
- // Test that getComment handles failures gracefully, using
- // H5std_string getComment(const char* name, <buf_size=0 by default>)
- try {
- H5std_string read_comment_tmp = group.getComment(NULL);
- }
- catch (Exception& E) {} // We expect this to fail
+ // Test that getComment handles failures gracefully, using
+ // H5std_string getComment(const char* name, <buf_size=0 by default>)
+ try {
+ H5std_string read_comment_tmp = group.getComment(NULL);
+ }
+ catch (Exception& E) {} // We expect this to fail
- // Close group
- group.close();
+ // Close group
+ group.close();
- /*
- * Verify correct referenced datatype
- */
- // Open datatype object
- dtype1.dereference(dataset, &rbuf[3]);
+ /*
+ * Verify correct referenced datatype
+ */
+ // Open datatype object
+ dtype1.dereference(dataset, &rbuf[3]);
- // Verify correct datatype
+ // Verify correct datatype
H5T_class_t tclass;
tclass = dtype1.getClass();
- verify_val(tclass, H5T_COMPOUND, "DataType::getClass",__LINE__,__FILE__);
- int n_members = dtype1.getNmembers();
+ verify_val(tclass, H5T_COMPOUND, "DataType::getClass",__LINE__,__FILE__);
+ int n_members = dtype1.getNmembers();
verify_val(n_members, 3, "CompType::getNmembers",__LINE__,__FILE__);
- // Close all objects and file
- dtype1.close();
- dataset.close();
- file1->close();
+ // Close all objects and file
+ dtype1.close();
+ dataset.close();
+ file1->close();
- // Free allocated buffers
- HDfree(wbuf);
- HDfree(rbuf);
- HDfree(tbuf);
+ // Free allocated buffers
+ HDfree(wbuf);
+ HDfree(rbuf);
+ HDfree(tbuf);
- PASSED();
+ PASSED();
} // end try
catch (Exception& E)
{
- issue_fail_msg("test_reference_obj()",__LINE__,__FILE__,
- E.getCFuncName(), E.getCDetailMsg());
+ issue_fail_msg("test_reference_obj()",__LINE__,__FILE__,
+ E.getCFuncName(), E.getCDetailMsg());
}
if(file1)
@@ -376,7 +374,7 @@ static void test_reference_obj(void)
**
** test_reference_group(): Test object reference functionality
** Tests for correct behavior of various routines on
-** dereferenced group
+** dereferenced group
**
****************************************************************/
#define GROUPNAME "/group"
@@ -389,8 +387,8 @@ static void test_reference_obj(void)
static void
test_reference_group(void)
{
- hobj_ref_t wref; /* Reference to write */
- hobj_ref_t rref; /* Reference to read */
+ hobj_ref_t wref; /* Reference to write */
+ hobj_ref_t rref; /* Reference to read */
const H5std_string write_comment="Foo!"; // Comments for group
// Output message about test being performed
@@ -398,102 +396,102 @@ test_reference_group(void)
H5File* file1 = NULL;
try {
- /*
- * Create file with a group and a dataset containing an object
- * reference to the group
- */
+ /*
+ * Create file with a group and a dataset containing an object
+ * reference to the group
+ */
// Create file FILE1
file1 = new H5File (FILE1, H5F_ACC_TRUNC);
- // Create scalar dataspace
- DataSpace sid1;
+ // Create scalar dataspace
+ DataSpace sid1;
- // Create a group
- Group group = file1->createGroup(GROUPNAME);
+ // Create a group
+ Group group = file1->createGroup(GROUPNAME);
- /* Create nested groups */
- Group group2 = group.createGroup(GROUPNAME2);
- group2.close();
- group2 = group.createGroup(GROUPNAME3);
- group2.close();
+ /* Create nested groups */
+ Group group2 = group.createGroup(GROUPNAME2);
+ group2.close();
+ group2 = group.createGroup(GROUPNAME3);
+ group2.close();
- // Create bottom dataset
- DataSet dset1 = group.createDataSet(DSETNAME2, PredType::NATIVE_INT, sid1);
- dset1.close();
+ // Create bottom dataset
+ DataSet dset1 = group.createDataSet(DSETNAME2, PredType::NATIVE_INT, sid1);
+ dset1.close();
- // Close group 1
- group.close();
+ // Close group 1
+ group.close();
- // Create dataset
- DataSet dset2 = file1->createDataSet(DSETNAME, PredType::STD_REF_OBJ, sid1);
+ // Create dataset
+ DataSet dset2 = file1->createDataSet(DSETNAME, PredType::STD_REF_OBJ, sid1);
- file1->reference(&wref, GROUPNAME);
+ file1->reference(&wref, GROUPNAME);
- // Write selection to disk
- dset2.write(&wref, PredType::STD_REF_OBJ);
+ // Write selection to disk
+ dset2.write(&wref, PredType::STD_REF_OBJ);
- // Close resources
- dset2.close();
- sid1.close();
- file1->close();
+ // Close resources
+ dset2.close();
+ sid1.close();
+ file1->close();
- /*
- * Re-open the file and test deferencing group
- */
+ /*
+ * Re-open the file and test dereferencing the group
+ */
- // Re-open file
+ // Re-open file
file1->openFile(FILE1, H5F_ACC_RDWR);
- // Re-open dataset
- dset1 = file1->openDataSet(DSETNAME);
+ // Re-open dataset
+ dset1 = file1->openDataSet(DSETNAME);
- // Read in the reference
- dset1.read(&rref, PredType::STD_REF_OBJ);
+ // Read in the reference
+ dset1.read(&rref, PredType::STD_REF_OBJ);
- // Dereference to get the group
- Group refgroup(dset1, &rref);
+ // Dereference to get the group
+ Group refgroup(dset1, &rref);
- // Dereference group object the other way
- group.dereference(dset1, &rref);
+ // Dereference group object the other way
+ group.dereference(dset1, &rref);
- /*
- * Various queries on the group opened
- */
+ /*
+ * Various queries on the group opened
+ */
- // Check number of objects in the group dereferenced by constructor
- hsize_t nobjs = refgroup.getNumObjs();
- verify_val(nobjs, (hsize_t)3, "H5Group::getNumObjs",__LINE__,__FILE__);
+ // Check number of objects in the group dereferenced by constructor
+ hsize_t nobjs = refgroup.getNumObjs();
+ verify_val(nobjs, (hsize_t)3, "H5Group::getNumObjs",__LINE__,__FILE__);
- // Check number of objects in the group dereferenced by ::reference
- nobjs = group.getNumObjs();
- verify_val(nobjs, (hsize_t)3, "H5Group::getNumObjs",__LINE__,__FILE__);
+ // Check number of objects in the group dereferenced by ::reference
+ nobjs = group.getNumObjs();
+ verify_val(nobjs, (hsize_t)3, "H5Group::getNumObjs",__LINE__,__FILE__);
- // Check getting file name given the group dereferenced via constructor
- H5std_string fname = refgroup.getFileName();
- verify_val(fname, FILE1, "H5Group::getFileName",__LINE__,__FILE__);
+ // Check getting file name given the group dereferenced via constructor
+ H5std_string fname = refgroup.getFileName();
+ verify_val(fname, FILE1, "H5Group::getFileName",__LINE__,__FILE__);
- // Check getting file name given the group dereferenced by ::reference
- fname = group.getFileName();
- verify_val(fname, FILE1, "H5Group::getFileName",__LINE__,__FILE__);
-
- // Unlink one of the objects in the dereferenced group, and re-check
- refgroup.unlink(GROUPNAME2);
- nobjs = refgroup.getNumObjs();
- verify_val(nobjs, (hsize_t)2, "H5Group::getNumObjs",__LINE__,__FILE__);
-
- // Close resources
- group.close();
- refgroup.close();
- dset1.close();
- file1->close();
-
- PASSED();
+ // Check getting file name given the group dereferenced by ::reference
+ fname = group.getFileName();
+ verify_val(fname, FILE1, "H5Group::getFileName",__LINE__,__FILE__);
+
+ // Unlink one of the objects in the dereferenced group, and re-check
+ refgroup.unlink(GROUPNAME2);
+ nobjs = refgroup.getNumObjs();
+ verify_val(nobjs, (hsize_t)2, "H5Group::getNumObjs",__LINE__,__FILE__);
+
+ // Close resources
+ group.close();
+ refgroup.close();
+ dset1.close();
+ file1->close();
+
+ PASSED();
} // end try
catch (Exception& E)
{
- issue_fail_msg("test_reference_group()",__LINE__,__FILE__,
- E.getCFuncName(), E.getCDetailMsg());
+ issue_fail_msg("test_reference_group()",__LINE__,__FILE__,
+ E.getCFuncName(), E.getCDetailMsg());
}
if(file1)
@@ -509,283 +507,283 @@ test_reference_group(void)
static void
test_reference_region_1D(void)
{
- hsize_t start[SPACE3_RANK]; /* Starting location of hyperslab */
- hsize_t stride[SPACE3_RANK]; /* Stride of hyperslab */
- hsize_t count[SPACE3_RANK]; /* Element count of hyperslab */
- hsize_t block[SPACE3_RANK]; /* Block size of hyperslab */
- hsize_t coord1[POINT1_NPOINTS][SPACE3_RANK]; /* Coordinates for point selection */
+ hsize_t start[SPACE3_RANK]; /* Starting location of hyperslab */
+ hsize_t stride[SPACE3_RANK]; /* Stride of hyperslab */
+ hsize_t count[SPACE3_RANK]; /* Element count of hyperslab */
+ hsize_t block[SPACE3_RANK]; /* Block size of hyperslab */
+ hsize_t coord1[POINT1_NPOINTS][SPACE3_RANK]; /* Coordinates for point selection */
hsize_t * coords; /* Coordinate buffer */
- hsize_t low[SPACE3_RANK]; /* Selection bounds */
- hsize_t high[SPACE3_RANK]; /* Selection bounds */
+ hsize_t low[SPACE3_RANK]; /* Selection bounds */
+ hsize_t high[SPACE3_RANK]; /* Selection bounds */
int i; /* counting variables */
// Output message about test being performed
SUBTEST("1-D Dataset Region Reference Functions");
try {
- hdset_reg_ref_t *wbuf, // buffer to write to disk
- *rbuf; // buffer read from disk
- uint8_t *dwbuf, // Buffer for writing numeric data to disk
- *drbuf; // Buffer for reading numeric data from disk
+ hdset_reg_ref_t *wbuf, // buffer to write to disk
+ *rbuf; // buffer read from disk
+ uint8_t *dwbuf, // Buffer for writing numeric data to disk
+ *drbuf; // Buffer for reading numeric data from disk
- // Allocate write & read buffers
- wbuf = (hdset_reg_ref_t *)HDcalloc(sizeof(hdset_reg_ref_t), (size_t)SPACE1_DIM1);
- rbuf = (hdset_reg_ref_t *)HDmalloc(sizeof(hdset_reg_ref_t) * SPACE1_DIM1);
- dwbuf = (uint8_t *)HDmalloc(sizeof(uint8_t) * SPACE3_DIM1);
- drbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), (size_t)SPACE3_DIM1);
+ // Allocate write & read buffers
+ wbuf = (hdset_reg_ref_t *)HDcalloc(sizeof(hdset_reg_ref_t), (size_t)SPACE1_DIM1);
+ rbuf = (hdset_reg_ref_t *)HDmalloc(sizeof(hdset_reg_ref_t) * SPACE1_DIM1);
+ dwbuf = (uint8_t *)HDmalloc(sizeof(uint8_t) * SPACE3_DIM1);
+ drbuf = (uint8_t *)HDcalloc(sizeof(uint8_t), (size_t)SPACE3_DIM1);
// Create file FILE1
H5File file1(FILE2, H5F_ACC_TRUNC);
- // Create dataspace for datasets
- hsize_t dims3[] = {SPACE3_DIM1};
- DataSpace sid3(SPACE3_RANK, dims3);
+ // Create dataspace for datasets
+ hsize_t dims3[] = {SPACE3_DIM1};
+ DataSpace sid3(SPACE3_RANK, dims3);
- // Create dataset access property list
- PropList dapl(H5P_DATASET_ACCESS);
+ // Create dataset access property list
+ PropList dapl(H5P_DATASET_ACCESS);
- // Create a dataset
- DataSet dset3 = file1.createDataSet(DSET2_NAME, PredType::STD_U8LE, sid3);
+ // Create a dataset
+ DataSet dset3 = file1.createDataSet(DSET2_NAME, PredType::STD_U8LE, sid3);
- uint8_t *tu8; // Temporary pointer to uint8 data
- for (tu8 = dwbuf, i = 0; i < SPACE3_DIM1; i++)
- *tu8++ = i * 3; // from C test
+ uint8_t *tu8; // Temporary pointer to uint8 data
+ for (tu8 = dwbuf, i = 0; i < SPACE3_DIM1; i++)
+ *tu8++ = i * 3; // from C test
- // Write selection to disk
- dset3.write(dwbuf, PredType::STD_U8LE);
+ // Write selection to disk
+ dset3.write(dwbuf, PredType::STD_U8LE);
- // Close Dataset
- dset3.close();
+ // Close Dataset
+ dset3.close();
- // Create dataspace for datasets
- hsize_t dims1[] = {SPACE1_DIM1};
- DataSpace sid1(SPACE1_RANK, dims1);
+ // Create dataspace for datasets
+ hsize_t dims1[] = {SPACE1_DIM1};
+ DataSpace sid1(SPACE1_RANK, dims1);
- // Create a dataset
- DataSet dset1 = file1.createDataSet(DSET1_NAME, PredType::STD_REF_DSETREG, sid1);
+ // Create a dataset
+ DataSet dset1 = file1.createDataSet(DSET1_NAME, PredType::STD_REF_DSETREG, sid1);
- /*
- * Create references and prepare for testing
- */
+ /*
+ * Create references and prepare for testing
+ */
- /* Select 15 2x1 hyperslabs for first reference */
- start[0] = 2;
- stride[0] = 5;
- count[0] = 15;
- block[0] = 2;
+ /* Select 15 2x1 hyperslabs for first reference */
+ start[0] = 2;
+ stride[0] = 5;
+ count[0] = 15;
+ block[0] = 2;
- // Select a hyperslab region to add to the current selected region
- sid3.selectHyperslab(H5S_SELECT_SET, count, start, stride, block);
+ // Select a hyperslab region to add to the current selected region
+ sid3.selectHyperslab(H5S_SELECT_SET, count, start, stride, block);
- // Get and verify the number of elements in a dataspace selection
- hssize_t nelms = sid3.getSelectNpoints();
- verify_val(nelms, 30, "DataSet::getRefObjType",__LINE__,__FILE__);
+ // Get and verify the number of elements in a dataspace selection
+ hssize_t nelms = sid3.getSelectNpoints();
+ verify_val(nelms, 30, "DataSet::getRefObjType",__LINE__,__FILE__);
- // Store first dataset region
- file1.reference(&wbuf[0], "/Dataset2", sid3);
+ // Store first dataset region
+ file1.reference(&wbuf[0], "/Dataset2", sid3);
- // Get and verify object type
- H5O_type_t obj_type = dset1.getRefObjType(&wbuf[0], H5R_DATASET_REGION);
- verify_val(obj_type, H5O_TYPE_DATASET, "DataSet::getRefObjType",__LINE__,__FILE__);
+ // Get and verify object type
+ H5O_type_t obj_type = dset1.getRefObjType(&wbuf[0], H5R_DATASET_REGION);
+ verify_val(obj_type, H5O_TYPE_DATASET, "DataSet::getRefObjType",__LINE__,__FILE__);
- /* Select sequence of ten points for second reference */
- coord1[0][0] = 16;
- coord1[1][0] = 22;
- coord1[2][0] = 38;
- coord1[3][0] = 41;
- coord1[4][0] = 52;
- coord1[5][0] = 63;
- coord1[6][0] = 70;
- coord1[7][0] = 89;
- coord1[8][0] = 97;
- coord1[9][0] = 3;
-
- // Selects array elements to be included in the selection for sid3
- sid3.selectElements(H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord1);
-
- // Get and verify the number of elements in a dataspace selection
- nelms = sid3.getSelectNpoints();
- verify_val(nelms, 10, "DataSet::getRefObjType",__LINE__,__FILE__);
-
- // Store first dataset region
- file1.reference(&wbuf[1], "/Dataset2", sid3);
-
- // Write selection to disk
- dset1.write(wbuf, PredType::STD_REF_DSETREG);
-
- // Close disk dataspace, dataset, and file
- sid1.close();
- dset1.close();
- sid3.close();
- file1.close();
-
- /*
- * Testing various dereference functions
- */
-
- // Re-open the file
- file1.openFile(FILE2, H5F_ACC_RDWR);
-
- // Open the dataset
- dset1 = file1.openDataSet("/Dataset1");
-
- // Read selection from disk
- dset1.read(rbuf, PredType::STD_REF_DSETREG);
-
- { // Test DataSet::dereference
- dset3.dereference(dset1, &rbuf[0], H5R_DATASET_REGION, dapl);
-
- // Get and verify object type
- obj_type = dset1.getRefObjType(&rbuf[0], H5R_DATASET_REGION);
- verify_val(obj_type, H5O_TYPE_DATASET, "DataSet::getRefObjType",__LINE__,__FILE__);
-
- // Get dataspace of dset3 the verify number of elements
- sid1 = dset3.getSpace();
- nelms = sid1.getSimpleExtentNpoints();
- verify_val((long)nelms, 100, "DataSpace::getSimpleExtentNpoints",__LINE__,__FILE__);
- } // End of test DataSet::dereference
-
- { // Test DataSet constructor -by dereference
- // Dereference dataset object by ctor, from the location where
- // 'dset1' is located
- DataSet newds(dset1, &rbuf[0], H5R_DATASET_REGION, dapl);
-
- // Get dataspace of newds then verify number of elements
- sid1 = newds.getSpace();
- nelms = sid1.getSimpleExtentNpoints();
- verify_val((long)nelms, 100, "DataSpace::getSimpleExtentNpoints",__LINE__,__FILE__);
-
- // Close objects for this mini test
- newds.close();
- sid1.close();
- } // End of test DataSet constructor -by dereference
-
- // Read from disk
- dset3.read(drbuf, PredType::STD_U8LE);
-
- for(tu8 = (uint8_t *)drbuf, i = 0; i < SPACE3_DIM1; i++, tu8++)
- verify_val(*tu8, (uint8_t)(i * 3), "DataSpace::getSimpleExtentNpoints",__LINE__,__FILE__);
-
- /*
- * Test getting the referenced region
- */
-
- // Get region
- DataSpace reg_sp = dset1.getRegion(&rbuf[0]);
-
- // Get and verify number of elements in a dataspace selection
- nelms = reg_sp.getSelectNpoints();
- verify_val((long)nelms, 30, "DataSpace::getSelectNpoints",__LINE__,__FILE__);
-
- // Get and verify number of hyperslab blocks
- nelms = reg_sp.getSelectHyperNblocks();
- verify_val((long)nelms, 15, "DataSpace::getSelectNpoints",__LINE__,__FILE__);
-
- /* Allocate space for the hyperslab blocks */
- coords = (hsize_t *)HDmalloc(nelms * SPACE3_RANK * sizeof(hsize_t) * 2);
-
- // Get the list of hyperslab blocks currently selected
- reg_sp.getSelectHyperBlocklist((hsize_t)0, (hsize_t)nelms, coords);
-
- // Verify values in the list
- verify_val(coords[0], (hsize_t)2, "Hyperslab Coordinates",__LINE__,__FILE__);
- verify_val(coords[1], (hsize_t)3, "Hyperslab Coordinates",__LINE__,__FILE__);
- verify_val(coords[2], (hsize_t)7, "Hyperslab Coordinates",__LINE__,__FILE__);
- verify_val(coords[3], (hsize_t)8, "Hyperslab Coordinates",__LINE__,__FILE__);
- verify_val(coords[4],(hsize_t)12, "Hyperslab Coordinates",__LINE__,__FILE__);
- verify_val(coords[5],(hsize_t)13, "Hyperslab Coordinates",__LINE__,__FILE__);
- verify_val(coords[6],(hsize_t)17, "Hyperslab Coordinates",__LINE__,__FILE__);
- verify_val(coords[7],(hsize_t)18, "Hyperslab Coordinates",__LINE__,__FILE__);
- verify_val(coords[8],(hsize_t)22, "Hyperslab Coordinates",__LINE__,__FILE__);
- verify_val(coords[9],(hsize_t)23, "Hyperslab Coordinates",__LINE__,__FILE__);
- verify_val(coords[10],(hsize_t)27, "Hyperslab Coordinates",__LINE__,__FILE__);
- verify_val(coords[11],(hsize_t)28, "Hyperslab Coordinates",__LINE__,__FILE__);
- verify_val(coords[12],(hsize_t)32, "Hyperslab Coordinates",__LINE__,__FILE__);
- verify_val(coords[13],(hsize_t)33, "Hyperslab Coordinates",__LINE__,__FILE__);
- verify_val(coords[14],(hsize_t)37, "Hyperslab Coordinates",__LINE__,__FILE__);
- verify_val(coords[15],(hsize_t)38, "Hyperslab Coordinates",__LINE__,__FILE__);
- verify_val(coords[16],(hsize_t)42, "Hyperslab Coordinates",__LINE__,__FILE__);
- verify_val(coords[17],(hsize_t)43, "Hyperslab Coordinates",__LINE__,__FILE__);
- verify_val(coords[18],(hsize_t)47, "Hyperslab Coordinates",__LINE__,__FILE__);
- verify_val(coords[19],(hsize_t)48, "Hyperslab Coordinates",__LINE__,__FILE__);
- verify_val(coords[20],(hsize_t)52, "Hyperslab Coordinates",__LINE__,__FILE__);
- verify_val(coords[21],(hsize_t)53, "Hyperslab Coordinates",__LINE__,__FILE__);
- verify_val(coords[22],(hsize_t)57, "Hyperslab Coordinates",__LINE__,__FILE__);
- verify_val(coords[23],(hsize_t)58, "Hyperslab Coordinates",__LINE__,__FILE__);
- verify_val(coords[24],(hsize_t)62, "Hyperslab Coordinates",__LINE__,__FILE__);
- verify_val(coords[25],(hsize_t)63, "Hyperslab Coordinates",__LINE__,__FILE__);
- verify_val(coords[26],(hsize_t)67, "Hyperslab Coordinates",__LINE__,__FILE__);
- verify_val(coords[27],(hsize_t)68, "Hyperslab Coordinates",__LINE__,__FILE__);
- verify_val(coords[28],(hsize_t)72, "Hyperslab Coordinates",__LINE__,__FILE__);
- verify_val(coords[29],(hsize_t)73, "Hyperslab Coordinates",__LINE__,__FILE__);
-
- HDfree(coords);
-
- // Check boundaries
- reg_sp.getSelectBounds(low, high);
- verify_val(low[0],(hsize_t)2, "DataSpace::getSelectBounds",__LINE__,__FILE__);
- verify_val(high[0],(hsize_t)73, "DataSpace::getSelectBounds",__LINE__,__FILE__);
-
- /* Close region space */
- reg_sp.close();
-
- /*
- * Another test on getting the referenced region
- */
-
- // Get region
- DataSpace elm_sp = dset1.getRegion(&rbuf[1]);
-
- // Get and verify number of element points in the current selection
- hssize_t nelmspts = elm_sp.getSelectElemNpoints();
- verify_val((long)nelmspts, 10, "DataSpace::getSelectNpoints",__LINE__,__FILE__);
-
- /* Allocate space for the hyperslab blocks */
- coords = (hsize_t *)HDmalloc(nelmspts * SPACE3_RANK * sizeof(hsize_t));
-
- // Get the list of element points currently selected
- elm_sp.getSelectElemPointlist((hsize_t)0, (hsize_t)nelmspts, coords);
-
- // Verify points
- verify_val(coords[0], coord1[0][0], "Element Coordinates",__LINE__,__FILE__);
- verify_val(coords[1], coord1[1][0], "Element Coordinates",__LINE__,__FILE__);
- verify_val(coords[2], coord1[2][0], "Element Coordinates",__LINE__,__FILE__);
- verify_val(coords[3], coord1[3][0], "Element Coordinates",__LINE__,__FILE__);
- verify_val(coords[4], coord1[4][0], "Element Coordinates",__LINE__,__FILE__);
- verify_val(coords[5], coord1[5][0], "Element Coordinates",__LINE__,__FILE__);
- verify_val(coords[6], coord1[6][0], "Element Coordinates",__LINE__,__FILE__);
- verify_val(coords[7], coord1[7][0], "Element Coordinates",__LINE__,__FILE__);
- verify_val(coords[8], coord1[8][0], "Element Coordinates",__LINE__,__FILE__);
- verify_val(coords[9], coord1[9][0], "Element Coordinates",__LINE__,__FILE__);
-
- HDfree(coords);
-
- // Check boundaries
- elm_sp.getSelectBounds(low, high);
- verify_val(low[0],(hsize_t)3, "DataSpace::getSelectBounds",__LINE__,__FILE__);
- verify_val(high[0],(hsize_t)97, "DataSpace::getSelectBounds",__LINE__,__FILE__);
-
- // Close element space
- elm_sp.close();
-
- // Close resources
- sid1.close();
- dset3.close();
- dset1.close();
- file1.close();
-
- // Free memory buffers
- HDfree(wbuf);
- HDfree(rbuf);
- HDfree(dwbuf);
- HDfree(drbuf);
-
- PASSED();
+ /* Select sequence of ten points for second reference */
+ coord1[0][0] = 16;
+ coord1[1][0] = 22;
+ coord1[2][0] = 38;
+ coord1[3][0] = 41;
+ coord1[4][0] = 52;
+ coord1[5][0] = 63;
+ coord1[6][0] = 70;
+ coord1[7][0] = 89;
+ coord1[8][0] = 97;
+ coord1[9][0] = 3;
+
+ // Selects array elements to be included in the selection for sid3
+ sid3.selectElements(H5S_SELECT_SET, (size_t)POINT1_NPOINTS, (const hsize_t *)coord1);
+
+ // Get and verify the number of elements in a dataspace selection
+ nelms = sid3.getSelectNpoints();
+ verify_val(nelms, 10, "DataSet::getRefObjType",__LINE__,__FILE__);
+
+ // Store first dataset region
+ file1.reference(&wbuf[1], "/Dataset2", sid3);
+
+ // Write selection to disk
+ dset1.write(wbuf, PredType::STD_REF_DSETREG);
+
+ // Close disk dataspace, dataset, and file
+ sid1.close();
+ dset1.close();
+ sid3.close();
+ file1.close();
+
+ /*
+ * Testing various dereference functions
+ */
+
+ // Re-open the file
+ file1.openFile(FILE2, H5F_ACC_RDWR);
+
+ // Open the dataset
+ dset1 = file1.openDataSet("/Dataset1");
+
+ // Read selection from disk
+ dset1.read(rbuf, PredType::STD_REF_DSETREG);
+
+ { // Test DataSet::dereference
+ dset3.dereference(dset1, &rbuf[0], H5R_DATASET_REGION, dapl);
+
+ // Get and verify object type
+ obj_type = dset1.getRefObjType(&rbuf[0], H5R_DATASET_REGION);
+ verify_val(obj_type, H5O_TYPE_DATASET, "DataSet::getRefObjType",__LINE__,__FILE__);
+
+ // Get dataspace of dset3 then verify number of elements
+ sid1 = dset3.getSpace();
+ nelms = sid1.getSimpleExtentNpoints();
+ verify_val((long)nelms, 100, "DataSpace::getSimpleExtentNpoints",__LINE__,__FILE__);
+ } // End of test DataSet::dereference
+
+ { // Test DataSet constructor -by dereference
+ // Dereference dataset object by ctor, from the location where
+ // 'dset1' is located
+ DataSet newds(dset1, &rbuf[0], H5R_DATASET_REGION, dapl);
+
+ // Get dataspace of newds then verify number of elements
+ sid1 = newds.getSpace();
+ nelms = sid1.getSimpleExtentNpoints();
+ verify_val((long)nelms, 100, "DataSpace::getSimpleExtentNpoints",__LINE__,__FILE__);
+
+ // Close objects for this mini test
+ newds.close();
+ sid1.close();
+ } // End of test DataSet constructor -by dereference
+
+ // Read from disk
+ dset3.read(drbuf, PredType::STD_U8LE);
+
+ for(tu8 = (uint8_t *)drbuf, i = 0; i < SPACE3_DIM1; i++, tu8++)
+ verify_val(*tu8, (uint8_t)(i * 3), "DataSpace::getSimpleExtentNpoints",__LINE__,__FILE__);
+
+ /*
+ * Test getting the referenced region
+ */
+
+ // Get region
+ DataSpace reg_sp = dset1.getRegion(&rbuf[0]);
+
+ // Get and verify number of elements in a dataspace selection
+ nelms = reg_sp.getSelectNpoints();
+ verify_val((long)nelms, 30, "DataSpace::getSelectNpoints",__LINE__,__FILE__);
+
+ // Get and verify number of hyperslab blocks
+ nelms = reg_sp.getSelectHyperNblocks();
+ verify_val((long)nelms, 15, "DataSpace::getSelectNpoints",__LINE__,__FILE__);
+
+ /* Allocate space for the hyperslab blocks */
+ coords = (hsize_t *)HDmalloc(nelms * SPACE3_RANK * sizeof(hsize_t) * 2);
+
+ // Get the list of hyperslab blocks currently selected
+ reg_sp.getSelectHyperBlocklist((hsize_t)0, (hsize_t)nelms, coords);
+
+ // Verify values in the list
+ verify_val(coords[0], (hsize_t)2, "Hyperslab Coordinates",__LINE__,__FILE__);
+ verify_val(coords[1], (hsize_t)3, "Hyperslab Coordinates",__LINE__,__FILE__);
+ verify_val(coords[2], (hsize_t)7, "Hyperslab Coordinates",__LINE__,__FILE__);
+ verify_val(coords[3], (hsize_t)8, "Hyperslab Coordinates",__LINE__,__FILE__);
+ verify_val(coords[4],(hsize_t)12, "Hyperslab Coordinates",__LINE__,__FILE__);
+ verify_val(coords[5],(hsize_t)13, "Hyperslab Coordinates",__LINE__,__FILE__);
+ verify_val(coords[6],(hsize_t)17, "Hyperslab Coordinates",__LINE__,__FILE__);
+ verify_val(coords[7],(hsize_t)18, "Hyperslab Coordinates",__LINE__,__FILE__);
+ verify_val(coords[8],(hsize_t)22, "Hyperslab Coordinates",__LINE__,__FILE__);
+ verify_val(coords[9],(hsize_t)23, "Hyperslab Coordinates",__LINE__,__FILE__);
+ verify_val(coords[10],(hsize_t)27, "Hyperslab Coordinates",__LINE__,__FILE__);
+ verify_val(coords[11],(hsize_t)28, "Hyperslab Coordinates",__LINE__,__FILE__);
+ verify_val(coords[12],(hsize_t)32, "Hyperslab Coordinates",__LINE__,__FILE__);
+ verify_val(coords[13],(hsize_t)33, "Hyperslab Coordinates",__LINE__,__FILE__);
+ verify_val(coords[14],(hsize_t)37, "Hyperslab Coordinates",__LINE__,__FILE__);
+ verify_val(coords[15],(hsize_t)38, "Hyperslab Coordinates",__LINE__,__FILE__);
+ verify_val(coords[16],(hsize_t)42, "Hyperslab Coordinates",__LINE__,__FILE__);
+ verify_val(coords[17],(hsize_t)43, "Hyperslab Coordinates",__LINE__,__FILE__);
+ verify_val(coords[18],(hsize_t)47, "Hyperslab Coordinates",__LINE__,__FILE__);
+ verify_val(coords[19],(hsize_t)48, "Hyperslab Coordinates",__LINE__,__FILE__);
+ verify_val(coords[20],(hsize_t)52, "Hyperslab Coordinates",__LINE__,__FILE__);
+ verify_val(coords[21],(hsize_t)53, "Hyperslab Coordinates",__LINE__,__FILE__);
+ verify_val(coords[22],(hsize_t)57, "Hyperslab Coordinates",__LINE__,__FILE__);
+ verify_val(coords[23],(hsize_t)58, "Hyperslab Coordinates",__LINE__,__FILE__);
+ verify_val(coords[24],(hsize_t)62, "Hyperslab Coordinates",__LINE__,__FILE__);
+ verify_val(coords[25],(hsize_t)63, "Hyperslab Coordinates",__LINE__,__FILE__);
+ verify_val(coords[26],(hsize_t)67, "Hyperslab Coordinates",__LINE__,__FILE__);
+ verify_val(coords[27],(hsize_t)68, "Hyperslab Coordinates",__LINE__,__FILE__);
+ verify_val(coords[28],(hsize_t)72, "Hyperslab Coordinates",__LINE__,__FILE__);
+ verify_val(coords[29],(hsize_t)73, "Hyperslab Coordinates",__LINE__,__FILE__);
+
+ HDfree(coords);
+
+ // Check boundaries
+ reg_sp.getSelectBounds(low, high);
+ verify_val(low[0],(hsize_t)2, "DataSpace::getSelectBounds",__LINE__,__FILE__);
+ verify_val(high[0],(hsize_t)73, "DataSpace::getSelectBounds",__LINE__,__FILE__);
+
+ /* Close region space */
+ reg_sp.close();
+
+ /*
+ * Another test on getting the referenced region
+ */
+
+ // Get region
+ DataSpace elm_sp = dset1.getRegion(&rbuf[1]);
+
+ // Get and verify number of element points in the current selection
+ hssize_t nelmspts = elm_sp.getSelectElemNpoints();
+ verify_val((long)nelmspts, 10, "DataSpace::getSelectNpoints",__LINE__,__FILE__);
+
+ /* Allocate space for the hyperslab blocks */
+ coords = (hsize_t *)HDmalloc(nelmspts * SPACE3_RANK * sizeof(hsize_t));
+
+ // Get the list of element points currently selected
+ elm_sp.getSelectElemPointlist((hsize_t)0, (hsize_t)nelmspts, coords);
+
+ // Verify points
+ verify_val(coords[0], coord1[0][0], "Element Coordinates",__LINE__,__FILE__);
+ verify_val(coords[1], coord1[1][0], "Element Coordinates",__LINE__,__FILE__);
+ verify_val(coords[2], coord1[2][0], "Element Coordinates",__LINE__,__FILE__);
+ verify_val(coords[3], coord1[3][0], "Element Coordinates",__LINE__,__FILE__);
+ verify_val(coords[4], coord1[4][0], "Element Coordinates",__LINE__,__FILE__);
+ verify_val(coords[5], coord1[5][0], "Element Coordinates",__LINE__,__FILE__);
+ verify_val(coords[6], coord1[6][0], "Element Coordinates",__LINE__,__FILE__);
+ verify_val(coords[7], coord1[7][0], "Element Coordinates",__LINE__,__FILE__);
+ verify_val(coords[8], coord1[8][0], "Element Coordinates",__LINE__,__FILE__);
+ verify_val(coords[9], coord1[9][0], "Element Coordinates",__LINE__,__FILE__);
+
+ HDfree(coords);
+
+ // Check boundaries
+ elm_sp.getSelectBounds(low, high);
+ verify_val(low[0],(hsize_t)3, "DataSpace::getSelectBounds",__LINE__,__FILE__);
+ verify_val(high[0],(hsize_t)97, "DataSpace::getSelectBounds",__LINE__,__FILE__);
+
+ // Close element space
+ elm_sp.close();
+
+ // Close resources
+ sid1.close();
+ dset3.close();
+ dset1.close();
+ file1.close();
+
+ // Free memory buffers
+ HDfree(wbuf);
+ HDfree(rbuf);
+ HDfree(dwbuf);
+ HDfree(drbuf);
+
+ PASSED();
} // end try
catch (Exception& E)
{
- issue_fail_msg("test_reference_region_1D()",__LINE__,__FILE__,
- E.getCFuncName(), E.getCDetailMsg());
+ issue_fail_msg("test_reference_region_1D()",__LINE__,__FILE__,
+ E.getCFuncName(), E.getCDetailMsg());
}
} /* test_reference_region_1D() */
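The region-reference round trip above can be condensed at the C level; a sketch assuming the pre-1.12 reference API (H5Rcreate/H5Rget_region) and the /Dataset2 path used by the test, with the helper name being illustrative:

    #include "H5Cpp.h"

    // Store the current selection of `space_id` as a region reference, then
    // recover the selection and count its elements (sketch only).
    static hssize_t region_npoints(hid_t file_id, hid_t dset_id, hid_t space_id)
    {
        hdset_reg_ref_t ref;
        H5Rcreate(&ref, file_id, "/Dataset2", H5R_DATASET_REGION, space_id);

        hid_t region = H5Rget_region(dset_id, H5R_DATASET_REGION, &ref);
        hssize_t npoints = H5Sget_select_npoints(region);   // elements in the stored selection
        H5Sclose(region);
        return npoints;
    }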
@@ -823,9 +821,9 @@ void test_reference(void)
/****************************************************************
-** Function: cleanup_reference
-** Purpose: Cleanup temporary test files
-** Return: none
+** Function: cleanup_reference
+** Purpose Cleanup temporary test files
+** Return none
****************************************************************/
extern "C"
void cleanup_reference(void)
diff --git a/c++/test/ttypes.cpp b/c++/test/ttypes.cpp
index 1ef7bdd..f76f780 100644
--- a/c++/test/ttypes.cpp
+++ b/c++/test/ttypes.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*****************************************************************************
@@ -37,7 +35,7 @@ using namespace H5;
* Offset from aligned memory returned by malloc(). This can be used to test
* that type conversions handle non-aligned buffers correctly.
*/
-#define ALIGNMENT 1
+#define ALIGNMENT 1
/*
* Define if you want to test alignment code on a machine that doesn't
@@ -70,7 +68,7 @@ const char *FILENAME[] = {
* endian. If local variable `endian' is H5T_ORDER_BE then the result will
* be I, otherwise the result will be Z-(I+1).
*/
-#define ENDIAN(Z,I) (H5T_ORDER_BE==endian?(I):(Z)-((I)+1))
+#define ENDIAN(Z,I) (H5T_ORDER_BE==endian?(I):(Z)-((I)+1))
typedef enum flt_t {
@@ -86,12 +84,12 @@ typedef enum int_t {
/*-------------------------------------------------------------------------
* Function: test_classes
*
- * Purpose: Test type classes
+ * Purpose Test type classes
*
- * Return: None.
+ * Return None.
*
- * Programmer: Binh-Minh Ribler (using C version)
- * January, 2007
+ * Programmer Binh-Minh Ribler (using C version)
+ * January, 2007
*
* Modifications:
*
@@ -101,38 +99,38 @@ static void test_classes()
{
SUBTEST("PredType::getClass()");
try {
- // maybe later, int curr_nerrors = GetTestNumErrs();
-
- // PredType::NATIVE_INT should be in H5T_INTEGER class
- H5T_class_t tcls = PredType::NATIVE_INT.getClass();
- if (H5T_INTEGER!=tcls) {
- puts(" Invalid type class for H5T_NATIVE_INT");
- }
-
- // PredType::NATIVE_DOUBLE should be in H5T_FLOAT class
- tcls = PredType::NATIVE_DOUBLE.getClass();
- if (H5T_FLOAT!=tcls) {
- verify_val(tcls, H5T_FLOAT, "test_class: invalid type class for NATIVE_DOUBLE -", __LINE__, __FILE__);
- }
- PASSED();
+ // maybe later, int curr_nerrors = GetTestNumErrs();
+
+ // PredType::NATIVE_INT should be in H5T_INTEGER class
+ H5T_class_t tcls = PredType::NATIVE_INT.getClass();
+ if (H5T_INTEGER!=tcls) {
+ puts(" Invalid type class for H5T_NATIVE_INT");
+ }
+
+ // PredType::NATIVE_DOUBLE should be in H5T_FLOAT class
+ tcls = PredType::NATIVE_DOUBLE.getClass();
+ if (H5T_FLOAT!=tcls) {
+ verify_val(tcls, H5T_FLOAT, "test_class: invalid type class for NATIVE_DOUBLE -", __LINE__, __FILE__);
+ }
+ PASSED();
} // end of try block
catch (Exception& E)
{
- issue_fail_msg("test_classes", __LINE__, __FILE__, E.getCDetailMsg());
+ issue_fail_msg("test_classes", __LINE__, __FILE__, E.getCDetailMsg());
}
}
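
For reference (not part of this patch), the getClass() checks above reduce to a few lines when written as a standalone program:

    #include "H5Cpp.h"
    #include <cstdio>
    using namespace H5;

    int main()
    {
        // Predefined types report their datatype class directly.
        if (PredType::NATIVE_INT.getClass() != H5T_INTEGER)
            std::puts("unexpected class for NATIVE_INT");
        if (PredType::NATIVE_DOUBLE.getClass() != H5T_FLOAT)
            std::puts("unexpected class for NATIVE_DOUBLE");
        return 0;
    }
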
/*-------------------------------------------------------------------------
* Function: test_copy
*
- * Purpose: Test datatype copy functionality
+ * Purpose Test datatype copy functionality
*
- * Return: Success: 0
+ * Return Success: 0
*
- * Failure: number of errors
+ * Failure: number of errors
*
- * Programmer: Binh-Minh Ribler (using C version)
- * January, 2007
+ * Programmer Binh-Minh Ribler (using C version)
+ * January, 2007
*
* Modifications:
*
@@ -143,21 +141,21 @@ static void test_copy()
SUBTEST("DataType::copy() and DataType::operator=");
try {
- // Test copying from a predefined datatype using DataType::operator=
- DataType assigned_type;
- assigned_type = PredType::NATIVE_SHORT;
+ // Test copying from a predefined datatype using DataType::operator=
+ DataType assigned_type;
+ assigned_type = PredType::NATIVE_SHORT;
// Test copying a predefined type using DataType::copy
- DataType copied_type;
+ DataType copied_type;
copied_type.copy (PredType::STD_B8LE);
- // Test copying a user-defined type using DataType::operator=
- DataType assigned_usertype;
- assigned_usertype = copied_type;
+ // Test copying a user-defined type using DataType::operator=
+ DataType assigned_usertype;
+ assigned_usertype = copied_type;
- // Test copying from a user-defined datatype using DataType::copy
- DataType copied_usertype;
- copied_usertype.copy(copied_type);
+ // Test copying from a user-defined datatype using DataType::copy
+ DataType copied_usertype;
+ copied_usertype.copy(copied_type);
// Test copying a user-defined int type using DataType::operator=
IntType orig_int(PredType::STD_B8LE);
@@ -171,26 +169,26 @@ static void test_copy()
IntType another_int_type;
another_int_type = new_int_type;
- PASSED();
+ PASSED();
}
catch (Exception& E)
{
- issue_fail_msg("test_copy", __LINE__, __FILE__, E.getCDetailMsg());
+ issue_fail_msg("test_copy", __LINE__, __FILE__, E.getCDetailMsg());
}
}
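
As a side note (not part of this patch), the two copying routes exercised above behave the same way for user code: both DataType::operator= and DataType::copy() produce an independent, unlocked datatype object. A minimal sketch, with the precision value borrowed from the transient-type test later in this file:

    #include "H5Cpp.h"
    using namespace H5;

    int main()
    {
        DataType assigned;
        assigned = PredType::NATIVE_SHORT;   // copy via the assignment operator

        DataType copied;
        copied.copy(PredType::STD_B8LE);     // copy via DataType::copy()

        IntType widened(PredType::NATIVE_INT);
        widened.setPrecision(256);           // copies are not locked, so this succeeds

        widened.close();
        copied.close();
        assigned.close();
        return 0;
    }
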
/*-------------------------------------------------------------------------
- * Function: test_query
+ * Function: test_query
*
- * Purpose: Tests query functions of compound and enumeration types.
+ * Purpose Tests query functions of compound and enumeration types.
*
- * Return: Success: 0
+ * Return Success: 0
*
- * Failure: number of errors
+ * Failure: number of errors
*
- * Programmer: Binh-Minh Ribler (use C version)
- * January, 2007
+ * Programmer Binh-Minh Ribler (use C version)
+ * January, 2007
*
* Modifications:
*
@@ -203,106 +201,128 @@ const H5std_string EnumT_NAME("Enum_type");
static void test_query()
{
typedef struct {
- int a;
- float b;
- long c;
- double d;
+ int a;
+ float b;
+ long c;
+ double d;
} src_typ_t;
- short enum_val;
+ short enum_val;
// Output message about test being performed
SUBTEST("Query functions of compound and enumeration types");
try
{
- // Create File
- H5File file(FILENAME[2], H5F_ACC_TRUNC);
-
- // Create a compound datatype
- CompType tid1(sizeof(src_typ_t));
-
- tid1.insertMember("a", HOFFSET(src_typ_t, a), PredType::NATIVE_INT);
- tid1.insertMember("b", HOFFSET(src_typ_t, b), PredType::NATIVE_FLOAT);
- tid1.insertMember("c", HOFFSET(src_typ_t, c), PredType::NATIVE_LONG);
- tid1.insertMember("d", HOFFSET(src_typ_t, d), PredType::NATIVE_DOUBLE);
-
- // Create a enumerate datatype
- EnumType tid2(sizeof(short));
-
- tid2.insert("RED", (enum_val=0,&enum_val));
- tid2.insert("GREEN", (enum_val=1,&enum_val));
- tid2.insert("BLUE", (enum_val=2,&enum_val));
- tid2.insert("ORANGE", (enum_val=3,&enum_val));
- tid2.insert("YELLOW", (enum_val=4,&enum_val));
-
- // Query member number and member index by name, for compound type
- int nmembs = tid1.getNmembers();
- verify_val(nmembs, 4, "CompType::getNmembers()", __LINE__, __FILE__);
-
- int index = tid1.getMemberIndex("c");
- verify_val(index, 2, "CompType::getMemberIndex()", __LINE__, __FILE__);
-
- // Query member number and member index by name, for enumeration type.
- nmembs = tid2.getNmembers();
- verify_val(nmembs, 5, "EnumType::getNmembers()", __LINE__, __FILE__);
-
- index = tid2.getMemberIndex("ORANGE");
- verify_val(index, 3, "EnumType::getMemberIndex()", __LINE__, __FILE__);
-
- // Commit compound datatype and close it
- tid1.commit(file, CompT_NAME);
- tid1.close();
-
- // Commit enumeration datatype and close it
- tid2.commit(file, EnumT_NAME);
- tid2.close();
-
- // Open the datatypes for query
- tid1 = file.openCompType(CompT_NAME);
- tid2 = file.openEnumType(EnumT_NAME);
-
- // Query member number and member index by name, for compound type
- nmembs = tid1.getNmembers();
- verify_val(nmembs, 4, "CompType::getNmembers()", __LINE__, __FILE__);
- index = tid1.getMemberIndex("c");
- verify_val(index, 2, "CompType::getMemberIndex()", __LINE__, __FILE__);
-
- // Query member number and member index by name, for enumeration type
- nmembs = tid2.getNmembers();
- verify_val(nmembs, 5, "EnumType::getNmembers()", __LINE__, __FILE__);
- index = tid2.getMemberIndex("ORANGE");
- verify_val(index, 3, "EnumType::getMemberIndex()", __LINE__, __FILE__);
-
- // Close datatypes and file
- tid1.close();
- tid2.close();
- file.close();
-
- // Try truncating the file to make sure reference counting is good.
- // If any references to ids of tid1 and tid2 are left unterminated,
- // the truncating will fail, because the file will not be closed in
- // the file.close() above.
- H5File file1(FILENAME[2], H5F_ACC_TRUNC);
-
- PASSED();
+ // Create File
+ H5File file(FILENAME[2], H5F_ACC_TRUNC);
+
+ // Create a compound datatype
+ CompType tid1(sizeof(src_typ_t));
+
+ tid1.insertMember("a", HOFFSET(src_typ_t, a), PredType::NATIVE_INT);
+ tid1.insertMember("b", HOFFSET(src_typ_t, b), PredType::NATIVE_FLOAT);
+ tid1.insertMember("c", HOFFSET(src_typ_t, c), PredType::NATIVE_LONG);
+ tid1.insertMember("d", HOFFSET(src_typ_t, d), PredType::NATIVE_DOUBLE);
+
+ // Create an enumeration datatype
+ EnumType tid2(sizeof(short));
+
+ tid2.insert("RED", (enum_val=0,&enum_val));
+ tid2.insert("GREEN", (enum_val=1,&enum_val));
+ tid2.insert("BLUE", (enum_val=2,&enum_val));
+ tid2.insert("ORANGE", (enum_val=3,&enum_val));
+ tid2.insert("YELLOW", (enum_val=4,&enum_val));
+
+ // Query member number and member index by name, for compound type
+ int nmembs = tid1.getNmembers();
+ verify_val(nmembs, 4, "CompType::getNmembers()", __LINE__, __FILE__);
+
+ int index = tid1.getMemberIndex("c");
+ verify_val(index, 2, "CompType::getMemberIndex()", __LINE__, __FILE__);
+
+ // Query member number and member index by name, for enumeration type.
+ nmembs = tid2.getNmembers();
+ verify_val(nmembs, 5, "EnumType::getNmembers()", __LINE__, __FILE__);
+
+ index = tid2.getMemberIndex("ORANGE");
+ verify_val(index, 3, "EnumType::getMemberIndex()", __LINE__, __FILE__);
+
+ // Commit compound datatype, and test getting the datatype creation
+ // prop list, then close it
+ tid1.commit(file, CompT_NAME);
+ PropList tcpl = tid1.getCreatePlist();
+ if (!IdComponent::isValid(tcpl.getId()))
+ {
+ // Throw an invalid action exception
+ throw InvalidActionException("H5Object::createAttribute", "Datatype creation property list is not valid");
+ }
+ tcpl.close();
+ tid1.close();
+
+ // Commit enumeration datatype, and test getting the datatype creation
+ // prop list, then close it
+ tid2.commit(file, EnumT_NAME);
+ tcpl = tid2.getCreatePlist();
+ if (!IdComponent::isValid(tcpl.getId()))
+ {
+ // Throw an invalid action exception
+ throw InvalidActionException("H5Object::createAttribute", "Datatype creation property list is not valid");
+ }
+ tcpl.close();
+ tid2.close();
+
+ // Open the datatypes for query. Testing both ways
+
+ tid1 = file.openCompType(CompT_NAME);
+ tid1.close();
+ tid2 = file.openEnumType(EnumT_NAME);
+ tid2.close();
+
+ CompType comptype(file, CompT_NAME);
+ EnumType enumtype(file, EnumT_NAME);
+
+ // Query member number and member index by name, for compound type
+ nmembs = comptype.getNmembers();
+ verify_val(nmembs, 4, "CompType::getNmembers()", __LINE__, __FILE__);
+ index = comptype.getMemberIndex("c");
+ verify_val(index, 2, "CompType::getMemberIndex()", __LINE__, __FILE__);
+
+ // Query member number and member index by name, for enumeration type
+ nmembs = enumtype.getNmembers();
+ verify_val(nmembs, 5, "EnumType::getNmembers()", __LINE__, __FILE__);
+ index = enumtype.getMemberIndex("ORANGE");
+ verify_val(index, 3, "EnumType::getMemberIndex()", __LINE__, __FILE__);
+
+ // Close datatypes and file
+ comptype.close();
+ enumtype.close();
+ file.close();
+
+ // Try truncating the file to make sure reference counting is good.
+ // If any references to ids of the accessed types are left unterminated,
+ // the truncation will fail, because the file will not be closed by
+ // the file.close() above.
+ H5File file1(FILENAME[2], H5F_ACC_TRUNC);
+
+ PASSED();
} // end of try block
catch (Exception& E)
{
- issue_fail_msg("test_query", __LINE__, __FILE__, E.getCDetailMsg());
+ issue_fail_msg("test_query", __LINE__, __FILE__, E.getCDetailMsg());
}
} // test_query
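
The new lines above add two things to this test: a check that a committed datatype hands back a valid creation property list via getCreatePlist(), and use of the CompType/EnumType constructors that open a named type directly. A minimal sketch of that flow (not part of the patch; the struct layout and names are illustrative):

    #include "H5Cpp.h"
    using namespace H5;

    struct rec_t { int a; double d; };

    int main()
    {
        H5File file("query_sketch.h5", H5F_ACC_TRUNC);

        CompType ct(sizeof(rec_t));
        ct.insertMember("a", HOFFSET(rec_t, a), PredType::NATIVE_INT);
        ct.insertMember("d", HOFFSET(rec_t, d), PredType::NATIVE_DOUBLE);

        // Commit the type, then ask for its creation property list.
        ct.commit(file, "rec_type");
        PropList tcpl = ct.getCreatePlist();
        tcpl.close();
        ct.close();

        // Reopen the committed type with the constructor instead of openCompType().
        CompType reopened(file, "rec_type");
        int nmembs = reopened.getNmembers();   // expected: 2
        (void)nmembs;

        reopened.close();
        file.close();
        return 0;
    }
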
/*-------------------------------------------------------------------------
- * Function: test_transient
+ * Function: test_transient
*
- * Purpose: Tests transient datatypes.
+ * Purpose Tests transient datatypes.
*
- * Return: Success: 0
+ * Return Success: 0
*
- * Failure: number of errors
+ * Failure: number of errors
*
- * Programmer: Binh-Minh Ribler (use C version)
- * January, 2007
+ * Programmer Binh-Minh Ribler (use C version)
+ * January, 2007
*
* Modifications:
*
@@ -311,58 +331,58 @@ static void test_query()
const char* filename1 = "dtypes1.h5";
static void test_transient ()
{
- static hsize_t ds_size[2] = {10, 20};
+ static hsize_t ds_size[2] = {10, 20};
SUBTEST("Transient datatypes");
try {
- // Create the file and the dataspace.
- H5File file(filename1, H5F_ACC_TRUNC);
- DataSpace space(2, ds_size, ds_size);
-
- // Copying a predefined type results in a modifiable copy
- IntType type(PredType::NATIVE_INT);
- type.setPrecision(256);
-
- // It should not be possible to create an attribute for a transient type
- try {
- Attribute attr(type.createAttribute("attr1", PredType::NATIVE_INT, space));
- // Should FAIL but didn't, so throw an invalid action exception
- throw InvalidActionException("H5Object::createAttribute", "Attempted to commit a predefined datatype.");
- } catch (AttributeIException& err) {} // do nothing, failure expected
-
- // Create a dataset from a transient datatype
- // type.close(); - put trace in H5Tclose to make sure it's closed
- type.copy(PredType::NATIVE_INT);
- DataSet dset(file.createDataSet("dset1", type, space));
-
- // The type returned from a dataset should not be modifiable
- IntType itype(dset);
- try {
- itype.setPrecision(256);
-
- // Should FAIL but didn't, so throw an invalid action exception
- throw InvalidActionException("PredType::setPrecision", "Dataset datatypes should not be modifiable!");
- } catch (DataTypeIException& err) {}
- itype.close();
-
- // Get a copy of the dataset's datatype by applying DataType::copy()
- // to the dataset. The resulted datatype should be modifiable.
- itype.copy(dset);
- itype.setPrecision(256);
- itype.close();
-
- // Close the dataset and reopen it, testing that its type is still
- // read-only. (Note that a copy of it is modifiable.)
- dset.close();
- dset = file.openDataSet("dset1");
-
- // Close objects and file.
- dset.close();
- file.close();
- type.close();
- space.close();
- PASSED();
+ // Create the file and the dataspace.
+ H5File file(filename1, H5F_ACC_TRUNC);
+ DataSpace space(2, ds_size, ds_size);
+
+ // Copying a predefined type results in a modifiable copy
+ IntType type(PredType::NATIVE_INT);
+ type.setPrecision(256);
+
+ // It should not be possible to create an attribute for a transient type
+ try {
+ Attribute attr(type.createAttribute("attr1", PredType::NATIVE_INT, space));
+ // Should FAIL but didn't, so throw an invalid action exception
+ throw InvalidActionException("H5Object::createAttribute", "Attempted to commit a predefined datatype.");
+ } catch (AttributeIException& err) {} // do nothing, failure expected
+
+ // Create a dataset from a transient datatype
+ // type.close(); - put trace in H5Tclose to make sure it's closed
+ type.copy(PredType::NATIVE_INT);
+ DataSet dset(file.createDataSet("dset1", type, space));
+
+ // The type returned from a dataset should not be modifiable
+ IntType itype(dset);
+ try {
+ itype.setPrecision(256);
+
+ // Should FAIL but didn't, so throw an invalid action exception
+ throw InvalidActionException("PredType::setPrecision", "Dataset datatypes should not be modifiable!");
+ } catch (DataTypeIException& err) {}
+ itype.close();
+
+ // Get a copy of the dataset's datatype by applying DataType::copy()
+ // to the dataset. The resulting datatype should be modifiable.
+ itype.copy(dset);
+ itype.setPrecision(256);
+ itype.close();
+
+ // Close the dataset and reopen it, testing that its type is still
+ // read-only. (Note that a copy of it is modifiable.)
+ dset.close();
+ dset = file.openDataSet("dset1");
+
+ // Close objects and file.
+ dset.close();
+ file.close();
+ type.close();
+ space.close();
+ PASSED();
} // end of try block
catch (Exception& E)
{
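
For context only (not part of this patch): the locking rules checked above are that a copy of a predefined type is modifiable, while the datatype handed back by a dataset is read-only until it is copied. A minimal sketch with illustrative file and dataset names:

    #include "H5Cpp.h"
    using namespace H5;

    int main()
    {
        hsize_t dims[2] = {10, 20};
        H5File file("transient_sketch.h5", H5F_ACC_TRUNC);
        DataSpace space(2, dims);

        IntType t(PredType::NATIVE_INT);
        t.setPrecision(256);                   // a copy is modifiable

        t.copy(PredType::NATIVE_INT);          // start over from NATIVE_INT
        DataSet dset = file.createDataSet("dset1", t, space);

        IntType from_dset(dset);               // the type as seen by the dataset
        try {
            from_dset.setPrecision(256);       // expected to throw
        }
        catch (DataTypeIException&) {}         // read-only, as expected

        from_dset.close();
        dset.close();
        t.close();
        space.close();
        file.close();
        return 0;
    }
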
@@ -372,16 +392,16 @@ static void test_transient ()
/*-------------------------------------------------------------------------
- * Function: test_named
+ * Function: test_named
*
- * Purpose: Tests named datatypes.
+ * Purpose Tests named datatypes.
*
- * Return: Success: 0
+ * Return Success: 0
*
- * Failure: number of errors
+ * Failure: number of errors
*
- * Programmer: Binh-Minh Ribler (use C version)
- * January, 2007
+ * Programmer Binh-Minh Ribler (use C version)
+ * January, 2007
*
* Modifications:
*
@@ -390,141 +410,141 @@ static void test_transient ()
const H5std_string filename2("dtypes2.h5");
static void test_named ()
{
- static hsize_t ds_size[2] = {10, 20};
- hsize_t i;
- unsigned attr_data[10][20];
- DataType *ds_type = NULL;
+ static hsize_t ds_size[2] = {10, 20};
+ hsize_t i;
+ unsigned attr_data[10][20];
+ DataType *ds_type = NULL;
SUBTEST("Named datatypes");
try {
- // Create the file.
- H5File file(filename2, H5F_ACC_TRUNC);
+ // Create the file.
+ H5File file(filename2, H5F_ACC_TRUNC);
- // Create a simple dataspace.
- DataSpace space(2, ds_size, ds_size);
+ // Create a simple dataspace.
+ DataSpace space(2, ds_size, ds_size);
- // Predefined types cannot be committed.
- try {
- PredType nativeint(PredType::NATIVE_INT);
- nativeint.commit(file, "test_named_1 (should not exist)");
+ // Predefined types cannot be committed.
+ try {
+ PredType nativeint(PredType::NATIVE_INT);
+ nativeint.commit(file, "test_named_1 (should not exist)");
- // Should FAIL but didn't, so throw an invalid action exception
- throw InvalidActionException("PredType::commit", "Attempted to commit a predefined datatype.");
- } catch (DataTypeIException& err) {}
+ // Should FAIL but didn't, so throw an invalid action exception
+ throw InvalidActionException("PredType::commit", "Attempted to commit a predefined datatype.");
+ } catch (DataTypeIException& err) {}
- // Copy a predefined datatype and commit the copy.
+ // Copy a predefined datatype and commit the copy.
IntType itype(PredType::NATIVE_INT);
itype.commit(file, "native-int");
- // Test commit passing in const H5File& for prototype with const
- try
- {
- // Create random char type
- IntType atype(PredType::NATIVE_UCHAR);
+ // Test commit passing in const H5File& for prototype with const
+ try
+ {
+ // Create random char type
+ IntType atype(PredType::NATIVE_UCHAR);
- // Creating group, declared as const
- const Group const_grp = file.createGroup("GR as loc");
+ // Creating group, declared as const
+ const Group const_grp = file.createGroup("GR as loc");
- // Commit type passing in const group; compilation would fail if
- // no matching prototype
- atype.commit(const_grp, "random uchar");
- } // end of try block
- catch (Exception& E)
+ // Commit type passing in const group; compilation would fail if
+ // no matching prototype
+ atype.commit(const_grp, "random uchar");
+ } // end of try block
+ catch (Exception& E)
{
- issue_fail_msg("test_named", __LINE__, __FILE__, "Commit at const group");
- }
-
- // Check that it is committed.
- if (itype.committed() == false)
- cerr << "IntType::committed() returned false" << endl;
-
- // We should not be able to modify a type after it has been committed.
- try {
- itype.setPrecision(256); // attempt an invalid action...
-
- // Should FAIL but didn't, so throw an invalid action exception
- throw InvalidActionException("IntType::setPrecision", "Attempted to modify a committed datatype.");
- } catch (DataTypeIException& err) {}
-
- // We should not be able to re-commit a committed type
- try {
- itype.commit(file, "test_named_2 (should not exist)");
-
- // Should FAIL but didn't, so throw an invalid action exception
- throw InvalidActionException("IntType::commit", "Attempted to re-commit a committed datatype.");
- } catch (DataTypeIException& err) {} // do nothing, failure expected
-
- // It should be possible to define an attribute for the named type
- Attribute attr1 = itype.createAttribute("attr1", PredType::NATIVE_UCHAR, space);
- for (i=0; i<ds_size[0]*ds_size[1]; i++)
- attr_data[0][i] = (int)i;/*tricky*/
- attr1.write(PredType::NATIVE_UINT, attr_data);
- attr1.close();
-
- // Copying a committed type should result in a transient type which is
- // not locked.
- IntType trans_type;
- trans_type.copy(itype);
- bool iscommitted = trans_type.committed();
- verify_val(iscommitted, 0, "DataType::committed() - Copying a named type should result in a transient type!", __LINE__, __FILE__);
- trans_type.setPrecision(256);
- trans_type.close();
-
- // Close the committed type and reopen it. It should be a named type.
- itype.close();
- itype = file.openIntType("native-int");
- iscommitted = itype.committed();
- if (!iscommitted)
- throw InvalidActionException("IntType::committed()", "Opened named types should be named types!");
-
- // Create a dataset that uses the named type, then get the dataset's
- // datatype and make sure it's a named type.
- DataSet dset = file.createDataSet("dset1", itype, space);
- ds_type = new DataType(dset.getDataType());
- iscommitted = ds_type->committed();
- if (!iscommitted)
- throw InvalidActionException("IntType::committed()", "Dataset type should be named type!");
- dset.close();
- ds_type->close();
+ issue_fail_msg("test_named", __LINE__, __FILE__, "Commit at const group");
+ }
+
+ // Check that it is committed.
+ if (itype.committed() == false)
+ cerr << "IntType::committed() returned false" << endl;
+
+ // We should not be able to modify a type after it has been committed.
+ try {
+ itype.setPrecision(256); // attempt an invalid action...
+
+ // Should FAIL but didn't, so throw an invalid action exception
+ throw InvalidActionException("IntType::setPrecision", "Attempted to modify a committed datatype.");
+ } catch (DataTypeIException& err) {}
+
+ // We should not be able to re-commit a committed type
+ try {
+ itype.commit(file, "test_named_2 (should not exist)");
+
+ // Should FAIL but didn't, so throw an invalid action exception
+ throw InvalidActionException("IntType::commit", "Attempted to re-commit a committed datatype.");
+ } catch (DataTypeIException& err) {} // do nothing, failure expected
+
+ // It should be possible to define an attribute for the named type
+ Attribute attr1 = itype.createAttribute("attr1", PredType::NATIVE_UCHAR, space);
+ for (i=0; i<ds_size[0]*ds_size[1]; i++)
+ attr_data[0][i] = (int)i;/*tricky*/
+ attr1.write(PredType::NATIVE_UINT, attr_data);
+ attr1.close();
+
+ // Copying a committed type should result in a transient type which is
+ // not locked.
+ IntType trans_type;
+ trans_type.copy(itype);
+ bool iscommitted = trans_type.committed();
+ verify_val(iscommitted, 0, "DataType::committed() - Copying a named type should result in a transient type!", __LINE__, __FILE__);
+ trans_type.setPrecision(256);
+ trans_type.close();
+
+ // Close the committed type and reopen it. It should be a named type.
+ itype.close();
+ itype = file.openIntType("native-int");
+ iscommitted = itype.committed();
+ if (!iscommitted)
+ throw InvalidActionException("IntType::committed()", "Opened named types should be named types!");
+
+ // Create a dataset that uses the named type, then get the dataset's
+ // datatype and make sure it's a named type.
+ DataSet dset = file.createDataSet("dset1", itype, space);
+ ds_type = new DataType(dset.getDataType());
+ iscommitted = ds_type->committed();
+ if (!iscommitted)
+ throw InvalidActionException("IntType::committed()", "Dataset type should be named type!");
+ dset.close();
+ ds_type->close();
delete ds_type;
- // Reopen the dataset and its type, then make sure the type is
- // a named type.
- dset = file.openDataSet("dset1");
- ds_type = new DataType(dset.getDataType());
- iscommitted = ds_type->committed();
- if (!iscommitted)
- throw InvalidActionException("IntType::committed()", "Dataset type should be named type!");
-
- // Close the dataset and create another with the type returned from
- // the first dataset.
- dset.close();
- dset = file.createDataSet("dset2", *ds_type, space);
- ds_type->close();
- dset.close();
+ // Reopen the dataset and its type, then make sure the type is
+ // a named type.
+ dset = file.openDataSet("dset1");
+ ds_type = new DataType(dset.getDataType());
+ iscommitted = ds_type->committed();
+ if (!iscommitted)
+ throw InvalidActionException("IntType::committed()", "Dataset type should be named type!");
+
+ // Close the dataset and create another with the type returned from
+ // the first dataset.
+ dset.close();
+ dset = file.createDataSet("dset2", *ds_type, space);
+ ds_type->close();
+ dset.close();
delete ds_type;
- // Reopen the second dataset and make sure the type is shared
- dset = file.openDataSet("dset2");
- ds_type = new DataType(dset.getDataType());
- iscommitted = ds_type->committed();
- if (!iscommitted)
- throw InvalidActionException("DataType::iscommitted()", "Dataset type should be named type!");
- ds_type->close();
-
- // Get the dataset datatype by applying DataType::copy() to the
- // dataset. The resulted datatype should be modifiable.
- IntType copied_type;
- copied_type.copy(dset);
- copied_type.setPrecision(256);
- copied_type.close();
-
- // Clean up
- dset.close();
- itype.close();
- space.close();
- file.close();
- PASSED();
+ // Reopen the second dataset and make sure the type is shared
+ dset = file.openDataSet("dset2");
+ ds_type = new DataType(dset.getDataType());
+ iscommitted = ds_type->committed();
+ if (!iscommitted)
+ throw InvalidActionException("DataType::iscommitted()", "Dataset type should be named type!");
+ ds_type->close();
+
+ // Get the dataset datatype by applying DataType::copy() to the
+ // dataset. The resulting datatype should be modifiable.
+ IntType copied_type;
+ copied_type.copy(dset);
+ copied_type.setPrecision(256);
+ copied_type.close();
+
+ // Clean up
+ dset.close();
+ itype.close();
+ space.close();
+ file.close();
+ PASSED();
} // end of try block
catch (Exception& E)
{
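
For context only (not part of this patch): the core of the named-datatype behaviour exercised above is that commit() turns a transient type into a named one, committed() reports that state, and copying a named type yields a transient, modifiable type again. A minimal sketch with illustrative names:

    #include "H5Cpp.h"
    using namespace H5;

    int main()
    {
        H5File file("named_sketch.h5", H5F_ACC_TRUNC);

        IntType itype(PredType::NATIVE_INT);
        itype.commit(file, "native-int");
        bool named = itype.committed();        // expected: true

        IntType trans;
        trans.copy(itype);                     // a copy of a named type...
        bool still_named = trans.committed();  // ...is transient again: expected false
        trans.setPrecision(256);               // and therefore modifiable

        (void)named; (void)still_named;
        trans.close();
        itype.close();
        file.close();
        return 0;
    }
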
@@ -558,14 +578,14 @@ void test_types()
/*-------------------------------------------------------------------------
- * Function: cleanup_types
+ * Function: cleanup_types
*
- * Purpose: Cleanup temporary test files
+ * Purpose Cleanup temporary test files
*
- * Return: none
+ * Return none
*
- * Programmer: Quincey Koziol
- * September 10, 1999
+ * Programmer Quincey Koziol
+ * September 10, 1999
*
* Modifications:
*
@@ -575,5 +595,5 @@ extern "C"
void cleanup_types()
{
for (int i = 0; i < 3; i++)
- HDremove(FILENAME[i]);
+ HDremove(FILENAME[i]);
} // cleanup_types
diff --git a/c++/test/tvlstr.cpp b/c++/test/tvlstr.cpp
index d39d092..d5fea07 100644
--- a/c++/test/tvlstr.cpp
+++ b/c++/test/tvlstr.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*****************************************************************************
@@ -45,9 +43,9 @@ const hsize_t SPACE1_DIM1 = 4;
/****************************************************************
**
** test_vlstr_alloc_custom(): Test VL datatype custom memory
-** allocation routines. This routine just uses malloc to
-** allocate the memory and increments the amount of memory
-** allocated. It is passed into setVlenMemManager.
+** allocation routines. This routine just uses malloc to
+** allocate the memory and increments the amount of memory
+** allocated. It is passed into setVlenMemManager.
**
** Note: exact copy from the C version.
** (Not used now)
@@ -55,9 +53,9 @@ const hsize_t SPACE1_DIM1 = 4;
#if 0 // not used now
static void *test_vlstr_alloc_custom(size_t size, void *info)
{
- void *ret_value=NULL; // Pointer to return
+ void *ret_value=NULL; // Pointer to return
size_t *mem_used=(size_t *)info; // Get the pointer to the memory used
- size_t extra; // Extra space needed
+ size_t extra; // Extra space needed
/*
* This weird contortion is required on the DEC Alpha to keep the
@@ -67,8 +65,8 @@ static void *test_vlstr_alloc_custom(size_t size, void *info)
extra=MAX(sizeof(void *),sizeof(size_t));
if((ret_value=HDmalloc(extra+size))!=NULL) {
- *(size_t *)ret_value=size;
- *mem_used+=size;
+ *(size_t *)ret_value=size;
+ *mem_used+=size;
} // end if
ret_value = ((unsigned char *)ret_value) + extra;
@@ -79,9 +77,9 @@ static void *test_vlstr_alloc_custom(size_t size, void *info)
/****************************************************************
**
** test_vlstr_free_custom(): Test VL datatype custom memory
-** allocation routines. This routine just uses free to
-** release the memory and decrements the amount of memory
-** allocated. It is passed into setVlenMemManager.
+** allocation routines. This routine just uses free to
+** release the memory and decrements the amount of memory
+** allocated. It is passed into setVlenMemManager.
**
** Note: exact copy from the C version.
** (Not used now)
@@ -91,7 +89,7 @@ static void test_vlstr_free_custom(void *_mem, void *info)
{
unsigned char *mem;
size_t *mem_used=(size_t *)info; // Get the pointer to the memory used
- size_t extra; // Extra space needed
+ size_t extra; // Extra space needed
/*
* This weird contortion is required on the DEC Alpha to keep the
@@ -109,14 +107,14 @@ static void test_vlstr_free_custom(void *_mem, void *info)
#endif
/*-------------------------------------------------------------------------
- * Function: test_vlstring_dataset
+ * Function: test_vlstring_dataset
*
- * Purpose: Test writing/reading VL strings on datasets.
+ * Purpose Test writing/reading VL strings on datasets.
*
- * Return: None
+ * Return None
*
- * Programmer: Binh-Minh Ribler (use C version)
- * January, 2007
+ * Programmer Binh-Minh Ribler (use C version)
+ * January, 2007
*
*-------------------------------------------------------------------------
*/
@@ -136,89 +134,89 @@ static void test_vlstring_dataset()
SUBTEST("VL String on Datasets");
try {
- // Open the file
- H5File file1(FILENAME, H5F_ACC_TRUNC);
+ // Open the file
+ H5File file1(FILENAME, H5F_ACC_TRUNC);
- // Create a datatype to refer to.
- StrType vlst(0, H5T_VARIABLE);
+ // Create a datatype to refer to.
+ StrType vlst(0, H5T_VARIABLE);
- // Open the root group.
- Group root = file1.openGroup("/");
+ // Open the root group.
+ Group root = file1.openGroup("/");
- // Create dataspace for the dataset.
- DataSpace ds_space (H5S_SCALAR);
+ // Create dataspace for the dataset.
+ DataSpace ds_space (H5S_SCALAR);
- // Create an dataset in the root group.
- DataSet dset1 = root.createDataSet(DSET1_NAME, vlst, ds_space);
+ // Create an dataset in the root group.
+ DataSet dset1 = root.createDataSet(DSET1_NAME, vlst, ds_space);
- // Write data to the dataset.
- dset1.write(DSET1_DATA, vlst);
+ // Write data to the dataset.
+ dset1.write(DSET1_DATA, vlst);
- // Read and verify the dataset string as a string of chars.
- dset1.read(&string_ds_check, vlst);
- if(HDstrcmp(string_ds_check, DSET1_DATA.c_str())!=0)
- TestErrPrintf("Line %d: Attribute data different: DSET1_DATA=%s,string_ds_check=%s\n",__LINE__, DSET1_DATA.c_str(), string_ds_check);
+ // Read and verify the dataset string as a string of chars.
+ dset1.read(&string_ds_check, vlst);
+ if(HDstrcmp(string_ds_check, DSET1_DATA.c_str())!=0)
+ TestErrPrintf("Line %d: Attribute data different: DSET1_DATA=%s,string_ds_check=%s\n",__LINE__, DSET1_DATA.c_str(), string_ds_check);
- HDfree(string_ds_check); // note: no need for std::string test
+ HDfree(string_ds_check); // note: no need for std::string test
string_ds_check = NULL;
- // Read and verify the dataset string as an std::string.
- H5std_string read_str;
- dset1.read(read_str, vlst);
- if (read_str != DSET1_DATA)
- TestErrPrintf("Line %d: Attribute data different: DSET1_DATA=%s,read_str=%s\n",__LINE__, DSET1_DATA.c_str(), read_str.c_str());
+ // Read and verify the dataset string as an std::string.
+ H5std_string read_str;
+ dset1.read(read_str, vlst);
+ if (read_str != DSET1_DATA)
+ TestErrPrintf("Line %d: Attribute data different: DSET1_DATA=%s,read_str=%s\n",__LINE__, DSET1_DATA.c_str(), read_str.c_str());
- // Close the dataset.
- dset1.close();
+ // Close the dataset.
+ dset1.close();
- // Test scalar type dataset with 1 value.
- dset1 = root.createDataSet("test_scalar_small", vlst, ds_space);
+ // Test scalar type dataset with 1 value.
+ dset1 = root.createDataSet("test_scalar_small", vlst, ds_space);
- dynstring_ds_write = (char*)HDcalloc(1, sizeof(char));
- HDmemset(dynstring_ds_write, 'A', 1);
+ dynstring_ds_write = (char*)HDcalloc(1, sizeof(char));
+ HDmemset(dynstring_ds_write, 'A', 1);
- // Write data to the dataset, then read it back.
- dset1.write(&dynstring_ds_write, vlst);
- dset1.read(&string_ds_check, vlst);
+ // Write data to the dataset, then read it back.
+ dset1.write(&dynstring_ds_write, vlst);
+ dset1.read(&string_ds_check, vlst);
- // Verify data read.
- if(HDstrcmp(string_ds_check,dynstring_ds_write)!=0)
- TestErrPrintf("VL string datasets don't match!, dynstring_ds_write=%s, string_ds_check=%s\n",dynstring_ds_write,string_ds_check);
- HDfree(string_ds_check);
+ // Verify data read.
+ if(HDstrcmp(string_ds_check,dynstring_ds_write)!=0)
+ TestErrPrintf("VL string datasets don't match!, dynstring_ds_write=%s, string_ds_check=%s\n",dynstring_ds_write,string_ds_check);
+ HDfree(string_ds_check);
string_ds_check = NULL;
- dset1.close();
+ dset1.close();
- // Open dataset DSET1_NAME again.
- dset1 = root.openDataSet(DSET1_NAME);
+ // Open dataset DSET1_NAME again.
+ dset1 = root.openDataSet(DSET1_NAME);
- // Close dataset and file
- dset1.close();
- file1.close();
+ // Close dataset and file
+ dset1.close();
+ file1.close();
- PASSED();
+ PASSED();
} // end try block
// Catch all exceptions.
catch (Exception& E)
{
- issue_fail_msg("test_vlstring_dataset()", __LINE__, __FILE__, E.getCDetailMsg());
+ issue_fail_msg("test_vlstring_dataset()", __LINE__, __FILE__, E.getCDetailMsg());
}
if(dynstring_ds_write)
HDfree(dynstring_ds_write);
if(string_ds_check)
- HDfree(string_ds_check);
+ HDfree(string_ds_check);
} // test_vlstring_dataset()
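
For reference (not part of this patch), the read/write pattern tested above in sketch form: a variable-length string dataset written from an H5std_string and read back both as a char* (which the caller frees, as the HDfree() calls in the test do) and as an H5std_string. File and dataset names are illustrative:

    #include "H5Cpp.h"
    #include <cstdlib>
    using namespace H5;

    int main()
    {
        H5File file("vlstr_sketch.h5", H5F_ACC_TRUNC);
        StrType vlst(0, H5T_VARIABLE);         // variable-length C-string type
        DataSpace scalar(H5S_SCALAR);

        DataSet dset = file.createDataSet("note", vlst, scalar);
        const H5std_string written("Let the dataset character strings be variable-length.");
        dset.write(written, vlst);

        char *c_check = NULL;                  // the library allocates this buffer
        dset.read(&c_check, vlst);
        std::free(c_check);                    // caller releases it

        H5std_string s_check;                  // no manual free needed this way
        dset.read(s_check, vlst);

        dset.close();
        scalar.close();
        vlst.close();
        file.close();
        return 0;
    }
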
/*-------------------------------------------------------------------------
- * Function: test_vlstring_array_dataset
+ * Function: test_vlstring_array_dataset
*
- * Purpose: Test writing/reading VL string array to/from datasets.
+ * Purpose Test writing/reading VL string array to/from datasets.
*
- * Return: None
+ * Return None
*
- * Programmer: Binh-Minh Ribler
- * July, 2009
+ * Programmer Binh-Minh Ribler
+ * July, 2009
*
*-------------------------------------------------------------------------
*/
@@ -235,89 +233,89 @@ static void test_vlstring_array_dataset()
H5File* file1 = NULL;
try {
// Create file.
- file1 = new H5File(FILENAME, H5F_ACC_RDWR);
+ file1 = new H5File(FILENAME, H5F_ACC_RDWR);
// Create dataspace for datasets.
hsize_t dims1[] = {SPACE1_DIM1};
DataSpace ds_space(SPACE1_RANK, dims1);
- // Create a datatype to refer to.
- StrType vlst(0, H5T_VARIABLE);
-
- // Create and write a dataset.
- DataSet dataset(file1->createDataSet(DSSTRARR_NAME, vlst, ds_space));
- dataset.write(string_ds_array, vlst);
-
- // Read and verify the dataset using strings of chars as buffer.
- // Note: reading by array of H5std_string doesn't work yet.
- char *string_ds_check[SPACE1_DIM1];
- dataset.read(string_ds_check, vlst);
-
- hsize_t ii;
- for (ii = 0; ii < SPACE1_DIM1; ii++)
- {
- if(HDstrcmp(string_ds_check[ii], string_ds_array[ii])!=0)
- TestErrPrintf("Line %d: Dataset data different: written=%s,read=%s\n",__LINE__, string_ds_array[ii], string_ds_check[ii]);
-
- HDfree(string_ds_check[ii]);
- }
-
- // Close objects that are no longer needed.
- dataset.close();
- ds_space.close();
-
- //
- // Test with scalar data space.
- //
-
- // Create H5S_SCALAR data space.
- DataSpace scalar_space;
-
- // Create and write another dataset.
- DataSet dataset2(file1->createDataSet("Dataset2", vlst, scalar_space));
- char *wdata2 = (char*)HDcalloc(65534, sizeof(char));
- HDmemset(wdata2, 'A', 65533);
- dataset2.write(&wdata2, vlst);
-
- char *rdata2;
- dataset2.read(&rdata2, vlst);
- if (HDstrcmp(wdata2, rdata2)!=0)
- TestErrPrintf("Line %d: Dataset data different: written=%s,read=%s\n",__LINE__, wdata2, rdata2);
-
- // Release resources from second dataset operation.
- scalar_space.close();
- dataset2.close();
- HDfree(wdata2);
- HDfree(rdata2);
-
- // Close objects and file.
- dataset2.close();
- vlst.close();
- file1->close();
-
- PASSED();
+ // Create a datatype to refer to.
+ StrType vlst(0, H5T_VARIABLE);
+
+ // Create and write a dataset.
+ DataSet dataset(file1->createDataSet(DSSTRARR_NAME, vlst, ds_space));
+ dataset.write(string_ds_array, vlst);
+
+ // Read and verify the dataset using strings of chars as buffer.
+ // Note: reading by array of H5std_string doesn't work yet.
+ char *string_ds_check[SPACE1_DIM1];
+ dataset.read(string_ds_check, vlst);
+
+ hsize_t ii;
+ for (ii = 0; ii < SPACE1_DIM1; ii++)
+ {
+ if(HDstrcmp(string_ds_check[ii], string_ds_array[ii])!=0)
+ TestErrPrintf("Line %d: Dataset data different: written=%s,read=%s\n",__LINE__, string_ds_array[ii], string_ds_check[ii]);
+
+ HDfree(string_ds_check[ii]);
+ }
+
+ // Close objects that are no longer needed.
+ dataset.close();
+ ds_space.close();
+
+ //
+ // Test with scalar data space.
+ //
+
+ // Create H5S_SCALAR data space.
+ DataSpace scalar_space;
+
+ // Create and write another dataset.
+ DataSet dataset2(file1->createDataSet("Dataset2", vlst, scalar_space));
+ char *wdata2 = (char*)HDcalloc(65534, sizeof(char));
+ HDmemset(wdata2, 'A', 65533);
+ dataset2.write(&wdata2, vlst);
+
+ char *rdata2;
+ dataset2.read(&rdata2, vlst);
+ if (HDstrcmp(wdata2, rdata2)!=0)
+ TestErrPrintf("Line %d: Dataset data different: written=%s,read=%s\n",__LINE__, wdata2, rdata2);
+
+ // Release resources from second dataset operation.
+ scalar_space.close();
+ dataset2.close();
+ HDfree(wdata2);
+ HDfree(rdata2);
+
+ // Close objects and file.
+ dataset2.close();
+ vlst.close();
+ file1->close();
+
+ PASSED();
} // end try
// Catch all exceptions.
catch (Exception& E)
{
- issue_fail_msg("test_vlstring_array_dataset()", __LINE__, __FILE__, E.getCDetailMsg());
+ issue_fail_msg("test_vlstring_array_dataset()", __LINE__, __FILE__, E.getCDetailMsg());
}
if(file1)
- delete file1;
+ delete file1;
} // end test_vlstring_array_dataset()
/*-------------------------------------------------------------------------
- * Function: test_vlstrings_special
+ * Function: test_vlstrings_special
*
- * Purpose: Test VL string code for special string cases, nil and
- * zero-sized.
+ * Purpose Test VL string code for special string cases, nil and
+ * zero-sized.
*
- * Return: None
+ * Return None
*
- * Programmer: Binh-Minh Ribler (use C version)
- * January, 2007
+ * Programmer Binh-Minh Ribler (use C version)
+ * January, 2007
*
*-------------------------------------------------------------------------
*/
@@ -331,110 +329,113 @@ static void test_vlstrings_special()
SUBTEST("Special VL Strings");
try {
- // Create file.
- H5File file1(FILENAME, H5F_ACC_TRUNC);
+ // Create file.
+ H5File file1(FILENAME, H5F_ACC_TRUNC);
// Create dataspace for datasets.
hsize_t dims1[] = {SPACE1_DIM1};
DataSpace sid1(SPACE1_RANK, dims1);
- // Create a datatype to refer to.
- StrType vlst(0, H5T_VARIABLE);
-
- // Create a dataset.
- DataSet dataset(file1.createDataSet("Dataset3", vlst, sid1));
-
- // Read from the dataset before writing data.
- dataset.read(rdata, vlst);
-
- // Check data read in.
- hsize_t ii; // counting variable
- for (ii=0; ii<SPACE1_DIM1; ii++)
- if(rdata[ii]!=NULL)
- TestErrPrintf("VL doesn't match!, rdata[%d]=%p\n",(int)ii,rdata[ii]);
-
- // Write dataset to disk, then read it back.
- dataset.write(wdata, vlst);
- dataset.read(rdata, vlst);
-
- // Compare data read in.
- for (ii = 0; ii < SPACE1_DIM1; ii++) {
- size_t wlen = HDstrlen(wdata[ii]);
- size_t rlen = HDstrlen(rdata[ii]);
- if(wlen != rlen) {
- TestErrPrintf("VL data lengths don't match!, strlen(wdata[%d])=%u, strlen(rdata[%d])=%u\n", (int)ii, (unsigned)wlen, (int)ii, (unsigned)rlen);
- continue;
- } // end if
- if(HDstrcmp(wdata[ii],rdata[ii]) != 0) {
- TestErrPrintf("VL data values don't match!, wdata[%d]=%s, rdata[%d]=%s\n", (int)ii, wdata[ii], (int)ii, rdata[ii]);
- continue;
- } // end if
- } // end for
-
- // Reclaim the read VL data.
- DataSet::vlenReclaim((void *)rdata, vlst, sid1);
-
- // Close Dataset.
- dataset.close();
-
- /*
- * Create another dataset to test nil strings.
- */
-
- // Create the property list and set the fill value for the second
- // dataset.
- DSetCreatPropList dcpl;
- char *fill = NULL; // Fill value
- dcpl.setFillValue(vlst, &fill);
- dataset = file1.createDataSet("Dataset4", vlst, sid1, dcpl);
-
- // Close dataset creation property list.
- dcpl.close();
-
- // Read from dataset before writing data.
- dataset.read(rdata, vlst);
-
- // Check data read in.
- for (ii=0; ii<SPACE1_DIM1; ii++)
- if(rdata[ii]!=NULL)
- TestErrPrintf("VL doesn't match!, rdata[%d]=%p\n",(int)ii,rdata[ii]);
-
- // Try to write nil strings to disk.
- dataset.write(wdata2, vlst);
-
- // Read nil strings back from disk.
- dataset.read(rdata, vlst);
-
- // Check data read in.
- for (ii=0; ii<SPACE1_DIM1; ii++)
- if(rdata[ii]!=NULL)
- TestErrPrintf("VL doesn't match!, rdata[%d]=%p\n",(int)ii,rdata[ii]);
-
- // Close objects and file.
- dataset.close();
- vlst.close();
- sid1.close();
- file1.close();
-
- PASSED();
+ // Create a datatype to refer to.
+ StrType vlst(0, H5T_VARIABLE);
+
+ // Create a dataset.
+ DataSet dataset(file1.createDataSet("Dataset3", vlst, sid1));
+
+ // Read from the dataset before writing data.
+ dataset.read(rdata, vlst);
+
+ // Check data read in.
+ hsize_t ii; // counting variable
+ for (ii=0; ii<SPACE1_DIM1; ii++)
+ if(rdata[ii]!=NULL)
+ TestErrPrintf("VL doesn't match!, rdata[%d]=%p\n",(int)ii,rdata[ii]);
+
+ // Write dataset to disk, then read it back.
+ dataset.write(wdata, vlst);
+ dataset.read(rdata, vlst);
+
+ // Compare data read in.
+ for (ii = 0; ii < SPACE1_DIM1; ii++)
+ {
+ size_t wlen = HDstrlen(wdata[ii]);
+ size_t rlen = HDstrlen(rdata[ii]);
+ if(wlen != rlen)
+ {
+ TestErrPrintf("VL data lengths don't match!, strlen(wdata[%d])=%u, strlen(rdata[%d])=%u\n", (int)ii, (unsigned)wlen, (int)ii, (unsigned)rlen);
+ continue;
+ } // end if
+ if(HDstrcmp(wdata[ii],rdata[ii]) != 0)
+ {
+ TestErrPrintf("VL data values don't match!, wdata[%d]=%s, rdata[%d]=%s\n", (int)ii, wdata[ii], (int)ii, rdata[ii]);
+ continue;
+ } // end if
+ } // end for
+
+ // Reclaim the read VL data.
+ DataSet::vlenReclaim((void *)rdata, vlst, sid1);
+
+ // Close Dataset.
+ dataset.close();
+
+ /*
+ * Create another dataset to test nil strings.
+ */
+
+ // Create the property list and set the fill value for the second
+ // dataset.
+ DSetCreatPropList dcpl;
+ char *fill = NULL; // Fill value
+ dcpl.setFillValue(vlst, &fill);
+ dataset = file1.createDataSet("Dataset4", vlst, sid1, dcpl);
+
+ // Close dataset creation property list.
+ dcpl.close();
+
+ // Read from dataset before writing data.
+ dataset.read(rdata, vlst);
+
+ // Check data read in.
+ for (ii=0; ii<SPACE1_DIM1; ii++)
+ if(rdata[ii]!=NULL)
+ TestErrPrintf("VL doesn't match!, rdata[%d]=%p\n",(int)ii,rdata[ii]);
+
+ // Try to write nil strings to disk.
+ dataset.write(wdata2, vlst);
+
+ // Read nil strings back from disk.
+ dataset.read(rdata, vlst);
+
+ // Check data read in.
+ for (ii=0; ii<SPACE1_DIM1; ii++)
+ if(rdata[ii]!=NULL)
+ TestErrPrintf("VL doesn't match!, rdata[%d]=%p\n",(int)ii,rdata[ii]);
+
+ // Close objects and file.
+ dataset.close();
+ vlst.close();
+ sid1.close();
+ file1.close();
+
+ PASSED();
} // end try
// Catch all exceptions.
catch (Exception& E)
{
- issue_fail_msg("test_vlstrings_special()", __LINE__, __FILE__, E.getCDetailMsg());
+ issue_fail_msg("test_vlstrings_special()", __LINE__, __FILE__, E.getCDetailMsg());
}
} // test_vlstrings_special
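
For context only (not part of this patch): the special cases above rely on a NULL fill value so that unwritten elements read back as nil strings, and on DataSet::vlenReclaim() to release the buffers the library allocated during the read. A minimal sketch with illustrative names and data:

    #include "H5Cpp.h"
    #include <cstddef>
    using namespace H5;

    int main()
    {
        const hsize_t dim = 4;
        H5File file("vlspecial_sketch.h5", H5F_ACC_TRUNC);
        DataSpace space(1, &dim);
        StrType vlst(0, H5T_VARIABLE);

        DSetCreatPropList dcpl;
        char *fill = NULL;                     // nil string as the fill value
        dcpl.setFillValue(vlst, &fill);

        DataSet dset = file.createDataSet("nil_strings", vlst, space, dcpl);
        dcpl.close();

        char *rdata[4];
        dset.read(rdata, vlst);                // all entries expected to be NULL

        const char *wdata[4] = {"one", "two", "", "four"};
        dset.write(wdata, vlst);
        dset.read(rdata, vlst);

        DataSet::vlenReclaim((void *)rdata, vlst, space);   // free VL buffers

        dset.close();
        vlst.close();
        space.close();
        file.close();
        return 0;
    }
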
/*-------------------------------------------------------------------------
- * Function: test_vlstring_type
+ * Function: test_vlstring_type
*
- * Purpose: Test if VL string is treated as string.
+ * Purpose Test if VL string is treated as string.
*
- * Return: None
+ * Return None
*
- * Programmer: Binh-Minh Ribler (use C version)
- * January, 2007
+ * Programmer Binh-Minh Ribler (use C version)
+ * January, 2007
*
*-------------------------------------------------------------------------
*/
@@ -446,63 +447,67 @@ static void test_vlstring_type()
H5File* file1 = NULL;
try {
- // Open file.
- file1 = new H5File(FILENAME, H5F_ACC_RDWR);
+ // Open file.
+ file1 = new H5File(FILENAME, H5F_ACC_RDWR);
- // Create a datatype to refer to.
- StrType vlst(PredType::C_S1);
+ // Create a datatype to refer to.
+ StrType vlst(PredType::C_S1);
- // Change padding and verify it.
- vlst.setStrpad(H5T_STR_NULLPAD);
- H5T_str_t pad = vlst.getStrpad();
- verify_val(pad, H5T_STR_NULLPAD, "StrType::getStrpad", __LINE__, __FILE__);
+ // Change padding and verify it.
+ vlst.setStrpad(H5T_STR_NULLPAD);
+ H5T_str_t pad = vlst.getStrpad();
+ verify_val(pad, H5T_STR_NULLPAD, "StrType::getStrpad", __LINE__, __FILE__);
- // Convert to variable-length string.
- vlst.setSize(H5T_VARIABLE);
+ // Convert to variable-length string.
+ vlst.setSize(H5T_VARIABLE);
- // Check if datatype is VL string.
- H5T_class_t type_class = vlst.getClass();
- verify_val(type_class, H5T_STRING, "DataType::getClass", __LINE__, __FILE__);
- bool is_variable_str = vlst.isVariableStr();
- verify_val(is_variable_str, true, "DataType::isVariableStr", __LINE__, __FILE__);
+ // Check if datatype is VL string.
+ H5T_class_t type_class = vlst.getClass();
+ verify_val(type_class, H5T_STRING, "DataType::getClass", __LINE__, __FILE__);
+ bool is_variable_str = vlst.isVariableStr();
+ verify_val(is_variable_str, true, "DataType::isVariableStr", __LINE__, __FILE__);
- // Check default character set and padding.
- H5T_cset_t cset = vlst.getCset();
- verify_val(cset, H5T_CSET_ASCII, "StrType::getCset", __LINE__, __FILE__);
- pad = vlst.getStrpad();
- verify_val(pad, H5T_STR_NULLPAD, "StrType::getStrpad", __LINE__, __FILE__);
+ // Check default character set and padding.
+ H5T_cset_t cset = vlst.getCset();
+ verify_val(cset, H5T_CSET_ASCII, "StrType::getCset", __LINE__, __FILE__);
+ pad = vlst.getStrpad();
+ verify_val(pad, H5T_STR_NULLPAD, "StrType::getStrpad", __LINE__, __FILE__);
- // Commit variable-length string datatype to storage.
- vlst.commit(*file1, VLSTR_TYPE);
+ // Commit variable-length string datatype to storage.
+ vlst.commit(*file1, VLSTR_TYPE);
- // Close datatype.
- vlst.close();
+ // Close datatype.
+ vlst.close();
- // Try opening datatype again.
- vlst = file1->openStrType(VLSTR_TYPE);
+ // Try opening datatype again.
+ vlst = file1->openStrType(VLSTR_TYPE); // deprecated
- // Close datatype and file.
- vlst.close();
- file1->close();
+ // Close again and reopen with constructor.
+ vlst.close();
+ StrType vlst1(*file1, VLSTR_TYPE);
+
+ // Close datatype and file.
+ vlst1.close();
+ file1->close();
delete file1;
- // Open file.
- file1 = new H5File(FILENAME, H5F_ACC_RDWR);
+ // Open file.
+ file1 = new H5File(FILENAME, H5F_ACC_RDWR);
- // Open the variable-length string datatype just created
- vlst = file1->openStrType(VLSTR_TYPE);
+ // Open the variable-length string datatype just created
+ StrType vlst2(*file1, VLSTR_TYPE);
- // Verify character set and padding
- cset = vlst.getCset();
- verify_val(cset, H5T_CSET_ASCII, "StrType::getCset", __LINE__, __FILE__);
- pad = vlst.getStrpad();
- verify_val(pad, H5T_STR_NULLPAD, "StrType::getStrpad", __LINE__, __FILE__);
+ // Verify character set and padding
+ cset = vlst2.getCset();
+ verify_val(cset, H5T_CSET_ASCII, "StrType::getCset", __LINE__, __FILE__);
+ pad = vlst2.getStrpad();
+ verify_val(pad, H5T_STR_NULLPAD, "StrType::getStrpad", __LINE__, __FILE__);
- // Close datatype and file
- vlst.close();
- file1->close();
+ // Close datatype and file
+ vlst2.close();
+ file1->close();
- PASSED();
+ PASSED();
} // end try block
// Catch all exceptions.
@@ -512,18 +517,18 @@ static void test_vlstring_type()
}
if(file1)
- delete file1;
+ delete file1;
} // end test_vlstring_type()
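
The change above replaces the second use of the deprecated openStrType() with the StrType constructor that opens a committed string type. A minimal sketch of that pattern (not part of the patch; names are illustrative):

    #include "H5Cpp.h"
    using namespace H5;

    int main()
    {
        H5File file("vlstrtype_sketch.h5", H5F_ACC_TRUNC);

        StrType vlst(PredType::C_S1);
        vlst.setStrpad(H5T_STR_NULLPAD);
        vlst.setSize(H5T_VARIABLE);            // turn it into a VL string type
        vlst.commit(file, "vl_string_type");
        vlst.close();

        // Reopen the committed type via the constructor instead of openStrType().
        StrType reopened(file, "vl_string_type");
        bool is_vl = reopened.isVariableStr(); // expected: true
        (void)is_vl;

        reopened.close();
        file.close();
        return 0;
    }
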
/*-------------------------------------------------------------------------
- * Function: test_compact_vlstring
+ * Function: test_compact_vlstring
*
- * Purpose: Test storing VL strings in compact datasets.
+ * Purpose Test storing VL strings in compact datasets.
*
- * Return: None
+ * Return None
*
- * Programmer: Binh-Minh Ribler (use C version)
- * January, 2007
+ * Programmer Binh-Minh Ribler (use C version)
+ * January, 2007
*
*-------------------------------------------------------------------------
*/
@@ -533,55 +538,55 @@ static void test_compact_vlstring()
SUBTEST("VL Strings on Compact Dataset");
try {
- // Create file
- H5File file1(FILENAME, H5F_ACC_TRUNC);
+ // Create file
+ H5File file1(FILENAME, H5F_ACC_TRUNC);
- // Create dataspace for datasets
+ // Create dataspace for datasets
hsize_t dims1[] = {SPACE1_DIM1};
DataSpace sid1(SPACE1_RANK, dims1);
- // Create a datatype to refer to
- StrType vlst(0, H5T_VARIABLE);
-
- // Create dataset create property list and set layout
- DSetCreatPropList plist;
- plist.setLayout(H5D_COMPACT);
-
- // Create a dataset
- DataSet dataset(file1.createDataSet("Dataset5", vlst, sid1, plist));
-
- // Write dataset to disk
- const char *wdata[SPACE1_DIM1] = {"one", "two", "three", "four"};
- dataset.write(wdata, vlst);
-
- // Read dataset from disk
- char *rdata[SPACE1_DIM1]; // Information read in
- dataset.read(rdata, vlst);
-
- // Compare data read in
- hsize_t i;
- for (i=0; i<SPACE1_DIM1; i++) {
- if (HDstrlen(wdata[i])!=strlen(rdata[i])) {
- TestErrPrintf("VL data length don't match!, strlen(wdata[%d])=%d, strlen(rdata[%d])=%d\n",(int)i,(int)strlen(wdata[i]),(int)i,(int)strlen(rdata[i]));
- continue;
- } // end if
- if (HDstrcmp(wdata[i],rdata[i]) != 0) {
- TestErrPrintf("VL data values don't match!, wdata[%d]=%s, rdata[%d]=%s\n",(int)i,wdata[i],(int)i,rdata[i]);
- continue;
- } // end if
- } // end for
-
- // Reclaim the read VL data
- DataSet::vlenReclaim((void *)rdata, vlst, sid1);
-
- // Close objects and file
- dataset.close();
- vlst.close();
- sid1.close();
- plist.close();
- file1.close();
-
- PASSED();
+ // Create a datatype to refer to
+ StrType vlst(0, H5T_VARIABLE);
+
+ // Create dataset create property list and set layout
+ DSetCreatPropList plist;
+ plist.setLayout(H5D_COMPACT);
+
+ // Create a dataset
+ DataSet dataset(file1.createDataSet("Dataset5", vlst, sid1, plist));
+
+ // Write dataset to disk
+ const char *wdata[SPACE1_DIM1] = {"one", "two", "three", "four"};
+ dataset.write(wdata, vlst);
+
+ // Read dataset from disk
+ char *rdata[SPACE1_DIM1]; // Information read in
+ dataset.read(rdata, vlst);
+
+ // Compare data read in
+ hsize_t i;
+ for (i=0; i<SPACE1_DIM1; i++) {
+ if (HDstrlen(wdata[i])!=strlen(rdata[i])) {
+ TestErrPrintf("VL data length don't match!, strlen(wdata[%d])=%d, strlen(rdata[%d])=%d\n",(int)i,(int)strlen(wdata[i]),(int)i,(int)strlen(rdata[i]));
+ continue;
+ } // end if
+ if (HDstrcmp(wdata[i],rdata[i]) != 0) {
+ TestErrPrintf("VL data values don't match!, wdata[%d]=%s, rdata[%d]=%s\n",(int)i,wdata[i],(int)i,rdata[i]);
+ continue;
+ } // end if
+ } // end for
+
+ // Reclaim the read VL data
+ DataSet::vlenReclaim((void *)rdata, vlst, sid1);
+
+ // Close objects and file
+ dataset.close();
+ vlst.close();
+ sid1.close();
+ plist.close();
+ file1.close();
+
+ PASSED();
} // end try
// Catch all exceptions.
@@ -592,14 +597,14 @@ static void test_compact_vlstring()
} // test_compact_vlstrings
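
For context only (not part of this patch): the compact layout requested above is set on the dataset creation property list and can be read back from the created dataset. A minimal sketch with illustrative names:

    #include "H5Cpp.h"
    using namespace H5;

    int main()
    {
        const hsize_t dim = 4;
        H5File file("compact_sketch.h5", H5F_ACC_TRUNC);
        DataSpace space(1, &dim);
        StrType vlst(0, H5T_VARIABLE);

        DSetCreatPropList plist;
        plist.setLayout(H5D_COMPACT);          // raw data stored in the object header

        DataSet dset = file.createDataSet("compact_vl", vlst, space, plist);

        // Confirm the layout actually applied to the dataset.
        DSetCreatPropList used = dset.getCreatePlist();
        H5D_layout_t layout = used.getLayout();    // expected: H5D_COMPACT
        (void)layout;

        used.close();
        dset.close();
        plist.close();
        vlst.close();
        space.close();
        file.close();
        return 0;
    }
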
/*-------------------------------------------------------------------------
- * Function: test_vlstring_attribute
+ * Function: test_vlstring_attribute
*
- * Purpose: Test writing/reading VL strings on attributes.
+ * Purpose Test writing/reading VL strings on attributes.
*
- * Return: None
+ * Return None
*
- * Programmer: Binh-Minh Ribler (use C version)
- * January, 2007
+ * Programmer Binh-Minh Ribler (use C version)
+ * January, 2007
*
*-------------------------------------------------------------------------
*/
@@ -616,81 +621,81 @@ static void test_vlstring_attribute()
SUBTEST("VL String on Attributes");
try {
- // Open the file
- H5File file1(FILENAME, H5F_ACC_RDWR);
+ // Open the file
+ H5File file1(FILENAME, H5F_ACC_RDWR);
- // Create a datatype to refer to.
- StrType vlst(0, H5T_VARIABLE);
+ // Create a datatype to refer to.
+ StrType vlst(0, H5T_VARIABLE);
- // Open the root group.
- Group root = file1.openGroup("/");
+ // Open the root group.
+ Group root = file1.openGroup("/");
- // Create dataspace for the attribute.
- DataSpace att_space (H5S_SCALAR);
+ // Create dataspace for the attribute.
+ DataSpace att_space (H5S_SCALAR);
- // Create an attribute for the root group.
- Attribute gr_attr = root.createAttribute(ATTRSTR_NAME, vlst, att_space);
+ // Create an attribute for the root group.
+ Attribute gr_attr = root.createAttribute(ATTRSTR_NAME, vlst, att_space);
- // Write data to the attribute.
- gr_attr.write(vlst, ATTRSTR_DATA);
+ // Write data to the attribute.
+ gr_attr.write(vlst, ATTRSTR_DATA);
- // Read and verify the attribute string as a string of chars.
- char *string_att_check;
- gr_attr.read(vlst, &string_att_check);
- if(HDstrcmp(string_att_check, ATTRSTR_DATA.c_str())!=0)
- TestErrPrintf("Line %d: Attribute data different: ATTRSTR_DATA=%s,string_att_check=%s\n",__LINE__, ATTRSTR_DATA.c_str(), string_att_check);
+ // Read and verify the attribute string as a string of chars.
+ char *string_att_check;
+ gr_attr.read(vlst, &string_att_check);
+ if(HDstrcmp(string_att_check, ATTRSTR_DATA.c_str())!=0)
+ TestErrPrintf("Line %d: Attribute data different: ATTRSTR_DATA=%s,string_att_check=%s\n",__LINE__, ATTRSTR_DATA.c_str(), string_att_check);
- HDfree(string_att_check); // note: no need for std::string test
+ HDfree(string_att_check); // note: no need for std::string test
- // Read and verify the attribute string as an std::string.
- H5std_string read_str;
- gr_attr.read(vlst, read_str);
- if (read_str != ATTRSTR_DATA)
- TestErrPrintf("Line %d: Attribute data different: ATTRSTR_DATA=%s,read_str=%s\n",__LINE__, ATTRSTR_DATA.c_str(), read_str.c_str());
+ // Read and verify the attribute string as an std::string.
+ H5std_string read_str;
+ gr_attr.read(vlst, read_str);
+ if (read_str != ATTRSTR_DATA)
+ TestErrPrintf("Line %d: Attribute data different: ATTRSTR_DATA=%s,read_str=%s\n",__LINE__, ATTRSTR_DATA.c_str(), read_str.c_str());
- // Close group's attribute.
- gr_attr.close();
+ // Close group's attribute.
+ gr_attr.close();
- // Test creating a "large" sized string attribute
- gr_attr = root.createAttribute("test_scalar_large", vlst, att_space);
+ // Test creating a "large" sized string attribute
+ gr_attr = root.createAttribute("test_scalar_large", vlst, att_space);
- string_att_write = (char*)HDcalloc(8192, sizeof(char));
- HDmemset(string_att_write, 'A', 8191);
+ string_att_write = (char*)HDcalloc(8192, sizeof(char));
+ HDmemset(string_att_write, 'A', 8191);
- // Write data to the attribute, then read it back.
- gr_attr.write(vlst, &string_att_write);
- gr_attr.read(vlst, &string_att_check);
+ // Write data to the attribute, then read it back.
+ gr_attr.write(vlst, &string_att_write);
+ gr_attr.read(vlst, &string_att_check);
- // Verify data read.
- if(HDstrcmp(string_att_check,string_att_write)!=0)
- TestErrPrintf("VL string attributes don't match!, string_att_write=%s, string_att_check=%s\n",string_att_write,string_att_check);
+ // Verify data read.
+ if(HDstrcmp(string_att_check,string_att_write)!=0)
+ TestErrPrintf("VL string attributes don't match!, string_att_write=%s, string_att_check=%s\n",string_att_write,string_att_check);
- // Release resources.
- HDfree(string_att_check);
- HDfree(string_att_write);
- gr_attr.close();
- file1.close();
+ // Release resources.
+ HDfree(string_att_check);
+ HDfree(string_att_write);
+ gr_attr.close();
+ file1.close();
- PASSED();
+ PASSED();
} // end try block
// Catch all exceptions.
catch (Exception& E)
{
- issue_fail_msg("test_vlstring_attribute()", __LINE__, __FILE__, E.getCDetailMsg());
+ issue_fail_msg("test_vlstring_attribute()", __LINE__, __FILE__, E.getCDetailMsg());
}
} // test_vlstring_attribute()
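
For reference (not part of this patch), the attribute path tested above in sketch form: a variable-length string attribute on the root group, written from and read back into an H5std_string so no manual buffer management is needed. Names are illustrative:

    #include "H5Cpp.h"
    using namespace H5;

    int main()
    {
        H5File file("vlattr_sketch.h5", H5F_ACC_TRUNC);
        StrType vlst(0, H5T_VARIABLE);
        DataSpace scalar(H5S_SCALAR);

        Group root = file.openGroup("/");
        Attribute attr = root.createAttribute("comment", vlst, scalar);

        const H5std_string written("String attribute of variable length");
        attr.write(vlst, written);

        H5std_string check;
        attr.read(vlst, check);                // expected to equal 'written'

        attr.close();
        root.close();
        scalar.close();
        vlst.close();
        file.close();
        return 0;
    }
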
#if 0
/*-------------------------------------------------------------------------
- * Function: test_read_vl_string_attribute
+ * Function: test_read_vl_string_attribute
*
- * Purpose: Test reading VL strings from attributes.
+ * Purpose Test reading VL strings from attributes.
*
- * Return: None
+ * Return None
*
- * Programmer: Binh-Minh Ribler (use C version)
- * January, 2007
+ * Programmer Binh-Minh Ribler (use C version)
+ * January, 2007
*
*-------------------------------------------------------------------------
*/
@@ -701,58 +706,58 @@ static void test_read_vl_string_attribute()
SUBTEST("reading VL String as attributes");
try {
- // Open file
- H5File file1(FILENAME, H5F_ACC_RDONLY);
-
- // Create a datatype to refer to.
- StrType vlst(0, H5T_VARIABLE);
-
- // Open the root group and its attribute named ATTRSTR_NAME.
- Group root = file1.openGroup("/");
- Attribute att = root.openAttribute(ATTRSTR_NAME);
-
- // Test reading "normal" sized string attribute
- char *string_att_check;
- att.read(vlst, &string_att_check);
- if(HDstrcmp(string_att_check,ATTRSTR_DATA.c_str())!=0)
- TestErrPrintf("VL string attributes don't match!, string_att=%s, string_att_check=%s\n",ATTRSTR_DATA.c_str(),string_att_check);
- HDfree(string_att_check);
- att.close();
-
- // Test reading "large" sized string attribute
- att = root.openAttribute("test_scalar_large");
- att.read(vlst, &string_att_check);
- if(HDstrcmp(string_att_check,string_att_write)!=0)
- TestErrPrintf("VL string attributes don't match!, string_att_write=%s, string_att_check=%s\n",string_att_write,string_att_check);
- HDfree(string_att_check);
- HDfree(string_att_write); // Free string allocated in test_write_vl_string_attribute
-
- // Close objects and file.
- att.close();
- vlst.close();
- root.close();
- file1.close();
-
- PASSED();
+ // Open file
+ H5File file1(FILENAME, H5F_ACC_RDONLY);
+
+ // Create a datatype to refer to.
+ StrType vlst(0, H5T_VARIABLE);
+
+ // Open the root group and its attribute named ATTRSTR_NAME.
+ Group root = file1.openGroup("/");
+ Attribute att = root.openAttribute(ATTRSTR_NAME);
+
+ // Test reading "normal" sized string attribute
+ char *string_att_check;
+ att.read(vlst, &string_att_check);
+ if(HDstrcmp(string_att_check,ATTRSTR_DATA.c_str())!=0)
+ TestErrPrintf("VL string attributes don't match!, string_att=%s, string_att_check=%s\n",ATTRSTR_DATA.c_str(),string_att_check);
+ HDfree(string_att_check);
+ att.close();
+
+ // Test reading "large" sized string attribute
+ att = root.openAttribute("test_scalar_large");
+ att.read(vlst, &string_att_check);
+ if(HDstrcmp(string_att_check,string_att_write)!=0)
+ TestErrPrintf("VL string attributes don't match!, string_att_write=%s, string_att_check=%s\n",string_att_write,string_att_check);
+ HDfree(string_att_check);
+ HDfree(string_att_write); // Free string allocated in test_write_vl_string_attribute
+
+ // Close objects and file.
+ att.close();
+ vlst.close();
+ root.close();
+ file1.close();
+
+ PASSED();
} // end try
// Catch all exceptions.
catch (Exception& E)
{
- issue_fail_msg("test_read_vl_string_attribute()", __LINE__, __FILE__, E.getCDetailMsg());
+ issue_fail_msg("test_read_vl_string_attribute()", __LINE__, __FILE__, E.getCDetailMsg());
}
} // test_read_vl_string_attribute
#endif // 2013: need to verify before adding to test
/*-------------------------------------------------------------------------
- * Function: test_vlstring_array_attribute
+ * Function: test_vlstring_array_attribute
*
- * Purpose: Test writing/reading VL string array to/from attributes.
+ * Purpose Test writing/reading VL string array to/from attributes.
*
- * Return: None
+ * Return None
*
- * Programmer: Binh-Minh Ribler
- * July, 2009
+ * Programmer Binh-Minh Ribler
+ * July, 2009
*
*-------------------------------------------------------------------------
*/
@@ -768,105 +773,105 @@ static void test_vlstring_array_attribute()
SUBTEST("VL String Array on Attributes");
try {
- // Open the file
- H5File file1(FILENAME, H5F_ACC_RDWR);
+ // Open the file
+ H5File file1(FILENAME, H5F_ACC_RDWR);
- // Create a datatype to refer to.
- StrType vlst(0, H5T_VARIABLE);
+ // Create a datatype to refer to.
+ StrType vlst(0, H5T_VARIABLE);
- // Open the root group.
- Group root = file1.openGroup("/");
+ // Open the root group.
+ Group root = file1.openGroup("/");
// Create dataspace for datasets.
hsize_t dims1[] = {SPACE1_DIM1};
DataSpace att_space(SPACE1_RANK, dims1);
- // Create an attribute for the root group.
- Attribute gr_attr = root.createAttribute(ATTRSTRARR_NAME, vlst, att_space);
+ // Create an attribute for the root group.
+ Attribute gr_attr = root.createAttribute(ATTRSTRARR_NAME, vlst, att_space);
- // Write data to the attribute.
- gr_attr.write(vlst, string_att_array);
+ // Write data to the attribute.
+ gr_attr.write(vlst, string_att_array);
- // Read and verify the attribute string as a string of chars.
- // Note: reading by array of H5std_string doesn't work yet.
- char *string_att_check[SPACE1_DIM1];
- gr_attr.read(vlst, &string_att_check);
+ // Read and verify the attribute string as a string of chars.
+ // Note: reading by array of H5std_string doesn't work yet.
+ char *string_att_check[SPACE1_DIM1];
+ gr_attr.read(vlst, &string_att_check);
- hsize_t ii;
- for (ii = 0; ii < SPACE1_DIM1; ii++)
- {
- if(HDstrcmp(string_att_check[ii], string_att_array[ii])!=0)
- TestErrPrintf("Line %d: Attribute data different: written=%s,read=%s\n",__LINE__, string_att_check[ii], string_att_check[ii]);
+ hsize_t ii;
+ for (ii = 0; ii < SPACE1_DIM1; ii++)
+ {
+ if(HDstrcmp(string_att_check[ii], string_att_array[ii])!=0)
+            TestErrPrintf("Line %d: Attribute data different: written=%s,read=%s\n",__LINE__, string_att_array[ii], string_att_check[ii]);
- HDfree(string_att_check[ii]); // note: no need for std::string test
- }
+ HDfree(string_att_check[ii]); // note: no need for std::string test
+ }
- // Close group's attribute.
- gr_attr.close();
- file1.close();
+ // Close group's attribute.
+ gr_attr.close();
+ file1.close();
- PASSED();
+ PASSED();
} // end try block
// Catch all exceptions.
catch (Exception& E)
{
- issue_fail_msg("test_vlstring_array_attribute()", __LINE__, __FILE__, E.getCDetailMsg());
+ issue_fail_msg("test_vlstring_array_attribute()", __LINE__, __FILE__, E.getCDetailMsg());
}
} // test_vlstring_array_attribute()
/* Helper routine for test_vl_rewrite() */
static void write_scalar_dset(H5File& file, DataType& type, DataSpace& space,
- char *name, char *data)
+ char *name, char *data)
{
DataSet dset;
try {
- dset = file.createDataSet(name, type, space);
- dset.write(&data, type, space, space);
- dset.close();
+ dset = file.createDataSet(name, type, space);
+ dset.write(&data, type, space, space);
+ dset.close();
} // end try
catch (FileIException& ferr) {
- throw;
+ throw;
}
catch (DataSetIException& derr) {
- throw;
+ throw;
}
}
/* Helper routine for test_vl_rewrite() */
static void read_scalar_dset(H5File& file, DataType& type, DataSpace& space,
- char *name, char *data)
+ char *name, char *data)
{
char *data_read;
DataSet dset;
try {
- dset = file.openDataSet(name);
- dset.read(&data_read, type, space, space);
- dset.close();
+ dset = file.openDataSet(name);
+ dset.read(&data_read, type, space, space);
+ dset.close();
- if(HDstrcmp(data, data_read))
- TestErrPrintf("Expected %s for dataset %s but read %s\n", data, name, data_read);
+ if(HDstrcmp(data, data_read))
+ TestErrPrintf("Expected %s for dataset %s but read %s\n", data, name, data_read);
- HDfree(data_read);
+ HDfree(data_read);
} // end try
catch (FileIException& ferr) {
- throw;
+ throw;
}
catch (DataSetIException& derr) {
- throw;
+ throw;
}
}
/*-------------------------------------------------------------------------
- * Function: test_vl_rewrite
+ * Function: test_vl_rewrite
*
- * Purpose: Test I/O on VL strings when many objects in the file
- * have been linked/unlinked.
+ * Purpose Test I/O on VL strings when many objects in the file
+ * have been linked/unlinked.
*
- * Return: None
+ * Return None
*
- * Programmer: Binh-Minh Ribler (use C version)
- * January, 2007
+ * Programmer Binh-Minh Ribler (use C version)
+ * January, 2007
*
*-------------------------------------------------------------------------
*/
@@ -878,75 +883,75 @@ static void test_vl_rewrite()
SUBTEST("I/O on VL strings with link/unlink");
try {
- // Create the files.
- H5File file1(FILENAME, H5F_ACC_TRUNC);
- H5File file2(FILENAME2, H5F_ACC_TRUNC);
-
- // Create the VL string datatype.
- StrType type(0, H5T_VARIABLE);
-
- // Create dataspace for the attribute.
- DataSpace space (H5S_SCALAR);
-
- // Create in file 1.
- int i;
- char name[256]; // Buffer for names & data
- for (i=0; i<REWRITE_NDATASETS; i++) {
- sprintf(name, "/set_%d", i);
- write_scalar_dset(file1, type, space, name, name);
- }
-
- // Effectively copy data from file 1 to 2.
- for (i=0; i<REWRITE_NDATASETS; i++) {
- sprintf(name, "/set_%d", i);
- read_scalar_dset(file1, type, space, name, name);
- write_scalar_dset(file2, type, space, name, name);
- }
-
- // Read back from file 2.
- for (i=0; i<REWRITE_NDATASETS; i++) {
- sprintf(name, "/set_%d", i);
- read_scalar_dset(file2, type, space, name, name);
- }
-
- // Remove from file 2.
- for (i=0; i<REWRITE_NDATASETS; i++) {
- sprintf(name, "/set_%d", i);
- file2.unlink(name);
- }
-
- // Effectively copy from file 1 to file 2.
- for (i=0; i<REWRITE_NDATASETS; i++) {
- sprintf(name, "/set_%d", i);
- read_scalar_dset(file1, type, space, name, name);
- write_scalar_dset(file2, type, space, name, name);
- }
-
- // Close objects and file.
- type.close();
- space.close();
- file1.close();
- file2.close();
-
- PASSED();
+ // Create the files.
+ H5File file1(FILENAME, H5F_ACC_TRUNC);
+ H5File file2(FILENAME2, H5F_ACC_TRUNC);
+
+ // Create the VL string datatype.
+ StrType type(0, H5T_VARIABLE);
+
+ // Create dataspace for the attribute.
+ DataSpace space (H5S_SCALAR);
+
+ // Create in file 1.
+ int i;
+ char name[256]; // Buffer for names & data
+ for (i=0; i<REWRITE_NDATASETS; i++) {
+ sprintf(name, "/set_%d", i);
+ write_scalar_dset(file1, type, space, name, name);
+ }
+
+ // Effectively copy data from file 1 to 2.
+ for (i=0; i<REWRITE_NDATASETS; i++) {
+ sprintf(name, "/set_%d", i);
+ read_scalar_dset(file1, type, space, name, name);
+ write_scalar_dset(file2, type, space, name, name);
+ }
+
+ // Read back from file 2.
+ for (i=0; i<REWRITE_NDATASETS; i++) {
+ sprintf(name, "/set_%d", i);
+ read_scalar_dset(file2, type, space, name, name);
+ }
+
+ // Remove from file 2.
+ for (i=0; i<REWRITE_NDATASETS; i++) {
+ sprintf(name, "/set_%d", i);
+ file2.unlink(name);
+ }
+
+ // Effectively copy from file 1 to file 2.
+ for (i=0; i<REWRITE_NDATASETS; i++) {
+ sprintf(name, "/set_%d", i);
+ read_scalar_dset(file1, type, space, name, name);
+ write_scalar_dset(file2, type, space, name, name);
+ }
+
+ // Close objects and file.
+ type.close();
+ space.close();
+ file1.close();
+ file2.close();
+
+ PASSED();
} // end try
// Catch all exceptions.
catch (Exception& E)
{
- issue_fail_msg("test_vl_rewrite()", __LINE__, __FILE__, E.getCDetailMsg());
+ issue_fail_msg("test_vl_rewrite()", __LINE__, __FILE__, E.getCDetailMsg());
}
} // end test_vl_rewrite()
/*-------------------------------------------------------------------------
- * Function: test_vlstrings
+ * Function: test_vlstrings
*
- * Purpose: VL string testing main routine.
+ * Purpose VL string testing main routine.
*
- * Return: None
+ * Return None
*
- * Programmer: Binh-Minh Ribler
- * January, 2007
+ * Programmer Binh-Minh Ribler
+ * January, 2007
*
*-------------------------------------------------------------------------
*/
@@ -977,13 +982,13 @@ void test_vlstrings()
/*-------------------------------------------------------------------------
- * Function: cleanup_vlstrings
+ * Function: cleanup_vlstrings
*
- * Purpose: Cleanup temporary test files
+ * Purpose Cleanup temporary test files
*
- * Return: none
+ * Return none
*
- * Programmer: Quincey Koziol
+ * Programmer Quincey Koziol
* September 10, 1999
*
* Modifications:
diff --git a/config/BlankForm b/config/BlankForm
index 31116bf..a452f92 100644
--- a/config/BlankForm
+++ b/config/BlankForm
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
# This file is part of the HDF5 build script. It is processed shortly
diff --git a/config/COPYING b/config/COPYING
index 6903daf..6497ace 100644
--- a/config/COPYING
+++ b/config/COPYING
@@ -5,12 +5,9 @@
The files and subdirectories in this directory are part of HDF5.
The full HDF5 copyright notice, including terms governing use,
- modification, and redistribution, is contained in the files COPYING
- and Copyright.html. COPYING can be found at the root of the source
- code distribution tree; Copyright.html can be found at the root
- level of an installed copy of the electronic HDF5 document set and
- is linked from the top-level documents page. It can also be found
- at http://www.hdfgroup.org/HDF5/doc/Copyright.html. If you do not
- have access to either file, you may request a copy from
+ modification, and redistribution, is contained in the COPYING file
+ which can be found at the root of the source code distribution tree
+ or in https://support.hdfgroup.org/ftp/HDF5/releases. If you do
+ not have access to either file, you may request a copy from
help@hdfgroup.org.
diff --git a/config/Makefile.am.blank b/config/Makefile.am.blank
index a54c734..b01735b 100644
--- a/config/Makefile.am.blank
+++ b/config/Makefile.am.blank
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
diff --git a/config/apple b/config/apple
index 5203695..0b15dfe 100644
--- a/config/apple
+++ b/config/apple
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
# This file is part of the HDF5 build script. It is processed shortly
diff --git a/config/cce-fflags b/config/cce-fflags
index 233f9ff..9a1c685 100644
--- a/config/cce-fflags
+++ b/config/cce-fflags
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
# This file should be sourced into configure if the compiler is the
diff --git a/config/cce-flags b/config/cce-flags
index 8f3b2dc..acab73c 100644
--- a/config/cce-flags
+++ b/config/cce-flags
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
# This file should be sourced into configure if the compiler is the
diff --git a/config/cmake/CMakeFindJavaCommon.cmake b/config/cmake/CMakeFindJavaCommon.cmake
index fcf0389..fe6ee82 100644
--- a/config/cmake/CMakeFindJavaCommon.cmake
+++ b/config/cmake/CMakeFindJavaCommon.cmake
@@ -17,25 +17,25 @@ set(_JAVA_HOME "")
if(JAVA_HOME AND IS_DIRECTORY "${JAVA_HOME}")
set(_JAVA_HOME "${JAVA_HOME}")
set(_JAVA_HOME_EXPLICIT 1)
-else()
+else ()
set(_ENV_JAVA_HOME "")
if(DEFINED ENV{JAVA_HOME})
file(TO_CMAKE_PATH "$ENV{JAVA_HOME}" _ENV_JAVA_HOME)
- endif()
+ endif ()
if(_ENV_JAVA_HOME AND IS_DIRECTORY "${_ENV_JAVA_HOME}")
set(_JAVA_HOME "${_ENV_JAVA_HOME}")
set(_JAVA_HOME_EXPLICIT 1)
- else()
+ else ()
set(_CMD_JAVA_HOME "")
if(APPLE AND EXISTS /usr/libexec/java_home)
execute_process(COMMAND /usr/libexec/java_home
OUTPUT_VARIABLE _CMD_JAVA_HOME OUTPUT_STRIP_TRAILING_WHITESPACE)
- endif()
+ endif ()
if(_CMD_JAVA_HOME AND IS_DIRECTORY "${_CMD_JAVA_HOME}")
set(_JAVA_HOME "${_CMD_JAVA_HOME}")
set(_JAVA_HOME_EXPLICIT 0)
- endif()
+ endif ()
unset(_CMD_JAVA_HOME)
- endif()
+ endif ()
unset(_ENV_JAVA_HOME)
-endif()
+endif ()
diff --git a/config/cmake/CTestCustom.cmake b/config/cmake/CTestCustom.cmake
index 0e1eb2e..7aff076 100644
--- a/config/cmake/CTestCustom.cmake
+++ b/config/cmake/CTestCustom.cmake
@@ -1,4 +1,20 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
set (CTEST_CUSTOM_MAXIMUM_NUMBER_OF_WARNINGS 3000)
+# To allow full output to go to CDash, set these limits to 0
+SET(CTEST_CUSTOM_MAXIMUM_PASSED_TEST_OUTPUT_SIZE 50000)
+SET(CTEST_CUSTOM_MAXIMUM_FAILED_TEST_OUTPUT_SIZE 50000)
+# WARNING! This could be a lot of output and could overwhelm CDash and the
+# MySQL DB so this might not be a good idea!
set (CTEST_CUSTOM_WARNING_EXCEPTION
${CTEST_CUSTOM_WARNING_EXCEPTION}
diff --git a/config/cmake/CTestScript.cmake b/config/cmake/CTestScript.cmake
new file mode 100755
index 0000000..a3779d6
--- /dev/null
+++ b/config/cmake/CTestScript.cmake
@@ -0,0 +1,163 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
+cmake_minimum_required (VERSION 3.2.2 FATAL_ERROR)
+########################################################
+# For any comments please contact cdashhelp@hdfgroup.org
+#
+########################################################
+# -----------------------------------------------------------
+# -- Get environment
+# -----------------------------------------------------------
+if (NOT SITE_OS_NAME)
+ ## machine name not provided - attempt to discover with uname
+ ## -- set hostname
+ ## --------------------------
+ find_program (HOSTNAME_CMD NAMES hostname)
+ exec_program (${HOSTNAME_CMD} ARGS OUTPUT_VARIABLE HOSTNAME)
+ set (CTEST_SITE "${HOSTNAME}${CTEST_SITE_EXT}")
+ find_program (UNAME NAMES uname)
+ macro (getuname name flag)
+ exec_program ("${UNAME}" ARGS "${flag}" OUTPUT_VARIABLE "${name}")
+ endmacro ()
+
+ getuname (osname -s)
+ getuname (osrel -r)
+ getuname (cpu -m)
+ message (STATUS "Dashboard script uname output: ${osname}-${osrel}-${cpu}\n")
+
+ set (CTEST_BUILD_NAME "${osname}-${osrel}-${cpu}")
+ if (SITE_BUILDNAME_SUFFIX)
+ set (CTEST_BUILD_NAME "${SITE_BUILDNAME_SUFFIX}-${CTEST_BUILD_NAME}")
+ endif ()
+ set (BUILD_OPTIONS "${ADD_BUILD_OPTIONS}")
+else ()
+ ## machine name provided
+ ## --------------------------
+ if (CMAKE_HOST_UNIX)
+ set(CTEST_BUILD_NAME "${SITE_OS_NAME}-${SITE_OS_VERSION}-${SITE_OS_BITS}-${SITE_COMPILER_NAME}-${SITE_COMPILER_VERSION}")
+ else ()
+ set(CTEST_BUILD_NAME "${SITE_OS_NAME}-${SITE_OS_VERSION}-${SITE_COMPILER_NAME}")
+ endif ()
+ if (SITE_BUILDNAME_SUFFIX)
+ set(CTEST_BUILD_NAME "${CTEST_BUILD_NAME}-${SITE_BUILDNAME_SUFFIX}")
+ endif ()
+ set (BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DSITE:STRING=${CTEST_SITE} -DBUILDNAME:STRING=${CTEST_BUILD_NAME}")
+endif ()
+
+#-----------------------------------------------------------------------------
+# MAC machines need special option
+#-----------------------------------------------------------------------------
+if (APPLE)
+ # Compiler choice
+ execute_process (COMMAND xcrun --find cc OUTPUT_VARIABLE XCODE_CC OUTPUT_STRIP_TRAILING_WHITESPACE)
+ execute_process (COMMAND xcrun --find c++ OUTPUT_VARIABLE XCODE_CXX OUTPUT_STRIP_TRAILING_WHITESPACE)
+ set (ENV{CC} "${XCODE_CC}")
+ set (ENV{CXX} "${XCODE_CXX}")
+
+ if (NOT NO_MAC_FORTRAN)
+ # Shared fortran is not supported, build static
+ set (BUILD_OPTIONS "${BUILD_OPTIONS} -DBUILD_SHARED_LIBS:BOOL=OFF -DCMAKE_ANSI_CFLAGS:STRING=-fPIC")
+ else ()
+ set (BUILD_OPTIONS "${BUILD_OPTIONS} -DHDF5_BUILD_FORTRAN:BOOL=OFF")
+ endif ()
+
+ set (BUILD_OPTIONS "${BUILD_OPTIONS} -DCTEST_USE_LAUNCHERS:BOOL=ON -DCMAKE_BUILD_WITH_INSTALL_RPATH:BOOL=OFF")
+endif ()
+
+#-----------------------------------------------------------------------------
+set (NEED_REPOSITORY_CHECKOUT 0)
+set (CTEST_CMAKE_COMMAND "\"${CMAKE_COMMAND}\"")
+if (CTEST_USE_TAR_SOURCE)
+ ## Uncompress source if tar or zip file provided
+ ## --------------------------
+ if (WIN32)
+ message (STATUS "extracting... [${CMAKE_EXECUTABLE_NAME} -E tar -xvf ${CTEST_DASHBOARD_ROOT}\\${CTEST_USE_TAR_SOURCE}.zip]")
+ execute_process (COMMAND ${CMAKE_EXECUTABLE_NAME} -E tar -xvf ${CTEST_DASHBOARD_ROOT}\\${CTEST_USE_TAR_SOURCE}.zip RESULT_VARIABLE rv)
+ else ()
+ message (STATUS "extracting... [${CMAKE_EXECUTABLE_NAME} -E tar -xvf ${CTEST_DASHBOARD_ROOT}/${CTEST_USE_TAR_SOURCE}.tar]")
+ execute_process (COMMAND ${CMAKE_EXECUTABLE_NAME} -E tar -xvf ${CTEST_DASHBOARD_ROOT}/${CTEST_USE_TAR_SOURCE}.tar RESULT_VARIABLE rv)
+ endif ()
+
+ if (NOT rv EQUAL 0)
+ message (STATUS "extracting... [error-(${rv}) clean up]")
+ file (REMOVE_RECURSE "${CTEST_SOURCE_DIRECTORY}")
+ message (FATAL_ERROR "error: extract of ${CTEST_USE_TAR_SOURCE} failed")
+ endif ()
+endif ()
+
+#-----------------------------------------------------------------------------
+## Clear the build directory
+## --------------------------
+set (CTEST_START_WITH_EMPTY_BINARY_DIRECTORY TRUE)
+if (EXISTS "${CTEST_BINARY_DIRECTORY}" AND IS_DIRECTORY "${CTEST_BINARY_DIRECTORY}")
+ ctest_empty_binary_directory (${CTEST_BINARY_DIRECTORY})
+else ()
+ file (MAKE_DIRECTORY "${CTEST_BINARY_DIRECTORY}")
+endif ()
+
+# Use multiple CPU cores to build
+include (ProcessorCount)
+ProcessorCount (N)
+if (NOT N EQUAL 0)
+ if (NOT WIN32)
+ set (CTEST_BUILD_FLAGS -j${N})
+ endif ()
+ set (ctest_test_args ${ctest_test_args} PARALLEL_LEVEL ${N})
+endif ()
+
+#-----------------------------------------------------------------------------
+# Initialize the CTEST commands
+#------------------------------
+set (CTEST_CONFIGURE_COMMAND
+ "${CTEST_CMAKE_COMMAND} -C \"${CTEST_SOURCE_DIRECTORY}/config/cmake/cacheinit.cmake\" -DCMAKE_BUILD_TYPE:STRING=${CTEST_CONFIGURATION_TYPE} ${BUILD_OPTIONS} \"-G${CTEST_CMAKE_GENERATOR}\" \"${CTEST_SOURCE_DIRECTORY}\""
+)
+#-----------------------------------------------------------------------------
+
+#-----------------------------------------------------------------------------
+## -- set output to english
+set (ENV{LC_MESSAGES} "en_EN")
+
+#-----------------------------------------------------------------------------
+ configure_file(${CTEST_SOURCE_DIRECTORY}/config/cmake/CTestCustom.cmake ${CTEST_BINARY_DIRECTORY}/CTestCustom.cmake)
+ ctest_read_custom_files ("${CTEST_BINARY_DIRECTORY}")
+#-----------------------------------------------------------------------------
+ ## NORMAL process
+ ## -- LOCAL_SUBMIT reports to CDash server
+ ## --------------------------
+ ctest_start (Experimental)
+ ctest_configure (BUILD "${CTEST_BINARY_DIRECTORY}" RETURN_VALUE res)
+ if (LOCAL_SUBMIT)
+ ctest_submit (PARTS Configure Notes)
+ endif ()
+ if (${res} LESS 0 OR ${res} GREATER 0)
+ file (APPEND ${CTEST_SCRIPT_DIRECTORY}/FailedCTest.txt "Failed Configure: ${res}\n")
+ endif ()
+
+ ctest_build (BUILD "${CTEST_BINARY_DIRECTORY}" APPEND RETURN_VALUE res NUMBER_ERRORS errval)
+ if (LOCAL_SUBMIT)
+ ctest_submit (PARTS Build)
+ endif ()
+ if (${res} LESS 0 OR ${res} GREATER 0 OR ${errval} GREATER 0)
+ file (APPEND ${CTEST_SCRIPT_DIRECTORY}/FailedCTest.txt "Failed ${errval} Build: ${res}\n")
+ endif ()
+
+ ctest_test (BUILD "${CTEST_BINARY_DIRECTORY}" APPEND ${ctest_test_args} RETURN_VALUE res)
+ if (LOCAL_SUBMIT)
+ ctest_submit (PARTS Test)
+ endif()
+ if (${res} LESS 0 OR ${res} GREATER 0)
+ file (APPEND ${CTEST_SCRIPT_DIRECTORY}/FailedCTest.txt "Failed Tests: ${res}\n")
+ endif ()
+ if (${res} LESS 0 OR ${res} GREATER 0)
+ message (FATAL_ERROR "tests FAILED")
+ endif ()
+#-----------------------------------------------------------------------------
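
The new CTestScript.cmake above does not choose its own source and build locations; it expects the caller to define CTEST_SOURCE_DIRECTORY, CTEST_BINARY_DIRECTORY, CTEST_CMAKE_GENERATOR and CTEST_CONFIGURATION_TYPE (plus optional knobs such as ADD_BUILD_OPTIONS, CTEST_SITE_EXT and LOCAL_SUBMIT) before it is included. A minimal, hypothetical driver script, with placeholder paths and options, would look roughly like this:

# my_hdf5_dashboard.cmake -- hypothetical driver; paths and options are placeholders
set (CTEST_DASHBOARD_ROOT     "/scratch/hdf5-dash")
set (CTEST_SOURCE_DIRECTORY   "${CTEST_DASHBOARD_ROOT}/hdf5")     # checked-out source tree
set (CTEST_BINARY_DIRECTORY   "${CTEST_DASHBOARD_ROOT}/build")    # emptied/recreated by the script
set (CTEST_CMAKE_GENERATOR    "Unix Makefiles")
set (CTEST_CONFIGURATION_TYPE "Release")
set (CTEST_SITE_EXT           "")                                 # suffix appended to the discovered hostname
set (ADD_BUILD_OPTIONS        "-DHDF5_BUILD_CPP_LIB:BOOL=ON")     # extra cache settings passed to configure
set (LOCAL_SUBMIT             TRUE)                               # submit Configure/Build/Test parts to CDash
include ("${CTEST_SCRIPT_DIRECTORY}/CTestScript.cmake")
# Run it with:  ctest -S my_hdf5_dashboard.cmake -V
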
diff --git a/config/cmake/ConfigureChecks.cmake b/config/cmake/ConfigureChecks.cmake
index ec0aecf..0848e66 100644
--- a/config/cmake/ConfigureChecks.cmake
+++ b/config/cmake/ConfigureChecks.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
#-----------------------------------------------------------------------------
# Include all the necessary files for macros
#-----------------------------------------------------------------------------
@@ -6,7 +17,7 @@ include (${HDF_RESOURCES_EXT_DIR}/ConfigureChecks.cmake)
if (HDF5_ENABLE_USING_MEMCHECKER)
set (H5_USING_MEMCHECKER 1)
-endif (HDF5_ENABLE_USING_MEMCHECKER)
+endif ()
#-----------------------------------------------------------------------------
# Option for --enable-strict-format-checks
@@ -14,7 +25,7 @@ endif (HDF5_ENABLE_USING_MEMCHECKER)
option (HDF5_STRICT_FORMAT_CHECKS "Whether to perform strict file format checks" OFF)
if (HDF5_STRICT_FORMAT_CHECKS)
set (H5_STRICT_FORMAT_CHECKS 1)
-endif (HDF5_STRICT_FORMAT_CHECKS)
+endif ()
MARK_AS_ADVANCED (HDF5_STRICT_FORMAT_CHECKS)
#-----------------------------------------------------------------------------
@@ -23,7 +34,7 @@ MARK_AS_ADVANCED (HDF5_STRICT_FORMAT_CHECKS)
option (HDF5_METADATA_TRACE_FILE "Enable metadata trace file collection" OFF)
if (HDF5_METADATA_TRACE_FILE)
set (H5_METADATA_TRACE_FILE 1)
-endif (HDF5_METADATA_TRACE_FILE)
+endif ()
MARK_AS_ADVANCED (HDF5_METADATA_TRACE_FILE)
# ----------------------------------------------------------------------
@@ -35,7 +46,7 @@ MARK_AS_ADVANCED (HDF5_METADATA_TRACE_FILE)
option (HDF5_WANT_DATA_ACCURACY "IF data accuracy is guaranteed during data conversions" ON)
if (HDF5_WANT_DATA_ACCURACY)
set (H5_WANT_DATA_ACCURACY 1)
-endif (HDF5_WANT_DATA_ACCURACY)
+endif ()
MARK_AS_ADVANCED (HDF5_WANT_DATA_ACCURACY)
# ----------------------------------------------------------------------
@@ -47,7 +58,7 @@ MARK_AS_ADVANCED (HDF5_WANT_DATA_ACCURACY)
option (HDF5_WANT_DCONV_EXCEPTION "exception handling functions is checked during data conversions" ON)
if (HDF5_WANT_DCONV_EXCEPTION)
set (H5_WANT_DCONV_EXCEPTION 1)
-endif (HDF5_WANT_DCONV_EXCEPTION)
+endif ()
MARK_AS_ADVANCED (HDF5_WANT_DCONV_EXCEPTION)
# ----------------------------------------------------------------------
@@ -56,7 +67,7 @@ MARK_AS_ADVANCED (HDF5_WANT_DCONV_EXCEPTION)
option (HDF5_ENABLE_CODESTACK "Enable the function stack tracing (for developer debugging)." OFF)
if (HDF5_ENABLE_CODESTACK)
set (H5_HAVE_CODESTACK 1)
-endif (HDF5_ENABLE_CODESTACK)
+endif ()
MARK_AS_ADVANCED (HDF5_ENABLE_CODESTACK)
#-----------------------------------------------------------------------------
@@ -65,7 +76,7 @@ MARK_AS_ADVANCED (HDF5_ENABLE_CODESTACK)
option (HDF5_ENABLE_HSIZET "Enable datasets larger than memory" ON)
if (HDF5_ENABLE_HSIZET)
set (${HDF_PREFIX}_HAVE_LARGE_HSIZET 1)
-endif (HDF5_ENABLE_HSIZET)
+endif ()
# so far we have no check for this
set (H5_HAVE_TMPFILE 1)
@@ -79,10 +90,10 @@ set (H5_DEFAULT_VFD H5FD_SEC2)
if (NOT DEFINED "H5_DEFAULT_PLUGINDIR")
if (WINDOWS)
set (H5_DEFAULT_PLUGINDIR "%ALLUSERSPROFILE%\\\\hdf5\\\\lib\\\\plugin")
- else (WINDOWS)
+ else ()
set (H5_DEFAULT_PLUGINDIR "/usr/local/hdf5/lib/plugin")
- endif (WINDOWS)
-endif (NOT DEFINED "H5_DEFAULT_PLUGINDIR")
+ endif ()
+endif ()
if (WINDOWS)
set (H5_HAVE_WINDOWS 1)
@@ -91,7 +102,7 @@ if (WINDOWS)
# that is, "drive-letter:\" (e.g. "C:") or "drive-letter:/" (e.g. "C:/").
# (This flag should be _unset_ for all machines, except for Windows)
set (H5_HAVE_WINDOW_PATH 1)
-endif (WINDOWS)
+endif ()
# ----------------------------------------------------------------------
# END of WINDOWS Hard code Values
@@ -101,9 +112,9 @@ CHECK_FUNCTION_EXISTS (difftime H5_HAVE_DIFFTIME)
# Find the library containing clock_gettime()
if (NOT WINDOWS)
- CHECK_FUNCTION_EXISTS(clock_gettime CLOCK_GETTIME_IN_LIBC)
- CHECK_LIBRARY_EXISTS(rt clock_gettime "" CLOCK_GETTIME_IN_LIBRT)
- CHECK_LIBRARY_EXISTS(posix4 clock_gettime "" CLOCK_GETTIME_IN_LIBPOSIX4)
+ CHECK_FUNCTION_EXISTS (clock_gettime CLOCK_GETTIME_IN_LIBC)
+ CHECK_LIBRARY_EXISTS (rt clock_gettime "" CLOCK_GETTIME_IN_LIBRT)
+ CHECK_LIBRARY_EXISTS (posix4 clock_gettime "" CLOCK_GETTIME_IN_LIBPOSIX4)
if (CLOCK_GETTIME_IN_LIBC)
set (H5_HAVE_CLOCK_GETTIME 1)
elseif (CLOCK_GETTIME_IN_LIBRT)
@@ -112,8 +123,8 @@ if (NOT WINDOWS)
elseif (CLOCK_GETTIME_IN_LIBPOSIX4)
set (H5_HAVE_CLOCK_GETTIME 1)
list (APPEND LINK_LIBS posix4)
- endif (CLOCK_GETTIME_IN_LIBC)
-endif (NOT WINDOWS)
+ endif ()
+endif ()
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
@@ -131,43 +142,43 @@ if (NOT WINDOWS)
OUTPUT_VARIABLE OUTPUT
)
if (TEST_DIRECT_VFD_WORKS_COMPILE)
- if (TEST_DIRECT_VFD_WORKS_RUN MATCHES 0)
+ if (TEST_DIRECT_VFD_WORKS_RUN MATCHES 0)
HDF_FUNCTION_TEST (HAVE_DIRECT)
set (CMAKE_REQUIRED_DEFINITIONS "${CMAKE_REQUIRED_DEFINITIONS} -D_GNU_SOURCE")
add_definitions ("-D_GNU_SOURCE")
- else (TEST_DIRECT_VFD_WORKS_RUN MATCHES 0)
+ else ()
set (TEST_DIRECT_VFD_WORKS "" CACHE INTERNAL ${msg})
message (STATUS "${msg}... no")
file (APPEND ${CMAKE_BINARY_DIR}/CMakeFiles/CMakeError.log
"Test TEST_DIRECT_VFD_WORKS Run failed with the following output and exit code:\n ${OUTPUT}\n"
)
- endif (TEST_DIRECT_VFD_WORKS_RUN MATCHES 0)
- else (TEST_DIRECT_VFD_WORKS_COMPILE )
+ endif ()
+ else ()
set (TEST_DIRECT_VFD_WORKS "" CACHE INTERNAL ${msg})
message (STATUS "${msg}... no")
file (APPEND ${CMAKE_BINARY_DIR}/CMakeFiles/CMakeError.log
"Test TEST_DIRECT_VFD_WORKS Compile failed with the following output:\n ${OUTPUT}\n"
)
- endif (TEST_DIRECT_VFD_WORKS_COMPILE)
- endif (HDF5_ENABLE_DIRECT_VFD)
-endif (NOT WINDOWS)
+ endif ()
+ endif ()
+endif ()
#-----------------------------------------------------------------------------
# Check if C has __float128 extension
#-----------------------------------------------------------------------------
CHECK_TYPE_SIZE("__float128" SIZEOF___FLOAT128)
-if(${HAVE_SIZEOF___FLOAT128})
- SET(H5_HAVE_FLOAT128 1)
-else (${HAVE_SIZEOF___FLOAT128})
- SET(H5_HAVE_FLOAT128 0)
- SET(SIZEOF___FLOAT128 0)
-endif(${HAVE_SIZEOF___FLOAT128})
+if (${HAVE_SIZEOF___FLOAT128})
+ set (H5_HAVE_FLOAT128 1)
+else ()
+ set (H5_HAVE_FLOAT128 0)
+ set (SIZEOF___FLOAT128 0)
+endif ()
#-----------------------------------------------------------------------------
# Macro to determine the various conversion capabilities
#-----------------------------------------------------------------------------
-MACRO (H5ConversionTests TEST msg)
+macro (H5ConversionTests TEST msg)
if ("${TEST}" MATCHES "^${TEST}$")
# message (STATUS "===> ${TEST}")
TRY_RUN (${TEST}_RUN ${TEST}_COMPILE
@@ -177,41 +188,41 @@ MACRO (H5ConversionTests TEST msg)
OUTPUT_VARIABLE OUTPUT
)
if (${TEST}_COMPILE)
- if (${TEST}_RUN MATCHES 0)
+ if (${TEST}_RUN MATCHES 0)
set (${TEST} 1 CACHE INTERNAL ${msg})
message (STATUS "${msg}... yes")
- else (${TEST}_RUN MATCHES 0)
+ else ()
set (${TEST} "" CACHE INTERNAL ${msg})
message (STATUS "${msg}... no")
file (APPEND ${CMAKE_BINARY_DIR}/CMakeFiles/CMakeError.log
"Test ${TEST} Run failed with the following output and exit code:\n ${OUTPUT}\n"
)
- endif (${TEST}_RUN MATCHES 0)
- else (${TEST}_COMPILE )
+ endif ()
+ else ()
set (${TEST} "" CACHE INTERNAL ${msg})
message (STATUS "${msg}... no")
file (APPEND ${CMAKE_BINARY_DIR}/CMakeFiles/CMakeError.log
"Test ${TEST} Compile failed with the following output:\n ${OUTPUT}\n"
)
- endif (${TEST}_COMPILE)
+ endif ()
- endif ("${TEST}" MATCHES "^${TEST}$")
-ENDMACRO (H5ConversionTests)
+ endif ()
+endmacro ()
#-----------------------------------------------------------------------------
# Macro to make some of the conversion tests easier to write/read
#-----------------------------------------------------------------------------
-MACRO (H5MiscConversionTest VAR TEST msg)
+macro (H5MiscConversionTest VAR TEST msg)
if ("${TEST}" MATCHES "^${TEST}$")
if (${VAR})
set (${TEST} 1 CACHE INTERNAL ${msg})
message (STATUS "${msg}... yes")
- else (${VAR})
+ else ()
set (${TEST} "" CACHE INTERNAL ${msg})
message (STATUS "${msg}... no")
- endif (${VAR})
- endif ("${TEST}" MATCHES "^${TEST}$")
-ENDMACRO (H5MiscConversionTest)
+ endif ()
+ endif ()
+endmacro ()
#-----------------------------------------------------------------------------
# Check various conversion capabilities
@@ -219,9 +230,9 @@ ENDMACRO (H5MiscConversionTest)
# ----------------------------------------------------------------------
# Set the flag to indicate that the machine is using a special algorithm to convert
-# 'long double' to '(unsigned) long' values. (This flag should only be set for
-# the IBM Power6 Linux. When the bit sequence of long double is
-# 0x4351ccf385ebc8a0bfcc2a3c3d855620, the converted value of (unsigned)long
+# 'long double' to '(unsigned) long' values. (This flag should only be set for
+# the IBM Power6 Linux. When the bit sequence of long double is
+# 0x4351ccf385ebc8a0bfcc2a3c3d855620, the converted value of (unsigned)long
# is 0x004733ce17af227f, not the same as the library's conversion to 0x004733ce17af2282.
# The machine's conversion gets the correct value. We define the macro and disable
# this kind of test until we figure out what algorithm they use.
@@ -229,10 +240,10 @@ ENDMACRO (H5MiscConversionTest)
H5ConversionTests (H5_LDOUBLE_TO_LONG_SPECIAL "Checking IF your system converts long double to (unsigned) long values with special algorithm")
# ----------------------------------------------------------------------
# Set the flag to indicate that the machine is using a special algorithm
-# to convert some values of '(unsigned) long' to 'long double' values.
-# (This flag should be off for all machines, except for IBM Power6 Linux,
-# when the bit sequences are 003fff..., 007fff..., 00ffff..., 01ffff...,
-# ..., 7fffff..., the compiler uses a unknown algorithm. We define a
+# to convert some values of '(unsigned) long' to 'long double' values.
+# (This flag should be off for all machines, except for IBM Power6 Linux,
+# when the bit sequences are 003fff..., 007fff..., 00ffff..., 01ffff...,
+# ..., 7fffff..., the compiler uses an unknown algorithm. We define a
# macro and skip the test for now until we know about the algorithm.
#
H5ConversionTests (H5_LONG_TO_LDOUBLE_SPECIAL "Checking IF your system can convert (unsigned) long to long double values with special algorithm")
@@ -261,7 +272,7 @@ H5ConversionTests (H5_NO_ALIGNMENT_RESTRICTIONS "Checking IF alignment restricti
# -----------------------------------------------------------------------
# wrapper script variables
-#
+#
set (prefix ${CMAKE_INSTALL_PREFIX})
set (exec_prefix "\${prefix}")
set (libdir "${exec_prefix}/lib")
@@ -272,4 +283,4 @@ set (CXX ${CMAKE_CXX_COMPILER})
set (FC ${CMAKE_Fortran_COMPILER})
foreach (LINK_LIB ${LINK_LIBS})
set (LIBS "${LIBS} -l${LINK_LIB}")
-endforeach (LINK_LIB ${LINK_LIBS})
+endforeach ()
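
The H5_* results gathered above only take effect once they are substituted into the generated header through its #cmakedefine lines. Outside of HDF5's exact wiring, the same check-then-configure_file pattern looks roughly like the following sketch (names prefixed MY_ and the file names are illustrative, not part of this change):

# Illustrative feature-check + generated-header pattern (not HDF5's exact wiring)
include (CheckFunctionExists)
include (CheckTypeSize)
check_function_exists (clock_gettime MY_HAVE_CLOCK_GETTIME)
check_type_size ("__float128" SIZEOF___FLOAT128)
if (HAVE_SIZEOF___FLOAT128)     # set by check_type_size when the type exists
  set (MY_HAVE_FLOAT128 1)
endif ()
# my_config.h.in would carry matching lines, e.g.
#   #cmakedefine MY_HAVE_CLOCK_GETTIME @MY_HAVE_CLOCK_GETTIME@
#   #cmakedefine MY_HAVE_FLOAT128      @MY_HAVE_FLOAT128@
configure_file ("${CMAKE_CURRENT_SOURCE_DIR}/my_config.h.in"
                "${CMAKE_CURRENT_BINARY_DIR}/my_config.h")
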
diff --git a/config/cmake/ConversionTests.c b/config/cmake/ConversionTests.c
index b8946a8..082dbd4 100644
--- a/config/cmake/ConversionTests.c
+++ b/config/cmake/ConversionTests.c
@@ -1,3 +1,14 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifdef H5_LDOUBLE_TO_LONG_SPECIAL_TEST
#include <string.h>
diff --git a/config/cmake/FindHDFJAVA.cmake.in b/config/cmake/FindHDFJAVA.cmake.in
index b822280..ed2b8c2 100644
--- a/config/cmake/FindHDFJAVA.cmake.in
+++ b/config/cmake/FindHDFJAVA.cmake.in
@@ -1,21 +1,32 @@
#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
+#
# To be used by projects that make use of CMakeified hdf-java
#
#
# Find the HDFJAVA includes and get all installed hdf-java library settings from
-# HDFJAVA-config.cmake file : Requires a CMake compatible hdf-java-@HDFJAVA_PACKAGE_VERSION@ or later
+# HDFJAVA-config.cmake file : Requires a CMake compatible hdf-java-@HDFJAVA_PACKAGE_VERSION@ or later
# for this feature to work. The following vars are set if hdf-java is found.
#
# HDFJAVA_FOUND - True if found, otherwise all other vars are undefined
# HDFJAVA_VERSION_STRING - full version (e.g. @HDFJAVA_PACKAGE_VERSION@)
# HDFJAVA_VERSION_MAJOR - major part of version (e.g. @HDFJAVA_PACKAGE_VERSION_MAJOR@)
# HDFJAVA_VERSION_MINOR - minor part (e.g. @HDFJAVA_PACKAGE_VERSION_MINOR@)
-#
+#
# Target names that are valid (depending on enabled options)
# will be the following
#
-#
+#
# To aid in finding HDFJAVA as part of a subproject set
# HDFJAVA_ROOT_DIR_HINT to the location where @HDFJAVA_PACKAGE@@HDF_PACKAGE_EXT@-config.cmake lies
@@ -60,9 +71,9 @@ if (HDFJAVA_ROOT_DIR)
set (HDFJAVA_FOUND "YES")
INCLUDE (${HDFJAVA_ROOT_DIR}/@HDFJAVA_PACKAGE@@HDF_PACKAGE_EXT@-config.cmake)
set (HDFJAVA_LIBRARIES "${HDFJAVA_LIBRARY}")
- set (HDFJAVA_INCLUDE_DIRS
+ set (HDFJAVA_INCLUDE_DIRS
${HDFJAVA_LIBRARY}/jarhdf-@HDFJAVA_PACKAGE_VERSION@.jar
${HDFJAVA_LIBRARY}/jarhdf5-@HDFJAVA_PACKAGE_VERSION@.jar
)
-
-endif (HDFJAVA_ROOT_DIR)
+
+endif ()
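
FindHDFJAVA.cmake.in is a find module, so a consuming project would add its location to CMAKE_MODULE_PATH and read back the variables listed in its header above; a rough, hypothetical sketch with placeholder paths:

# Hypothetical consumer of the generated FindHDFJAVA.cmake (paths are placeholders)
list (APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/cmake")
set (HDFJAVA_ROOT_DIR_HINT "/opt/hdf-java/share/cmake")   # where the hdf-java *-config.cmake lives
find_package (HDFJAVA)
if (HDFJAVA_FOUND)
  message (STATUS "Found hdf-java ${HDFJAVA_VERSION_STRING}: ${HDFJAVA_INCLUDE_DIRS}")
endif ()
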
diff --git a/config/cmake/FindJNI.cmake b/config/cmake/FindJNI.cmake
index 9d755fc..c82d308 100644
--- a/config/cmake/FindJNI.cmake
+++ b/config/cmake/FindJNI.cmake
@@ -137,7 +137,6 @@ JAVA_APPEND_LIBRARY_DIRECTORIES(JAVA_AWT_LIBRARY_DIRECTORIES
/usr/lib/java/jre/lib/{libarch}
/usr/lib64/java/jre/lib/{libarch}
/usr/lib/jvm/jre/lib/{libarch}
- /usr/lib64/jvm/jre/lib/{libarch}
/usr/local/lib/java/jre/lib/{libarch}
/usr/local/share/java/jre/lib/{libarch}
/usr/lib/j2sdk1.4-sun/jre/lib/{libarch}
@@ -147,13 +146,14 @@ JAVA_APPEND_LIBRARY_DIRECTORIES(JAVA_AWT_LIBRARY_DIRECTORIES
/usr/lib/jvm/java-1.5.0-sun/jre/lib/{libarch}
/usr/lib/jvm/java-6-sun-1.6.0.00/jre/lib/{libarch} # can this one be removed according to #8821 ? Alex
/usr/lib/jvm/java-6-openjdk/jre/lib/{libarch}
- /usr/lib/jvm/java-7-openjdk/jre/lib/{libarch}
- /usr/lib/jvm/java-7-openjdk-{libarch}/jre/lib/{libarch}
/usr/lib/jvm/java-1.6.0-openjdk-1.6.0.0/jre/lib/{libarch} # fedora
# Debian specific paths for default JVM
/usr/lib/jvm/default-java/jre/lib/{libarch}
/usr/lib/jvm/default-java/jre/lib
/usr/lib/jvm/default-java/lib
+ # Arch Linux specific paths for default JVM
+ /usr/lib/jvm/default/jre/lib/{libarch}
+ /usr/lib/jvm/default/lib/{libarch}
# Ubuntu specific paths for default JVM
/usr/lib/jvm/java-8-openjdk-{libarch}/jre/lib/{libarch} # Ubuntu 15.10
/usr/lib/jvm/java-7-openjdk-{libarch}/jre/lib/{libarch} # Ubuntu 15.10
@@ -199,14 +199,11 @@ JAVA_APPEND_LIBRARY_DIRECTORIES(JAVA_AWT_INCLUDE_DIRECTORIES
/usr/lib64/java/include
/usr/local/lib/java/include
/usr/lib/jvm/java/include
- /usr/lib64/jvm/java/include
/usr/lib/jvm/java-6-sun/include
/usr/lib/jvm/java-1.5.0-sun/include
/usr/lib/jvm/java-6-sun-1.6.0.00/include # can this one be removed according to #8821 ? Alex
/usr/lib/jvm/java-6-openjdk/include
/usr/lib/jvm/java-7-openjdk/include
- /usr/lib/jvm/java-7-openjdk-i386/include
- /usr/lib/jvm/java-7-openjdk-amd64/include
/usr/lib64/jvm/java-7-openjdk/include
/usr/lib64/jvm/java-7-openjdk-amd64/include
/usr/lib/jvm/java-8-openjdk-{libarch}/include # ubuntu 15.10
@@ -218,6 +215,8 @@ JAVA_APPEND_LIBRARY_DIRECTORIES(JAVA_AWT_INCLUDE_DIRECTORIES
/opt/sun-jdk-1.5.0.04/include
# Debian specific path for default JVM
/usr/lib/jvm/default-java/include
+ # Arch specific path for default JVM
+ /usr/lib/jvm/default/include
# OpenBSD specific path for default JVM
/usr/local/jdk-1.7.0/include
/usr/local/jdk-1.6.0/include
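
The path-list edits above (dropping stale JDK 7 locations, adding Arch Linux defaults) only change where the module searches; callers keep using the standard FindJNI interface, and an explicit JAVA_HOME still takes priority through CMakeFindJavaCommon.cmake shown earlier. A minimal usage sketch (the target name is hypothetical):

# Standard FindJNI usage; set JAVA_HOME (env or -DJAVA_HOME=...) to steer the search
find_package (JNI)
if (JNI_FOUND)
  include_directories (${JNI_INCLUDE_DIRS})
  # target_link_libraries (my_jni_wrapper ${JNI_LIBRARIES})   # hypothetical target
endif ()
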
diff --git a/config/cmake/H5cxx_config.h.in b/config/cmake/H5cxx_config.h.in
index c4e1c03..82e85c6 100644
--- a/config/cmake/H5cxx_config.h.in
+++ b/config/cmake/H5cxx_config.h.in
@@ -1,3 +1,14 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* src/H5cxx_config.h.in Created manually. */
/* Define if offsetof extension is present */
diff --git a/config/cmake/H5pubconf.h.in b/config/cmake/H5pubconf.h.in
index 5dad4f3..8c01dba 100644
--- a/config/cmake/H5pubconf.h.in
+++ b/config/cmake/H5pubconf.h.in
@@ -1,3 +1,14 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* H5pubconf.h Generated By CMake during the configuration */
#ifndef H5_CONFIG_H_
@@ -41,17 +52,41 @@
/* As FC_FUNC, but for C identifiers containing underscores. */
#define @H5_FC_FUNC_@
-/* Define Fortran Maximum Real Decimal Precision */
-#cmakedefine H5_PAC_FC_MAX_REAL_PRECISION @H5_PAC_FC_MAX_REAL_PRECISION@
-
-/* Define C Maximum Real Decimal Precision */
-#cmakedefine H5_PAC_C_MAX_REAL_PRECISION @H5_PAC_C_MAX_REAL_PRECISION@
+/* Define if Fortran C_LONG_DOUBLE is different from C_DOUBLE */
+#cmakedefine H5_FORTRAN_C_LONG_DOUBLE_IS_UNIQUE @H5_FORTRAN_C_LONG_DOUBLE_IS_UNIQUE@
/* Define if we have Fortran C_LONG_DOUBLE */
#cmakedefine H5_FORTRAN_HAVE_C_LONG_DOUBLE @H5_FORTRAN_HAVE_C_LONG_DOUBLE@
-/* Determine if __float128 is available */
-#cmakedefine H5_HAVE_FLOAT128 @H5_HAVE_FLOAT128@
+/* Define if we have Fortran intrinsic C_SIZEOF */
+#cmakedefine H5_FORTRAN_HAVE_C_SIZEOF @H5_FORTRAN_HAVE_C_SIZEOF@
+
+/* Define if we have Fortran intrinsic SIZEOF */
+#cmakedefine H5_FORTRAN_HAVE_SIZEOF @H5_FORTRAN_HAVE_SIZEOF@
+
+/* Define if we have Fortran intrinsic STORAGE_SIZE */
+#cmakedefine H5_FORTRAN_HAVE_STORAGE_SIZE @H5_FORTRAN_HAVE_STORAGE_SIZE@
+
+/* Determine the size of C long double */
+#cmakedefine H5_FORTRAN_SIZEOF_LONG_DOUBLE @H5_FORTRAN_SIZEOF_LONG_DOUBLE@
+
+/* Define Fortran compiler ID */
+#cmakedefine H5_Fortran_COMPILER_ID @H5_Fortran_COMPILER_ID@
+
+/* Define valid Fortran INTEGER KINDs */
+#cmakedefine H5_H5CONFIG_F_IKIND @H5_H5CONFIG_F_IKIND@
+
+/* Define number of valid Fortran INTEGER KINDs */
+#cmakedefine H5_H5CONFIG_F_NUM_IKIND @H5_H5CONFIG_F_NUM_IKIND@
+
+/* Define number of valid Fortran REAL KINDs */
+#cmakedefine H5_H5CONFIG_F_NUM_RKIND @H5_H5CONFIG_F_NUM_RKIND@
+
+/* Define valid Fortran REAL KINDs */
+#cmakedefine H5_H5CONFIG_F_RKIND @H5_H5CONFIG_F_RKIND@
+
+/* Define valid Fortran REAL KINDs Sizeof */
+#cmakedefine H5_H5CONFIG_F_RKIND_SIZEOF @H5_H5CONFIG_F_RKIND_SIZEOF@
/* Define to 1 if you have the `alarm' function. */
#cmakedefine H5_HAVE_ALARM @H5_HAVE_ALARM@
@@ -81,7 +116,7 @@
/* Define to 1 if you have the `difftime' function. */
#cmakedefine H5_HAVE_DIFFTIME @H5_HAVE_DIFFTIME@
-/* Define if the direct I/O virtual file driver should be compiled */
+/* Define if the direct I/O virtual file driver (VFD) should be compiled */
#cmakedefine H5_HAVE_DIRECT @H5_HAVE_DIRECT@
/* Define to 1 if you have the <dirent.h> header file. */
@@ -108,6 +143,9 @@
/* Define if support for szip filter is enabled */
#cmakedefine H5_HAVE_FILTER_SZIP @H5_HAVE_FILTER_SZIP@
+/* Determine if __float128 is available */
+#cmakedefine H5_HAVE_FLOAT128 @H5_HAVE_FLOAT128@
+
/* Define to 1 if you have the `flock' function. */
#cmakedefine H5_HAVE_FLOCK @H5_HAVE_FLOCK@
@@ -162,7 +200,7 @@
/* Define if the compiler understands inline */
#cmakedefine H5_HAVE_INLINE @H5_HAVE_INLINE@
-/* Define if library will contain instrumentation to detect correct
+/* Define if parallel library will contain instrumentation to detect correct
optimization operation */
#cmakedefine H5_HAVE_INSTRUMENTED_LIBRARY @H5_HAVE_INSTRUMENTED_LIBRARY@
@@ -244,8 +282,8 @@
/* Define to 1 if you have the <pthread.h> header file. */
#cmakedefine H5_HAVE_PTHREAD_H @H5_HAVE_PTHREAD_H@
-/* Define to 1 if you have the 'InitOnceExecuteOnce' function. */
-#cmakedefine H5_HAVE_WIN_THREADS @H5_HAVE_WIN_THREADS@
+/* Define to 1 if you have the <quadmath.h> header file. */
+#cmakedefine H5_HAVE_QUADMATH_H @H5_HAVE_QUADMATH_H@
/* Define to 1 if you have the `random' function. */
#cmakedefine H5_HAVE_RANDOM @H5_HAVE_RANDOM@
@@ -254,10 +292,10 @@
#cmakedefine H5_HAVE_RAND_R @H5_HAVE_RAND_R@
/* Define to 1 if you have the `round' function. */
-#cmakedefine H5_HAVE_ROUNDF @H5_HAVE_ROUNDF@
+#cmakedefine H5_HAVE_ROUND @H5_HAVE_ROUND@
/* Define to 1 if you have the `roundf' function. */
-#cmakedefine H5_HAVE_ROUND @H5_HAVE_ROUND@
+#cmakedefine H5_HAVE_ROUNDF @H5_HAVE_ROUNDF@
/* Define to 1 if you have the `setjmp' function. */
#cmakedefine H5_HAVE_SETJMP @H5_HAVE_SETJMP@
@@ -292,6 +330,9 @@
/* Define if `struct stat' has the `st_blocks' field */
#cmakedefine H5_HAVE_STAT_ST_BLOCKS @H5_HAVE_STAT_ST_BLOCKS@
+/* Define to 1 if you have the <stdbool.h> header file. */
+#cmakedefine H5_HAVE_STDBOOL_H @H5_HAVE_STDBOOL_H@
+
/* Define to 1 if you have the <stddef.h> header file. */
#cmakedefine H5_HAVE_STDDEF_H @H5_HAVE_STDDEF_H@
@@ -307,6 +348,12 @@
/* Define to 1 if you have the `strdup' function. */
#cmakedefine H5_HAVE_STRDUP @H5_HAVE_STRDUP@
+/* Define to 1 if you have the `strtoll' function. */
+#cmakedefine H5_HAVE_STRTOLL @H5_HAVE_STRTOLL@
+
+/* Define to 1 if you have the `strtoull' function. */
+#cmakedefine H5_HAVE_STRTOULL @H5_HAVE_STRTOULL@
+
/* Define to 1 if you have the <strings.h> header file. */
#cmakedefine H5_HAVE_STRINGS_H @H5_HAVE_STRINGS_H@
@@ -325,6 +372,9 @@
/* Define to 1 if you have the `system' function. */
#cmakedefine H5_HAVE_SYSTEM @H5_HAVE_SYSTEM@
+/* Define to 1 if you have the <sys/file.h> header file. */
+#cmakedefine H5_HAVE_SYS_FILE_H @H5_HAVE_SYS_FILE_H@
+
/* Define to 1 if you have the <sys/ioctl.h> header file. */
#cmakedefine H5_HAVE_SYS_IOCTL_H @H5_HAVE_SYS_IOCTL_H@
@@ -379,6 +429,9 @@
/* Define to 1 if you have the `waitpid' function. */
#cmakedefine H5_HAVE_WAITPID @H5_HAVE_WAITPID@
+/* Define to 1 if you have the 'InitOnceExecuteOnce' function. */
+#cmakedefine H5_HAVE_WIN_THREADS @H5_HAVE_WIN_THREADS@
+
/* Define if your system has window style path name. */
#cmakedefine H5_HAVE_WINDOW_PATH @H5_HAVE_WINDOW_PATH@
@@ -419,10 +472,12 @@
with special algorithm. */
#cmakedefine H5_LONG_TO_LDOUBLE_SPECIAL @H5_LONG_TO_LDOUBLE_SPECIAL@
-/* Define to the sub-directory in which libtool stores uninstalled libraries.
- */
+/* Define to the sub-directory where libtool stores uninstalled libraries. */
#cmakedefine H5_LT_OBJDIR @H5_LT_OBJDIR@
+/* Define to enable internal memory allocation sanity checking. */
+/* #cmakedefine H5_MEMORY_ALLOC_SANITY_CHECK @H5_MEMORY_ALLOC_SANITY_CHECK@ ** Define in CMakeLists.txt */
+
/* Define if the metadata trace file code is to be compiled in */
#cmakedefine H5_METADATA_TRACE_FILE @H5_METADATA_TRACE_FILE@
@@ -453,9 +508,18 @@
/* Define to the version of this package. */
#define H5_PACKAGE_VERSION "@HDF5_PACKAGE_VERSION_STRING@"
+/* Determine the maximum decimal precision in C */
+#define H5_PAC_C_MAX_REAL_PRECISION @H5_PAC_C_MAX_REAL_PRECISION@
+
+/* Define Fortran Maximum Real Decimal Precision */
+#cmakedefine H5_PAC_FC_MAX_REAL_PRECISION @H5_PAC_FC_MAX_REAL_PRECISION@
+
/* Width for printf() for type `long long' or `__int64', use `ll' */
#cmakedefine H5_PRINTF_LL_WIDTH @H5_PRINTF_LL_WIDTH@
+/* The size of `bool', as computed by sizeof. */
+#cmakedefine H5_SIZEOF_BOOL @H5_SIZEOF_BOOL@
+
/* The size of `char', as computed by sizeof. */
#cmakedefine H5_SIZEOF_CHAR @H5_SIZEOF_CHAR@
@@ -589,14 +653,14 @@
/* The size of `unsigned', as computed by sizeof. */
#cmakedefine H5_SIZEOF_UNSIGNED @H5_SIZEOF_UNSIGNED@
-/* The size of `__int64', as computed by sizeof. */
-#define H5_SIZEOF___INT64 @H5_SIZEOF___INT64@
+/* The size of `_Quad', as computed by sizeof. */
+#define H5_SIZEOF__QUAD @H5_SIZEOF__QUAD@
-/* Define to 1 if you have the <stdbool.h> header file. */
-#cmakedefine H5_HAVE_STDBOOL_H @H5_HAVE_STDBOOL_H@
+/* The size of `__float128', as computed by sizeof. */
+#define H5_SIZEOF___FLOAT128 @H5_SIZEOF___FLOAT128@
-/* The size of `bool', as computed by sizeof. */
-#cmakedefine H5_SIZEOF_BOOL @H5_SIZEOF_BOOL@
+/* The size of `__int64', as computed by sizeof. */
+#define H5_SIZEOF___INT64 @H5_SIZEOF___INT64@
/* Define to 1 if you have the ANSI C header files. */
#cmakedefine H5_STDC_HEADERS @H5_STDC_HEADERS@
diff --git a/config/cmake/HDF518_Examples.cmake.in b/config/cmake/HDF518_Examples.cmake.in
index e1cb781..33d3a40 100644
--- a/config/cmake/HDF518_Examples.cmake.in
+++ b/config/cmake/HDF518_Examples.cmake.in
@@ -1,4 +1,15 @@
-cmake_minimum_required(VERSION 3.1.0 FATAL_ERROR)
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
+cmake_minimum_required(VERSION 3.2.2 FATAL_ERROR)
###############################################################################################################
# This script will build and run the examples from a folder
# Execute from a command line:
@@ -12,7 +23,7 @@ set(CTEST_DASHBOARD_ROOT ${CTEST_SCRIPT_DIRECTORY})
#INSTALLDIR - HDF5-1.8 root folder
#CTEST_CONFIGURATION_TYPE - Release, Debug, RelWithDebInfo
#CTEST_SOURCE_NAME - name of source folder; HDF4Examples
-#STATIC_LIBRARIES - Default is YES
+#STATIC_ONLY - Default is YES
#FORTRAN_LIBRARIES - Default is NO
##NO_MAC_FORTRAN - set to TRUE to allow shared libs on a Mac)
if(DEFINED CTEST_SCRIPT_ARG)
@@ -23,61 +34,61 @@ if(DEFINED CTEST_SCRIPT_ARG)
foreach(current_var ${script_args})
if ("${current_var}" MATCHES "^([^=]+)=(.+)$")
set("${CMAKE_MATCH_1}" "${CMAKE_MATCH_2}")
- endif()
- endforeach()
-endif()
+ endif ()
+ endforeach ()
+endif ()
if(NOT DEFINED INSTALLDIR)
set(INSTALLDIR "@CMAKE_INSTALL_PREFIX@")
-endif()
+endif ()
if(NOT DEFINED CTEST_CONFIGURATION_TYPE)
set(CTEST_CONFIGURATION_TYPE "Release")
-endif()
+endif ()
if(NOT DEFINED CTEST_SOURCE_NAME)
set(CTEST_SOURCE_NAME "HDF5Examples")
-endif()
-if(NOT DEFINED STATIC_LIBRARIES)
- set(STATICLIBRARIES "YES")
-else(NOT DEFINED STATIC_LIBRARIES)
- set(STATICLIBRARIES "NO")
-endif()
+endif ()
+if(NOT DEFINED STATIC_ONLY)
+ set(STATICONLYLIBRARIES "YES")
+else(NOT DEFINED STATIC_ONLY)
+ set(STATICONLYLIBRARIES "NO")
+endif ()
if(NOT DEFINED FORTRAN_LIBRARIES)
set(FORTRANLIBRARIES "NO")
else(NOT DEFINED FORTRAN_LIBRARIES)
set(FORTRANLIBRARIES "YES")
-endif()
+endif ()
if(NOT DEFINED HDF_LOCAL)
set(CDASH_LOCAL "NO")
else(NOT HDF_LOCAL)
set(CDASH_LOCAL "YES")
-endif()
+endif ()
if(NOT DEFINED CTEST_SITE)
set(CTEST_SITE "local")
-endif()
+endif ()
if(NOT DEFINED CTEST_BUILD_NAME)
set(CTEST_BUILD_NAME "examples")
-endif()
+endif ()
set(BUILD_OPTIONS "${BUILD_OPTIONS} -DSITE:STRING=${CTEST_SITE} -DBUILDNAME:STRING=${CTEST_BUILD_NAME}")
#TAR_SOURCE - name of tarfile
#if(NOT DEFINED TAR_SOURCE)
# set(CTEST_USE_TAR_SOURCE "HDF5Examples-1.2.1-Source")
-#endif()
+#endif ()
###############################################################################################################
# Adjust the following SET Commands as needed
###############################################################################################################
if(WIN32)
- if(${STATICLIBRARIES})
+ if(${STATICONLYLIBRARIES})
set(BUILD_OPTIONS "${BUILD_OPTIONS} -DBUILD_SHARED_LIBS:BOOL=OFF")
- endif()
+ endif ()
set(ENV{HDF5_DIR} "${INSTALLDIR}/cmake")
set(CTEST_BINARY_NAME ${CTEST_SOURCE_NAME}\\build)
set(CTEST_SOURCE_DIRECTORY "${CTEST_DASHBOARD_ROOT}\\${CTEST_SOURCE_NAME}")
set(CTEST_BINARY_DIRECTORY "${CTEST_DASHBOARD_ROOT}\\${CTEST_BINARY_NAME}")
else(WIN32)
- if(${STATICLIBRARIES})
+ if(${STATICONLYLIBRARIES})
set(BUILD_OPTIONS "${BUILD_OPTIONS} -DBUILD_SHARED_LIBS:BOOL=OFF -DCMAKE_ANSI_CFLAGS:STRING=-fPIC")
- endif()
+ endif ()
set(ENV{HDF5_DIR} "${INSTALLDIR}/share/cmake")
set(ENV{LD_LIBRARY_PATH} "${INSTALLDIR}/lib")
set(CTEST_BINARY_NAME ${CTEST_SOURCE_NAME}/build)
@@ -86,12 +97,12 @@ else(WIN32)
endif(WIN32)
if(${FORTRANLIBRARIES})
set(BUILD_OPTIONS "${BUILD_OPTIONS} -DHDF_BUILD_FORTRAN:BOOL=ON")
-else()
+else ()
set(BUILD_OPTIONS "${BUILD_OPTIONS} -DHDF_BUILD_FORTRAN:BOOL=OFF")
-endif()
+endif ()
if(${CDASH_LOCAL})
set(BUILD_OPTIONS "${BUILD_OPTIONS} -DCDASH_LOCAL:BOOL=ON")
-endif()
+endif ()
set(BUILD_OPTIONS "${BUILD_OPTIONS} -DHDF5_PACKAGE_NAME:STRING=@HDF5_PACKAGE@@HDF_PACKAGE_EXT@")
###############################################################################################################
@@ -111,11 +122,11 @@ if(APPLE)
if(NOT NO_MAC_FORTRAN)
# Shared fortran is not supported, build static
set(BUILD_OPTIONS "${BUILD_OPTIONS} -DBUILD_SHARED_LIBS:BOOL=OFF -DCMAKE_ANSI_CFLAGS:STRING=-fPIC")
- else()
+ else ()
set(BUILD_OPTIONS "${BUILD_OPTIONS} -DHDF_BUILD_FORTRAN:BOOL=OFF")
- endif()
+ endif ()
set(BUILD_OPTIONS "${BUILD_OPTIONS} -DCTEST_USE_LAUNCHERS:BOOL=ON -DCMAKE_BUILD_WITH_INSTALL_RPATH:BOOL=OFF")
-endif()
+endif ()
#-----------------------------------------------------------------------------
set(CTEST_CMAKE_COMMAND "\"${CMAKE_COMMAND}\"")
@@ -126,16 +137,16 @@ if(CTEST_USE_TAR_SOURCE)
if(WIN32)
message(STATUS "extracting... [${CMAKE_EXECUTABLE_NAME} -E tar -xvf ${CTEST_USE_TAR_SOURCE}.zip]")
execute_process(COMMAND ${CMAKE_EXECUTABLE_NAME} -E tar -xvf ${CTEST_DASHBOARD_ROOT}\\${CTEST_USE_TAR_SOURCE}.zip RESULT_VARIABLE rv)
- else()
+ else ()
message(STATUS "extracting... [${CMAKE_EXECUTABLE_NAME} -E tar -xvf ${CTEST_USE_TAR_SOURCE}.tar]")
execute_process(COMMAND ${CMAKE_EXECUTABLE_NAME} -E tar -xvf ${CTEST_DASHBOARD_ROOT}/${CTEST_USE_TAR_SOURCE}.tar RESULT_VARIABLE rv)
- endif()
+ endif ()
if(NOT rv EQUAL 0)
message(STATUS "extracting... [error-(${rv}) clean up]")
file(REMOVE_RECURSE "${CTEST_SOURCE_DIRECTORY}")
message(FATAL_ERROR "error: extract of ${CTEST_SOURCE_NAME} failed")
- endif()
+ endif ()
endif(CTEST_USE_TAR_SOURCE)
#-----------------------------------------------------------------------------
@@ -154,9 +165,9 @@ ProcessorCount(N)
if(NOT N EQUAL 0)
if(NOT WIN32)
set(CTEST_BUILD_FLAGS -j${N})
- endif()
+ endif ()
set(ctest_test_args ${ctest_test_args} PARALLEL_LEVEL ${N})
-endif()
+endif ()
set (CTEST_CONFIGURE_COMMAND
"${CTEST_CMAKE_COMMAND} -C \"${CTEST_SOURCE_DIRECTORY}/config/cmake/cacheinit.cmake\" -DCMAKE_BUILD_TYPE:STRING=${CTEST_CONFIGURATION_TYPE} ${BUILD_OPTIONS} \"-G${CTEST_CMAKE_GENERATOR}\" \"${CTEST_SOURCE_DIRECTORY}\""
)
@@ -174,27 +185,27 @@ ctest_start (Experimental)
ctest_configure (BUILD "${CTEST_BINARY_DIRECTORY}" RETURN_VALUE res)
if(${res} LESS 0 OR ${res} GREATER 0)
file(APPEND ${CTEST_SCRIPT_DIRECTORY}/FailedCTest.txt "Failed Configure: ${res}\n")
-endif()
+endif ()
if(LOCAL_SUBMIT)
ctest_submit (PARTS Configure Notes)
-endif()
+endif ()
ctest_build (BUILD "${CTEST_BINARY_DIRECTORY}" APPEND APPEND RETURN_VALUE res NUMBER_ERRORS errval)
if(${res} LESS 0 OR ${res} GREATER 0 OR ${errval} GREATER 0)
file(APPEND ${CTEST_SCRIPT_DIRECTORY}/FailedCTest.txt "Failed ${errval} Build: ${res}\n")
-endif()
+endif ()
if(LOCAL_SUBMIT)
ctest_submit (PARTS Build)
-endif()
+endif ()
ctest_test (BUILD "${CTEST_BINARY_DIRECTORY}" APPEND ${ctest_test_args} RETURN_VALUE res)
if(${res} LESS 0 OR ${res} GREATER 0)
file(APPEND ${CTEST_SCRIPT_DIRECTORY}/FailedCTest.txt "Failed Tests: ${res}\n")
-endif()
+endif ()
if(LOCAL_SUBMIT)
ctest_submit (PARTS Test)
-endif()
+endif ()
if(${res} LESS 0 OR ${res} GREATER 0)
message (FATAL_ERROR "tests FAILED")
-endif()
+endif ()
#-----------------------------------------------------------------------------
##############################################################################################################
 message(STATUS "DONE")
\ No newline at end of file
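With the parsing loop above, every OPTION=VALUE pair appended to the script name becomes a CMake variable, so the renamed STATIC_ONLY option is supplied on the ctest command line rather than by editing the script. A hypothetical invocation, assuming the installed script keeps the HDF5_Examples.cmake name used elsewhere in this patch (the install path and option values are placeholders, not taken from the patch):

    ctest -S HDF5_Examples.cmake,INSTALLDIR=/opt/hdf5,STATIC_ONLY=NO,FORTRAN_LIBRARIES=YES -C Release -V -O test.log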
diff --git a/config/cmake/HDF5Macros.cmake b/config/cmake/HDF5Macros.cmake
index d88e672..f08349e 100644
--- a/config/cmake/HDF5Macros.cmake
+++ b/config/cmake/HDF5Macros.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
#-------------------------------------------------------------------------------
macro (H5_SET_LIB_OPTIONS libtarget libname libtype)
set (LIB_OUT_NAME "${libname}")
@@ -5,21 +16,21 @@ macro (H5_SET_LIB_OPTIONS libtarget libname libtype)
if (${libtype} MATCHES "SHARED")
if (ARGN)
set (PACKAGE_SOVERSION ${ARGN})
- else (ARGN)
+ else ()
set (PACKAGE_SOVERSION ${HDF5_PACKAGE_SOVERSION})
- endif (ARGN)
+ endif ()
if (WIN32)
set (LIBHDF_VERSION ${HDF5_PACKAGE_VERSION_MAJOR})
- else (WIN32)
+ else ()
set (LIBHDF_VERSION ${HDF5_PACKAGE_VERSION})
- endif (WIN32)
+ endif ()
set_target_properties (${libtarget} PROPERTIES VERSION ${LIBHDF_VERSION})
if (WIN32)
set (${LIB_OUT_NAME} "${LIB_OUT_NAME}-${PACKAGE_SOVERSION}")
- else (WIN32)
+ else ()
set_target_properties (${libtarget} PROPERTIES SOVERSION ${PACKAGE_SOVERSION})
- endif (WIN32)
- endif (${libtype} MATCHES "SHARED")
+ endif ()
+ endif ()
HDF_SET_LIB_OPTIONS (${libtarget} ${LIB_OUT_NAME} ${libtype})
#-- Apple Specific install_name for libraries
@@ -31,7 +42,7 @@ macro (H5_SET_LIB_OPTIONS libtarget libname libtype)
INSTALL_NAME_DIR "${CMAKE_INSTALL_PREFIX}/lib"
BUILD_WITH_INSTALL_RPATH ${HDF5_BUILD_WITH_INSTALL_NAME}
)
- endif (HDF5_BUILD_WITH_INSTALL_NAME)
+ endif ()
if (HDF5_BUILD_FRAMEWORKS)
if (${libtype} MATCHES "SHARED")
# adapt target to build frameworks instead of dylibs
@@ -42,8 +53,8 @@ macro (H5_SET_LIB_OPTIONS libtarget libname libtype)
MACOSX_FRAMEWORK_IDENTIFIER org.hdfgroup.${libtarget}
MACOSX_FRAMEWORK_SHORT_VERSION_STRING ${HDF5_PACKAGE_VERSION_MAJOR}
MACOSX_FRAMEWORK_BUNDLE_VERSION ${HDF5_PACKAGE_VERSION_MAJOR})
- endif (${libtype} MATCHES "SHARED")
- endif (HDF5_BUILD_FRAMEWORKS)
- endif (APPLE)
+ endif ()
+ endif ()
+ endif ()
-endmacro (H5_SET_LIB_OPTIONS)
+endmacro ()
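The reworked H5_SET_LIB_OPTIONS macro applies VERSION/SOVERSION (and, on Apple, the optional framework properties) to a library target, with ARGN allowing a per-call override of HDF5_PACKAGE_SOVERSION. A minimal sketch of two call sites, with the target names and the literal SOVERSION invented purely for illustration:

    H5_SET_LIB_OPTIONS (hdf5-shared hdf5 SHARED)           # uses HDF5_PACKAGE_SOVERSION
    H5_SET_LIB_OPTIONS (hdf5_hl-shared hdf5_hl SHARED 10)  # explicit SOVERSION via ARGN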
diff --git a/config/cmake/HDF5UseFortran.cmake b/config/cmake/HDF5UseFortran.cmake
index eba448c..3152aaa 100644
--- a/config/cmake/HDF5UseFortran.cmake
+++ b/config/cmake/HDF5UseFortran.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
## Check for non-standard extension quadmath.h
@@ -7,7 +18,7 @@ if (${C_HAVE_QUADMATH})
set(HAVE_QUADMATH 1)
else ()
set(HAVE_QUADMATH 0)
-endif()
+endif ()
#
# This file provides functions for HDF5 specific Fortran support.
@@ -18,16 +29,16 @@ ENABLE_LANGUAGE (Fortran)
# The provided CMake Fortran macros don't provide a general compile/run function
# so this one is used.
#-----------------------------------------------------------------------------
-MACRO (FORTRAN_RUN FUNCTION CODE RUN_RESULT_VAR1 COMPILE_RESULT_VAR RETURN)
+macro (FORTRAN_RUN FUNCTION CODE RUN_RESULT_VAR1 COMPILE_RESULT_VAR RETURN)
#
-# if (NOT DEFINED ${RUN_RESULT_VAR})
+# if (NOT DEFINED ${RUN_RESULT_VAR})
message (STATUS "Detecting Fortran ${FUNCTION}")
if (CMAKE_REQUIRED_LIBRARIES)
set (CHECK_FUNCTION_EXISTS_ADD_LIBRARIES
"-DLINK_LIBRARIES:STRING=${CMAKE_REQUIRED_LIBRARIES}")
- else (CMAKE_REQUIRED_LIBRARIES)
+ else ()
set (CHECK_FUNCTION_EXISTS_ADD_LIBRARIES)
- endif (CMAKE_REQUIRED_LIBRARIES)
+ endif ()
file (WRITE
${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/testFortranCompiler1.f90
"${CODE}"
@@ -40,7 +51,7 @@ MACRO (FORTRAN_RUN FUNCTION CODE RUN_RESULT_VAR1 COMPILE_RESULT_VAR RETURN)
)
set(${RETURN} ${OUTPUT})
-
+
#message ( "Test result1 ${RETURN} ")
#message ( "Test result3 ${RESULT} ")
#message ( "Test result2 ${CMAKE_MATCH_0} ")
@@ -65,15 +76,15 @@ MACRO (FORTRAN_RUN FUNCTION CODE RUN_RESULT_VAR1 COMPILE_RESULT_VAR RETURN)
"${OUTPUT}\n\n")
endif ()
endif ()
-# endif (NOT DEFINED ${RUN_RESULT_VAR})
-ENDMACRO (FORTRAN_RUN)
+# endif ()
+endmacro ()
# Read source line beginning at the line matching Input:"START" and ending at the line matching Input:"END"
-MACRO (READ_SOURCE START END RETURN)
- file(READ "${HDF5_SOURCE_DIR}/m4/aclocal_fc.f90" CODE)
- string(REGEX MATCH "${START}[\\\t\\\n\\\r[].+]*${END}" CODE ${CODE})
- set(RETURN "${CODE}")
-ENDMACRO (READ_SOURCE START END RETURN)
+macro (READ_SOURCE START END RETURN)
+ file (READ "${HDF5_SOURCE_DIR}/m4/aclocal_fc.f90" CODE)
+ string (REGEX MATCH "${START}[\\\t\\\n\\\r[].+]*${END}" CODE ${CODE})
+ set (RETURN "${CODE}")
+endmacro ()
#-----------------------------------------------------------------------------
# Check to see C_LONG_DOUBLE is available
@@ -85,10 +96,10 @@ CHECK_FORTRAN_FEATURE(c_long_double
)
if (${FORTRAN_HAVE_C_LONG_DOUBLE})
- set(FORTRAN_HAVE_C_LONG_DOUBLE 1)
+ set (FORTRAN_HAVE_C_LONG_DOUBLE 1)
else ()
- set(FORTRAN_HAVE_C_LONG_DOUBLE 0)
-endif()
+ set (FORTRAN_HAVE_C_LONG_DOUBLE 0)
+endif ()
# Check to see C_LONG_DOUBLE is different from C_DOUBLE
@@ -98,30 +109,30 @@ CHECK_FORTRAN_FEATURE(c_long_double
FORTRAN_C_LONG_DOUBLE_IS_UNIQUE
)
if (${FORTRAN_C_LONG_DOUBLE_IS_UNIQUE})
- set(FORTRAN_C_LONG_DOUBLE_IS_UNIQUE 1)
+ set (FORTRAN_C_LONG_DOUBLE_IS_UNIQUE 1)
else ()
- set(FORTRAN_C_LONG_DOUBLE_IS_UNIQUE 0)
-endif()
+ set (FORTRAN_C_LONG_DOUBLE_IS_UNIQUE 0)
+endif ()
## Set the sizeof function for use later in the fortran tests
-if(FORTRAN_HAVE_STORAGE_SIZE)
- set(FC_SIZEOF_A "STORAGE_SIZE(a, c_size_t)/STORAGE_SIZE(c_char_'a',c_size_t)")
- set(FC_SIZEOF_B "STORAGE_SIZE(b, c_size_t)/STORAGE_SIZE(c_char_'a',c_size_t)")
- set(FC_SIZEOF_C "STORAGE_SIZE(c, c_size_t)/STORAGE_SIZE(c_char_'a',c_size_t)")
-elseif(FORTRAN_HAVE_C_SIZEOF)
- set(FC_SIZEOF_A "SIZEOF(a)")
- set(FC_SIZEOF_B "SIZEOF(b)")
- set(FC_SIZEOF_C "SIZEOF(c)")
-else(FORTRAN_HAVE_STORAGE_SIZE)
+if (FORTRAN_HAVE_STORAGE_SIZE)
+ set (FC_SIZEOF_A "STORAGE_SIZE(a, c_size_t)/STORAGE_SIZE(c_char_'a',c_size_t)")
+ set (FC_SIZEOF_B "STORAGE_SIZE(b, c_size_t)/STORAGE_SIZE(c_char_'a',c_size_t)")
+ set (FC_SIZEOF_C "STORAGE_SIZE(c, c_size_t)/STORAGE_SIZE(c_char_'a',c_size_t)")
+elseif (FORTRAN_HAVE_C_SIZEOF)
+ set (FC_SIZEOF_A "SIZEOF(a)")
+ set (FC_SIZEOF_B "SIZEOF(b)")
+ set (FC_SIZEOF_C "SIZEOF(c)")
+else ()
message (FATAL_ERROR "Fortran compiler requires either intrinsic functions SIZEOF or STORAGE_SIZE")
-endif(FORTRAN_HAVE_STORAGE_SIZE)
+endif ()
#-----------------------------------------------------------------------------
# Determine the available KINDs for REALs and INTEGERs
#-----------------------------------------------------------------------------
-READ_SOURCE("PROGRAM FC_AVAIL_KINDS" "END PROGRAM FC_AVAIL_KINDS" CODE)
-FORTRAN_RUN("REAL and INTEGER KINDs"
+READ_SOURCE ("PROGRAM FC_AVAIL_KINDS" "END PROGRAM FC_AVAIL_KINDS" CODE)
+FORTRAN_RUN ("REAL and INTEGER KINDs"
"${CODE}"
XX
YY
@@ -134,35 +145,35 @@ FORTRAN_RUN("REAL and INTEGER KINDs"
# dnl -- LINE 4 -- number of valid integer kinds
# dnl -- LINE 5 -- number of valid real kinds
-file(READ "${CMAKE_BINARY_DIR}/pac_fconftest.out" PROG_OUTPUT)
+file (READ "${CMAKE_BINARY_DIR}/pac_fconftest.out" PROG_OUTPUT)
# Convert the string to a list of strings by replacing the carriage return with a semicolon
-string(REGEX REPLACE "\n" ";" PROG_OUTPUT "${PROG_OUTPUT}")
+string (REGEX REPLACE "\n" ";" PROG_OUTPUT "${PROG_OUTPUT}")
-list(GET PROG_OUTPUT 0 pac_validIntKinds)
-list(GET PROG_OUTPUT 1 pac_validRealKinds)
-list(GET PROG_OUTPUT 2 H5_PAC_FC_MAX_REAL_PRECISION)
+list (GET PROG_OUTPUT 0 pac_validIntKinds)
+list (GET PROG_OUTPUT 1 pac_validRealKinds)
+list (GET PROG_OUTPUT 2 H5_PAC_FC_MAX_REAL_PRECISION)
# If the lists are empty then something went wrong.
-if( NOT pac_validIntKinds)
+if (NOT pac_validIntKinds)
message (FATAL_ERROR "Failed to find available INTEGER KINDs for Fortran")
-endif()
-if( NOT pac_validRealKinds)
+endif ()
+if (NOT pac_validRealKinds)
message (FATAL_ERROR "Failed to find available REAL KINDs for Fortran")
-endif()
-if( NOT H5_PAC_FC_MAX_REAL_PRECISION)
+endif ()
+if (NOT H5_PAC_FC_MAX_REAL_PRECISION)
message (FATAL_ERROR "No output from Fortran decimal precision program")
-endif()
+endif ()
-set(PAC_FC_ALL_INTEGER_KINDS "\{${pac_validIntKinds}\}")
-set(PAC_FC_ALL_REAL_KINDS "\{${pac_validRealKinds}\}")
+set (PAC_FC_ALL_INTEGER_KINDS "\{${pac_validIntKinds}\}")
+set (PAC_FC_ALL_REAL_KINDS "\{${pac_validRealKinds}\}")
-list(GET PROG_OUTPUT 3 NUM_IKIND)
-list(GET PROG_OUTPUT 4 NUM_RKIND)
+list (GET PROG_OUTPUT 3 NUM_IKIND)
+list (GET PROG_OUTPUT 4 NUM_RKIND)
-set(PAC_FORTRAN_NUM_INTEGER_KINDS "${NUM_IKIND}")
+set (PAC_FORTRAN_NUM_INTEGER_KINDS "${NUM_IKIND}")
-set(H5CONFIG_F_NUM_IKIND "INTEGER, PARAMETER :: num_ikinds = ${NUM_IKIND}")
-set(H5CONFIG_F_IKIND "INTEGER, DIMENSION(1:num_ikinds) :: ikind = (/${pac_validIntKinds}/)")
+set (H5CONFIG_F_NUM_IKIND "INTEGER, PARAMETER :: num_ikinds = ${NUM_IKIND}")
+set (H5CONFIG_F_IKIND "INTEGER, DIMENSION(1:num_ikinds) :: ikind = (/${pac_validIntKinds}/)")
message (STATUS "....NUMBER OF INTEGER KINDS FOUND ${PAC_FORTRAN_NUM_INTEGER_KINDS}")
message (STATUS "....REAL KINDS FOUND ${PAC_FC_ALL_REAL_KINDS}")
@@ -175,10 +186,10 @@ message (STATUS "....MAX DECIMAL PRECISION ${H5_PAC_FC_MAX_REAL_PRECISION}")
# **********
# INTEGERS
# **********
-string(REGEX REPLACE "," ";" VAR "${pac_validIntKinds}")
+string (REGEX REPLACE "," ";" VAR "${pac_validIntKinds}")
-foreach( KIND ${VAR} )
- set(PROG_SRC
+foreach (KIND ${VAR} )
+ set (PROG_SRC
"
PROGRAM main
USE ISO_C_BINDING
@@ -193,37 +204,36 @@ foreach( KIND ${VAR} )
YY
PROG_OUTPUT1
)
- string(REGEX REPLACE "\n" "" PROG_OUTPUT1 "${PROG_OUTPUT1}")
- set(pack_int_sizeof "${pack_int_sizeof} ${PROG_OUTPUT1},")
-endforeach(KIND)
+ string (REGEX REPLACE "\n" "" PROG_OUTPUT1 "${PROG_OUTPUT1}")
+ set (pack_int_sizeof "${pack_int_sizeof} ${PROG_OUTPUT1},")
+endforeach ()
if (pack_int_sizeof STREQUAL "")
message (FATAL_ERROR "Failed to find available INTEGER KINDs for Fortran")
-endif()
-
-string(STRIP ${pack_int_sizeof} pack_int_sizeof)
+endif ()
+string (STRIP ${pack_int_sizeof} pack_int_sizeof)
#Remove trailing comma
-string(REGEX REPLACE ",$" "" pack_int_sizeof "${pack_int_sizeof}")
+string (REGEX REPLACE ",$" "" pack_int_sizeof "${pack_int_sizeof}")
#Remove spaces
-string(REGEX REPLACE " " "" pack_int_sizeof "${pack_int_sizeof}")
+string (REGEX REPLACE " " "" pack_int_sizeof "${pack_int_sizeof}")
-set(PAC_FC_ALL_INTEGER_KINDS_SIZEOF "\{${pack_int_sizeof}\}")
+set (PAC_FC_ALL_INTEGER_KINDS_SIZEOF "\{${pack_int_sizeof}\}")
-message(STATUS "....FOUND SIZEOF for INTEGER KINDs ${PAC_FC_ALL_INTEGER_KINDS_SIZEOF}")
+message (STATUS "....FOUND SIZEOF for INTEGER KINDs ${PAC_FC_ALL_INTEGER_KINDS_SIZEOF}")
# **********
# REALS
# **********
-string(REGEX REPLACE "," ";" VAR "${pac_validRealKinds}")
+string (REGEX REPLACE "," ";" VAR "${pac_validRealKinds}")
#find the maximum kind of the real
-list(LENGTH VAR LEN_VAR)
-MATH (EXPR _LEN "${LEN_VAR}-1")
-list(GET VAR ${_LEN} max_real_fortran_kind)
+list (LENGTH VAR LEN_VAR)
+math (EXPR _LEN "${LEN_VAR}-1")
+list (GET VAR ${_LEN} max_real_fortran_kind)
-foreach( KIND ${VAR} )
- set(PROG_SRC
+foreach (KIND ${VAR} )
+ set (PROG_SRC
"
PROGRAM main
USE ISO_C_BINDING
@@ -233,42 +243,42 @@ foreach( KIND ${VAR} )
END
"
)
- FORTRAN_RUN("REAL KIND SIZEOF" ${PROG_SRC}
+ FORTRAN_RUN ("REAL KIND SIZEOF" ${PROG_SRC}
XX
YY
PROG_OUTPUT1
)
- string(REGEX REPLACE "\n" "" PROG_OUTPUT1 "${PROG_OUTPUT1}")
- set(pack_real_sizeof "${pack_real_sizeof} ${PROG_OUTPUT1},")
-endforeach(KIND)
+ string (REGEX REPLACE "\n" "" PROG_OUTPUT1 "${PROG_OUTPUT1}")
+ set (pack_real_sizeof "${pack_real_sizeof} ${PROG_OUTPUT1},")
+endforeach ()
if (pack_int_sizeof STREQUAL "")
message (FATAL_ERROR "Failed to find available REAL KINDs for Fortran")
-endif()
+endif ()
string(STRIP ${pack_real_sizeof} pack_real_sizeof)
#Remove trailing comma
-string(REGEX REPLACE ",$" "" pack_real_sizeof "${pack_real_sizeof}")
+string (REGEX REPLACE ",$" "" pack_real_sizeof "${pack_real_sizeof}")
#Remove spaces
-string(REGEX REPLACE " " "" pack_real_sizeof "${pack_real_sizeof}")
+string (REGEX REPLACE " " "" pack_real_sizeof "${pack_real_sizeof}")
-set(H5CONFIG_F_RKIND_SIZEOF "INTEGER, DIMENSION(1:num_rkinds) :: rkind_sizeof = (/${pack_real_sizeof}/)")
+set (H5CONFIG_F_RKIND_SIZEOF "INTEGER, DIMENSION(1:num_rkinds) :: rkind_sizeof = (/${pack_real_sizeof}/)")
-message(STATUS "....FOUND SIZEOF for REAL KINDs \{${pack_real_sizeof}\}")
+message (STATUS "....FOUND SIZEOF for REAL KINDs \{${pack_real_sizeof}\}")
-set(PAC_FC_ALL_REAL_KINDS_SIZEOF "\{${pack_real_sizeof}\}")
+set (PAC_FC_ALL_REAL_KINDS_SIZEOF "\{${pack_real_sizeof}\}")
#find the maximum kind of the real
-string(REGEX REPLACE "," ";" VAR "${pack_real_sizeof}")
-list(LENGTH VAR LEN_VAR)
-MATH (EXPR _LEN "${LEN_VAR}-1")
-list(GET VAR ${_LEN} max_real_fortran_sizeof)
+string (REGEX REPLACE "," ";" VAR "${pack_real_sizeof}")
+list (LENGTH VAR LEN_VAR)
+math (EXPR _LEN "${LEN_VAR}-1")
+list (GET VAR ${_LEN} max_real_fortran_sizeof)
#-----------------------------------------------------------------------------
# Find sizeof of native kinds
#-----------------------------------------------------------------------------
-FORTRAN_RUN("SIZEOF NATIVE KINDs"
+FORTRAN_RUN ("SIZEOF NATIVE KINDs"
"
PROGRAM main
USE ISO_C_BINDING
@@ -277,9 +287,9 @@ FORTRAN_RUN("SIZEOF NATIVE KINDs"
REAL b
DOUBLE PRECISION c
WRITE(*,*) ${FC_SIZEOF_A}
- WRITE(*,*) kind(a)
- WRITE(*,*) ${FC_SIZEOF_B}
- WRITE(*,*) kind(b)
+ WRITE(*,*) kind(a)
+ WRITE(*,*) ${FC_SIZEOF_B}
+ WRITE(*,*) kind(b)
WRITE(*,*) ${FC_SIZEOF_C}
WRITE(*,*) kind(c)
END
@@ -297,63 +307,63 @@ FORTRAN_RUN("SIZEOF NATIVE KINDs"
# dnl -- LINE 6 -- kind of DOUBLE PRECISION
# Convert the string to a list of strings by replacing the carriage return with a semicolon
-string(REGEX REPLACE "\n" ";" PROG_OUTPUT "${PROG_OUTPUT}")
+string (REGEX REPLACE "\n" ";" PROG_OUTPUT "${PROG_OUTPUT}")
-list(GET PROG_OUTPUT 0 PAC_FORTRAN_NATIVE_INTEGER_SIZEOF)
-list(GET PROG_OUTPUT 1 PAC_FORTRAN_NATIVE_INTEGER_KIND)
-list(GET PROG_OUTPUT 2 PAC_FORTRAN_NATIVE_REAL_SIZEOF)
-list(GET PROG_OUTPUT 3 PAC_FORTRAN_NATIVE_REAL_KIND)
-list(GET PROG_OUTPUT 4 PAC_FORTRAN_NATIVE_DOUBLE_SIZEOF)
-list(GET PROG_OUTPUT 5 PAC_FORTRAN_NATIVE_DOUBLE_KIND)
+list (GET PROG_OUTPUT 0 PAC_FORTRAN_NATIVE_INTEGER_SIZEOF)
+list (GET PROG_OUTPUT 1 PAC_FORTRAN_NATIVE_INTEGER_KIND)
+list (GET PROG_OUTPUT 2 PAC_FORTRAN_NATIVE_REAL_SIZEOF)
+list (GET PROG_OUTPUT 3 PAC_FORTRAN_NATIVE_REAL_KIND)
+list (GET PROG_OUTPUT 4 PAC_FORTRAN_NATIVE_DOUBLE_SIZEOF)
+list (GET PROG_OUTPUT 5 PAC_FORTRAN_NATIVE_DOUBLE_KIND)
if (NOT PAC_FORTRAN_NATIVE_INTEGER_SIZEOF)
message (FATAL_ERROR "Failed to find SIZEOF NATIVE INTEGER KINDs for Fortran")
-endif()
+endif ()
if (NOT PAC_FORTRAN_NATIVE_REAL_SIZEOF)
message (FATAL_ERROR "Failed to find SIZEOF NATIVE REAL KINDs for Fortran")
-endif()
+endif ()
if (NOT PAC_FORTRAN_NATIVE_DOUBLE_SIZEOF)
message (FATAL_ERROR "Failed to find SIZEOF NATIVE DOUBLE KINDs for Fortran")
-endif()
+endif ()
if (NOT PAC_FORTRAN_NATIVE_INTEGER_KIND)
message (FATAL_ERROR "Failed to find KIND of NATIVE INTEGER for Fortran")
-endif()
+endif ()
if (NOT PAC_FORTRAN_NATIVE_REAL_KIND)
message (FATAL_ERROR "Failed to find KIND of NATIVE REAL for Fortran")
-endif()
+endif ()
if (NOT PAC_FORTRAN_NATIVE_DOUBLE_KIND)
message (FATAL_ERROR "Failed to find KIND of NATIVE DOUBLE for Fortran")
-endif()
+endif ()
-set(FORTRAN_SIZEOF_LONG_DOUBLE ${${HDF_PREFIX}_SIZEOF_LONG_DOUBLE})
-#set(H5_SIZEOF_LONG_DOUBLE ${${HDF_PREFIX}_SIZEOF_LONG_DOUBLE})
+set (FORTRAN_SIZEOF_LONG_DOUBLE ${${HDF_PREFIX}_SIZEOF_LONG_DOUBLE})
+#set (H5_SIZEOF_LONG_DOUBLE ${${HDF_PREFIX}_SIZEOF_LONG_DOUBLE})
# remove the invalid kind from the list
-if(NOT(${SIZEOF___FLOAT128} EQUAL 0))
- if(NOT(${SIZEOF___FLOAT128} EQUAL ${max_real_fortran_sizeof})
- AND NOT(${FORTRAN_SIZEOF_LONG_DOUBLE} EQUAL ${max_real_fortran_sizeof})
- # account for the fact that the C compiler can have 16-byte __float128 and the fortran compiler only has 8-byte doubles,
- # so we don't want to remove the 8-byte fortran doubles.
- AND NOT(${PAC_FORTRAN_NATIVE_DOUBLE_SIZEOF} EQUAL ${max_real_fortran_sizeof}))
- message(WARNING "
+if (NOT(${SIZEOF___FLOAT128} EQUAL 0))
+ if (NOT(${SIZEOF___FLOAT128} EQUAL ${max_real_fortran_sizeof})
+ AND NOT(${FORTRAN_SIZEOF_LONG_DOUBLE} EQUAL ${max_real_fortran_sizeof})
+ # account for the fact that the C compiler can have 16-byte __float128 and the fortran compiler only has 8-byte doubles,
+ # so we don't want to remove the 8-byte fortran doubles.
+ AND NOT(${PAC_FORTRAN_NATIVE_DOUBLE_SIZEOF} EQUAL ${max_real_fortran_sizeof}))
+ message (WARNING "
Fortran REAL(KIND=${max_real_fortran_kind}) is $max_real_fortran_sizeof Bytes, but no corresponding C float type exists of that size
!!! Fortran interfaces will not be generated for REAL(KIND=${max_real_fortran_kind}) !!!")
- string(REGEX REPLACE ",[0-9]+}" "}" PAC_FC_ALL_REAL_KINDS ${PAC_FC_ALL_REAL_KINDS})
- string(REGEX REPLACE ",[0-9]+}" "}" PAC_FC_ALL_REAL_KINDS_SIZEOF ${PAC_FC_ALL_REAL_KINDS_SIZEOF})
- MATH (EXPR NUM_RKIND "${NUM_RKIND} - 1")
- endif()
-endif(NOT(${SIZEOF___FLOAT128} EQUAL 0))
+ string (REGEX REPLACE ",[0-9]+}" "}" PAC_FC_ALL_REAL_KINDS ${PAC_FC_ALL_REAL_KINDS})
+ string (REGEX REPLACE ",[0-9]+}" "}" PAC_FC_ALL_REAL_KINDS_SIZEOF ${PAC_FC_ALL_REAL_KINDS_SIZEOF})
+ math (EXPR NUM_RKIND "${NUM_RKIND} - 1")
+ endif ()
+endif ()
-set(H5CONFIG_F_NUM_RKIND "INTEGER, PARAMETER :: num_rkinds = ${NUM_RKIND}")
+set (H5CONFIG_F_NUM_RKIND "INTEGER, PARAMETER :: num_rkinds = ${NUM_RKIND}")
-string(REGEX REPLACE "{" "" OUT_VAR ${PAC_FC_ALL_REAL_KINDS})
-string(REGEX REPLACE "}" "" OUT_VAR ${OUT_VAR})
-set(H5CONFIG_F_RKIND "INTEGER, DIMENSION(1:num_rkinds) :: rkind = (/${OUT_VAR}/)")
+string (REGEX REPLACE "{" "" OUT_VAR ${PAC_FC_ALL_REAL_KINDS})
+string (REGEX REPLACE "}" "" OUT_VAR ${OUT_VAR})
+set (H5CONFIG_F_RKIND "INTEGER, DIMENSION(1:num_rkinds) :: rkind = (/${OUT_VAR}/)")
-string(REGEX REPLACE "{" "" OUT_VAR ${PAC_FC_ALL_REAL_KINDS_SIZEOF})
-string(REGEX REPLACE "}" "" OUT_VAR ${OUT_VAR})
-set(H5CONFIG_F_RKIND_SIZEOF "INTEGER, DIMENSION(1:num_rkinds) :: rkind_sizeof = (/${OUT_VAR}/)")
+string (REGEX REPLACE "{" "" OUT_VAR ${PAC_FC_ALL_REAL_KINDS_SIZEOF})
+string (REGEX REPLACE "}" "" OUT_VAR ${OUT_VAR})
+set (H5CONFIG_F_RKIND_SIZEOF "INTEGER, DIMENSION(1:num_rkinds) :: rkind_sizeof = (/${OUT_VAR}/)")
ENABLE_LANGUAGE (C)
@@ -361,14 +371,14 @@ ENABLE_LANGUAGE (C)
# The provided CMake C macros don't provide a general compile/run function
# so this one is used.
#-----------------------------------------------------------------------------
-MACRO (C_RUN FUNCTION CODE RETURN)
+macro (C_RUN FUNCTION CODE RETURN)
message (STATUS "Detecting C ${FUNCTION}")
if (CMAKE_REQUIRED_LIBRARIES)
set (CHECK_FUNCTION_EXISTS_ADD_LIBRARIES
"-DLINK_LIBRARIES:STRING=${CMAKE_REQUIRED_LIBRARIES}")
- else (CMAKE_REQUIRED_LIBRARIES)
+ else ()
set (CHECK_FUNCTION_EXISTS_ADD_LIBRARIES)
- endif (CMAKE_REQUIRED_LIBRARIES)
+ endif ()
file (WRITE
${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/testCCompiler1.c
${CODE}
@@ -380,8 +390,8 @@ MACRO (C_RUN FUNCTION CODE RETURN)
RUN_OUTPUT_VARIABLE OUTPUT
)
- set(${RETURN} ${OUTPUT})
-
+ set (${RETURN} ${OUTPUT})
+
#message ( "* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * ")
#message ( "Test COMPILE_RESULT_VAR ${COMPILE_RESULT_VAR} ")
#message ( "* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * ")
@@ -405,10 +415,10 @@ MACRO (C_RUN FUNCTION CODE RETURN)
endif ()
else ()
message (FATAL_ERROR "Compilation of C ${FUNCTION} - Failed")
- endif()
-ENDMACRO (C_RUN)
+ endif ()
+endmacro ()
-set(PROG_SRC
+set (PROG_SRC
"
#include <float.h>
#include <stdio.h>
@@ -426,7 +436,7 @@ set(PROG_SRC
#define C_FLT128_DIG 0
#endif
#if defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L
-#define C_LDBL_DIG DECIMAL_DIG
+#define C_LDBL_DIG DECIMAL_DIG
#else
#define C_LDBL_DIG LDBL_DIG
#endif
@@ -435,33 +445,33 @@ set(PROG_SRC
return 1\\\;
}
"
- )
+)
-C_RUN("maximum decimal precision for C" ${PROG_SRC} PROG_OUTPUT)
+C_RUN ("maximum decimal precision for C" ${PROG_SRC} PROG_OUTPUT)
# dnl The output from the above program will be:
# dnl -- LINE 1 -- long double decimal precision
# dnl -- LINE 2 -- __float128 decimal precision
# Convert the string to a list of strings by replacing the carriage return with a semicolon
-string(REGEX REPLACE "\n" ";" PROG_OUTPUT "${PROG_OUTPUT}")
+string (REGEX REPLACE "\n" ";" PROG_OUTPUT "${PROG_OUTPUT}")
-list(GET PROG_OUTPUT 0 LDBL_DIG)
-list(GET PROG_OUTPUT 1 FLT128_DIG)
+list (GET PROG_OUTPUT 0 LDBL_DIG)
+list (GET PROG_OUTPUT 1 FLT128_DIG)
-if(SIZEOF___FLOAT128 EQUAL 0 OR FLT128_DIG EQUAL 0)
- SET(H5_HAVE_FLOAT128 0)
- SET(SIZEOF___FLOAT128 0)
- set(H5_PAC_C_MAX_REAL_PRECISION ${LDBL_DIG})
+if (SIZEOF___FLOAT128 EQUAL 0 OR FLT128_DIG EQUAL 0)
+ set (H5_HAVE_FLOAT128 0)
+ set (SIZEOF___FLOAT128 0)
+ set (H5_PAC_C_MAX_REAL_PRECISION ${LDBL_DIG})
else ()
set(H5_PAC_C_MAX_REAL_PRECISION ${FLT128_DIG})
-endif()
+endif ()
# Setting definition if there is a 16 byte fortran integer
-string(FIND ${PAC_FC_ALL_INTEGER_KINDS_SIZEOF} "16" pos)
-if(${pos} EQUAL -1)
- set(HAVE_Fortran_INTEGER_SIZEOF_16 0)
+string (FIND ${PAC_FC_ALL_INTEGER_KINDS_SIZEOF} "16" pos)
+if (${pos} EQUAL -1)
+ set (HAVE_Fortran_INTEGER_SIZEOF_16 0)
else ()
- set(HAVE_Fortran_INTEGER_SIZEOF_16 1)
+ set (HAVE_Fortran_INTEGER_SIZEOF_16 1)
endif ()
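The final check above scans the assembled integer-sizeof list for a 16-byte kind. A quick worked example with a made-up list (the real string comes from the KIND probes earlier in this file):

    set (PAC_FC_ALL_INTEGER_KINDS_SIZEOF "{1,2,4,8,16}")
    string (FIND ${PAC_FC_ALL_INTEGER_KINDS_SIZEOF} "16" pos)  # pos = 9 for this value
    # pos is -1 only when no 16-byte integer kind was reported,
    # so HAVE_Fortran_INTEGER_SIZEOF_16 ends up 1 here and 0 for "{1,2,4,8}".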
diff --git a/config/cmake/HDF5_Examples.cmake.in b/config/cmake/HDF5_Examples.cmake.in
index 058dd7a..6918bb3 100644
--- a/config/cmake/HDF5_Examples.cmake.in
+++ b/config/cmake/HDF5_Examples.cmake.in
@@ -1,211 +1,103 @@
-cmake_minimum_required(VERSION 3.1.0 FATAL_ERROR)
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
+cmake_minimum_required(VERSION 3.2.2 FATAL_ERROR)
###############################################################################################################
# This script will build and run the examples from a folder
# Execute from a command line:
# ctest -S HDF5_Examples.cmake,OPTION=VALUE -C Release -V -O test.log
###############################################################################################################
-set(CTEST_CMAKE_GENERATOR "@CMAKE_GENERATOR@")
-set(CTEST_DASHBOARD_ROOT ${CTEST_SCRIPT_DIRECTORY})
+set (CTEST_CMAKE_GENERATOR "@CMAKE_GENERATOR@")
+set (CTEST_DASHBOARD_ROOT ${CTEST_SCRIPT_DIRECTORY})
# handle input parameters to script.
#INSTALLDIR - HDF5 root folder
#CTEST_CONFIGURATION_TYPE - Release, Debug, RelWithDebInfo
-#CTEST_SOURCE_NAME - name of source folder; HDF4Examples
-#STATIC_LIBRARIES - Default is YES
-#FORTRAN_LIBRARIES - Default is NO
-#JAVA_LIBRARIES - Default is NO
-##NO_MAC_FORTRAN - set to TRUE to allow shared libs on a Mac)
-if(DEFINED CTEST_SCRIPT_ARG)
- # transform ctest script arguments of the form
- # script.ctest,var1=value1,var2=value2
- # to variables with the respective names set to the respective values
- string(REPLACE "," ";" script_args "${CTEST_SCRIPT_ARG}")
- foreach(current_var ${script_args})
- if ("${current_var}" MATCHES "^([^=]+)=(.+)$")
- set("${CMAKE_MATCH_1}" "${CMAKE_MATCH_2}")
- endif()
- endforeach()
-endif()
-if(NOT DEFINED INSTALLDIR)
- set(INSTALLDIR "@CMAKE_INSTALL_PREFIX@")
-endif()
-if(NOT DEFINED CTEST_CONFIGURATION_TYPE)
- set(CTEST_CONFIGURATION_TYPE "Release")
-endif()
-if(NOT DEFINED CTEST_SOURCE_NAME)
- set(CTEST_SOURCE_NAME "HDF5Examples")
-endif()
-if(NOT DEFINED STATIC_LIBRARIES)
- set(STATICLIBRARIES "YES")
-else(NOT DEFINED STATIC_LIBRARIES)
- set(STATICLIBRARIES "NO")
-endif()
-if(NOT DEFINED FORTRAN_LIBRARIES)
- set(FORTRANLIBRARIES "NO")
-else(NOT DEFINED FORTRAN_LIBRARIES)
- set(FORTRANLIBRARIES "YES")
-endif()
-if(NOT DEFINED JAVA_LIBRARIES)
- set(JAVALIBRARIES "NO")
-else(NOT DEFINED JAVA_LIBRARIES)
- set(JAVALIBRARIES "YES")
-endif()
-if(NOT DEFINED HDF_LOCAL)
- set(CDASH_LOCAL "NO")
-else(NOT HDF_LOCAL)
- set(CDASH_LOCAL "YES")
-endif()
+#CTEST_SOURCE_NAME - name of source folder; HDF5Examples
+if (DEFINED CTEST_SCRIPT_ARG)
+ # transform ctest script arguments of the form
+ # script.ctest,var1=value1,var2=value2
+ # to variables with the respective names set to the respective values
+ string (REPLACE "," ";" script_args "${CTEST_SCRIPT_ARG}")
+ foreach (current_var ${script_args})
+ if ("${current_var}" MATCHES "^([^=]+)=(.+)$")
+ set ("${CMAKE_MATCH_1}" "${CMAKE_MATCH_2}")
+ endif ()
+ endforeach ()
+endif ()
+
+###################################################################
+### Following Line is one of [Release, RelWithDebInfo, Debug] #####
+set (CTEST_CONFIGURATION_TYPE "$ENV{CMAKE_CONFIG_TYPE}")
+if (NOT DEFINED CTEST_CONFIGURATION_TYPE)
+ set (CTEST_CONFIGURATION_TYPE "Release")
+endif ()
+set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DCTEST_CONFIGURATION_TYPE:STRING=${CTEST_CONFIGURATION_TYPE}")
+##################################################################
+
+if (NOT DEFINED INSTALLDIR)
+ set (INSTALLDIR "@CMAKE_INSTALL_PREFIX@")
+endif ()
+
+if (NOT DEFINED CTEST_SOURCE_NAME)
+ set (CTEST_SOURCE_NAME "HDF5Examples")
+endif ()
+
+if (NOT DEFINED HDF_LOCAL)
+ set (CDASH_LOCAL "NO")
+else ()
+ set (CDASH_LOCAL "YES")
+endif ()
if(NOT DEFINED CTEST_SITE)
- set(CTEST_SITE "local")
-endif()
-if(NOT DEFINED CTEST_BUILD_NAME)
- set(CTEST_BUILD_NAME "examples")
-endif()
-set(BUILD_OPTIONS "${BUILD_OPTIONS} -DSITE:STRING=${CTEST_SITE} -DBUILDNAME:STRING=${CTEST_BUILD_NAME}")
+ set (CTEST_SITE "local")
+endif ()
+if (NOT DEFINED CTEST_BUILD_NAME)
+ set (CTEST_BUILD_NAME "examples")
+endif ()
+set (BUILD_OPTIONS "${BUILD_OPTIONS} -DSITE:STRING=${CTEST_SITE} -DBUILDNAME:STRING=${CTEST_BUILD_NAME}")
#TAR_SOURCE - name of tarfile
-#if(NOT DEFINED TAR_SOURCE)
-# set(CTEST_USE_TAR_SOURCE "HDF5Examples-1.10.1-Source")
-#endif()
+#if (NOT DEFINED TAR_SOURCE)
+# set (CTEST_USE_TAR_SOURCE "HDF5Examples-1.10.5-Source")
+#endif ()
###############################################################################################################
-# Adjust the following SET Commands as needed
-###############################################################################################################
-if(WIN32)
- if(${STATICLIBRARIES})
- set(BUILD_OPTIONS "${BUILD_OPTIONS} -DBUILD_SHARED_LIBS:BOOL=OFF")
- endif()
- set(ENV{HDF5_DIR} "${INSTALLDIR}/cmake")
- set(CTEST_BINARY_NAME ${CTEST_SOURCE_NAME}\\build)
- set(CTEST_SOURCE_DIRECTORY "${CTEST_DASHBOARD_ROOT}\\${CTEST_SOURCE_NAME}")
- set(CTEST_BINARY_DIRECTORY "${CTEST_DASHBOARD_ROOT}\\${CTEST_BINARY_NAME}")
-else(WIN32)
- if(${STATICLIBRARIES})
- set(BUILD_OPTIONS "${BUILD_OPTIONS} -DBUILD_SHARED_LIBS:BOOL=OFF -DCMAKE_ANSI_CFLAGS:STRING=-fPIC")
- endif()
- set(ENV{HDF5_DIR} "${INSTALLDIR}/share/cmake")
- set(ENV{LD_LIBRARY_PATH} "${INSTALLDIR}/lib")
- set(CTEST_BINARY_NAME ${CTEST_SOURCE_NAME}/build)
- set(CTEST_SOURCE_DIRECTORY "${CTEST_DASHBOARD_ROOT}/${CTEST_SOURCE_NAME}")
- set(CTEST_BINARY_DIRECTORY "${CTEST_DASHBOARD_ROOT}/${CTEST_BINARY_NAME}")
-endif(WIN32)
-if(${FORTRANLIBRARIES})
- set(BUILD_OPTIONS "${BUILD_OPTIONS} -DHDF_BUILD_FORTRAN:BOOL=ON")
-else()
- set(BUILD_OPTIONS "${BUILD_OPTIONS} -DHDF_BUILD_FORTRAN:BOOL=OFF")
-endif()
-if(${JAVALIBRARIES})
- set(BUILD_OPTIONS "${BUILD_OPTIONS} -DHDF_BUILD_JAVA:BOOL=ON")
-else()
- set(BUILD_OPTIONS "${BUILD_OPTIONS} -DHDF_BUILD_JAVA:BOOL=OFF")
-endif()
-if(${CDASH_LOCAL})
- set(BUILD_OPTIONS "${BUILD_OPTIONS} -DCDASH_LOCAL:BOOL=ON")
-endif()
-set(BUILD_OPTIONS "${BUILD_OPTIONS} -DHDF5_PACKAGE_NAME:STRING=@HDF5_PACKAGE@@HDF_PACKAGE_EXT@")
+if (WIN32)
+ set (SITE_OS_NAME "Windows")
+ set (ENV{HDF5_DIR} "${INSTALLDIR}/cmake")
+ set (CTEST_BINARY_NAME ${CTEST_SOURCE_NAME}\\build)
+ set (CTEST_SOURCE_DIRECTORY "${CTEST_DASHBOARD_ROOT}\\${CTEST_SOURCE_NAME}")
+ set (CTEST_BINARY_DIRECTORY "${CTEST_DASHBOARD_ROOT}\\${CTEST_BINARY_NAME}")
+else ()
+ set (ENV{HDF5_DIR} "${INSTALLDIR}/share/cmake")
+ set (ENV{LD_LIBRARY_PATH} "${INSTALLDIR}/lib")
+ set (CTEST_BINARY_NAME ${CTEST_SOURCE_NAME}/build)
+ set (CTEST_SOURCE_DIRECTORY "${CTEST_DASHBOARD_ROOT}/${CTEST_SOURCE_NAME}")
+ set (CTEST_BINARY_DIRECTORY "${CTEST_DASHBOARD_ROOT}/${CTEST_BINARY_NAME}")
+endif ()
+if (${CDASH_LOCAL})
+ set (BUILD_OPTIONS "${BUILD_OPTIONS} -DCDASH_LOCAL:BOOL=ON")
+endif ()
+set (BUILD_OPTIONS "${BUILD_OPTIONS} -DHDF5_PACKAGE_NAME:STRING=@HDF5_PACKAGE@@HDF_PACKAGE_EXT@")
###############################################################################################################
# For any comments please contact cdashhelp@hdfgroup.org
#
###############################################################################################################
-#-----------------------------------------------------------------------------
-# MAC machines need special option
-#-----------------------------------------------------------------------------
-if(APPLE)
- # Compiler choice
- execute_process(COMMAND xcrun --find cc OUTPUT_VARIABLE XCODE_CC OUTPUT_STRIP_TRAILING_WHITESPACE)
- execute_process(COMMAND xcrun --find c++ OUTPUT_VARIABLE XCODE_CXX OUTPUT_STRIP_TRAILING_WHITESPACE)
- set(ENV{CC} "${XCODE_CC}")
- set(ENV{CXX} "${XCODE_CXX}")
- if(NOT NO_MAC_FORTRAN)
- # Shared fortran is not supported, build static
- set(BUILD_OPTIONS "${BUILD_OPTIONS} -DBUILD_SHARED_LIBS:BOOL=OFF -DCMAKE_ANSI_CFLAGS:STRING=-fPIC")
- else()
- set(BUILD_OPTIONS "${BUILD_OPTIONS} -DHDF_BUILD_FORTRAN:BOOL=OFF")
- endif()
- set(BUILD_OPTIONS "${BUILD_OPTIONS} -DCTEST_USE_LAUNCHERS:BOOL=ON -DCMAKE_BUILD_WITH_INSTALL_RPATH:BOOL=OFF")
-endif()
-
-#-----------------------------------------------------------------------------
-set(CTEST_CMAKE_COMMAND "\"${CMAKE_COMMAND}\"")
-## --------------------------
-if(CTEST_USE_TAR_SOURCE)
- ## Uncompress source if tar or zip file provided
- ## --------------------------
- if(WIN32)
- message(STATUS "extracting... [${CMAKE_EXECUTABLE_NAME} -E tar -xvf ${CTEST_USE_TAR_SOURCE}.zip]")
- execute_process(COMMAND ${CMAKE_EXECUTABLE_NAME} -E tar -xvf ${CTEST_DASHBOARD_ROOT}\\${CTEST_USE_TAR_SOURCE}.zip RESULT_VARIABLE rv)
- else()
- message(STATUS "extracting... [${CMAKE_EXECUTABLE_NAME} -E tar -xvf ${CTEST_USE_TAR_SOURCE}.tar]")
- execute_process(COMMAND ${CMAKE_EXECUTABLE_NAME} -E tar -xvf ${CTEST_DASHBOARD_ROOT}/${CTEST_USE_TAR_SOURCE}.tar RESULT_VARIABLE rv)
- endif()
-
- if(NOT rv EQUAL 0)
- message(STATUS "extracting... [error-(${rv}) clean up]")
- file(REMOVE_RECURSE "${CTEST_SOURCE_DIRECTORY}")
- message(FATAL_ERROR "error: extract of ${CTEST_SOURCE_NAME} failed")
- endif()
-endif(CTEST_USE_TAR_SOURCE)
-
-#-----------------------------------------------------------------------------
-## Clear the build directory
-## --------------------------
-set(CTEST_START_WITH_EMPTY_BINARY_DIRECTORY TRUE)
-if (EXISTS "${CTEST_BINARY_DIRECTORY}" AND IS_DIRECTORY "${CTEST_BINARY_DIRECTORY}")
- ctest_empty_binary_directory(${CTEST_BINARY_DIRECTORY})
+if (WIN32)
+ include (${CTEST_SCRIPT_DIRECTORY}\\HDF5_Examples_options.cmake)
+ include (${CTEST_SCRIPT_DIRECTORY}\\CTestScript.cmake)
else ()
- file(MAKE_DIRECTORY "${CTEST_BINARY_DIRECTORY}")
+ include (${CTEST_SCRIPT_DIRECTORY}/HDF5_Examples_options.cmake)
+ include (${CTEST_SCRIPT_DIRECTORY}/CTestScript.cmake)
endif ()
-
-# Use multiple CPU cores to build
-include(ProcessorCount)
-ProcessorCount(N)
-if(NOT N EQUAL 0)
- if(NOT WIN32)
- set(CTEST_BUILD_FLAGS -j${N})
- endif()
- set(ctest_test_args ${ctest_test_args} PARALLEL_LEVEL ${N})
-endif()
-set (CTEST_CONFIGURE_COMMAND
- "${CTEST_CMAKE_COMMAND} -C \"${CTEST_SOURCE_DIRECTORY}/config/cmake/cacheinit.cmake\" -DCMAKE_BUILD_TYPE:STRING=${CTEST_CONFIGURATION_TYPE} ${BUILD_OPTIONS} \"-G${CTEST_CMAKE_GENERATOR}\" \"${CTEST_SOURCE_DIRECTORY}\""
-)
-
-#-----------------------------------------------------------------------------
-## -- set output to english
-set($ENV{LC_MESSAGES} "en_EN")
-
-#-----------------------------------------------------------------------------
-configure_file(${CTEST_SOURCE_DIRECTORY}/config/cmake/CTestCustom.cmake ${CTEST_BINARY_DIRECTORY}/CTestCustom.cmake)
-ctest_read_custom_files ("${CTEST_BINARY_DIRECTORY}")
-## NORMAL process
-## --------------------------
-ctest_start (Experimental)
-ctest_configure (BUILD "${CTEST_BINARY_DIRECTORY}" RETURN_VALUE res)
-if(${res} LESS 0 OR ${res} GREATER 0)
- file(APPEND ${CTEST_SCRIPT_DIRECTORY}/FailedCTest.txt "Failed Configure: ${res}\n")
-endif()
-if(LOCAL_SUBMIT)
- ctest_submit (PARTS Configure Notes)
-endif()
-ctest_build (BUILD "${CTEST_BINARY_DIRECTORY}" APPEND APPEND RETURN_VALUE res NUMBER_ERRORS errval)
-if(${res} LESS 0 OR ${res} GREATER 0 OR ${errval} GREATER 0)
- file(APPEND ${CTEST_SCRIPT_DIRECTORY}/FailedCTest.txt "Failed ${errval} Build: ${res}\n")
-endif()
-if(LOCAL_SUBMIT)
- ctest_submit (PARTS Build)
-endif()
-ctest_test (BUILD "${CTEST_BINARY_DIRECTORY}" APPEND ${ctest_test_args} RETURN_VALUE res)
-if(${res} LESS 0 OR ${res} GREATER 0)
- file(APPEND ${CTEST_SCRIPT_DIRECTORY}/FailedCTest.txt "Failed Tests: ${res}\n")
-endif()
-if(LOCAL_SUBMIT)
- ctest_submit (PARTS Test)
-endif()
-if(${res} LESS 0 OR ${res} GREATER 0)
- message (FATAL_ERROR "tests FAILED")
-endif()
-#-----------------------------------------------------------------------------
-##############################################################################################################
-message(STATUS "DONE")
\ No newline at end of file
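The rewritten template now takes the build type from the CMAKE_CONFIG_TYPE environment variable (falling back to Release) and defers the configure/build/test logic to the included HDF5_Examples_options.cmake and CTestScript.cmake files, so a run only passes the options still handled in this script. An illustrative invocation, with the INSTALLDIR value a placeholder for the real install prefix:

    ctest -S HDF5_Examples.cmake,INSTALLDIR=/usr/local/hdf5,CTEST_SITE=local,CTEST_BUILD_NAME=examples -C Release -V -O test.log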
diff --git a/config/cmake/HDF5_Examples_options.cmake b/config/cmake/HDF5_Examples_options.cmake
new file mode 100755
index 0000000..386e99c
--- /dev/null
+++ b/config/cmake/HDF5_Examples_options.cmake
@@ -0,0 +1,59 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
+#############################################################################################
+#### Change default configuration of options in config/cmake/cacheinit.cmake file ###
+#### format: set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DXXX:YY=ZZZZ") ###
+#### DEFAULT: ###
+#### BUILD_SHARED_LIBS:BOOL=OFF ###
+#### HDF_BUILD_C:BOOL=ON ###
+#### HDF_BUILD_CXX:BOOL=OFF ###
+#### HDF_BUILD_FORTRAN:BOOL=OFF ###
+#### HDF_BUILD_JAVA:BOOL=OFF ###
+#### BUILD_TESTING:BOOL=OFF ###
+#### HDF_ENABLE_PARALLEL:BOOL=OFF ###
+#### HDF_ENABLE_THREADSAFE:BOOL=OFF ###
+#############################################################################################
+
+### uncomment/comment and change the following lines for other configuration options
+### build with shared libraries
+#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DBUILD_SHARED_LIBS:BOOL=ON")
+
+#############################################################################################
+#### languages ####
+### disable C builds
+#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF_BUILD_C:BOOL=OFF")
+
+### enable C++ builds
+#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF_BUILD_CXX:BOOL=ON")
+
+### enable Fortran builds
+#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF_BUILD_FORTRAN:BOOL=ON")
+
+### enable JAVA builds
+#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF_BUILD_JAVA:BOOL=ON")
+
+#############################################################################################
+### enable parallel program builds
+#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF_ENABLE_PARALLEL:BOOL=ON")
+
+
+#############################################################################################
+### enable threadsafe program builds
+#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF_ENABLE_THREADSAFE:BOOL=ON")
+
+
+#############################################################################################
+### enable test program builds, requires reference files in testfiles subdirectory
+#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DBUILD_TESTING:BOOL=ON")
+#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DCOMPARE_TESTING:BOOL=ON")
+
+#############################################################################################
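This new options file is the single place to change the example-suite defaults; a commented line is switched on by removing its leading '#'. For instance, enabling shared libraries and the Fortran examples (both lines taken verbatim from the template above) would leave:

    set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DBUILD_SHARED_LIBS:BOOL=ON")
    set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF_BUILD_FORTRAN:BOOL=ON")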
diff --git a/config/cmake/HDF5_Process_Flex_Files.cmake b/config/cmake/HDF5_Process_Flex_Files.cmake
index 2306e93..2595c39 100644
--- a/config/cmake/HDF5_Process_Flex_Files.cmake
+++ b/config/cmake/HDF5_Process_Flex_Files.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
# post process flex/bison files
message (STATUS "File: ${GEN_DIR} ${FILE_PARSE} ${FILE_ANALYZE}")
@@ -49,7 +60,7 @@ if (FILE_PARSE)
message (STATUS "processed pragma in ${FILE_PARSE}")
file (READ ${GEN_DIR}/${FILE_PARSE}.h TEST_STREAM)
file (WRITE ${FILE_PARSE}.h "${TEST_STREAM}")
-endif (FILE_PARSE)
+endif ()
if (FILE_ANALYZE)
# Add code that disables warnings in the flex/bison-generated code.
@@ -81,4 +92,4 @@ if (FILE_ANALYZE)
")
file (APPEND ${FILE_ANALYZE} "${TEST_STREAM}")
message (STATUS "processed pragma in ${FILE_ANALYZE}")
-endif (FILE_ANALYZE)
+endif ()
diff --git a/config/cmake/HDFCompilerFlags.cmake b/config/cmake/HDFCompilerFlags.cmake
index 665db32..5f71f33 100644
--- a/config/cmake/HDFCompilerFlags.cmake
+++ b/config/cmake/HDFCompilerFlags.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
#-----------------------------------------------------------------------------
# Compiler specific flags : Shouldn't there be compiler tests for these
#-----------------------------------------------------------------------------
@@ -12,21 +23,21 @@ if (CMAKE_COMPILER_IS_GNUCC)
if (NOT CMAKE_C_COMPILER_VERSION VERSION_LESS 5.0)
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fstdarg-opt")
endif ()
- endif (CMAKE_BUILD_TYPE MATCHES Debug)
-endif (CMAKE_COMPILER_IS_GNUCC)
-if (CMAKE_COMPILER_IS_GNUCXX)
+ endif ()
+endif ()
+if (CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_LOADED)
if (CMAKE_BUILD_TYPE MATCHES Debug)
set (CMAKE_CXX_FLAGS "${CMAKE_ANSI_CFLAGS} ${CMAKE_CXX_FLAGS}")
if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 5.0)
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -Og -ftrapv -fno-common")
endif ()
- else (CMAKE_BUILD_TYPE MATCHES Debug)
+ else ()
set (CMAKE_CXX_FLAGS "${CMAKE_ANSI_CFLAGS} ${CMAKE_CXX_FLAGS}")
if (NOT CMAKE_CXX_COMPILER_VERSION VERSION_LESS 5.0)
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fstdarg-opt")
endif ()
- endif (CMAKE_BUILD_TYPE MATCHES Debug)
-endif (CMAKE_COMPILER_IS_GNUCXX)
+ endif ()
+endif ()
#-----------------------------------------------------------------------------
# Option to allow the user to disable compiler warnings
@@ -41,22 +52,22 @@ if (HDF5_DISABLE_COMPILER_WARNINGS)
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /w")
string (REGEX REPLACE "(^| )([/-])W[0-9]( |$)" " " CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /w")
- endif (MSVC)
+ endif ()
if (WIN32)
add_definitions (-D_CRT_SECURE_NO_WARNINGS)
- endif (WIN32)
+ endif ()
# Borland uses -w- to suppress warnings.
if (BORLAND)
set (HDF5_WARNINGS_BLOCKED 1)
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -w-")
- endif (BORLAND)
+ endif ()
# Most compilers use -w to suppress warnings.
if (NOT HDF5_WARNINGS_BLOCKED)
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -w")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -w")
- endif (NOT HDF5_WARNINGS_BLOCKED)
-endif (HDF5_DISABLE_COMPILER_WARNINGS)
+ endif ()
+endif ()
#-----------------------------------------------------------------------------
# CDash is configured to only allow 3000 warnings, so
@@ -64,10 +75,22 @@ endif (HDF5_DISABLE_COMPILER_WARNINGS)
#-----------------------------------------------------------------------------
if (NOT MSVC AND CMAKE_COMPILER_IS_GNUCC)
if (NOT ${CMAKE_SYSTEM_NAME} MATCHES "SunOS")
- set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wundef -Wshadow -Wpointer-arith -Wbad-function-cast -Wcast-qual -Wcast-align -Wwrite-strings -Wconversion -Waggregate-return -Wstrict-prototypes -Wmissing-prototypes -Wmissing-declarations -Wredundant-decls -Wnested-externs -Winline")
- else (NOT ${CMAKE_SYSTEM_NAME} MATCHES "SunOS")
+ set (CMAKE_C_FLAGS_5 "${CMAKE_C_FLAGS_5} -Wcast-qual")
+ set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wundef -Wshadow -Wpointer-arith -Wbad-function-cast -Wcast-align -Wwrite-strings -Wconversion -Waggregate-return -Wstrict-prototypes -Wmissing-prototypes -Wmissing-declarations -Wredundant-decls -Wnested-externs")
+ else ()
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -erroff=%none -DBSD_COMP")
- endif (NOT ${CMAKE_SYSTEM_NAME} MATCHES "SunOS")
+ endif ()
+
+ #-----------------------------------------------------------------------------
+ # Option to allow the user to enable developer warnings
+ #-----------------------------------------------------------------------------
+ option (HDF5_ENABLE_DEV_WARNINGS "Enable HDF5 developer group warnings" OFF)
+ if (HDF5_ENABLE_DEV_WARNINGS)
+ set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Winline -Waggregate-return")
+ else ()
+ set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wno-unused-parameter -Wno-inline -Wno-aggregate-return")
+ endif ()
+
# Append warning flags
# Don't use the '-Wtraditional' flag, we're way past having K&R C code
# set (H5_CFLAGS "${H5_CFLAGS} -Wtraditional")
@@ -83,75 +106,102 @@ if (NOT MSVC AND CMAKE_COMPILER_IS_GNUCC)
set (H5_CFLAGS1 "${H5_CFLAGS1} -Wfloat-equal -Wmissing-format-attribute")
# Append warning flags from gcc-3.2* case
- set (H5_CFLAGS1 "${H5_CFLAGS1} -Wmissing-noreturn -Wpacked -Wdisabled-optimization")
+ set (H5_CFLAGS1 "${H5_CFLAGS1} -Wpacked -Wdisabled-optimization")
+ if (HDF5_ENABLE_DEV_WARNINGS)
+ set (H5_CFLAGS1 "${H5_CFLAGS1} -Wmissing-noreturn")
+ else ()
+ set (H5_CFLAGS1 "${H5_CFLAGS1} -Wno-missing-noreturn")
+ endif ()
# Enable more format checking flags, beyond the basic -Wformat included
# in -Wall
- set (H5_CFLAGS1 "${H5_CFLAGS1} -Wformat=2")
-
- # The "unreachable code" warning appears to be reliable now...
- # (this warning was removed in gcc 4.5+)
- if (NOT CMAKE_C_COMPILER_VERSION VERSION_LESS 4.7)
- set (H5_CFLAGS1 "${H5_CFLAGS1} -Wunreachable-code")
- endif()
+ set (H5_CFLAGS1_5 "${H5_CFLAGS1_5} -Wformat=2")
# Append warning flags from gcc-3.3* case
set (H5_CFLAGS1 "${H5_CFLAGS1} -Wendif-labels")
# Append warning flags from gcc-3.4* case
- set (H5_CFLAGS2 "${H5_CFLAGS2} -Wdeclaration-after-statement -Wold-style-definition -Winvalid-pch")
+ set (H5_CFLAGS1 "${H5_CFLAGS1} -Wdeclaration-after-statement -Wold-style-definition -Winvalid-pch")
# Append more extra warning flags that only gcc4.0+ know about
set (H5_CFLAGS2 "${H5_CFLAGS2} -Wvariadic-macros -Winit-self -Wmissing-include-dirs -Wswitch-default -Wswitch-enum -Wunused-macros")
# Append more extra warning flags that only gcc 4.1+ know about
- set (H5_CFLAGS3 "${H5_CFLAGS3} -Wunsafe-loop-optimizations -Wc++-compat")
+ set (H5_CFLAGS2_5 "${H5_CFLAGS2_5} -Wunsafe-loop-optimizations")
+ set (H5_CFLAGS2 "${H5_CFLAGS2} -Wc++-compat")
# Append more extra warning flags that only gcc 4.2+ know about
- set (H5_CFLAGS3 "${H5_CFLAGS3} -Wstrict-overflow")
+ set (H5_CFLAGS2_5 "${H5_CFLAGS2_5} -Wstrict-overflow")
+ set (H5_CFLAGS2 "${H5_CFLAGS2} -Wno-strict-overflow")
# Append more extra warning flags that only gcc 4.3+ know about
#
# Technically, variable-length arrays are part of the C99 standard, but
# we should approach them a bit cautiously... -QAK
- set (H5_CFLAGS3 "${H5_CFLAGS3} -Wlogical-op -Wlarger-than=2048 -Wvla")
+ set (H5_CFLAGS2 "${H5_CFLAGS2} -Wlogical-op -Wlarger-than=2048 -Wvla")
# Append more extra warning flags that only gcc 4.4+ know about
- set (H5_CFLAGS4 "${H5_CFLAGS4} -Wsync-nand -Wframe-larger-than=16384 -Wpacked-bitfield-compat")
+ set (H5_CFLAGS2 "${H5_CFLAGS2} -Wsync-nand -Wframe-larger-than=16384 -Wpacked-bitfield-compat")
# Append more extra warning flags that only gcc 4.5+ know about
if (NOT CMAKE_C_COMPILER_VERSION VERSION_LESS 4.5)
- set (H5_CFLAGS4 "${H5_CFLAGS4} -Wstrict-overflow=5 -Wjump-misses-init -Wunsuffixed-float-constants")
- endif()
+ set (H5_CFLAGS2_5 "${H5_CFLAGS2_5} -Wstrict-overflow=5 -Wjump-misses-init -Wunsuffixed-float-constants")
+ set (H5_CFLAGS2 "${H5_CFLAGS2} -Wjump-misses-init -Wunsuffixed-float-constants")
+ endif ()
# Append more extra warning flags that only gcc 4.6+ know about
if (NOT CMAKE_C_COMPILER_VERSION VERSION_LESS 4.6)
- set (H5_CFLAGS5 "${H5_CFLAGS5} -Wdouble-promotion -Wsuggest-attribute=const -Wtrampolines")
- endif()
+ set (H5_CFLAGS2 "${H5_CFLAGS2} -Wdouble-promotion -Wtrampolines")
+ if (HDF5_ENABLE_DEV_WARNINGS)
+ set (H5_CFLAGS2 "${H5_CFLAGS2} -Wsuggest-attribute=const")
+ else ()
+ set (H5_CFLAGS2 "${H5_CFLAGS2} -Wno-suggest-attribute=const")
+ endif ()
+ endif ()
+
+ # The "unreachable code" warning appears to be reliable now...
+ # (this warning was removed in gcc 4.5+)
+ if (NOT CMAKE_C_COMPILER_VERSION VERSION_LESS 4.7)
+ set (H5_CFLAGS2 "${H5_CFLAGS2} -Wunreachable-code")
+ endif ()
# Append more extra warning flags that only gcc 4.7+ know about
if (NOT CMAKE_C_COMPILER_VERSION VERSION_LESS 4.7)
- set (H5_CFLAGS5 "${H5_CFLAGS5} -Wstack-usage=8192 -Wvector-operation-performance -Wsuggest-attribute=pure -Wsuggest-attribute=noreturn")
- endif()
+ set (H5_CFLAGS2 "${H5_CFLAGS2} -Wstack-usage=8192 -Wvector-operation-performance")
+ if (HDF5_ENABLE_DEV_WARNINGS)
+ set (H5_CFLAGS2 "${H5_CFLAGS2} -Wsuggest-attribute=pure -Wsuggest-attribute=noreturn")
+ else ()
+ set (H5_CFLAGS2 "${H5_CFLAGS2} -Wno-suggest-attribute=pure -Wno-suggest-attribute=noreturn")
+ endif ()
+ endif ()
# Append more extra warning flags that only gcc 4.8+ know about
if (NOT CMAKE_C_COMPILER_VERSION VERSION_LESS 4.8)
- set (H5_CFLAGS5 "${H5_CFLAGS5} -Wsuggest-attribute=format")
- endif()
+ if (HDF5_ENABLE_DEV_WARNINGS)
+ set (H5_CFLAGS2 "${H5_CFLAGS2} -Wsuggest-attribute=format")
+ else ()
+ set (H5_CFLAGS2 "${H5_CFLAGS2} -Wno-suggest-attribute=format")
+ endif ()
+ endif ()
# Append more extra warning flags that only gcc 4.9+ know about
if (NOT CMAKE_C_COMPILER_VERSION VERSION_LESS 4.9)
- set (H5_CFLAGS5 "${H5_CFLAGS5} -Wdate-time -Wopenmp-simd")
- endif()
+ set (H5_CFLAGS2 "${H5_CFLAGS2} -Wdate-time -Wopenmp-simd")
+ endif ()
# (There was no release of gcc 5.0)
# Append more extra warning flags that only gcc 5.1+ know about
if (NOT CMAKE_C_COMPILER_VERSION VERSION_LESS 5.1)
- set (H5_CFLAGS6 "${H5_CFLAGS6} -Warray-bounds=2 -Wc99-c11-compat")
- endif()
+ set (H5_CFLAGS3 "${H5_CFLAGS3} -Warray-bounds=2 -Wc99-c11-compat")
+ endif ()
+
+ # Append more extra warning flags that only gcc 6.x+ know about
+ if (NOT CMAKE_C_COMPILER_VERSION VERSION_LESS 6.0)
+ set (H5_CFLAGS4 "${H5_CFLAGS4} -Wnull-dereference -Wunused-const-variable -Wduplicated-cond -Whsa")
+ endif ()
-endif (NOT MSVC AND CMAKE_COMPILER_IS_GNUCC)
+endif ()
#-----------------------------------------------------------------------------
# Option to allow the user to enable all warnings
@@ -159,16 +209,23 @@ endif (NOT MSVC AND CMAKE_COMPILER_IS_GNUCC)
option (HDF5_ENABLE_ALL_WARNINGS "Enable all warnings" OFF)
if (HDF5_ENABLE_ALL_WARNINGS)
if (MSVC)
- string (REGEX REPLACE "(^| )([/-])W[0-9]( |$)" " " CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
- set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /Wall")
- string (REGEX REPLACE "(^| )([/-])W[0-9]( |$)" " " CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
- set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /Wall")
- else (MSVC)
+ if (HDF5_ENABLE_DEV_WARNINGS)
+ string (REGEX REPLACE "(^| )([/-])W[0-9]( |$)" " " CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
+ set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /Wall /wd4668")
+ string (REGEX REPLACE "(^| )([/-])W[0-9]( |$)" " " CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+ set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /Wall /wd4668")
+ else ()
+ string (REGEX REPLACE "(^| )([/-])W[0-9]( |$)" " " CMAKE_C_FLAGS "${CMAKE_C_FLAGS}")
+ set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /W3")
+ string (REGEX REPLACE "(^| )([/-])W[0-9]( |$)" " " CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
+ set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W3")
+ endif ()
+ else ()
if (CMAKE_COMPILER_IS_GNUCC)
- set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -Wextra -pedantic ${H5_CFLAGS1} ${H5_CFLAGS2} ${H5_CFLAGS3} ${H5_CFLAGS4}")
- endif (CMAKE_COMPILER_IS_GNUCC)
- endif (MSVC)
-endif (HDF5_ENABLE_ALL_WARNINGS)
+ set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -Wextra -pedantic ${H5_CFLAGS1} ${H5_CFLAGS2}")
+ endif ()
+ endif ()
+endif ()
#-----------------------------------------------------------------------------
# Option to allow the user to enable warnings by groups
@@ -180,12 +237,12 @@ if (HDF5_ENABLE_GROUPZERO_WARNINGS)
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /W1")
string (REGEX REPLACE "(^| )([/-])W[0-9]( |$)" " " CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W1")
- else (MSVC)
+ else ()
if (CMAKE_COMPILER_IS_GNUCC)
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -Wall -Wextra -pedantic")
- endif (CMAKE_COMPILER_IS_GNUCC)
- endif (MSVC)
-endif (HDF5_ENABLE_GROUPZERO_WARNINGS)
+ endif ()
+ endif ()
+endif ()
#-----------------------------------------------------------------------------
# Option to allow the user to enable warnings by groups
@@ -197,10 +254,10 @@ if (HDF5_ENABLE_GROUPONE_WARNINGS)
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /W2")
string (REGEX REPLACE "(^| )([/-])W[0-9]( |$)" " " CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W2")
- else (MSVC)
+ else ()
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${H5_CFLAGS1}")
- endif (MSVC)
-endif (HDF5_ENABLE_GROUPONE_WARNINGS)
+ endif ()
+endif ()
#-----------------------------------------------------------------------------
# Option to allow the user to enable warnings by groups
@@ -212,10 +269,10 @@ if (HDF5_ENABLE_GROUPTWO_WARNINGS)
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /W3")
string (REGEX REPLACE "(^| )([/-])W[0-9]( |$)" " " CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W3")
- else (MSVC)
+ else ()
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${H5_CFLAGS2}")
- endif (MSVC)
-endif (HDF5_ENABLE_GROUPTWO_WARNINGS)
+ endif ()
+endif ()
#-----------------------------------------------------------------------------
# Option to allow the user to enable warnings by groups
@@ -227,10 +284,10 @@ if (HDF5_ENABLE_GROUPTHREE_WARNINGS)
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} /W4")
string (REGEX REPLACE "(^| )([/-])W[0-9]( |$)" " " CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS}")
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /W4")
- else (MSVC)
+ else ()
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${H5_CFLAGS3}")
- endif (MSVC)
-endif (HDF5_ENABLE_GROUPTHREE_WARNINGS)
+ endif ()
+endif ()
#-----------------------------------------------------------------------------
# Option to allow the user to enable warnings by groups
@@ -239,28 +296,8 @@ option (HDF5_ENABLE_GROUPFOUR_WARNINGS "Enable group four warnings" OFF)
if (HDF5_ENABLE_GROUPFOUR_WARNINGS)
if (NOT MSVC)
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${H5_CFLAGS4}")
- endif (NOT MSVC)
-endif (HDF5_ENABLE_GROUPFOUR_WARNINGS)
-
-#-----------------------------------------------------------------------------
-# Option to allow the user to enable warnings by groups
-#-----------------------------------------------------------------------------
-option (HDF5_ENABLE_GROUPFIVE_WARNINGS "Enable group five warnings" OFF)
-if (HDF5_ENABLE_GROUPFIVE_WARNINGS)
- if (NOT MSVC)
- set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${H5_CFLAGS5}")
- endif (NOT MSVC)
-endif (HDF5_ENABLE_GROUPFIVE_WARNINGS)
-
-#-----------------------------------------------------------------------------
-# Option to allow the user to enable warnings by groups
-#-----------------------------------------------------------------------------
-option (HDF5_ENABLE_GROUPSIX_WARNINGS "Enable group six warnings" OFF)
-if (HDF5_ENABLE_GROUPSIX_WARNINGS)
- if (NOT MSVC)
- set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${H5_CFLAGS6}")
- endif (NOT MSVC)
-endif (HDF5_ENABLE_GROUPSIX_WARNINGS)
+ endif ()
+endif ()
#-----------------------------------------------------------------------------
# This is in here to help some of the GCC based IDES like Eclipse
@@ -268,7 +305,7 @@ endif (HDF5_ENABLE_GROUPSIX_WARNINGS)
#-----------------------------------------------------------------------------
if (CMAKE_COMPILER_IS_GNUCC)
set (CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fmessage-length=0")
-endif (CMAKE_COMPILER_IS_GNUCC)
-if (CMAKE_COMPILER_IS_GNUCXX)
+endif ()
+if (CMAKE_COMPILER_IS_GNUCXX AND CMAKE_CXX_COMPILER_LOADED)
set (CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fmessage-length=0")
-endif (CMAKE_COMPILER_IS_GNUCXX)
+endif ()
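
The warning-group switches touched above, HDF5_ENABLE_GROUPZERO_WARNINGS through HDF5_ENABLE_GROUPFOUR_WARNINGS (groups five and six are removed by this patch), are ordinary CMake cache options, so they can be preset the same way cacheinit.cmake does further down in this diff. A minimal sketch of a local cache-preload file; the file name is illustrative, the option names come from the hunks above:

    # my-warnings.cmake (illustrative name), used as:  cmake -C my-warnings.cmake <hdf5-source>
    set (HDF5_ENABLE_ALL_WARNINGS ON CACHE BOOL "Enable all warnings" FORCE)
    set (HDF5_ENABLE_GROUPONE_WARNINGS ON CACHE BOOL "Enable group one warnings" FORCE)
    set (HDF5_ENABLE_GROUPTWO_WARNINGS OFF CACHE BOOL "Enable group two warnings" FORCE)

On MSVC the groups select /W1 through /W4, while non-MSVC builds append the corresponding H5_CFLAGS1..4 flag sets (group zero adds -Wall -Wextra -pedantic for GCC).
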
diff --git a/config/cmake/README.txt.cmake.in b/config/cmake/README.txt.cmake.in
index 7ab3c35..ceeda49 100644
--- a/config/cmake/README.txt.cmake.in
+++ b/config/cmake/README.txt.cmake.in
@@ -35,6 +35,8 @@ To test the installation with the examples;
Create a directory to run the examples.
Copy HDF5Examples folder to this directory.
Copy HDF5_Examples.cmake to this directory.
+ Copy HDF5_Examples_options.cmake to this directory.
+ Copy CTestScript.cmake to this directory.
The default source folder is defined as "HDF5Examples". It can be changed
with the CTEST_SOURCE_NAME script option.
The default installation folder is defined as "@CMAKE_INSTALL_PREFIX@".
@@ -43,8 +45,8 @@ To test the installation with the examples;
with the CTEST_CONFIGURATION_TYPE script option. Note that this must
be the same as the value used with the -C command line option.
The default build configuration is defined to build and use static libraries.
- Shared libraries can be used with the STATICLIBRARIES script option set to "NO".
- Other options can be changed by editing the HDF5_Examples.cmake file.
+ Shared libraries can be used with the STATICONLYLIBRARIES script option set to "NO".
+ Other options can be changed by editing the HDF5_Examples_options.cmake file.
If the defaults are okay, execute from this directory:
ctest -S HDF5_Examples.cmake -C Release -V -O test.log
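
The STATICONLYLIBRARIES setting mentioned above is a script option, and the HDF5 CTest drivers in this patch take such options through CTEST_SCRIPT_ARG. A hedged sketch of the shared-library run, assuming HDF5_Examples.cmake splits OPTION=VALUE pairs the same way HDF5config.cmake does later in this diff; otherwise the option can simply be edited into the copied HDF5_Examples_options.cmake:

    #   ctest -S HDF5_Examples.cmake,STATICONLYLIBRARIES=NO -C Release -V -O test.log
    # inside the script the pairs are split like this:
    string (REPLACE "," ";" script_args "${CTEST_SCRIPT_ARG}")
    foreach (current_var ${script_args})
      if ("${current_var}" MATCHES "^([^=]+)=(.+)$")
        set ("${CMAKE_MATCH_1}" "${CMAKE_MATCH_2}")
      endif ()
    endforeach ()
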
diff --git a/config/cmake/UseJava.cmake b/config/cmake/UseJava.cmake
index 6391c63..3e74d4f 100644
--- a/config/cmake/UseJava.cmake
+++ b/config/cmake/UseJava.cmake
@@ -1250,7 +1250,7 @@ function(create_javadoc _target)
else ()
set(_overview ${_overview}:${_path})
endif ()
- endforeach()
+ endforeach ()
set(_javadoc_options ${_javadoc_options} -overview ${_overview})
endif ()
diff --git a/config/cmake/UserMacros/Windows_MT.cmake b/config/cmake/UserMacros/Windows_MT.cmake
index a54f22c..b6cc513 100644
--- a/config/cmake/UserMacros/Windows_MT.cmake
+++ b/config/cmake/UserMacros/Windows_MT.cmake
@@ -1,16 +1,27 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
########################################################
# Include file for user options
########################################################
# To use this option, copy both the macro and option code
-# into the root UserMacros.cmake file.
+# into the root UserMacros.cmake file.
# OR add an include to the root UserMacros.cmake file:
# INCLUDE(path_to_file/WINDOWS_MT.cmake)
#-----------------------------------------------------------------------------
# Option to Build with Static CRT libraries on Windows
#-------------------------------------------------------------------------------
-MACRO (TARGET_STATIC_CRT_FLAGS)
+macro (TARGET_STATIC_CRT_FLAGS)
if (MSVC AND NOT BUILD_SHARED_LIBS)
foreach (flag_var
CMAKE_C_FLAGS CMAKE_C_FLAGS_DEBUG CMAKE_C_FLAGS_RELEASE
@@ -19,23 +30,22 @@ MACRO (TARGET_STATIC_CRT_FLAGS)
CMAKE_CXX_FLAGS_MINSIZEREL CMAKE_CXX_FLAGS_RELWITHDEBINFO)
if (${flag_var} MATCHES "/MD")
string (REGEX REPLACE "/MD" "/MT" ${flag_var} "${${flag_var}}")
- endif (${flag_var} MATCHES "/MD")
- endforeach (flag_var)
+ endif ()
+ endforeach ()
foreach (flag_var
CMAKE_Fortran_FLAGS CMAKE_Fortran_FLAGS_DEBUG CMAKE_Fortran_FLAGS_RELEASE
CMAKE_Fortran_FLAGS_MINSIZEREL CMAKE_Fortran_FLAGS_RELWITHDEBINFO)
if (${flag_var} MATCHES "/libs:dll")
string (REGEX REPLACE "/libs:dll" "/libs:static" ${flag_var} "${${flag_var}}")
- endif (${flag_var} MATCHES "/libs:dll")
- endforeach (flag_var)
+ endif ()
+ endforeach ()
set (WIN_COMPILE_FLAGS "")
set (WIN_LINK_FLAGS "/NODEFAULTLIB:MSVCRT")
- endif (MSVC AND NOT BUILD_SHARED_LIBS)
-ENDMACRO (TARGET_STATIC_CRT_FLAGS)
+ endif ()
+endmacro ()
#-----------------------------------------------------------------------------
option (BUILD_STATIC_CRT_LIBS "Build With Static CRT Libraries" OFF)
if (BUILD_STATIC_CRT_LIBS)
TARGET_STATIC_CRT_FLAGS ()
-endif (BUILD_STATIC_CRT_LIBS)
- \ No newline at end of file
+endif ()
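
The header comments above describe the intended wiring: either paste the macro and option block into the root UserMacros.cmake, or include this file from it. A minimal sketch of the include route; the exact path spelling and the generator name are illustrative:

    # in the root UserMacros.cmake
    include (${CMAKE_SOURCE_DIR}/config/cmake/UserMacros/Windows_MT.cmake)
    # then configure with the option enabled, for example
    #   cmake -DBUILD_STATIC_CRT_LIBS:BOOL=ON -G "Visual Studio 14 2015" <hdf5-source>

Since the option () declaration and the TARGET_STATIC_CRT_FLAGS call both live in this file, the include alone is enough; the -D merely flips the OFF default.
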
diff --git a/config/cmake/cacheinit.cmake b/config/cmake/cacheinit.cmake
index 24ae95c..6a66c8a 100644
--- a/config/cmake/cacheinit.cmake
+++ b/config/cmake/cacheinit.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
# This is the CMakeCache file.
########################
@@ -6,8 +17,6 @@
set (CMAKE_INSTALL_FRAMEWORK_PREFIX "Library/Frameworks" CACHE STRING "Frameworks installation directory" FORCE)
-set (HDF5_GENERATE_HEADERS ON CACHE BOOL "Rebuild Generated Files" FORCE)
-
set (HDF_PACKAGE_EXT "" CACHE STRING "Name of HDF package extension" FORCE)
set (HDF5_BUILD_FORTRAN ON CACHE BOOL "Build FORTRAN support" FORCE)
@@ -42,6 +51,8 @@ set (HDF5_MEMORY_ALLOC_SANITY_CHECK OFF CACHE BOOL "Indicate that internal memor
set (HDF5_DISABLE_COMPILER_WARNINGS OFF CACHE BOOL "Disable compiler warnings" FORCE)
+set (HDF5_ENABLE_ALL_WARNINGS ON CACHE BOOL "Enable all warnings" FORCE)
+
set (HDF5_USE_FOLDERS ON CACHE BOOL "Enable folder grouping of projects in IDEs." FORCE)
set (HDF5_USE_16_API_DEFAULT OFF CACHE BOOL "Use the HDF5 1.6.x API by default" FORCE)
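
cacheinit.cmake is not include()d; it is a cache pre-load script handed to CMake with -C, which is how the CTestScript.cmake changes later in this diff consume it (the configure command there passes -C "${CTEST_SOURCE_DIRECTORY}/config/cmake/cacheinit.cmake"). A sketch of using it by hand from an empty build directory:

    #   cmake -C <hdf5-source>/config/cmake/cacheinit.cmake -G "Unix Makefiles" <hdf5-source>
    # every entry is FORCEd, so the newly added
    #   set (HDF5_ENABLE_ALL_WARNINGS ON CACHE BOOL "Enable all warnings" FORCE)
    # overrides any stale value in an existing CMakeCache.txt
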
diff --git a/config/cmake/hdf5-config-version.cmake.in b/config/cmake/hdf5-config-version.cmake.in
index 5911fa7..8e16725 100644
--- a/config/cmake/hdf5-config-version.cmake.in
+++ b/config/cmake/hdf5-config-version.cmake.in
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
#-----------------------------------------------------------------------------
# HDF5 Version file for install directory
#-----------------------------------------------------------------------------
@@ -12,36 +23,36 @@ set (PACKAGE_VERSION "@HDF5_VERSION_STRING@")
if("${PACKAGE_VERSION}" VERSION_LESS "${PACKAGE_FIND_VERSION}" )
set(PACKAGE_VERSION_COMPATIBLE FALSE)
-else()
+else ()
if ("${PACKAGE_FIND_VERSION_MAJOR}" STREQUAL "@H5_VERS_MAJOR@")
-
+
# exact match for version @H5_VERS_MAJOR@.@H5_VERS_MINOR@
if ("${PACKAGE_FIND_VERSION_MINOR}" STREQUAL "@H5_VERS_MINOR@")
-
+
# compatible with any version @H5_VERS_MAJOR@.@H5_VERS_MINOR@.x
- set (PACKAGE_VERSION_COMPATIBLE TRUE)
-
+ set (PACKAGE_VERSION_COMPATIBLE TRUE)
+
if ("${PACKAGE_FIND_VERSION_PATCH}" STREQUAL "@H5_VERS_RELEASE@")
set (PACKAGE_VERSION_EXACT TRUE)
-
+
if ("${PACKAGE_FIND_VERSION_TWEAK}" STREQUAL "@H5_VERS_SUBRELEASE@")
# not using this yet
- endif ("${PACKAGE_FIND_VERSION_TWEAK}" STREQUAL "@H5_VERS_SUBRELEASE@")
- endif ("${PACKAGE_FIND_VERSION_PATCH}" STREQUAL "@H5_VERS_RELEASE@")
- else ("${PACKAGE_FIND_VERSION_MINOR}" STREQUAL "@H5_VERS_MINOR@")
- set (PACKAGE_VERSION_COMPATIBLE FALSE)
- endif ("${PACKAGE_FIND_VERSION_MINOR}" STREQUAL "@H5_VERS_MINOR@")
- endif ("${PACKAGE_FIND_VERSION_MAJOR}" STREQUAL "@H5_VERS_MAJOR@")
-endif()
+ endif ()
+ endif ()
+ else ()
+ set (PACKAGE_VERSION_COMPATIBLE FALSE)
+ endif ()
+ endif ()
+endif ()
# if the installed or the using project don't have CMAKE_SIZEOF_VOID_P set, ignore it:
if("${CMAKE_SIZEOF_VOID_P}" STREQUAL "" OR "@CMAKE_SIZEOF_VOID_P@" STREQUAL "")
return()
-endif()
+endif ()
# check that the installed version has the same 32/64bit-ness as the one which is currently searching:
if(NOT "${CMAKE_SIZEOF_VOID_P}" STREQUAL "@CMAKE_SIZEOF_VOID_P@")
math(EXPR installedBits "@CMAKE_SIZEOF_VOID_P@ * 8")
set(PACKAGE_VERSION "${PACKAGE_VERSION} (${installedBits}bit)")
set(PACKAGE_VERSION_UNSUITABLE TRUE)
-endif()
+endif ()
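
Read against the branches above: compatibility requires the requested major.minor to equal the installed @H5_VERS_MAJOR@.@H5_VERS_MINOR@, exactness additionally requires a matching patch level, and any request newer than the installed copy is rejected up front by the VERSION_LESS test. A worked sketch with illustrative numbers, assuming an installed 1.10.1:

    find_package (HDF5 1.10.0)  # minor matches          -> PACKAGE_VERSION_COMPATIBLE = TRUE
    find_package (HDF5 1.10.1)  # patch matches as well  -> PACKAGE_VERSION_EXACT      = TRUE
    find_package (HDF5 1.8.17)  # minor differs          -> PACKAGE_VERSION_COMPATIBLE = FALSE
    find_package (HDF5 1.11.0)  # newer than the install -> rejected by the VERSION_LESS branch

The trailing 32/64-bit check can still mark an otherwise compatible install PACKAGE_VERSION_UNSUITABLE when CMAKE_SIZEOF_VOID_P disagrees.
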
diff --git a/config/cmake/hdf5-config.cmake.in b/config/cmake/hdf5-config.cmake.in
index 41fc726..c54391f 100644
--- a/config/cmake/hdf5-config.cmake.in
+++ b/config/cmake/hdf5-config.cmake.in
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
#-----------------------------------------------------------------------------
# HDF5 Config file for compiling against hdf5 build/install directory
#-----------------------------------------------------------------------------
@@ -50,7 +61,7 @@ if (${HDF5_PACKAGE_NAME}_BUILD_JAVA)
)
set (${HDF5_PACKAGE_NAME}_JAVA_LIBRARY "@PACKAGE_CURRENT_BUILD_DIR@/lib")
set (${HDF5_PACKAGE_NAME}_JAVA_LIBRARIES "${${HDF5_PACKAGE_NAME}_JAVA_LIBRARY}")
-endif()
+endif ()
#-----------------------------------------------------------------------------
# Directories
diff --git a/config/cmake/jrunTest.cmake b/config/cmake/jrunTest.cmake
index 59f4c7b..7af0590 100644
--- a/config/cmake/jrunTest.cmake
+++ b/config/cmake/jrunTest.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
# runTest.cmake executes a command and captures the output in a file. File is then compared
# against a reference file. Exit status of command can also be compared.
cmake_policy(SET CMP0007 NEW)
@@ -5,44 +16,53 @@ cmake_policy(SET CMP0007 NEW)
# arguments checking
if (NOT TEST_TESTER)
message (FATAL_ERROR "Require TEST_TESTER to be defined")
-endif (NOT TEST_TESTER)
+endif ()
if (NOT TEST_PROGRAM)
message (FATAL_ERROR "Require TEST_PROGRAM to be defined")
-endif (NOT TEST_PROGRAM)
+endif ()
if (NOT TEST_LIBRARY_DIRECTORY)
message (STATUS "Require TEST_LIBRARY_DIRECTORY to be defined")
-endif (NOT TEST_LIBRARY_DIRECTORY)
+endif ()
if (NOT TEST_FOLDER)
message ( FATAL_ERROR "Require TEST_FOLDER to be defined")
-endif (NOT TEST_FOLDER)
+endif ()
if (NOT TEST_OUTPUT)
message (FATAL_ERROR "Require TEST_OUTPUT to be defined")
-endif (NOT TEST_OUTPUT)
+endif ()
if (NOT TEST_CLASSPATH)
message (STATUS "Require TEST_CLASSPATH to be defined")
-endif (NOT TEST_CLASSPATH)
+endif ()
if (NOT TEST_REFERENCE)
message (FATAL_ERROR "Require TEST_REFERENCE to be defined")
-endif (NOT TEST_REFERENCE)
+endif ()
+if (EXISTS ${TEST_FOLDER}/${TEST_OUTPUT})
+ file (REMOVE ${TEST_FOLDER}/${TEST_OUTPUT})
+endif ()
+
+if (EXISTS ${TEST_FOLDER}/${TEST_OUTPUT}.err)
+ file (REMOVE ${TEST_FOLDER}/${TEST_OUTPUT}.err)
+endif ()
+
+# if there is no error reference file, add the error output to the stdout file
if (NOT TEST_ERRREF)
if (NOT SKIP_APPEND)
# append error file since skip was not defined
set (ERROR_APPEND 1)
- endif(NOT SKIP_APPEND)
-endif (NOT TEST_ERRREF)
+ endif ()
+endif ()
if (NOT TEST_LOG_LEVEL)
set (LOG_LEVEL "info")
-else (NOT TEST_LOG_LEVEL)
+else ()
set (LOG_LEVEL "${TEST_LOG_LEVEL}")
-endif (NOT TEST_LOG_LEVEL)
+endif ()
message (STATUS "COMMAND: ${TEST_TESTER} -Xmx1024M -Dorg.slf4j.simpleLogger.defaultLog=${LOG_LEVEL} -Djava.library.path=\"${TEST_LIBRARY_DIRECTORY}\" -cp \"${TEST_CLASSPATH}\" ${TEST_ARGS} ${TEST_PROGRAM} ${ARGN}")
if (WIN32 AND NOT MINGW)
set (ENV{PATH} "$ENV{PATH}\\;${TEST_LIBRARY_DIRECTORY}")
-endif (WIN32 AND NOT MINGW)
+endif ()
# run the test program, capture the stdout/stderr and the result var
execute_process (
@@ -60,29 +80,31 @@ execute_process (
message (STATUS "COMMAND Result: ${TEST_RESULT}")
+# if the .err file exists and ERROR_APPEND is enabled
if (EXISTS ${TEST_FOLDER}/${TEST_OUTPUT}.err)
file (READ ${TEST_FOLDER}/${TEST_OUTPUT}.err TEST_STREAM)
if (TEST_MASK_FILE)
STRING(REGEX REPLACE "CurrentDir is [^\n]+\n" "CurrentDir is (dir name)\n" TEST_STREAM "${TEST_STREAM}")
- endif (TEST_MASK_FILE)
+ endif ()
if (NOT ERROR_APPEND)
# append error output to the stdout output file
file (WRITE ${TEST_FOLDER}/${TEST_OUTPUT}.err "${TEST_STREAM}")
- else (NOT ERROR_APPEND)
+ else ()
# write back to original .err file
file (APPEND ${TEST_FOLDER}/${TEST_OUTPUT} "${TEST_STREAM}")
- endif (NOT ERROR_APPEND)
-endif (EXISTS ${TEST_FOLDER}/${TEST_OUTPUT}.err)
+ endif ()
+endif ()
+# if the output file or the .err file needs to mask out error stack info
if (TEST_MASK_ERROR)
if (NOT TEST_ERRREF)
# the error stack has been appended to the output file
file (READ ${TEST_FOLDER}/${TEST_OUTPUT} TEST_STREAM)
- else (NOT TEST_ERRREF)
+ else ()
# the error stack remains in the .err file
file (READ ${TEST_FOLDER}/${TEST_OUTPUT}.err TEST_STREAM)
- endif (NOT TEST_ERRREF)
+ endif ()
string (REGEX REPLACE "Time:[^\n]+\n" "Time: XXXX\n" TEST_STREAM "${TEST_STREAM}")
string (REGEX REPLACE "thread [0-9]*:" "thread (IDs):" TEST_STREAM "${TEST_STREAM}")
string (REGEX REPLACE ": ([^\n]*)[.]c " ": (file name) " TEST_STREAM "${TEST_STREAM}")
@@ -94,16 +116,16 @@ if (TEST_MASK_ERROR)
# write back the changes to the original files
if (NOT TEST_ERRREF)
file (WRITE ${TEST_FOLDER}/${TEST_OUTPUT} "${TEST_STREAM}")
- else (NOT TEST_ERRREF)
+ else ()
file (WRITE ${TEST_FOLDER}/${TEST_OUTPUT}.err "${TEST_STREAM}")
- endif (NOT TEST_ERRREF)
-endif (TEST_MASK_ERROR)
+ endif ()
+endif ()
# if the return value is !=0 bail out
if (NOT ${TEST_RESULT} STREQUAL ${TEST_EXPECT})
message (STATUS "ERROR OUTPUT: ${TEST_STREAM}")
message (FATAL_ERROR "Failed: Test program ${TEST_PROGRAM} exited != 0.\n${TEST_ERROR}")
-endif (NOT ${TEST_RESULT} STREQUAL ${TEST_EXPECT})
+endif ()
message (STATUS "COMMAND Error: ${TEST_ERROR}")
@@ -112,7 +134,7 @@ if (NOT TEST_SKIP_COMPARE)
if (WIN32 AND NOT MINGW)
file (READ ${TEST_FOLDER}/${TEST_REFERENCE} TEST_STREAM)
file (WRITE ${TEST_FOLDER}/${TEST_REFERENCE} "${TEST_STREAM}")
- endif (WIN32 AND NOT MINGW)
+ endif ()
# now compare the output with the reference
execute_process (
@@ -120,42 +142,49 @@ if (NOT TEST_SKIP_COMPARE)
RESULT_VARIABLE TEST_RESULT
)
if (NOT ${TEST_RESULT} STREQUAL 0)
- set (TEST_RESULT 0)
- file (STRINGS ${TEST_FOLDER}/${TEST_OUTPUT} test_act)
- list (LENGTH test_act len_act)
- file (STRINGS ${TEST_FOLDER}/${TEST_REFERENCE} test_ref)
- list (LENGTH test_ref len_ref)
- if (NOT ${len_act} STREQUAL "0")
- MATH (EXPR _FP_LEN "${len_ref} - 1")
- foreach (line RANGE 0 ${_FP_LEN})
- list (GET test_act ${line} str_act)
- list (GET test_ref ${line} str_ref)
- if (NOT "${str_act}" STREQUAL "${str_ref}")
- if (NOT "${str_act}" STREQUAL "")
- set (TEST_RESULT 1)
- message ("line = ${line}\n***ACTUAL: ${str_act}\n****REFER: ${str_ref}\n")
- endif (NOT "${str_act}" STREQUAL "")
- endif (NOT "${str_act}" STREQUAL "${str_ref}")
- endforeach (line RANGE 0 ${_FP_LEN})
- endif (NOT ${len_act} STREQUAL "0")
- if (NOT ${len_act} STREQUAL ${len_ref})
- set (TEST_RESULT 1)
- endif (NOT ${len_act} STREQUAL ${len_ref})
- endif (NOT ${TEST_RESULT} STREQUAL 0)
+ set (TEST_RESULT 0)
+ file (STRINGS ${TEST_FOLDER}/${TEST_OUTPUT} test_act)
+ list (LENGTH test_act len_act)
+ file (STRINGS ${TEST_FOLDER}/${TEST_REFERENCE} test_ref)
+ list (LENGTH test_ref len_ref)
+ if (NOT ${len_act} STREQUAL "0" AND NOT ${len_ref} STREQUAL "0")
+ math (EXPR _FP_LEN "${len_ref} - 1")
+ foreach (line RANGE 0 ${_FP_LEN})
+ list (GET test_act ${line} str_act)
+ list (GET test_ref ${line} str_ref)
+ if (NOT "${str_act}" STREQUAL "${str_ref}")
+ if (NOT "${str_act}" STREQUAL "")
+ set (TEST_RESULT 1)
+ message ("line = ${line}\n***ACTUAL: ${str_act}\n****REFER: ${str_ref}\n")
+ endif ()
+ endif ()
+ endforeach ()
+ else ()
+ if (${len_act} STREQUAL "0")
+ message (STATUS "COMPARE Failed: ${TEST_FOLDER}/${TEST_OUTPUT} is empty")
+ endif ()
+ if (${len_ref} STREQUAL "0")
+ message (STATUS "COMPARE Failed: ${TEST_FOLDER}/${TEST_REFERENCE} is empty")
+ endif ()
+ endif ()
+ if (NOT ${len_act} STREQUAL ${len_ref})
+ set (TEST_RESULT 1)
+ endif ()
+ endif ()
message (STATUS "COMPARE Result: ${TEST_RESULT}")
# again, if return value is !=0 scream and shout
if (NOT ${TEST_RESULT} STREQUAL 0)
message (FATAL_ERROR "Failed: The output of ${TEST_OUTPUT} did not match ${TEST_REFERENCE}")
- endif (NOT ${TEST_RESULT} STREQUAL 0)
+ endif ()
# now compare the .err file with the error reference, if supplied
if (TEST_ERRREF)
if (WIN32 AND NOT MINGW)
file (READ ${TEST_FOLDER}/${TEST_ERRREF} TEST_STREAM)
file (WRITE ${TEST_FOLDER}/${TEST_ERRREF} "${TEST_STREAM}")
- endif (WIN32 AND NOT MINGW)
+ endif ()
# now compare the error output with the error reference
execute_process (
@@ -163,38 +192,45 @@ if (NOT TEST_SKIP_COMPARE)
RESULT_VARIABLE TEST_RESULT
)
if (NOT ${TEST_RESULT} STREQUAL 0)
- set (TEST_RESULT 0)
- file (STRINGS ${TEST_FOLDER}/${TEST_OUTPUT}.err test_act)
- list (LENGTH test_act len_act)
- file (STRINGS ${TEST_FOLDER}/${TEST_ERRREF} test_ref)
- list (LENGTH test_ref len_ref)
- MATH (EXPR _FP_LEN "${len_ref} - 1")
- if (NOT ${len_act} STREQUAL "0")
- MATH (EXPR _FP_LEN "${len_ref} - 1")
- foreach (line RANGE 0 ${_FP_LEN})
- list (GET test_act ${line} str_act)
- list (GET test_ref ${line} str_ref)
- if (NOT "${str_act}" STREQUAL "${str_ref}")
- if (NOT "${str_act}" STREQUAL "")
- set (TEST_RESULT 1)
- message ("line = ${line}\n***ACTUAL: ${str_act}\n****REFER: ${str_ref}\n")
- endif (NOT "${str_act}" STREQUAL "")
- endif (NOT "${str_act}" STREQUAL "${str_ref}")
- endforeach (line RANGE 0 ${_FP_LEN})
- endif (NOT ${len_act} STREQUAL "0")
- if (NOT ${len_act} STREQUAL ${len_ref})
- set (TEST_RESULT 1)
- endif (NOT ${len_act} STREQUAL ${len_ref})
- endif (NOT ${TEST_RESULT} STREQUAL 0)
+ set (TEST_RESULT 0)
+ file (STRINGS ${TEST_FOLDER}/${TEST_OUTPUT}.err test_act)
+ list (LENGTH test_act len_act)
+ file (STRINGS ${TEST_FOLDER}/${TEST_ERRREF} test_ref)
+ list (LENGTH test_ref len_ref)
+ math (EXPR _FP_LEN "${len_ref} - 1")
+ if (NOT ${len_act} STREQUAL "0" AND NOT ${len_ref} STREQUAL "0")
+ math (EXPR _FP_LEN "${len_ref} - 1")
+ foreach (line RANGE 0 ${_FP_LEN})
+ list (GET test_act ${line} str_act)
+ list (GET test_ref ${line} str_ref)
+ if (NOT "${str_act}" STREQUAL "${str_ref}")
+ if (NOT "${str_act}" STREQUAL "")
+ set (TEST_RESULT 1)
+ message ("line = ${line}\n***ACTUAL: ${str_act}\n****REFER: ${str_ref}\n")
+ endif ()
+ endif ()
+ endforeach ()
+ else ()
+ if (${len_act} STREQUAL "0")
+ message (STATUS "COMPARE Failed: ${TEST_FOLDER}/${TEST_OUTPUT}.err is empty")
+ endif ()
+ if (${len_ref} STREQUAL "0")
+ message (STATUS "COMPARE Failed: ${TEST_FOLDER}/${TEST_ERRREF} is empty")
+ endif ()
+      endif ()
+ if (NOT ${len_act} STREQUAL ${len_ref})
+ set (TEST_RESULT 1)
+ endif ()
+ endif ()
message (STATUS "COMPARE Result: ${TEST_RESULT}")
# again, if return value is !=0 scream and shout
if (NOT ${TEST_RESULT} STREQUAL 0)
message (FATAL_ERROR "Failed: The error output of ${TEST_OUTPUT}.err did not match ${TEST_ERRREF}")
- endif (NOT ${TEST_RESULT} STREQUAL 0)
- endif (TEST_ERRREF)
-endif (NOT TEST_SKIP_COMPARE)
+ endif ()
+ endif ()
+endif ()
if (TEST_GREP_COMPARE)
# now grep the output with the reference
@@ -205,7 +241,7 @@ if (TEST_GREP_COMPARE)
string (COMPARE EQUAL "${TEST_REFERENCE}" "${TEST_MATCH}" TEST_RESULT)
if (${TEST_RESULT} STREQUAL "0")
message (FATAL_ERROR "Failed: The output of ${TEST_PROGRAM} did not contain ${TEST_REFERENCE}")
- endif (${TEST_RESULT} STREQUAL "0")
+ endif ()
string (REGEX MATCH "${TEST_FILTER}" TEST_MATCH ${TEST_STREAM})
if (${TEST_EXPECT} STREQUAL "1")
@@ -213,9 +249,9 @@ if (TEST_GREP_COMPARE)
string (LENGTH "${TEST_MATCH}" TEST_RESULT)
if (NOT ${TEST_RESULT} STREQUAL "0")
message (FATAL_ERROR "Failed: The output of ${TEST_PROGRAM} did contain ${TEST_FILTER}")
- endif (NOT ${TEST_RESULT} STREQUAL "0")
- endif (${TEST_EXPECT} STREQUAL "0")
-endif (TEST_GREP_COMPARE)
+ endif ()
+ endif ()
+endif ()
# everything went fine...
message ("${TEST_PROGRAM} Passed")
diff --git a/config/cmake/mccacheinit.cmake b/config/cmake/mccacheinit.cmake
index 7dee5b5..3f0dd4f 100644
--- a/config/cmake/mccacheinit.cmake
+++ b/config/cmake/mccacheinit.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
# This is the CMakeCache file.
########################
@@ -6,8 +17,6 @@
set (CMAKE_INSTALL_FRAMEWORK_PREFIX "Library/Frameworks" CACHE STRING "Frameworks installation directory" FORCE)
-set (HDF5_GENERATE_HEADERS ON CACHE BOOL "Rebuild Generated Files" FORCE)
-
set (BUILD_SHARED_LIBS OFF CACHE BOOL "Build Shared Libraries" FORCE)
set (BUILD_TESTING ON CACHE BOOL "Build HDF5 Unit Testing" FORCE)
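
mccacheinit.cmake is the memcheck twin of cacheinit.cmake: CTestScript.cmake (next file) swaps it in as the -C pre-load only when the containing driver sets LOCAL_MEMCHECK_TEST, and then runs ctest_memcheck instead of ctest_test. A sketch of turning that path on from a driver such as HDF5config.cmake, where the line ships commented out:

    set (LOCAL_MEMCHECK_TEST "TRUE")   # selects mccacheinit.cmake and the ctest_memcheck () step
    # valgrind has to be findable; CTestScript.cmake locates it with
    #   find_program (CTEST_MEMORYCHECK_COMMAND NAMES valgrind)
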
diff --git a/config/cmake/scripts/CTestScript.cmake b/config/cmake/scripts/CTestScript.cmake
index f2675c3..3cb9a5e 100755
--- a/config/cmake/scripts/CTestScript.cmake
+++ b/config/cmake/scripts/CTestScript.cmake
@@ -1,4 +1,15 @@
-cmake_minimum_required(VERSION 3.1.0 FATAL_ERROR)
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
+cmake_minimum_required (VERSION 3.2.2 FATAL_ERROR)
########################################################
# This dashboard is maintained by The HDF Group
# For any comments please contact cdashhelp@hdfgroup.org
@@ -7,225 +18,209 @@ cmake_minimum_required(VERSION 3.1.0 FATAL_ERROR)
# -----------------------------------------------------------
# -- Get environment
# -----------------------------------------------------------
-if(NOT SITE_OS_NAME)
+if (NOT SITE_OS_NAME)
## machine name not provided - attempt to discover with uname
## -- set hostname
## --------------------------
- find_program(HOSTNAME_CMD NAMES hostname)
- exec_program(${HOSTNAME_CMD} ARGS OUTPUT_VARIABLE HOSTNAME)
- set(CTEST_SITE "${HOSTNAME}${CTEST_SITE_EXT}")
- find_program(UNAME NAMES uname)
- macro(getuname name flag)
- exec_program("${UNAME}" ARGS "${flag}" OUTPUT_VARIABLE "${name}")
- endmacro(getuname)
+ find_program (HOSTNAME_CMD NAMES hostname)
+ exec_program (${HOSTNAME_CMD} ARGS OUTPUT_VARIABLE HOSTNAME)
+ set (CTEST_SITE "${HOSTNAME}${CTEST_SITE_EXT}")
+ find_program (UNAME NAMES uname)
+ macro (getuname name flag)
+ exec_program ("${UNAME}" ARGS "${flag}" OUTPUT_VARIABLE "${name}")
+ endmacro ()
- getuname(osname -s)
- getuname(osrel -r)
- getuname(cpu -m)
- message(STATUS "Dashboard script uname output: ${osname}-${osrel}-${cpu}\n")
+ getuname (osname -s)
+ getuname (osrel -r)
+ getuname (cpu -m)
+ message (STATUS "Dashboard script uname output: ${osname}-${osrel}-${cpu}\n")
- set(CTEST_BUILD_NAME "${osname}-${osrel}-${cpu}")
- if(USE_AUTOTOOLS)
- set(CTEST_BUILD_NAME "AT-${CTEST_BUILD_NAME}")
- endif()
- if(SITE_BUILDNAME_SUFFIX)
- set(CTEST_BUILD_NAME "${CTEST_BUILD_NAME}-${SITE_BUILDNAME_SUFFIX}")
- endif()
- set(BUILD_OPTIONS "${ADD_BUILD_OPTIONS}")
-else(NOT SITE_OS_NAME)
+ set (CTEST_BUILD_NAME "${osname}-${osrel}-${cpu}")
+ if (SITE_BUILDNAME_SUFFIX)
+ set (CTEST_BUILD_NAME "${SITE_BUILDNAME_SUFFIX}-${CTEST_BUILD_NAME}")
+ endif ()
+ set (BUILD_OPTIONS "${ADD_BUILD_OPTIONS}")
+else ()
## machine name provided
## --------------------------
- if(CMAKE_HOST_UNIX)
- set(CTEST_BUILD_NAME "${SITE_OS_NAME}-${SITE_OS_VERSION}-${SITE_OS_BITS}-${SITE_COMPILER_NAME}-${SITE_COMPILER_VERSION}")
- else()
- set(CTEST_BUILD_NAME "${SITE_OS_NAME}-${SITE_OS_VERSION}-${SITE_COMPILER_NAME}")
- endif()
- if(SITE_BUILDNAME_SUFFIX)
- set(CTEST_BUILD_NAME "${CTEST_BUILD_NAME}-${SITE_BUILDNAME_SUFFIX}")
- endif()
- set(BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DSITE:STRING=${CTEST_SITE} -DBUILDNAME:STRING=${CTEST_BUILD_NAME}")
-endif(NOT SITE_OS_NAME)
+ if (CMAKE_HOST_UNIX)
+ set (CTEST_BUILD_NAME "${SITE_OS_NAME}-${SITE_OS_VERSION}-${SITE_OS_BITS}-${SITE_COMPILER_NAME}-${SITE_COMPILER_VERSION}")
+ else ()
+ set (CTEST_BUILD_NAME "${SITE_OS_NAME}-${SITE_OS_VERSION}-${SITE_COMPILER_NAME}")
+ endif ()
+ if (SITE_BUILDNAME_SUFFIX)
+ set (CTEST_BUILD_NAME "${CTEST_BUILD_NAME}-${SITE_BUILDNAME_SUFFIX}")
+ endif ()
+ set (BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DSITE:STRING=${CTEST_SITE} -DBUILDNAME:STRING=${CTEST_BUILD_NAME}")
+endif ()
#-----------------------------------------------------------------------------
# MAC machines need special option
#-----------------------------------------------------------------------------
-if(APPLE)
+if (APPLE)
# Compiler choice
- execute_process(COMMAND xcrun --find cc OUTPUT_VARIABLE XCODE_CC OUTPUT_STRIP_TRAILING_WHITESPACE)
- execute_process(COMMAND xcrun --find c++ OUTPUT_VARIABLE XCODE_CXX OUTPUT_STRIP_TRAILING_WHITESPACE)
- set(ENV{CC} "${XCODE_CC}")
- set(ENV{CXX} "${XCODE_CXX}")
+ execute_process (COMMAND xcrun --find cc OUTPUT_VARIABLE XCODE_CC OUTPUT_STRIP_TRAILING_WHITESPACE)
+ execute_process (COMMAND xcrun --find c++ OUTPUT_VARIABLE XCODE_CXX OUTPUT_STRIP_TRAILING_WHITESPACE)
+ set (ENV{CC} "${XCODE_CC}")
+ set (ENV{CXX} "${XCODE_CXX}")
- if(NOT NO_MAC_FORTRAN)
- # Shared fortran is not supported, build static
- set(BUILD_OPTIONS "${BUILD_OPTIONS} -DBUILD_SHARED_LIBS:BOOL=OFF -DCMAKE_ANSI_CFLAGS:STRING=-fPIC")
- else(NOT NO_MAC_FORTRAN)
- set(BUILD_OPTIONS "${BUILD_OPTIONS} -DHDF5_BUILD_FORTRAN:BOOL=OFF")
- endif(NOT NO_MAC_FORTRAN)
+ if (NOT NO_MAC_FORTRAN)
+ # Shared fortran is not supported, build static
+ set (BUILD_OPTIONS "${BUILD_OPTIONS} -DBUILD_SHARED_LIBS:BOOL=OFF -DCMAKE_ANSI_CFLAGS:STRING=-fPIC")
+ else ()
+ set (BUILD_OPTIONS "${BUILD_OPTIONS} -DHDF5_BUILD_FORTRAN:BOOL=OFF")
+ endif ()
- set(BUILD_OPTIONS "${BUILD_OPTIONS} -DCTEST_USE_LAUNCHERS:BOOL=ON -DCMAKE_BUILD_WITH_INSTALL_RPATH:BOOL=OFF")
-endif(APPLE)
+ set (BUILD_OPTIONS "${BUILD_OPTIONS} -DCTEST_USE_LAUNCHERS:BOOL=ON -DCMAKE_BUILD_WITH_INSTALL_RPATH:BOOL=OFF")
+endif ()
#-----------------------------------------------------------------------------
-set(NEED_REPOSITORY_CHECKOUT 0)
-set(CTEST_CMAKE_COMMAND "\"${CMAKE_COMMAND}\"")
-if(CTEST_USE_TAR_SOURCE)
+set (NEED_REPOSITORY_CHECKOUT 0)
+set (CTEST_CMAKE_COMMAND "\"${CMAKE_COMMAND}\"")
+if (CTEST_USE_TAR_SOURCE)
## Uncompress source if tar file provided
## --------------------------
- if(WIN32)
- message(STATUS "extracting... [${CMAKE_EXECUTABLE_NAME} x ${CTEST_USE_TAR_SOURCE}.zip]")
- execute_process(COMMAND ${CMAKE_EXECUTABLE_NAME} -E tar -xvf ${CTEST_DASHBOARD_ROOT}\\${CTEST_USE_TAR_SOURCE}.zip RESULT_VARIABLE rv)
- else()
- message(STATUS "extracting... [${CMAKE_EXECUTABLE_NAME} -E tar -xvf ${CTEST_USE_TAR_SOURCE}.tar]")
- execute_process(COMMAND ${CMAKE_EXECUTABLE_NAME} -E tar -xvf ${CTEST_DASHBOARD_ROOT}/${CTEST_USE_TAR_SOURCE}.tar RESULT_VARIABLE rv)
- endif()
+ if (WIN32)
+ message (STATUS "extracting... [${CMAKE_EXECUTABLE_NAME} x ${CTEST_DASHBOARD_ROOT}\\${CTEST_USE_TAR_SOURCE}.zip]")
+ execute_process (COMMAND ${CMAKE_EXECUTABLE_NAME} -E tar -xvf ${CTEST_DASHBOARD_ROOT}\\${CTEST_USE_TAR_SOURCE}.zip RESULT_VARIABLE rv)
+ else ()
+ message (STATUS "extracting... [${CMAKE_EXECUTABLE_NAME} -E tar -xvf ${CTEST_DASHBOARD_ROOT}/${CTEST_USE_TAR_SOURCE}.tar]")
+ execute_process (COMMAND ${CMAKE_EXECUTABLE_NAME} -E tar -xvf ${CTEST_DASHBOARD_ROOT}/${CTEST_USE_TAR_SOURCE}.tar RESULT_VARIABLE rv)
+ endif ()
- if(NOT rv EQUAL 0)
- message(STATUS "extracting... [error-(${rv}) clean up]")
- file(REMOVE_RECURSE "${CTEST_SOURCE_DIRECTORY}")
- message(FATAL_ERROR "error: extract of ${CTEST_USE_TAR_SOURCE} failed")
- endif()
+ if (NOT rv EQUAL 0)
+ message (STATUS "extracting... [error-(${rv}) clean up]")
+ file (REMOVE_RECURSE "${CTEST_SOURCE_DIRECTORY}")
+ message (FATAL_ERROR "error: extract of ${CTEST_USE_TAR_SOURCE} failed")
+ endif ()
- file(RENAME ${CTEST_DASHBOARD_ROOT}/${CTEST_USE_TAR_SOURCE} ${CTEST_SOURCE_DIRECTORY})
- set(LOCAL_SKIP_UPDATE "TRUE")
-else(CTEST_USE_TAR_SOURCE)
- if(LOCAL_UPDATE)
- if(CTEST_USE_GIT_SOURCE)
- find_program(CTEST_GIT_COMMAND NAMES git git.cmd)
- set(CTEST_GIT_UPDATE_OPTIONS)
+ file (RENAME ${CTEST_DASHBOARD_ROOT}/${CTEST_USE_TAR_SOURCE} ${CTEST_SOURCE_DIRECTORY})
+ set (LOCAL_SKIP_UPDATE "TRUE")
+else ()
+ if (LOCAL_UPDATE)
+ if (CTEST_USE_GIT_SOURCE)
+ find_program (CTEST_GIT_COMMAND NAMES git git.cmd)
+ set (CTEST_GIT_UPDATE_OPTIONS)
- if(NOT EXISTS "${CTEST_SOURCE_DIRECTORY}")
- set(NEED_REPOSITORY_CHECKOUT 1)
- endif()
+ if (NOT EXISTS "${CTEST_SOURCE_DIRECTORY}")
+ set (NEED_REPOSITORY_CHECKOUT 1)
+ endif ()
- if(${NEED_REPOSITORY_CHECKOUT})
- if(REPOSITORY_BRANCH)
- set(CTEST_GIT_options "clone \"${REPOSITORY_URL}\" --branch \"${REPOSITORY_BRANCH}\" --single-branch \"${CTEST_SOURCE_DIRECTORY}\" --recurse-submodules")
- else()
- set(CTEST_GIT_options "clone \"${REPOSITORY_URL}\" \"${CTEST_SOURCE_DIRECTORY}\" --recurse-submodules")
- endif()
- set(CTEST_CHECKOUT_COMMAND "${CTEST_GIT_COMMAND} ${CTEST_GIT_options}")
- else()
- set(CTEST_GIT_options "pull")
- endif()
- set(CTEST_UPDATE_COMMAND "${CTEST_GIT_COMMAND}")
- else(CTEST_USE_GIT_SOURCE)
+ if (${NEED_REPOSITORY_CHECKOUT})
+ if (REPOSITORY_BRANCH)
+ set (CTEST_GIT_options "clone \"${REPOSITORY_URL}\" --branch \"${REPOSITORY_BRANCH}\" --single-branch \"${CTEST_SOURCE_DIRECTORY}\" --recurse-submodules")
+ else ()
+ set (CTEST_GIT_options "clone \"${REPOSITORY_URL}\" \"${CTEST_SOURCE_DIRECTORY}\" --recurse-submodules")
+ endif ()
+ set (CTEST_CHECKOUT_COMMAND "${CTEST_GIT_COMMAND} ${CTEST_GIT_options}")
+ else ()
+ set (CTEST_GIT_options "pull")
+ endif ()
+ set (CTEST_UPDATE_COMMAND "${CTEST_GIT_COMMAND}")
+ else ()
## --------------------------
## use subversion to get source
#-----------------------------------------------------------------------------
## cygwin does not handle the find_package() call
## --------------------------
- set(CTEST_UPDATE_COMMAND "SVNCommand")
- if(NOT SITE_CYGWIN})
+ set (CTEST_UPDATE_COMMAND "SVNCommand")
+    if (NOT SITE_CYGWIN)
find_package (Subversion)
- set(CTEST_SVN_COMMAND "${Subversion_SVN_EXECUTABLE}")
- set(CTEST_UPDATE_COMMAND "${Subversion_SVN_EXECUTABLE}")
- else()
- set(CTEST_SVN_COMMAND "/usr/bin/svn")
- set(CTEST_UPDATE_COMMAND "/usr/bin/svn")
- endif()
+ set (CTEST_SVN_COMMAND "${Subversion_SVN_EXECUTABLE}")
+ set (CTEST_UPDATE_COMMAND "${Subversion_SVN_EXECUTABLE}")
+ else ()
+ set (CTEST_SVN_COMMAND "/usr/bin/svn")
+ set (CTEST_UPDATE_COMMAND "/usr/bin/svn")
+ endif ()
- if(NOT EXISTS "${CTEST_SOURCE_DIRECTORY}")
- set(NEED_REPOSITORY_CHECKOUT 1)
- endif()
+ if (NOT EXISTS "${CTEST_SOURCE_DIRECTORY}")
+ set (NEED_REPOSITORY_CHECKOUT 1)
+ endif ()
- if(NOT CTEST_REPO_VERSION)
- set(CTEST_REPO_VERSION "HEAD")
- endif()
- if(${NEED_REPOSITORY_CHECKOUT})
- set(CTEST_CHECKOUT_COMMAND
+ if (NOT CTEST_REPO_VERSION)
+ set (CTEST_REPO_VERSION "HEAD")
+ endif ()
+ if (${NEED_REPOSITORY_CHECKOUT})
+ set (CTEST_CHECKOUT_COMMAND
"\"${CTEST_SVN_COMMAND}\" co ${REPOSITORY_URL} \"${CTEST_SOURCE_DIRECTORY}\" -r ${CTEST_REPO_VERSION}")
- else()
- if(CTEST_REPO_VERSION)
- set(CTEST_SVN_UPDATE_OPTIONS "-r ${CTEST_REPO_VERSION}")
- endif()
- endif()
- endif(CTEST_USE_GIT_SOURCE)
- endif(LOCAL_UPDATE)
-endif(CTEST_USE_TAR_SOURCE)
+ else ()
+ if (CTEST_REPO_VERSION)
+ set (CTEST_SVN_UPDATE_OPTIONS "-r ${CTEST_REPO_VERSION}")
+ endif ()
+ endif ()
+ endif ()
+ endif ()
+endif ()
#-----------------------------------------------------------------------------
## Clear the build directory
## --------------------------
-set(CTEST_START_WITH_EMPTY_BINARY_DIRECTORY TRUE)
-if(NOT EXISTS "${CTEST_BINARY_DIRECTORY}")
- file(MAKE_DIRECTORY "${CTEST_BINARY_DIRECTORY}")
-else()
- ctest_empty_binary_directory(${CTEST_BINARY_DIRECTORY})
-endif()
+set (CTEST_START_WITH_EMPTY_BINARY_DIRECTORY TRUE)
+if (NOT EXISTS "${CTEST_BINARY_DIRECTORY}")
+ file (MAKE_DIRECTORY "${CTEST_BINARY_DIRECTORY}")
+else ()
+ ctest_empty_binary_directory (${CTEST_BINARY_DIRECTORY})
+endif ()
# Use multiple CPU cores to build
-include(ProcessorCount)
-ProcessorCount(N)
-if(NOT N EQUAL 0)
- if(NOT WIN32)
- set(CTEST_BUILD_FLAGS -j${N})
- endif()
- set(ctest_test_args ${ctest_test_args} PARALLEL_LEVEL ${N})
-endif()
+include (ProcessorCount)
+ProcessorCount (N)
+if (NOT N EQUAL 0)
+ if (NOT WIN32)
+ set (CTEST_BUILD_FLAGS -j${N})
+ endif ()
+ set (ctest_test_args ${ctest_test_args} PARALLEL_LEVEL ${N})
+endif ()
#-----------------------------------------------------------------------------
# Send the main script as a note.
-if(USE_AUTOTOOLS)
- ## autotools builds need to use make and does not use the cacheinit.cmake file
- ## -- make command
- ## -----------------
- find_program(MAKE NAMES make)
- list(APPEND CTEST_NOTES_FILES
- "${CTEST_SCRIPT_DIRECTORY}/${CTEST_SCRIPT_NAME}"
- "${CMAKE_CURRENT_LIST_FILE}"
- )
-else()
- list(APPEND CTEST_NOTES_FILES
- "${CTEST_SCRIPT_DIRECTORY}/${CTEST_SCRIPT_NAME}"
- "${CMAKE_CURRENT_LIST_FILE}"
- "${CTEST_SOURCE_DIRECTORY}/config/cmake/cacheinit.cmake"
- )
-endif()
+list (APPEND CTEST_NOTES_FILES
+ "${CTEST_SCRIPT_DIRECTORY}/${CTEST_SCRIPT_NAME}"
+ "${CMAKE_CURRENT_LIST_FILE}"
+ "${CTEST_SOURCE_DIRECTORY}/config/cmake/cacheinit.cmake"
+)
#-----------------------------------------------------------------------------
# Check for required variables.
# --------------------------
-foreach(req
+foreach (req
CTEST_CMAKE_GENERATOR
CTEST_SITE
CTEST_BUILD_NAME
)
- if(NOT DEFINED ${req})
- message(FATAL_ERROR "The containing script must set ${req}")
- endif()
-endforeach(req)
+ if (NOT DEFINED ${req})
+ message (FATAL_ERROR "The containing script must set ${req}")
+ endif ()
+endforeach ()
#-----------------------------------------------------------------------------
# Initialize the CTEST commands
#------------------------------
-if(USE_AUTOTOOLS)
- set(CTEST_CONFIGURE_COMMAND "${CTEST_SOURCE_DIRECTORY}/configure ${ADD_BUILD_OPTIONS}")
- set(CTEST_BUILD_COMMAND "${MAKE} ${CTEST_BUILD_FLAGS}")
- configure_file(${CTEST_SOURCE_DIRECTORY}/config/cmake/CTestCustom.cmake ${CTEST_BINARY_DIRECTORY}/CTestCustom.cmake)
- file(WRITE ${CTEST_BINARY_DIRECTORY}/CTestTestfile.cmake "ADD_TEST(makecheck \"${MAKE}\" \"${CTEST_BUILD_FLAGS}\" \"-i\" \"check\")")
-else(USE_AUTOTOOLS)
- if(LOCAL_MEMCHECK_TEST)
- find_program(CTEST_MEMORYCHECK_COMMAND NAMES valgrind)
- set (CTEST_CONFIGURE_COMMAND
- "${CTEST_CMAKE_COMMAND} -C \"${CTEST_SOURCE_DIRECTORY}/config/cmake/mccacheinit.cmake\" -DCMAKE_BUILD_TYPE:STRING=${CTEST_CONFIGURATION_TYPE} ${BUILD_OPTIONS} \"-G${CTEST_CMAKE_GENERATOR}\" \"${CTEST_SOURCE_DIRECTORY}\""
- )
- else()
- if(LOCAL_COVERAGE_TEST)
- find_program(CTEST_COVERAGE_COMMAND NAMES gcov)
- endif()
- set (CTEST_CONFIGURE_COMMAND
- "${CTEST_CMAKE_COMMAND} -C \"${CTEST_SOURCE_DIRECTORY}/config/cmake/cacheinit.cmake\" -DCMAKE_BUILD_TYPE:STRING=${CTEST_CONFIGURATION_TYPE} ${BUILD_OPTIONS} \"-G${CTEST_CMAKE_GENERATOR}\" \"${CTEST_SOURCE_DIRECTORY}\""
- )
- endif()
+if(CMAKE_GENERATOR_TOOLSET)
+ set(CTEST_CONFIGURE_TOOLSET "-T${CMAKE_GENERATOR_TOOLSET}")
+else ()
+ set(CTEST_CONFIGURE_TOOLSET "")
endif()
-
+if (LOCAL_MEMCHECK_TEST)
+ find_program (CTEST_MEMORYCHECK_COMMAND NAMES valgrind)
+ set (CTEST_CONFIGURE_COMMAND
+ "${CTEST_CMAKE_COMMAND} -C \"${CTEST_SOURCE_DIRECTORY}/config/cmake/mccacheinit.cmake\" -DCMAKE_BUILD_TYPE:STRING=${CTEST_CONFIGURATION_TYPE} ${BUILD_OPTIONS} \"-G${CTEST_CMAKE_GENERATOR}\" \"${CTEST_CONFIGURE_TOOLSET}\" \"${CTEST_SOURCE_DIRECTORY}\""
+ )
+else ()
+ if (LOCAL_COVERAGE_TEST)
+ find_program (CTEST_COVERAGE_COMMAND NAMES gcov)
+ endif ()
+ set (CTEST_CONFIGURE_COMMAND
+ "${CTEST_CMAKE_COMMAND} -C \"${CTEST_SOURCE_DIRECTORY}/config/cmake/cacheinit.cmake\" -DCMAKE_BUILD_TYPE:STRING=${CTEST_CONFIGURATION_TYPE} ${BUILD_OPTIONS} \"-G${CTEST_CMAKE_GENERATOR}\" \"${CTEST_CONFIGURE_TOOLSET}\" \"${CTEST_SOURCE_DIRECTORY}\""
+ )
+endif ()
+
#-----------------------------------------------------------------------------
## -- set output to english
-set($ENV{LC_MESSAGES} "en_EN")
+set ($ENV{LC_MESSAGES} "en_EN")
# Print summary information.
-foreach(v
+foreach (v
CTEST_SITE
CTEST_BUILD_NAME
CTEST_SOURCE_DIRECTORY
@@ -238,9 +233,9 @@ foreach(v
CTEST_SCRIPT_DIRECTORY
CTEST_USE_LAUNCHERS
)
- set(vars "${vars} ${v}=[${${v}}]\n")
-endforeach(v)
-message(STATUS "Dashboard script configuration:\n${vars}\n")
+ set (vars "${vars} ${v}=[${${v}}]\n")
+endforeach ()
+message (STATUS "Dashboard script configuration:\n${vars}\n")
#-----------------------------------------------------------------------------
#-----------------------------------------------------------------------------
@@ -252,63 +247,63 @@ message(STATUS "Dashboard script configuration:\n${vars}\n")
## -- LOCAL_COVERAGE_TEST executes code coverage process
## --------------------------
ctest_start (${MODEL} TRACK ${MODEL})
- if(LOCAL_UPDATE)
+ if (LOCAL_UPDATE)
ctest_update (SOURCE "${CTEST_SOURCE_DIRECTORY}")
- endif()
- configure_file(${CTEST_SOURCE_DIRECTORY}/config/cmake/CTestCustom.cmake ${CTEST_BINARY_DIRECTORY}/CTestCustom.cmake)
+ endif ()
+ configure_file (${CTEST_SOURCE_DIRECTORY}/config/cmake/CTestCustom.cmake ${CTEST_BINARY_DIRECTORY}/CTestCustom.cmake)
ctest_read_custom_files ("${CTEST_BINARY_DIRECTORY}")
ctest_configure (BUILD "${CTEST_BINARY_DIRECTORY}" RETURN_VALUE res)
- if(LOCAL_SUBMIT)
+ if (LOCAL_SUBMIT)
ctest_submit (PARTS Update Configure Notes)
- endif()
- if(${res} LESS 0 OR ${res} GREATER 0)
- file(APPEND ${CTEST_SCRIPT_DIRECTORY}/FailedCTest.txt "Failed Configure: ${res}\n")
- endif()
+ endif ()
+ if (${res} LESS 0 OR ${res} GREATER 0)
+ file (APPEND ${CTEST_SCRIPT_DIRECTORY}/FailedCTest.txt "Failed Configure: ${res}\n")
+ endif ()
ctest_build (BUILD "${CTEST_BINARY_DIRECTORY}" APPEND RETURN_VALUE res NUMBER_ERRORS errval)
- if(LOCAL_SUBMIT)
+ if (LOCAL_SUBMIT)
ctest_submit (PARTS Build)
- endif()
- if(${res} LESS 0 OR ${res} GREATER 0 OR ${errval} GREATER 0)
- file(APPEND ${CTEST_SCRIPT_DIRECTORY}/FailedCTest.txt "Failed ${errval} Build: ${res}\n")
- endif()
+ endif ()
+ if (${res} LESS 0 OR ${res} GREATER 0 OR ${errval} GREATER 0)
+ file (APPEND ${CTEST_SCRIPT_DIRECTORY}/FailedCTest.txt "Failed ${errval} Build: ${res}\n")
+ endif ()
- if(NOT LOCAL_SKIP_TEST)
- if(NOT LOCAL_MEMCHECK_TEST)
+ if (NOT LOCAL_SKIP_TEST)
+ if (NOT LOCAL_MEMCHECK_TEST)
ctest_test (BUILD "${CTEST_BINARY_DIRECTORY}" APPEND ${ctest_test_args} RETURN_VALUE res)
- if(LOCAL_SUBMIT)
+ if (LOCAL_SUBMIT)
ctest_submit (PARTS Test)
- endif()
- if(${res} LESS 0 OR ${res} GREATER 0)
- file(APPEND ${CTEST_SCRIPT_DIRECTORY}/FailedCTest.txt "Failed Tests: ${res}\n")
- endif()
- else()
+ endif ()
+ if (${res} LESS 0 OR ${res} GREATER 0)
+ file (APPEND ${CTEST_SCRIPT_DIRECTORY}/FailedCTest.txt "Failed Tests: ${res}\n")
+ endif ()
+ else ()
ctest_memcheck (BUILD "${CTEST_BINARY_DIRECTORY}" APPEND ${ctest_test_args})
- if(LOCAL_SUBMIT)
+ if (LOCAL_SUBMIT)
ctest_submit (PARTS MemCheck)
- endif()
- endif()
- if(LOCAL_COVERAGE_TEST)
+ endif ()
+ endif ()
+ if (LOCAL_COVERAGE_TEST)
ctest_coverage (BUILD "${CTEST_BINARY_DIRECTORY}" APPEND)
- if(LOCAL_SUBMIT)
+ if (LOCAL_SUBMIT)
ctest_submit (PARTS Coverage)
- endif()
- endif()
- endif(NOT LOCAL_SKIP_TEST)
+ endif ()
+ endif ()
+ endif ()
- if(NOT LOCAL_MEMCHECK_TEST AND NOT LOCAL_NO_PACKAGE AND NOT LOCAL_SKIP_BUILD)
+ if (NOT LOCAL_MEMCHECK_TEST AND NOT LOCAL_NO_PACKAGE AND NOT LOCAL_SKIP_BUILD)
##-----------------------------------------------
## Package the product
##-----------------------------------------------
- execute_process(COMMAND cpack -C ${CTEST_CONFIGURATION_TYPE} -V
+ execute_process (COMMAND cpack -C ${CTEST_CONFIGURATION_TYPE} -V
WORKING_DIRECTORY ${CTEST_BINARY_DIRECTORY}
RESULT_VARIABLE cpackResult
OUTPUT_VARIABLE cpackLog
ERROR_VARIABLE cpackLog.err
)
- file(WRITE ${CTEST_BINARY_DIRECTORY}/cpack.log "${cpackLog.err}" "${cpackLog}")
- if(cpackResult GREATER 0)
- file(APPEND ${CTEST_SCRIPT_DIRECTORY}/FailedCTest.txt "Failed packaging: ${cpackResult}:${cpackLog.err} \n")
- endif()
- endif()
+ file (WRITE ${CTEST_BINARY_DIRECTORY}/cpack.log "${cpackLog.err}" "${cpackLog}")
+ if (cpackResult GREATER 0)
+ file (APPEND ${CTEST_SCRIPT_DIRECTORY}/FailedCTest.txt "Failed packaging: ${cpackResult}:${cpackLog.err} \n")
+ endif ()
+ endif ()
#-----------------------------------------------------------------------------
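
CTestScript.cmake never runs on its own; a driver defines the generator, directory, and configuration variables and then include()s it, which is exactly what HDF5config.cmake does in the next file. A stripped-down, illustrative local driver for a Linux run (names and layout are placeholders):

    # my_dashboard.cmake (placeholder name), run as:
    #   ctest -S my_dashboard.cmake -C Release -VV -O test.log
    set (CTEST_DASHBOARD_ROOT "${CTEST_SCRIPT_DIRECTORY}")
    set (CTEST_SOURCE_DIRECTORY "${CTEST_DASHBOARD_ROOT}/hdf5")    # placeholder checkout location
    set (CTEST_BINARY_DIRECTORY "${CTEST_DASHBOARD_ROOT}/build")
    set (CTEST_CMAKE_GENERATOR "Unix Makefiles")
    set (CTEST_CONFIGURATION_TYPE "Release")
    set (MODEL "Experimental")
    include (${CTEST_SCRIPT_DIRECTORY}/CTestScript.cmake)

Because SITE_OS_NAME is not set here, the included script derives CTEST_SITE and CTEST_BUILD_NAME itself from hostname and uname, just as it does for the Linux path of HDF5config.cmake below.
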
diff --git a/config/cmake/scripts/HDF5config.cmake b/config/cmake/scripts/HDF5config.cmake
index 2b1b097..e500f14 100755
--- a/config/cmake/scripts/HDF5config.cmake
+++ b/config/cmake/scripts/HDF5config.cmake
@@ -1,10 +1,21 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
#############################################################################################
### ${CTEST_SCRIPT_ARG} is of the form OPTION=VALUE ###
### BUILD_GENERATOR required [Unix, VS2015, VS201564, VS2013, VS201364, VS2012, VS201264] ###
-### ctest -S HDF5config.cmake,BUILD_GENERATOR=VS201264 -C Release -V -O hdf5.log ###
+### ctest -S HDF5config.cmake,BUILD_GENERATOR=VS201264 -C Release -VV -O hdf5.log ###
#############################################################################################
-cmake_minimum_required(VERSION 3.1.0 FATAL_ERROR)
+cmake_minimum_required (VERSION 3.2.2 FATAL_ERROR)
############################################################################
# Usage:
# ctest -S HDF5config.cmake,OPTION=VALUE -C Release -VV -O test.log
@@ -21,14 +32,14 @@ cmake_minimum_required(VERSION 3.1.0 FATAL_ERROR)
# INSTALLDIR - root folder where hdf5 is installed
# CTEST_CONFIGURATION_TYPE - Release, Debug, etc
# CTEST_SOURCE_NAME - source folder
-# STATIC_LIBRARIES - Build/use static libraries
+# STATIC_ONLY - Build/use static libraries
# FORTRAN_LIBRARIES - Build/use fortran libraries
# JAVA_LIBRARIES - Build/use java libraries
# NO_MAC_FORTRAN - Yes to be SHARED on a Mac
##############################################################################
-set(CTEST_SOURCE_VERSION 1.10.0)
-set(CTEST_SOURCE_VERSEXT "")
+set (CTEST_SOURCE_VERSION "1.11.0")
+set (CTEST_SOURCE_VERSEXT "")
##############################################################################
# handle input parameters to script.
@@ -36,245 +47,244 @@ set(CTEST_SOURCE_VERSEXT "")
#INSTALLDIR - HDF5-1.10.0 root folder
#CTEST_CONFIGURATION_TYPE - Release, Debug, RelWithDebInfo
#CTEST_SOURCE_NAME - name of source folder; HDF5-1.10.0
-#STATIC_LIBRARIES - Default is YES
+#STATIC_ONLY - Default is YES
#FORTRAN_LIBRARIES - Default is NO
#JAVA_LIBRARIES - Default is NO
#NO_MAC_FORTRAN - set to TRUE to allow shared libs on a Mac
-if(DEFINED CTEST_SCRIPT_ARG)
+if (DEFINED CTEST_SCRIPT_ARG)
# transform ctest script arguments of the form
# script.ctest,var1=value1,var2=value2
# to variables with the respective names set to the respective values
- string(REPLACE "," ";" script_args "${CTEST_SCRIPT_ARG}")
- foreach(current_var ${script_args})
+ string (REPLACE "," ";" script_args "${CTEST_SCRIPT_ARG}")
+ foreach (current_var ${script_args})
if ("${current_var}" MATCHES "^([^=]+)=(.+)$")
- set("${CMAKE_MATCH_1}" "${CMAKE_MATCH_2}")
- endif()
- endforeach()
-endif()
+ set ("${CMAKE_MATCH_1}" "${CMAKE_MATCH_2}")
+ endif ()
+ endforeach ()
+endif ()
# build generator must be defined
-if(NOT DEFINED BUILD_GENERATOR)
- message(FATAL_ERROR "BUILD_GENERATOR must be defined - Unix, VS2015, VS201564, VS2013, VS201364, VS2012, or VS201264")
-else()
- if(${BUILD_GENERATOR} STREQUAL "Unix")
- set(CTEST_CMAKE_GENERATOR "Unix Makefiles")
- elseif(${BUILD_GENERATOR} STREQUAL "VS2015")
- set(CTEST_CMAKE_GENERATOR "Visual Studio 14 2015")
- elseif(${BUILD_GENERATOR} STREQUAL "VS201564")
- set(CTEST_CMAKE_GENERATOR "Visual Studio 14 2015 Win64")
- elseif(${BUILD_GENERATOR} STREQUAL "VS2013")
- set(CTEST_CMAKE_GENERATOR "Visual Studio 12 2013")
- elseif(${BUILD_GENERATOR} STREQUAL "VS201364")
- set(CTEST_CMAKE_GENERATOR "Visual Studio 12 2013 Win64")
- elseif(${BUILD_GENERATOR} STREQUAL "VS2012")
- set(CTEST_CMAKE_GENERATOR "Visual Studio 11 2012")
- elseif(${BUILD_GENERATOR} STREQUAL "VS201264")
- set(CTEST_CMAKE_GENERATOR "Visual Studio 11 2012 Win64")
- else()
- message(FATAL_ERROR "Invalid BUILD_GENERATOR must be - Unix, VS2015, VS201564, VS2013, VS201364, VS2012, or VS201264")
- endif()
-endif()
+if (NOT DEFINED BUILD_GENERATOR)
+ message (FATAL_ERROR "BUILD_GENERATOR must be defined - Unix, VS2015, VS201564, VS2013, VS201364, VS2012, or VS201264")
+else ()
+ if (${BUILD_GENERATOR} STREQUAL "Unix")
+ set (CTEST_CMAKE_GENERATOR "Unix Makefiles")
+ elseif (${BUILD_GENERATOR} STREQUAL "VS2015")
+ set (CTEST_CMAKE_GENERATOR "Visual Studio 14 2015")
+ elseif (${BUILD_GENERATOR} STREQUAL "VS201564")
+ set (CTEST_CMAKE_GENERATOR "Visual Studio 14 2015 Win64")
+ elseif (${BUILD_GENERATOR} STREQUAL "VS2013")
+ set (CTEST_CMAKE_GENERATOR "Visual Studio 12 2013")
+ elseif (${BUILD_GENERATOR} STREQUAL "VS201364")
+ set (CTEST_CMAKE_GENERATOR "Visual Studio 12 2013 Win64")
+ elseif (${BUILD_GENERATOR} STREQUAL "VS2012")
+ set (CTEST_CMAKE_GENERATOR "Visual Studio 11 2012")
+ elseif (${BUILD_GENERATOR} STREQUAL "VS201264")
+ set (CTEST_CMAKE_GENERATOR "Visual Studio 11 2012 Win64")
+ else ()
+ message (FATAL_ERROR "Invalid BUILD_GENERATOR must be - Unix, VS2015, VS201564, VS2013, VS201364, VS2012, or VS201264")
+ endif ()
+endif ()
###################################################################
### Following Line is one of [Release, RelWithDebInfo, Debug] #####
-set(CTEST_CONFIGURATION_TYPE "$ENV{CMAKE_CONFIG_TYPE}")
+set (CTEST_CONFIGURATION_TYPE "$ENV{CMAKE_CONFIG_TYPE}")
###################################################################
-if(NOT DEFINED INSTALLDIR)
- if(WIN32)
- set(INSTALLDIR "C:/Program Files/HDF_Group/HDF5/${CTEST_SOURCE_VERSION}")
- else()
- set(INSTALLDIR "${CTEST_SCRIPT_DIRECTORY}/HDF_Group/HDF5/${CTEST_SOURCE_VERSION}")
- endif()
-endif()
-if(NOT DEFINED CTEST_CONFIGURATION_TYPE)
- set(CTEST_CONFIGURATION_TYPE "Release")
-endif()
-if(NOT DEFINED CTEST_SOURCE_NAME)
- set(CTEST_SOURCE_NAME "hdf5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}")
-endif()
-if(NOT DEFINED STATIC_LIBRARIES)
- set(STATICLIBRARIES "YES")
-else()
- set(STATICLIBRARIES "NO")
-endif()
-if(NOT DEFINED FORTRAN_LIBRARIES)
- set(FORTRANLIBRARIES "NO")
-else()
- set(FORTRANLIBRARIES "YES")
-endif()
-if(NOT DEFINED JAVA_LIBRARIES)
- set(JAVALIBRARIES "NO")
-else()
- set(JAVALIBRARIES "YES")
-endif()
+if (NOT DEFINED INSTALLDIR)
+ if (WIN32)
+ set (INSTALLDIR "C:/Program Files/HDF_Group/HDF5/${CTEST_SOURCE_VERSION}")
+ else ()
+ set (INSTALLDIR "${CTEST_SCRIPT_DIRECTORY}/HDF_Group/HDF5/${CTEST_SOURCE_VERSION}")
+ endif ()
+endif ()
+if (NOT DEFINED CTEST_CONFIGURATION_TYPE)
+ set (CTEST_CONFIGURATION_TYPE "Release")
+endif ()
+if (NOT DEFINED CTEST_SOURCE_NAME)
+ set (CTEST_SOURCE_NAME "hdf5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}")
+endif ()
+if (NOT DEFINED STATIC_ONLY)
+ set (STATICONLYLIBRARIES "YES")
+else ()
+ set (STATICONLYLIBRARIES "NO")
+endif ()
+if (NOT DEFINED FORTRAN_LIBRARIES)
+ set (FORTRANLIBRARIES "NO")
+else ()
+ set(FORTRANLIBRARIES "YES")
+endif ()
+if (NOT DEFINED JAVA_LIBRARIES)
+ set (JAVALIBRARIES "NO")
+else ()
+ set (JAVALIBRARIES "YES")
+endif ()
-set(CTEST_BINARY_NAME "build")
-set(CTEST_DASHBOARD_ROOT "${CTEST_SCRIPT_DIRECTORY}")
-if(WIN32)
- set(CTEST_SOURCE_DIRECTORY "${CTEST_DASHBOARD_ROOT}\\${CTEST_SOURCE_NAME}")
- set(CTEST_BINARY_DIRECTORY "${CTEST_DASHBOARD_ROOT}\\${CTEST_BINARY_NAME}")
-else()
- set(CTEST_SOURCE_DIRECTORY "${CTEST_DASHBOARD_ROOT}/${CTEST_SOURCE_NAME}")
- set(CTEST_BINARY_DIRECTORY "${CTEST_DASHBOARD_ROOT}/${CTEST_BINARY_NAME}")
-endif()
+set (CTEST_BINARY_NAME "build")
+set (CTEST_DASHBOARD_ROOT "${CTEST_SCRIPT_DIRECTORY}")
+if (WIN32)
+ set (CTEST_SOURCE_DIRECTORY "${CTEST_DASHBOARD_ROOT}\\${CTEST_SOURCE_NAME}")
+ set (CTEST_BINARY_DIRECTORY "${CTEST_DASHBOARD_ROOT}\\${CTEST_BINARY_NAME}")
+else ()
+ set (CTEST_SOURCE_DIRECTORY "${CTEST_DASHBOARD_ROOT}/${CTEST_SOURCE_NAME}")
+ set (CTEST_BINARY_DIRECTORY "${CTEST_DASHBOARD_ROOT}/${CTEST_BINARY_NAME}")
+endif ()
###################################################################
######### Following describes compiler ############
-if(WIN32)
- set(SITE_OS_NAME "Windows")
- set(SITE_OS_VERSION "WIN7")
- if(${BUILD_GENERATOR} STREQUAL "VS201564")
- set(SITE_OS_BITS "64")
- set(SITE_COMPILER_NAME "vs2015")
- set(SITE_COMPILER_VERSION "14")
- elseif(${BUILD_GENERATOR} STREQUAL "VS2015")
- set(SITE_OS_BITS "32")
- set(SITE_COMPILER_NAME "vs2015")
- set(SITE_COMPILER_VERSION "14")
- elseif(${BUILD_GENERATOR} STREQUAL "VS201364")
- set(SITE_OS_BITS "64")
- set(SITE_COMPILER_NAME "vs2013")
- set(SITE_COMPILER_VERSION "12")
- elseif(${BUILD_GENERATOR} STREQUAL "VS2013")
- set(SITE_OS_BITS "32")
- set(SITE_COMPILER_NAME "vs2013")
- set(SITE_COMPILER_VERSION "12")
- elseif(${BUILD_GENERATOR} STREQUAL "VS201264")
- set(SITE_OS_BITS "64")
- set(SITE_COMPILER_NAME "vs2012")
- set(SITE_COMPILER_VERSION "11")
- elseif(${BUILD_GENERATOR} STREQUAL "VS2012")
- set(SITE_OS_BITS "32")
- set(SITE_COMPILER_NAME "vs2012")
- set(SITE_COMPILER_VERSION "11")
- endif()
+if (WIN32)
+ set (SITE_OS_NAME "Windows")
+ set (SITE_OS_VERSION "WIN7")
+ if (${BUILD_GENERATOR} STREQUAL "VS201564")
+ set (SITE_OS_BITS "64")
+ set (SITE_COMPILER_NAME "vs2015")
+ set (SITE_COMPILER_VERSION "14")
+ elseif (${BUILD_GENERATOR} STREQUAL "VS2015")
+ set (SITE_OS_BITS "32")
+ set (SITE_COMPILER_NAME "vs2015")
+ set (SITE_COMPILER_VERSION "14")
+ elseif (${BUILD_GENERATOR} STREQUAL "VS201364")
+ set (SITE_OS_BITS "64")
+ set (SITE_COMPILER_NAME "vs2013")
+ set (SITE_COMPILER_VERSION "12")
+ elseif (${BUILD_GENERATOR} STREQUAL "VS2013")
+ set (SITE_OS_BITS "32")
+ set (SITE_COMPILER_NAME "vs2013")
+ set (SITE_COMPILER_VERSION "12")
+ elseif (${BUILD_GENERATOR} STREQUAL "VS201264")
+ set (SITE_OS_BITS "64")
+ set (SITE_COMPILER_NAME "vs2012")
+ set (SITE_COMPILER_VERSION "11")
+ elseif (${BUILD_GENERATOR} STREQUAL "VS2012")
+ set (SITE_OS_BITS "32")
+ set (SITE_COMPILER_NAME "vs2012")
+ set (SITE_COMPILER_VERSION "11")
+ endif ()
## Set the following to unique id your computer ##
- set(CTEST_SITE "WIN7${BUILD_GENERATOR}.XXXX")
-else()
+ set (CTEST_SITE "WIN7${BUILD_GENERATOR}.XXXX")
+else ()
+ set (CTEST_CMAKE_GENERATOR "Unix Makefiles")
## Set the following to unique id your computer ##
- if(APPLE)
- set(CTEST_SITE "MAC.XXXX")
- else()
- set(CTEST_SITE "LINUX.XXXX")
- endif()
-endif()
+ if (APPLE)
+ set (CTEST_SITE "MAC.XXXX")
+ else ()
+ set (CTEST_SITE "LINUX.XXXX")
+ endif ()
+ if (APPLE)
+ execute_process (COMMAND xcrun --find cc OUTPUT_VARIABLE XCODE_CC OUTPUT_STRIP_TRAILING_WHITESPACE)
+ execute_process (COMMAND xcrun --find c++ OUTPUT_VARIABLE XCODE_CXX OUTPUT_STRIP_TRAILING_WHITESPACE)
+ set (ENV{CC} "${XCODE_CC}")
+ set (ENV{CXX} "${XCODE_CXX}")
+ set (CTEST_USE_LAUNCHERS 1)
+ set (RR_WARNINGS_COMMON "-Wno-format-nonliteral -Wno-cast-align -Wno-unused -Wno-unused-variable -Wno-unused-function -Wno-self-assign -Wno-unused-parameter -Wno-sign-compare")
+ set (RR_WARNINGS_C "${RR_WARNINGS_COMMON} -Wno-deprecated-declarations -Wno-uninitialized")
+ set (RR_WARNINGS_CXX "${RR_WARNINGS_COMMON} -Woverloaded-virtual -Wshadow -Wwrite-strings -Wc++11-compat")
+ set (RR_FLAGS_COMMON "-g -O0 -fstack-protector-all -D_FORTIFY_SOURCE=2")
+ set (RR_FLAGS_C "${RR_FLAGS_COMMON}")
+ set (RR_FLAGS_CXX "${RR_FLAGS_COMMON}")
+ set (ENV{CFLAGS} "${RR_WARNINGS_C} ${RR_FLAGS_C}")
+ set (ENV{CXXFLAGS} "${RR_WARNINGS_CXX} ${RR_FLAGS_CXX}")
+ endif ()
+endif ()
###################################################################
###################################################################
######### Following is for submission to CDash ############
###################################################################
-set(MODEL "Experimental")
+set (MODEL "Experimental")
###################################################################
###################################################################
##### Following controls CDash submission #####
-#set(LOCAL_SUBMIT "TRUE")
+#set (LOCAL_SUBMIT "TRUE")
##### Following controls test process #####
-#set(LOCAL_SKIP_TEST "TRUE")
-#set(LOCAL_MEMCHECK_TEST "TRUE")
-#set(LOCAL_COVERAGE_TEST "TRUE")
+#set (LOCAL_SKIP_TEST "TRUE")
+#set (LOCAL_MEMCHECK_TEST "TRUE")
+#set (LOCAL_COVERAGE_TEST "TRUE")
##### Following controls cpack command #####
-#set(LOCAL_NO_PACKAGE "TRUE")
+#set (LOCAL_NO_PACKAGE "TRUE")
##### Following controls source update #####
-#set(LOCAL_UPDATE "TRUE")
-set(REPOSITORY_URL "http://svn.hdfgroup.uiuc.edu/hdf5/trunk")
+#set (LOCAL_UPDATE "TRUE")
+set (REPOSITORY_URL "https://git@bitbucket.hdfgroup.org/scm/hdffv/hdf5.git")
+set (REPOSITORY_BRANCH "develop")
+
#uncomment to use a compressed source file: *.tar on linux or mac *.zip on windows
#set(CTEST_USE_TAR_SOURCE "${CTEST_SOURCE_VERSION}")
###################################################################
###################################################################
-#### Change default configuration of options in config/cmake/cacheinit.cmake file ###
-#### format: set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DXXX:YY=ZZZZ")
-
-###################################################################
-if(${STATICLIBRARIES})
- set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DBUILD_SHARED_LIBS:BOOL=OFF")
+if (${STATICONLYLIBRARIES})
+ set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DBUILD_SHARED_LIBS:BOOL=OFF")
######### Following describes computer ############
## following is optional to describe build ##
- set(SITE_BUILDNAME_SUFFIX "STATIC")
-endif()
+ set (SITE_BUILDNAME_SUFFIX "STATIC")
+endif ()
###################################################################
-
-### uncomment/comment and change the following lines for other configuration options
-
-#### ext libraries ####
-### ext libs from tgz
-set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ALLOW_EXTERNAL_SUPPORT:STRING=TGZ -DTGZPATH:PATH=${CTEST_SCRIPT_DIRECTORY}")
-### ext libs from svn
-#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ALLOW_EXTERNAL_SUPPORT:STRING=SVN")
-### ext libs on system
-#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DZLIB_LIBRARY:FILEPATH=some_location/lib/zlib.lib -DZLIB_INCLUDE_DIR:PATH=some_location/include")
-#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DSZIP_LIBRARY:FILEPATH=some_location/lib/szlib.lib -DSZIP_INCLUDE_DIR:PATH=some_location/include")
-### disable ext libs building
-#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_Z_LIB_SUPPORT:BOOL=OFF")
-#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_SZIP_SUPPORT:BOOL=OFF")
-#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_SZIP_ENCODING:BOOL=OFF")
#### fortran ####
-if(${FORTRANLIBRARIES})
- set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_FORTRAN:BOOL=ON")
-else()
- set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_FORTRAN:BOOL=OFF")
-endif()
+if (${FORTRANLIBRARIES})
+ set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_FORTRAN:BOOL=ON")
+ ### enable Fortran 2003 depends on HDF5_BUILD_FORTRAN
+ set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_F2003:BOOL=ON")
+else ()
+ set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_FORTRAN:BOOL=OFF")
+ ### enable Fortran 2003 depends on HDF5_BUILD_FORTRAN
+ set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_F2003:BOOL=OFF")
+endif ()
#### java ####
-if(${JAVALIBRARIES})
- set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_JAVA:BOOL=ON")
-else()
- set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_JAVA:BOOL=OFF")
-endif()
-
-### disable test program builds
-#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DBUILD_TESTING:BOOL=OFF")
-
-### disable packaging
-#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_NO_PACKAGES:BOOL=ON")
-### Create install package with external libraries (szip, zlib, jpeg)
-set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_PACKAGE_EXTLIBS:BOOL=ON")
+if (${JAVALIBRARIES})
+ set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_JAVA:BOOL=ON")
+else ()
+ set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_JAVA:BOOL=OFF")
+endif ()
### change install prefix
-set(BUILD_OPTIONS "${BUILD_OPTIONS} -DCMAKE_INSTALL_PREFIX:PATH=${INSTALLDIR}")
-set(BUILD_OPTIONS "${BUILD_OPTIONS} -DCTEST_CONFIGURATION_TYPE:STRING=$ENV{CMAKE_CONFIG_TYPE}")
+set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DCMAKE_INSTALL_PREFIX:PATH=${INSTALLDIR}")
+set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DCTEST_CONFIGURATION_TYPE:STRING=$ENV{CMAKE_CONFIG_TYPE}")
###################################################################
-if(WIN32)
- include(${CTEST_SCRIPT_DIRECTORY}\\CTestScript.cmake)
- if(EXISTS "${CTEST_BINARY_DIRECTORY}\\HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-win${SITE_OS_BITS}.exe")
- file(COPY "${CTEST_BINARY_DIRECTORY}\\HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-win${SITE_OS_BITS}.exe" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
- endif()
- if(EXISTS "${CTEST_BINARY_DIRECTORY}\\HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-win${SITE_OS_BITS}.msi")
- file(COPY "${CTEST_BINARY_DIRECTORY}\\HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-win${SITE_OS_BITS}.msi" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
- endif()
- if(EXISTS "${CTEST_BINARY_DIRECTORY}\\HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-win${SITE_OS_BITS}.zip")
- file(COPY "${CTEST_BINARY_DIRECTORY}\\HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-win${SITE_OS_BITS}.zip" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
- endif()
-else()
- include(${CTEST_SCRIPT_DIRECTORY}/CTestScript.cmake)
- if(APPLE)
- if(EXISTS "${CTEST_BINARY_DIRECTORY}/HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-Darwin.dmg")
- file(COPY "${CTEST_BINARY_DIRECTORY}/HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-Darwin.dmg" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
- endif()
- if(EXISTS "${CTEST_BINARY_DIRECTORY}/HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-Darwin.tar.gz")
- file(COPY "${CTEST_BINARY_DIRECTORY}/HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-Darwin.tar.gz" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
- endif()
- if(EXISTS "${CTEST_BINARY_DIRECTORY}/HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-Darwin.sh")
- file(COPY "${CTEST_BINARY_DIRECTORY}/HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-Darwin.sh" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
- endif()
- else()
- if(CYGWIN)
- if(EXISTS "${CTEST_BINARY_DIRECTORY}/HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-CYGWIN.sh")
- file(COPY "${CTEST_BINARY_DIRECTORY}/HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-CYGWIN.sh" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
- endif()
- if(EXISTS "${CTEST_BINARY_DIRECTORY}/HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-CYGWIN.tar.gz")
- file(COPY "${CTEST_BINARY_DIRECTORY}/HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-CYGWIN.tar.gz" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
- endif()
- else()
- if(EXISTS "${CTEST_BINARY_DIRECTORY}/HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-Linux.sh")
- file(COPY "${CTEST_BINARY_DIRECTORY}/HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-Linux.sh" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
- endif()
- if(EXISTS "${CTEST_BINARY_DIRECTORY}/HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-Linux.tar.gz")
- file(COPY "${CTEST_BINARY_DIRECTORY}/HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-Linux.tar.gz" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
- endif()
- endif()
- endif()
-endif()
+if (WIN32)
+ set (BINFILEBASE "HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-win${SITE_OS_BITS}")
+ include (${CTEST_SCRIPT_DIRECTORY}\\HDF5options.cmake)
+ include (${CTEST_SCRIPT_DIRECTORY}\\CTestScript.cmake)
+ if (EXISTS "${CTEST_BINARY_DIRECTORY}\\${BINFILEBASE}.exe")
+ file (COPY "${CTEST_BINARY_DIRECTORY}\\${BINFILEBASE}.exe" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
+ endif ()
+ if (EXISTS "${CTEST_BINARY_DIRECTORY}\\${BINFILEBASE}.msi")
+ file (COPY "${CTEST_BINARY_DIRECTORY}\\${BINFILEBASE}.msi" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
+ endif ()
+ if (EXISTS "${CTEST_BINARY_DIRECTORY}\\${BINFILEBASE}.zip")
+ file (COPY "${CTEST_BINARY_DIRECTORY}\\${BINFILEBASE}.zip" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
+ endif ()
+else ()
+ set (BINFILEBASE "HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}")
+ include (${CTEST_SCRIPT_DIRECTORY}/HDF5options.cmake)
+ include (${CTEST_SCRIPT_DIRECTORY}/CTestScript.cmake)
+ if (APPLE)
+ if (EXISTS "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-Darwin.dmg")
+ file (COPY "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-Darwin.dmg" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
+ endif ()
+ if (EXISTS "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-Darwin.tar.gz")
+ file (COPY "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-Darwin.tar.gz" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
+ endif ()
+ if (EXISTS "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-Darwin.sh")
+ file (COPY "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-Darwin.sh" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
+ endif ()
+ else ()
+ if (CYGWIN)
+ if (EXISTS "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-CYGWIN.sh")
+ file (COPY "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-CYGWIN.sh" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
+ endif ()
+ if (EXISTS "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-CYGWIN.tar.gz")
+ file (COPY "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-CYGWIN.tar.gz" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
+ endif ()
+ else ()
+ if (EXISTS "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-Linux.sh")
+ file (COPY "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-Linux.sh" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
+ endif ()
+ if (EXISTS "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-Linux.tar.gz")
+ file (COPY "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-Linux.tar.gz" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
+ endif ()
+ endif ()
+ endif ()
+endif ()
diff --git a/config/cmake/scripts/HDF5options.cmake b/config/cmake/scripts/HDF5options.cmake
new file mode 100755
index 0000000..b090434
--- /dev/null
+++ b/config/cmake/scripts/HDF5options.cmake
@@ -0,0 +1,52 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
+#############################################################################################
+#### Change default configuration of options in config/cmake/cacheinit.cmake file ###
+#### format: set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DXXX:YY=ZZZZ") ###
+#############################################################################################
+
+### uncomment/comment and change the following lines for other configuration options
+
+#############################################################################################
+#### alternate toolsets ####
+#set(CMAKE_GENERATOR_TOOLSET "Intel C++ Compiler 17.0")
+
+#############################################################################################
+#### ext libraries ####
+
+### ext libs from tgz
+set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ALLOW_EXTERNAL_SUPPORT:STRING=TGZ -DTGZPATH:PATH=${CTEST_SCRIPT_DIRECTORY}")
+### ext libs from git
+#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ALLOW_EXTERNAL_SUPPORT:STRING=GIT")
+### ext libs on system
+#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DZLIB_LIBRARY:FILEPATH=some_location/lib/zlib.lib -DZLIB_INCLUDE_DIR:PATH=some_location/include")
+#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DSZIP_LIBRARY:FILEPATH=some_location/lib/szlib.lib -DSZIP_INCLUDE_DIR:PATH=some_location/include")
+
+### disable ext zlib building
+#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_Z_LIB_SUPPORT:BOOL=OFF")
+### disable ext szip building
+#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_SZIP_SUPPORT:BOOL=OFF")
+#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_SZIP_ENCODING:BOOL=OFF")
+
+#############################################################################################
+### disable test program builds
+
+#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DBUILD_TESTING:BOOL=OFF")
+
+#############################################################################################
+### disable packaging
+
+#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_NO_PACKAGES:BOOL=ON")
+### Create install package with external libraries (szip, zlib)
+set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_PACKAGE_EXTLIBS:BOOL=ON")
+
+#############################################################################################
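For orientation, a minimal sketch of how this new options file is typically consumed: the CTest driver scripts earlier in this patch include() HDF5options.cmake right next to CTestScript.cmake, so a local override only needs one extra line here before launching a dashboard run. The option name and the ctest invocation below are illustrative assumptions, not part of this change.

    ### hypothetical local override appended to HDF5options.cmake
    set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_CPP_LIB:BOOL=ON")
    ### then drive the dashboard from the scripts directory, for example:
    ###   ctest -S HDF5config.cmake -C Release -VV -O hdf5.log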
diff --git a/config/cmake/userblockTest.cmake b/config/cmake/userblockTest.cmake
index 0775cbe..9af7e5b 100644
--- a/config/cmake/userblockTest.cmake
+++ b/config/cmake/userblockTest.cmake
@@ -1,31 +1,42 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
# userblockTest.cmake executes a command and captures the output in a file. File is then compared
# against a reference file. Exit status of command can also be compared.
# arguments checking
if (NOT TEST_PROGRAM)
message (FATAL_ERROR "Require TEST_PROGRAM tellub to be defined")
-endif (NOT TEST_PROGRAM)
+endif ()
if (NOT TEST_GET_PROGRAM)
message (FATAL_ERROR "Require TEST_GET_PROGRAM getub to be defined")
-endif (NOT TEST_GET_PROGRAM)
+endif ()
if (NOT TEST_FOLDER)
message ( FATAL_ERROR "Require TEST_FOLDER to be defined")
-endif (NOT TEST_FOLDER)
+endif ()
if (NOT TEST_HFILE)
message (FATAL_ERROR "Require TEST_HFILE the hdf file to be defined")
-endif (NOT TEST_HFILE)
+endif ()
if (NOT TEST_UFILE)
message (FATAL_ERROR "Require TEST_UFILE the ub file to be defined")
-endif (NOT TEST_UFILE)
+endif ()
if (NOT TEST_CHECKUB)
message (STATUS "Require TEST_CHECKUB - YES or NO - to be defined")
-endif (NOT TEST_CHECKUB)
+endif ()
#if (NOT TEST_EXPECT)
# message (STATUS "Require TEST_EXPECT to be defined")
-#endif (NOT TEST_EXPECT)
+#endif ()
#if (NOT TEST_OFILE)
# message (FATAL_ERROR "Require TEST_OFILE the original hdf file to be defined")
-#endif (NOT TEST_OFILE)
+#endif ()
set (TEST_U_STRING_LEN 0)
set (TEST_O_STRING_LEN 0)
@@ -53,11 +64,11 @@ if (TEST_CHECKUB STREQUAL "YES")
)
if (NOT ${TEST_RESULT} STREQUAL "0")
message (FATAL_ERROR "Failed: The output of ${TEST_PROGRAM} ${TEST_OFILE} is: ${TEST_ERROR}")
- endif (NOT ${TEST_RESULT} STREQUAL "0")
+ endif ()
file (READ ${TEST_HFILE}.len.txt TEST_O_STRING_LEN)
- endif (TEST_OFILE)
+ endif ()
- MATH( EXPR TEST_STRING_SIZE "${TEST_U_STRING_LEN} + ${TEST_O_STRING_LEN}" )
+ math( EXPR TEST_STRING_SIZE "${TEST_U_STRING_LEN} + ${TEST_O_STRING_LEN}" )
if (NOT TEST_O_STRING_LEN STREQUAL "0")
#$JAM_BIN/getub -c $s2 $origfile > $cmpfile
@@ -73,10 +84,10 @@ if (TEST_CHECKUB STREQUAL "YES")
#cat $ufile >> $cmpfile
file (STRINGS ${TEST_UFILE} TEST_STREAM NEWLINE_CONSUME)
file (APPEND ${TEST_HFILE}-ub.cmp "${TEST_STREAM}")
- else (NOT TEST_O_STRING_LEN STREQUAL "0")
+ else ()
file (STRINGS ${TEST_UFILE} TEST_STREAM NEWLINE_CONSUME)
file (WRITE ${TEST_HFILE}-ub.cmp ${TEST_STREAM})
- endif (NOT TEST_O_STRING_LEN STREQUAL "0")
+ endif ()
#$JAM_BIN/getub -c $size $hfile > $tfile
EXECUTE_PROCESS (
@@ -99,8 +110,8 @@ if (TEST_CHECKUB STREQUAL "YES")
# if the return value is !=${TEST_EXPECT} bail out
if (NOT ${TEST_RESULT} STREQUAL ${TEST_EXPECT})
message (FATAL_ERROR "Failed: The output of ${TEST_HFILE}-ub did not match ${TEST_HFILE}.\n${TEST_ERROR}")
- endif (NOT ${TEST_RESULT} STREQUAL ${TEST_EXPECT})
-else (TEST_CHECKUB STREQUAL "YES")
+ endif ()
+else ()
# call 'ubsize' to get the size of the user block
#ubsize=`$JAM_BIN/tellub $hfile`
EXECUTE_PROCESS (
@@ -112,8 +123,8 @@ else (TEST_CHECKUB STREQUAL "YES")
)
if (NOT TEST_H_STRING_LEN STREQUAL "0")
message (FATAL_ERROR "Failed: The output of ${TEST_HFILE} was NOT empty")
- endif (NOT TEST_H_STRING_LEN STREQUAL "0")
-endif (TEST_CHECKUB STREQUAL "YES")
+ endif ()
+endif ()
# everything went fine...
message ("Passed: The output of CHECK matched expectation")
diff --git a/config/cmake/vfdTest.cmake b/config/cmake/vfdTest.cmake
index 10f0a7b..67a2566 100644
--- a/config/cmake/vfdTest.cmake
+++ b/config/cmake/vfdTest.cmake
@@ -1,32 +1,50 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
# vfdTest.cmake executes a command and captures the output in a file. Command uses specified VFD.
# Exit status of command can also be compared.
# arguments checking
if (NOT TEST_PROGRAM)
message (FATAL_ERROR "Require TEST_PROGRAM to be defined")
-endif (NOT TEST_PROGRAM)
-#if (NOT TEST_ARGS)
-# message (STATUS "Require TEST_ARGS to be defined")
-#endif (NOT TEST_ARGS)
-#if (NOT TEST_EXPECT)
-# message (STATUS "Require TEST_EXPECT to be defined")
-#endif (NOT TEST_EXPECT)
+endif ()
if (NOT TEST_FOLDER)
message ( FATAL_ERROR "Require TEST_FOLDER to be defined")
-endif (NOT TEST_FOLDER)
+endif ()
if (NOT TEST_VFD)
message (FATAL_ERROR "Require TEST_VFD to be defined")
-endif (NOT TEST_VFD)
+endif ()
-set (ERROR_APPEND 1)
+if (EXISTS ${TEST_FOLDER}/${TEST_OUTPUT})
+ file (REMOVE ${TEST_FOLDER}/${TEST_OUTPUT})
+endif ()
+
+if (EXISTS ${TEST_FOLDER}/${TEST_OUTPUT}.err)
+ file (REMOVE ${TEST_FOLDER}/${TEST_OUTPUT}.err)
+endif ()
+
+# if there is not an error reference file add the error output to the stdout file
+if (NOT TEST_ERRREF)
+ set (ERROR_APPEND 1)
+endif ()
message (STATUS "USING ${TEST_VFD} ON COMMAND: ${TEST_PROGRAM} ${TEST_ARGS}")
set (ENV{HDF5_DRIVER} "${TEST_VFD}")
+
# run the test program, capture the stdout/stderr and the result var
-EXECUTE_PROCESS (
+execute_process (
COMMAND ${TEST_PROGRAM} ${TEST_ARGS}
WORKING_DIRECTORY ${TEST_FOLDER}
+ RESULT_VARIABLE TEST_RESULT
OUTPUT_FILE ${TEST_OUTPUT}_${TEST_VFD}.out
ERROR_FILE ${TEST_OUTPUT}_${TEST_VFD}.err
OUTPUT_VARIABLE TEST_OUT
@@ -35,16 +53,24 @@ EXECUTE_PROCESS (
message (STATUS "COMMAND Result: ${TEST_RESULT}")
+# if the .err file exists and ERROR_APPEND is enabled
if (ERROR_APPEND AND EXISTS ${TEST_FOLDER}/${TEST_OUTPUT}_${TEST_VFD}.err)
file (READ ${TEST_FOLDER}/${TEST_OUTPUT}_${TEST_VFD}.err TEST_STREAM)
file (APPEND ${TEST_FOLDER}/${TEST_OUTPUT}_${TEST_VFD}.out "${TEST_STREAM}")
-endif (ERROR_APPEND AND EXISTS ${TEST_FOLDER}/${TEST_OUTPUT}_${TEST_VFD}.err)
+endif ()
# if the return value is !=${TEST_EXPECT} bail out
if (NOT ${TEST_RESULT} STREQUAL ${TEST_EXPECT})
- message ( FATAL_ERROR "Failed: Test program ${TEST_PROGRAM} exited != ${TEST_EXPECT}.\n${TEST_ERROR}")
-endif (NOT ${TEST_RESULT} STREQUAL ${TEST_EXPECT})
+ if (NOT TEST_NOERRDISPLAY)
+ if (EXISTS ${TEST_FOLDER}/${TEST_OUTPUT}_${TEST_VFD}.out)
+ file (READ ${TEST_FOLDER}/${TEST_OUTPUT}_${TEST_VFD}.out TEST_STREAM)
+ message (STATUS "Output USING ${TEST_VFD}:\n${TEST_STREAM}")
+ endif ()
+ endif ()
+ message (FATAL_ERROR "Failed: Test program ${TEST_PROGRAM} exited != ${TEST_EXPECT}.\n${TEST_ERROR}")
+endif ()
+
+message (STATUS "COMMAND Error: ${TEST_ERROR}")
# everything went fine...
message ("Passed: The ${TEST_PROGRAM} program used vfd ${TEST_VFD}")
-
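As with userblockTest.cmake, this script is meant to be driven in script mode with -D definitions. A sketch under the assumption of a test target named h5test and a resources variable HDF_RESOURCES_DIR (both placeholders), using the sec2 driver as the example VFD:

    add_test (NAME VFD-sec2-h5test COMMAND "${CMAKE_COMMAND}"
        -D "TEST_PROGRAM=$<TARGET_FILE:h5test>"
        -D "TEST_ARGS:STRING="
        -D "TEST_VFD:STRING=sec2"
        -D "TEST_EXPECT=0"
        -D "TEST_OUTPUT=h5test"
        -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
        -P "${HDF_RESOURCES_DIR}/vfdTest.cmake"
    )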
diff --git a/config/cmake_ext_mod/CheckTypeSize.cmake b/config/cmake_ext_mod/CheckTypeSize.cmake
index 5095a27..c14c2f2 100644
--- a/config/cmake_ext_mod/CheckTypeSize.cmake
+++ b/config/cmake_ext_mod/CheckTypeSize.cmake
@@ -1,4 +1,15 @@
#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
+#
# Check if the type exists and determine size of type. if the type
# exists, the size will be stored to the variable.
#
@@ -7,24 +18,24 @@
# HAVE_${VARIABLE} - does the variable exists or not
#
-MACRO (HDF_CHECK_TYPE_SIZE TYPE VARIABLE)
+macro (HDF_CHECK_TYPE_SIZE TYPE VARIABLE)
set (CMAKE_ALLOW_UNKNOWN_VARIABLE_READ_ACCESS 1)
if ("HAVE_${VARIABLE}" MATCHES "^HAVE_${VARIABLE}$")
- set (MACRO_CHECK_TYPE_SIZE_FLAGS
+ set (MACRO_CHECK_TYPE_SIZE_FLAGS
"-DCHECK_TYPE_SIZE_TYPE=\"${TYPE}\" ${CMAKE_REQUIRED_FLAGS}"
)
foreach (def HAVE_SYS_TYPES_H HAVE_STDINT_H HAVE_STDDEF_H HAVE_INTTYPES_H)
if ("${def}")
set (MACRO_CHECK_TYPE_SIZE_FLAGS "${MACRO_CHECK_TYPE_SIZE_FLAGS} -D${def}")
- endif ("${def}")
- endforeach (def)
+ endif ()
+ endforeach ()
message (STATUS "Check size of ${TYPE}")
if (CMAKE_REQUIRED_LIBRARIES)
- set (CHECK_TYPE_SIZE_ADD_LIBRARIES
+ set (CHECK_TYPE_SIZE_ADD_LIBRARIES
"-DLINK_LIBRARIES:STRING=${CMAKE_REQUIRED_LIBRARIES}"
)
- endif (CMAKE_REQUIRED_LIBRARIES)
+ endif ()
try_run (${VARIABLE} HAVE_${VARIABLE}
${CMAKE_BINARY_DIR}
${HDF_RESOURCES_EXT_DIR}/CheckTypeSize.c
@@ -35,16 +46,16 @@ MACRO (HDF_CHECK_TYPE_SIZE TYPE VARIABLE)
if (HAVE_${VARIABLE})
message (STATUS "Check size of ${TYPE} - done")
file (APPEND
- ${CMAKE_BINARY_DIR}/CMakeFiles/CMakeOutput.log
+ ${CMAKE_BINARY_DIR}/CMakeFiles/CMakeOutput.log
"Determining size of ${TYPE} passed with the following output:\n${OUTPUT}\n\n"
)
- else (HAVE_${VARIABLE})
+ else ()
message (STATUS "Check size of ${TYPE} - failed")
file (APPEND
- ${CMAKE_BINARY_DIR}/CMakeFiles/CMakeError.log
+ ${CMAKE_BINARY_DIR}/CMakeFiles/CMakeError.log
"Determining size of ${TYPE} failed with the following output:\n${OUTPUT}\n\n"
)
- endif (HAVE_${VARIABLE})
- endif ("HAVE_${VARIABLE}" MATCHES "^HAVE_${VARIABLE}$")
+ endif ()
+ endif ()
set (CMAKE_ALLOW_UNKNOWN_VARIABLE_READ_ACCESS)
-ENDMACRO (HDF_CHECK_TYPE_SIZE)
+endmacro ()
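A hedged sketch of calling the lowercased macro once this module has been included; the H5_ prefix on the result variable is only an illustration, and HDF_RESOURCES_EXT_DIR is the same directory variable the macro itself references.

    include (${HDF_RESOURCES_EXT_DIR}/CheckTypeSize.cmake)
    HDF_CHECK_TYPE_SIZE (off_t H5_SIZEOF_OFF_T)
    if (HAVE_H5_SIZEOF_OFF_T)
      message (STATUS "sizeof(off_t) = ${H5_SIZEOF_OFF_T}")
    endif ()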
diff --git a/config/cmake_ext_mod/ConfigureChecks.cmake b/config/cmake_ext_mod/ConfigureChecks.cmake
index 5a14990..56d45f9 100644
--- a/config/cmake_ext_mod/ConfigureChecks.cmake
+++ b/config/cmake_ext_mod/ConfigureChecks.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
#-----------------------------------------------------------------------------
# Include all the necessary files for macros
#-----------------------------------------------------------------------------
@@ -11,9 +22,9 @@ include (${CMAKE_ROOT}/Modules/CheckTypeSize.cmake)
include (${CMAKE_ROOT}/Modules/CheckVariableExists.cmake)
include (${CMAKE_ROOT}/Modules/CheckFortranFunctionExists.cmake)
include (${CMAKE_ROOT}/Modules/TestBigEndian.cmake)
-if(CMAKE_CXX_COMPILER)
+if (CMAKE_CXX_COMPILER AND CMAKE_CXX_COMPILER_LOADED)
include (${CMAKE_ROOT}/Modules/TestForSTDNamespace.cmake)
-endif(CMAKE_CXX_COMPILER)
+endif ()
#-----------------------------------------------------------------------------
# APPLE/Darwin setup
@@ -28,29 +39,29 @@ if (APPLE)
"variable has been set to a blank value which will build the default architecture for this system.")
endif ()
set (${HDF_PREFIX}_AC_APPLE_UNIVERSAL_BUILD 0)
-endif (APPLE)
+endif ()
# Check for Darwin (not just Apple - we also want to catch OpenDarwin)
if (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
set (${HDF_PREFIX}_HAVE_DARWIN 1)
-endif (${CMAKE_SYSTEM_NAME} MATCHES "Darwin")
+endif ()
# Check for Solaris
if (${CMAKE_SYSTEM_NAME} MATCHES "SunOS")
set (${HDF_PREFIX}_HAVE_SOLARIS 1)
-endif (${CMAKE_SYSTEM_NAME} MATCHES "SunOS")
+endif ()
#-----------------------------------------------------------------------------
# This MACRO checks IF the symbol exists in the library and IF it
# does, it appends library to the list.
#-----------------------------------------------------------------------------
set (LINK_LIBS "")
-MACRO (CHECK_LIBRARY_EXISTS_CONCAT LIBRARY SYMBOL VARIABLE)
+macro (CHECK_LIBRARY_EXISTS_CONCAT LIBRARY SYMBOL VARIABLE)
CHECK_LIBRARY_EXISTS ("${LIBRARY};${LINK_LIBS}" ${SYMBOL} "" ${VARIABLE})
if (${VARIABLE})
set (LINK_LIBS ${LINK_LIBS} ${LIBRARY})
- endif (${VARIABLE})
-ENDMACRO (CHECK_LIBRARY_EXISTS_CONCAT)
+ endif ()
+endmacro ()
# ----------------------------------------------------------------------
# WINDOWS Hard code Values
@@ -62,7 +73,7 @@ if (WIN32)
set (${HDF_PREFIX}_HAVE_MINGW 1)
set (WINDOWS 1) # MinGW tries to imitate Windows
set (CMAKE_REQUIRED_FLAGS "-DWIN32_LEAN_AND_MEAN=1 -DNOGDI=1")
- endif (MINGW)
+ endif ()
set (${HDF_PREFIX}_HAVE_WIN32_API 1)
set (CMAKE_REQUIRED_LIBRARIES "ws2_32.lib;wsock32.lib")
if (NOT UNIX AND NOT MINGW)
@@ -70,9 +81,9 @@ if (WIN32)
set (CMAKE_REQUIRED_FLAGS "/DWIN32_LEAN_AND_MEAN=1 /DNOGDI=1")
if (MSVC)
set (${HDF_PREFIX}_HAVE_VISUAL_STUDIO 1)
- endif (MSVC)
- endif (NOT UNIX AND NOT MINGW)
-endif (WIN32)
+ endif ()
+ endif ()
+endif ()
if (WINDOWS)
set (${HDF_PREFIX}_HAVE_STDDEF_H 1)
@@ -84,20 +95,20 @@ if (WINDOWS)
set (${HDF_PREFIX}_HAVE_LONGJMP 1)
if (NOT MINGW)
set (${HDF_PREFIX}_HAVE_GETHOSTNAME 1)
- endif (NOT MINGW)
+ endif ()
if (NOT UNIX AND NOT CYGWIN AND NOT MINGW)
set (${HDF_PREFIX}_HAVE_GETCONSOLESCREENBUFFERINFO 1)
- endif (NOT UNIX AND NOT CYGWIN AND NOT MINGW)
+ endif ()
set (${HDF_PREFIX}_HAVE_FUNCTION 1)
set (${HDF_PREFIX}_GETTIMEOFDAY_GIVES_TZ 1)
set (${HDF_PREFIX}_HAVE_TIMEZONE 1)
set (${HDF_PREFIX}_HAVE_GETTIMEOFDAY 1)
if (MINGW)
set (${HDF_PREFIX}_HAVE_WINSOCK2_H 1)
- endif (MINGW)
+ endif ()
set (${HDF_PREFIX}_HAVE_LIBWS2_32 1)
set (${HDF_PREFIX}_HAVE_LIBWSOCK32 1)
-endif (WINDOWS)
+endif ()
# ----------------------------------------------------------------------
# END of WINDOWS Hard code Values
@@ -105,7 +116,7 @@ endif (WINDOWS)
if (CYGWIN)
set (${HDF_PREFIX}_HAVE_LSEEK64 0)
-endif (CYGWIN)
+endif ()
#-----------------------------------------------------------------------------
# Check for the math library "m"
@@ -115,7 +126,7 @@ if (NOT WINDOWS)
CHECK_LIBRARY_EXISTS_CONCAT ("dl" dlopen ${HDF_PREFIX}_HAVE_LIBDL)
CHECK_LIBRARY_EXISTS_CONCAT ("ws2_32" WSAStartup ${HDF_PREFIX}_HAVE_LIBWS2_32)
CHECK_LIBRARY_EXISTS_CONCAT ("wsock32" gethostbyname ${HDF_PREFIX}_HAVE_LIBWSOCK32)
-endif (NOT WINDOWS)
+endif ()
# UCB (BSD) compatibility library
CHECK_LIBRARY_EXISTS_CONCAT ("ucb" gethostname ${HDF_PREFIX}_HAVE_LIBUCB)
@@ -126,20 +137,20 @@ set (CMAKE_REQUIRED_LIBRARIES ${CMAKE_REQUIRED_LIBRARIES} ${LINK_LIBS})
set (USE_INCLUDES "")
if (WINDOWS)
set (USE_INCLUDES ${USE_INCLUDES} "windows.h")
-endif (WINDOWS)
+endif ()
if (NOT WINDOWS)
TEST_BIG_ENDIAN (${HDF_PREFIX}_WORDS_BIGENDIAN)
-endif (NOT WINDOWS)
+endif ()
# For other specific tests, use this MACRO.
-MACRO (HDF_FUNCTION_TEST OTHER_TEST)
+macro (HDF_FUNCTION_TEST OTHER_TEST)
if ("${HDF_PREFIX}_${OTHER_TEST}" MATCHES "^${HDF_PREFIX}_${OTHER_TEST}$")
set (MACRO_CHECK_FUNCTION_DEFINITIONS "-D${OTHER_TEST} ${CMAKE_REQUIRED_FLAGS}")
set (OTHER_TEST_ADD_LIBRARIES)
if (CMAKE_REQUIRED_LIBRARIES)
set (OTHER_TEST_ADD_LIBRARIES "-DLINK_LIBRARIES:STRING=${CMAKE_REQUIRED_LIBRARIES}")
- endif (CMAKE_REQUIRED_LIBRARIES)
+ endif ()
foreach (def
HAVE_SYS_TIME_H
@@ -149,14 +160,14 @@ MACRO (HDF_FUNCTION_TEST OTHER_TEST)
)
if ("${${HDF_PREFIX}_${def}}")
set (MACRO_CHECK_FUNCTION_DEFINITIONS "${MACRO_CHECK_FUNCTION_DEFINITIONS} -D${def}")
- endif ("${${HDF_PREFIX}_${def}}")
- endforeach (def)
+ endif ()
+ endforeach ()
if (LARGEFILE)
set (MACRO_CHECK_FUNCTION_DEFINITIONS
"${MACRO_CHECK_FUNCTION_DEFINITIONS} -D_FILE_OFFSET_BITS=64 -D_LARGEFILE64_SOURCE -D_LARGEFILE_SOURCE"
)
- endif (LARGEFILE)
+ endif ()
#message (STATUS "Performing ${OTHER_TEST}")
TRY_COMPILE (${OTHER_TEST}
@@ -169,16 +180,16 @@ MACRO (HDF_FUNCTION_TEST OTHER_TEST)
if (${OTHER_TEST})
set (${HDF_PREFIX}_${OTHER_TEST} 1 CACHE INTERNAL "Other test ${FUNCTION}")
message (STATUS "Performing Other Test ${OTHER_TEST} - Success")
- else (${OTHER_TEST})
+ else ()
message (STATUS "Performing Other Test ${OTHER_TEST} - Failed")
set (${HDF_PREFIX}_${OTHER_TEST} "" CACHE INTERNAL "Other test ${FUNCTION}")
file (APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log
"Performing Other Test ${OTHER_TEST} failed with the following output:\n"
"${OUTPUT}\n"
)
- endif (${OTHER_TEST})
- endif ("${HDF_PREFIX}_${OTHER_TEST}" MATCHES "^${HDF_PREFIX}_${OTHER_TEST}$")
-ENDMACRO (HDF_FUNCTION_TEST)
+ endif ()
+ endif ()
+endmacro ()
#-----------------------------------------------------------------------------
# Check for these functions before the time headers are checked
@@ -188,28 +199,29 @@ HDF_FUNCTION_TEST (STDC_HEADERS)
#-----------------------------------------------------------------------------
# Check IF header file exists and add it to the list.
#-----------------------------------------------------------------------------
-MACRO (CHECK_INCLUDE_FILE_CONCAT FILE VARIABLE)
+macro (CHECK_INCLUDE_FILE_CONCAT FILE VARIABLE)
CHECK_INCLUDE_FILES ("${USE_INCLUDES};${FILE}" ${VARIABLE})
if (${VARIABLE})
set (USE_INCLUDES ${USE_INCLUDES} ${FILE})
- endif (${VARIABLE})
-ENDMACRO (CHECK_INCLUDE_FILE_CONCAT)
+ endif ()
+endmacro ()
#-----------------------------------------------------------------------------
# Check for the existence of certain header files
#-----------------------------------------------------------------------------
-CHECK_INCLUDE_FILE_CONCAT ("sys/resource.h" ${HDF_PREFIX}_HAVE_SYS_RESOURCE_H)
-CHECK_INCLUDE_FILE_CONCAT ("sys/time.h" ${HDF_PREFIX}_HAVE_SYS_TIME_H)
-CHECK_INCLUDE_FILE_CONCAT ("unistd.h" ${HDF_PREFIX}_HAVE_UNISTD_H)
+CHECK_INCLUDE_FILE_CONCAT ("sys/file.h" ${HDF_PREFIX}_HAVE_SYS_FILE_H)
CHECK_INCLUDE_FILE_CONCAT ("sys/ioctl.h" ${HDF_PREFIX}_HAVE_SYS_IOCTL_H)
-CHECK_INCLUDE_FILE_CONCAT ("sys/stat.h" ${HDF_PREFIX}_HAVE_SYS_STAT_H)
+CHECK_INCLUDE_FILE_CONCAT ("sys/resource.h" ${HDF_PREFIX}_HAVE_SYS_RESOURCE_H)
CHECK_INCLUDE_FILE_CONCAT ("sys/socket.h" ${HDF_PREFIX}_HAVE_SYS_SOCKET_H)
+CHECK_INCLUDE_FILE_CONCAT ("sys/stat.h" ${HDF_PREFIX}_HAVE_SYS_STAT_H)
+CHECK_INCLUDE_FILE_CONCAT ("sys/time.h" ${HDF_PREFIX}_HAVE_SYS_TIME_H)
CHECK_INCLUDE_FILE_CONCAT ("sys/types.h" ${HDF_PREFIX}_HAVE_SYS_TYPES_H)
-CHECK_INCLUDE_FILE_CONCAT ("stddef.h" ${HDF_PREFIX}_HAVE_STDDEF_H)
-CHECK_INCLUDE_FILE_CONCAT ("setjmp.h" ${HDF_PREFIX}_HAVE_SETJMP_H)
CHECK_INCLUDE_FILE_CONCAT ("features.h" ${HDF_PREFIX}_HAVE_FEATURES_H)
CHECK_INCLUDE_FILE_CONCAT ("dirent.h" ${HDF_PREFIX}_HAVE_DIRENT_H)
+CHECK_INCLUDE_FILE_CONCAT ("setjmp.h" ${HDF_PREFIX}_HAVE_SETJMP_H)
+CHECK_INCLUDE_FILE_CONCAT ("stddef.h" ${HDF_PREFIX}_HAVE_STDDEF_H)
CHECK_INCLUDE_FILE_CONCAT ("stdint.h" ${HDF_PREFIX}_HAVE_STDINT_H)
+CHECK_INCLUDE_FILE_CONCAT ("unistd.h" ${HDF_PREFIX}_HAVE_UNISTD_H)
# IF the c compiler found stdint, check the C++ as well. On some systems this
# file will be found by C but not C++, only do this test IF the C++ compiler
@@ -219,8 +231,8 @@ if (${HDF_PREFIX}_HAVE_STDINT_H AND CMAKE_CXX_COMPILER_LOADED)
if (NOT ${HDF_PREFIX}_HAVE_STDINT_H_CXX)
set (${HDF_PREFIX}_HAVE_STDINT_H "" CACHE INTERNAL "Have includes HAVE_STDINT_H")
set (USE_INCLUDES ${USE_INCLUDES} "stdint.h")
- endif (NOT ${HDF_PREFIX}_HAVE_STDINT_H_CXX)
-endif (${HDF_PREFIX}_HAVE_STDINT_H AND CMAKE_CXX_COMPILER_LOADED)
+ endif ()
+endif ()
# Darwin
CHECK_INCLUDE_FILE_CONCAT ("mach/mach_time.h" ${HDF_PREFIX}_HAVE_MACH_MACH_TIME_H)
@@ -229,16 +241,16 @@ CHECK_INCLUDE_FILE_CONCAT ("mach/mach_time.h" ${HDF_PREFIX}_HAVE_MACH_MACH_TIME_
CHECK_INCLUDE_FILE_CONCAT ("io.h" ${HDF_PREFIX}_HAVE_IO_H)
if (NOT CYGWIN)
CHECK_INCLUDE_FILE_CONCAT ("winsock2.h" ${HDF_PREFIX}_HAVE_WINSOCK2_H)
-endif (NOT CYGWIN)
+endif ()
CHECK_INCLUDE_FILE_CONCAT ("sys/timeb.h" ${HDF_PREFIX}_HAVE_SYS_TIMEB_H)
if (CMAKE_SYSTEM_NAME MATCHES "OSF")
CHECK_INCLUDE_FILE_CONCAT ("sys/sysinfo.h" ${HDF_PREFIX}_HAVE_SYS_SYSINFO_H)
CHECK_INCLUDE_FILE_CONCAT ("sys/proc.h" ${HDF_PREFIX}_HAVE_SYS_PROC_H)
-else (CMAKE_SYSTEM_NAME MATCHES "OSF")
+else ()
set (${HDF_PREFIX}_HAVE_SYS_SYSINFO_H "" CACHE INTERNAL "" FORCE)
set (${HDF_PREFIX}_HAVE_SYS_PROC_H "" CACHE INTERNAL "" FORCE)
-endif (CMAKE_SYSTEM_NAME MATCHES "OSF")
+endif ()
CHECK_INCLUDE_FILE_CONCAT ("globus/common.h" ${HDF_PREFIX}_HAVE_GLOBUS_COMMON_H)
CHECK_INCLUDE_FILE_CONCAT ("pdb.h" ${HDF_PREFIX}_HAVE_PDB_H)
@@ -295,29 +307,29 @@ if (NOT WINDOWS)
# check should be generalized for all POSIX systems as it
# is in the Autotools.
if (TEST_LFS_WORKS_COMPILE)
- if (TEST_LFS_WORKS_RUN MATCHES 0)
+ if (TEST_LFS_WORKS_RUN MATCHES 0)
set (TEST_LFS_WORKS 1 CACHE INTERNAL ${msg})
set (LARGEFILE 1)
set (HDF_EXTRA_FLAGS ${HDF_EXTRA_FLAGS} -D_FILE_OFFSET_BITS=64 -D_LARGEFILE64_SOURCE -D_LARGEFILE_SOURCE)
message (STATUS "${msg}... yes")
- else (TEST_LFS_WORKS_RUN MATCHES 0)
+ else ()
set (TEST_LFS_WORKS "" CACHE INTERNAL ${msg})
message (STATUS "${msg}... no")
file (APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log
"Test TEST_LFS_WORKS Run failed with the following output and exit code:\n ${OUTPUT}\n"
)
- endif (TEST_LFS_WORKS_RUN MATCHES 0)
- else (TEST_LFS_WORKS_COMPILE )
+ endif ()
+ else ()
set (TEST_LFS_WORKS "" CACHE INTERNAL ${msg})
message (STATUS "${msg}... no")
file (APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log
"Test TEST_LFS_WORKS Compile failed with the following output:\n ${OUTPUT}\n"
)
- endif (TEST_LFS_WORKS_COMPILE)
- endif (HDF_ENABLE_LARGE_FILE)
+ endif ()
+ endif ()
set (CMAKE_REQUIRED_DEFINITIONS ${CMAKE_REQUIRED_DEFINITIONS} ${HDF_EXTRA_FLAGS})
- endif (NOT ${HDF_PREFIX}_HAVE_SOLARIS AND NOT ${HDF_PREFIX}_HAVE_DARWIN)
-endif (NOT WINDOWS)
+ endif ()
+endif ()
add_definitions (${HDF_EXTRA_FLAGS})
@@ -331,7 +343,7 @@ if (NOT WINDOWS OR MINGW)
CHECK_FUNCTION_EXISTS (fseeko64 ${HDF_PREFIX}_HAVE_FSEEKO64)
CHECK_FUNCTION_EXISTS (ftello64 ${HDF_PREFIX}_HAVE_FTELLO64)
CHECK_FUNCTION_EXISTS (ftruncate64 ${HDF_PREFIX}_HAVE_FTRUNCATE64)
- endif (${HDF_PREFIX}_HAVE_OFF64_T)
+ endif ()
CHECK_FUNCTION_EXISTS (fseeko ${HDF_PREFIX}_HAVE_FSEEKO)
CHECK_FUNCTION_EXISTS (ftello ${HDF_PREFIX}_HAVE_FTELLO)
@@ -340,13 +352,13 @@ if (NOT WINDOWS OR MINGW)
if (HAVE_STAT64_STRUCT)
CHECK_FUNCTION_EXISTS (fstat64 ${HDF_PREFIX}_HAVE_FSTAT64)
CHECK_FUNCTION_EXISTS (stat64 ${HDF_PREFIX}_HAVE_STAT64)
- endif (HAVE_STAT64_STRUCT)
-endif (NOT WINDOWS OR MINGW)
+ endif ()
+endif ()
#-----------------------------------------------------------------------------
# Check the size in bytes of all the int and float types
#-----------------------------------------------------------------------------
-MACRO (HDF_CHECK_TYPE_SIZE type var)
+macro (HDF_CHECK_TYPE_SIZE type var)
set (aType ${type})
set (aVar ${var})
# message (STATUS "Checking size of ${aType} and storing into ${aVar}")
@@ -354,8 +366,8 @@ MACRO (HDF_CHECK_TYPE_SIZE type var)
if (NOT ${aVar})
set (${aVar} 0 CACHE INTERNAL "SizeOf for ${aType}")
# message (STATUS "Size of ${aType} was NOT Found")
- endif (NOT ${aVar})
-ENDMACRO (HDF_CHECK_TYPE_SIZE)
+ endif ()
+endmacro ()
HDF_CHECK_TYPE_SIZE (char ${HDF_PREFIX}_SIZEOF_CHAR)
HDF_CHECK_TYPE_SIZE (short ${HDF_PREFIX}_SIZEOF_SHORT)
@@ -363,12 +375,12 @@ HDF_CHECK_TYPE_SIZE (int ${HDF_PREFIX}_SIZEOF_INT)
HDF_CHECK_TYPE_SIZE (unsigned ${HDF_PREFIX}_SIZEOF_UNSIGNED)
if (NOT APPLE)
HDF_CHECK_TYPE_SIZE (long ${HDF_PREFIX}_SIZEOF_LONG)
-endif (NOT APPLE)
+endif ()
HDF_CHECK_TYPE_SIZE ("long long" ${HDF_PREFIX}_SIZEOF_LONG_LONG)
HDF_CHECK_TYPE_SIZE (__int64 ${HDF_PREFIX}_SIZEOF___INT64)
if (NOT ${HDF_PREFIX}_SIZEOF___INT64)
set (${HDF_PREFIX}_SIZEOF___INT64 0)
-endif (NOT ${HDF_PREFIX}_SIZEOF___INT64)
+endif ()
HDF_CHECK_TYPE_SIZE (float ${HDF_PREFIX}_SIZEOF_FLOAT)
HDF_CHECK_TYPE_SIZE (double ${HDF_PREFIX}_SIZEOF_DOUBLE)
@@ -407,17 +419,17 @@ if (NOT APPLE)
HDF_CHECK_TYPE_SIZE (ssize_t ${HDF_PREFIX}_SIZEOF_SSIZE_T)
if (NOT ${HDF_PREFIX}_SIZEOF_SSIZE_T)
set (${HDF_PREFIX}_SIZEOF_SSIZE_T 0)
- endif (NOT ${HDF_PREFIX}_SIZEOF_SSIZE_T)
+ endif ()
if (NOT WINDOWS)
HDF_CHECK_TYPE_SIZE (ptrdiff_t ${HDF_PREFIX}_SIZEOF_PTRDIFF_T)
- endif (NOT WINDOWS)
-endif (NOT APPLE)
+ endif ()
+endif ()
HDF_CHECK_TYPE_SIZE (off_t ${HDF_PREFIX}_SIZEOF_OFF_T)
HDF_CHECK_TYPE_SIZE (off64_t ${HDF_PREFIX}_SIZEOF_OFF64_T)
if (NOT ${HDF_PREFIX}_SIZEOF_OFF64_T)
set (${HDF_PREFIX}_SIZEOF_OFF64_T 0)
-endif (NOT ${HDF_PREFIX}_SIZEOF_OFF64_T)
+endif ()
#-----------------------------------------------------------------------------
# Extra C99 types
@@ -428,9 +440,9 @@ CHECK_INCLUDE_FILE_CONCAT (stdbool.h ${HDF_PREFIX}_HAVE_STDBOOL_H)
if (HAVE_STDBOOL_H)
set (CMAKE_EXTRA_INCLUDE_FILES stdbool.h)
HDF_CHECK_TYPE_SIZE (bool ${HDF_PREFIX}_SIZEOF_BOOL)
-else (HAVE_STDBOOL_H)
+else ()
HDF_CHECK_TYPE_SIZE (_Bool ${HDF_PREFIX}_SIZEOF_BOOL)
-endif (HAVE_STDBOOL_H)
+endif ()
if (NOT WINDOWS)
#-----------------------------------------------------------------------------
@@ -459,11 +471,11 @@ if (NOT WINDOWS)
HAVE_STRUCT_TM_TM_ZONE
)
HDF_FUNCTION_TEST (${test})
- endforeach (test)
+ endforeach ()
if (NOT CYGWIN AND NOT MINGW)
HDF_FUNCTION_TEST (HAVE_TIMEZONE)
# HDF_FUNCTION_TEST (HAVE_STAT_ST_BLOCKS)
- endif (NOT CYGWIN AND NOT MINGW)
+ endif ()
# ----------------------------------------------------------------------
# Does the struct stat have the st_blocks field? This field is not Posix.
@@ -481,7 +493,7 @@ if (NOT WINDOWS)
CHECK_FUNCTION_EXISTS (_scrsize ${HDF_PREFIX}_HAVE__SCRSIZE)
if (NOT CYGWIN AND NOT MINGW)
CHECK_FUNCTION_EXISTS (GetConsoleScreenBufferInfo ${HDF_PREFIX}_HAVE_GETCONSOLESCREENBUFFERINFO)
- endif (NOT CYGWIN AND NOT MINGW)
+ endif ()
CHECK_SYMBOL_EXISTS (TIOCGWINSZ "sys/ioctl.h" ${HDF_PREFIX}_HAVE_TIOCGWINSZ)
CHECK_SYMBOL_EXISTS (TIOCGETD "sys/ioctl.h" ${HDF_PREFIX}_HAVE_TIOCGETD)
@@ -490,8 +502,8 @@ if (NOT WINDOWS)
#
if (NOT CYGWIN AND NOT MINGW)
CHECK_FUNCTION_EXISTS (getpwuid ${HDF_PREFIX}_HAVE_GETPWUID)
- endif (NOT CYGWIN AND NOT MINGW)
-endif (NOT WINDOWS)
+ endif ()
+endif ()
#-----------------------------------------------------------------------------
# Check for some functions that are used
@@ -527,6 +539,8 @@ CHECK_FUNCTION_EXISTS (sigprocmask ${HDF_PREFIX}_HAVE_SIGPROCMASK)
CHECK_FUNCTION_EXISTS (snprintf ${HDF_PREFIX}_HAVE_SNPRINTF)
CHECK_FUNCTION_EXISTS (srandom ${HDF_PREFIX}_HAVE_SRANDOM)
CHECK_FUNCTION_EXISTS (strdup ${HDF_PREFIX}_HAVE_STRDUP)
+CHECK_FUNCTION_EXISTS (strtoll ${HDF_PREFIX}_HAVE_STRTOLL)
+CHECK_FUNCTION_EXISTS (strtoull ${HDF_PREFIX}_HAVE_STRTOULL)
CHECK_FUNCTION_EXISTS (symlink ${HDF_PREFIX}_HAVE_SYMLINK)
CHECK_FUNCTION_EXISTS (system ${HDF_PREFIX}_HAVE_SYSTEM)
@@ -539,8 +553,8 @@ CHECK_FUNCTION_EXISTS (vsnprintf ${HDF_PREFIX}_HAVE_VSNPRINTF)
if (NOT WINDOWS)
if (${HDF_PREFIX}_HAVE_VSNPRINTF)
HDF_FUNCTION_TEST (VSNPRINTF_WORKS)
- endif (${HDF_PREFIX}_HAVE_VSNPRINTF)
-endif (NOT WINDOWS)
+ endif ()
+endif ()
#-----------------------------------------------------------------------------
# sigsetjmp is special; may actually be a macro
@@ -550,9 +564,9 @@ if (NOT ${HDF_PREFIX}_HAVE_SIGSETJMP)
CHECK_SYMBOL_EXISTS (sigsetjmp "setjmp.h" ${HDF_PREFIX}_HAVE_MACRO_SIGSETJMP)
if (${HDF_PREFIX}_HAVE_MACRO_SIGSETJMP)
set (${HDF_PREFIX}_HAVE_SIGSETJMP 1)
- endif (${HDF_PREFIX}_HAVE_MACRO_SIGSETJMP)
- endif (${HDF_PREFIX}_HAVE_SETJMP_H)
-endif (NOT ${HDF_PREFIX}_HAVE_SIGSETJMP)
+ endif ()
+ endif ()
+endif ()
#-----------------------------------------------------------------------------
# Check a bunch of other functions
@@ -568,34 +582,35 @@ if (NOT WINDOWS)
HAVE_SOCKLEN_T
)
HDF_FUNCTION_TEST (${test})
- endforeach (test)
-endif (NOT WINDOWS)
+ endforeach ()
+endif ()
# For other CXX specific tests, use this MACRO.
-MACRO (HDF_CXX_FUNCTION_TEST OTHER_TEST)
+macro (HDF_CXX_FUNCTION_TEST OTHER_TEST)
if ("${OTHER_TEST}" MATCHES "^${OTHER_TEST}$")
set (MACRO_CHECK_FUNCTION_DEFINITIONS "-D${OTHER_TEST} ${CMAKE_REQUIRED_FLAGS}")
set (OTHER_TEST_ADD_LIBRARIES)
if (CMAKE_REQUIRED_LIBRARIES)
set (OTHER_TEST_ADD_LIBRARIES "-DLINK_LIBRARIES:STRING=${CMAKE_REQUIRED_LIBRARIES}")
- endif (CMAKE_REQUIRED_LIBRARIES)
+ endif ()
foreach (def
HAVE_SYS_TIME_H
HAVE_UNISTD_H
HAVE_SYS_TYPES_H
HAVE_SYS_SOCKET_H
+ HAVE_SYS_FILE_H
)
if ("${${HDF_PREFIX}_${def}}")
set (MACRO_CHECK_FUNCTION_DEFINITIONS "${MACRO_CHECK_FUNCTION_DEFINITIONS} -D${def}")
- endif ("${${HDF_PREFIX}_${def}}")
- endforeach (def)
+ endif ()
+ endforeach ()
if (LARGEFILE)
set (MACRO_CHECK_FUNCTION_DEFINITIONS
"${MACRO_CHECK_FUNCTION_DEFINITIONS} -D_FILE_OFFSET_BITS=64 -D_LARGEFILE64_SOURCE -D_LARGEFILE_SOURCE"
)
- endif (LARGEFILE)
+ endif ()
#message (STATUS "Performing ${OTHER_TEST}")
TRY_COMPILE (${OTHER_TEST}
@@ -608,16 +623,16 @@ MACRO (HDF_CXX_FUNCTION_TEST OTHER_TEST)
if (${OTHER_TEST} EQUAL 0)
set (${OTHER_TEST} 1 CACHE INTERNAL "CXX test ${FUNCTION}")
message (STATUS "Performing CXX Test ${OTHER_TEST} - Success")
- else (${OTHER_TEST} EQUAL 0)
+ else ()
message (STATUS "Performing CXX Test ${OTHER_TEST} - Failed")
set (${OTHER_TEST} "" CACHE INTERNAL "CXX test ${FUNCTION}")
file (APPEND ${CMAKE_BINARY_DIR}/CMakeFiles/CMakeError.log
"Performing CXX Test ${OTHER_TEST} failed with the following output:\n"
"${OUTPUT}\n"
)
- endif (${OTHER_TEST} EQUAL 0)
- endif ("${OTHER_TEST}" MATCHES "^${OTHER_TEST}$")
-ENDMACRO (HDF_CXX_FUNCTION_TEST)
+ endif ()
+ endif ()
+endmacro ()
#-----------------------------------------------------------------------------
# Check a bunch of cxx functions
@@ -632,8 +647,8 @@ if (CMAKE_CXX_COMPILER_LOADED)
CXX_HAVE_OFFSETOF
)
HDF_CXX_FUNCTION_TEST (${test})
- endforeach (test)
-endif (CMAKE_CXX_COMPILER_LOADED)
+ endforeach ()
+endif ()
#-----------------------------------------------------------------------------
# Check if InitOnceExecuteOnce is available
@@ -646,21 +661,21 @@ if (WINDOWS)
set (CMAKE_REQUIRED_DEFINITIONS
"${CURRENT_TEST_DEFINITIONS} -D_FILE_OFFSET_BITS=64 -D_LARGEFILE64_SOURCE -D_LARGEFILE_SOURCE"
)
- endif (LARGEFILE)
+ endif ()
set (MACRO_CHECK_FUNCTION_DEFINITIONS
"-DHAVE_IOEO ${CMAKE_REQUIRED_FLAGS}")
if (CMAKE_REQUIRED_LIBRARIES)
set (CHECK_C_SOURCE_COMPILES_ADD_LIBRARIES
"-DLINK_LIBRARIES:STRING=${CMAKE_REQUIRED_LIBRARIES}")
- else (CMAKE_REQUIRED_LIBRARIES)
+ else ()
set (CHECK_C_SOURCE_COMPILES_ADD_LIBRARIES)
- endif (CMAKE_REQUIRED_LIBRARIES)
+ endif ()
if (CMAKE_REQUIRED_INCLUDES)
set (CHECK_C_SOURCE_COMPILES_ADD_INCLUDES
"-DINCLUDE_DIRECTORIES:STRING=${CMAKE_REQUIRED_INCLUDES}")
- else (CMAKE_REQUIRED_INCLUDES)
+ else ()
set (CHECK_C_SOURCE_COMPILES_ADD_INCLUDES)
- endif (CMAKE_REQUIRED_INCLUDES)
+ endif ()
TRY_RUN(HAVE_IOEO_EXITCODE HAVE_IOEO_COMPILED
${CMAKE_BINARY_DIR}
@@ -674,7 +689,7 @@ if (WINDOWS)
# if it did not compile make the return value fail code of 1
if (NOT HAVE_IOEO_COMPILED)
set (HAVE_IOEO_EXITCODE 1)
- endif (NOT HAVE_IOEO_COMPILED)
+ endif ()
# if the return value was 0 then it worked
if ("${HAVE_IOEO_EXITCODE}" EQUAL 0)
set (${HDF_PREFIX}_HAVE_IOEO 1 CACHE INTERNAL "Test InitOnceExecuteOnce")
@@ -683,22 +698,22 @@ if (WINDOWS)
"Performing C SOURCE FILE Test InitOnceExecuteOnce succeded with the following output:\n"
"${OUTPUT}\n"
"Return value: ${HAVE_IOEO}\n")
- else ("${HAVE_IOEO_EXITCODE}" EQUAL 0)
+ else ()
if (CMAKE_CROSSCOMPILING AND "${HAVE_IOEO_EXITCODE}" MATCHES "FAILED_TO_RUN")
set (${HDF_PREFIX}_HAVE_IOEO "${HAVE_IOEO_EXITCODE}")
- else (CMAKE_CROSSCOMPILING AND "${HAVE_IOEO_EXITCODE}" MATCHES "FAILED_TO_RUN")
+ else ()
set (${HDF_PREFIX}_HAVE_IOEO "" CACHE INTERNAL "Test InitOnceExecuteOnce")
- endif (CMAKE_CROSSCOMPILING AND "${HAVE_IOEO_EXITCODE}" MATCHES "FAILED_TO_RUN")
+ endif ()
message (STATUS "Performing Test InitOnceExecuteOnce - Failed")
file (APPEND ${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeError.log
"Performing InitOnceExecuteOnce Test failed with the following output:\n"
"${OUTPUT}\n"
"Return value: ${HAVE_IOEO_EXITCODE}\n")
- endif ("${HAVE_IOEO_EXITCODE}" EQUAL 0)
- endif ("${${HDF_PREFIX}_HAVE_IOEO}" MATCHES "^${${HDF_PREFIX}_HAVE_IOEO}$")
- endif (NOT HDF_NO_IOEO_TEST)
-endif (WINDOWS)
+ endif ()
+ endif ()
+ endif ()
+endif ()
#-----------------------------------------------------------------------------
# Determine how 'inline' is used
@@ -706,7 +721,7 @@ endif (WINDOWS)
foreach (inline_test inline __inline__ __inline)
string (TOUPPER ${inline_test} INLINE_TEST_MACRO)
HDF_FUNCTION_TEST (HAVE_${INLINE_TEST_MACRO})
-endforeach (inline_test)
+endforeach ()
#-----------------------------------------------------------------------------
# Check how to print a Long Long integer
@@ -717,7 +732,7 @@ if (NOT ${HDF_PREFIX}_PRINTF_LL_WIDTH OR ${HDF_PREFIX}_PRINTF_LL_WIDTH MATCHES "
set (CURRENT_TEST_DEFINITIONS "-DPRINTF_LL_WIDTH")
if (${HDF_PREFIX}_SIZEOF_LONG_LONG)
set (CURRENT_TEST_DEFINITIONS "${CURRENT_TEST_DEFINITIONS} -DHAVE_LONG_LONG")
- endif (${HDF_PREFIX}_SIZEOF_LONG_LONG)
+ endif ()
TRY_RUN (${HDF_PREFIX}_PRINTF_LL_TEST_RUN ${HDF_PREFIX}_PRINTF_LL_TEST_COMPILE
${CMAKE_BINARY_DIR}
${HDF_RESOURCES_EXT_DIR}/HDFTests.c
@@ -729,24 +744,24 @@ if (NOT ${HDF_PREFIX}_PRINTF_LL_WIDTH OR ${HDF_PREFIX}_PRINTF_LL_WIDTH MATCHES "
string(REGEX REPLACE ".*PRINTF_LL_WIDTH=\\[(.*)\\].*" "\\1" ${HDF_PREFIX}_PRINTF_LL "${OUTPUT}")
set (${HDF_PREFIX}_PRINTF_LL_WIDTH "\"${${HDF_PREFIX}_PRINTF_LL}\"" CACHE INTERNAL "Width for printf for type `long long' or `__int64', us. `ll")
set (PRINT_LL_FOUND 1)
- else (${HDF_PREFIX}_PRINTF_LL_TEST_RUN MATCHES 0)
+ else ()
message ("Width test failed with result: ${${HDF_PREFIX}_PRINTF_LL_TEST_RUN}")
- endif (${HDF_PREFIX}_PRINTF_LL_TEST_RUN MATCHES 0)
- else (${HDF_PREFIX}_PRINTF_LL_TEST_COMPILE)
+ endif ()
+ else ()
file (APPEND ${CMAKE_BINARY_DIR}/CMakeFiles/CMakeError.log
"Test ${HDF_PREFIX}_PRINTF_LL_WIDTH failed with the following output:\n ${OUTPUT}\n"
)
- endif (${HDF_PREFIX}_PRINTF_LL_TEST_COMPILE)
+ endif ()
if (PRINT_LL_FOUND)
message (STATUS "Checking for appropriate format for 64 bit long: found ${${HDF_PREFIX}_PRINTF_LL_WIDTH}")
- else (PRINT_LL_FOUND)
+ else ()
message (STATUS "Checking for appropriate format for 64 bit long: not found")
set (${HDF_PREFIX}_PRINTF_LL_WIDTH "\"unknown\"" CACHE INTERNAL
"Width for printf for type `long long' or `__int64', us. `ll"
)
- endif (PRINT_LL_FOUND)
-endif (NOT ${HDF_PREFIX}_PRINTF_LL_WIDTH OR ${HDF_PREFIX}_PRINTF_LL_WIDTH MATCHES "unknown")
+ endif ()
+endif ()
# ----------------------------------------------------------------------
# Set the flag to indicate that the machine can handle converting
diff --git a/config/cmake_ext_mod/FindMPI.cmake b/config/cmake_ext_mod/FindMPI.cmake
index 1a02f82..ff1ead2 100644
--- a/config/cmake_ext_mod/FindMPI.cmake
+++ b/config/cmake_ext_mod/FindMPI.cmake
@@ -114,9 +114,10 @@ include(GetPrerequisites)
#
# Start out with the generic MPI compiler names, as these are most commonly used.
-set(_MPI_C_COMPILER_NAMES mpicc mpcc mpicc_r mpcc_r)
+set(_MPI_C_COMPILER_NAMES mpicc mpcc mpicc_r mpcc_r mpicc.bat)
set(_MPI_CXX_COMPILER_NAMES mpicxx mpiCC mpcxx mpCC mpic++ mpc++
- mpicxx_r mpiCC_r mpcxx_r mpCC_r mpic++_r mpc++_r)
+ mpicxx_r mpiCC_r mpcxx_r mpCC_r mpic++_r mpc++_r
+ mpicxx.bat)
set(_MPI_Fortran_COMPILER_NAMES mpif95 mpif95_r mpf95 mpf95_r
mpif90 mpif90_r mpf90 mpf90_r
mpif77 mpif77_r mpf77 mpf77_r)
@@ -128,9 +129,9 @@ set(_MPI_GNU_Fortran_COMPILER_NAMES mpigfortran mpgfortran mpigfortran_r
mpig77 mpig77_r mpg77 mpg77_r)
# Intel MPI compiler names
-set(_MPI_Intel_C_COMPILER_NAMES mpiicc)
-set(_MPI_Intel_CXX_COMPILER_NAMES mpiicpc mpiicxx mpiic++ mpiiCC)
-set(_MPI_Intel_Fortran_COMPILER_NAMES mpiifort mpiif95 mpiif90 mpiif77)
+set(_MPI_Intel_C_COMPILER_NAMES mpiicc mpiicc.bat)
+set(_MPI_Intel_CXX_COMPILER_NAMES mpiicpc mpiicxx mpiic++ mpiiCC mpiicpc.bat)
+set(_MPI_Intel_Fortran_COMPILER_NAMES mpiifort mpiif95 mpiif90 mpiif77 mpiifort.bat)
# PGI compiler names
set(_MPI_PGI_C_COMPILER_NAMES mpipgcc mppgcc)
@@ -314,8 +315,9 @@ function (interrogate_mpi_compiler lang try_libs)
set(MPI_COMPILE_FLAGS_WORK)
foreach(FLAG ${MPI_ALL_COMPILE_FLAGS})
+ string(REGEX REPLACE "^ " "" FLAG ${FLAG})
if (MPI_COMPILE_FLAGS_WORK)
- string(APPEND MPI_COMPILE_FLAGS_WORK " ${FLAG}")
+ set(MPI_COMPILE_FLAGS_WORK "${MPI_COMPILE_FLAGS_WORK} ${FLAG}")
else()
set(MPI_COMPILE_FLAGS_WORK ${FLAG})
endif()
@@ -323,9 +325,13 @@ function (interrogate_mpi_compiler lang try_libs)
# Extract include paths from compile command line
string(REGEX MATCHALL "(^| )-I([^\" ]+|\"[^\"]+\")" MPI_ALL_INCLUDE_PATHS "${MPI_COMPILE_CMDLINE}")
+ set(MPI_INCLUDE_PATH_WORK)
+
foreach(IPATH ${MPI_ALL_INCLUDE_PATHS})
string(REGEX REPLACE "^ ?-I" "" IPATH ${IPATH})
string(REPLACE "//" "/" IPATH ${IPATH})
+ string(REPLACE "\"" "" IPATH ${IPATH})
+ file(TO_CMAKE_PATH "${IPATH}" IPATH)
list(APPEND MPI_INCLUDE_PATH_WORK ${IPATH})
endforeach()
@@ -363,8 +369,9 @@ function (interrogate_mpi_compiler lang try_libs)
string(REGEX MATCHALL "(^| )(-Wl,|-Xlinker )([^\" ]+|\"[^\"]+\")" MPI_ALL_LINK_FLAGS "${MPI_LINK_CMDLINE}")
set(MPI_LINK_FLAGS_WORK)
foreach(FLAG ${MPI_ALL_LINK_FLAGS})
+ string(REGEX REPLACE "^ " "" FLAG ${FLAG})
if (MPI_LINK_FLAGS_WORK)
- string(APPEND MPI_LINK_FLAGS_WORK " ${FLAG}")
+ set(MPI_LINK_FLAGS_WORK "${MPI_LINK_FLAGS_WORK} ${FLAG}")
else()
set(MPI_LINK_FLAGS_WORK ${FLAG})
endif()
@@ -386,8 +393,7 @@ function (interrogate_mpi_compiler lang try_libs)
# in the showme list that can only be found in the implicit
# link directories of the compiler.
if (DEFINED CMAKE_${lang}_IMPLICIT_LINK_DIRECTORIES)
- string(APPEND MPI_LINK_PATH
- ";${CMAKE_${lang}_IMPLICIT_LINK_DIRECTORIES}")
+ set(MPI_LINK_PATH "${MPI_LINK_PATH};${CMAKE_${lang}_IMPLICIT_LINK_DIRECTORIES}")
endif ()
# Determine full path names for all of the libraries that one needs
@@ -462,11 +468,11 @@ function (interrogate_mpi_compiler lang try_libs)
set(MPI_HEADER_PATH "MPI_HEADER_PATH-NOTFOUND" CACHE FILEPATH "Cleared" FORCE)
find_path(MPI_HEADER_PATH mpifptr.h
HINTS ${_MPI_BASE_DIR} ${_MPI_PREFIX_PATH}
- PATH_SUFFIXES include include/${MS_MPI_ARCH_DIR} include/${MS_MPI_ARCH_DIR2} Inc Inc/${MS_MPI_ARCH_DIR} Inc/${MS_MPI_ARCH_DIR2})
+ PATH_SUFFIXES include Include include/${MS_MPI_ARCH_DIR} Include/${MS_MPI_ARCH_DIR2} Include/${MS_MPI_ARCH_DIR} include/${MS_MPI_ARCH_DIR2} Inc Inc/${MS_MPI_ARCH_DIR} Inc/${MS_MPI_ARCH_DIR2})
if (MPI_INCLUDE_PATH_WORK AND MPI_HEADER_PATH)
list(APPEND MPI_INCLUDE_PATH_WORK ${MPI_HEADER_PATH})
- endif()
-
+ endif ()
+
set(MPI_LIB "MPI_LIB-NOTFOUND" CACHE FILEPATH "Cleared" FORCE)
find_library(MPI_LIB
NAMES fmpi fmpich fmpich2 fmpich2g msmpifec msmpifmc
@@ -474,8 +480,8 @@ function (interrogate_mpi_compiler lang try_libs)
PATH_SUFFIXES lib lib/${MS_MPI_ARCH_DIR} Lib Lib/${MS_MPI_ARCH_DIR})
if (MPI_LIBRARIES_WORK AND MPI_LIB)
list(APPEND MPI_LIBRARIES_WORK ${MPI_LIB})
- endif()
- endif()
+ endif ()
+ endif ()
if (NOT MPI_LIBRARIES_WORK)
set(MPI_LIBRARIES_WORK "MPI_${lang}_LIBRARIES-NOTFOUND")
@@ -624,6 +630,9 @@ foreach (lang C CXX Fortran)
try_regular_compiler(${lang} regular_compiler_worked)
endif()
+ # add fortran mpi module path if ENV VAR exists
+ set (MPI_${lang}_INCLUDE_PATH "${MPI_${lang}_INCLUDE_PATH};$ENV{MPI_FORTRAN_MOD_DIR}")
+
set(MPI_${lang}_FIND_QUIETLY ${MPI_FIND_QUIETLY})
set(MPI_${lang}_FIND_REQUIRED ${MPI_FIND_REQUIRED})
set(MPI_${lang}_FIND_VERSION ${MPI_FIND_VERSION})
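The added line above lets a Fortran module directory be injected through the MPI_FORTRAN_MOD_DIR environment variable. A hedged consumer-side sketch, where the env path and the mytool target are placeholders:

    # export MPI_FORTRAN_MOD_DIR=/opt/mpi/include   # optional, picked up by the hunk above
    find_package (MPI)
    if (MPI_C_FOUND)
      include_directories (${MPI_C_INCLUDE_PATH})
      target_link_libraries (mytool ${MPI_C_LIBRARIES})
    endif ()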
diff --git a/config/cmake_ext_mod/FindSZIP.cmake b/config/cmake_ext_mod/FindSZIP.cmake
index 5f0f031..699be85 100644
--- a/config/cmake_ext_mod/FindSZIP.cmake
+++ b/config/cmake_ext_mod/FindSZIP.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
# - Find SZIP library
# - Derived from the FindTiff.cmake that is included with cmake
@@ -24,7 +35,7 @@
# made to remove references to Qt and make this file more generally applicable
#########################################################################
-MACRO (SZIP_ADJUST_LIB_VARS basename)
+macro (SZIP_ADJUST_LIB_VARS basename)
if (${basename}_INCLUDE_DIR)
# if only the release version was found, set the debug variable also to the release version
@@ -32,38 +43,37 @@ MACRO (SZIP_ADJUST_LIB_VARS basename)
set (${basename}_LIBRARY_DEBUG ${${basename}_LIBRARY_RELEASE})
set (${basename}_LIBRARY ${${basename}_LIBRARY_RELEASE})
set (${basename}_LIBRARIES ${${basename}_LIBRARY_RELEASE})
- endif (${basename}_LIBRARY_RELEASE AND NOT ${basename}_LIBRARY_DEBUG)
+ endif ()
# if only the debug version was found, set the release variable also to the debug version
if (${basename}_LIBRARY_DEBUG AND NOT ${basename}_LIBRARY_RELEASE)
set (${basename}_LIBRARY_RELEASE ${${basename}_LIBRARY_DEBUG})
set (${basename}_LIBRARY ${${basename}_LIBRARY_DEBUG})
set (${basename}_LIBRARIES ${${basename}_LIBRARY_DEBUG})
- endif (${basename}_LIBRARY_DEBUG AND NOT ${basename}_LIBRARY_RELEASE)
+ endif ()
if (${basename}_LIBRARY_DEBUG AND ${basename}_LIBRARY_RELEASE)
# if the generator supports configuration types then set
# optimized and debug libraries, or if the CMAKE_BUILD_TYPE has a value
if (CMAKE_CONFIGURATION_TYPES OR CMAKE_BUILD_TYPE)
set (${basename}_LIBRARY optimized ${${basename}_LIBRARY_RELEASE} debug ${${basename}_LIBRARY_DEBUG})
- else (CMAKE_CONFIGURATION_TYPES OR CMAKE_BUILD_TYPE)
+ else ()
# if there are no configuration types and CMAKE_BUILD_TYPE has no value
# then just use the release libraries
set (${basename}_LIBRARY ${${basename}_LIBRARY_RELEASE} )
- endif (CMAKE_CONFIGURATION_TYPES OR CMAKE_BUILD_TYPE)
+ endif ()
set (${basename}_LIBRARIES optimized ${${basename}_LIBRARY_RELEASE} debug ${${basename}_LIBRARY_DEBUG})
- endif (${basename}_LIBRARY_DEBUG AND ${basename}_LIBRARY_RELEASE)
+ endif ()
set (${basename}_LIBRARY ${${basename}_LIBRARY} CACHE FILEPATH "The ${basename} library")
if (${basename}_LIBRARY)
set (${basename}_FOUND 1)
- endif (${basename}_LIBRARY)
-
- endif (${basename}_INCLUDE_DIR )
+ endif ()
+ endif ()
# Make variables changeble to the advanced user
MARK_AS_ADVANCED (${basename}_LIBRARY ${basename}_LIBRARY_RELEASE ${basename}_LIBRARY_DEBUG ${basename}_INCLUDE_DIR )
-ENDMACRO (SZIP_ADJUST_LIB_VARS)
+endmacro ()
# Look for the header file.
@@ -93,10 +103,10 @@ FIND_PATH (SZIP_INCLUDE_DIR
if (WIN32)
set (SZIP_SEARCH_DEBUG_NAMES "sz_d;libsz_d")
set (SZIP_SEARCH_RELEASE_NAMES "sz;libsz;libszip")
-else (WIN32)
+else ()
set (SZIP_SEARCH_DEBUG_NAMES "sz_d")
set (SZIP_SEARCH_RELEASE_NAMES "sz;szip")
-endif (WIN32)
+endif ()
# Look for the library.
FIND_LIBRARY (SZIP_LIBRARY_DEBUG
@@ -120,16 +130,15 @@ if (SZIP_INCLUDE_DIR AND SZIP_LIBRARY)
if (SZIP_LIBRARY_DEBUG)
get_filename_component (SZIP_LIBRARY_PATH ${SZIP_LIBRARY_DEBUG} PATH)
set (SZIP_LIB_DIR ${SZIP_LIBRARY_PATH})
- elseif (SZIP_LIBRARY_RELEASE)
+  elseif (SZIP_LIBRARY_RELEASE)
get_filename_component (SZIP_LIBRARY_PATH ${SZIP_LIBRARY_RELEASE} PATH)
set (SZIP_LIB_DIR ${SZIP_LIBRARY_PATH})
- endif (SZIP_LIBRARY_DEBUG)
-
-else (SZIP_INCLUDE_DIR AND SZIP_LIBRARY)
+ endif ()
+else ()
set (SZIP_FOUND 0)
set (SZIP_LIBRARIES)
set (SZIP_INCLUDE_DIRS)
-endif (SZIP_INCLUDE_DIR AND SZIP_LIBRARY)
+endif ()
# Report the results.
if (NOT SZIP_FOUND)
@@ -138,12 +147,12 @@ if (NOT SZIP_FOUND)
)
if (NOT SZIP_FIND_QUIETLY)
message (STATUS "${SZIP_DIR_MESSAGE}")
- else (NOT SZIP_FIND_QUIETLY)
+ else ()
if (SZIP_FIND_REQUIRED)
message (FATAL_ERROR "SZip was NOT found and is Required by this project")
- endif (SZIP_FIND_REQUIRED)
- endif (NOT SZIP_FIND_QUIETLY)
-endif (NOT SZIP_FOUND)
+ endif ()
+ endif ()
+endif ()
if (SZIP_FOUND)
include (CheckSymbolExists)
@@ -161,7 +170,7 @@ if (SZIP_FOUND)
set (CMAKE_REQUIRED_FLAGS ${CMAKE_REQUIRED_FLAGS_SAVE})
#
#############################################
-endif (SZIP_FOUND)
+endif ()
if (FIND_SZIP_DEBUG)
message (STATUS "SZIP_INCLUDE_DIR: ${SZIP_INCLUDE_DIR}")
@@ -169,4 +178,4 @@ if (FIND_SZIP_DEBUG)
message (STATUS "SZIP_LIBRARY_DEBUG: ${SZIP_LIBRARY_DEBUG}")
message (STATUS "SZIP_LIBRARY_RELEASE: ${SZIP_LIBRARY_RELEASE}")
message (STATUS "CMAKE_BUILD_TYPE: ${CMAKE_BUILD_TYPE}")
-endif (FIND_SZIP_DEBUG)
+endif ()
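A hedged usage sketch for this find module; the module-path setup and the mytool target are assumptions, while the SZIP_* result variables are the ones set above.

    list (APPEND CMAKE_MODULE_PATH "${CMAKE_SOURCE_DIR}/config/cmake_ext_mod")
    find_package (SZIP)
    if (SZIP_FOUND)
      include_directories (${SZIP_INCLUDE_DIRS})
      target_link_libraries (mytool ${SZIP_LIBRARIES})
    endif ()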
diff --git a/config/cmake_ext_mod/GetTimeOfDayTest.cpp b/config/cmake_ext_mod/GetTimeOfDayTest.cpp
index 3b5bf60..5fd7c04 100644
--- a/config/cmake_ext_mod/GetTimeOfDayTest.cpp
+++ b/config/cmake_ext_mod/GetTimeOfDayTest.cpp
@@ -1,3 +1,14 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#if defined (TRY_SYS_TIME_H)
#include <sys/time.h>
/* #include <time.h> */
diff --git a/config/cmake_ext_mod/HDFCXXTests.cpp b/config/cmake_ext_mod/HDFCXXTests.cpp
index f5f6644..1b98092 100644
--- a/config/cmake_ext_mod/HDFCXXTests.cpp
+++ b/config/cmake_ext_mod/HDFCXXTests.cpp
@@ -1,3 +1,14 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifdef OLD_HEADER_FILENAME
diff --git a/config/cmake_ext_mod/HDFLibMacros.cmake b/config/cmake_ext_mod/HDFLibMacros.cmake
index 2145a3d..f2e03d7 100644
--- a/config/cmake_ext_mod/HDFLibMacros.cmake
+++ b/config/cmake_ext_mod/HDFLibMacros.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
#-------------------------------------------------------------------------------
macro (EXTERNAL_JPEG_LIBRARY compress_type jpeg_pic)
# May need to build JPEG with PIC on x64 machines with gcc
@@ -17,6 +28,7 @@ macro (EXTERNAL_JPEG_LIBRARY compress_type jpeg_pic)
-DCMAKE_RUNTIME_OUTPUT_DIRECTORY:PATH=${CMAKE_RUNTIME_OUTPUT_DIRECTORY}
-DCMAKE_LIBRARY_OUTPUT_DIRECTORY:PATH=${CMAKE_LIBRARY_OUTPUT_DIRECTORY}
-DCMAKE_ARCHIVE_OUTPUT_DIRECTORY:PATH=${CMAKE_ARCHIVE_OUTPUT_DIRECTORY}
+ -DCMAKE_PDB_OUTPUT_DIRECTORY:PATH=${CMAKE_PDB_OUTPUT_DIRECTORY}
-DCMAKE_ANSI_CFLAGS:STRING=${jpeg_pic}
)
elseif (${compress_type} MATCHES "GIT")
@@ -33,6 +45,7 @@ macro (EXTERNAL_JPEG_LIBRARY compress_type jpeg_pic)
-DCMAKE_RUNTIME_OUTPUT_DIRECTORY:PATH=${CMAKE_RUNTIME_OUTPUT_DIRECTORY}
-DCMAKE_LIBRARY_OUTPUT_DIRECTORY:PATH=${CMAKE_LIBRARY_OUTPUT_DIRECTORY}
-DCMAKE_ARCHIVE_OUTPUT_DIRECTORY:PATH=${CMAKE_ARCHIVE_OUTPUT_DIRECTORY}
+ -DCMAKE_PDB_OUTPUT_DIRECTORY:PATH=${CMAKE_PDB_OUTPUT_DIRECTORY}
-DCMAKE_ANSI_CFLAGS:STRING=${jpeg_pic}
)
elseif (${compress_type} MATCHES "TGZ")
@@ -49,9 +62,10 @@ macro (EXTERNAL_JPEG_LIBRARY compress_type jpeg_pic)
-DCMAKE_RUNTIME_OUTPUT_DIRECTORY:PATH=${CMAKE_RUNTIME_OUTPUT_DIRECTORY}
-DCMAKE_LIBRARY_OUTPUT_DIRECTORY:PATH=${CMAKE_LIBRARY_OUTPUT_DIRECTORY}
-DCMAKE_ARCHIVE_OUTPUT_DIRECTORY:PATH=${CMAKE_ARCHIVE_OUTPUT_DIRECTORY}
+ -DCMAKE_PDB_OUTPUT_DIRECTORY:PATH=${CMAKE_PDB_OUTPUT_DIRECTORY}
-DCMAKE_ANSI_CFLAGS:STRING=${jpeg_pic}
)
- endif (${compress_type} MATCHES "SVN")
+ endif ()
externalproject_get_property (JPEG BINARY_DIR SOURCE_DIR)
##include (${BINARY_DIR}/${JPEG_PACKAGE_NAME}${HDF_PACKAGE_EXT}-targets.cmake)
@@ -68,13 +82,13 @@ macro (EXTERNAL_JPEG_LIBRARY compress_type jpeg_pic)
add_dependencies (JPEG jpeg-shared)
set (JPEG_SHARED_LIBRARY "jpeg-shared")
set (JPEG_LIBRARIES ${JPEG_LIBRARIES} ${JPEG_shared_LIBRARY})
- endif (BUILD_SHARED_LIBS)
+ endif ()
set (JPEG_INCLUDE_DIR_GEN "${BINARY_DIR}")
set (JPEG_INCLUDE_DIR "${SOURCE_DIR}/src")
set (JPEG_FOUND 1)
set (JPEG_INCLUDE_DIRS ${JPEG_INCLUDE_DIR_GEN} ${JPEG_INCLUDE_DIR})
-endmacro (EXTERNAL_JPEG_LIBRARY)
+endmacro ()
#-------------------------------------------------------------------------------
macro (PACKAGE_JPEG_LIBRARY compress_type)
@@ -85,8 +99,8 @@ macro (PACKAGE_JPEG_LIBRARY compress_type)
set (EXTERNAL_HEADER_LIST ${EXTERNAL_HEADER_LIST} ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/jconfig.h)
if (${compress_type} MATCHES "GIT" OR ${compress_type} MATCHES "SVN" OR ${compress_type} MATCHES "TGZ")
add_dependencies (JPEG-GenHeader-Copy JPEG)
- endif (${compress_type} MATCHES "GIT" OR ${compress_type} MATCHES "SVN" OR ${compress_type} MATCHES "TGZ")
-endmacro (PACKAGE_JPEG_LIBRARY)
+ endif ()
+endmacro ()
#-------------------------------------------------------------------------------
macro (EXTERNAL_SZIP_LIBRARY compress_type encoding)
@@ -104,6 +118,7 @@ macro (EXTERNAL_SZIP_LIBRARY compress_type encoding)
-DCMAKE_RUNTIME_OUTPUT_DIRECTORY:PATH=${CMAKE_RUNTIME_OUTPUT_DIRECTORY}
-DCMAKE_LIBRARY_OUTPUT_DIRECTORY:PATH=${CMAKE_LIBRARY_OUTPUT_DIRECTORY}
-DCMAKE_ARCHIVE_OUTPUT_DIRECTORY:PATH=${CMAKE_ARCHIVE_OUTPUT_DIRECTORY}
+ -DCMAKE_PDB_OUTPUT_DIRECTORY:PATH=${CMAKE_PDB_OUTPUT_DIRECTORY}
-DCMAKE_ANSI_CFLAGS:STRING=${CMAKE_ANSI_CFLAGS}
-DSZIP_ENABLE_ENCODING:BOOL=${encoding}
)
@@ -121,6 +136,7 @@ macro (EXTERNAL_SZIP_LIBRARY compress_type encoding)
-DCMAKE_RUNTIME_OUTPUT_DIRECTORY:PATH=${CMAKE_RUNTIME_OUTPUT_DIRECTORY}
-DCMAKE_LIBRARY_OUTPUT_DIRECTORY:PATH=${CMAKE_LIBRARY_OUTPUT_DIRECTORY}
-DCMAKE_ARCHIVE_OUTPUT_DIRECTORY:PATH=${CMAKE_ARCHIVE_OUTPUT_DIRECTORY}
+ -DCMAKE_PDB_OUTPUT_DIRECTORY:PATH=${CMAKE_PDB_OUTPUT_DIRECTORY}
-DCMAKE_ANSI_CFLAGS:STRING=${CMAKE_ANSI_CFLAGS}
-DSZIP_ENABLE_ENCODING:BOOL=${encoding}
)
@@ -138,10 +154,11 @@ macro (EXTERNAL_SZIP_LIBRARY compress_type encoding)
-DCMAKE_RUNTIME_OUTPUT_DIRECTORY:PATH=${CMAKE_RUNTIME_OUTPUT_DIRECTORY}
-DCMAKE_LIBRARY_OUTPUT_DIRECTORY:PATH=${CMAKE_LIBRARY_OUTPUT_DIRECTORY}
-DCMAKE_ARCHIVE_OUTPUT_DIRECTORY:PATH=${CMAKE_ARCHIVE_OUTPUT_DIRECTORY}
+ -DCMAKE_PDB_OUTPUT_DIRECTORY:PATH=${CMAKE_PDB_OUTPUT_DIRECTORY}
-DCMAKE_ANSI_CFLAGS:STRING=${CMAKE_ANSI_CFLAGS}
-DSZIP_ENABLE_ENCODING:BOOL=${encoding}
)
- endif (${compress_type} MATCHES "SVN")
+ endif ()
externalproject_get_property (SZIP BINARY_DIR SOURCE_DIR)
##include (${BINARY_DIR}/${SZIP_PACKAGE_NAME}${HDF_PACKAGE_EXT}-targets.cmake)
@@ -158,13 +175,13 @@ macro (EXTERNAL_SZIP_LIBRARY compress_type encoding)
add_dependencies (SZIP szip-shared)
set (SZIP_SHARED_LIBRARY "szip-shared")
set (SZIP_LIBRARIES ${SZIP_LIBRARIES} ${SZIP_shared_LIBRARY})
- endif (BUILD_SHARED_LIBS)
+ endif ()
set (SZIP_INCLUDE_DIR_GEN "${BINARY_DIR}")
set (SZIP_INCLUDE_DIR "${SOURCE_DIR}/src")
set (SZIP_FOUND 1)
set (SZIP_INCLUDE_DIRS ${SZIP_INCLUDE_DIR_GEN} ${SZIP_INCLUDE_DIR})
-endmacro (EXTERNAL_SZIP_LIBRARY)
+endmacro ()
#-------------------------------------------------------------------------------
macro (PACKAGE_SZIP_LIBRARY compress_type)
@@ -175,8 +192,8 @@ macro (PACKAGE_SZIP_LIBRARY compress_type)
set (EXTERNAL_HEADER_LIST ${EXTERNAL_HEADER_LIST} ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/SZconfig.h)
if (${compress_type} MATCHES "GIT" OR ${compress_type} MATCHES "SVN" OR ${compress_type} MATCHES "TGZ")
add_dependencies (SZIP-GenHeader-Copy SZIP)
- endif (${compress_type} MATCHES "GIT" OR ${compress_type} MATCHES "SVN" OR ${compress_type} MATCHES "TGZ")
-endmacro (PACKAGE_SZIP_LIBRARY)
+ endif ()
+endmacro ()
#-------------------------------------------------------------------------------
macro (EXTERNAL_ZLIB_LIBRARY compress_type)
@@ -194,6 +211,7 @@ macro (EXTERNAL_ZLIB_LIBRARY compress_type)
-DCMAKE_RUNTIME_OUTPUT_DIRECTORY:PATH=${CMAKE_RUNTIME_OUTPUT_DIRECTORY}
-DCMAKE_LIBRARY_OUTPUT_DIRECTORY:PATH=${CMAKE_LIBRARY_OUTPUT_DIRECTORY}
-DCMAKE_ARCHIVE_OUTPUT_DIRECTORY:PATH=${CMAKE_ARCHIVE_OUTPUT_DIRECTORY}
+ -DCMAKE_PDB_OUTPUT_DIRECTORY:PATH=${CMAKE_PDB_OUTPUT_DIRECTORY}
-DCMAKE_ANSI_CFLAGS:STRING=${CMAKE_ANSI_CFLAGS}
)
elseif (${compress_type} MATCHES "GIT")
@@ -210,6 +228,7 @@ macro (EXTERNAL_ZLIB_LIBRARY compress_type)
-DCMAKE_RUNTIME_OUTPUT_DIRECTORY:PATH=${CMAKE_RUNTIME_OUTPUT_DIRECTORY}
-DCMAKE_LIBRARY_OUTPUT_DIRECTORY:PATH=${CMAKE_LIBRARY_OUTPUT_DIRECTORY}
-DCMAKE_ARCHIVE_OUTPUT_DIRECTORY:PATH=${CMAKE_ARCHIVE_OUTPUT_DIRECTORY}
+ -DCMAKE_PDB_OUTPUT_DIRECTORY:PATH=${CMAKE_PDB_OUTPUT_DIRECTORY}
-DCMAKE_ANSI_CFLAGS:STRING=${CMAKE_ANSI_CFLAGS}
)
elseif (${compress_type} MATCHES "TGZ")
@@ -226,16 +245,17 @@ macro (EXTERNAL_ZLIB_LIBRARY compress_type)
-DCMAKE_RUNTIME_OUTPUT_DIRECTORY:PATH=${CMAKE_RUNTIME_OUTPUT_DIRECTORY}
-DCMAKE_LIBRARY_OUTPUT_DIRECTORY:PATH=${CMAKE_LIBRARY_OUTPUT_DIRECTORY}
-DCMAKE_ARCHIVE_OUTPUT_DIRECTORY:PATH=${CMAKE_ARCHIVE_OUTPUT_DIRECTORY}
+ -DCMAKE_PDB_OUTPUT_DIRECTORY:PATH=${CMAKE_PDB_OUTPUT_DIRECTORY}
-DCMAKE_ANSI_CFLAGS:STRING=${CMAKE_ANSI_CFLAGS}
)
- endif (${compress_type} MATCHES "SVN")
+ endif ()
externalproject_get_property (ZLIB BINARY_DIR SOURCE_DIR)
if (WIN32)
set (ZLIB_LIB_NAME "zlib")
- else (WIN32)
+ else ()
set (ZLIB_LIB_NAME "z")
- endif (WIN32)
+ endif ()
##include (${BINARY_DIR}/${ZLIB_PACKAGE_NAME}${HDF_PACKAGE_EXT}-targets.cmake)
# Create imported target zlib-static
add_library(zlib-static STATIC IMPORTED)
@@ -250,13 +270,13 @@ macro (EXTERNAL_ZLIB_LIBRARY compress_type)
add_dependencies (ZLIB zlib-shared)
set (ZLIB_SHARED_LIBRARY "zlib-shared")
set (ZLIB_LIBRARIES ${ZLIB_LIBRARIES} ${ZLIB_SHARED_LIBRARY})
- endif (BUILD_SHARED_LIBS)
+ endif ()
set (ZLIB_INCLUDE_DIR_GEN "${BINARY_DIR}")
set (ZLIB_INCLUDE_DIR "${SOURCE_DIR}")
set (ZLIB_FOUND 1)
set (ZLIB_INCLUDE_DIRS ${ZLIB_INCLUDE_DIR_GEN} ${ZLIB_INCLUDE_DIR})
-endmacro (EXTERNAL_ZLIB_LIBRARY)
+endmacro ()
#-------------------------------------------------------------------------------
macro (PACKAGE_ZLIB_LIBRARY compress_type)
@@ -267,5 +287,5 @@ macro (PACKAGE_ZLIB_LIBRARY compress_type)
set (EXTERNAL_HEADER_LIST ${EXTERNAL_HEADER_LIST} ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/zconf.h)
if (${compress_type} MATCHES "GIT" OR ${compress_type} MATCHES "SVN" OR ${compress_type} MATCHES "TGZ")
add_dependencies (ZLIB-GenHeader-Copy ZLIB)
- endif (${compress_type} MATCHES "GIT" OR ${compress_type} MATCHES "SVN" OR ${compress_type} MATCHES "TGZ")
-endmacro (PACKAGE_ZLIB_LIBRARY)
+ endif ()
+endmacro ()
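
Beyond the same endif ()/endmacro () cleanup, the recurring functional change in HDFLibMacros.cmake is the extra -DCMAKE_PDB_OUTPUT_DIRECTORY cache entry forwarded to every external sub-build, so MSVC .pdb files land in the same output tree as the other build products. A minimal sketch of the forwarding pattern, assuming a hypothetical project DEMOLIB whose DEMOLIB_URL is defined by the caller:

    include (ExternalProject)
    # Every entry under CMAKE_ARGS seeds the sub-build's initial cache; the
    # PDB directory is passed along just like the runtime/library/archive ones.
    ExternalProject_Add (DEMOLIB
        URL ${DEMOLIB_URL}
        CMAKE_ARGS
            -DBUILD_SHARED_LIBS:BOOL=OFF
            -DCMAKE_RUNTIME_OUTPUT_DIRECTORY:PATH=${CMAKE_RUNTIME_OUTPUT_DIRECTORY}
            -DCMAKE_LIBRARY_OUTPUT_DIRECTORY:PATH=${CMAKE_LIBRARY_OUTPUT_DIRECTORY}
            -DCMAKE_ARCHIVE_OUTPUT_DIRECTORY:PATH=${CMAKE_ARCHIVE_OUTPUT_DIRECTORY}
            -DCMAKE_PDB_OUTPUT_DIRECTORY:PATH=${CMAKE_PDB_OUTPUT_DIRECTORY}
    )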
diff --git a/config/cmake_ext_mod/HDFMacros.cmake b/config/cmake_ext_mod/HDFMacros.cmake
index 1d6b49a..48f57e6 100644
--- a/config/cmake_ext_mod/HDFMacros.cmake
+++ b/config/cmake_ext_mod/HDFMacros.cmake
@@ -1,7 +1,18 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
#-------------------------------------------------------------------------------
macro (SET_GLOBAL_VARIABLE name value)
set (${name} ${value} CACHE INTERNAL "Used to pass variables between directories" FORCE)
-endmacro (SET_GLOBAL_VARIABLE)
+endmacro ()
#-------------------------------------------------------------------------------
macro (IDE_GENERATED_PROPERTIES SOURCE_PATH HEADERS SOURCES)
@@ -14,7 +25,7 @@ macro (IDE_GENERATED_PROPERTIES SOURCE_PATH HEADERS SOURCES)
#set_property (SOURCE ${HEADERS}
# PROPERTY MACOSX_PACKAGE_LOCATION Headers/${NAME}
#)
-endmacro (IDE_GENERATED_PROPERTIES)
+endmacro ()
#-------------------------------------------------------------------------------
macro (IDE_SOURCE_PROPERTIES SOURCE_PATH HEADERS SOURCES)
@@ -31,45 +42,49 @@ macro (IDE_SOURCE_PROPERTIES SOURCE_PATH HEADERS SOURCES)
#set_property (SOURCE ${HEADERS}
# PROPERTY MACOSX_PACKAGE_LOCATION Headers/${NAME}
#)
-endmacro (IDE_SOURCE_PROPERTIES)
+endmacro ()
#-------------------------------------------------------------------------------
macro (TARGET_NAMING libtarget libtype)
if (${libtype} MATCHES "SHARED")
set_target_properties (${libtarget} PROPERTIES OUTPUT_NAME "${libtarget}${ARGN}")
- endif (${libtype} MATCHES "SHARED")
-endmacro (TARGET_NAMING)
+ endif ()
+endmacro ()
#-------------------------------------------------------------------------------
macro (INSTALL_TARGET_PDB libtarget targetdestination targetcomponent)
if (WIN32 AND MSVC)
- get_target_property (target_name ${libtarget} OUTPUT_NAME_RELWITHDEBINFO)
+ get_target_property (target_type ${libtarget} TYPE)
+    if (${target_type} MATCHES "SHARED")
+ set (targetfilename $<TARGET_PDB_FILE:${libtarget}>)
+ else ()
+ get_property (target_name TARGET ${libtarget} PROPERTY OUTPUT_NAME_RELWITHDEBINFO)
+ set (targetfilename $<TARGET_FILE_DIR:${libtarget}>/${target_name}.pdb)
+ endif ()
install (
FILES
- ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/${CMAKE_BUILD_TYPE}/${CMAKE_IMPORT_LIBRARY_PREFIX}${target_name}.pdb
+ ${targetfilename}
DESTINATION
${targetdestination}
CONFIGURATIONS RelWithDebInfo
COMPONENT ${targetcomponent}
)
- endif (WIN32 AND MSVC)
-endmacro (INSTALL_TARGET_PDB)
+ endif ()
+endmacro ()
#-------------------------------------------------------------------------------
macro (INSTALL_PROGRAM_PDB progtarget targetdestination targetcomponent)
if (WIN32 AND MSVC)
- get_target_property (target_name ${progtarget} OUTPUT_NAME_RELWITHDEBINFO)
- get_target_property (target_prefix ${progtarget} PREFIX)
install (
FILES
- ${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/${CMAKE_BUILD_TYPE}/${target_prefix}${target_name}.pdb
+ $<TARGET_PDB_FILE:${progtarget}>
DESTINATION
${targetdestination}
CONFIGURATIONS RelWithDebInfo
COMPONENT ${targetcomponent}
)
- endif (WIN32 AND MSVC)
-endmacro (INSTALL_PROGRAM_PDB)
+ endif ()
+endmacro ()
#-------------------------------------------------------------------------------
macro (HDF_SET_LIB_OPTIONS libtarget libname libtype)
@@ -77,19 +92,19 @@ macro (HDF_SET_LIB_OPTIONS libtarget libname libtype)
if (WIN32)
set (LIB_RELEASE_NAME "${libname}")
set (LIB_DEBUG_NAME "${libname}_D")
- else (WIN32)
+ else ()
set (LIB_RELEASE_NAME "${libname}")
set (LIB_DEBUG_NAME "${libname}_debug")
- endif (WIN32)
- else (${libtype} MATCHES "SHARED")
+ endif ()
+ else ()
if (WIN32)
set (LIB_RELEASE_NAME "lib${libname}")
set (LIB_DEBUG_NAME "lib${libname}_D")
- else (WIN32)
+ else ()
set (LIB_RELEASE_NAME "${libname}")
set (LIB_DEBUG_NAME "${libname}_debug")
- endif (WIN32)
- endif (${libtype} MATCHES "SHARED")
+ endif ()
+ endif ()
set_target_properties (${libtarget}
PROPERTIES
@@ -98,6 +113,18 @@ macro (HDF_SET_LIB_OPTIONS libtarget libname libtype)
OUTPUT_NAME_MINSIZEREL ${LIB_RELEASE_NAME}
OUTPUT_NAME_RELWITHDEBINFO ${LIB_RELEASE_NAME}
)
+ if (${libtype} MATCHES "STATIC")
+ if (WIN32)
+ set_target_properties (${libtarget}
+ PROPERTIES
+ COMPILE_PDB_NAME_DEBUG ${LIB_DEBUG_NAME}
+ COMPILE_PDB_NAME_RELEASE ${LIB_RELEASE_NAME}
+ COMPILE_PDB_NAME_MINSIZEREL ${LIB_RELEASE_NAME}
+ COMPILE_PDB_NAME_RELWITHDEBINFO ${LIB_RELEASE_NAME}
+ COMPILE_PDB_OUTPUT_DIRECTORY "${CMAKE_LIBRARY_OUTPUT_DIRECTORY}"
+ )
+ endif ()
+ endif ()
#----- Use MSVC Naming conventions for Shared Libraries
if (MINGW AND ${libtype} MATCHES "SHARED")
@@ -107,9 +134,8 @@ macro (HDF_SET_LIB_OPTIONS libtarget libname libtype)
IMPORT_PREFIX ""
PREFIX ""
)
- endif (MINGW AND ${libtype} MATCHES "SHARED")
-
-endmacro (HDF_SET_LIB_OPTIONS)
+ endif ()
+endmacro ()
#-------------------------------------------------------------------------------
macro (HDF_IMPORT_SET_LIB_OPTIONS libtarget libname libtype libversion)
@@ -117,12 +143,12 @@ macro (HDF_IMPORT_SET_LIB_OPTIONS libtarget libname libtype libversion)
if (${importtype} MATCHES "IMPORT")
set (importprefix "${CMAKE_STATIC_LIBRARY_PREFIX}")
- endif (${importtype} MATCHES "IMPORT")
+ endif ()
if (${CMAKE_BUILD_TYPE} MATCHES "Debug")
set (IMPORT_LIB_NAME ${LIB_DEBUG_NAME})
- else (${CMAKE_BUILD_TYPE} MATCHES "Debug")
+ else ()
set (IMPORT_LIB_NAME ${LIB_RELEASE_NAME})
- endif (${CMAKE_BUILD_TYPE} MATCHES "Debug")
+ endif ()
if (${libtype} MATCHES "SHARED")
if (WIN32)
@@ -131,91 +157,74 @@ macro (HDF_IMPORT_SET_LIB_OPTIONS libtarget libname libtype libversion)
IMPORTED_IMPLIB "${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/${IMPORT_LIB_NAME}.lib"
IMPORTED_LOCATION "${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/${IMPORT_LIB_NAME}${CMAKE_SHARED_LIBRARY_SUFFIX}"
)
- else (MINGW)
+ else ()
set_target_properties (${libtarget} PROPERTIES
IMPORTED_IMPLIB "${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/${CMAKE_BUILD_TYPE}/${CMAKE_IMPORT_LIBRARY_PREFIX}${IMPORT_LIB_NAME}${CMAKE_IMPORT_LIBRARY_SUFFIX}"
IMPORTED_LOCATION "${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/${CMAKE_BUILD_TYPE}/${CMAKE_IMPORT_LIBRARY_PREFIX}${IMPORT_LIB_NAME}${CMAKE_SHARED_LIBRARY_SUFFIX}"
- )
- endif (MINGW)
- else (WIN32)
+ )
+ endif ()
+ else ()
if (CYGWIN)
set_target_properties (${libtarget} PROPERTIES
IMPORTED_IMPLIB "${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/${CMAKE_IMPORT_LIBRARY_PREFIX}${IMPORT_LIB_NAME}${CMAKE_IMPORT_LIBRARY_SUFFIX}"
IMPORTED_LOCATION "${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/${CMAKE_IMPORT_LIBRARY_PREFIX}${IMPORT_LIB_NAME}${CMAKE_SHARED_LIBRARY_SUFFIX}"
)
- else (CYGWIN)
+ else ()
set_target_properties (${libtarget} PROPERTIES
IMPORTED_LOCATION "${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/${CMAKE_SHARED_LIBRARY_PREFIX}${IMPORT_LIB_NAME}${CMAKE_SHARED_LIBRARY_SUFFIX}"
IMPORTED_SONAME "${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/${CMAKE_SHARED_LIBRARY_PREFIX}${IMPORT_LIB_NAME}${CMAKE_SHARED_LIBRARY_SUFFIX}.${libversion}"
SOVERSION "${libversion}"
)
- endif (CYGWIN)
- endif (WIN32)
- else (${libtype} MATCHES "SHARED")
+ endif ()
+ endif ()
+ else ()
if (WIN32 AND NOT MINGW)
set_target_properties (${libtarget} PROPERTIES
IMPORTED_LOCATION "${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/${CMAKE_BUILD_TYPE}/${IMPORT_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX}"
IMPORTED_LINK_INTERFACE_LANGUAGES "C"
)
- else (WIN32 AND NOT MINGW)
+ else ()
set_target_properties (${libtarget} PROPERTIES
IMPORTED_LOCATION "${CMAKE_LIBRARY_OUTPUT_DIRECTORY}/${CMAKE_STATIC_LIBRARY_PREFIX}${IMPORT_LIB_NAME}${CMAKE_STATIC_LIBRARY_SUFFIX}"
IMPORTED_LINK_INTERFACE_LANGUAGES "C"
)
- endif (WIN32 AND NOT MINGW)
- endif (${libtype} MATCHES "SHARED")
-
-endmacro (HDF_IMPORT_SET_LIB_OPTIONS)
+ endif ()
+ endif ()
+endmacro ()
#-------------------------------------------------------------------------------
macro (TARGET_C_PROPERTIES wintarget libtype addcompileflags addlinkflags)
if (MSVC)
TARGET_MSVC_PROPERTIES (${wintarget} ${libtype} "${addcompileflags} ${WIN_COMPILE_FLAGS}" "${addlinkflags} ${WIN_LINK_FLAGS}")
- else (MSVC)
- set_target_properties (${wintarget}
- PROPERTIES
- COMPILE_FLAGS "${addcompileflags}"
- LINK_FLAGS "${addlinkflags}"
- )
- endif (MSVC)
-endmacro (TARGET_C_PROPERTIES)
+ else ()
+ set_target_properties (${wintarget} PROPERTIES COMPILE_FLAGS "${addcompileflags}" LINK_FLAGS "${addlinkflags}")
+ endif ()
+endmacro ()
#-------------------------------------------------------------------------------
macro (TARGET_MSVC_PROPERTIES wintarget libtype addcompileflags addlinkflags)
if (MSVC)
- set_target_properties (${wintarget}
- PROPERTIES
- COMPILE_FLAGS "${addcompileflags}"
- LINK_FLAGS "${addlinkflags}"
- )
- endif (MSVC)
-endmacro (TARGET_MSVC_PROPERTIES)
+ set_target_properties (${wintarget} PROPERTIES COMPILE_FLAGS "${addcompileflags}" LINK_FLAGS "${addlinkflags}")
+ endif ()
+endmacro ()
#-------------------------------------------------------------------------------
macro (TARGET_FORTRAN_PROPERTIES forttarget libtype addcompileflags addlinkflags)
if (WIN32)
TARGET_FORTRAN_WIN_PROPERTIES (${forttarget} ${libtype} "${addcompileflags} ${WIN_COMPILE_FLAGS}" "${addlinkflags} ${WIN_LINK_FLAGS}")
- endif (WIN32)
-endmacro (TARGET_FORTRAN_PROPERTIES)
+ endif ()
+endmacro ()
#-------------------------------------------------------------------------------
macro (TARGET_FORTRAN_WIN_PROPERTIES forttarget libtype addcompileflags addlinkflags)
if (MSVC)
if (${libtype} MATCHES "SHARED")
- set_target_properties (${forttarget}
- PROPERTIES
- COMPILE_FLAGS "/dll ${addcompileflags}"
- LINK_FLAGS "/SUBSYSTEM:CONSOLE ${addlinkflags}"
- )
- else (${libtype} MATCHES "SHARED")
- set_target_properties (${forttarget}
- PROPERTIES
- COMPILE_FLAGS "${addcompileflags}"
- LINK_FLAGS "/SUBSYSTEM:CONSOLE ${addlinkflags}"
- )
- endif (${libtype} MATCHES "SHARED")
- endif (MSVC)
-endmacro (TARGET_FORTRAN_WIN_PROPERTIES)
+ set_target_properties (${forttarget} PROPERTIES COMPILE_FLAGS "/dll ${addcompileflags}" LINK_FLAGS "/SUBSYSTEM:CONSOLE ${addlinkflags}")
+ else ()
+ set_target_properties (${forttarget} PROPERTIES COMPILE_FLAGS "${addcompileflags}" LINK_FLAGS "/SUBSYSTEM:CONSOLE ${addlinkflags}")
+ endif ()
+ endif ()
+endmacro ()
#-----------------------------------------------------------------------------
# Configure the README.txt file for the binary package
@@ -228,57 +237,59 @@ macro (HDF_README_PROPERTIES target_fortran)
set (BINARY_INSTALL_ENDING "msi")
if (CMAKE_CL_64)
set (BINARY_SYSTEM_NAME "win64")
- else (CMAKE_CL_64)
+ else ()
set (BINARY_SYSTEM_NAME "win32")
- endif (CMAKE_CL_64)
+ endif ()
if (${CMAKE_SYSTEM_VERSION} MATCHES "6.1")
set (BINARY_PLATFORM "${BINARY_PLATFORM} 7")
elseif (${CMAKE_SYSTEM_VERSION} MATCHES "6.2")
set (BINARY_PLATFORM "${BINARY_PLATFORM} 8")
elseif (${CMAKE_SYSTEM_VERSION} MATCHES "6.3")
set (BINARY_PLATFORM "${BINARY_PLATFORM} 10")
- endif (${CMAKE_SYSTEM_VERSION} MATCHES "6.1")
+ endif ()
set (BINARY_PLATFORM "${BINARY_PLATFORM} ${MSVC_C_ARCHITECTURE_ID}")
- if (${CMAKE_C_COMPILER_VERSION} MATCHES "16.*")
+ if (${CMAKE_C_COMPILER_VERSION} MATCHES "^16.*")
set (BINARY_PLATFORM "${BINARY_PLATFORM}, using VISUAL STUDIO 2010")
- elseif (${CMAKE_C_COMPILER_VERSION} MATCHES "15.*")
+ elseif (${CMAKE_C_COMPILER_VERSION} MATCHES "^15.*")
set (BINARY_PLATFORM "${BINARY_PLATFORM}, using VISUAL STUDIO 2008")
- elseif (${CMAKE_C_COMPILER_VERSION} MATCHES "17.*")
+ elseif (${CMAKE_C_COMPILER_VERSION} MATCHES "^17.*")
set (BINARY_PLATFORM "${BINARY_PLATFORM}, using VISUAL STUDIO 2012")
- elseif (${CMAKE_C_COMPILER_VERSION} MATCHES "18.*")
+ elseif (${CMAKE_C_COMPILER_VERSION} MATCHES "^18.*")
set (BINARY_PLATFORM "${BINARY_PLATFORM}, using VISUAL STUDIO 2013")
- elseif (${CMAKE_C_COMPILER_VERSION} MATCHES "19.*")
+ elseif (${CMAKE_C_COMPILER_VERSION} MATCHES "^19.*")
set (BINARY_PLATFORM "${BINARY_PLATFORM}, using VISUAL STUDIO 2015")
- else (${CMAKE_C_COMPILER_VERSION} MATCHES "16.*")
+ elseif (${CMAKE_C_COMPILER_VERSION} MATCHES "^20.*")
+ set (BINARY_PLATFORM "${BINARY_PLATFORM}, using VISUAL STUDIO 2017")
+ else ()
set (BINARY_PLATFORM "${BINARY_PLATFORM}, using VISUAL STUDIO ${CMAKE_C_COMPILER_VERSION}")
- endif (${CMAKE_C_COMPILER_VERSION} MATCHES "16.*")
+ endif ()
elseif (APPLE)
set (BINARY_EXAMPLE_ENDING "tar.gz")
set (BINARY_INSTALL_ENDING "dmg")
set (BINARY_PLATFORM "${BINARY_PLATFORM} ${CMAKE_SYSTEM_VERSION} ${CMAKE_SYSTEM_PROCESSOR}")
set (BINARY_PLATFORM "${BINARY_PLATFORM}, using ${CMAKE_C_COMPILER_ID} C ${CMAKE_C_COMPILER_VERSION}")
- else (WIN32)
+ else ()
set (BINARY_EXAMPLE_ENDING "tar.gz")
set (BINARY_INSTALL_ENDING "sh")
set (BINARY_PLATFORM "${BINARY_PLATFORM} ${CMAKE_SYSTEM_VERSION} ${CMAKE_SYSTEM_PROCESSOR}")
set (BINARY_PLATFORM "${BINARY_PLATFORM}, using ${CMAKE_C_COMPILER_ID} C ${CMAKE_C_COMPILER_VERSION}")
- endif (WIN32)
+ endif ()
if (target_fortran)
set (BINARY_PLATFORM "${BINARY_PLATFORM} / ${CMAKE_Fortran_COMPILER_ID} Fortran")
- endif (target_fortran)
+ endif ()
if (BUILD_SHARED_LIBS)
set (LIB_TYPE "Static and Shared")
- else (BUILD_SHARED_LIBS)
+ else ()
set (LIB_TYPE "Static")
- endif (BUILD_SHARED_LIBS)
+ endif ()
configure_file (
${HDF_RESOURCES_DIR}/README.txt.cmake.in
${CMAKE_BINARY_DIR}/README.txt @ONLY
)
-endmacro (HDF_README_PROPERTIES)
+endmacro ()
macro (HDFTEST_COPY_FILE src dest target)
add_custom_command(
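
The substantive change in HDFMacros.cmake is how Windows debug symbols are handled: INSTALL_TARGET_PDB and INSTALL_PROGRAM_PDB now rely on the $<TARGET_PDB_FILE:...> generator expression instead of reconstructing paths from CMAKE_BUILD_TYPE, and static libraries, which have no linker-generated .pdb, get COMPILE_PDB_NAME_* and COMPILE_PDB_OUTPUT_DIRECTORY properties so their compiler-generated .pdb has a predictable name and location. A minimal sketch of the generator-expression install for a hypothetical shared target named demo:

    if (WIN32 AND MSVC)
      # Resolves at generate time to the linker-produced .pdb of the target,
      # independent of the active configuration directory layout.
      install (
          FILES $<TARGET_PDB_FILE:demo>
          DESTINATION bin
          CONFIGURATIONS RelWithDebInfo
          COMPONENT applications
      )
    endif ()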
diff --git a/config/cmake_ext_mod/HDFTests.c b/config/cmake_ext_mod/HDFTests.c
index 648f795..60ac744 100644
--- a/config/cmake_ext_mod/HDFTests.c
+++ b/config/cmake_ext_mod/HDFTests.c
@@ -1,3 +1,14 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#define SIMPLE_TEST(x) int main(){ x; return 0; }
#ifdef HAVE_C99_DESIGNATED_INITIALIZER
diff --git a/config/cmake_ext_mod/HDFUseFortran.cmake b/config/cmake_ext_mod/HDFUseFortran.cmake
index 275f2ea..7bf4d81 100644
--- a/config/cmake_ext_mod/HDFUseFortran.cmake
+++ b/config/cmake_ext_mod/HDFUseFortran.cmake
@@ -1,4 +1,15 @@
#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
+#
# This file provides functions for Fortran support.
#
#-------------------------------------------------------------------------------
@@ -26,14 +37,14 @@ set (H5_FC_FUNC_ "H5_FC_FUNC_(name,NAME) ${CMAKE_MATCH_1}")
# The provided CMake Fortran macros don't provide a general check function
# so this one is used for a sizeof test.
#-----------------------------------------------------------------------------
-MACRO (CHECK_FORTRAN_FEATURE FUNCTION CODE VARIABLE)
+macro (CHECK_FORTRAN_FEATURE FUNCTION CODE VARIABLE)
message (STATUS "Testing Fortran ${FUNCTION}")
if (CMAKE_REQUIRED_LIBRARIES)
set (CHECK_FUNCTION_EXISTS_ADD_LIBRARIES
"-DLINK_LIBRARIES:STRING=${CMAKE_REQUIRED_LIBRARIES}")
- else (CMAKE_REQUIRED_LIBRARIES)
+ else ()
set (CHECK_FUNCTION_EXISTS_ADD_LIBRARIES)
- endif (CMAKE_REQUIRED_LIBRARIES)
+ endif ()
file (WRITE
${CMAKE_BINARY_DIR}${CMAKE_FILES_DIRECTORY}/CMakeTmp/testFortranCompiler.f90
"${CODE}"
@@ -63,8 +74,7 @@ MACRO (CHECK_FORTRAN_FEATURE FUNCTION CODE VARIABLE)
"Determining if the Fortran ${FUNCTION} exists failed with the following output:\n"
"${OUTPUT}\n\n")
endif ()
-
-ENDMACRO (CHECK_FORTRAN_FEATURE)
+endmacro ()
#-----------------------------------------------------------------------------
# Configure Checks which require Fortran compilation must go in here
@@ -75,7 +85,7 @@ ENDMACRO (CHECK_FORTRAN_FEATURE)
#-----------------------------------------------------------------------------
# Check for Non-standard extension intrinsic function SIZEOF
-set(FORTRAN_HAVE_SIZEOF FALSE)
+set (FORTRAN_HAVE_SIZEOF FALSE)
CHECK_FORTRAN_FEATURE(sizeof
"
PROGRAM main
@@ -86,7 +96,7 @@ CHECK_FORTRAN_FEATURE(sizeof
)
# Check for F2008 standard intrinsic function C_SIZEOF
-set(FORTRAN_HAVE_C_SIZEOF FALSE)
+set (FORTRAN_HAVE_C_SIZEOF FALSE)
CHECK_FORTRAN_FEATURE(c_sizeof
"
PROGRAM main
@@ -112,7 +122,7 @@ CHECK_FORTRAN_FEATURE(storage_size
)
# Check for F2008 standard intrinsic module "ISO_FORTRAN_ENV"
-set(HAVE_ISO_FORTRAN_ENV FALSE)
+set (HAVE_ISO_FORTRAN_ENV FALSE)
CHECK_FORTRAN_FEATURE(ISO_FORTRAN_ENV
"
PROGRAM main
@@ -122,7 +132,7 @@ CHECK_FORTRAN_FEATURE(ISO_FORTRAN_ENV
HAVE_ISO_FORTRAN_ENV
)
-set(FORTRAN_DEFAULT_REAL_NOT_DOUBLE FALSE)
+set (FORTRAN_DEFAULT_REAL_NOT_DOUBLE FALSE)
CHECK_FORTRAN_FEATURE(RealIsNotDouble
"
MODULE type_mod
@@ -152,7 +162,7 @@ CHECK_FORTRAN_FEATURE(RealIsNotDouble
#-----------------------------------------------------------------------------
# Checks if the ISO_C_BINDING module meets all the requirements
#-----------------------------------------------------------------------------
-set(FORTRAN_HAVE_ISO_C_BINDING FALSE)
+set (FORTRAN_HAVE_ISO_C_BINDING FALSE)
CHECK_FORTRAN_FEATURE(iso_c_binding
"
PROGRAM main
@@ -175,5 +185,5 @@ if (CMAKE_Fortran_COMPILER MATCHES ifort)
if (WIN32)
    set (CMAKE_Fortran_FLAGS_DEBUG "/debug:full /dbglibs " CACHE STRING "flags" FORCE)
    set (CMAKE_EXE_LINKER_FLAGS_DEBUG "/DEBUG" CACHE STRING "flags" FORCE)
- endif (WIN32)
-endif (CMAKE_Fortran_COMPILER MATCHES ifort)
+ endif ()
+endif ()
diff --git a/config/cmake_ext_mod/grepTest.cmake b/config/cmake_ext_mod/grepTest.cmake
index a090057..5b0e8fd 100644
--- a/config/cmake_ext_mod/grepTest.cmake
+++ b/config/cmake_ext_mod/grepTest.cmake
@@ -1,28 +1,39 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
# grepTest.cmake executes a command and captures the output in a file. File is then compared
# against a reference file. Exit status of command can also be compared.
# arguments checking
if (NOT TEST_PROGRAM)
message (FATAL_ERROR "Require TEST_PROGRAM to be defined")
-endif (NOT TEST_PROGRAM)
+endif ()
#if (NOT TEST_ARGS)
# message (STATUS "Require TEST_ARGS to be defined")
-#endif (NOT TEST_ARGS)
+#endif ()
if (NOT TEST_FOLDER)
message ( FATAL_ERROR "Require TEST_FOLDER to be defined")
-endif (NOT TEST_FOLDER)
+endif ()
if (NOT TEST_OUTPUT)
message (FATAL_ERROR "Require TEST_OUTPUT to be defined")
-endif (NOT TEST_OUTPUT)
+endif ()
#if (NOT TEST_EXPECT)
# message (STATUS "Require TEST_EXPECT to be defined")
-#endif (NOT TEST_EXPECT)
+#endif ()
if (NOT TEST_FILTER)
message (STATUS "Require TEST_FILTER to be defined")
-endif (NOT TEST_FILTER)
+endif ()
if (NOT TEST_REFERENCE)
message (FATAL_ERROR "Require TEST_REFERENCE to be defined")
-endif (NOT TEST_REFERENCE)
+endif ()
message (STATUS "COMMAND: ${TEST_PROGRAM} ${TEST_ARGS}")
@@ -48,7 +59,7 @@ string (REGEX MATCH "${TEST_REFERENCE}" TEST_MATCH ${TEST_STREAM})
string (COMPARE EQUAL "${TEST_REFERENCE}" "${TEST_MATCH}" TEST_RESULT)
if (${TEST_RESULT} STREQUAL "0")
message (FATAL_ERROR "Failed: The output of ${TEST_PROGRAM} did not contain ${TEST_REFERENCE}")
-endif (${TEST_RESULT} STREQUAL "0")
+endif ()
string (REGEX MATCH "${TEST_FILTER}" TEST_MATCH ${TEST_STREAM})
if (${TEST_EXPECT} STREQUAL "1")
@@ -56,8 +67,8 @@ if (${TEST_EXPECT} STREQUAL "1")
string (LENGTH "${TEST_MATCH}" TEST_RESULT)
if (NOT ${TEST_RESULT} STREQUAL "0")
message (FATAL_ERROR "Failed: The output of ${TEST_PROGRAM} did contain ${TEST_FILTER}")
- endif (NOT ${TEST_RESULT} STREQUAL "0")
-endif (${TEST_EXPECT} STREQUAL "1")
+ endif ()
+endif ()
# everything went fine...
message ("Passed: The output of ${TEST_PROGRAM} matched")
diff --git a/config/cmake_ext_mod/prunTest.cmake b/config/cmake_ext_mod/prunTest.cmake
deleted file mode 100644
index 38ecb7e..0000000
--- a/config/cmake_ext_mod/prunTest.cmake
+++ /dev/null
@@ -1,145 +0,0 @@
-# runTest.cmake executes a command and captures the output in a file. File is then compared
-# against a reference file. Exit status of command can also be compared.
-cmake_policy(SET CMP0007 NEW)
-
-# arguments checking
-if (NOT TEST_PROGRAM)
- message (FATAL_ERROR "Require TEST_PROGRAM to be defined")
-endif (NOT TEST_PROGRAM)
-#if (NOT TEST_ARGS)
-# message (STATUS "Require TEST_ARGS to be defined")
-#endif (NOT TEST_ARGS)
-if (NOT TEST_FOLDER)
- message ( FATAL_ERROR "Require TEST_FOLDER to be defined")
-endif (NOT TEST_FOLDER)
-if (NOT TEST_OUTPUT)
- message (FATAL_ERROR "Require TEST_OUTPUT to be defined")
-endif (NOT TEST_OUTPUT)
-#if (NOT TEST_EXPECT)
-# message (STATUS "Require TEST_EXPECT to be defined")
-#endif (NOT TEST_EXPECT)
-#if (NOT TEST_FILTER)
-# message (STATUS "Require TEST_FILTER to be defined")
-#endif (NOT TEST_FILTER)
-if (NOT TEST_SKIP_COMPARE AND NOT TEST_REFERENCE)
- message (FATAL_ERROR "Require TEST_REFERENCE to be defined")
-endif (NOT TEST_SKIP_COMPARE AND NOT TEST_REFERENCE)
-
-set (ERROR_APPEND 1)
-
-message (STATUS "COMMAND: ${TEST_PROGRAM} ${TEST_ARGS}")
-
-if (TEST_ENV_VAR)
- set (ENV{${TEST_ENV_VAR}} "${TEST_ENV_VALUE}")
-endif (TEST_ENV_VAR)
-
-# run the test program, capture the stdout/stderr and the result var
-EXECUTE_PROCESS (
- COMMAND ${TEST_PROGRAM} ${TEST_ARGS}
- WORKING_DIRECTORY ${TEST_FOLDER}
- RESULT_VARIABLE TEST_RESULT
- OUTPUT_FILE ${TEST_OUTPUT}
- ERROR_FILE ${TEST_OUTPUT}.err
- OUTPUT_VARIABLE TEST_OUT
- ERROR_VARIABLE TEST_ERROR
-)
-
-message (STATUS "COMMAND Result: ${TEST_RESULT}")
-
-file (READ ${TEST_FOLDER}/${TEST_REFERENCE} TEST_STREAM)
-file (WRITE ${TEST_FOLDER}/P_${TEST_REFERENCE} "${TEST_STREAM}")
-
-if (ERROR_APPEND AND EXISTS ${TEST_FOLDER}/${TEST_OUTPUT}.err)
- file (READ ${TEST_FOLDER}/${TEST_OUTPUT}.err TEST_STREAM)
- file (APPEND ${TEST_FOLDER}/${TEST_OUTPUT} "${TEST_STREAM}")
-endif (ERROR_APPEND AND EXISTS ${TEST_FOLDER}/${TEST_OUTPUT}.err)
-
-if (TEST_APPEND)
- file (APPEND ${TEST_FOLDER}/${TEST_OUTPUT} "${TEST_APPEND} ${TEST_ERROR}\n")
-endif (TEST_APPEND)
-
-message (STATUS "COMMAND Error: ${TEST_ERROR}")
-
-if (TEST_MASK)
- file (READ ${TEST_FOLDER}/${TEST_OUTPUT} TEST_STREAM)
- STRING(REGEX REPLACE "Storage:[^\n]+\n" "Storage: <details removed for portability>\n" TEST_STREAM "${TEST_STREAM}")
- file (WRITE ${TEST_FOLDER}/${TEST_OUTPUT} "${TEST_STREAM}")
-endif (TEST_MASK)
-
-if (TEST_MASK_MOD)
- file (READ ${TEST_FOLDER}/${TEST_OUTPUT} TEST_STREAM)
- STRING(REGEX REPLACE "Modified:[^\n]+\n" "Modified: XXXX-XX-XX XX:XX:XX XXX\n" TEST_STREAM "${TEST_STREAM}")
- file (WRITE ${TEST_FOLDER}/${TEST_OUTPUT} "${TEST_STREAM}")
-endif (TEST_MASK_MOD)
-
-if (TEST_MASK_ERROR)
- file (READ ${TEST_FOLDER}/${TEST_OUTPUT} TEST_STREAM)
- STRING(REGEX REPLACE "thread [0-9]*:" "thread (IDs):" TEST_STREAM "${TEST_STREAM}")
- STRING(REGEX REPLACE ": ([^\n]*)[.]c " ": (file name) " TEST_STREAM "${TEST_STREAM}")
- STRING(REGEX REPLACE " line [0-9]*" " line (number)" TEST_STREAM "${TEST_STREAM}")
- STRING(REGEX REPLACE "v[1-9]*[.][0-9]*[.]" "version (number)." TEST_STREAM "${TEST_STREAM}")
- STRING(REGEX REPLACE "[1-9]*[.][0-9]*[.][0-9]*[^)]*" "version (number)" TEST_STREAM "${TEST_STREAM}")
- STRING(REGEX REPLACE "H5Eget_auto[1-2]*" "H5Eget_auto(1 or 2)" TEST_STREAM "${TEST_STREAM}")
- STRING(REGEX REPLACE "H5Eset_auto[1-2]*" "H5Eset_auto(1 or 2)" TEST_STREAM "${TEST_STREAM}")
- file (WRITE ${TEST_FOLDER}/${TEST_OUTPUT} "${TEST_STREAM}")
-endif (TEST_MASK_ERROR)
-
-if (TEST_FILTER)
- file (READ ${TEST_FOLDER}/${TEST_OUTPUT} TEST_STREAM)
- STRING(REGEX REPLACE "${TEST_FILTER}" "" TEST_STREAM "${TEST_STREAM}")
- file (WRITE ${TEST_FOLDER}/${TEST_OUTPUT} "${TEST_STREAM}")
-endif (TEST_FILTER)
-
-#if (TEST_REF_FILTER)
-# message (STATUS "TEST_REF_FILTER: ${TEST_APPEND}${TEST_REF_FILTER}")
-# file (READ ${TEST_FOLDER}/P_${TEST_REFERENCE} TEST_STREAM)
-# STRING(REGEX REPLACE "${TEST_APPEND}" "${TEST_REF_FILTER}" TEST_STREAM "${TEST_STREAM}")
-# file (WRITE ${TEST_FOLDER}/P_${TEST_REFERENCE} "${TEST_STREAM}")
-#endif (TEST_REF_FILTER)
-
-if (NOT TEST_SKIP_COMPARE)
- if (WIN32 AND NOT MINGW)
- file (READ ${TEST_FOLDER}/P_${TEST_REFERENCE} TEST_STREAM)
- file (WRITE ${TEST_FOLDER}/P_${TEST_REFERENCE} "${TEST_STREAM}")
- endif (WIN32 AND NOT MINGW)
-
- # now compare the output with the reference
- EXECUTE_PROCESS (
- COMMAND ${CMAKE_COMMAND} -E compare_files ${TEST_FOLDER}/${TEST_OUTPUT} ${TEST_FOLDER}/P_${TEST_REFERENCE}
- RESULT_VARIABLE TEST_RESULT
- )
- if (NOT ${TEST_RESULT} STREQUAL 0)
- set (TEST_RESULT 0)
- file (STRINGS ${TEST_FOLDER}/${TEST_OUTPUT} test_act)
- LIST (LENGTH test_act len_act)
- file (STRINGS ${TEST_FOLDER}/P_${TEST_REFERENCE} test_ref)
- LIST (LENGTH test_ref len_ref)
- if (NOT ${len_act} STREQUAL "0")
- MATH (EXPR _FP_LEN "${len_ref} - 1")
- foreach (line RANGE 0 ${_FP_LEN})
- LIST (GET test_act ${line} str_act)
- LIST (GET test_ref ${line} str_ref)
- if (NOT "${str_act}" STREQUAL "${str_ref}")
- if (NOT "${str_act}" STREQUAL "")
- set (TEST_RESULT 1)
- message ("line = ${line}\n***ACTUAL: ${str_act}\n****REFER: ${str_ref}\n")
- endif (NOT "${str_act}" STREQUAL "")
- endif (NOT "${str_act}" STREQUAL "${str_ref}")
- endforeach (line RANGE 0 ${_FP_LEN})
- endif (NOT ${len_act} STREQUAL "0")
- if (NOT ${len_act} STREQUAL ${len_ref})
- set (TEST_RESULT 1)
- endif (NOT ${len_act} STREQUAL ${len_ref})
- endif (NOT ${TEST_RESULT} STREQUAL 0)
-
- message (STATUS "COMPARE Result: ${TEST_RESULT}")
-
- # again, if return value is !=0 scream and shout
- if (NOT ${TEST_RESULT} STREQUAL 0)
- message (FATAL_ERROR "Failed: The output of ${TEST_OUTPUT} did not match P_${TEST_REFERENCE}")
- endif (NOT ${TEST_RESULT} STREQUAL 0)
-endif (NOT TEST_SKIP_COMPARE)
-
-# everything went fine...
-message ("Passed: The output of ${TEST_PROGRAM} matches P_${TEST_REFERENCE}")
-
diff --git a/config/cmake_ext_mod/runTest.cmake b/config/cmake_ext_mod/runTest.cmake
index 48e9d03..3a02614 100644
--- a/config/cmake_ext_mod/runTest.cmake
+++ b/config/cmake_ext_mod/runTest.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
# runTest.cmake executes a command and captures the output in a file. File is then compared
# against a reference file. Exit status of command can also be compared.
cmake_policy(SET CMP0007 NEW)
@@ -6,9 +17,6 @@ cmake_policy(SET CMP0007 NEW)
if (NOT TEST_PROGRAM)
message (FATAL_ERROR "Require TEST_PROGRAM to be defined")
endif ()
-#if (NOT TEST_ARGS)
-# message (STATUS "Require TEST_ARGS to be defined")
-#endif ()
if (NOT TEST_FOLDER)
message ( FATAL_ERROR "Require TEST_FOLDER to be defined")
endif ()
@@ -18,20 +26,17 @@ endif ()
if (NOT TEST_EXPECT)
message (STATUS "Require TEST_EXPECT to be defined")
endif ()
-#if (NOT TEST_FILTER)
-# message (STATUS "Require TEST_FILTER to be defined")
-#endif ()
if (NOT TEST_SKIP_COMPARE AND NOT TEST_REFERENCE)
message (FATAL_ERROR "Require TEST_REFERENCE to be defined")
-endif (NOT TEST_SKIP_COMPARE AND NOT TEST_REFERENCE)
+endif ()
if (EXISTS ${TEST_FOLDER}/${TEST_OUTPUT})
file (REMOVE ${TEST_FOLDER}/${TEST_OUTPUT})
-endif (EXISTS ${TEST_FOLDER}/${TEST_OUTPUT})
+endif ()
if (EXISTS ${TEST_FOLDER}/${TEST_OUTPUT}.err)
file (REMOVE ${TEST_FOLDER}/${TEST_OUTPUT}.err)
-endif (EXISTS ${TEST_FOLDER}/${TEST_OUTPUT}.err)
+endif ()
# if there is not an error reference file add the error output to the stdout file
if (NOT TEST_ERRREF)
@@ -40,6 +45,14 @@ endif ()
message (STATUS "COMMAND: ${TEST_PROGRAM} ${TEST_ARGS}")
+if (TEST_LIBRARY_DIRECTORY)
+ if (WIN32 AND NOT MINGW)
+ set (ENV{PATH} "$ENV{PATH};${TEST_LIBRARY_DIRECTORY}")
+ else ()
+ set (ENV{LD_LIBRARY_PATH} "$ENV{LD_LIBRARY_PATH}:${TEST_LIBRARY_DIRECTORY}")
+ endif ()
+endif ()
+
if (TEST_ENV_VAR)
set (ENV{${TEST_ENV_VAR}} "${TEST_ENV_VALUE}")
endif ()
@@ -69,6 +82,16 @@ else ()
)
endif ()
+if (TEST_REGEX)
+ # TEST_REGEX should always be matched
+ file (READ ${TEST_FOLDER}/${TEST_OUTPUT} TEST_STREAM)
+ string (REGEX MATCH "${TEST_REGEX}" REGEX_MATCH ${TEST_STREAM})
+ string (COMPARE EQUAL "${REGEX_MATCH}" "${TEST_MATCH}" REGEX_RESULT)
+ if (${REGEX_RESULT} STREQUAL "0")
+ message (STATUS "Failed: The output of ${TEST_PROGRAM} did not contain ${TEST_MATCH}")
+ endif ()
+endif ()
+
message (STATUS "COMMAND Result: ${TEST_RESULT}")
# if the .err file exists and ERRROR_APPEND is enabled
@@ -84,7 +107,13 @@ endif ()
# if the return value is !=${TEST_EXPECT} bail out
if (NOT ${TEST_RESULT} STREQUAL ${TEST_EXPECT})
- message ( FATAL_ERROR "Failed: Test program ${TEST_PROGRAM} exited != ${TEST_EXPECT}.\n${TEST_ERROR}")
+ if (NOT TEST_NOERRDISPLAY)
+ if (EXISTS ${TEST_FOLDER}/${TEST_OUTPUT})
+ file (READ ${TEST_FOLDER}/${TEST_OUTPUT} TEST_STREAM)
+ message (STATUS "Output :\n${TEST_STREAM}")
+ endif ()
+ endif ()
+ message (FATAL_ERROR "Failed: Test program ${TEST_PROGRAM} exited != ${TEST_EXPECT}.\n${TEST_ERROR}")
endif ()
message (STATUS "COMMAND Error: ${TEST_ERROR}")
@@ -125,15 +154,22 @@ if (TEST_MASK_ERROR)
else ()
file (WRITE ${TEST_FOLDER}/${TEST_OUTPUT}.err "${TEST_STREAM}")
endif ()
-endif (TEST_MASK_ERROR)
+endif ()
# remove text from the output file
if (TEST_FILTER)
file (READ ${TEST_FOLDER}/${TEST_OUTPUT} TEST_STREAM)
- string (REGEX REPLACE "${TEST_FILTER}" "" TEST_STREAM "${TEST_STREAM}")
+ string (REGEX REPLACE "${TEST_FILTER}" "${TEST_FILTER_REPLACE}" TEST_STREAM "${TEST_STREAM}")
file (WRITE ${TEST_FOLDER}/${TEST_OUTPUT} "${TEST_STREAM}")
endif ()
+if (TEST_REF_FILTER)
+ #message (STATUS "TEST_REF_FILTER: ${TEST_APPEND}${TEST_REF_FILTER}")
+ file (READ ${TEST_FOLDER}/${TEST_REFERENCE} TEST_STREAM)
+  string (REGEX REPLACE "${TEST_REF_APPEND}" "${TEST_REF_FILTER}" TEST_STREAM "${TEST_STREAM}")
+ file (WRITE ${TEST_FOLDER}/${TEST_REFERENCE} "${TEST_STREAM}")
+endif ()
+
# compare output files to references unless this must be skipped
if (NOT TEST_SKIP_COMPARE)
if (WIN32 AND NOT MINGW)
@@ -141,11 +177,22 @@ if (NOT TEST_SKIP_COMPARE)
file (WRITE ${TEST_FOLDER}/${TEST_REFERENCE} "${TEST_STREAM}")
endif ()
- # now compare the output with the reference
- execute_process (
- COMMAND ${CMAKE_COMMAND} -E compare_files ${TEST_FOLDER}/${TEST_OUTPUT} ${TEST_FOLDER}/${TEST_REFERENCE}
- RESULT_VARIABLE TEST_RESULT
- )
+ if (NOT TEST_SORT_COMPARE)
+ # now compare the output with the reference
+ execute_process (
+ COMMAND ${CMAKE_COMMAND} -E compare_files ${TEST_FOLDER}/${TEST_OUTPUT} ${TEST_FOLDER}/${TEST_REFERENCE}
+ RESULT_VARIABLE TEST_RESULT
+ )
+ else ()
+ file (STRINGS ${TEST_FOLDER}/${TEST_OUTPUT} v1)
+ file (STRINGS ${TEST_FOLDER}/${TEST_REFERENCE} v2)
+ list (SORT v1)
+ list (SORT v2)
+ if (NOT v1 STREQUAL v2)
+ set(TEST_RESULT 1)
+ endif ()
+ endif ()
+
if (NOT ${TEST_RESULT} STREQUAL 0)
set (TEST_RESULT 0)
file (STRINGS ${TEST_FOLDER}/${TEST_OUTPUT} test_act)
@@ -175,7 +222,7 @@ if (NOT TEST_SKIP_COMPARE)
if (NOT ${len_act} STREQUAL ${len_ref})
set (TEST_RESULT 1)
endif ()
- endif (NOT ${TEST_RESULT} STREQUAL 0)
+ endif ()
message (STATUS "COMPARE Result: ${TEST_RESULT}")
@@ -214,7 +261,7 @@ if (NOT TEST_SKIP_COMPARE)
message ("line = ${line}\n***ACTUAL: ${str_act}\n****REFER: ${str_ref}\n")
endif ()
endif ()
- endforeach (line RANGE 0 ${_FP_LEN})
+ endforeach ()
else ()
if (${len_act} STREQUAL "0")
message (STATUS "COMPARE Failed: ${TEST_FOLDER}/${TEST_OUTPUT}.err is empty")
@@ -222,7 +269,7 @@ if (NOT TEST_SKIP_COMPARE)
if (${len_ref} STREQUAL "0")
message (STATUS "COMPARE Failed: ${TEST_FOLDER}/${TEST_ERRREF} is empty")
endif ()
- endif()
+ endif ()
if (NOT ${len_act} STREQUAL ${len_ref})
set (TEST_RESULT 1)
endif ()
@@ -234,8 +281,8 @@ if (NOT TEST_SKIP_COMPARE)
if (NOT ${TEST_RESULT} STREQUAL 0)
message (FATAL_ERROR "Failed: The error output of ${TEST_OUTPUT}.err did not match ${TEST_ERRREF}")
endif ()
- endif (TEST_ERRREF)
-endif (NOT TEST_SKIP_COMPARE)
+ endif ()
+endif ()
# everything went fine...
message ("Passed: The output of ${TEST_PROGRAM} matches ${TEST_REFERENCE}")
diff --git a/config/commence.am b/config/commence.am
index 1a26a85..5fe21a6 100644
--- a/config/commence.am
+++ b/config/commence.am
@@ -7,12 +7,10 @@
##
## This file is part of HDF5. The full HDF5 copyright notice, including
## terms governing use, modification, and redistribution, is contained in
-## the files COPYING and Copyright.html. COPYING can be found at the root
-## of the source code distribution tree; Copyright.html can be found at the
-## root level of an installed copy of the electronic HDF5 document set and
-## is linked from the top-level documents page. It can also be found at
-## http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-## access to either file, you may request a copy from help@hdfgroup.org.
+## the COPYING file, which can be found at the root of the source code
+## distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+## If you do not have access to either file, you may request a copy from
+## help@hdfgroup.org.
## Textually included in the beginning of every HDF5 Makefile.am
diff --git a/config/conclude.am b/config/conclude.am
index 1ba56fb..55b805a 100644
--- a/config/conclude.am
+++ b/config/conclude.am
@@ -7,28 +7,28 @@
##
## This file is part of HDF5. The full HDF5 copyright notice, including
## terms governing use, modification, and redistribution, is contained in
-## the files COPYING and Copyright.html. COPYING can be found at the root
-## of the source code distribution tree; Copyright.html can be found at the
-## root level of an installed copy of the electronic HDF5 document set and
-## is linked from the top-level documents page. It can also be found at
-## http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-## access to either file, you may request a copy from help@hdfgroup.org.
+## the COPYING file, which can be found at the root of the source code
+## distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+## If you do not have access to either file, you may request a copy from
+## help@hdfgroup.org.
## Textually included at the end of most HDF5 Makefiles.am.
## Contains build rules.
-# Automake needs to be taught how to build lib, progs, and tests targets.
+# Automake needs to be taught how to build lib, dyn, progs, and tests targets.
# These will be filled in automatically for the most part (e.g.,
# lib_LIBRARIES are built for lib target), but EXTRA_LIB, EXTRA_PROG, and
# EXTRA_TEST variables are supplied to allow the user to force targets to
-# be built at certain times.
+# be built at certain times.
LIB = $(lib_LIBRARIES) $(lib_LTLIBRARIES) $(noinst_LIBRARIES) \
$(noinst_LTLIBRARIES) $(check_LIBRARIES) $(check_LTLIBRARIES) $(EXTRA_LIB)
+DYN = $(dyn_LTLIBRARIES)
PROGS = $(bin_PROGRAMS) $(bin_SCRIPTS) $(noinst_PROGRAMS) $(noinst_SCRIPTS) \
$(EXTRA_PROG)
chk_TESTS = $(check_PROGRAMS) $(check_SCRIPTS) $(EXTRA_TEST)
TESTS = $(TEST_PROG) $(TEST_SCRIPT) $(EXTRA_TEST)
+dyndir=$(libdir)
TEST_EXTENSIONS = .sh
SH_LOG_COMPILER = $(SHELL)
@@ -36,6 +36,7 @@ AM_SH_LOG_FLAGS =
# lib/progs/tests targets recurse into subdirectories. build-* targets
# build files in this directory.
+build-dyn: $(DYN)
build-lib: $(LIB)
build-progs: $(LIB) $(PROGS)
build-tests: $(LIB) $(PROGS) $(chk_TESTS)
@@ -43,7 +44,7 @@ build-tests: $(LIB) $(PROGS) $(chk_TESTS)
# General rule for recursive building targets.
# BUILT_SOURCES contain targets that need to be built before anything else
# in the directory (e.g., for Fortran type detection)
-lib progs tests check-s check-p :: $(BUILT_SOURCES)
+lib dyn progs tests check-s check-p :: $(BUILT_SOURCES)
@$(MAKE) $(AM_MAKEFLAGS) build-$@ || exit 1;
@for d in X $(SUBDIRS); do \
if test $$d != X && test $$d != .; then \
diff --git a/config/conclude_fc.am b/config/conclude_fc.am
index 6402412..15d04a7 100644
--- a/config/conclude_fc.am
+++ b/config/conclude_fc.am
@@ -7,12 +7,10 @@
##
## This file is part of HDF5. The full HDF5 copyright notice, including
## terms governing use, modification, and redistribution, is contained in
-## the files COPYING and Copyright.html. COPYING can be found at the root
-## of the source code distribution tree; Copyright.html can be found at the
-## root level of an installed copy of the electronic HDF5 document set and
-## is linked from the top-level documents page. It can also be found at
-## http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-## access to either file, you may request a copy from help@hdfgroup.org.
+## the COPYING file, which can be found at the root of the source code
+## distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+## If you do not have access to either file, you may request a copy from
+## help@hdfgroup.org.
## Textually included at the end of the Fortran HDF5 Makefiles.am.
diff --git a/config/cygwin b/config/cygwin
index 7423403..6ead871 100644
--- a/config/cygwin
+++ b/config/cygwin
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
# This file is part of the HDF5 build script. It is processed shortly
diff --git a/config/examples.am b/config/examples.am
index 8597a16..247dfa8 100644
--- a/config/examples.am
+++ b/config/examples.am
@@ -7,12 +7,10 @@
##
## This file is part of HDF5. The full HDF5 copyright notice, including
## terms governing use, modification, and redistribution, is contained in
-## the files COPYING and Copyright.html. COPYING can be found at the root
-## of the source code distribution tree; Copyright.html can be found at the
-## root level of an installed copy of the electronic HDF5 document set and
-## is linked from the top-level documents page. It can also be found at
-## http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-## access to either file, you may request a copy from help@hdfgroup.org.
+## the COPYING file, which can be found at the root of the source code
+## distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+## If you do not have access to either file, you may request a copy from
+## help@hdfgroup.org.
## Textually included near the end of HDF5 Makefiles in example directories.
## Contains boilerplate for building, installing, and cleaning example
diff --git a/config/freebsd b/config/freebsd
index e42ca60..936c29f 100644
--- a/config/freebsd
+++ b/config/freebsd
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
# This file is part of the HDF5 build script. It is processed shortly
diff --git a/config/gnu-cxxflags b/config/gnu-cxxflags
index 46fd048..e0f2999 100644
--- a/config/gnu-cxxflags
+++ b/config/gnu-cxxflags
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
# This file should be sourced into configure if the compiler is the
diff --git a/config/gnu-fflags b/config/gnu-fflags
index 4585735..e92e054 100644
--- a/config/gnu-fflags
+++ b/config/gnu-fflags
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
# This file should be sourced into configure if the compiler is the
diff --git a/config/gnu-flags b/config/gnu-flags
index 0b5283c..44ed480 100644
--- a/config/gnu-flags
+++ b/config/gnu-flags
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
# This file should be sourced into configure if the compiler is the
@@ -94,7 +92,7 @@ case "$cc_vendor-$cc_version" in
esac
# General
- H5_CFLAGS="$H5_CFLAGS $arch -std=c99 -pedantic -Wall -W -Wundef -Wshadow -Wpointer-arith -Wbad-function-cast -Wcast-qual -Wcast-align -Wwrite-strings -Wconversion -Waggregate-return -Wstrict-prototypes -Wmissing-prototypes -Wmissing-declarations -Wredundant-decls -Wnested-externs -Winline"
+ H5_CFLAGS="$H5_CFLAGS $arch -std=c99 -pedantic -Wall -W -Wundef -Wshadow -Wpointer-arith -Wbad-function-cast -Wcast-qual -Wcast-align -Wwrite-strings -Wconversion -Wstrict-prototypes -Wmissing-prototypes -Wmissing-declarations -Wredundant-decls -Wnested-externs"
# Production
# NDEBUG is handled explicitly by the configure script
@@ -125,6 +123,10 @@ case "$cc_vendor-$cc_version" in
esac
#DEBUG_CFLAGS="-fsanitize=undefined"
+ # Developer warnings (suggestions from gcc, not code problems)
+ DEVELOPER_WARNING_CFLAGS="-Winline -Waggregate-return"
+ NO_DEVELOPER_WARNING_CFLAGS="-Wno-inline -Wno-aggregate-return"
+
# Symbols
NO_SYMBOLS_CFLAGS="-s"
SYMBOLS_CFLAGS="-g"
@@ -183,7 +185,9 @@ case "$cc_vendor-$cc_version" in
H5_CFLAGS="$H5_CFLAGS -Wfloat-equal -Wmissing-format-attribute"
# Append warning flags from gcc-3.2* case
- H5_CFLAGS="$H5_CFLAGS -Wmissing-noreturn -Wpacked -Wdisabled-optimization"
+ H5_CFLAGS="$H5_CFLAGS -Wpacked -Wdisabled-optimization"
+ DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wmissing-noreturn"
+ NO_DEVELOPER_WARNING_CFLAGS="$NO_DEVELOPER_WARNING_CFLAGS -Wno-missing-noreturn"
# Enable more format checking flags, beyond the basic -Wformat included
# in -Wall
@@ -223,13 +227,18 @@ case "$cc_vendor-$cc_version" in
H5_CFLAGS="$H5_CFLAGS -Wstrict-overflow=5 -Wjump-misses-init -Wunsuffixed-float-constants"
# Append more extra warning flags that only gcc 4.6+ know about
- H5_CFLAGS="$H5_CFLAGS -Wdouble-promotion -Wsuggest-attribute=const -Wtrampolines"
+ H5_CFLAGS="$H5_CFLAGS -Wdouble-promotion -Wtrampolines"
+ DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wsuggest-attribute=const"
+ NO_DEVELOPER_WARNING_CFLAGS="$NO_DEVELOPER_WARNING_CFLAGS -Wno-suggest-attribute=const"
# Append more extra warning flags that only gcc 4.7+ know about
- H5_CFLAGS="$H5_CFLAGS -Wstack-usage=8192 -Wvector-operation-performance -Wsuggest-attribute=pure -Wsuggest-attribute=noreturn"
+ H5_CFLAGS="$H5_CFLAGS -Wstack-usage=8192 -Wvector-operation-performance"
+ DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wsuggest-attribute=pure -Wsuggest-attribute=noreturn"
+ NO_DEVELOPER_WARNING_CFLAGS="$NO_DEVELOPER_WARNING_CFLAGS -Wno-suggest-attribute=pure -Wno-suggest-attribute=noreturn"
# Append more extra warning flags that only gcc 4.8+ know about
- H5_CFLAGS="$H5_CFLAGS -Wsuggest-attribute=format"
+ DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wsuggest-attribute=format"
+ NO_DEVELOPER_WARNING_CFLAGS="$NO_DEVELOPER_WARNING_CFLAGS -Wno-suggest-attribute=format"
# Append more extra warning flags that only gcc 4.9+ know about
H5_CFLAGS="$H5_CFLAGS -Wdate-time"
@@ -257,7 +266,9 @@ case "$cc_vendor-$cc_version" in
H5_CFLAGS="$H5_CFLAGS -Wfloat-equal -Wmissing-format-attribute"
# Append warning flags from gcc-3.2* case
- H5_CFLAGS="$H5_CFLAGS -Wmissing-noreturn -Wpacked -Wdisabled-optimization"
+ H5_CFLAGS="$H5_CFLAGS -Wpacked -Wdisabled-optimization"
+ DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wmissing-noreturn"
+ NO_DEVELOPER_WARNING_CFLAGS="$NO_DEVELOPER_WARNING_CFLAGS -Wno-missing-noreturn"
# Enable more format checking flags, beyond the basic -Wformat included
# in -Wall
@@ -297,13 +308,18 @@ case "$cc_vendor-$cc_version" in
H5_CFLAGS="$H5_CFLAGS -Wstrict-overflow=5 -Wjump-misses-init -Wunsuffixed-float-constants"
# Append more extra warning flags that only gcc 4.6+ know about
- H5_CFLAGS="$H5_CFLAGS -Wdouble-promotion -Wsuggest-attribute=const -Wtrampolines"
+ H5_CFLAGS="$H5_CFLAGS -Wdouble-promotion -Wtrampolines"
+ DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wsuggest-attribute=const"
+ NO_DEVELOPER_WARNING_CFLAGS="$NO_DEVELOPER_WARNING_CFLAGS -Wno-suggest-attribute=const"
# Append more extra warning flags that only gcc 4.7+ know about
- H5_CFLAGS="$H5_CFLAGS -Wstack-usage=8192 -Wvector-operation-performance -Wsuggest-attribute=pure -Wsuggest-attribute=noreturn"
+ H5_CFLAGS="$H5_CFLAGS -Wstack-usage=8192 -Wvector-operation-performance"
+ DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wsuggest-attribute=pure -Wsuggest-attribute=noreturn"
+ NO_DEVELOPER_WARNING_CFLAGS="$NO_DEVELOPER_WARNING_CFLAGS -Wno-suggest-attribute=pure -Wno-suggest-attribute=noreturn"
# Append more extra warning flags that only gcc 4.8+ know about
- H5_CFLAGS="$H5_CFLAGS -Wsuggest-attribute=format"
+ DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wsuggest-attribute=format"
+ NO_DEVELOPER_WARNING_CFLAGS="$NO_DEVELOPER_WARNING_CFLAGS -Wno-suggest-attribute=format"
# Append more extra warning flags that only gcc 4.9+ know about
H5_CFLAGS="$H5_CFLAGS -Wdate-time"
@@ -328,7 +344,9 @@ case "$cc_vendor-$cc_version" in
H5_CFLAGS="$H5_CFLAGS -Wfloat-equal -Wmissing-format-attribute"
# Append warning flags from gcc-3.2* case
- H5_CFLAGS="$H5_CFLAGS -Wmissing-noreturn -Wpacked -Wdisabled-optimization"
+ H5_CFLAGS="$H5_CFLAGS -Wpacked -Wdisabled-optimization"
+ DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wmissing-noreturn"
+ NO_DEVELOPER_WARNING_CFLAGS="$NO_DEVELOPER_WARNING_CFLAGS -Wno-missing-noreturn"
# Enable more format checking flags, beyond the basic -Wformat included
# in -Wall
@@ -368,13 +386,18 @@ case "$cc_vendor-$cc_version" in
H5_CFLAGS="$H5_CFLAGS -Wstrict-overflow=5 -Wjump-misses-init -Wunsuffixed-float-constants"
# Append more extra warning flags that only gcc 4.6+ know about
- H5_CFLAGS="$H5_CFLAGS -Wdouble-promotion -Wsuggest-attribute=const -Wtrampolines"
+ H5_CFLAGS="$H5_CFLAGS -Wdouble-promotion -Wtrampolines"
+ DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wsuggest-attribute=const"
+ NO_DEVELOPER_WARNING_CFLAGS="$NO_DEVELOPER_WARNING_CFLAGS -Wno-suggest-attribute=const"
# Append more extra warning flags that only gcc 4.7+ know about
- H5_CFLAGS="$H5_CFLAGS -Wstack-usage=8192 -Wvector-operation-performance -Wsuggest-attribute=pure -Wsuggest-attribute=noreturn"
+ H5_CFLAGS="$H5_CFLAGS -Wstack-usage=8192 -Wvector-operation-performance"
+ DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wsuggest-attribute=pure -Wsuggest-attribute=noreturn"
+ NO_DEVELOPER_WARNING_CFLAGS="$NO_DEVELOPER_WARNING_CFLAGS -Wno-suggest-attribute=pure -Wno-suggest-attribute=noreturn"
# Append more extra warning flags that only gcc 4.8+ know about
- H5_CFLAGS="$H5_CFLAGS -Wsuggest-attribute=format"
+ DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wsuggest-attribute=format"
+ NO_DEVELOPER_WARNING_CFLAGS="$NO_DEVELOPER_WARNING_CFLAGS -Wno-suggest-attribute=format"
# Append more extra warning flags that only gcc 4.9+ know about
H5_CFLAGS="$H5_CFLAGS -Wdate-time"
@@ -396,7 +419,9 @@ case "$cc_vendor-$cc_version" in
H5_CFLAGS="$H5_CFLAGS -Wfloat-equal -Wmissing-format-attribute"
# Append warning flags from gcc-3.2* case
- H5_CFLAGS="$H5_CFLAGS -Wmissing-noreturn -Wpacked -Wdisabled-optimization"
+ H5_CFLAGS="$H5_CFLAGS -Wpacked -Wdisabled-optimization"
+ DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wmissing-noreturn"
+ NO_DEVELOPER_WARNING_CFLAGS="$NO_DEVELOPER_WARNING_CFLAGS -Wno-missing-noreturn"
# Enable more format checking flags, beyond the basic -Wformat included
# in -Wall
@@ -436,13 +461,18 @@ case "$cc_vendor-$cc_version" in
H5_CFLAGS="$H5_CFLAGS -Wstrict-overflow=5 -Wjump-misses-init"
# Append more extra warning flags that only gcc 4.6+ know about
- H5_CFLAGS="$H5_CFLAGS -Wdouble-promotion -Wsuggest-attribute=const -Wtrampolines"
+ H5_CFLAGS="$H5_CFLAGS -Wdouble-promotion -Wtrampolines"
+ DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wsuggest-attribute=const"
+ NO_DEVELOPER_WARNING_CFLAGS="$NO_DEVELOPER_WARNING_CFLAGS -Wno-suggest-attribute=const"
# Append more extra warning flags that only gcc 4.7+ know about
- H5_CFLAGS="$H5_CFLAGS -Wstack-usage=8192 -Wvector-operation-performance -Wsuggest-attribute=pure -Wsuggest-attribute=noreturn"
+ H5_CFLAGS="$H5_CFLAGS -Wstack-usage=8192 -Wvector-operation-performance"
+ DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wsuggest-attribute=pure -Wsuggest-attribute=noreturn"
+ NO_DEVELOPER_WARNING_CFLAGS="$NO_DEVELOPER_WARNING_CFLAGS -Wno-suggest-attribute=pure -Wno-suggest-attribute=noreturn"
# Append more extra warning flags that only gcc 4.8+ know about
- H5_CFLAGS="$H5_CFLAGS -Wsuggest-attribute=format"
+ DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wsuggest-attribute=format"
+ NO_DEVELOPER_WARNING_CFLAGS="$NO_DEVELOPER_WARNING_CFLAGS -Wno-suggest-attribute=format"
;;
gcc-4.7*)
@@ -461,7 +491,9 @@ case "$cc_vendor-$cc_version" in
H5_CFLAGS="$H5_CFLAGS -Wfloat-equal -Wmissing-format-attribute"
# Append warning flags from gcc-3.2* case
- H5_CFLAGS="$H5_CFLAGS -Wmissing-noreturn -Wpacked -Wdisabled-optimization"
+ H5_CFLAGS="$H5_CFLAGS -Wpacked -Wdisabled-optimization"
+ DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wmissing-noreturn"
+ NO_DEVELOPER_WARNING_CFLAGS="$NO_DEVELOPER_WARNING_CFLAGS -Wno-missing-noreturn"
# Enable more format checking flags, beyond the basic -Wformat included
# in -Wall
@@ -502,10 +534,14 @@ case "$cc_vendor-$cc_version" in
H5_CFLAGS="$H5_CFLAGS -Wstrict-overflow=5 -Wjump-misses-init -Wunsuffixed-float-constants"
# Append more extra warning flags that only gcc 4.6+ know about
- H5_CFLAGS="$H5_CFLAGS -Wdouble-promotion -Wsuggest-attribute=const -Wtrampolines"
+ H5_CFLAGS="$H5_CFLAGS -Wdouble-promotion -Wtrampolines"
+ DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wsuggest-attribute=const"
+ NO_DEVELOPER_WARNING_CFLAGS="$NO_DEVELOPER_WARNING_CFLAGS -Wno-suggest-attribute=const"
# Append more extra warning flags that only gcc 4.7+ know about
- H5_CFLAGS="$H5_CFLAGS -Wstack-usage=8192 -Wvector-operation-performance -Wsuggest-attribute=pure -Wsuggest-attribute=noreturn"
+ H5_CFLAGS="$H5_CFLAGS -Wstack-usage=8192 -Wvector-operation-performance"
+ DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wsuggest-attribute=pure -Wsuggest-attribute=noreturn"
+ NO_DEVELOPER_WARNING_CFLAGS="$NO_DEVELOPER_WARNING_CFLAGS -Wno-suggest-attribute=pure -Wno-suggest-attribute=noreturn"
;;
gcc-4.6*)
@@ -520,7 +556,9 @@ case "$cc_vendor-$cc_version" in
H5_CFLAGS="$H5_CFLAGS -Wfloat-equal -Wmissing-format-attribute"
# Append warning flags from gcc-3.2* case
- H5_CFLAGS="$H5_CFLAGS -Wmissing-noreturn -Wpacked -Wdisabled-optimization"
+ H5_CFLAGS="$H5_CFLAGS -Wpacked -Wdisabled-optimization"
+ DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wmissing-noreturn"
+ NO_DEVELOPER_WARNING_CFLAGS="$NO_DEVELOPER_WARNING_CFLAGS -Wno-missing-noreturn"
# Enable more format checking flags, beyond the basic -Wformat included
# in -Wall
@@ -561,7 +599,9 @@ case "$cc_vendor-$cc_version" in
H5_CFLAGS="$H5_CFLAGS -Wstrict-aliasing -Wstrict-overflow=5 -Wjump-misses-init -Wunsuffixed-float-constants"
# Append more extra warning flags that only gcc 4.6+ know about
- H5_CFLAGS="$H5_CFLAGS -Wdouble-promotion -Wsuggest-attribute=const -Wtrampolines"
+ H5_CFLAGS="$H5_CFLAGS -Wdouble-promotion -Wtrampolines"
+ DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wsuggest-attribute=const"
+ NO_DEVELOPER_WARNING_CFLAGS="$NO_DEVELOPER_WARNING_CFLAGS -Wno-suggest-attribute=const"
;;
gcc-4.5*)
@@ -576,7 +616,9 @@ case "$cc_vendor-$cc_version" in
H5_CFLAGS="$H5_CFLAGS -Wfloat-equal -Wmissing-format-attribute"
# Append warning flags from gcc-3.2* case
- H5_CFLAGS="$H5_CFLAGS -Wmissing-noreturn -Wpacked -Wdisabled-optimization"
+ H5_CFLAGS="$H5_CFLAGS -Wpacked -Wdisabled-optimization"
+ DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wmissing-noreturn"
+ NO_DEVELOPER_WARNING_CFLAGS="$NO_DEVELOPER_WARNING_CFLAGS -Wno-missing-noreturn"
# Enable more format checking flags, beyond the basic -Wformat included
# in -Wall
@@ -629,7 +671,9 @@ case "$cc_vendor-$cc_version" in
H5_CFLAGS="$H5_CFLAGS -Wfloat-equal -Wmissing-format-attribute"
# Append warning flags from gcc-3.2* case
- H5_CFLAGS="$H5_CFLAGS -Wmissing-noreturn -Wpacked -Wdisabled-optimization"
+ H5_CFLAGS="$H5_CFLAGS -Wpacked -Wdisabled-optimization"
+ DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wmissing-noreturn"
+ NO_DEVELOPER_WARNING_CFLAGS="$NO_DEVELOPER_WARNING_CFLAGS -Wno-missing-noreturn"
# Enable more format checking flags, beyond the basic -Wformat included
# in -Wall
@@ -678,7 +722,9 @@ case "$cc_vendor-$cc_version" in
H5_CFLAGS="$H5_CFLAGS -Wfloat-equal -Wmissing-format-attribute"
# Append warning flags from gcc-3.2* case
- H5_CFLAGS="$H5_CFLAGS -Wmissing-noreturn -Wpacked -Wdisabled-optimization"
+ H5_CFLAGS="$H5_CFLAGS -Wpacked -Wdisabled-optimization"
+ DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wmissing-noreturn"
+ NO_DEVELOPER_WARNING_CFLAGS="$NO_DEVELOPER_WARNING_CFLAGS -Wno-missing-noreturn"
# Enable more format checking flags, beyond the basic -Wformat included
# in -Wall
@@ -724,7 +770,9 @@ case "$cc_vendor-$cc_version" in
H5_CFLAGS="$H5_CFLAGS -Wfloat-equal -Wmissing-format-attribute"
# Append warning flags from gcc-3.2* case
- H5_CFLAGS="$H5_CFLAGS -Wmissing-noreturn -Wpacked -Wdisabled-optimization"
+ H5_CFLAGS="$H5_CFLAGS -Wpacked -Wdisabled-optimization"
+ DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wmissing-noreturn"
+ NO_DEVELOPER_WARNING_CFLAGS="$NO_DEVELOPER_WARNING_CFLAGS -Wno-missing-noreturn"
# Enable more format checking flags, beyond the basic -Wformat included
# in -Wall
@@ -752,7 +800,7 @@ case "$cc_vendor-$cc_version" in
H5_CFLAGS="$H5_CFLAGS -Wstrict-overflow"
;;
- gcc-4.1.*)
+ gcc-4.1*)
# Disable warnings about using 'long long' type
H5_CFLAGS="$H5_CFLAGS -Wno-long-long"
@@ -764,7 +812,9 @@ case "$cc_vendor-$cc_version" in
H5_CFLAGS="$H5_CFLAGS -Wfloat-equal -Wmissing-format-attribute"
# Append warning flags from gcc-3.2* case
- H5_CFLAGS="$H5_CFLAGS -Wmissing-noreturn -Wpacked -Wdisabled-optimization"
+ H5_CFLAGS="$H5_CFLAGS -Wpacked -Wdisabled-optimization"
+ DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wmissing-noreturn"
+ NO_DEVELOPER_WARNING_CFLAGS="$NO_DEVELOPER_WARNING_CFLAGS -Wno-missing-noreturn"
# Enable more format checking flags, beyond the basic -Wformat included
# in -Wall
@@ -801,7 +851,9 @@ case "$cc_vendor-$cc_version" in
H5_CFLAGS="$H5_CFLAGS -Wfloat-equal -Wmissing-format-attribute"
# Append warning flags from gcc-3.2* case
- H5_CFLAGS="$H5_CFLAGS -Wmissing-noreturn -Wpacked -Wdisabled-optimization"
+ H5_CFLAGS="$H5_CFLAGS -Wpacked -Wdisabled-optimization"
+ DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wmissing-noreturn"
+ NO_DEVELOPER_WARNING_CFLAGS="$NO_DEVELOPER_WARNING_CFLAGS -Wno-missing-noreturn"
# The "format=2" warning generates too many warnings about valid
# usage in the library.
@@ -835,7 +887,9 @@ case "$cc_vendor-$cc_version" in
H5_CFLAGS="$H5_CFLAGS -Wfloat-equal -Wmissing-format-attribute"
# Append warning flags from gcc-3.2* case
- H5_CFLAGS="$H5_CFLAGS -Wmissing-noreturn -Wpacked -Wdisabled-optimization"
+ H5_CFLAGS="$H5_CFLAGS -Wpacked -Wdisabled-optimization"
+ DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wmissing-noreturn"
+ NO_DEVELOPER_WARNING_CFLAGS="$NO_DEVELOPER_WARNING_CFLAGS -Wno-missing-noreturn"
# The "format=2" warning generates too many warnings about valid
# usage in the library.
@@ -866,7 +920,9 @@ case "$cc_vendor-$cc_version" in
H5_CFLAGS="$H5_CFLAGS -Wfloat-equal -Wmissing-format-attribute"
# Append warning flags from gcc-3.2* case
- H5_CFLAGS="$H5_CFLAGS -Wmissing-noreturn -Wpacked -Wdisabled-optimization"
+ H5_CFLAGS="$H5_CFLAGS -Wpacked -Wdisabled-optimization"
+ DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wmissing-noreturn"
+ NO_DEVELOPER_WARNING_CFLAGS="$NO_DEVELOPER_WARNING_CFLAGS -Wno-missing-noreturn"
# The "format=2" warning generates too many warnings about valid
# usage in the library.
@@ -891,7 +947,9 @@ case "$cc_vendor-$cc_version" in
H5_CFLAGS="$H5_CFLAGS -Wfloat-equal -Wmissing-format-attribute"
# Append more extra warning flags that only gcc3.2+ know about
- H5_CFLAGS="$H5_CFLAGS -Wmissing-noreturn -Wpacked -Wdisabled-optimization"
+ H5_CFLAGS="$H5_CFLAGS -Wpacked -Wdisabled-optimization"
+ DEVELOPER_WARNING_CFLAGS="$DEVELOPER_WARNING_CFLAGS -Wmissing-noreturn"
+ NO_DEVELOPER_WARNING_CFLAGS="$NO_DEVELOPER_WARNING_CFLAGS -Wno-missing-noreturn"
# The "format=2" warning generates too many warnings about valid
# usage in the library.
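The gnu-flags hunks above only sort the suggestion-style warnings into the two new shell variables; whether either set actually reaches the compiler is decided later at configure time. A minimal sketch of that selection, using the variable names introduced above (the real wiring appears in the configure.ac hunk further below):

    # Sketch only: choose one warning set based on --enable-developer-warnings.
    if test "X-$DEV_WARNINGS" = "X-yes"; then
        H5_CFLAGS="$H5_CFLAGS $DEVELOPER_WARNING_CFLAGS"      # emit the -Wsuggest-attribute=... hints
    else
        H5_CFLAGS="$H5_CFLAGS $NO_DEVELOPER_WARNING_CFLAGS"   # silence them with -Wno-suggest-attribute=...
    fi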
diff --git a/config/ibm-aix b/config/ibm-aix
index ef052fb..805ec34 100644
--- a/config/ibm-aix
+++ b/config/ibm-aix
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
# Configuration file for building on the IBM AIX platforms.
# This file is part of the HDF5 build script. It is processed shortly
diff --git a/config/ibm-flags b/config/ibm-flags
index 412817c..881b81c 100644
--- a/config/ibm-flags
+++ b/config/ibm-flags
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
# This file should be sourced into configure if the compiler is the
diff --git a/config/intel-fflags b/config/intel-fflags
index db9543e..8b1110e 100644
--- a/config/intel-fflags
+++ b/config/intel-fflags
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
# This file should be sourced into configure if the compiler is the
diff --git a/config/intel-flags b/config/intel-flags
index ee0d054..dad210c 100644
--- a/config/intel-flags
+++ b/config/intel-flags
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
# This file should be sourced into configure if the compiler is the
diff --git a/config/linux-gnu b/config/linux-gnu
index 912a93b..243b087 100644
--- a/config/linux-gnu
+++ b/config/linux-gnu
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
# This is the same as linux-gnulibc1
diff --git a/config/linux-gnuaout b/config/linux-gnuaout
index 912a93b..243b087 100644
--- a/config/linux-gnuaout
+++ b/config/linux-gnuaout
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
# This is the same as linux-gnulibc1
diff --git a/config/linux-gnulibc1 b/config/linux-gnulibc1
index 1b4785e..1785e0e 100644
--- a/config/linux-gnulibc1
+++ b/config/linux-gnulibc1
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
# This file is part of the HDF5 build script. It is processed shortly
diff --git a/config/linux-gnulibc2 b/config/linux-gnulibc2
index 76526b8..01a0d20 100644
--- a/config/linux-gnulibc2
+++ b/config/linux-gnulibc2
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
# This file is part of the HDF5 build script. It is processed shortly
diff --git a/config/lt_vers.am b/config/lt_vers.am
index eb630a7..fcc2a40 100644
--- a/config/lt_vers.am
+++ b/config/lt_vers.am
@@ -7,12 +7,10 @@
##
## This file is part of HDF5. The full HDF5 copyright notice, including
## terms governing use, modification, and redistribution, is contained in
-## the files COPYING and Copyright.html. COPYING can be found at the root
-## of the source code distribution tree; Copyright.html can be found at the
-## root level of an installed copy of the electronic HDF5 document set and
-## is linked from the top-level documents page. It can also be found at
-## http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-## access to either file, you may request a copy from help@hdfgroup.org.
+## the COPYING file, which can be found at the root of the source code
+## distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+## If you do not have access to either file, you may request a copy from
+## help@hdfgroup.org.
##
# Add libtool shared library version numbers to the HDF5 library
# See libtool versioning documentation online.
@@ -69,19 +67,3 @@ LT_TOOLS_VERS_INTERFACE = 1000
LT_TOOLS_VERS_REVISION = 0
LT_TOOLS_VERS_AGE = 0
-
-# Copyright by The HDF Group.
-# Copyright by the Board of Trustees of the University of Illinois.
-# All rights reserved.
-#
-# This file is part of HDF5. The full HDF5 copyright notice, including
-# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
-
-
-
diff --git a/config/pgi-fflags b/config/pgi-fflags
index 08dfe6e..46f861d 100644
--- a/config/pgi-fflags
+++ b/config/pgi-fflags
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
# This file should be sourced into configure if the compiler is the
diff --git a/config/pgi-flags b/config/pgi-flags
index f6878e6..52828c1 100644
--- a/config/pgi-flags
+++ b/config/pgi-flags
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
# This file should be sourced into configure if the compiler is the
diff --git a/config/site-specific/BlankForm b/config/site-specific/BlankForm
index c31383c..03d421c 100644
--- a/config/site-specific/BlankForm
+++ b/config/site-specific/BlankForm
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
# This file is part of the HDF5 build script. It is processed shortly
diff --git a/config/solaris b/config/solaris
index 72d7423..656fee1 100644
--- a/config/solaris
+++ b/config/solaris
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
# This file is part of the HDF5 build script. It is processed shortly
diff --git a/configure.ac b/configure.ac
index f4c5855..9695fe3 100644
--- a/configure.ac
+++ b/configure.ac
@@ -6,12 +6,10 @@
##
## This file is part of HDF5. The full HDF5 copyright notice, including
## terms governing use, modification, and redistribution, is contained in
-## the files COPYING and Copyright.html. COPYING can be found at the root
-## of the source code distribution tree; Copyright.html can be found at the
-## root level of an installed copy of the electronic HDF5 document set and
-## is linked from the top-level documents page. It can also be found at
-## http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-## access to either file, you may request a copy from help@hdfgroup.org.
+## the COPYING file, which can be found at the root of the source code
+## distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+## If you do not have access to either file, you may request a copy from
+## help@hdfgroup.org.
## ----------------------------------------------------------------------
## Initialize configure.
@@ -26,7 +24,7 @@ AC_PREREQ([2.69])
## NOTE: Do not forget to change the version number here when we do a
## release!!!
##
-AC_INIT([HDF5], [1.9.236], [help@hdfgroup.org])
+AC_INIT([HDF5], [1.11.0], [help@hdfgroup.org])
AC_CONFIG_SRCDIR([src/H5.c])
AC_CONFIG_HEADERS([src/H5config.h])
@@ -37,7 +35,7 @@ AC_CONFIG_MACRO_DIR([m4])
## AM_INIT_AUTOMAKE takes a list of options that should be applied to
## every Makefile.am when automake is run.
AM_INIT_AUTOMAKE([foreign subdir-objects])
-AM_SILENT_RULES([yes])
+m4_ifdef([AM_SILENT_RULES], [AM_SILENT_RULES([yes])]) # use silent rules where available - automake 1.11
## AM_MAINTAINER_MODE turns off "rebuild rules" that contain dependencies
## for Makefiles, configure, src/H5config.h, etc. If AM_MAINTAINER_MODE
@@ -920,14 +918,13 @@ H5_FORTRAN_SHARED="no"
if test "X${HDF_FORTRAN}" = "Xyes" && test "X${enable_shared}" != "Xno"; then
AC_MSG_CHECKING([if shared Fortran libraries are supported])
H5_FORTRAN_SHARED="yes"
-
- ## Disable fortran shared libraries on Mac. (MAM - 03/30/11)
-
+  ## Tell libtool to do the right thing with COMMON symbols; this fixes
+  ## corrupt values with COMMON and EQUIVALENCE when building shared
+  ## Fortran libraries on OS X with the GNU and Intel compilers (HDFFV-2772).
case "`uname`" in
Darwin*)
- H5_FORTRAN_SHARED="no"
- CHECK_WARN="Shared Fortran libraries not currently supported on Mac."
- ;;
+ H5_LDFLAGS="$H5_LDFLAGS -Wl,-commons,use_dylibs"
+ ;;
esac
## Report results of check(s)
@@ -1851,6 +1848,7 @@ AC_CHECK_FUNCS([frexpl gethostname getrusage gettimeofday])
AC_CHECK_FUNCS([lstat rand_r random setsysinfo])
AC_CHECK_FUNCS([signal longjmp setjmp siglongjmp sigsetjmp sigprocmask])
AC_CHECK_FUNCS([snprintf srandom strdup symlink system])
+AC_CHECK_FUNCS([strtoll strtoull])
AC_CHECK_FUNCS([tmpfile asprintf vasprintf vsnprintf waitpid])
AC_CHECK_FUNCS([roundf lroundf llroundf round lround llround])
@@ -2055,6 +2053,48 @@ case "X-$ASSERTS" in
esac
## ----------------------------------------------------------------------
+## Check if developer warnings should be turned on
+## These are warnings that provide suggestions like gcc's -Wsuggest-attribute.
+## They do not indicate code problems.
+##
+## Note that developers don't need to build with these regularly. They
+## are just handy to check once in a while (before releases, etc.).
+##
+AC_MSG_CHECKING([enable developer warnings])
+AC_ARG_ENABLE([developer-warnings],
+ [AS_HELP_STRING([--enable-developer-warnings],
+ [Determines whether developer warnings will be
+ emitted. These are usually performance suggestions
+ (e.g. -Wsuggest-attribute) and do not flag poor code
+ quality.
+ [default=no]
+ ])],
+ [DEV_WARNINGS=$enableval])
+
+## Set default
+if test "X-$DEV_WARNINGS" = X- ; then
+ DEV_WARNINGS=no
+fi
+
+## Allow this variable to be substituted in
+## other files (src/libhdf5.settings.in, etc.)
+AC_SUBST([DEV_WARNINGS])
+
+case "X-$DEV_WARNINGS" in
+ X-yes)
+ H5_CFLAGS="$H5_CFLAGS $DEVELOPER_WARNING_CFLAGS"
+ AC_MSG_RESULT([yes])
+ ;;
+ X-no)
+ H5_CFLAGS="$H5_CFLAGS $NO_DEVELOPER_WARNING_CFLAGS"
+ AC_MSG_RESULT([no])
+ ;;
+ *)
+ AC_MSG_ERROR([Unrecognized value: $DEV_WARNINGS])
+ ;;
+esac
+
+## ----------------------------------------------------------------------
## Check if the compiler should use profiling flags/settings
##
AC_MSG_CHECKING([profiling])
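For illustration only (a standard autotools invocation is assumed; nothing here is part of the patch), the new switch would be exercised like this, with the default remaining "no":

    # Hypothetical usage of the option added above:
    ./configure --enable-developer-warnings   # H5_CFLAGS picks up $DEVELOPER_WARNING_CFLAGS
    ./configure                               # default: $NO_DEVELOPER_WARNING_CFLAGS is used instead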
@@ -3317,10 +3357,14 @@ AC_CONFIG_FILES([src/libhdf5.settings
test/Makefile
test/testcheck_version.sh
test/testerror.sh
+ test/testflushrefresh.sh
test/H5srcdir_str.h
test/testlibinfo.sh
test/testlinks_env.sh
+ test/testswmr.sh
test/test_plugin.sh
+ test/test_usecases.sh
+ test/testvdsswmr.sh
testpar/Makefile
tools/Makefile
tools/lib/Makefile
@@ -3337,16 +3381,19 @@ AC_CONFIG_FILES([src/libhdf5.settings
tools/src/h5stat/Makefile
tools/test/Makefile
tools/test/h5dump/Makefile
+ tools/test/h5dump/h5dump_plugin.sh
tools/test/h5dump/testh5dump.sh
tools/test/h5dump/testh5dumppbits.sh
tools/test/h5dump/testh5dumpvds.sh
tools/test/h5dump/testh5dumpxml.sh
tools/test/h5ls/Makefile
+ tools/test/h5ls/h5ls_plugin.sh
tools/test/h5ls/testh5ls.sh
tools/test/h5ls/testh5lsvds.sh
tools/test/h5import/Makefile
tools/test/h5import/h5importtestutil.sh
tools/test/h5diff/Makefile
+ tools/test/h5diff/h5diff_plugin.sh
tools/test/h5diff/testh5diff.sh
tools/test/h5diff/testph5diff.sh
tools/src/h5format_convert/Makefile
@@ -3360,7 +3407,7 @@ AC_CONFIG_FILES([src/libhdf5.settings
tools/test/h5copy/Makefile
tools/test/h5copy/testh5copy.sh
tools/test/misc/Makefile
- tools/test/misc/testh5clear.sh
+ tools/test/misc/testh5clear.sh
tools/test/misc/testh5mkgrp.sh
tools/test/misc/testh5repart.sh
tools/test/misc/vds/Makefile
@@ -3408,7 +3455,7 @@ AC_CONFIG_FILES([src/libhdf5.settings
hl/tools/Makefile
hl/tools/gif2h5/Makefile
hl/tools/gif2h5/h52giftest.sh
- hl/tools/h5watch/Makefile
+ hl/tools/h5watch/Makefile
hl/tools/h5watch/testh5watch.sh
hl/examples/Makefile
hl/examples/run-hlc-ex.sh
@@ -3436,6 +3483,14 @@ chmod 755 tools/src/misc/h5cc
if test "X$HDF_FORTRAN" = "Xyes"; then
chmod 755 fortran/src/h5fc
+  ## libtool does not pass the correct linker argument wrapper (wl=) for the Intel
+  ## Fortran compiler on OS X, which is needed when building shared libraries there.
+  ## This script replaces the 3rd occurrence of wl="" (the Fortran one) with wl="-Wl," (HDFFV-2772).
+ case "`uname`" in
+ Darwin*)
+ cat libtool | awk '/wl=\"/{c++;if(c==3){sub("wl=\"\"","wl=\"-Wl,\"");c=0}}1' > libtool.tmp && mv -f libtool.tmp libtool && chmod 755 libtool
+ ;;
+ esac
fi
if test "X$HDF_CXX" = "Xyes"; then
diff --git a/examples/CMakeLists.txt b/examples/CMakeLists.txt
index 8849ce7..d8eb1a2 100644
--- a/examples/CMakeLists.txt
+++ b/examples/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_EXAMPLES)
#-----------------------------------------------------------------------------
@@ -56,8 +56,8 @@ foreach (example ${examples})
TARGET_C_PROPERTIES (${example}-shared SHARED " " " ")
target_link_libraries (${example}-shared ${HDF5_LIBSH_TARGET})
set_target_properties (${example}-shared PROPERTIES FOLDER examples)
- endif (BUILD_SHARED_LIBS)
-endforeach (example ${examples})
+ endif ()
+endforeach ()
if (H5_HAVE_PARALLEL)
add_executable (ph5example ${HDF5_EXAMPLES_SOURCE_DIR}/ph5example.c)
@@ -71,9 +71,9 @@ if (H5_HAVE_PARALLEL)
TARGET_C_PROPERTIES (ph5example-shared SHARED " " " ")
target_link_libraries (ph5example-shared ${HDF5_LIBSH_TARGET})
set_target_properties (ph5example-shared PROPERTIES FOLDER examples)
- endif (BUILD_SHARED_LIBS)
-endif (H5_HAVE_PARALLEL)
+ endif ()
+endif ()
if (BUILD_TESTING)
include (CMakeTests.cmake)
-endif (BUILD_TESTING)
+endif ()
diff --git a/examples/CMakeTests.cmake b/examples/CMakeTests.cmake
index 4a4728e..dd4766a 100644
--- a/examples/CMakeTests.cmake
+++ b/examples/CMakeTests.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
##############################################################################
##############################################################################
@@ -8,19 +19,19 @@
if (BUILD_SHARED_LIBS)
file (MAKE_DIRECTORY "${PROJECT_BINARY_DIR}/H5EX-shared")
file (MAKE_DIRECTORY ${PROJECT_BINARY_DIR}/H5EX-shared/red ${PROJECT_BINARY_DIR}/H5EX-shared/blue ${PROJECT_BINARY_DIR}/H5EX-shared/u2w)
- endif (BUILD_SHARED_LIBS)
+ endif ()
# Remove any output file left over from previous test run
add_test (
NAME EXAMPLES-clear-objects
COMMAND ${CMAKE_COMMAND}
- -E remove
+ -E remove
Attributes.h5
btrees_file.h5
cmprss.h5
default_file.h5
dset.h5
- extend.h5
+ extend.h5
extlink_prefix_source.h5
extlink_source.h5
extlink_target.h5
@@ -62,29 +73,42 @@
)
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (EXAMPLES-clear-objects PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
set (last_test "EXAMPLES-clear-objects")
foreach (example ${examples})
- add_test (NAME EXAMPLES-${example} COMMAND $<TARGET_FILE:${example}>)
+ if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME EXAMPLES-${example} COMMAND $<TARGET_FILE:${example}>)
+ else ()
+ add_test (NAME EXAMPLES-${example} COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:${example}>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=${example}.txt"
+ #-D "TEST_REFERENCE=${example}.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+ endif ()
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (EXAMPLES-${example} PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
set (last_test "EXAMPLES-${example}")
- endforeach (example ${examples})
+ endforeach ()
if (BUILD_SHARED_LIBS)
# Remove any output file left over from previous test run
add_test (
NAME EXAMPLES-shared-clear-objects
COMMAND ${CMAKE_COMMAND}
- -E remove
+ -E remove
Attributes.h5
btrees_file.h5
cmprss.h5
default_file.h5
dset.h5
- extend.h5
+ extend.h5
extlink_prefix_source.h5
extlink_source.h5
extlink_target.h5
@@ -128,32 +152,71 @@
)
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (EXAMPLES-shared-clear-objects PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
set (last_test "EXAMPLES-shared-clear-objects")
foreach (example ${examples})
- add_test (NAME EXAMPLES-shared-${example} COMMAND $<TARGET_FILE:${example}-shared>)
+ if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME EXAMPLES-shared-${example} COMMAND $<TARGET_FILE:${example}-shared>)
+ else ()
+ add_test (NAME EXAMPLES-shared-${example} COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:${example}>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=${example}.txt"
+ #-D "TEST_REFERENCE=${example}.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}/H5EX-shared"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+ endif ()
set_tests_properties (EXAMPLES-shared-${example} PROPERTIES WORKING_DIRECTORY ${PROJECT_BINARY_DIR}/H5EX-shared)
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (EXAMPLES-shared-${example} PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
set (last_test "EXAMPLES-shared-${example}")
- endforeach (example ${examples})
- endif (BUILD_SHARED_LIBS)
+ endforeach ()
+ endif ()
### Windows pops up a modal permission dialog on this test
if (H5_HAVE_PARALLEL AND NOT WIN32)
- add_test (NAME EXAMPLES-ph5example COMMAND $<TARGET_FILE:ph5example>)
+ if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME EXAMPLES-ph5example COMMAND $<TARGET_FILE:ph5example>)
+ else ()
+ add_test (NAME EXAMPLES-ph5example COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:ph5example>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=ph5example.txt"
+ #-D "TEST_REFERENCE=ph5example.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+ endif ()
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (EXAMPLES-ph5example PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
set (last_test "EXAMPLES-ph5example")
if (BUILD_SHARED_LIBS)
- add_test (NAME EXAMPLES-shared-ph5example COMMAND $<TARGET_FILE:ph5example-shared>)
+ if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME EXAMPLES-shared-ph5example COMMAND $<TARGET_FILE:ph5example-shared>)
+ else ()
+ add_test (NAME EXAMPLES-shared-ph5example COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:ph5example-shared>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=ph5example-shared.txt"
+ #-D "TEST_REFERENCE=ph5example-shared.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}/H5EX-shared"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+ endif ()
set_tests_properties (EXAMPLES-shared-ph5example PROPERTIES WORKING_DIRECTORY ${PROJECT_BINARY_DIR}/H5EX-shared)
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (EXAMPLES-shared-ph5example PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
set (last_test "EXAMPLES-shared-ph5example")
- endif (BUILD_SHARED_LIBS)
- endif (H5_HAVE_PARALLEL AND NOT WIN32)
+ endif ()
+ endif ()
diff --git a/examples/Makefile.am b/examples/Makefile.am
index 883b99d..8c6540f 100644
--- a/examples/Makefile.am
+++ b/examples/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
diff --git a/examples/h5_attribute.c b/examples/h5_attribute.c
index 0ea0153..335f9c2 100644
--- a/examples/h5_attribute.c
+++ b/examples/h5_attribute.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/examples/h5_chunk_read.c b/examples/h5_chunk_read.c
index 98b0bb4..c3455f4 100644
--- a/examples/h5_chunk_read.c
+++ b/examples/h5_chunk_read.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/examples/h5_cmprss.c b/examples/h5_cmprss.c
index 8d365a3..ebc7712 100644
--- a/examples/h5_cmprss.c
+++ b/examples/h5_cmprss.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/examples/h5_compound.c b/examples/h5_compound.c
index 3fca2a5..b3b3a4a 100644
--- a/examples/h5_compound.c
+++ b/examples/h5_compound.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/examples/h5_crtatt.c b/examples/h5_crtatt.c
index 5e1378c..ade17ba 100644
--- a/examples/h5_crtatt.c
+++ b/examples/h5_crtatt.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/examples/h5_crtdat.c b/examples/h5_crtdat.c
index f9327e7..4a876d2 100644
--- a/examples/h5_crtdat.c
+++ b/examples/h5_crtdat.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/examples/h5_crtgrp.c b/examples/h5_crtgrp.c
index a626ed8..89bddce 100644
--- a/examples/h5_crtgrp.c
+++ b/examples/h5_crtgrp.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/examples/h5_crtgrpar.c b/examples/h5_crtgrpar.c
index e8cf7c3..6f8c6e8 100644
--- a/examples/h5_crtgrpar.c
+++ b/examples/h5_crtgrpar.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/examples/h5_crtgrpd.c b/examples/h5_crtgrpd.c
index d6a320b..35c4389 100644
--- a/examples/h5_crtgrpd.c
+++ b/examples/h5_crtgrpd.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/examples/h5_drivers.c b/examples/h5_drivers.c
index 7245794..43c1fc7 100644
--- a/examples/h5_drivers.c
+++ b/examples/h5_drivers.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/examples/h5_dtransform.c b/examples/h5_dtransform.c
index 71ec10a..0b718ad 100644
--- a/examples/h5_dtransform.c
+++ b/examples/h5_dtransform.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/examples/h5_elink_unix2win.c b/examples/h5_elink_unix2win.c
index 9c0918c..df52015 100644
--- a/examples/h5_elink_unix2win.c
+++ b/examples/h5_elink_unix2win.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* This program demonstrates how to translate an external link created on
diff --git a/examples/h5_extend.c b/examples/h5_extend.c
index 105e553..6e3cff2 100644
--- a/examples/h5_extend.c
+++ b/examples/h5_extend.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/examples/h5_extend_write.c b/examples/h5_extend_write.c
index 56bd025..f3f6077 100644
--- a/examples/h5_extend_write.c
+++ b/examples/h5_extend_write.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/examples/h5_extlink.c b/examples/h5_extlink.c
index e8a24b8..ba632f5 100644
--- a/examples/h5_extlink.c
+++ b/examples/h5_extlink.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* This program demonstrates how to create and use "external links" in
diff --git a/examples/h5_group.c b/examples/h5_group.c
index 6b73210..8e89165 100644
--- a/examples/h5_group.c
+++ b/examples/h5_group.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/examples/h5_interm_group.c b/examples/h5_interm_group.c
index bd9c7c4..6507fd1 100644
--- a/examples/h5_interm_group.c
+++ b/examples/h5_interm_group.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/examples/h5_mount.c b/examples/h5_mount.c
index 6da71a1..a2e16c5 100644
--- a/examples/h5_mount.c
+++ b/examples/h5_mount.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/examples/h5_rdwt.c b/examples/h5_rdwt.c
index 0e290ca..6cd7f0f 100644
--- a/examples/h5_rdwt.c
+++ b/examples/h5_rdwt.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/examples/h5_read.c b/examples/h5_read.c
index 6fe75be..7fd8ad4 100644
--- a/examples/h5_read.c
+++ b/examples/h5_read.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/examples/h5_ref2reg.c b/examples/h5_ref2reg.c
index c4e8d3d..dc2964c 100644
--- a/examples/h5_ref2reg.c
+++ b/examples/h5_ref2reg.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
This program shows how to create, store and dereference references
diff --git a/examples/h5_reference.c b/examples/h5_reference.c
index 38e6146..32a5f59 100644
--- a/examples/h5_reference.c
+++ b/examples/h5_reference.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/examples/h5_select.c b/examples/h5_select.c
index ceb9c2c..bbc877c 100644
--- a/examples/h5_select.c
+++ b/examples/h5_select.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/examples/h5_shared_mesg.c b/examples/h5_shared_mesg.c
index 0c7f2f0..4e1f92a 100644
--- a/examples/h5_shared_mesg.c
+++ b/examples/h5_shared_mesg.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/examples/h5_subset.c b/examples/h5_subset.c
index 66872ea..904d3f8 100644
--- a/examples/h5_subset.c
+++ b/examples/h5_subset.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/examples/h5_vds-eiger.c b/examples/h5_vds-eiger.c
index ea22243..13b5d93 100644
--- a/examples/h5_vds-eiger.c
+++ b/examples/h5_vds-eiger.c
@@ -1,3 +1,14 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
This example illustrates the concept of the virtual dataset.
diff --git a/examples/h5_vds-exc.c b/examples/h5_vds-exc.c
index 039cdb8..aaf1fa8 100644
--- a/examples/h5_vds-exc.c
+++ b/examples/h5_vds-exc.c
@@ -1,3 +1,14 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
This example illustrates the concept of the virtual dataset.
diff --git a/examples/h5_vds-exclim.c b/examples/h5_vds-exclim.c
index 4933471..fea096a 100644
--- a/examples/h5_vds-exclim.c
+++ b/examples/h5_vds-exclim.c
@@ -1,3 +1,14 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
This example illustrates the concept of the virtual dataset.
diff --git a/examples/h5_vds-percival-unlim-maxmin.c b/examples/h5_vds-percival-unlim-maxmin.c
index a6eecfb..0273bbc 100644
--- a/examples/h5_vds-percival-unlim-maxmin.c
+++ b/examples/h5_vds-percival-unlim-maxmin.c
@@ -1,3 +1,14 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
This example illustrates the concept of the virtual dataset.
diff --git a/examples/h5_vds-percival-unlim.c b/examples/h5_vds-percival-unlim.c
index 2496c37..f6a5f50 100644
--- a/examples/h5_vds-percival-unlim.c
+++ b/examples/h5_vds-percival-unlim.c
@@ -1,3 +1,14 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
This example illustrates the concept of the virtual dataset.
diff --git a/examples/h5_vds-percival.c b/examples/h5_vds-percival.c
index 757bb69..11a974b 100644
--- a/examples/h5_vds-percival.c
+++ b/examples/h5_vds-percival.c
@@ -1,3 +1,14 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
This example illustrates the concept of the virtual dataset.
diff --git a/examples/h5_vds-simpleIO.c b/examples/h5_vds-simpleIO.c
index 6b12dc2..56fa72b 100644
--- a/examples/h5_vds-simpleIO.c
+++ b/examples/h5_vds-simpleIO.c
@@ -1,3 +1,14 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
This example illustrates the concept of virtual dataset I/O
diff --git a/examples/h5_vds.c b/examples/h5_vds.c
index 1e502c6..76b849a 100644
--- a/examples/h5_vds.c
+++ b/examples/h5_vds.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/examples/h5_write.c b/examples/h5_write.c
index 93d40ea..1a7cfe7 100644
--- a/examples/h5_write.c
+++ b/examples/h5_write.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/examples/ph5example.c b/examples/ph5example.c
index 7a41db2..d718479 100644
--- a/examples/ph5example.c
+++ b/examples/ph5example.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/examples/run-all-ex.sh b/examples/run-all-ex.sh
index 4ff2c55..878e0f8 100755
--- a/examples/run-all-ex.sh
+++ b/examples/run-all-ex.sh
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# This file: run-hl-ex.sh
diff --git a/examples/run-c-ex.sh.in b/examples/run-c-ex.sh.in
index 1661344..4d5d594 100644
--- a/examples/run-c-ex.sh.in
+++ b/examples/run-c-ex.sh.in
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# This file: run-c-ex.sh
diff --git a/examples/testh5cc.sh.in b/examples/testh5cc.sh.in
index d3f1cfc..800d4d4 100644
--- a/examples/testh5cc.sh.in
+++ b/examples/testh5cc.sh.in
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Tests for the h5cc compiler tool
# Created: Albert Cheng, 2007/4/11
diff --git a/fortran/CMakeLists.txt b/fortran/CMakeLists.txt
index c725047..f7179cf 100644
--- a/fortran/CMakeLists.txt
+++ b/fortran/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_F90 C CXX Fortran)
if (H5_HAVE_PARALLEL)
@@ -6,8 +6,8 @@ if (H5_HAVE_PARALLEL)
set (LINK_LIBS ${LINK_LIBS} ${MPI_Fortran_LIBRARIES})
if (MPI_Fortran_LINK_FLAGS)
set (CMAKE_EXE_LINKER_FLAGS "${MPI_Fortran_LINK_FLAGS} ${CMAKE_EXE_LINKER_FLAGS}")
- endif (MPI_Fortran_LINK_FLAGS)
-endif (H5_HAVE_PARALLEL)
+ endif ()
+endif ()
#-----------------------------------------------------------------------------
# Traverse source subdirectory
@@ -19,7 +19,7 @@ add_subdirectory (${HDF5_F90_SOURCE_DIR}/src ${HDF5_F90_BINARY_DIR}/src)
#-----------------------------------------------------------------------------
if (HDF5_BUILD_EXAMPLES)
add_subdirectory (${HDF5_F90_SOURCE_DIR}/examples ${HDF5_F90_BINARY_DIR}/examples)
-endif (HDF5_BUILD_EXAMPLES)
+endif ()
#-----------------------------------------------------------------------------
# Testing
@@ -28,5 +28,5 @@ if (BUILD_TESTING)
add_subdirectory (${HDF5_F90_SOURCE_DIR}/test ${HDF5_F90_BINARY_DIR}/test)
if (MPI_Fortran_FOUND)
add_subdirectory (${HDF5_F90_SOURCE_DIR}/testpar ${HDF5_F90_BINARY_DIR}/testpar)
- endif (MPI_Fortran_FOUND)
-endif (BUILD_TESTING)
+ endif ()
+endif ()
diff --git a/fortran/COPYING b/fortran/COPYING
index 6903daf..6497ace 100644
--- a/fortran/COPYING
+++ b/fortran/COPYING
@@ -5,12 +5,9 @@
The files and subdirectories in this directory are part of HDF5.
The full HDF5 copyright notice, including terms governing use,
- modification, and redistribution, is contained in the files COPYING
- and Copyright.html. COPYING can be found at the root of the source
- code distribution tree; Copyright.html can be found at the root
- level of an installed copy of the electronic HDF5 document set and
- is linked from the top-level documents page. It can also be found
- at http://www.hdfgroup.org/HDF5/doc/Copyright.html. If you do not
- have access to either file, you may request a copy from
+ modification, and redistribution, is contained in the COPYING file
+ which can be found at the root of the source code distribution tree
+ or in https://support.hdfgroup.org/ftp/HDF5/releases. If you do
+ not have access to either file, you may request a copy from
help@hdfgroup.org.
diff --git a/fortran/Makefile.am b/fortran/Makefile.am
index 9ddd6dd..38084b9 100644
--- a/fortran/Makefile.am
+++ b/fortran/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# This makefile mostly just reinvokes make in the various subdirectories
# but does so in the correct order. You can alternatively invoke make from
diff --git a/fortran/examples/CMakeLists.txt b/fortran/examples/CMakeLists.txt
index aad5f33..a5c3422 100644
--- a/fortran/examples/CMakeLists.txt
+++ b/fortran/examples/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_F90_EXAMPLES C CXX Fortran)
# --------------------------------------------------------------------
# Notes: When creating examples they should be prefixed
@@ -66,8 +66,8 @@ foreach (example ${examples})
FOLDER examples/fortran
Fortran_MODULE_DIRECTORY ${CMAKE_Fortran_MODULE_DIRECTORY}/shared
)
- endif (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
-endforeach (example ${examples})
+ endif ()
+endforeach ()
foreach (example ${F2003_examples})
add_executable (f03_ex_${example} ${HDF5_F90_EXAMPLES_SOURCE_DIR}/${example}.f90)
@@ -97,8 +97,8 @@ foreach (example ${F2003_examples})
FOLDER examples/fortran03
Fortran_MODULE_DIRECTORY ${CMAKE_Fortran_MODULE_DIRECTORY}/shared
)
- endif (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
-endforeach (example ${F2003_examples})
+ endif ()
+endforeach ()
if (H5_HAVE_PARALLEL AND MPI_Fortran_FOUND)
add_executable (f90_ex_ph5example ${HDF5_F90_EXAMPLES_SOURCE_DIR}/ph5example.f90)
@@ -130,9 +130,9 @@ if (H5_HAVE_PARALLEL AND MPI_Fortran_FOUND)
FOLDER examples/fortran
Fortran_MODULE_DIRECTORY ${CMAKE_Fortran_MODULE_DIRECTORY}/shared
)
- endif (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
-endif (H5_HAVE_PARALLEL AND MPI_Fortran_FOUND)
+ endif ()
+endif ()
if (BUILD_TESTING)
include (CMakeTests.cmake)
-endif (BUILD_TESTING)
+endif ()
diff --git a/fortran/examples/CMakeTests.cmake b/fortran/examples/CMakeTests.cmake
index 34230c8..3403571 100644
--- a/fortran/examples/CMakeTests.cmake
+++ b/fortran/examples/CMakeTests.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
##############################################################################
##############################################################################
@@ -28,7 +39,7 @@
)
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (f90_ex-clear-objects PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
set (last_test "f90_ex-clear-objects")
if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
add_test (
@@ -53,45 +64,97 @@
)
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (f90_ex-shared-clear-objects PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
set (last_test "f90_ex-shared-clear-objects")
- endif (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
+ endif ()
foreach (example ${examples})
- add_test (NAME f90_ex_${example} COMMAND $<TARGET_FILE:f90_ex_${example}>)
+ if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME f90_ex_${example} COMMAND $<TARGET_FILE:f90_ex_${example}>)
+ else ()
+ add_test (NAME f90_ex_${example} COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:f90_ex_${example}>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=f90_ex_${example}.txt"
+ #-D "TEST_REFERENCE=f90_ex_${example}.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+ endif ()
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (f90_ex_${example} PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
set (last_test "f90_ex_${example}")
if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
- add_test (NAME f90_ex-shared_${example} COMMAND $<TARGET_FILE:f90_ex_${example}-shared>)
+ if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME f90_ex-shared_${example} COMMAND $<TARGET_FILE:f90_ex_${example}-shared>)
+ else ()
+ add_test (NAME f90_ex-shared_${example} COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:f90_ex_${example}-shared>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=f90_ex_${example}-shared.txt"
+ #-D "TEST_REFERENCE=f90_ex_${example}-shared.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+ endif ()
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (f90_ex-shared_${example} PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
set (last_test "f90_ex-shared_${example}")
- endif (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
-endforeach (example ${examples})
+ endif ()
+endforeach ()
if (HDF5_ENABLE_F2003)
foreach (example ${F2003_examples})
- add_test (NAME f03_ex_${example} COMMAND $<TARGET_FILE:f03_ex_${example}>)
+ if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME f03_ex_${example} COMMAND $<TARGET_FILE:f03_ex_${example}>)
+ else ()
+ add_test (NAME f03_ex_${example} COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:f03_ex_${example}>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=f03_ex_${example}.txt"
+ #-D "TEST_REFERENCE=f03_ex_${example}.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+ endif ()
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (f03_ex_${example} PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
set (last_test "f03_ex_${example}")
if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
- add_test (NAME f03_ex-shared_${example} COMMAND $<TARGET_FILE:f03_ex_${example}-shared>)
+ if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME f03_ex-shared_${example} COMMAND $<TARGET_FILE:f03_ex_${example}-shared>)
+ else ()
+ add_test (NAME f03_ex-shared_${example} COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:f03_ex_${example}-shared>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=f03_ex_${example}-shared.txt"
+ #-D "TEST_REFERENCE=f03_ex_${example}-shared.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+ endif ()
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (f03_ex-shared_${example} PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
set (last_test "f03_ex-shared_${example}")
- endif (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
- endforeach (example ${F2003_examples})
-endif (HDF5_ENABLE_F2003)
+ endif ()
+ endforeach ()
+endif ()
if (H5_HAVE_PARALLEL AND MPI_Fortran_FOUND)
add_test (NAME f90_ex_ph5example COMMAND ${MPIEXEC} ${MPIEXEC_PREFLAGS} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} ${MPIEXEC_POSTFLAGS} $<TARGET_FILE:f90_ex_ph5example>)
if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
add_test (NAME f90_ex-shared_ph5example COMMAND ${MPIEXEC} ${MPIEXEC_PREFLAGS} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} ${MPIEXEC_POSTFLAGS} $<TARGET_FILE:f90_ex_ph5example>)
- endif (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
-endif (H5_HAVE_PARALLEL AND MPI_Fortran_FOUND)
+ endif ()
+endif ()
diff --git a/fortran/examples/Makefile.am b/fortran/examples/Makefile.am
index 5a3b09e..db85c09 100644
--- a/fortran/examples/Makefile.am
+++ b/fortran/examples/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
diff --git a/fortran/examples/compound.f90 b/fortran/examples/compound.f90
index 2005f41..f5e91d6 100644
--- a/fortran/examples/compound.f90
+++ b/fortran/examples/compound.f90
@@ -5,12 +5,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
!
diff --git a/fortran/examples/compound_complex_fortran2003.f90 b/fortran/examples/compound_complex_fortran2003.f90
index 19671f7..6d0f291 100644
--- a/fortran/examples/compound_complex_fortran2003.f90
+++ b/fortran/examples/compound_complex_fortran2003.f90
@@ -5,12 +5,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdf.ncsa.uiuc.edu/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from hdfhelp@ncsa.uiuc.edu. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! This example shows how to create an array of a compound datatype which
diff --git a/fortran/examples/compound_fortran2003.f90 b/fortran/examples/compound_fortran2003.f90
index a55d1a6..0168177 100644
--- a/fortran/examples/compound_fortran2003.f90
+++ b/fortran/examples/compound_fortran2003.f90
@@ -5,12 +5,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdf.ncsa.uiuc.edu/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from hdfhelp@ncsa.uiuc.edu. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! This example shows how to create a compound data type,
diff --git a/fortran/examples/h5_cmprss.f90 b/fortran/examples/h5_cmprss.f90
index 9ab28f7..61dd7b0 100644
--- a/fortran/examples/h5_cmprss.f90
+++ b/fortran/examples/h5_cmprss.f90
@@ -5,12 +5,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! This example illustrates how to create a compressed dataset.
diff --git a/fortran/examples/h5_crtatt.f90 b/fortran/examples/h5_crtatt.f90
index 79bc576..7e287c6 100644
--- a/fortran/examples/h5_crtatt.f90
+++ b/fortran/examples/h5_crtatt.f90
@@ -5,12 +5,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! This example shows how to create and write a dataset attribute.
diff --git a/fortran/examples/h5_crtdat.f90 b/fortran/examples/h5_crtdat.f90
index 6e4c3a4..dce4408 100644
--- a/fortran/examples/h5_crtdat.f90
+++ b/fortran/examples/h5_crtdat.f90
@@ -5,12 +5,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
!
diff --git a/fortran/examples/h5_crtgrp.f90 b/fortran/examples/h5_crtgrp.f90
index 278d175..12f07e7 100644
--- a/fortran/examples/h5_crtgrp.f90
+++ b/fortran/examples/h5_crtgrp.f90
@@ -5,12 +5,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
!
diff --git a/fortran/examples/h5_crtgrpar.f90 b/fortran/examples/h5_crtgrpar.f90
index 4ef008a..341b648 100644
--- a/fortran/examples/h5_crtgrpar.f90
+++ b/fortran/examples/h5_crtgrpar.f90
@@ -5,12 +5,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
!
diff --git a/fortran/examples/h5_crtgrpd.f90 b/fortran/examples/h5_crtgrpd.f90
index d35f03d..41c1f53 100644
--- a/fortran/examples/h5_crtgrpd.f90
+++ b/fortran/examples/h5_crtgrpd.f90
@@ -5,12 +5,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
!
diff --git a/fortran/examples/h5_extend.f90 b/fortran/examples/h5_extend.f90
index 315d84f..20b91ff 100644
--- a/fortran/examples/h5_extend.f90
+++ b/fortran/examples/h5_extend.f90
@@ -5,12 +5,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! This example extends an HDF5 dataset. It is used in the HDF5 Tutorial.
diff --git a/fortran/examples/h5_rdwt.f90 b/fortran/examples/h5_rdwt.f90
index ba05b2f..2fbd85d 100644
--- a/fortran/examples/h5_rdwt.f90
+++ b/fortran/examples/h5_rdwt.f90
@@ -5,12 +5,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
!
diff --git a/fortran/examples/h5_subset.f90 b/fortran/examples/h5_subset.f90
index 6cb8f7a..ab33c86 100644
--- a/fortran/examples/h5_subset.f90
+++ b/fortran/examples/h5_subset.f90
@@ -5,12 +5,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! This example shows how to write and read a hyperslab.
diff --git a/fortran/examples/hyperslab.f90 b/fortran/examples/hyperslab.f90
index 7823ff6..d340161 100644
--- a/fortran/examples/hyperslab.f90
+++ b/fortran/examples/hyperslab.f90
@@ -5,12 +5,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
!
diff --git a/fortran/examples/mountexample.f90 b/fortran/examples/mountexample.f90
index 5bdec2a..4a2821d 100644
--- a/fortran/examples/mountexample.f90
+++ b/fortran/examples/mountexample.f90
@@ -5,12 +5,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
!
diff --git a/fortran/examples/nested_derived_type.f90 b/fortran/examples/nested_derived_type.f90
index 65e7e75..68354ac 100644
--- a/fortran/examples/nested_derived_type.f90
+++ b/fortran/examples/nested_derived_type.f90
@@ -5,12 +5,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdf.ncsa.uiuc.edu/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from hdfhelp@ncsa.uiuc.edu. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! This example shows how to create a nested compound data type,
diff --git a/fortran/examples/ph5example.f90 b/fortran/examples/ph5example.f90
index a0db200..9d4281e 100644
--- a/fortran/examples/ph5example.f90
+++ b/fortran/examples/ph5example.f90
@@ -5,12 +5,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! Fortran parallel example. Copied from Tutorial's example program of
diff --git a/fortran/examples/refobjexample.f90 b/fortran/examples/refobjexample.f90
index c8622a7..d017598 100644
--- a/fortran/examples/refobjexample.f90
+++ b/fortran/examples/refobjexample.f90
@@ -5,12 +5,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
!
diff --git a/fortran/examples/refregexample.f90 b/fortran/examples/refregexample.f90
index 68fbd24..ab45598 100644
--- a/fortran/examples/refregexample.f90
+++ b/fortran/examples/refregexample.f90
@@ -5,12 +5,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
!
diff --git a/fortran/examples/run-fortran-ex.sh.in b/fortran/examples/run-fortran-ex.sh.in
index a4d4550..cace1ae 100644
--- a/fortran/examples/run-fortran-ex.sh.in
+++ b/fortran/examples/run-fortran-ex.sh.in
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# This file: run-hlfortran-ex.sh
diff --git a/fortran/examples/rwdset_fortran2003.f90 b/fortran/examples/rwdset_fortran2003.f90
index 682676f..36ebf0d 100644
--- a/fortran/examples/rwdset_fortran2003.f90
+++ b/fortran/examples/rwdset_fortran2003.f90
@@ -5,12 +5,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
!
diff --git a/fortran/examples/selectele.f90 b/fortran/examples/selectele.f90
index dcd2379..4281ea6 100644
--- a/fortran/examples/selectele.f90
+++ b/fortran/examples/selectele.f90
@@ -5,12 +5,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
!
diff --git a/fortran/examples/testh5fc.sh.in b/fortran/examples/testh5fc.sh.in
index cd3b86e..f384909 100644
--- a/fortran/examples/testh5fc.sh.in
+++ b/fortran/examples/testh5fc.sh.in
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Tests for the h5fc compiler tool
# Created: Albert Cheng, 2007/3/14
diff --git a/fortran/src/CMakeLists.txt b/fortran/src/CMakeLists.txt
index a74459f..6d51feb 100644
--- a/fortran/src/CMakeLists.txt
+++ b/fortran/src/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_F90_SRC C CXX Fortran)
#-----------------------------------------------------------------------------
@@ -8,16 +8,16 @@ if (WIN32)
if (MSVC)
if (NOT H5_HAVE_PARALLEL)
set (H5_NOPAREXP ";")
- endif (NOT H5_HAVE_PARALLEL)
+ endif ()
if (NOT HDF5_ENABLE_F2003)
set (H5_NOF03EXP ";")
- else (NOT HDF5_ENABLE_F2003)
+ else ()
set (H5_F03EXP ";")
- endif (NOT HDF5_ENABLE_F2003)
+ endif ()
configure_file (${HDF5_F90_SRC_SOURCE_DIR}/hdf5_fortrandll.def.in ${HDF5_F90_SRC_BINARY_DIR}/hdf5_fortrandll.def @ONLY)
- endif (MSVC)
- endif (BUILD_SHARED_LIBS)
-endif (WIN32)
+ endif ()
+ endif ()
+endif ()
# configure for Fortran preprocessor
@@ -25,12 +25,12 @@ endif (WIN32)
set (CMAKE_H5_HAVE_PARALLEL 0)
if (H5_HAVE_PARALLEL)
set (CMAKE_H5_HAVE_PARALLEL 1)
-endif (H5_HAVE_PARALLEL)
+endif ()
set (CMAKE_H5_HAVE_FLOAT128 0)
if (HAVE_FLOAT128)
set (CMAKE_H5_HAVE_FLOAT128 1)
-endif(HAVE_FLOAT128)
+endif ()
configure_file (${HDF5_F90_SRC_SOURCE_DIR}/H5config_f.inc.cmake ${CMAKE_BINARY_DIR}/H5config_f.inc @ONLY)
configure_file (${HDF5_F90_SRC_SOURCE_DIR}/H5fort_type_defines.h.in ${HDF5_F90_BINARY_DIR}/H5fort_type_defines.h @ONLY)
@@ -53,12 +53,12 @@ if (WIN32 AND MSVC)
PROPERTIES
COMPILE_FLAGS "/MT"
)
- endif (BUILD_SHARED_LIBS)
+ endif ()
set_target_properties (H5_buildiface
PROPERTIES
LINK_FLAGS "/SUBSYSTEM:CONSOLE"
)
-endif (WIN32 AND MSVC)
+endif ()
set_target_properties (H5_buildiface PROPERTIES
LINKER_LANGUAGE Fortran
Fortran_MODULE_DIRECTORY ${CMAKE_Fortran_MODULE_DIRECTORY}
@@ -68,15 +68,15 @@ if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
file (MAKE_DIRECTORY "${HDF5_F90_BINARY_DIR}/shared")
if (WIN32)
set (MODSH_BUILD_DIR ${CMAKE_Fortran_MODULE_DIRECTORY}/shared/${CMAKE_BUILD_TYPE})
- else (WIN32)
+ else ()
set (MODSH_BUILD_DIR ${CMAKE_Fortran_MODULE_DIRECTORY}/shared)
- endif (WIN32)
-endif (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
+ endif ()
+endif ()
if (WIN32)
set (MOD_BUILD_DIR ${CMAKE_Fortran_MODULE_DIRECTORY}/static/${CMAKE_BUILD_TYPE})
-else (WIN32)
+else ()
set (MOD_BUILD_DIR ${CMAKE_Fortran_MODULE_DIRECTORY}/static)
-endif (WIN32)
+endif ()
INCLUDE_DIRECTORIES (${HDF5_F90_BINARY_DIR} ${CMAKE_Fortran_MODULE_DIRECTORY})
@@ -150,7 +150,7 @@ if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
INTERFACE_COMPILE_DEFINITIONS H5_BUILT_AS_DYNAMIC_LIB=1
)
set (install_targets ${install_targets} ${HDF5_F90_C_LIBSH_TARGET})
-endif (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
+endif ()
#-----------------------------------------------------------------------------
# Fortran Modules
@@ -186,7 +186,7 @@ if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
DEPENDS ${HDF5_F90_BINARY_DIR}/shared/H5_gen.F90
)
set_source_files_properties (${HDF5_F90_BINARY_DIR}/shared/H5_gen.F90 PROPERTIES GENERATED TRUE)
-endif (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
+endif ()
set (f90_F_BASE_SOURCES
# normal distribution
@@ -233,7 +233,7 @@ if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
# normal distribution
${HDF5_F90_SRC_SOURCE_DIR}/HDF5.F90
)
-endif (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
+endif ()
#-----------------------------------------------------------------------------
# Add Main fortran library
@@ -244,7 +244,7 @@ TARGET_FORTRAN_PROPERTIES (${HDF5_F90_LIB_TARGET} STATIC " " " ")
target_link_libraries (${HDF5_F90_LIB_TARGET} ${HDF5_F90_C_LIB_TARGET} ${HDF5_LIB_TARGET})
if (H5_HAVE_PARALLEL AND MPI_Fortran_FOUND)
target_link_libraries (${HDF5_F90_LIB_TARGET} ${MPI_Fortran_LIBRARIES})
-endif (H5_HAVE_PARALLEL AND MPI_Fortran_FOUND)
+endif ()
set_global_variable (HDF5_LIBRARIES_TO_EXPORT "${HDF5_LIBRARIES_TO_EXPORT};${HDF5_F90_LIB_TARGET}")
H5_SET_LIB_OPTIONS (${HDF5_F90_LIB_TARGET} ${HDF5_F90_LIB_NAME} STATIC)
set_target_properties (${HDF5_F90_LIB_TARGET} PROPERTIES
@@ -257,7 +257,7 @@ if (WIN32)
set_property (TARGET ${HDF5_F90_LIB_TARGET}
APPEND PROPERTY COMPILE_DEFINITIONS "HDF5F90_WINDOWS"
)
-endif (WIN32)
+endif ()
set (install_targets ${install_targets} ${HDF5_F90_LIB_TARGET})
add_dependencies(${HDF5_F90_LIB_TARGET} H5gen)
@@ -266,12 +266,12 @@ if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
set (SHARED_LINK_FLAGS " ")
if (WIN32 AND MSVC)
set (SHARED_LINK_FLAGS "/DLL /DEF:${HDF5_F90_SRC_BINARY_DIR}/hdf5_fortrandll.def")
- endif (WIN32 AND MSVC)
+ endif ()
TARGET_FORTRAN_PROPERTIES (${HDF5_F90_LIBSH_TARGET} SHARED " " ${SHARED_LINK_FLAGS})
target_link_libraries (${HDF5_F90_LIBSH_TARGET} ${HDF5_F90_C_LIBSH_TARGET} ${HDF5_LIBSH_TARGET})
if (H5_HAVE_PARALLEL AND MPI_Fortran_FOUND)
target_link_libraries (${HDF5_F90_LIBSH_TARGET} ${MPI_Fortran_LIBRARIES})
- endif (H5_HAVE_PARALLEL AND MPI_Fortran_FOUND)
+ endif ()
set_global_variable (HDF5_LIBRARIES_TO_EXPORT "${HDF5_LIBRARIES_TO_EXPORT};${HDF5_F90_LIBSH_TARGET}")
H5_SET_LIB_OPTIONS (${HDF5_F90_LIBSH_TARGET} ${HDF5_F90_LIB_NAME} SHARED ${HDF5_F_PACKAGE_SOVERSION})
set_target_properties (${HDF5_F90_LIBSH_TARGET} PROPERTIES
@@ -286,10 +286,10 @@ if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
set_property (TARGET ${HDF5_F90_LIBSH_TARGET}
APPEND PROPERTY COMPILE_DEFINITIONS "BUILD_HDF5_DLL;HDF5F90_WINDOWS"
)
- endif (WIN32)
+ endif ()
set (install_targets ${install_targets} ${HDF5_F90_LIBSH_TARGET})
add_dependencies(${HDF5_F90_LIBSH_TARGET} H5genSH)
-endif (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
+endif ()
#-----------------------------------------------------------------------------
# Add file(s) to CMake Install
@@ -367,7 +367,7 @@ if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
COMPONENT
fortheaders
)
-endif (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
+endif ()
#-----------------------------------------------------------------------------
# Add Target(s) to CMake Install for import into other projects
@@ -376,7 +376,9 @@ if (HDF5_EXPORTED_TARGETS)
if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
INSTALL_TARGET_PDB (${HDF5_F90_C_LIBSH_TARGET} ${HDF5_INSTALL_BIN_DIR} fortlibraries)
#INSTALL_TARGET_PDB (${HDF5_F90_LIBSH_TARGET} ${HDF5_INSTALL_BIN_DIR} fortlibraries)
- endif (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
+ endif ()
+ INSTALL_TARGET_PDB (${HDF5_F90_C_LIB_TARGET} ${HDF5_INSTALL_BIN_DIR} fortlibraries)
+ #INSTALL_TARGET_PDB (${HDF5_F90_LIB_TARGET} ${HDF5_INSTALL_BIN_DIR} fortlibraries)
install (
TARGETS
@@ -389,4 +391,4 @@ if (HDF5_EXPORTED_TARGETS)
FRAMEWORK DESTINATION ${HDF5_INSTALL_FWRK_DIR} COMPONENT fortlibraries
INCLUDES DESTINATION include
)
-endif (HDF5_EXPORTED_TARGETS)
+endif ()
diff --git a/fortran/src/H5Af.c b/fortran/src/H5Af.c
index 8f012cf..23dd936 100644
--- a/fortran/src/H5Af.c
+++ b/fortran/src/H5Af.c
@@ -10,12 +10,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
*
******
diff --git a/fortran/src/H5Aff.F90 b/fortran/src/H5Aff.F90
index 132bf41..827b803 100644
--- a/fortran/src/H5Aff.F90
+++ b/fortran/src/H5Aff.F90
@@ -17,12 +17,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! NOTES
diff --git a/fortran/src/H5Df.c b/fortran/src/H5Df.c
index 57b4d4d..588ea9f 100644
--- a/fortran/src/H5Df.c
+++ b/fortran/src/H5Df.c
@@ -10,12 +10,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
*
******
diff --git a/fortran/src/H5Dff.F90 b/fortran/src/H5Dff.F90
index cb0b292..3915f72 100644
--- a/fortran/src/H5Dff.F90
+++ b/fortran/src/H5Dff.F90
@@ -17,12 +17,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! NOTES
diff --git a/fortran/src/H5Ef.c b/fortran/src/H5Ef.c
index 4b1d4c9..0a2f2a3 100644
--- a/fortran/src/H5Ef.c
+++ b/fortran/src/H5Ef.c
@@ -10,12 +10,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
*
******
diff --git a/fortran/src/H5Eff.F90 b/fortran/src/H5Eff.F90
index 4198321..fcd08ff 100644
--- a/fortran/src/H5Eff.F90
+++ b/fortran/src/H5Eff.F90
@@ -17,12 +17,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! NOTES
diff --git a/fortran/src/H5Ff.c b/fortran/src/H5Ff.c
index c1cdb91..2d5f6ed 100644
--- a/fortran/src/H5Ff.c
+++ b/fortran/src/H5Ff.c
@@ -10,12 +10,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
*
******
diff --git a/fortran/src/H5Fff.F90 b/fortran/src/H5Fff.F90
index 165fba0..a3bef37 100644
--- a/fortran/src/H5Fff.F90
+++ b/fortran/src/H5Fff.F90
@@ -17,12 +17,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! NOTES
diff --git a/fortran/src/H5Gf.c b/fortran/src/H5Gf.c
index b9c44bb..720bd49 100644
--- a/fortran/src/H5Gf.c
+++ b/fortran/src/H5Gf.c
@@ -10,12 +10,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
*
******
diff --git a/fortran/src/H5Gff.F90 b/fortran/src/H5Gff.F90
index 30076a4..0684508 100644
--- a/fortran/src/H5Gff.F90
+++ b/fortran/src/H5Gff.F90
@@ -17,12 +17,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! NOTES
diff --git a/fortran/src/H5If.c b/fortran/src/H5If.c
index fdab0b6..8222817 100644
--- a/fortran/src/H5If.c
+++ b/fortran/src/H5If.c
@@ -10,12 +10,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
*
******
diff --git a/fortran/src/H5Iff.F90 b/fortran/src/H5Iff.F90
index c91a8aa..351dd4b 100644
--- a/fortran/src/H5Iff.F90
+++ b/fortran/src/H5Iff.F90
@@ -17,12 +17,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! NOTES
diff --git a/fortran/src/H5Lf.c b/fortran/src/H5Lf.c
index 0d3aac2..32de037 100644
--- a/fortran/src/H5Lf.c
+++ b/fortran/src/H5Lf.c
@@ -10,12 +10,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
*
******
diff --git a/fortran/src/H5Lff.F90 b/fortran/src/H5Lff.F90
index bc91072..d5bb1d1 100644
--- a/fortran/src/H5Lff.F90
+++ b/fortran/src/H5Lff.F90
@@ -14,12 +14,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! NOTES
diff --git a/fortran/src/H5Of.c b/fortran/src/H5Of.c
index 9e3ddc4..7d065a5 100644
--- a/fortran/src/H5Of.c
+++ b/fortran/src/H5Of.c
@@ -10,12 +10,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
*
******
diff --git a/fortran/src/H5Off.F90 b/fortran/src/H5Off.F90
index 8d4fb16..243ec29 100644
--- a/fortran/src/H5Off.F90
+++ b/fortran/src/H5Off.F90
@@ -18,12 +18,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! NOTES
diff --git a/fortran/src/H5Pf.c b/fortran/src/H5Pf.c
index 3989512..a114e8b 100644
--- a/fortran/src/H5Pf.c
+++ b/fortran/src/H5Pf.c
@@ -10,12 +10,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
*
******
diff --git a/fortran/src/H5Pff.F90 b/fortran/src/H5Pff.F90
index 6ba5aeb..b69d4d0 100644
--- a/fortran/src/H5Pff.F90
+++ b/fortran/src/H5Pff.F90
@@ -14,12 +14,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! NOTES
diff --git a/fortran/src/H5Rf.c b/fortran/src/H5Rf.c
index 9cd9950..6a3181f 100644
--- a/fortran/src/H5Rf.c
+++ b/fortran/src/H5Rf.c
@@ -10,12 +10,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
*
******
diff --git a/fortran/src/H5Rff.F90 b/fortran/src/H5Rff.F90
index 7ba91c4..6d6371b 100644
--- a/fortran/src/H5Rff.F90
+++ b/fortran/src/H5Rff.F90
@@ -17,12 +17,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! NOTES
diff --git a/fortran/src/H5Sf.c b/fortran/src/H5Sf.c
index 741bf96..2eae0d9 100644
--- a/fortran/src/H5Sf.c
+++ b/fortran/src/H5Sf.c
@@ -10,12 +10,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
*
******
diff --git a/fortran/src/H5Sff.F90 b/fortran/src/H5Sff.F90
index cb1388e..3434fba 100644
--- a/fortran/src/H5Sff.F90
+++ b/fortran/src/H5Sff.F90
@@ -17,12 +17,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! NOTES
diff --git a/fortran/src/H5Tf.c b/fortran/src/H5Tf.c
index 9928d0a..c40abae 100644
--- a/fortran/src/H5Tf.c
+++ b/fortran/src/H5Tf.c
@@ -10,12 +10,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
*
******
diff --git a/fortran/src/H5Tff.F90 b/fortran/src/H5Tff.F90
index 6b8f896..b63c61d 100644
--- a/fortran/src/H5Tff.F90
+++ b/fortran/src/H5Tff.F90
@@ -14,12 +14,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! NOTES
diff --git a/fortran/src/H5Zf.c b/fortran/src/H5Zf.c
index a8cf1c2..7afafd5 100644
--- a/fortran/src/H5Zf.c
+++ b/fortran/src/H5Zf.c
@@ -10,12 +10,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
*
******
diff --git a/fortran/src/H5Zff.F90 b/fortran/src/H5Zff.F90
index 0259959..848f047 100644
--- a/fortran/src/H5Zff.F90
+++ b/fortran/src/H5Zff.F90
@@ -14,12 +14,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! NOTES!
diff --git a/fortran/src/H5_buildiface.F90 b/fortran/src/H5_buildiface.F90
index 4b00d80..d4ebdd3 100644
--- a/fortran/src/H5_buildiface.F90
+++ b/fortran/src/H5_buildiface.F90
@@ -33,12 +33,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! AUTHOR
@@ -124,12 +122,10 @@ PROGRAM H5_buildiface
'! *',&
'! This file is part of HDF5. The full HDF5 copyright notice, including *',&
'! terms governing use, modification, and redistribution, is contained in *',&
-'! the files COPYING and Copyright.html. COPYING can be found at the root *',&
-'! of the source code distribution tree; Copyright.html can be found at the *',&
-'! root level of an installed copy of the electronic HDF5 document set and *',&
-'! is linked from the top-level documents page. It can also be found at *',&
-'! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *',&
-'! access to either file, you may request a copy from help@hdfgroup.org. *',&
+'! the COPYING file, which can be found at the root of the source code *',&
+'! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *',&
+'! If you do not have access to either file, you may request a copy from *',&
+'! help@hdfgroup.org. *',&
'! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *',&
'!',&
'! AUTHOR',&
diff --git a/fortran/src/H5_f.c b/fortran/src/H5_f.c
index 860f9cb..352ffab 100644
--- a/fortran/src/H5_f.c
+++ b/fortran/src/H5_f.c
@@ -10,12 +10,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
*
******
diff --git a/fortran/src/H5_ff.F90 b/fortran/src/H5_ff.F90
index f2036ee..ef89d2f 100644
--- a/fortran/src/H5_ff.F90
+++ b/fortran/src/H5_ff.F90
@@ -14,12 +14,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! NOTES
diff --git a/fortran/src/H5config_f.inc.cmake b/fortran/src/H5config_f.inc.cmake
index f0b3472..f85db6c 100644
--- a/fortran/src/H5config_f.inc.cmake
+++ b/fortran/src/H5config_f.inc.cmake
@@ -1,3 +1,14 @@
+! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+! Copyright by The HDF Group. *
+! All rights reserved. *
+! *
+! This file is part of HDF5. The full HDF5 copyright notice, including *
+! terms governing use, modification, and redistribution, is contained in *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
+! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
! fortran/src/H5config_f.inc. Generated from fortran/src/H5config_f.inc.in by configure
! Define if we have parallel support
diff --git a/fortran/src/H5config_f.inc.in b/fortran/src/H5config_f.inc.in
index 9f094d2..8921493 100644
--- a/fortran/src/H5config_f.inc.in
+++ b/fortran/src/H5config_f.inc.in
@@ -1,3 +1,14 @@
+! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+! Copyright by The HDF Group. *
+! All rights reserved. *
+! *
+! This file is part of HDF5. The full HDF5 copyright notice, including *
+! terms governing use, modification, and redistribution, is contained in *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
+! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
! fortran/src/H5config_f.inc. Generated from fortran/src/H5config_f.inc.in by configure
! The script to replace the defines in H5config_f.inc.in is
diff --git a/fortran/src/H5f90.h b/fortran/src/H5f90.h
index 7082d1d..eabe3d0 100644
--- a/fortran/src/H5f90.h
+++ b/fortran/src/H5f90.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
diff --git a/fortran/src/H5f90global.F90 b/fortran/src/H5f90global.F90
index a85ee50..dd2b171 100644
--- a/fortran/src/H5f90global.F90
+++ b/fortran/src/H5f90global.F90
@@ -28,12 +28,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! AUTHOR
diff --git a/fortran/src/H5f90i.h b/fortran/src/H5f90i.h
index f3c0160..7d066cd 100644
--- a/fortran/src/H5f90i.h
+++ b/fortran/src/H5f90i.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
diff --git a/fortran/src/H5f90kit.c b/fortran/src/H5f90kit.c
index c6c874f..6e8c793 100644
--- a/fortran/src/H5f90kit.c
+++ b/fortran/src/H5f90kit.c
@@ -13,12 +13,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
*
******
diff --git a/fortran/src/H5f90proto.h b/fortran/src/H5f90proto.h
index 67f28db..46ef8ef 100644
--- a/fortran/src/H5f90proto.h
+++ b/fortran/src/H5f90proto.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
diff --git a/fortran/src/H5fort_type_defines.h.in b/fortran/src/H5fort_type_defines.h.in
index 6feed4f..b6a35f9 100644
--- a/fortran/src/H5fort_type_defines.h.in
+++ b/fortran/src/H5fort_type_defines.h.in
@@ -1,3 +1,14 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* If you are reading this file and it has a '.h' suffix, it was automatically
* generated from the '.in' version. Make changes there.
*/
diff --git a/fortran/src/H5fortkit.F90 b/fortran/src/H5fortkit.F90
index 3062c28..f5eba8a 100644
--- a/fortran/src/H5fortkit.F90
+++ b/fortran/src/H5fortkit.F90
@@ -17,12 +17,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
!*****
diff --git a/fortran/src/H5match_types.c b/fortran/src/H5match_types.c
index 2337fb3..7e0b7e8 100644
--- a/fortran/src/H5match_types.c
+++ b/fortran/src/H5match_types.c
@@ -19,12 +19,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
*
******
@@ -66,12 +64,10 @@ initCfile(void)
* *\n\
* This file is part of HDF5. The full HDF5 copyright notice, including *\n\
* terms governing use, modification, and redistribution, is contained in *\n\
- * the files COPYING and Copyright.html. COPYING can be found at the root *\n\
- * of the source code distribution tree; Copyright.html can be found at the *\n\
- * root level of an installed copy of the electronic HDF5 document set and *\n\
- * is linked from the top-level documents page. It can also be found at *\n\
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *\n\
- * access to either file, you may request a copy from help@hdfgroup.org. *\n\
+ * the COPYING file, which can be found at the root of the source code *\n\
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *\n\
+ * If you do not have access to either file, you may request a copy from *\n\
+ * help@hdfgroup.org. *\n\
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */\n\
\n\n\
#ifndef _H5f90i_gen_H\n\
@@ -93,12 +89,10 @@ initFfile(void)
! *\n\
! This file is part of HDF5. The full HDF5 copyright notice, including *\n\
! terms governing use, modification, and redistribution, is contained in *\n\
-! the files COPYING and Copyright.html. COPYING can be found at the root *\n\
-! of the source code distribution tree; Copyright.html can be found at the *\n\
-! root level of an installed copy of the electronic HDF5 document set and *\n\
-! is linked from the top-level documents page. It can also be found at *\n\
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *\n\
-! access to either file, you may request a copy from help@hdfgroup.org. *\n\
+! the COPYING file, which can be found at the root of the source code *\n\
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *\n\
+! If you do not have access to either file, you may request a copy from *\n\
+! help@hdfgroup.org. *\n\
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *\n\
!\n!\n\
! This file is automatically generated by H5match_types.c and contains HDF5 Fortran90 type definitions.\n!\n\
diff --git a/fortran/src/HDF5.F90 b/fortran/src/HDF5.F90
index cbe4c83..0370224 100644
--- a/fortran/src/HDF5.F90
+++ b/fortran/src/HDF5.F90
@@ -17,12 +17,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
!*****
diff --git a/fortran/src/Makefile.am b/fortran/src/Makefile.am
index a271666..a863a67 100644
--- a/fortran/src/Makefile.am
+++ b/fortran/src/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
@@ -57,9 +55,9 @@ libhdf5_fortran_la_LIBADD=$(LIBHDF5)
# Remove it only when distclean.
DISTCLEANFILES=h5fc
-# H5fortran_types.F90 and H5f90i.h are automatically generaed by
+# H5fortran_types.F90 and H5f90i.h are automatically generated by
# H5match_types, and must be cleaned explicitly.
-MOSTLYCLEANFILES=H5fortran_types.F90 H5f90i_gen.h
+MOSTLYCLEANFILES=H5fortran_types.F90 H5f90i_gen.h H5_gen.F90
# Fortran module files can have different extensions and different names
# (e.g., different capitalizations) on different platforms. Write rules
diff --git a/fortran/src/README b/fortran/src/README
index 7af5df3..a258e07 100644
--- a/fortran/src/README
+++ b/fortran/src/README
@@ -1,122 +1,18 @@
+===================================
+README for the Fortran APIs to HDF5
+===================================
- README for the FORTRAN90 Prototype APIs to HDF5
-
-
-This distribution contains the HDF5 FORTRAN90 APIs source code (prototype)
-based on the HDF5 1.2.2 release (ftp://ftp.ncsa.uiuc.edu/HDF/HDF5/current),
-tests and examples.
-
-This prototype supports a selected subset of the HDF5 Library functionality.
-A complete list of the Fortran subroutines can be found in the HDF5
-Reference Manual provided with this release.
-Check the online documentation at http://hdf.ncsa.uiuc.edu/HDF5/doc (select
-the "HDF5 Fortran90 Docs" link at the bottom of the left-hand column) or
-H5_F90.R1.2.2.RefMan.tar at ftp://hdf.ncsa.uiuc.edu/HDF5/fortran .
-
-Changes since last release (October 1999)
-=========================================
-* Support for Linux
-* Support for parallel features (tested on O2K platform only)
-* Most of the functions from the H5R, H5P, H5T, H5E and H5I interfaces were
- implemented. See Reference Manual for complete list. The new functions
- include support for object and dataset region references, and for
- compound datatypes.
-* This prototype supports more predefined types. See list below in
- the "About the Fortran APIs" section.
-* This prototype supports T3E and T3E with mpt 1.3. One has to modify
- H5Dff.f90, H5Aff.f90, H5Pff.f90 to comment lines with the module procedures for
- double precision datatypes. See source code.
-
-Supported platforms
-===================
-The FORTRAN90 APIs provided here are known to work with the
-following platforms and compilers:
-
- * SunOS 5.6 with WorkshopCompilers 4.2 Fortran 90 1.2
- * SunOS 5.7 with WorkshopCompilers 5.0 Fortran 90 2.0
- * OSF1 V4.0 with Digital Fortran 90 4.1
- * Linux RedHat 6.1, Kernel 2.2.12 with PGF90
- * T3E with Cray Fortran: Version 3.4.0.0
- with mpt 1.3
-
-Compilation
-===========
-
-1. Install HDF5 Release 1.2.2 on your system
- (ftp://ftp.ncsa.uiuc.edu/HDF/HDF5/current). If you are using a
- binary distribution provided by the HDF group, make sure that a GZIP
- library is installed on your system. If you do not have a GZIP library,
- you may copy it from the HDF FTP server.
-
-2. In the src directory copy H5fortran_types.f90_<system> to
- H5fortran_types.f90, where <system> is one of the following:
-
- solaris
- digunix
- linux
-
- Example: On Digital Unix systems use the following command
- cp H5fortran_types.f90_digunix H5fortran_types.f90
-
-3. Edit Makefile_<system >in the src/, test/ and examples/ directories
- to specify the locations of the HDF5 C Library, the GZIP Library, and the
- corresponding include files on your system.
-
-4. In the src directory, run make to create the HDF5 FORTRAN90 library
- hdf5_fortran.a
- make -f Makefile_<system>
-
- Example: On Solaris run
- make -f Makefile_solaris
-
- The Fortran library hdf5_fortran.a will be created.
-
-5. In the test directory, build tests by running
- make -f Makefile_<system>
- This command will build fortranlib_test, fflush1 and fflush2 executables.
- Run those executables to make sure that the library works on your system.
-
-6. In the examples directory, run
- make -f Makefile_<system>
- to build the following examples:
-
- fileexample - creates an HDF5 file
- dsetexample - creates an empty dataset of integers
- rwdsetexample - writes and reads to the dataset created by dsetexample
- groupexample - creates a group in the file
- grpsexample - creates groups using absolute and relative names
- grpdsetexample - creates datasets in the groups
- hyperslabexample - writes and reads a hyperslab
- selectele - writes element selections
- attrexample - creates and writes a dataset attribute
- compound - creates, writes and reads one dim array of structures
- mountexample - shows how to use mounting files to access a dataset
- refobjexample - creates and stores references to the objects
- refregexample - creates and stores references to the dataset regions
-
- The script run_example.sh runs the examples in the appropriate order.
-
- Use the HDF5 utility, h5dump, to see the content of the created HDF5 files.
-
-7. Install the HDF5 Reference Manual (in HTML format). The manual
- can be found in the Unix tar file H5_F90.R1.2.2.RefMan.tar
- on the ftp server and is served over the Web from
- http://hdf.ncsa.uiuc.edu/HDF5/doc/ (select the "HDF5 Fortran90 Docs"
- link at the bottom of the left-hand column).
-
-
-8. Send bug reports and comments to hdfhelp@ncsa.uiuc.edu
-
-User's Guide Notes
-+++++++++++++++++++
+This directory contains Fortran APIs for HDF5 Library functionality.
+A complete list of implemented Fortran subroutines can be found in the HDF5
+Reference Manual.
About the source code organization
==================================
The Fortran APIs are organized in modules parallel to the HDF5 Interfaces.
-Each module is in a separate file with the name H5*ff.f. Corresponding C
+Each module is in a separate file with the name H5*ff.F90. Corresponding C
stubs are in the H5*f.c files. For example, the Fortran File APIs are in
-the file H5Fff.f and the corresponding C stubs are in the file H5Ff.c.
+the file H5Fff.F90 and the corresponding C stubs are in the file H5Ff.c.
Each module contains Fortran definitions of the constants, interfaces to
the subroutines if needed, and the subroutines themselves.
@@ -124,31 +20,29 @@ the subroutines if needed, and the subroutines themselves.
Users must use constant names in their programs instead of the numerical
values, as the numerical values are subject to change without notice.
-About the Fortran APIs
-=======================
+Quick overview of the Fortran APIs
+==================================
+
+* An in-depth description of each Fortran API and its parameters can
+ be found in the HDF5 Reference Manual.
* The Fortran APIs come in the form of Fortran subroutines.
* Each Fortran subroutine name is derived from the corresponding C function
name by adding "_f" to the name. For example, the name of the C function
to create an HDF5 file is H5Fcreate; the corresponding Fortran subroutine
- is h5fcreate_f.
-
-* A description of each Fortran subroutine and its parameters can be found
- following the description of the corresponding C function in the
- Reference Manual provided with this release. The manual can be found in
- the Unix tar file H5_F90.R1.2.2.tar in this directory and
- is served over the Web from http://hdf.ncsa.uiuc.edu/HDF5/doc/ (select
- the "HDF5 Fortran90 Docs" link at the bottom of the left-hand column).
+ is h5fcreate_f.
-* The parameter list for each Fortran subroutine has two more parameters
- than the corresponding C function. These additional parameters hold
+* The parameter list for each Fortran subroutine usually has two more parameters
+ than the corresponding C function. These additional parameters typically hold
the return value and an error code. The order of the Fortran subroutine
- parameters may differ from the order of the C function parameters.
- The Fortran subroutine parameters are listed in the following order:
+ parameters may differ from the order of the C function parameters.
+
+ The Fortran subroutine parameters are usually listed in the following order:
-- required input parameters,
-- output parameters, including return value and error code, and
- -- optional input parameters.
+      -- optional input parameters.
+
For example, the C function to create a dataset has the following
prototype:
@@ -158,72 +52,35 @@ About the Fortran APIs
The corresponding Fortran subroutine has the following form:
- SUBROUTINE h5dcreate_f(loc_id, name, type_id, space_id, dset_id,
- hdferr, creation_prp)
-
+ SUBROUTINE h5dcreate_f(loc_id, name, type_id, space_id, dset_id, &
+              hdferr, dset_creation_prp, link_creation_prp, dset_access_prp)
+
The first four parameters of the Fortran subroutine correspond to the
C function parameters. The fifth parameter dset_id is an output
parameter and contains a valid dataset identifier if the value of the
- sixth output parameter hdferr indicates successful completion.
+ sixth output parameter, hdferr, indicates successful completion.
(Error code descriptions are provided with the subroutine descriptions
- in the Reference Manual.) The seventh input parameter creation_prp
- is optional, and may be omitted when the default creation property
- list is used.
- (XXX: Update this! - QAK)
-
-* Parameters to the Fortran subroutines have one of the following
- predefined datatypes (see the file H5fortran_types.f90 for KIND
- definitions):
+ in the Reference Manual.) The last three input parameters are optional
+ and may be omitted, resulting in default values being used.
+
+* Parameters to the Fortran subroutines are typically declared with
+  predefined datatypes (see the build-time generated file
+  H5fortran_types.F90 for a complete listing):
INTEGER(HID_T) compares with hid_t type in HDF5 C APIs
INTEGER(HSIZE_T) compares with hsize_t in HDF5 C APIs
INTEGER(HSSIZE_T) compares with hssize_t in HDF5 C APIs
INTEGER(SIZE_T) compares with the C size_t type
+
These integer types usually correspond to 4 or 8 byte integers,
- depending on the FORTRAN90 compiler and corresponding HDF5
+ depending on the Fortran compiler and corresponding HDF5
C library definitions.
- The H5R module defines two types:
- TYPE(HOBJ_REF_T_F) compares to the hobj_ref_t in HDF5 C API
- TYPE(HDSET_REG_REF_T_F) compares to hdset_reg_ref_t in HDF5 C API
- These types are represented by character arrays now.
- The internal representation can be changed in the future.
-
-* Each Fortran application must call the h5init_types subroutine to
+* Each Fortran application must call the h5open_f subroutine to
initialize the Fortran predefined datatypes before calling the HDF5 Fortran
- subroutines. The application must call the h5close_types subroutine
- after all calls to the HDF5 Fortran Library.
-
-* The following predefined types are implemented in this prototype:
-
- H5T_NATIVE_INTEGER
- H5T_NATIVE_REAL
- H5T_NATIVE_DOUBLE
- H5T_NATIVE_CHARACTER
- H5T_STD_REF_OBJ
- H5T_STD_REF_DSETREG
- H5T_IEEE_F32BE
- H5T_IEEE_F32LE
- H5T_IEEE_F64BE
- H5T_IEEE_F64LE
- H5T_STD_I8BE
- H5T_STD_I8LE
- H5T_STD_I16BE
- H5T_STD_I16LE
- H5T_STD_I32BE
- H5T_STD_I32LE
- H5T_STD_I64BE
- H5T_STD_I64LE
- H5T_STD_U8BE
- H5T_STD_U8LE
- H5T_STD_U16BE
- H5T_STD_U16LE
- H5T_STD_U32BE
- H5T_STD_U32LE
- H5T_STD_U64BE
- H5T_STD_U64LE
-
-
+ subroutines. The application should call the h5close_f subroutine
+   after all calls to the HDF5 Fortran Library; see the sketch below.
+
* When a C application reads data stored from a Fortran program, the data
will appear to be transposed due to the difference in the C - Fortran
storage order. For example, if Fortran writes a 4x6 two-dimensional dataset
@@ -233,6 +90,45 @@ About the Fortran APIs
* Fortran indices are 1 based.
-* Compound datatype datasets can be written or read by atomic fields only.
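
Taken together, the conventions above look roughly like the sketch below.
It is illustrative only: the file name, dataset name, and dimensions are
arbitrary, and the calls simply follow the documented h5*_f naming pattern.

   PROGRAM overview_sketch
     USE HDF5                        ! provides HID_T, HSIZE_T and the h5*_f calls
     IMPLICIT NONE
     INTEGER(HID_T)                 :: file_id, space_id, dset_id
     INTEGER(HSIZE_T), DIMENSION(2) :: dims = (/4, 6/)
     INTEGER                        :: hdferr

     CALL h5open_f(hdferr)           ! initialize the predefined datatypes
     CALL h5fcreate_f("sketch.h5", H5F_ACC_TRUNC_F, file_id, hdferr)
     CALL h5screate_simple_f(2, dims, space_id, hdferr)
     ! Optional property-list arguments are omitted, so defaults are used.
     CALL h5dcreate_f(file_id, "dset", H5T_NATIVE_INTEGER, space_id, dset_id, hdferr)
     CALL h5dclose_f(dset_id, hdferr)
     CALL h5sclose_f(space_id, hdferr)
     CALL h5fclose_f(file_id, hdferr)
     CALL h5close_f(hdferr)          ! close the Fortran interface
   END PROGRAM overview_sketch

Note how each output identifier precedes the hdferr error code and optional
inputs come last, matching the parameter ordering described above.
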
+============================
+FOR DEVELOPERS
+============================
+
+Procedure to add a new function
+----------------------------------
+
+(1) Edit the fortran/src/H5*ff.F90 file
+(2) Edit the fortran/src/H5*f.c file
+(3) Edit the fortran/src/H5f90proto.h file
+(4) Add the new function to fortran/src/hdf5_fortrandll.def.in
+
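As an illustration of steps (1) and (2), a wrapper in an H5*ff.F90 module
typically forwards its arguments to a C stub in the matching H5*f.c file
through a BIND(C) interface. The sketch below is hypothetical: h5xfoo_f and
h5xfoo_c are placeholder names, and the exact conventions (argument passing,
return values) should be copied from the neighbouring wrappers.

   ! Hypothetical wrapper; assumes the enclosing module provides HID_T
   ! (e.g. via USE H5GLOBAL), as the existing H5*ff.F90 modules do.
   SUBROUTINE h5xfoo_f(obj_id, hdferr)
     USE ISO_C_BINDING, ONLY : C_INT
     IMPLICIT NONE
     INTEGER(HID_T), INTENT(IN)  :: obj_id  ! identifier passed through to the C stub
     INTEGER,        INTENT(OUT) :: hdferr  ! error code: 0 on success, -1 on failure
     INTERFACE
        INTEGER(C_INT) FUNCTION h5xfoo_c(obj_id) BIND(C, NAME='h5xfoo_c')
          IMPORT :: HID_T, C_INT
          IMPLICIT NONE
          INTEGER(HID_T), INTENT(IN) :: obj_id
        END FUNCTION h5xfoo_c
     END INTERFACE
     hdferr = INT(h5xfoo_c(obj_id))         ! C stub implemented in the H5*f.c file
   END SUBROUTINE h5xfoo_f
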
+Procedure for passing C variables to Fortran
+---------------------------------------------
+
+(1) Find the C struct name you are interested in:
+ (a) src/H5public.h if it is a generic type, i.e. H5_*
+ or
+    (b) src/H5*public.h if it is a specific type, i.e. H5*_
+
+(2) Put that structure into an array that will be passed to Fortran in:
+    (a) fortran/src/H5_f.c (add to the nh5init_flags_c subroutine)
+    (b) fortran/src/H5f90proto.h (edit the nh5init_flags_c interface call)
+
+(3) Edit the function call in fortran/src/H5_ff.F90
+ (a) edit the call: FUNCTION h5init_flags_c
+    (b) edit the h5init_flags_c call in h5open_f to match the number of arguments being passed
+
+(4) Add the size of the array, and the array itself, to fortran/src/H5f90global.F90
+    - must match the size found in H5_f.c
+
+NOTE: To just add a default C value argument, do steps (2a) and (4)
+
+
+Procedure for adding a new file to the repository
+--------------------------------------------------
+
+Add the name of the file to:
+    (1) the Makefile.am located in the same directory as the new file
+    (2) the CMakeLists.txt located in the same directory as the new file
+    (3) the MANIFEST located in the top-level directory
-Not all of the APIs provided with this prototype have been fully tested.
diff --git a/fortran/src/README_DEVELOPEMENT b/fortran/src/README_DEVELOPEMENT
deleted file mode 100644
index 43982e1..0000000
--- a/fortran/src/README_DEVELOPEMENT
+++ /dev/null
@@ -1,38 +0,0 @@
-Procedure to add a new function
----------------------------------
-
-(1) Edit the fortran/src/H5*ff.f90 file
-(2) Edit the fortran/src/H5*f.c file
-(3) Edit the fortran/src/H5f90proto.h file
-(4) Add the new function to fortran/src/hdf5_fortrandll.def.in
-
-Procedure for passing C variables to Fortran
----------------------------------------------
-
-(1) Find the C struct name you are interested in:
- (a) src/H5public.h if it is a generic type, i.e. H5_*
- or
- (b) src/H5*public.h if is a specific type, i.e. H5*_
-
-(2) Put that structure into an array that will be passed to fortran in:
- (a) fortran/src/H5_f.c (add to nh5init_flags_c subroutine)
- (b) edit fortran/src/H5f90proto.h and edit nh5init_flags_c interface call
-
-(3) Edit the function call in fortran/src/H5_ff.f90
- (a) edit the call: FUNCTION h5init_flags_c
- (b) edit h5init_flags_c call in h5open_f to match the number of arguments passing
-
-(4) add the size of the array and array to fortran/src/H5f90global.f90
- - must match the size found it H5_f.c
-
-NOTE: To just add a default C value argument, do steps (2a) and (4)
-
-
-Procedure for adding a new file to the repository
---------------------------------------------------
-
-Add the name of the file to the:
- (1) Makefile.am located in the same directory as the newfile
- (2) CMakeLists.txt located in the same directory as the newfile
- (3) MANIFEST located in the top level directory
-
diff --git a/fortran/src/h5fc.in b/fortran/src/h5fc.in
index f8d2182..47642c9 100644
--- a/fortran/src/h5fc.in
+++ b/fortran/src/h5fc.in
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
############################################################################
diff --git a/fortran/test/CMakeLists.txt b/fortran/test/CMakeLists.txt
index 5b11a0a..1661a10 100644
--- a/fortran/test/CMakeLists.txt
+++ b/fortran/test/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_FORTRAN_TESTS C CXX Fortran)
#-----------------------------------------------------------------------------
@@ -21,12 +21,12 @@ if (WIN32 AND MSVC)
PROPERTIES
COMPILE_FLAGS "/MT"
)
- endif (BUILD_SHARED_LIBS)
+ endif ()
set_target_properties (H5_test_buildiface
PROPERTIES
LINK_FLAGS "/SUBSYSTEM:CONSOLE"
)
-endif (WIN32 AND MSVC)
+endif ()
set_target_properties (H5_test_buildiface PROPERTIES
LINKER_LANGUAGE Fortran
Fortran_MODULE_DIRECTORY ${CMAKE_Fortran_MODULE_DIRECTORY}
@@ -36,15 +36,15 @@ if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
file (MAKE_DIRECTORY "${HDF5_F90_BINARY_DIR}/shared")
if (WIN32)
set (MODSH_BUILD_DIR ${CMAKE_Fortran_MODULE_DIRECTORY}/shared/${CMAKE_BUILD_TYPE})
- else (WIN32)
+ else ()
set (MODSH_BUILD_DIR ${CMAKE_Fortran_MODULE_DIRECTORY}/shared)
- endif (WIN32)
-endif (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
+ endif ()
+endif ()
if (WIN32)
set (MOD_BUILD_DIR ${CMAKE_Fortran_MODULE_DIRECTORY}/static/${CMAKE_BUILD_TYPE})
-else (WIN32)
+else ()
set (MOD_BUILD_DIR ${CMAKE_Fortran_MODULE_DIRECTORY}/static)
-endif (WIN32)
+endif ()
INCLUDE_DIRECTORIES (${CMAKE_Fortran_MODULE_DIRECTORY} ${MOD_BUILD_DIR})
@@ -79,7 +79,7 @@ if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
INTERFACE_INCLUDE_DIRECTORIES "$<INSTALL_INTERFACE:$<INSTALL_PREFIX>/include>"
INTERFACE_COMPILE_DEFINITIONS H5_BUILT_AS_DYNAMIC_LIB=1
)
-endif (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
+endif ()
set (CMD $<TARGET_FILE:H5_test_buildiface>)
add_custom_command (
@@ -107,7 +107,7 @@ if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
DEPENDS ${HDF5_F90_BINARY_DIR}/shared/tf_gen.F90
)
set_source_files_properties (${HDF5_F90_BINARY_DIR}/shared/tf_gen.F90 PROPERTIES GENERATED TRUE)
-endif (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
+endif ()
set (HDF5_F90_TF_SOURCES
# generated files
@@ -126,7 +126,7 @@ if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
tf.F90
)
set_source_files_properties (${HDF5_F90_TF_SOURCES_SHARED} PROPERTIES LANGUAGE Fortran)
-endif (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
+endif ()
add_library (${HDF5_F90_TEST_LIB_TARGET} STATIC ${HDF5_F90_TF_SOURCES})
TARGET_FORTRAN_PROPERTIES (${HDF5_F90_TEST_LIB_TARGET} STATIC " " " ")
@@ -147,7 +147,7 @@ if (WIN32)
set_property (TARGET ${HDF5_F90_TEST_LIB_TARGET} APPEND PROPERTY
COMPILE_DEFINITIONS "HDF5F90_WINDOWS"
)
-endif (WIN32)
+endif ()
add_dependencies(${HDF5_F90_TEST_LIB_TARGET} H5testgen)
if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
@@ -155,7 +155,7 @@ if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
set (SHARED_LINK_FLAGS " ")
if (WIN32 AND MSVC)
set (SHARED_LINK_FLAGS "/DLL")
- endif (WIN32 AND MSVC)
+ endif ()
TARGET_FORTRAN_PROPERTIES (${HDF5_F90_TEST_LIBSH_TARGET} SHARED " " ${SHARED_LINK_FLAGS})
target_link_libraries (${HDF5_F90_TEST_LIBSH_TARGET}
${HDF5_F90_C_TEST_LIBSH_TARGET}
@@ -176,9 +176,9 @@ if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
set_property (TARGET ${HDF5_F90_TEST_LIBSH_TARGET} APPEND PROPERTY
COMPILE_DEFINITIONS "BUILD_HDF5_TEST_DLL;HDF5F90_WINDOWS"
)
- endif (WIN32)
+ endif ()
add_dependencies(${HDF5_F90_TEST_LIBSH_TARGET} H5testgenSH)
-endif (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
+endif ()
#-----------------------------------------------------------------------------
# Add Tests
@@ -211,7 +211,7 @@ target_link_libraries (testhdf5_fortran
)
if (WIN32 AND MSVC)
target_link_libraries (testhdf5_fortran "ws2_32.lib")
-endif (WIN32 AND MSVC)
+endif ()
target_include_directories (testhdf5_fortran PRIVATE ${CMAKE_Fortran_MODULE_DIRECTORY}/static)
set_target_properties (testhdf5_fortran PROPERTIES
LINKER_LANGUAGE Fortran
@@ -247,7 +247,7 @@ if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
)
if (WIN32 AND MSVC)
target_link_libraries (testhdf5_fortran-shared "ws2_32.lib")
- endif (WIN32 AND MSVC)
+ endif ()
target_include_directories (testhdf5_fortran-shared PRIVATE ${CMAKE_Fortran_MODULE_DIRECTORY}/shared)
set_target_properties (testhdf5_fortran-shared PROPERTIES
LINKER_LANGUAGE Fortran
@@ -255,7 +255,7 @@ if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
Fortran_MODULE_DIRECTORY ${CMAKE_Fortran_MODULE_DIRECTORY}/shared
)
add_dependencies (testhdf5_fortran-shared ${HDF5_F90_TEST_LIBSH_TARGET})
-endif (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
+endif ()
#-- Adding test for testhdf5_fortran_1_8
add_executable (testhdf5_fortran_1_8
@@ -275,7 +275,7 @@ target_link_libraries (testhdf5_fortran_1_8
)
if (WIN32 AND MSVC)
target_link_libraries (testhdf5_fortran_1_8 "ws2_32.lib")
-endif (WIN32 AND MSVC)
+endif ()
target_include_directories (testhdf5_fortran_1_8 PRIVATE ${CMAKE_Fortran_MODULE_DIRECTORY}/static)
set_target_properties (testhdf5_fortran_1_8 PROPERTIES
LINKER_LANGUAGE Fortran
@@ -302,7 +302,7 @@ if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
)
if (WIN32 AND MSVC)
target_link_libraries (testhdf5_fortran_1_8-shared "ws2_32.lib")
- endif (WIN32 AND MSVC)
+ endif ()
target_include_directories (testhdf5_fortran_1_8-shared PRIVATE ${CMAKE_Fortran_MODULE_DIRECTORY}/shared)
set_target_properties (testhdf5_fortran_1_8-shared PROPERTIES
LINKER_LANGUAGE Fortran
@@ -310,7 +310,7 @@ if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
Fortran_MODULE_DIRECTORY ${CMAKE_Fortran_MODULE_DIRECTORY}/shared
)
add_dependencies (testhdf5_fortran_1_8-shared ${HDF5_F90_TEST_LIBSH_TARGET})
-endif (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
+endif ()
#-- Adding test for fortranlib_test_F03
add_executable (fortranlib_test_F03
@@ -332,7 +332,7 @@ target_link_libraries (fortranlib_test_F03
)
if (WIN32 AND MSVC)
target_link_libraries (fortranlib_test_F03 "ws2_32.lib")
-endif (WIN32 AND MSVC)
+endif ()
target_include_directories (fortranlib_test_F03 PRIVATE ${CMAKE_Fortran_MODULE_DIRECTORY}/static)
set_target_properties (fortranlib_test_F03 PROPERTIES
LINKER_LANGUAGE Fortran
@@ -361,7 +361,7 @@ if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
)
if (WIN32 AND MSVC)
target_link_libraries (fortranlib_test_F03-shared "ws2_32.lib")
- endif (WIN32 AND MSVC)
+ endif ()
target_include_directories (fortranlib_test_F03-shared PRIVATE ${CMAKE_Fortran_MODULE_DIRECTORY}/shared)
set_target_properties (fortranlib_test_F03-shared PROPERTIES
@@ -370,7 +370,7 @@ if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
Fortran_MODULE_DIRECTORY ${CMAKE_Fortran_MODULE_DIRECTORY}/shared
)
add_dependencies (fortranlib_test_F03-shared ${HDF5_F90_TEST_LIBSH_TARGET})
-endif (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
+endif ()
#-- Adding test for fflush1
add_executable (fflush1 fflush1.F90)
@@ -383,7 +383,7 @@ target_link_libraries (fflush1
)
if (WIN32 AND MSVC)
target_link_libraries (fflush1 "ws2_32.lib")
-endif (WIN32 AND MSVC)
+endif ()
target_include_directories (fflush1 PRIVATE ${CMAKE_Fortran_MODULE_DIRECTORY}/static)
set_target_properties (fflush1 PROPERTIES
LINKER_LANGUAGE Fortran
@@ -403,7 +403,7 @@ if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
)
if (WIN32 AND MSVC)
target_link_libraries (fflush1-shared "ws2_32.lib")
- endif (WIN32 AND MSVC)
+ endif ()
target_include_directories (fflush1-shared PRIVATE ${CMAKE_Fortran_MODULE_DIRECTORY}/shared)
set_target_properties (fflush1-shared PROPERTIES
LINKER_LANGUAGE Fortran
@@ -411,7 +411,7 @@ if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
Fortran_MODULE_DIRECTORY ${CMAKE_Fortran_MODULE_DIRECTORY}/shared
)
add_dependencies (fflush1-shared ${HDF5_F90_TEST_LIBSH_TARGET})
-endif (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
+endif ()
#-- Adding test for fflush2
add_executable (fflush2 fflush2.F90)
@@ -424,7 +424,7 @@ target_link_libraries (fflush2
)
if (WIN32 AND MSVC)
target_link_libraries (fflush2 "ws2_32.lib")
-endif (WIN32 AND MSVC)
+endif ()
target_include_directories (fflush2 PRIVATE ${CMAKE_Fortran_MODULE_DIRECTORY}/static)
set_target_properties (fflush2 PROPERTIES
LINKER_LANGUAGE Fortran
@@ -444,7 +444,7 @@ if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
)
if (WIN32 AND MSVC)
target_link_libraries (fflush2-shared "ws2_32.lib")
- endif (WIN32 AND MSVC)
+ endif ()
target_include_directories (fflush2-shared PRIVATE ${CMAKE_Fortran_MODULE_DIRECTORY}/shared)
set_target_properties (fflush2-shared PROPERTIES
LINKER_LANGUAGE Fortran
@@ -452,6 +452,6 @@ if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
Fortran_MODULE_DIRECTORY ${CMAKE_Fortran_MODULE_DIRECTORY}/shared
)
add_dependencies (fflush2-shared ${HDF5_F90_TEST_LIBSH_TARGET})
-endif (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
+endif ()
include (CMakeTests.cmake)
diff --git a/fortran/test/CMakeTests.cmake b/fortran/test/CMakeTests.cmake
index e91e820..e171e7c 100644
--- a/fortran/test/CMakeTests.cmake
+++ b/fortran/test/CMakeTests.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
##############################################################################
##############################################################################
@@ -6,7 +17,7 @@
##############################################################################
if (BUILD_SHARED_LIBS)
file (MAKE_DIRECTORY "${PROJECT_BINARY_DIR}/fshared")
-endif (BUILD_SHARED_LIBS)
+endif ()
# Remove any output file left over from previous test run
add_test (
@@ -49,24 +60,69 @@ add_test (
)
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (FORTRAN_testhdf5-clear-objects PROPERTIES DEPENDS ${last_test})
-endif (NOT "${last_test}" STREQUAL "")
+endif ()
set (last_test "FORTRAN_testhdf5-clear-objects")
-add_test (NAME FORTRAN_testhdf5_fortran COMMAND $<TARGET_FILE:testhdf5_fortran>)
-set_tests_properties (FORTRAN_testhdf5_fortran PROPERTIES PASS_REGULAR_EXPRESSION "[ ]*0 error.s")
+if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME FORTRAN_testhdf5_fortran COMMAND $<TARGET_FILE:testhdf5_fortran>)
+else ()
+ add_test (NAME FORTRAN_testhdf5_fortran COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:testhdf5_fortran>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_REGEX= 0 error.s."
+ -D "TEST_MATCH= 0 error(s)"
+ -D "TEST_OUTPUT=testhdf5_fortran.txt"
+ #-D "TEST_REFERENCE=testhdf5_fortran.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+endif ()
+#set_tests_properties (FORTRAN_testhdf5_fortran PROPERTIES PASS_REGULAR_EXPRESSION "[ ]*0 error.s")
set_tests_properties (FORTRAN_testhdf5_fortran PROPERTIES DEPENDS FORTRAN_testhdf5-clear-objects)
#-- Adding test for testhdf5_fortran_1_8
-add_test (NAME FORTRAN_testhdf5_fortran_1_8 COMMAND $<TARGET_FILE:testhdf5_fortran_1_8>)
-set_tests_properties (FORTRAN_testhdf5_fortran_1_8 PROPERTIES PASS_REGULAR_EXPRESSION "[ ]*0 error.s")
+if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME FORTRAN_testhdf5_fortran_1_8 COMMAND $<TARGET_FILE:testhdf5_fortran_1_8>)
+else ()
+ add_test (NAME FORTRAN_testhdf5_fortran_1_8 COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:testhdf5_fortran_1_8>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_REGEX= 0 error.s."
+ -D "TEST_MATCH= 0 error(s)"
+ -D "TEST_OUTPUT=testhdf5_fortran_1_8.txt"
+ #-D "TEST_REFERENCE=testhdf5_fortran_1_8.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+endif ()
+#set_tests_properties (FORTRAN_testhdf5_fortran_1_8 PROPERTIES PASS_REGULAR_EXPRESSION "[ ]*0 error.s")
set_tests_properties (FORTRAN_testhdf5_fortran_1_8 PROPERTIES DEPENDS FORTRAN_testhdf5_fortran)
#-- Adding test for fortranlib_test_F03
if (HDF5_ENABLE_F2003)
- add_test (NAME FORTRAN_fortranlib_test_F03 COMMAND $<TARGET_FILE:fortranlib_test_F03>)
- set_tests_properties (FORTRAN_fortranlib_test_F03 PROPERTIES PASS_REGULAR_EXPRESSION "[ ]*0 error.s")
+ if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME FORTRAN_fortranlib_test_F03 COMMAND $<TARGET_FILE:fortranlib_test_F03>)
+ else ()
+ add_test (NAME FORTRAN_fortranlib_test_F03 COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:fortranlib_test_F03>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_REGEX= 0 error.s."
+ -D "TEST_MATCH= 0 error(s)"
+ -D "TEST_OUTPUT=fortranlib_test_F03.txt"
+ #-D "TEST_REFERENCE=fortranlib_test_F03.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+ endif ()
+# set_tests_properties (FORTRAN_fortranlib_test_F03 PROPERTIES PASS_REGULAR_EXPRESSION "[ ]*0 error.s")
set_tests_properties (FORTRAN_fortranlib_test_F03 PROPERTIES DEPENDS FORTRAN_testhdf5_fortran_1_8)
-endif (HDF5_ENABLE_F2003)
+endif ()
#-- Adding test for fflush1
add_test (NAME FORTRAN_fflush1 COMMAND $<TARGET_FILE:fflush1>)
@@ -119,21 +175,66 @@ if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
)
set_tests_properties (FORTRAN_testhdf5-shared-clear-objects PROPERTIES DEPENDS FORTRAN_testhdf5-clear-objects)
- add_test (NAME FORTRAN_testhdf5_fortran-shared COMMAND $<TARGET_FILE:testhdf5_fortran-shared>)
- set_tests_properties (FORTRAN_testhdf5_fortran-shared PROPERTIES PASS_REGULAR_EXPRESSION "[ ]*0 error.s")
+ if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME FORTRAN_testhdf5_fortran-shared COMMAND $<TARGET_FILE:testhdf5_fortran-shared>)
+ else ()
+ add_test (NAME FORTRAN_testhdf5_fortran-shared COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:testhdf5_fortran-shared>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_REGEX= 0 error.s."
+ -D "TEST_MATCH= 0 error(s)"
+ -D "TEST_OUTPUT=testhdf5_fortran.txt"
+ #-D "TEST_REFERENCE=testhdf5_fortran.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}/fshared"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+ endif ()
+# set_tests_properties (FORTRAN_testhdf5_fortran-shared PROPERTIES PASS_REGULAR_EXPRESSION "[ ]*0 error.s")
set_tests_properties (FORTRAN_testhdf5_fortran-shared PROPERTIES DEPENDS "FORTRAN_testhdf5_fortran;FORTRAN_testhdf5-shared-clear-objects")
#-- Adding test for testhdf5_fortran_1_8
- add_test (NAME FORTRAN_testhdf5_fortran_1_8-shared COMMAND $<TARGET_FILE:testhdf5_fortran_1_8-shared>)
- set_tests_properties (FORTRAN_testhdf5_fortran_1_8-shared PROPERTIES PASS_REGULAR_EXPRESSION "[ ]*0 error.s")
+ if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME FORTRAN_testhdf5_fortran_1_8-shared COMMAND $<TARGET_FILE:testhdf5_fortran_1_8-shared>)
+ else ()
+ add_test (NAME FORTRAN_testhdf5_fortran_1_8-shared COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:testhdf5_fortran_1_8-shared>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_REGEX= 0 error.s."
+ -D "TEST_MATCH= 0 error(s)"
+ -D "TEST_OUTPUT=testhdf5_fortran_1_8.txt"
+ #-D "TEST_REFERENCE=testhdf5_fortran_1_8.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}/fshared"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+ endif ()
+# set_tests_properties (FORTRAN_testhdf5_fortran_1_8-shared PROPERTIES PASS_REGULAR_EXPRESSION "[ ]*0 error.s")
set_tests_properties (FORTRAN_testhdf5_fortran_1_8-shared PROPERTIES DEPENDS FORTRAN_testhdf5_fortran_1_8)
#-- Adding test for fortranlib_test_F03
if (HDF5_ENABLE_F2003)
- add_test (NAME FORTRAN_fortranlib_test_F03-shared COMMAND $<TARGET_FILE:fortranlib_test_F03-shared>)
- set_tests_properties (FORTRAN_fortranlib_test_F03-shared PROPERTIES PASS_REGULAR_EXPRESSION "[ ]*0 error.s")
+ if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME FORTRAN_fortranlib_test_F03-shared COMMAND $<TARGET_FILE:fortranlib_test_F03-shared>)
+ else ()
+ add_test (NAME FORTRAN_fortranlib_test_F03-shared COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:fortranlib_test_F03-shared>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_REGEX= 0 error.s."
+ -D "TEST_MATCH= 0 error(s)"
+ -D "TEST_OUTPUT=fortranlib_test_F03.txt"
+ #-D "TEST_REFERENCE=fortranlib_test_F03.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}/fshared"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+ endif ()
+# set_tests_properties (FORTRAN_fortranlib_test_F03-shared PROPERTIES PASS_REGULAR_EXPRESSION "[ ]*0 error.s")
set_tests_properties (FORTRAN_fortranlib_test_F03-shared PROPERTIES DEPENDS FORTRAN_fortranlib_test_F03)
- endif (HDF5_ENABLE_F2003)
+ endif ()
#-- Adding test for fflush1
add_test (NAME FORTRAN_fflush1-shared COMMAND $<TARGET_FILE:fflush1-shared>)
@@ -142,4 +243,4 @@ if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
#-- Adding test for fflush2
add_test (NAME FORTRAN_fflush2-shared COMMAND $<TARGET_FILE:fflush2-shared>)
set_tests_properties (FORTRAN_fflush2-shared PROPERTIES DEPENDS FORTRAN_fflush1-shared)
-endif (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
+endif ()
diff --git a/fortran/test/H5_test_buildiface.F90 b/fortran/test/H5_test_buildiface.F90
index 8b27a96..636ded4 100644
--- a/fortran/test/H5_test_buildiface.F90
+++ b/fortran/test/H5_test_buildiface.F90
@@ -33,12 +33,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! AUTHOR
@@ -82,12 +80,10 @@ PROGRAM H5_test_buildiface
'! *',&
'! This file is part of HDF5. The full HDF5 copyright notice, including *',&
'! terms governing use, modification, and redistribution, is contained in *',&
-'! the files COPYING and Copyright.html. COPYING can be found at the root *',&
-'! of the source code distribution tree; Copyright.html can be found at the *',&
-'! root level of an installed copy of the electronic HDF5 document set and *',&
-'! is linked from the top-level documents page. It can also be found at *',&
-'! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *',&
-'! access to either file, you may request a copy from help@hdfgroup.org. *',&
+'! the COPYING file, which can be found at the root of the source code *',&
+'! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *',&
+'! If you do not have access to either file, you may request a copy from *',&
+'! help@hdfgroup.org. *',&
'! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *',&
'!',&
'! AUTHOR',&
diff --git a/fortran/test/Makefile.am b/fortran/test/Makefile.am
index 3def26e..8613cf9 100644
--- a/fortran/test/Makefile.am
+++ b/fortran/test/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
diff --git a/fortran/test/fflush1.F90 b/fortran/test/fflush1.F90
index ca2550f..bd1f551 100644
--- a/fortran/test/fflush1.F90
+++ b/fortran/test/fflush1.F90
@@ -17,12 +17,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
!*****
diff --git a/fortran/test/fflush2.F90 b/fortran/test/fflush2.F90
index 4230832..278f73d 100644
--- a/fortran/test/fflush2.F90
+++ b/fortran/test/fflush2.F90
@@ -17,12 +17,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
!*****
diff --git a/fortran/test/fortranlib_test.F90 b/fortran/test/fortranlib_test.F90
index 2f88c45..8d5b32b 100644
--- a/fortran/test/fortranlib_test.F90
+++ b/fortran/test/fortranlib_test.F90
@@ -14,12 +14,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
!*****
@@ -52,20 +50,20 @@ PROGRAM fortranlibtest
IF(total_error .EQ. 0) THEN
WRITE(*, '(" FORTRANLIB_TEST is linked with HDF5 Library version ")', advance="NO")
- WRITE(*, '(I1)', advance="NO") majnum
+ WRITE(*, '(I0)', advance="NO") majnum
WRITE(*, '(".")', advance="NO")
- WRITE(*, '(I1)', advance="NO") minnum
+ WRITE(*, '(I0)', advance="NO") minnum
WRITE(*, '(" release ")', advance="NO")
- WRITE(*, '(I3)') relnum
+ WRITE(*, '(I0)') relnum
ELSE
total_error = total_error + 1
ENDIF
WRITE(*,*)
-! CALL h5check_version_f(1,4,4,total_error)
-! write(*,*) '========================================='
-! write(*,*) 'Testing FILE Interface '
-! write(*,*) '========================================='
+! CALL h5check_version_f(1,4,4,total_error)
+! '========================================='
+! 'Testing FILE Interface '
+! '========================================='
ret_total_error = 0
CALL mountingtest(cleanup, ret_total_error)
@@ -83,10 +81,10 @@ PROGRAM fortranlibtest
CALL file_space("file_space",cleanup, ret_total_error)
CALL write_test_status(ret_total_error, ' File free space test', total_error)
-! write(*,*)
-! write(*,*) '========================================='
-! write(*,*) 'Testing DATASET Interface '
-! write(*,*) '========================================='
+!
+! '========================================='
+! 'Testing DATASET Interface '
+! '========================================='
ret_total_error = 0
CALL datasettest(cleanup, ret_total_error)
@@ -96,25 +94,22 @@ PROGRAM fortranlibtest
CALL extenddsettest(cleanup, ret_total_error)
CALL write_test_status(ret_total_error, ' Extendible dataset test', total_error)
-! -- DISABLE TEST FOR PGI COMPILER DUE TO COMPILER BUG -- 8/2015 -- HDFFV-9498
-!#if H5_Fortran_COMPILER_ID!=PGI
-! CALL test_userblock_offset(cleanup, ret_total_error)
-! CALL write_test_status(ret_total_error, ' Dataset offset with user block', total_error)
-!#endif
+ CALL test_userblock_offset(cleanup, ret_total_error)
+ CALL write_test_status(ret_total_error, ' Dataset offset with user block', total_error)
-! write(*,*)
-! write(*,*) '========================================='
-! write(*,*) 'Testing DATASPACE Interface '
-! write(*,*) '========================================='
+!
+! '========================================='
+! 'Testing DATASPACE Interface '
+! '========================================='
ret_total_error = 0
CALL dataspace_basic_test(cleanup, ret_total_error)
CALL write_test_status(ret_total_error, ' Basic dataspace test', total_error)
-! write(*,*)
-! write(*,*) '========================================='
-! write(*,*) 'Testing REFERENCE Interface '
-! write(*,*) '========================================='
+!
+! '========================================='
+! 'Testing REFERENCE Interface '
+! '========================================='
ret_total_error = 0
CALL refobjtest(cleanup, ret_total_error)
@@ -124,10 +119,10 @@ PROGRAM fortranlibtest
CALL refregtest(cleanup, ret_total_error)
CALL write_test_status(ret_total_error, ' Reference to dataset region test', total_error)
-! write(*,*)
-! write(*,*) '========================================='
-! write(*,*) 'Testing selection functionalities '
-! write(*,*) '========================================='
+!
+! '========================================='
+! 'Testing selection functionalities '
+! '========================================='
ret_total_error = 0
CALL test_basic_select(cleanup, ret_total_error)
@@ -154,10 +149,10 @@ PROGRAM fortranlibtest
CALL test_select_bounds(ret_total_error)
CALL write_test_status(ret_total_error, ' Selection bounds test ', total_error)
-! write(*,*)
-! write(*,*) '========================================='
-! write(*,*) 'Testing DATATYPE interface '
-! write(*,*) '========================================='
+!
+! '========================================='
+! 'Testing DATATYPE interface '
+! '========================================='
ret_total_error = 0
CALL basic_data_type_test(ret_total_error)
CALL write_test_status(ret_total_error, ' Basic datatype test', total_error)
@@ -174,10 +169,10 @@ PROGRAM fortranlibtest
CALL test_derived_flt(cleanup, ret_total_error)
CALL write_test_status(ret_total_error, ' Derived float datatype test', total_error)
-! write(*,*)
-! write(*,*) '========================================='
-! write(*,*) 'Testing PROPERTY interface '
-! write(*,*) '========================================='
+!
+! '========================================='
+! 'Testing PROPERTY interface '
+! '========================================='
ret_total_error = 0
CALL external_test(cleanup, ret_total_error)
@@ -191,19 +186,19 @@ PROGRAM fortranlibtest
CALL test_chunk_cache (cleanup, ret_total_error)
CALL write_test_status(ret_total_error, ' Dataset chunk cache configuration', total_error)
-! write(*,*)
-! write(*,*) '========================================='
-! write(*,*) 'Testing ATTRIBUTE interface '
-! write(*,*) '========================================='
+!
+! '========================================='
+! 'Testing ATTRIBUTE interface '
+! '========================================='
ret_total_error = 0
CALL attribute_test(cleanup, ret_total_error)
CALL write_test_status(ret_total_error, ' Attribute test', total_error)
-! write(*,*)
-! write(*,*) '========================================='
-! write(*,*) 'Testing IDENTIFIER interface '
-! write(*,*) '========================================='
+!
+! '========================================='
+! 'Testing IDENTIFIER interface '
+! '========================================='
ret_total_error = 0
CALL identifier_test(cleanup, ret_total_error)
@@ -222,10 +217,10 @@ PROGRAM fortranlibtest
CALL write_test_status(ret_total_error, ' SZIP filter test', total_error)
ENDIF
-! write(*,*)
-! write(*,*) '========================================='
-! write(*,*) 'Testing GROUP interface '
-! write(*,*) '========================================='
+!
+! '========================================='
+! 'Testing GROUP interface '
+! '========================================='
ret_total_error = 0
CALL group_test(cleanup, ret_total_error)
diff --git a/fortran/test/fortranlib_test_1_8.F90 b/fortran/test/fortranlib_test_1_8.F90
index 320d661..1306da5 100644
--- a/fortran/test/fortranlib_test_1_8.F90
+++ b/fortran/test/fortranlib_test_1_8.F90
@@ -14,12 +14,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
!*****
@@ -48,11 +46,11 @@ PROGRAM fortranlibtest
CALL h5get_libversion_f(majnum, minnum, relnum, total_error)
IF(total_error .EQ. 0) THEN
WRITE(*, '(" FORTRANLIB_TEST is linked with HDF5 Library version ")', advance="NO")
- WRITE(*, '(I1)', advance="NO") majnum
+ WRITE(*, '(I0)', advance="NO") majnum
WRITE(*, '(".")', advance="NO")
- WRITE(*, '(I1)', advance="NO") minnum
+ WRITE(*, '(I0)', advance="NO") minnum
WRITE(*, '(" release ")', advance="NO")
- WRITE(*, '(I3)') relnum
+ WRITE(*, '(I0)') relnum
ELSE
total_error = total_error + 1
ENDIF
diff --git a/fortran/test/fortranlib_test_F03.F90 b/fortran/test/fortranlib_test_F03.F90
index 5f5fd2d..9a74120 100644
--- a/fortran/test/fortranlib_test_F03.F90
+++ b/fortran/test/fortranlib_test_F03.F90
@@ -15,12 +15,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
!*****
@@ -49,11 +47,11 @@ PROGRAM fortranlibtest_F03
CALL h5get_libversion_f(majnum, minnum, relnum, total_error)
IF(total_error .EQ. 0) THEN
WRITE(*, '(" FORTRANLIB_TEST is linked with HDF5 Library version ")', advance="NO")
- WRITE(*, '(I1)', advance="NO") majnum
+ WRITE(*, '(I0)', advance="NO") majnum
WRITE(*, '(".")', advance="NO")
- WRITE(*, '(I1)', advance="NO") minnum
+ WRITE(*, '(I0)', advance="NO") minnum
WRITE(*, '(" release ")', advance="NO")
- WRITE(*, '(I3)') relnum
+ WRITE(*, '(I0)') relnum
ELSE
total_error = total_error + 1
ENDIF
diff --git a/fortran/test/t.c b/fortran/test/t.c
index fe69143..107e8d5 100644
--- a/fortran/test/t.c
+++ b/fortran/test/t.c
@@ -14,12 +14,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
*
******
diff --git a/fortran/test/t.h b/fortran/test/t.h
index 738aa59..81d2b5d 100644
--- a/fortran/test/t.h
+++ b/fortran/test/t.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "hdf5.h"
diff --git a/fortran/test/tH5A.F90 b/fortran/test/tH5A.F90
index 5b814fa..1e6fdeb 100644
--- a/fortran/test/tH5A.F90
+++ b/fortran/test/tH5A.F90
@@ -14,12 +14,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! CONTAINS SUBROUTINES
diff --git a/fortran/test/tH5A_1_8.F90 b/fortran/test/tH5A_1_8.F90
index c70e288..f2f11aa 100644
--- a/fortran/test/tH5A_1_8.F90
+++ b/fortran/test/tH5A_1_8.F90
@@ -14,12 +14,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! CONTAINS SUBROUTINES
diff --git a/fortran/test/tH5D.F90 b/fortran/test/tH5D.F90
index 183d969..7001b98 100644
--- a/fortran/test/tH5D.F90
+++ b/fortran/test/tH5D.F90
@@ -14,12 +14,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! NOTES
diff --git a/fortran/test/tH5E.F90 b/fortran/test/tH5E.F90
index 10ecaf6..3cda6e1 100644
--- a/fortran/test/tH5E.F90
+++ b/fortran/test/tH5E.F90
@@ -14,12 +14,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! NOTES
diff --git a/fortran/test/tH5E_F03.F90 b/fortran/test/tH5E_F03.F90
index 63e70a3..1878966 100644
--- a/fortran/test/tH5E_F03.F90
+++ b/fortran/test/tH5E_F03.F90
@@ -15,12 +15,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! USES
diff --git a/fortran/test/tH5F.F90 b/fortran/test/tH5F.F90
index 8334b30..d91ee9c 100644
--- a/fortran/test/tH5F.F90
+++ b/fortran/test/tH5F.F90
@@ -14,12 +14,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! CONTAINS SUBROUTINES
@@ -729,7 +727,7 @@ CONTAINS
CALL h5fget_freespace_f(fid, free_space, error)
CALL check("h5fget_freespace_f",error,total_error)
- if(error .eq.0 .and. free_space .ne. 0) then
+ if(error .eq.0 .and. free_space .ne. 1248) then
total_error = total_error + 1
write(*,*) "1: Wrong amount of free space reported, ", free_space
endif
@@ -745,7 +743,7 @@ CONTAINS
! Check the free space now
CALL h5fget_freespace_f(fid, free_space, error)
CALL check("h5fget_freespace_f",error,total_error)
- if(error .eq.0 .and. free_space .ne. 0) then
+ if(error .eq.0 .and. free_space .ne. 216) then
total_error = total_error + 1
write(*,*) "2: Wrong amount of free space reported, ", free_space
endif
@@ -757,7 +755,7 @@ CONTAINS
! Check the free space now
CALL h5fget_freespace_f(fid, free_space, error)
CALL check("h5fget_freespace_f",error,total_error)
- if(error .eq.0 .and. free_space .ne. 0) then
+ if(error .eq.0 .and. free_space .ne. 1248) then
total_error = total_error + 1
write(*,*) "3: Wrong amount of free space reported, ", free_space
endif
diff --git a/fortran/test/tH5F_F03.F90 b/fortran/test/tH5F_F03.F90
index 0f08257..8cc6b83 100644
--- a/fortran/test/tH5F_F03.F90
+++ b/fortran/test/tH5F_F03.F90
@@ -15,12 +15,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! NOTES
diff --git a/fortran/test/tH5G.F90 b/fortran/test/tH5G.F90
index 2ba174c..0b6cc1a 100644
--- a/fortran/test/tH5G.F90
+++ b/fortran/test/tH5G.F90
@@ -14,12 +14,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! CONTAINS SUBROUTINES
diff --git a/fortran/test/tH5G_1_8.F90 b/fortran/test/tH5G_1_8.F90
index f894edd..d3be525 100644
--- a/fortran/test/tH5G_1_8.F90
+++ b/fortran/test/tH5G_1_8.F90
@@ -14,12 +14,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! CONTAINS SUBROUTINES
diff --git a/fortran/test/tH5I.F90 b/fortran/test/tH5I.F90
index 97c48c6..5ce37fd 100644
--- a/fortran/test/tH5I.F90
+++ b/fortran/test/tH5I.F90
@@ -14,12 +14,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! CONTAINS SUBROUTINES
diff --git a/fortran/test/tH5L_F03.F90 b/fortran/test/tH5L_F03.F90
index 40afdbc..bdb5c55 100644
--- a/fortran/test/tH5L_F03.F90
+++ b/fortran/test/tH5L_F03.F90
@@ -15,12 +15,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! USES
diff --git a/fortran/test/tH5MISC_1_8.F90 b/fortran/test/tH5MISC_1_8.F90
index ba3f095..79fbf3e 100644
--- a/fortran/test/tH5MISC_1_8.F90
+++ b/fortran/test/tH5MISC_1_8.F90
@@ -14,12 +14,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
!*****
diff --git a/fortran/test/tH5O.F90 b/fortran/test/tH5O.F90
index 51e1d64..fa3787e 100644
--- a/fortran/test/tH5O.F90
+++ b/fortran/test/tH5O.F90
@@ -14,12 +14,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! CONTAINS SUBROUTINES
diff --git a/fortran/test/tH5O_F03.F90 b/fortran/test/tH5O_F03.F90
index 834308b..44c4bff 100644
--- a/fortran/test/tH5O_F03.F90
+++ b/fortran/test/tH5O_F03.F90
@@ -15,12 +15,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
!*****
diff --git a/fortran/test/tH5P.F90 b/fortran/test/tH5P.F90
index 39d8c1e..563926b 100644
--- a/fortran/test/tH5P.F90
+++ b/fortran/test/tH5P.F90
@@ -14,12 +14,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! CONTAINS SUBROUTINES
diff --git a/fortran/test/tH5P_F03.F90 b/fortran/test/tH5P_F03.F90
index aaf1496..69d493b 100644
--- a/fortran/test/tH5P_F03.F90
+++ b/fortran/test/tH5P_F03.F90
@@ -15,12 +15,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! USES
diff --git a/fortran/test/tH5R.F90 b/fortran/test/tH5R.F90
index ef392b4..f7cccfa 100644
--- a/fortran/test/tH5R.F90
+++ b/fortran/test/tH5R.F90
@@ -14,12 +14,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! NOTES
diff --git a/fortran/test/tH5S.F90 b/fortran/test/tH5S.F90
index 7223772..a4f5f4a 100644
--- a/fortran/test/tH5S.F90
+++ b/fortran/test/tH5S.F90
@@ -14,12 +14,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! NOTES
diff --git a/fortran/test/tH5Sselect.F90 b/fortran/test/tH5Sselect.F90
index aeb80e9..8415bce 100644
--- a/fortran/test/tH5Sselect.F90
+++ b/fortran/test/tH5Sselect.F90
@@ -14,12 +14,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! NOTES
diff --git a/fortran/test/tH5T.F90 b/fortran/test/tH5T.F90
index 1a3a382..a986ba6 100644
--- a/fortran/test/tH5T.F90
+++ b/fortran/test/tH5T.F90
@@ -14,12 +14,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! CONTAINS SUBROUTINES
diff --git a/fortran/test/tH5T_F03.F90 b/fortran/test/tH5T_F03.F90
index c596d31..800e84b 100644
--- a/fortran/test/tH5T_F03.F90
+++ b/fortran/test/tH5T_F03.F90
@@ -15,12 +15,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
!
diff --git a/fortran/test/tH5VL.F90 b/fortran/test/tH5VL.F90
index 834fbde..7ef9c19 100644
--- a/fortran/test/tH5VL.F90
+++ b/fortran/test/tH5VL.F90
@@ -14,12 +14,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! CONTAINS SUBROUTINES
diff --git a/fortran/test/tH5Z.F90 b/fortran/test/tH5Z.F90
index 4201960..0fd7b1b 100644
--- a/fortran/test/tH5Z.F90
+++ b/fortran/test/tH5Z.F90
@@ -14,12 +14,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! CONTAINS SUBROUTINES
diff --git a/fortran/test/tHDF5.F90 b/fortran/test/tHDF5.F90
index e9e0892..459b74f 100644
--- a/fortran/test/tHDF5.F90
+++ b/fortran/test/tHDF5.F90
@@ -17,12 +17,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
!*****
diff --git a/fortran/test/tHDF5_1_8.F90 b/fortran/test/tHDF5_1_8.F90
index 6a3f74b..7e1f862 100644
--- a/fortran/test/tHDF5_1_8.F90
+++ b/fortran/test/tHDF5_1_8.F90
@@ -18,12 +18,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
!*****
diff --git a/fortran/test/tHDF5_F03.F90 b/fortran/test/tHDF5_F03.F90
index b3b1885..96959d8 100644
--- a/fortran/test/tHDF5_F03.F90
+++ b/fortran/test/tHDF5_F03.F90
@@ -18,12 +18,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
!*****
diff --git a/fortran/test/tf.F90 b/fortran/test/tf.F90
index 26b3e99..4df53bd 100644
--- a/fortran/test/tf.F90
+++ b/fortran/test/tf.F90
@@ -14,12 +14,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! CONTAINS SUBROUTINES
diff --git a/fortran/testpar/CMakeLists.txt b/fortran/testpar/CMakeLists.txt
index 4f21419..909cbaf 100644
--- a/fortran/testpar/CMakeLists.txt
+++ b/fortran/testpar/CMakeLists.txt
@@ -1,10 +1,10 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_FORTRAN_TESTPAR C CXX Fortran)
#-----------------------------------------------------------------------------
# Setup include Directories
#-----------------------------------------------------------------------------
-INCLUDE_DIRECTORIES (${HDF5_F90_BINARY_DIR} ${HDF5_F90_SRC_DIR}/src)
+INCLUDE_DIRECTORIES (${MPI_Fortran_INCLUDE_PATH} ${HDF5_F90_BINARY_DIR} ${HDF5_F90_SRC_DIR}/src)
#-----------------------------------------------------------------------------
# Add Tests
@@ -18,15 +18,15 @@ add_executable (parallel_test
)
TARGET_NAMING (parallel_test STATIC)
TARGET_FORTRAN_PROPERTIES (parallel_test STATIC " " " ")
-target_link_libraries (parallel_test
+target_link_libraries (parallel_test
${HDF5_F90_TEST_LIB_TARGET}
${HDF5_F90_LIB_TARGET}
${HDF5_LIB_TARGET}
- ${MPI_Fortran_LIBRARIES}
+ ${LINK_Fortran_LIBS}
)
if (WIN32 AND MSVC)
target_link_libraries (parallel_test "ws2_32.lib")
-endif (WIN32 AND MSVC)
+endif ()
target_include_directories (parallel_test PRIVATE ${CMAKE_Fortran_MODULE_DIRECTORY}/static)
set_target_properties (parallel_test PROPERTIES LINKER_LANGUAGE Fortran)
set_target_properties (parallel_test PROPERTIES FOLDER test/fortran)
diff --git a/fortran/testpar/CMakeTests.cmake b/fortran/testpar/CMakeTests.cmake
index 61085e6..5736256 100644
--- a/fortran/testpar/CMakeTests.cmake
+++ b/fortran/testpar/CMakeTests.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
##############################################################################
##############################################################################
diff --git a/fortran/testpar/Makefile.am b/fortran/testpar/Makefile.am
index 380f475..e597a84 100644
--- a/fortran/testpar/Makefile.am
+++ b/fortran/testpar/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
diff --git a/fortran/testpar/hyper.f90 b/fortran/testpar/hyper.f90
index 28c0b53..22f641d 100644
--- a/fortran/testpar/hyper.f90
+++ b/fortran/testpar/hyper.f90
@@ -5,12 +5,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
diff --git a/fortran/testpar/mdset.f90 b/fortran/testpar/mdset.f90
index 7fe431b..70d2939 100644
--- a/fortran/testpar/mdset.f90
+++ b/fortran/testpar/mdset.f90
@@ -5,12 +5,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
diff --git a/fortran/testpar/ptest.f90 b/fortran/testpar/ptest.f90
index 82dcc09..14ac3b2 100644
--- a/fortran/testpar/ptest.f90
+++ b/fortran/testpar/ptest.f90
@@ -5,12 +5,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
diff --git a/hl/CMakeLists.txt b/hl/CMakeLists.txt
index bed8291..a47e023 100644
--- a/hl/CMakeLists.txt
+++ b/hl/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_HL C CXX)
#-----------------------------------------------------------------------------
@@ -17,16 +17,16 @@ add_subdirectory (${HDF5_HL_SOURCE_DIR}/src ${HDF5_HL_BINARY_DIR}/src)
#-- Build the High level Tools
if (HDF5_BUILD_TOOLS)
add_subdirectory (${HDF5_HL_SOURCE_DIR}/tools ${HDF5_HL_BINARY_DIR}/tools)
-endif (HDF5_BUILD_TOOLS)
+endif ()
#-- Add High Level Examples
if (HDF5_BUILD_EXAMPLES)
add_subdirectory (${HDF5_HL_SOURCE_DIR}/examples ${HDF5_HL_BINARY_DIR}/examples)
-endif (HDF5_BUILD_EXAMPLES)
+endif ()
#-- Build the Unit testing if requested
if (NOT HDF5_EXTERNALLY_CONFIGURED)
if (BUILD_TESTING)
add_subdirectory (${HDF5_HL_SOURCE_DIR}/test ${HDF5_HL_BINARY_DIR}/test)
- endif (BUILD_TESTING)
-endif (NOT HDF5_EXTERNALLY_CONFIGURED)
+ endif ()
+endif ()
diff --git a/hl/COPYING b/hl/COPYING
index 6903daf..6497ace 100644
--- a/hl/COPYING
+++ b/hl/COPYING
@@ -5,12 +5,9 @@
The files and subdirectories in this directory are part of HDF5.
The full HDF5 copyright notice, including terms governing use,
- modification, and redistribution, is contained in the files COPYING
- and Copyright.html. COPYING can be found at the root of the source
- code distribution tree; Copyright.html can be found at the root
- level of an installed copy of the electronic HDF5 document set and
- is linked from the top-level documents page. It can also be found
- at http://www.hdfgroup.org/HDF5/doc/Copyright.html. If you do not
- have access to either file, you may request a copy from
+ modification, and redistribution, is contained in the COPYING file
+ which can be found at the root of the source code distribution tree
+ or in https://support.hdfgroup.org/ftp/HDF5/releases. If you do
+ not have access to either file, you may request a copy from
help@hdfgroup.org.
diff --git a/hl/Makefile.am b/hl/Makefile.am
index 9f31719..aee1f86 100644
--- a/hl/Makefile.am
+++ b/hl/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
#
# This makefile mostly just reinvokes make in the various subdirectories
diff --git a/hl/c++/CMakeLists.txt b/hl/c++/CMakeLists.txt
index 36f4c30..91bfa14 100644
--- a/hl/c++/CMakeLists.txt
+++ b/hl/c++/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_HL_CPP)
#-----------------------------------------------------------------------------
@@ -11,7 +11,7 @@ add_subdirectory (${HDF5_HL_CPP_SOURCE_DIR}/src ${HDF5_HL_CPP_BINARY_DIR}/src)
# --------------------------------------------------------------------
if (HDF5_BUILD_EXAMPLES)
add_subdirectory (${HDF5_HL_CPP_SOURCE_DIR}/examples ${HDF5_HL_CPP_BINARY_DIR}/examples)
-endif (HDF5_BUILD_EXAMPLES)
+endif ()
# --------------------------------------------------------------------
# Add in the unit tests for the packet table c++ wrapper
@@ -19,4 +19,4 @@ endif (HDF5_BUILD_EXAMPLES)
if (BUILD_TESTING)
add_subdirectory (${HDF5_HL_CPP_SOURCE_DIR}/test ${HDF5_HL_CPP_BINARY_DIR}/test)
-endif (BUILD_TESTING)
+endif ()
diff --git a/hl/c++/COPYING b/hl/c++/COPYING
index 6903daf..6497ace 100644
--- a/hl/c++/COPYING
+++ b/hl/c++/COPYING
@@ -5,12 +5,9 @@
The files and subdirectories in this directory are part of HDF5.
The full HDF5 copyright notice, including terms governing use,
- modification, and redistribution, is contained in the files COPYING
- and Copyright.html. COPYING can be found at the root of the source
- code distribution tree; Copyright.html can be found at the root
- level of an installed copy of the electronic HDF5 document set and
- is linked from the top-level documents page. It can also be found
- at http://www.hdfgroup.org/HDF5/doc/Copyright.html. If you do not
- have access to either file, you may request a copy from
+ modification, and redistribution, is contained in the COPYING file
+ which can be found at the root of the source code distribution tree
+ or in https://support.hdfgroup.org/ftp/HDF5/releases. If you do
+ not have access to either file, you may request a copy from
help@hdfgroup.org.
diff --git a/hl/c++/Makefile.am b/hl/c++/Makefile.am
index 80445f7..1968bf5 100644
--- a/hl/c++/Makefile.am
+++ b/hl/c++/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
diff --git a/hl/c++/examples/CMakeLists.txt b/hl/c++/examples/CMakeLists.txt
index 77a50bf..33ad1e5 100644
--- a/hl/c++/examples/CMakeLists.txt
+++ b/hl/c++/examples/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_HL_CPP_EXAMPLES)
#-----------------------------------------------------------------------------
@@ -23,4 +23,4 @@ set_target_properties (ptExampleFL PROPERTIES FOLDER examples/hl/cpp)
if (BUILD_TESTING)
include (CMakeTests.cmake)
-endif (BUILD_TESTING)
+endif ()
diff --git a/hl/c++/examples/CMakeTests.cmake b/hl/c++/examples/CMakeTests.cmake
index bac9fdf..103ec2b 100644
--- a/hl/c++/examples/CMakeTests.cmake
+++ b/hl/c++/examples/CMakeTests.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
##############################################################################
##############################################################################
@@ -8,9 +19,22 @@
add_test (
NAME HL_CPP_ex_ptExampleFL-clear-objects
COMMAND ${CMAKE_COMMAND}
- -E remove
+ -E remove
PTcppexampleFL.h5
)
-add_test (NAME HL_CPP_ex_ptExampleFL COMMAND $<TARGET_FILE:ptExampleFL>)
+if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME HL_CPP_ex_ptExampleFL COMMAND $<TARGET_FILE:ptExampleFL>)
+else ()
+ add_test (NAME HL_CPP_ex_ptExampleFL COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:ptExampleFL>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=ptExampleFL.txt"
+ #-D "TEST_REFERENCE=ptExampleFL.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+endif ()
set_tests_properties (HL_CPP_ex_ptExampleFL PROPERTIES DEPENDS HL_CPP_ex_ptExampleFL-clear-objects)
diff --git a/hl/c++/examples/Makefile.am b/hl/c++/examples/Makefile.am
index 7202a96..ce719f5 100644
--- a/hl/c++/examples/Makefile.am
+++ b/hl/c++/examples/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
diff --git a/hl/c++/examples/ptExampleFL.cpp b/hl/c++/examples/ptExampleFL.cpp
index 27cbd92..26cbf09 100644
--- a/hl/c++/examples/ptExampleFL.cpp
+++ b/hl/c++/examples/ptExampleFL.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "H5PacketTable.h"
diff --git a/hl/c++/examples/run-hlc++-ex.sh.in b/hl/c++/examples/run-hlc++-ex.sh.in
index 5aa1032..9539724 100644
--- a/hl/c++/examples/run-hlc++-ex.sh.in
+++ b/hl/c++/examples/run-hlc++-ex.sh.in
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# This file: run-hlc++-ex.sh
diff --git a/hl/c++/src/CMakeLists.txt b/hl/c++/src/CMakeLists.txt
index f30e684..38604cd 100644
--- a/hl/c++/src/CMakeLists.txt
+++ b/hl/c++/src/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_HL_CPP_SRC)
#-----------------------------------------------------------------------------
@@ -42,7 +42,7 @@ if (BUILD_SHARED_LIBS)
INTERFACE_COMPILE_DEFINITIONS H5_BUILT_AS_DYNAMIC_LIB=1
)
set (install_targets ${install_targets} ${HDF5_HL_CPP_LIBSH_TARGET})
-endif (BUILD_SHARED_LIBS)
+endif ()
#-----------------------------------------------------------------------------
# Add file(s) to CMake Install
@@ -62,7 +62,8 @@ install (
if (HDF5_EXPORTED_TARGETS)
if (BUILD_SHARED_LIBS)
INSTALL_TARGET_PDB (${HDF5_HL_CPP_LIBSH_TARGET} ${HDF5_INSTALL_BIN_DIR} hlcpplibraries)
- endif (BUILD_SHARED_LIBS)
+ endif ()
+ INSTALL_TARGET_PDB (${HDF5_HL_CPP_LIB_TARGET} ${HDF5_INSTALL_BIN_DIR} hlcpplibraries)
install (
TARGETS
@@ -75,4 +76,4 @@ if (HDF5_EXPORTED_TARGETS)
FRAMEWORK DESTINATION ${HDF5_INSTALL_FWRK_DIR} COMPONENT hlcpplibraries
INCLUDES DESTINATION include
)
-endif (HDF5_EXPORTED_TARGETS)
+endif ()
diff --git a/hl/c++/src/H5PacketTable.cpp b/hl/c++/src/H5PacketTable.cpp
index c4d7bbf..544df0b 100644
--- a/hl/c++/src/H5PacketTable.cpp
+++ b/hl/c++/src/H5PacketTable.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Packet Table wrapper classes
diff --git a/hl/c++/src/H5PacketTable.h b/hl/c++/src/H5PacketTable.h
index be0b130..2665984 100644
--- a/hl/c++/src/H5PacketTable.h
+++ b/hl/c++/src/H5PacketTable.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Packet Table wrapper classes
diff --git a/hl/c++/src/Makefile.am b/hl/c++/src/Makefile.am
index c78f5fa..363ba3b 100644
--- a/hl/c++/src/Makefile.am
+++ b/hl/c++/src/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
diff --git a/hl/c++/test/CMakeLists.txt b/hl/c++/test/CMakeLists.txt
index de5b363..a2f9429 100644
--- a/hl/c++/test/CMakeLists.txt
+++ b/hl/c++/test/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_HL_CPP_TEST)
#-----------------------------------------------------------------------------
diff --git a/hl/c++/test/CMakeTests.cmake b/hl/c++/test/CMakeTests.cmake
index e36b5aa..785abca 100644
--- a/hl/c++/test/CMakeTests.cmake
+++ b/hl/c++/test/CMakeTests.cmake
@@ -1,8 +1,32 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
##############################################################################
##############################################################################
### T E S T I N G ###
##############################################################################
##############################################################################
-
+
+if (HDF5_ENABLE_USING_MEMCHECKER)
add_test (NAME HL_CPP_ptableTest COMMAND $<TARGET_FILE:hl_ptableTest>)
+else ()
+ add_test (NAME HL_CPP_ptableTest COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:hl_ptableTest>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=hl_ptableTest.txt"
+ #-D "TEST_REFERENCE=hl_ptableTest.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+endif ()
diff --git a/hl/c++/test/Makefile.am b/hl/c++/test/Makefile.am
index c835843..7031c34 100644
--- a/hl/c++/test/Makefile.am
+++ b/hl/c++/test/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
diff --git a/hl/c++/test/ptableTest.cpp b/hl/c++/test/ptableTest.cpp
index c388af3..340912e 100644
--- a/hl/c++/test/ptableTest.cpp
+++ b/hl/c++/test/ptableTest.cpp
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* ptableTest.cpp */
diff --git a/hl/c++/test/ptableTest.h b/hl/c++/test/ptableTest.h
index 60b55ca..8af7bff 100644
--- a/hl/c++/test/ptableTest.h
+++ b/hl/c++/test/ptableTest.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Test header for Packet Table C++ wrapper API */
diff --git a/hl/examples/CMakeLists.txt b/hl/examples/CMakeLists.txt
index f5e6f5e..1144e0f 100644
--- a/hl/examples/CMakeLists.txt
+++ b/hl/examples/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_HL_EXAMPLES )
#-----------------------------------------------------------------------------
@@ -32,10 +32,10 @@ foreach (example ${examples})
TARGET_C_PROPERTIES (hl_ex_${example} STATIC " " " ")
target_link_libraries (hl_ex_${example} ${HDF5_HL_LIB_TARGET} ${HDF5_LIB_TARGET})
set_target_properties (hl_ex_${example} PROPERTIES FOLDER examples/hl)
-endforeach (example ${examples})
+endforeach ()
if (BUILD_TESTING)
include (CMakeTests.cmake)
-endif (BUILD_TESTING)
+endif ()
diff --git a/hl/examples/CMakeTests.cmake b/hl/examples/CMakeTests.cmake
index 6384aa5..166fa7c 100644
--- a/hl/examples/CMakeTests.cmake
+++ b/hl/examples/CMakeTests.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
##############################################################################
##############################################################################
@@ -12,7 +23,7 @@ set (HDF5_TEST_FILES
foreach (h5_file ${HDF5_TEST_FILES})
HDFTEST_COPY_FILE("${PROJECT_SOURCE_DIR}/${h5_file}" "${PROJECT_BINARY_DIR}/${h5_file}" "hl_ex_ex_ds1_files")
-endforeach (h5_file ${HDF5_TEST_FILES})
+endforeach ()
add_custom_target(hl_ex_ex_ds1_files ALL COMMENT "Copying files needed by hl_ex_ex_ds1 tests" DEPENDS ${hl_ex_ex_ds1_files_list})
# Remove any output file left over from previous test run
@@ -46,9 +57,22 @@ add_custom_target(hl_ex_ex_ds1_files ALL COMMENT "Copying files needed by hl_ex_
set (last_test "HL_ex-clear-objects")
foreach (example ${examples})
- add_test (NAME HL_ex_${example} COMMAND $<TARGET_FILE:hl_ex_${example}>)
- if (NOT "${last_test}" STREQUAL "")
- set_tests_properties (HL_ex_${example} PROPERTIES DEPENDS ${last_test})
- endif ()
- set (last_test "HL_ex_${example}")
-endforeach (example ${examples})
+ if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME HL_ex_${example} COMMAND $<TARGET_FILE:hl_ex_${example}>)
+ else ()
+ add_test (NAME HL_ex_${example} COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:hl_ex_${example}>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=hl_ex_${example}.txt"
+ #-D "TEST_REFERENCE=hl_ex_${example}.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+ endif ()
+ if (NOT "${last_test}" STREQUAL "")
+ set_tests_properties (HL_ex_${example} PROPERTIES DEPENDS ${last_test})
+ endif ()
+ set (last_test "HL_ex_${example}")
+endforeach ()
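
The hunk above replaces each direct add_test call with an invocation of the helper script runTest.cmake whenever the memory checker option is off. A minimal standalone sketch of that pattern follows; it is not part of the patch, and the project name, the my_example target, and the default location assumed for runTest.cmake are placeholders. The TEST_* variables are the same ones the patched file passes: the program to run, its arguments, the expected exit code, an output capture file, and a flag that skips comparison against a reference file.

# Hypothetical standalone CMakeLists.txt illustrating the test-wrapping
# pattern used above (target and path names are placeholders, not HDF5's).
cmake_minimum_required (VERSION 3.2.2)
project (RUNTEST_PATTERN_DEMO C)

option (HDF5_ENABLE_USING_MEMCHECKER "Run tests directly so a memory checker can wrap them" OFF)
# Assumed location of runTest.cmake; point this at wherever the script lives.
set (HDF_RESOURCES_EXT_DIR "${CMAKE_CURRENT_SOURCE_DIR}/config/cmake_ext_mod" CACHE PATH "Directory containing runTest.cmake")

add_executable (my_example my_example.c)

enable_testing ()
if (HDF5_ENABLE_USING_MEMCHECKER)
  # Direct invocation: valgrind/memcheck sees the real binary, not cmake -P.
  add_test (NAME DEMO_my_example COMMAND $<TARGET_FILE:my_example>)
else ()
  # Indirect invocation: runTest.cmake launches the program, checks the exit
  # status against TEST_EXPECT, and writes stdout to TEST_OUTPUT in TEST_FOLDER;
  # TEST_SKIP_COMPARE=TRUE disables diffing the output against a reference file.
  add_test (NAME DEMO_my_example COMMAND "${CMAKE_COMMAND}"
      -D "TEST_PROGRAM=$<TARGET_FILE:my_example>"
      -D "TEST_ARGS:STRING="
      -D "TEST_EXPECT=0"
      -D "TEST_SKIP_COMPARE=TRUE"
      -D "TEST_OUTPUT=DEMO_my_example.txt"
      -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
      -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
  )
endif ()
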
diff --git a/hl/examples/Makefile.am b/hl/examples/Makefile.am
index ba200ed..29e1a48 100644
--- a/hl/examples/Makefile.am
+++ b/hl/examples/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
diff --git a/hl/examples/ex_ds1.c b/hl/examples/ex_ds1.c
index 8b7e530..1e0c592 100644
--- a/hl/examples/ex_ds1.c
+++ b/hl/examples/ex_ds1.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "hdf5.h"
diff --git a/hl/examples/ex_image1.c b/hl/examples/ex_image1.c
index 18ad903..56a175d 100644
--- a/hl/examples/ex_image1.c
+++ b/hl/examples/ex_image1.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "hdf5.h"
diff --git a/hl/examples/ex_image2.c b/hl/examples/ex_image2.c
index b57f1b7..5abf723 100644
--- a/hl/examples/ex_image2.c
+++ b/hl/examples/ex_image2.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "hdf5.h"
diff --git a/hl/examples/ex_lite1.c b/hl/examples/ex_lite1.c
index 446c803..89f60dc 100644
--- a/hl/examples/ex_lite1.c
+++ b/hl/examples/ex_lite1.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
diff --git a/hl/examples/ex_lite2.c b/hl/examples/ex_lite2.c
index a696a20..261fc73 100644
--- a/hl/examples/ex_lite2.c
+++ b/hl/examples/ex_lite2.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "hdf5.h"
diff --git a/hl/examples/ex_lite3.c b/hl/examples/ex_lite3.c
index f409ea4..420cbcb 100644
--- a/hl/examples/ex_lite3.c
+++ b/hl/examples/ex_lite3.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "hdf5.h"
diff --git a/hl/examples/ex_table_01.c b/hl/examples/ex_table_01.c
index 1d381ae..f1d0266 100644
--- a/hl/examples/ex_table_01.c
+++ b/hl/examples/ex_table_01.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "hdf5.h"
diff --git a/hl/examples/ex_table_02.c b/hl/examples/ex_table_02.c
index 129f87d..923f810 100644
--- a/hl/examples/ex_table_02.c
+++ b/hl/examples/ex_table_02.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "hdf5.h"
diff --git a/hl/examples/ex_table_03.c b/hl/examples/ex_table_03.c
index c9f94ce..76a9eae 100644
--- a/hl/examples/ex_table_03.c
+++ b/hl/examples/ex_table_03.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
diff --git a/hl/examples/ex_table_04.c b/hl/examples/ex_table_04.c
index 01f2869..203114c 100644
--- a/hl/examples/ex_table_04.c
+++ b/hl/examples/ex_table_04.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "hdf5.h"
diff --git a/hl/examples/ex_table_05.c b/hl/examples/ex_table_05.c
index 7ed1fec..b43d635 100644
--- a/hl/examples/ex_table_05.c
+++ b/hl/examples/ex_table_05.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
diff --git a/hl/examples/ex_table_06.c b/hl/examples/ex_table_06.c
index 081347e..0397e83 100644
--- a/hl/examples/ex_table_06.c
+++ b/hl/examples/ex_table_06.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "hdf5.h"
diff --git a/hl/examples/ex_table_07.c b/hl/examples/ex_table_07.c
index 3be788f..d9ea444 100644
--- a/hl/examples/ex_table_07.c
+++ b/hl/examples/ex_table_07.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "hdf5.h"
diff --git a/hl/examples/ex_table_08.c b/hl/examples/ex_table_08.c
index 5d3659f..a45520d 100644
--- a/hl/examples/ex_table_08.c
+++ b/hl/examples/ex_table_08.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "hdf5_hl.h"
diff --git a/hl/examples/ex_table_09.c b/hl/examples/ex_table_09.c
index a4ef611..a9f5f11 100644
--- a/hl/examples/ex_table_09.c
+++ b/hl/examples/ex_table_09.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "hdf5_hl.h"
diff --git a/hl/examples/ex_table_10.c b/hl/examples/ex_table_10.c
index 059c7ea..8c4d8ae 100644
--- a/hl/examples/ex_table_10.c
+++ b/hl/examples/ex_table_10.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "hdf5_hl.h"
diff --git a/hl/examples/ex_table_11.c b/hl/examples/ex_table_11.c
index 687568c..d6215cb 100644
--- a/hl/examples/ex_table_11.c
+++ b/hl/examples/ex_table_11.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "hdf5_hl.h"
diff --git a/hl/examples/ex_table_12.c b/hl/examples/ex_table_12.c
index 125b8be..f287c29 100644
--- a/hl/examples/ex_table_12.c
+++ b/hl/examples/ex_table_12.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "hdf5.h"
diff --git a/hl/examples/pal_rgb.h b/hl/examples/pal_rgb.h
index 606137e..f3905b3 100644
--- a/hl/examples/pal_rgb.h
+++ b/hl/examples/pal_rgb.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
const unsigned char pal_rgb[256*3] = {255,255,255,
diff --git a/hl/examples/ptExampleFL.c b/hl/examples/ptExampleFL.c
index 37b5035..ba7a3a0 100644
--- a/hl/examples/ptExampleFL.c
+++ b/hl/examples/ptExampleFL.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "hdf5_hl.h"
diff --git a/hl/examples/run-hl-ex.sh b/hl/examples/run-hl-ex.sh
index 5fa97c0..6f736cc 100755
--- a/hl/examples/run-hl-ex.sh
+++ b/hl/examples/run-hl-ex.sh
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# This file: run-hl-ex.sh
diff --git a/hl/examples/run-hlc-ex.sh.in b/hl/examples/run-hlc-ex.sh.in
index d897a02..b12955f 100644
--- a/hl/examples/run-hlc-ex.sh.in
+++ b/hl/examples/run-hlc-ex.sh.in
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# This file: run-hlc-ex.sh
diff --git a/hl/fortran/CMakeLists.txt b/hl/fortran/CMakeLists.txt
index 892169c..c651ce9 100644
--- a/hl/fortran/CMakeLists.txt
+++ b/hl/fortran/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_HL_F90 C CXX Fortran)
#-----------------------------------------------------------------------------
@@ -11,11 +11,11 @@ add_subdirectory (${HDF5_HL_F90_SOURCE_DIR}/src ${HDF5_HL_F90_BINARY_DIR}/src)
#-----------------------------------------------------------------------------
if (HDF5_BUILD_EXAMPLES)
add_subdirectory (${HDF5_HL_F90_SOURCE_DIR}/examples ${HDF5_HL_F90_BINARY_DIR}/examples)
-endif (HDF5_BUILD_EXAMPLES)
+endif ()
#-----------------------------------------------------------------------------
# Testing
#-----------------------------------------------------------------------------
if (BUILD_TESTING)
add_subdirectory (${HDF5_HL_F90_SOURCE_DIR}/test ${HDF5_HL_F90_BINARY_DIR}/test)
-endif (BUILD_TESTING)
+endif ()
diff --git a/hl/fortran/COPYING b/hl/fortran/COPYING
index 6903daf..6497ace 100644
--- a/hl/fortran/COPYING
+++ b/hl/fortran/COPYING
@@ -5,12 +5,9 @@
The files and subdirectories in this directory are part of HDF5.
The full HDF5 copyright notice, including terms governing use,
- modification, and redistribution, is contained in the files COPYING
- and Copyright.html. COPYING can be found at the root of the source
- code distribution tree; Copyright.html can be found at the root
- level of an installed copy of the electronic HDF5 document set and
- is linked from the top-level documents page. It can also be found
- at http://www.hdfgroup.org/HDF5/doc/Copyright.html. If you do not
- have access to either file, you may request a copy from
+ modification, and redistribution, is contained in the COPYING file
+ which can be found at the root of the source code distribution tree
+ or in https://support.hdfgroup.org/ftp/HDF5/releases. If you do
+ not have access to either file, you may request a copy from
help@hdfgroup.org.
diff --git a/hl/fortran/Makefile.am b/hl/fortran/Makefile.am
index 646de26..ad18a21 100644
--- a/hl/fortran/Makefile.am
+++ b/hl/fortran/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
#
# This makefile mostly just reinvokes make in the various subdirectories
diff --git a/hl/fortran/examples/CMakeLists.txt b/hl/fortran/examples/CMakeLists.txt
index 87838a0..dfe6102 100644
--- a/hl/fortran/examples/CMakeLists.txt
+++ b/hl/fortran/examples/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_HL_F90_EXAMPLES C CXX Fortran)
#-----------------------------------------------------------------------------
@@ -18,7 +18,7 @@ foreach (example ${examples})
add_executable (hl_f90_ex_${example} ${HDF5_HL_F90_EXAMPLES_SOURCE_DIR}/${example}.f90)
TARGET_NAMING (hl_f90_ex_${example} STATIC)
TARGET_FORTRAN_PROPERTIES (hl_f90_ex_${example} STATIC " " " ")
- target_link_libraries (hl_f90_ex_${example}
+ target_link_libraries (hl_f90_ex_${example}
${HDF5_HL_F90_LIB_TARGET}
${HDF5_F90_LIB_TARGET}
${HDF5_LIB_TARGET}
@@ -27,10 +27,10 @@ foreach (example ${examples})
set_target_properties (hl_f90_ex_${example} PROPERTIES LINKER_LANGUAGE Fortran)
set_target_properties (hl_f90_ex_${example} PROPERTIES FOLDER examples/hl/fortran)
-endforeach (example ${examples})
+endforeach ()
if (BUILD_TESTING)
include (CMakeTests.cmake)
-endif (BUILD_TESTING)
+endif ()
diff --git a/hl/fortran/examples/CMakeTests.cmake b/hl/fortran/examples/CMakeTests.cmake
index 2c10295..91cb56b 100644
--- a/hl/fortran/examples/CMakeTests.cmake
+++ b/hl/fortran/examples/CMakeTests.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
##############################################################################
##############################################################################
@@ -16,6 +27,19 @@ add_test (
foreach (example ${examples})
- add_test (NAME HL_FORTRAN_f90_ex_${example} COMMAND $<TARGET_FILE:hl_f90_ex_${example}>)
+ if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME HL_FORTRAN_f90_ex_${example} COMMAND $<TARGET_FILE:hl_f90_ex_${example}>)
+ else ()
+ add_test (NAME HL_FORTRAN_f90_ex_${example} COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:hl_f90_ex_${example}>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=hl_f90_ex_${example}.txt"
+ #-D "TEST_REFERENCE=hl_f90_ex_${example}.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+ endif ()
set_tests_properties (HL_FORTRAN_f90_ex_${example} PROPERTIES DEPENDS HL_FORTRAN_f90_ex-clear-objects)
-endforeach (example ${examples})
+endforeach ()
diff --git a/hl/fortran/examples/Makefile.am b/hl/fortran/examples/Makefile.am
index 997da73..6709fb7 100644
--- a/hl/fortran/examples/Makefile.am
+++ b/hl/fortran/examples/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
diff --git a/hl/fortran/examples/ex_ds1.f90 b/hl/fortran/examples/ex_ds1.f90
index d77f8e0..14e2b0c 100644
--- a/hl/fortran/examples/ex_ds1.f90
+++ b/hl/fortran/examples/ex_ds1.f90
@@ -5,12 +5,10 @@
! * *
! * This file is part of HDF5. The full HDF5 copyright notice, including *
! * terms governing use, modification, and redistribution, is contained in *
-! * the files COPYING and Copyright.html. COPYING can be found at the root *
-! * of the source code distribution tree; Copyright.html can be found at the *
-! * root level of an installed copy of the electronic HDF5 document set and *
-! * is linked from the top-level documents page. It can also be found at *
-! * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! * access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
PROGRAM example_ds
diff --git a/hl/fortran/examples/exlite.f90 b/hl/fortran/examples/exlite.f90
index 916bcb9..90a33fd 100644
--- a/hl/fortran/examples/exlite.f90
+++ b/hl/fortran/examples/exlite.f90
@@ -5,12 +5,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
diff --git a/hl/fortran/examples/run-hlfortran-ex.sh.in b/hl/fortran/examples/run-hlfortran-ex.sh.in
index 12f9fec..c8f41f8 100644
--- a/hl/fortran/examples/run-hlfortran-ex.sh.in
+++ b/hl/fortran/examples/run-hlfortran-ex.sh.in
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# This file: run-hlfortran-ex.sh
diff --git a/hl/fortran/src/CMakeLists.txt b/hl/fortran/src/CMakeLists.txt
index 2eaac31..345a739 100644
--- a/hl/fortran/src/CMakeLists.txt
+++ b/hl/fortran/src/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT(HDF5_HL_F90_SRC C CXX Fortran)
#-----------------------------------------------------------------------------
@@ -7,9 +7,9 @@ if (WIN32)
if (BUILD_SHARED_LIBS)
if (MSVC)
configure_file (${HDF5_HL_F90_SRC_SOURCE_DIR}/hdf5_hl_fortrandll.def.in ${HDF5_HL_F90_SRC_BINARY_DIR}/hdf5_hl_fortrandll.def @ONLY)
- endif (MSVC)
- endif (BUILD_SHARED_LIBS)
-endif (WIN32)
+ endif ()
+ endif ()
+endif ()
#-----------------------------------------------------------------------------
# Generate the H5LT and H5TB REAL APIs
@@ -25,12 +25,12 @@ if (WIN32 AND MSVC)
PROPERTIES
COMPILE_FLAGS "/MT"
)
- endif (BUILD_SHARED_LIBS)
+ endif ()
set_target_properties (H5HL_buildiface
PROPERTIES
LINK_FLAGS "/SUBSYSTEM:CONSOLE"
)
-endif (WIN32 AND MSVC)
+endif ()
set_target_properties (H5HL_buildiface PROPERTIES
LINKER_LANGUAGE Fortran
Fortran_MODULE_DIRECTORY ${CMAKE_Fortran_MODULE_DIRECTORY}
@@ -40,15 +40,15 @@ if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
file (MAKE_DIRECTORY "${HDF5_HL_F90_SRC_BINARY_DIR}/shared")
if (WIN32)
set (MODSH_BUILD_DIR ${CMAKE_Fortran_MODULE_DIRECTORY}/shared/${CMAKE_BUILD_TYPE})
- else (WIN32)
+ else ()
set (MODSH_BUILD_DIR ${CMAKE_Fortran_MODULE_DIRECTORY}/shared)
- endif (WIN32)
-endif (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
+ endif ()
+endif ()
if (WIN32)
set (MOD_BUILD_DIR ${CMAKE_Fortran_MODULE_DIRECTORY}/static/${CMAKE_BUILD_TYPE})
-else (WIN32)
+else ()
set (MOD_BUILD_DIR ${CMAKE_Fortran_MODULE_DIRECTORY}/static)
-endif (WIN32)
+endif ()
#-----------------------------------------------------------------------------
# Setup include Directories
@@ -101,7 +101,7 @@ if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
INTERFACE_COMPILE_DEFINITIONS H5_BUILT_AS_DYNAMIC_LIB=1
)
set (install_targets ${install_targets} ${HDF5_HL_F90_C_LIBSH_TARGET})
-endif (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
+endif ()
#-----------------------------------------------------------------------------
# Fortran Modules
@@ -146,7 +146,7 @@ if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
${HDF5_HL_F90_SRC_BINARY_DIR}/shared/H5TBff_gen.F90
PROPERTIES GENERATED TRUE
)
-endif (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
+endif ()
set (HDF5_HL_F90_F_SOURCES
${HDF5_HL_F90_F_BASE_SOURCES}
@@ -166,7 +166,7 @@ if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
${HDF5_HL_F90_SRC_BINARY_DIR}/shared/H5TBff_gen.F90
)
set_source_files_properties (${HDF5_HL_F90_F_SOURCES_SHARED} PROPERTIES LANGUAGE Fortran)
-endif (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
+endif ()
add_library (${HDF5_HL_F90_LIB_TARGET} STATIC ${HDF5_HL_F90_F_SOURCES})
TARGET_FORTRAN_PROPERTIES (${HDF5_HL_F90_LIB_TARGET} STATIC " " " ")
@@ -183,7 +183,7 @@ if (WIN32)
set_property (TARGET ${HDF5_HL_F90_LIB_TARGET}
APPEND PROPERTY COMPILE_DEFINITIONS "HDF5F90_WINDOWS"
)
-endif (WIN32)
+endif ()
set (install_targets ${install_targets} ${HDF5_HL_F90_LIB_TARGET})
add_dependencies(${HDF5_HL_F90_LIB_TARGET} H5HLgen)
@@ -192,7 +192,7 @@ if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
set (SHARED_LINK_FLAGS " ")
if (WIN32 AND MSVC)
set (SHARED_LINK_FLAGS "/DLL /DEF:${HDF5_HL_F90_SRC_BINARY_DIR}/hdf5_hl_fortrandll.def")
- endif (WIN32 AND MSVC)
+ endif ()
TARGET_FORTRAN_PROPERTIES (${HDF5_HL_F90_LIBSH_TARGET} SHARED " " ${SHARED_LINK_FLAGS})
target_link_libraries (${HDF5_HL_F90_LIBSH_TARGET} ${HDF5_HL_F90_C_LIBSH_TARGET} ${HDF5_F90_LIBSH_TARGET})
set_global_variable (HDF5_LIBRARIES_TO_EXPORT "${HDF5_LIBRARIES_TO_EXPORT};${HDF5_HL_F90_LIBSH_TARGET}")
@@ -208,10 +208,10 @@ if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
set_property (TARGET ${HDF5_HL_F90_LIBSH_TARGET}
APPEND PROPERTY COMPILE_DEFINITIONS "BUILD_HDF5_HL_DLL;HDF5F90_WINDOWS"
)
- endif (WIN32)
+ endif ()
set (install_targets ${install_targets} ${HDF5_HL_F90_LIBSH_TARGET})
add_dependencies(${HDF5_HL_F90_LIBSH_TARGET} H5HLgenSH)
-endif (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
+endif ()
#-----------------------------------------------------------------------------
# Add file(s) to CMake Install
@@ -253,7 +253,7 @@ if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
COMPONENT
fortheaders
)
-endif (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
+endif ()
#-----------------------------------------------------------------------------
# Add Target(s) to CMake Install for import into other projects
@@ -262,7 +262,9 @@ if (HDF5_EXPORTED_TARGETS)
if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
INSTALL_TARGET_PDB (${HDF5_HL_F90_C_LIBSH_TARGET} ${HDF5_INSTALL_BIN_DIR} hlfortlibraries)
#INSTALL_TARGET_PDB (${HDF5_HL_F90_LIBSH_TARGET} ${HDF5_INSTALL_BIN_DIR} hlfortlibraries)
- endif (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
+ endif ()
+ INSTALL_TARGET_PDB (${HDF5_HL_F90_C_LIB_TARGET} ${HDF5_INSTALL_BIN_DIR} hlfortlibraries)
+ #INSTALL_TARGET_PDB (${HDF5_HL_F90_LIB_TARGET} ${HDF5_INSTALL_BIN_DIR} hlfortlibraries)
install (
TARGETS
@@ -275,4 +277,4 @@ if (HDF5_EXPORTED_TARGETS)
FRAMEWORK DESTINATION ${HDF5_INSTALL_FWRK_DIR} COMPONENT hlfortlibraries
INCLUDES DESTINATION include
)
-endif (HDF5_EXPORTED_TARGETS)
+endif ()
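
The cleanup running through these CMake files drops the repeated condition from else()/endif()/endforeach() and raises the minimum required CMake version to 3.2.2. The fragment below is a hypothetical before-and-after of that style change only, echoing the MOD_BUILD_DIR hunk above rather than introducing anything new; the argument-less form has been accepted since CMake 2.6, so the rewrite changes no behavior.

# Hypothetical fragment, not from the patch. Older style repeats the condition:
#
#   if (WIN32)
#     set (MOD_BUILD_DIR ${CMAKE_Fortran_MODULE_DIRECTORY}/static/${CMAKE_BUILD_TYPE})
#   else (WIN32)
#     set (MOD_BUILD_DIR ${CMAKE_Fortran_MODULE_DIRECTORY}/static)
#   endif (WIN32)
#
# Equivalent modern form; the repeated text is dropped and nothing else changes:
if (WIN32)
  set (MOD_BUILD_DIR ${CMAKE_Fortran_MODULE_DIRECTORY}/static/${CMAKE_BUILD_TYPE})
else ()
  set (MOD_BUILD_DIR ${CMAKE_Fortran_MODULE_DIRECTORY}/static)
endif ()
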
diff --git a/hl/fortran/src/H5DSfc.c b/hl/fortran/src/H5DSfc.c
index af054e0..0bb594d 100644
--- a/hl/fortran/src/H5DSfc.c
+++ b/hl/fortran/src/H5DSfc.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
-* the files COPYING and Copyright.html. COPYING can be found at the root *
-* of the source code distribution tree; Copyright.html can be found at the *
-* root level of an installed copy of the electronic HDF5 document set and *
-* is linked from the top-level documents page. It can also be found at *
-* http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-* access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* This files contains C stubs for H5D Fortran APIs */
diff --git a/hl/fortran/src/H5DSff.F90 b/hl/fortran/src/H5DSff.F90
index 00cf4d8..4f0d040 100644
--- a/hl/fortran/src/H5DSff.F90
+++ b/hl/fortran/src/H5DSff.F90
@@ -5,12 +5,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
!
diff --git a/hl/fortran/src/H5HL_buildiface.F90 b/hl/fortran/src/H5HL_buildiface.F90
index 15897c3..dda8d56 100644
--- a/hl/fortran/src/H5HL_buildiface.F90
+++ b/hl/fortran/src/H5HL_buildiface.F90
@@ -33,12 +33,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
! AUTHOR
@@ -114,12 +112,10 @@ PROGRAM H5HL_buildiface
'! *',&
'! This file is part of HDF5. The full HDF5 copyright notice, including *',&
'! terms governing use, modification, and redistribution, is contained in *',&
-'! the files COPYING and Copyright.html. COPYING can be found at the root *',&
-'! of the source code distribution tree; Copyright.html can be found at the *',&
-'! root level of an installed copy of the electronic HDF5 document set and *',&
-'! is linked from the top-level documents page. It can also be found at *',&
-'! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *',&
-'! access to either file, you may request a copy from help@hdfgroup.org. *',&
+'! the COPYING file, which can be found at the root of the source code *',&
+'! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *',&
+'! If you do not have access to either file, you may request a copy from *',&
+'! help@hdfgroup.org. *',&
'! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *',&
'!',&
'! AUTHOR',&
@@ -584,12 +580,10 @@ PROGRAM H5HL_buildiface
'! *',&
'! This file is part of HDF5. The full HDF5 copyright notice, including *',&
'! terms governing use, modification, and redistribution, is contained in *',&
-'! the files COPYING and Copyright.html. COPYING can be found at the root *',&
-'! of the source code distribution tree; Copyright.html can be found at the *',&
-'! root level of an installed copy of the electronic HDF5 document set and *',&
-'! is linked from the top-level documents page. It can also be found at *',&
-'! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *',&
-'! access to either file, you may request a copy from help@hdfgroup.org. *',&
+'! the COPYING file, which can be found at the root of the source code *',&
+'! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *',&
+'! If you do not have access to either file, you may request a copy from *',&
+'! help@hdfgroup.org. *',&
'! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *',&
'!',&
'! AUTHOR',&
diff --git a/hl/fortran/src/H5IMcc.c b/hl/fortran/src/H5IMcc.c
index ae58fab..c6e4b1b 100644
--- a/hl/fortran/src/H5IMcc.c
+++ b/hl/fortran/src/H5IMcc.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "H5IMcc.h"
@@ -314,7 +312,6 @@ out:
* the FORTRAN interface where the image buffer is defined as type "integer"
*
* based on HDF5 Image and Palette Specification
- * http://hdf.ncsa.uiuc.edu/HDF5/H5Image/ImageSpec.html
*
* Modifications:
*
@@ -408,7 +405,6 @@ herr_t H5IMmake_palettef(hid_t loc_id,
* the FORTRAN interface where the image buffer is defined as type "integer"
*
* based on HDF5 Image and Palette Specification
- * http://hdf.ncsa.uiuc.edu/HDF5/H5Image/ImageSpec.html
*
* Modifications:
*
@@ -451,7 +447,6 @@ herr_t H5IMget_palettef(hid_t loc_id,
*
* Comments:
* based on HDF5 Image and Palette Specification
- * http://hdf.ncsa.uiuc.edu/HDF5/H5Image/ImageSpec.html
*
* Modifications:
*
diff --git a/hl/fortran/src/H5IMcc.h b/hl/fortran/src/H5IMcc.h
index 0b8a345..a65669d 100644
--- a/hl/fortran/src/H5IMcc.h
+++ b/hl/fortran/src/H5IMcc.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef _H5IMCC_H
diff --git a/hl/fortran/src/H5IMfc.c b/hl/fortran/src/H5IMfc.c
index 7ad50d6..cafd623 100644
--- a/hl/fortran/src/H5IMfc.c
+++ b/hl/fortran/src/H5IMfc.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
-* the files COPYING and Copyright.html. COPYING can be found at the root *
-* of the source code distribution tree; Copyright.html can be found at the *
-* root level of an installed copy of the electronic HDF5 document set and *
-* is linked from the top-level documents page. It can also be found at *
-* http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-* access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* This files contains C stubs for H5D Fortran APIs */
diff --git a/hl/fortran/src/H5IMff.F90 b/hl/fortran/src/H5IMff.F90
index 4408dda..6646828 100644
--- a/hl/fortran/src/H5IMff.F90
+++ b/hl/fortran/src/H5IMff.F90
@@ -5,12 +5,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
!
diff --git a/hl/fortran/src/H5LTf90proto.h b/hl/fortran/src/H5LTf90proto.h
index 77f941e..35b08a1 100644
--- a/hl/fortran/src/H5LTf90proto.h
+++ b/hl/fortran/src/H5LTf90proto.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
-* the files COPYING and Copyright.html. COPYING can be found at the root *
-* of the source code distribution tree; Copyright.html can be found at the *
-* root level of an installed copy of the electronic HDF5 document set and *
-* is linked from the top-level documents page. It can also be found at *
-* http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-* access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
diff --git a/hl/fortran/src/H5LTfc.c b/hl/fortran/src/H5LTfc.c
index a90c24b..0f8a58f 100644
--- a/hl/fortran/src/H5LTfc.c
+++ b/hl/fortran/src/H5LTfc.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
-* the files COPYING and Copyright.html. COPYING can be found at the root *
-* of the source code distribution tree; Copyright.html can be found at the *
-* root level of an installed copy of the electronic HDF5 document set and *
-* is linked from the top-level documents page. It can also be found at *
-* http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-* access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* This files contains C stubs for H5D Fortran APIs */
diff --git a/hl/fortran/src/H5LTff.F90 b/hl/fortran/src/H5LTff.F90
index 18c36f0..80f6ac5 100644
--- a/hl/fortran/src/H5LTff.F90
+++ b/hl/fortran/src/H5LTff.F90
@@ -5,12 +5,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
!
diff --git a/hl/fortran/src/H5TBfc.c b/hl/fortran/src/H5TBfc.c
index 2bb7c3b..c0d053f 100644
--- a/hl/fortran/src/H5TBfc.c
+++ b/hl/fortran/src/H5TBfc.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
-* the files COPYING and Copyright.html. COPYING can be found at the root *
-* of the source code distribution tree; Copyright.html can be found at the *
-* root level of an installed copy of the electronic HDF5 document set and *
-* is linked from the top-level documents page. It can also be found at *
-* http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-* access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <math.h>
diff --git a/hl/fortran/src/H5TBff.F90 b/hl/fortran/src/H5TBff.F90
index 5d1ee35..2575b24 100644
--- a/hl/fortran/src/H5TBff.F90
+++ b/hl/fortran/src/H5TBff.F90
@@ -5,12 +5,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
!
diff --git a/hl/fortran/src/Makefile.am b/hl/fortran/src/Makefile.am
index 7ac18cd..a495e220 100644
--- a/hl/fortran/src/Makefile.am
+++ b/hl/fortran/src/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
##
## Makefile.am
diff --git a/hl/fortran/test/CMakeLists.txt b/hl/fortran/test/CMakeLists.txt
index 13cb177..3138173 100644
--- a/hl/fortran/test/CMakeLists.txt
+++ b/hl/fortran/test/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_HL_FORTRAN_TESTS C CXX Fortran)
#-----------------------------------------------------------------------------
@@ -25,7 +25,7 @@ if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
FOLDER test/hl/fortran
Fortran_MODULE_DIRECTORY ${CMAKE_Fortran_MODULE_DIRECTORY}/shared
)
-endif (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
+endif ()
#-- Adding test for hl_f90_tstlite
add_executable (hl_f90_tstlite tstlite.F90)
@@ -46,7 +46,7 @@ if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
FOLDER test/hl/fortran
Fortran_MODULE_DIRECTORY ${CMAKE_Fortran_MODULE_DIRECTORY}/shared
)
-endif (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
+endif ()
#-- Adding test for hl_f90_tstimage
add_executable (hl_f90_tstimage tstimage.F90)
@@ -67,7 +67,7 @@ if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
FOLDER test/hl/fortran
Fortran_MODULE_DIRECTORY ${CMAKE_Fortran_MODULE_DIRECTORY}/shared
)
-endif (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
+endif ()
#-- Adding test for hl_f90_tsttable
add_executable (hl_f90_tsttable tsttable.F90)
@@ -88,6 +88,6 @@ if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
FOLDER test/hl/fortran
Fortran_MODULE_DIRECTORY ${CMAKE_Fortran_MODULE_DIRECTORY}/shared
)
-endif (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
+endif ()
include (CMakeTests.cmake)
diff --git a/hl/fortran/test/CMakeTests.cmake b/hl/fortran/test/CMakeTests.cmake
index c538ae8..90cc3a3 100644
--- a/hl/fortran/test/CMakeTests.cmake
+++ b/hl/fortran/test/CMakeTests.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
##############################################################################
##############################################################################
@@ -21,16 +32,68 @@ add_test (
tstds.h5
)
-add_test (NAME HL_FORTRAN_f90_tstds COMMAND $<TARGET_FILE:hl_f90_tstds>)
+if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME HL_FORTRAN_f90_tstds COMMAND $<TARGET_FILE:hl_f90_tstds>)
+else ()
+ add_test (NAME HL_FORTRAN_f90_tstds COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:hl_f90_tstds>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=hl_f90_tstds.txt"
+ #-D "TEST_REFERENCE=hl_f90_tstds.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+endif ()
set_tests_properties (HL_FORTRAN_f90_tstds PROPERTIES DEPENDS HL_FORTRAN_test-clear-objects)
-add_test (NAME HL_FORTRAN_f90_tstlite COMMAND $<TARGET_FILE:hl_f90_tstlite>)
+if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME HL_FORTRAN_f90_tstlite COMMAND $<TARGET_FILE:hl_f90_tstlite>)
+else ()
+ add_test (NAME HL_FORTRAN_f90_tstlite COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:hl_f90_tstlite>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=hl_f90_tstlite.txt"
+ #-D "TEST_REFERENCE=hl_f90_tstlite.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+endif ()
set_tests_properties (HL_FORTRAN_f90_tstlite PROPERTIES DEPENDS HL_FORTRAN_test-clear-objects)
-add_test (NAME HL_FORTRAN_f90_tstimage COMMAND $<TARGET_FILE:hl_f90_tstimage>)
+if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME HL_FORTRAN_f90_tstimage COMMAND $<TARGET_FILE:hl_f90_tstimage>)
+else ()
+ add_test (NAME HL_FORTRAN_f90_tstimage COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:hl_f90_tstimage>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=hl_f90_tstimage.txt"
+ #-D "TEST_REFERENCE=hl_f90_tstimage.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+endif ()
set_tests_properties (HL_FORTRAN_f90_tstimage PROPERTIES DEPENDS HL_FORTRAN_test-clear-objects)
-add_test (NAME HL_FORTRAN_f90_tsttable COMMAND $<TARGET_FILE:hl_f90_tsttable>)
+if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME HL_FORTRAN_f90_tsttable COMMAND $<TARGET_FILE:hl_f90_tsttable>)
+else ()
+ add_test (NAME HL_FORTRAN_f90_tsttable COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:hl_f90_tsttable>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=hl_f90_tsttable.txt"
+ #-D "TEST_REFERENCE=hl_f90_tsttable.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+endif ()
set_tests_properties (HL_FORTRAN_f90_tsttable PROPERTIES DEPENDS HL_FORTRAN_test-clear-objects)
if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
@@ -52,15 +115,67 @@ if (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
PROPERTIES DEPENDS "HL_FORTRAN_f90_tsttable;HL_FORTRAN_f90_tstimage;HL_FORTRAN_f90_tstlite;HL_FORTRAN_f90_tstds"
)
- add_test (NAME HL_FORTRAN_f90_tstds-shared COMMAND $<TARGET_FILE:hl_f90_tstds-shared>)
+ if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME HL_FORTRAN_f90_tstds-shared COMMAND $<TARGET_FILE:hl_f90_tstds-shared>)
+ else ()
+ add_test (NAME HL_FORTRAN_f90_tstds-shared COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:hl_f90_tstds-shared>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=hl_f90_tstds-shared.txt"
+ #-D "TEST_REFERENCE=hl_f90_tstds-shared.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+ endif ()
set_tests_properties (HL_FORTRAN_f90_tstds-shared PROPERTIES DEPENDS HL_FORTRAN_test-shared-clear-objects)
- add_test (NAME HL_FORTRAN_f90_tstlite-shared COMMAND $<TARGET_FILE:hl_f90_tstlite-shared>)
+ if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME HL_FORTRAN_f90_tstlite-shared COMMAND $<TARGET_FILE:hl_f90_tstlite-shared>)
+ else ()
+ add_test (NAME HL_FORTRAN_f90_tstlite-shared COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:hl_f90_tstlite-shared>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=hl_f90_tstlite-shared.txt"
+ #-D "TEST_REFERENCE=hl_f90_tstlite-shared.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+ endif ()
set_tests_properties (HL_FORTRAN_f90_tstlite-shared PROPERTIES DEPENDS HL_FORTRAN_test-shared-clear-objects)
- add_test (NAME HL_FORTRAN_f90_tstimage-shared COMMAND $<TARGET_FILE:hl_f90_tstimage-shared>)
+ if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME HL_FORTRAN_f90_tstimage-shared COMMAND $<TARGET_FILE:hl_f90_tstimage-shared>)
+ else ()
+ add_test (NAME HL_FORTRAN_f90_tstimage-shared COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:hl_f90_tstimage-shared>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=hl_f90_tstimage-shared.txt"
+ #-D "TEST_REFERENCE=hl_f90_tstimage-shared.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+ endif ()
set_tests_properties (HL_FORTRAN_f90_tstimage-shared PROPERTIES DEPENDS HL_FORTRAN_test-shared-clear-objects)
- add_test (NAME HL_FORTRAN_f90_tsttable-shared COMMAND $<TARGET_FILE:hl_f90_tsttable-shared>)
+ if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME HL_FORTRAN_f90_tsttable-shared COMMAND $<TARGET_FILE:hl_f90_tsttable-shared>)
+ else ()
+ add_test (NAME HL_FORTRAN_f90_tsttable-shared COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:hl_f90_tsttable-shared>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=hl_f90_tsttable-shared.txt"
+ #-D "TEST_REFERENCE=hl_f90_tsttable-shared.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+ endif ()
set_tests_properties (HL_FORTRAN_f90_tsttable-shared PROPERTIES DEPENDS HL_FORTRAN_test-shared-clear-objects)
-endif (BUILD_SHARED_LIBS AND NOT SKIP_HDF5_FORTRAN_SHARED)
+endif ()
diff --git a/hl/fortran/test/Makefile.am b/hl/fortran/test/Makefile.am
index 4013d39..ba672aa 100644
--- a/hl/fortran/test/Makefile.am
+++ b/hl/fortran/test/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
##
## Makefile.am
diff --git a/hl/fortran/test/tstds.F90 b/hl/fortran/test/tstds.F90
index f5df4ef..387f524 100644
--- a/hl/fortran/test/tstds.F90
+++ b/hl/fortran/test/tstds.F90
@@ -5,12 +5,10 @@
! * *
! * This file is part of HDF5. The full HDF5 copyright notice, including *
! * terms governing use, modification, and redistribution, is contained in *
-! * the files COPYING and Copyright.html. COPYING can be found at the root *
-! * of the source code distribution tree; Copyright.html can be found at the *
-! * root level of an installed copy of the electronic HDF5 document set and *
-! * is linked from the top-level documents page. It can also be found at *
-! * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! * access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
MODULE TSTDS
diff --git a/hl/fortran/test/tstimage.F90 b/hl/fortran/test/tstimage.F90
index 7e90664..d6bd1e2 100644
--- a/hl/fortran/test/tstimage.F90
+++ b/hl/fortran/test/tstimage.F90
@@ -5,12 +5,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
!
diff --git a/hl/fortran/test/tstlite.F90 b/hl/fortran/test/tstlite.F90
index b19ca5a..673807b 100644
--- a/hl/fortran/test/tstlite.F90
+++ b/hl/fortran/test/tstlite.F90
@@ -5,12 +5,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
!
diff --git a/hl/fortran/test/tsttable.F90 b/hl/fortran/test/tsttable.F90
index 3cf8fed..38cfa86 100644
--- a/hl/fortran/test/tsttable.F90
+++ b/hl/fortran/test/tsttable.F90
@@ -5,12 +5,10 @@
! *
! This file is part of HDF5. The full HDF5 copyright notice, including *
! terms governing use, modification, and redistribution, is contained in *
-! the files COPYING and Copyright.html. COPYING can be found at the root *
-! of the source code distribution tree; Copyright.html can be found at the *
-! root level of an installed copy of the electronic HDF5 document set and *
-! is linked from the top-level documents page. It can also be found at *
-! http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-! access to either file, you may request a copy from help@hdfgroup.org. *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
!
!
diff --git a/hl/src/CMakeLists.txt b/hl/src/CMakeLists.txt
index 0c71583..86472fb 100644
--- a/hl/src/CMakeLists.txt
+++ b/hl/src/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_HL_SRC)
#-----------------------------------------------------------------------------
@@ -53,7 +53,7 @@ if (BUILD_SHARED_LIBS)
)
set_global_variable (HDF5_LIBRARIES_TO_EXPORT "${HDF5_LIBRARIES_TO_EXPORT};${HDF5_HL_LIBSH_TARGET}")
set (install_targets ${install_targets} ${HDF5_HL_LIBSH_TARGET})
-endif (BUILD_SHARED_LIBS)
+endif ()
#-----------------------------------------------------------------------------
# Add file(s) to CMake Install
@@ -73,7 +73,8 @@ install (
if (HDF5_EXPORTED_TARGETS)
if (BUILD_SHARED_LIBS)
INSTALL_TARGET_PDB (${HDF5_HL_LIBSH_TARGET} ${HDF5_INSTALL_BIN_DIR} hllibraries)
- endif (BUILD_SHARED_LIBS)
+ endif ()
+ INSTALL_TARGET_PDB (${HDF5_HL_LIB_TARGET} ${HDF5_INSTALL_BIN_DIR} hllibraries)
install (
TARGETS
@@ -86,4 +87,4 @@ if (HDF5_EXPORTED_TARGETS)
FRAMEWORK DESTINATION ${HDF5_INSTALL_FWRK_DIR} COMPONENT hllibraries
INCLUDES DESTINATION include
)
-endif (HDF5_EXPORTED_TARGETS)
+endif ()
diff --git a/hl/src/COPYING b/hl/src/COPYING
index 6903daf..6497ace 100644
--- a/hl/src/COPYING
+++ b/hl/src/COPYING
@@ -5,12 +5,9 @@
The files and subdirectories in this directory are part of HDF5.
The full HDF5 copyright notice, including terms governing use,
- modification, and redistribution, is contained in the files COPYING
- and Copyright.html. COPYING can be found at the root of the source
- code distribution tree; Copyright.html can be found at the root
- level of an installed copy of the electronic HDF5 document set and
- is linked from the top-level documents page. It can also be found
- at http://www.hdfgroup.org/HDF5/doc/Copyright.html. If you do not
- have access to either file, you may request a copy from
+ modification, and redistribution, is contained in the COPYING file
+ which can be found at the root of the source code distribution tree
+ or in https://support.hdfgroup.org/ftp/HDF5/releases. If you do
+ not have access to either file, you may request a copy from
help@hdfgroup.org.
diff --git a/hl/src/H5DO.c b/hl/src/H5DO.c
index e2b436a..99cf2f7 100644
--- a/hl/src/H5DO.c
+++ b/hl/src/H5DO.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
-* the files COPYING and Copyright.html. COPYING can be found at the root *
-* of the source code distribution tree; Copyright.html can be found at the *
-* root level of an installed copy of the electronic HDF5 document set and *
-* is linked from the top-level documents page. It can also be found at *
-* http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-* access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <string.h>
@@ -34,7 +32,7 @@
* Return: Non-negative on success/Negative on failure
*
* Programmer: Raymond Lu
- * 30 July 2012
+ * 30 July 2012
*
*-------------------------------------------------------------------------
*/
@@ -62,7 +60,7 @@ H5DOwrite_chunk(hid_t dset_id, hid_t dxpl_id, uint32_t filters, const hsize_t *o
/* If the user passed in a default DXPL, create one to pass to H5Dwrite() */
if(H5P_DEFAULT == dxpl_id) {
- if((dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0)
+ if((dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0)
goto done;
created_dxpl = TRUE;
} /* end if */
@@ -89,16 +87,87 @@ done:
if(H5Pclose(dxpl_id) < 0)
ret_value = FAIL;
} /* end if */
- else
+ else {
/* Reset the direct write flag on user DXPL */
+ do_direct_write = FALSE;
if(H5Pset(dxpl_id, H5D_XFER_DIRECT_CHUNK_WRITE_FLAG_NAME, &do_direct_write) < 0)
ret_value = FAIL;
+ }
- return(ret_value);
+ return ret_value;
} /* end H5DOwrite_chunk() */
-/*
+/*-------------------------------------------------------------------------
+ * Function: H5DOread_chunk
+ *
+ * Purpose: Reads an entire chunk from the file directly.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Matthew Strong (GE Healthcare)
+ * 14 February 2016
+ *
+ *---------------------------------------------------------------------------
+ */
+herr_t
+H5DOread_chunk(hid_t dset_id, hid_t dxpl_id, const hsize_t *offset, uint32_t *filters,
+ void *buf)
+{
+ hbool_t created_dxpl = FALSE; /* Whether we created a DXPL */
+ hbool_t do_direct_read = TRUE; /* Flag for direct reads */
+ herr_t ret_value = FAIL; /* Return value */
+
+ /* Check arguments */
+ if(dset_id < 0)
+ goto done;
+ if(!buf)
+ goto done;
+ if(!offset)
+ goto done;
+ if(!filters)
+ goto done;
+
+ /* If the user passed in a default DXPL, create one to pass to H5Dread() */
+ if(H5P_DEFAULT == dxpl_id) {
+ if((dxpl_id = H5Pcreate(H5P_DATASET_XFER)) < 0)
+ goto done;
+ created_dxpl = TRUE;
+ } /* end if */
+
+ /* Set direct read parameters */
+ if(H5Pset(dxpl_id, H5D_XFER_DIRECT_CHUNK_READ_FLAG_NAME, &do_direct_read) < 0)
+ goto done;
+ if(H5Pset(dxpl_id, H5D_XFER_DIRECT_CHUNK_READ_OFFSET_NAME, &offset) < 0)
+ goto done;
+
+ /* Read chunk */
+ if(H5Dread(dset_id, 0, H5S_ALL, H5S_ALL, dxpl_id, buf) < 0)
+ goto done;
+ /* Get the filter mask */
+ if(H5Pget(dxpl_id, H5D_XFER_DIRECT_CHUNK_READ_FILTERS_NAME, filters) < 0)
+ goto done;
+
+ /* Indicate success */
+ ret_value = SUCCEED;
+
+done:
+ if(created_dxpl) {
+ if(H5Pclose(dxpl_id) < 0)
+ ret_value = FAIL;
+ } /* end if */
+ else {
+ /* Reset the direct read flag on user DXPL */
+ do_direct_read = FALSE;
+ if(H5Pset(dxpl_id, H5D_XFER_DIRECT_CHUNK_READ_FLAG_NAME, &do_direct_read) < 0)
+ ret_value = FAIL;
+ }
+
+ return ret_value;
+} /* end H5DOread_chunk() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5DOappend()
*
* Purpose: To append elements to a dataset.
@@ -124,6 +193,8 @@ done:
* ret_value = FAIL;
* goto done;
* }
+ *
+ *-------------------------------------------------------------------------
*/
herr_t
H5DOappend(hid_t dset_id, hid_t dxpl_id, unsigned axis, size_t extension,
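
For orientation, a minimal C sketch of how an application might call the existing H5DOwrite_chunk() routine whose cleanup path is tightened above; the helper name, dataset handle, chunk offset, and pre-filtered buffer are assumptions for illustration, not part of this change:

#include "hdf5.h"
#include "hdf5_hl.h"

/* Hypothetical helper: write one chunk whose data has already been run
 * through the dataset's filter pipeline (e.g. deflate). A filter mask of 0
 * indicates every filter defined on the dataset was applied to the buffer. */
static herr_t write_one_chunk(hid_t dset_id, const void *filtered_buf, size_t filtered_size)
{
    hsize_t offset[2] = {0, 0};    /* logical offset of the target chunk, in dataset elements */

    /* H5P_DEFAULT is allowed here; as shown in the hunk above, the routine
     * creates and closes its own transfer property list in that case. */
    return H5DOwrite_chunk(dset_id, H5P_DEFAULT, 0 /* filter mask */,
                           offset, filtered_size, filtered_buf);
}
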
diff --git a/hl/src/H5DOpublic.h b/hl/src/H5DOpublic.h
index 1e5eb7a..d5c8de4 100644
--- a/hl/src/H5DOpublic.h
+++ b/hl/src/H5DOpublic.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef _H5DOpublic_H
@@ -30,6 +28,12 @@ extern "C" {
H5_HLDLL herr_t H5DOwrite_chunk(hid_t dset_id, hid_t dxpl_id, uint32_t filters,
const hsize_t *offset, size_t data_size, const void *buf);
+H5_HLDLL herr_t H5DOread_chunk(hid_t dset_id, /*in*/
+ hid_t dxpl_id, /*in*/
+ const hsize_t *offset, /*in*/
+ uint32_t *filters, /*out*/
+ void *buf); /*out*/
+
H5_HLDLL herr_t H5DOappend(hid_t dset_id, hid_t dxpl_id, unsigned axis,
size_t extension, hid_t memtype, const void *buf);
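
A companion sketch for the new H5DOread_chunk() entry point declared above; the helper name and buffer handling are assumptions. The caller supplies a buffer large enough for the chunk as it is stored in the file and receives the chunk's recorded filter mask along with the still-filtered bytes:

#include "hdf5.h"
#include "hdf5_hl.h"

/* Hypothetical helper: read one chunk directly, without running the data
 * through the filter pipeline. raw_buf must be large enough to hold the
 * chunk exactly as stored on disk. */
static herr_t read_one_chunk(hid_t dset_id, void *raw_buf)
{
    hsize_t  offset[2] = {0, 0};   /* logical offset of the chunk to read */
    uint32_t filters   = 0;        /* receives the chunk's filter mask */

    if (H5DOread_chunk(dset_id, H5P_DEFAULT, offset, &filters, raw_buf) < 0)
        return -1;

    /* filters now describes how the stored chunk was filtered; raw_buf
     * holds the undecoded chunk bytes. */
    return 0;
}
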
diff --git a/hl/src/H5DS.c b/hl/src/H5DS.c
index de6e66c..527c92b 100644
--- a/hl/src/H5DS.c
+++ b/hl/src/H5DS.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
-* the files COPYING and Copyright.html. COPYING can be found at the root *
-* of the source code distribution tree; Copyright.html can be found at the *
-* root level of an installed copy of the electronic HDF5 document set and *
-* is linked from the top-level documents page. It can also be found at *
-* http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-* access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <assert.h>
diff --git a/hl/src/H5DSprivate.h b/hl/src/H5DSprivate.h
index 39c3e74..9d20d48 100644
--- a/hl/src/H5DSprivate.h
+++ b/hl/src/H5DSprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef _H5DSprivate_H
diff --git a/hl/src/H5DSpublic.h b/hl/src/H5DSpublic.h
index 85923f8..615122c 100644
--- a/hl/src/H5DSpublic.h
+++ b/hl/src/H5DSpublic.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef _H5DSpublic_H
diff --git a/hl/src/H5HLprivate2.h b/hl/src/H5HLprivate2.h
index 1b67c70..45591e8 100644
--- a/hl/src/H5HLprivate2.h
+++ b/hl/src/H5HLprivate2.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef _H5HLprivate2_H
diff --git a/hl/src/H5IM.c b/hl/src/H5IM.c
index 6d08f02..a4818b8 100644
--- a/hl/src/H5IM.c
+++ b/hl/src/H5IM.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
-* the files COPYING and Copyright.html. COPYING can be found at the root *
-* of the source code distribution tree; Copyright.html can be found at the *
-* root level of an installed copy of the electronic HDF5 document set and *
-* is linked from the top-level documents page. It can also be found at *
-* http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-* access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "H5IMprivate.h"
@@ -31,7 +29,6 @@
*
* Comments:
* based on HDF5 Image and Palette Specification
-* http://hdf.ncsa.uiuc.edu/HDF5/H5Image/ImageSpec.html
*
* Modifications:
*
@@ -86,7 +83,6 @@ herr_t H5IMmake_image_8bit( hid_t loc_id,
*
* Comments:
* based on HDF5 Image and Palette Specification
-* http://hdf.ncsa.uiuc.edu/HDF5/H5Image/ImageSpec.html
*
* Interlace Mode Dimensions in the Dataspace
* INTERLACE_PIXEL [height][width][pixel components]
@@ -240,7 +236,6 @@ herr_t H5IM_find_palette( hid_t loc_id )
*
* Comments:
* based on HDF5 Image and Palette Specification
-* http://hdf.ncsa.uiuc.edu/HDF5/H5Image/ImageSpec.html
*
* Modifications:
*
@@ -422,7 +417,6 @@ out:
*
* Comments:
* based on HDF5 Image and Palette Specification
-* http://hdf.ncsa.uiuc.edu/HDF5/H5Image/ImageSpec.html
*
* Modifications:
*
@@ -473,7 +467,6 @@ out:
*
* Comments:
* based on HDF5 Image and Palette Specification
-* http://hdf.ncsa.uiuc.edu/HDF5/H5Image/ImageSpec.html
*
* Modifications:
*
@@ -530,7 +523,6 @@ herr_t H5IMmake_palette( hid_t loc_id,
*
* Comments:
* based on HDF5 Image and Palette Specification
-* http://hdf.ncsa.uiuc.edu/HDF5/H5Image/ImageSpec.html
*
* An image (dataset) within an HDF5 file may optionally specify an array of
* palettes to be viewed with. The dataset will have an attribute
@@ -708,7 +700,6 @@ out:
*
* Comments:
* based on HDF5 Image and Palette Specification
-* http://hdf.ncsa.uiuc.edu/HDF5/H5Image/ImageSpec.html
*
* Modifications:
*
@@ -901,7 +892,6 @@ out:
*
* Comments:
* based on HDF5 Image and Palette Specification
-* http://hdf.ncsa.uiuc.edu/HDF5/H5Image/ImageSpec.html
*
* Modifications:
*
@@ -1018,7 +1008,6 @@ out:
*
* Comments:
* based on HDF5 Image and Palette Specification
-* http://hdf.ncsa.uiuc.edu/HDF5/H5Image/ImageSpec.html
*
* Modifications:
*
@@ -1126,7 +1115,6 @@ out:
*
* Comments:
* based on HDF5 Image and Palette Specification
-* http://hdf.ncsa.uiuc.edu/HDF5/H5Image/ImageSpec.html
*
* Modifications:
*
@@ -1231,7 +1219,6 @@ out:
*
* Comments:
* based on HDF5 Image and Palette Specification
-* http://hdf.ncsa.uiuc.edu/HDF5/H5Image/ImageSpec.html
*
* Modifications:
*
diff --git a/hl/src/H5IMprivate.h b/hl/src/H5IMprivate.h
index 0cb37cc..6776c9d 100644
--- a/hl/src/H5IMprivate.h
+++ b/hl/src/H5IMprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef _H5IMprivate_H
diff --git a/hl/src/H5IMpublic.h b/hl/src/H5IMpublic.h
index 6833f02..a95e439 100644
--- a/hl/src/H5IMpublic.h
+++ b/hl/src/H5IMpublic.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef _H5IMpublic_H
diff --git a/hl/src/H5LD.c b/hl/src/H5LD.c
index 4abd740..1669a30 100644
--- a/hl/src/H5LD.c
+++ b/hl/src/H5LD.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
-* the files COPYING and Copyright.html. COPYING can be found at the root *
-* of the source code distribution tree; Copyright.html can be found at the *
-* root level of an installed copy of the electronic HDF5 document set and *
-* is linked from the top-level documents page. It can also be found at *
-* http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-* access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <string.h>
diff --git a/hl/src/H5LDprivate.h b/hl/src/H5LDprivate.h
index 13e0710..b52928b 100644
--- a/hl/src/H5LDprivate.h
+++ b/hl/src/H5LDprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef _H5LDprivate_H
diff --git a/hl/src/H5LDpublic.h b/hl/src/H5LDpublic.h
index 4844d42..e2bb745 100644
--- a/hl/src/H5LDpublic.h
+++ b/hl/src/H5LDpublic.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef _H5LDpublic_H
diff --git a/hl/src/H5LT.c b/hl/src/H5LT.c
index b7e0b4a..0074d02 100644
--- a/hl/src/H5LT.c
+++ b/hl/src/H5LT.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
-* the files COPYING and Copyright.html. COPYING can be found at the root *
-* of the source code distribution tree; Copyright.html can be found at the *
-* root level of an installed copy of the electronic HDF5 document set and *
-* is linked from the top-level documents page. It can also be found at *
-* http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-* access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <assert.h>
diff --git a/hl/src/H5LTanalyze.c b/hl/src/H5LTanalyze.c
index 022e24f..b6a7f58 100644
--- a/hl/src/H5LTanalyze.c
+++ b/hl/src/H5LTanalyze.c
@@ -883,12 +883,10 @@ char *H5LTyytext;
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* NOTE!
*
diff --git a/hl/src/H5LTanalyze.l b/hl/src/H5LTanalyze.l
index a0c4f42..b9b492e 100644
--- a/hl/src/H5LTanalyze.l
+++ b/hl/src/H5LTanalyze.l
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* NOTE!
diff --git a/hl/src/H5LTparse.y b/hl/src/H5LTparse.y
index 80dcc42..66a8556 100644
--- a/hl/src/H5LTparse.y
+++ b/hl/src/H5LTparse.y
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* NOTE!
diff --git a/hl/src/H5LTprivate.h b/hl/src/H5LTprivate.h
index 98ac06e..01c5ee6 100644
--- a/hl/src/H5LTprivate.h
+++ b/hl/src/H5LTprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef _H5LTprivate_H
diff --git a/hl/src/H5LTpublic.h b/hl/src/H5LTpublic.h
index 929c6bd..47be98a 100644
--- a/hl/src/H5LTpublic.h
+++ b/hl/src/H5LTpublic.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef _H5LTpublic_H
diff --git a/hl/src/H5PT.c b/hl/src/H5PT.c
index 5f0f94f..07d8bfb 100644
--- a/hl/src/H5PT.c
+++ b/hl/src/H5PT.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <stdlib.h>
diff --git a/hl/src/H5PTprivate.h b/hl/src/H5PTprivate.h
index adc2fa9..eec9df7 100644
--- a/hl/src/H5PTprivate.h
+++ b/hl/src/H5PTprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef _H5PTprivate_H
diff --git a/hl/src/H5PTpublic.h b/hl/src/H5PTpublic.h
index 420275a..8a12c8c 100644
--- a/hl/src/H5PTpublic.h
+++ b/hl/src/H5PTpublic.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef _H5PTpublic_H
diff --git a/hl/src/H5TB.c b/hl/src/H5TB.c
index bcd3339..a4bcbd4 100644
--- a/hl/src/H5TB.c
+++ b/hl/src/H5TB.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
-* the files COPYING and Copyright.html. COPYING can be found at the root *
-* of the source code distribution tree; Copyright.html can be found at the *
-* root level of an installed copy of the electronic HDF5 document set and *
-* is linked from the top-level documents page. It can also be found at *
-* http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-* access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <stdlib.h>
diff --git a/hl/src/H5TBprivate.h b/hl/src/H5TBprivate.h
index e2b668d..17306fb 100644
--- a/hl/src/H5TBprivate.h
+++ b/hl/src/H5TBprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef _H5TBprivate_H
diff --git a/hl/src/H5TBpublic.h b/hl/src/H5TBpublic.h
index 874ef20..56aa915 100644
--- a/hl/src/H5TBpublic.h
+++ b/hl/src/H5TBpublic.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef _H5TBpublic_H
diff --git a/hl/src/Makefile.am b/hl/src/Makefile.am
index fef3450..4ef1f5f 100644
--- a/hl/src/Makefile.am
+++ b/hl/src/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
diff --git a/hl/src/hdf5_hl.h b/hl/src/hdf5_hl.h
index f55aa04..3a7b8ee 100644
--- a/hl/src/hdf5_hl.h
+++ b/hl/src/hdf5_hl.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/hl/test/CMakeLists.txt b/hl/test/CMakeLists.txt
index 42a175f..8845505 100644
--- a/hl/test/CMakeLists.txt
+++ b/hl/test/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_HL_TEST)
# --------------------------------------------------------------------
# Notes: When creating unit test executables they should be prefixed
@@ -14,8 +14,8 @@ PROJECT (HDF5_HL_TEST)
set (srcdir ${CMAKE_CURRENT_SOURCE_DIR})
configure_file (${HDF5_HL_TEST_SOURCE_DIR}/H5srcdir_str.h.in H5srcdir_str.h @ONLY)
-INCLUDE_DIRECTORIES (${CMAKE_CURRENT_BINARY_DIR})
-INCLUDE_DIRECTORIES (${HDF5_TEST_SRC_DIR})
+include_directories (${CMAKE_CURRENT_BINARY_DIR})
+include_directories (${HDF5_TEST_SRC_DIR})
# --------------------------------------------------------------------
# Macro used to add a unit test
@@ -30,7 +30,7 @@ MACRO (HL_ADD_EXE hl_name)
${HDF5_LIB_TARGET}
)
set_target_properties (hl_${hl_name} PROPERTIES FOLDER test/hl)
-ENDMACRO (HL_ADD_EXE)
+ENDMACRO ()
MACRO (HL_ADD_SHEXE hl_name)
add_executable (hl_${hl_name} ${hl_name}.c)
@@ -42,7 +42,7 @@ MACRO (HL_ADD_SHEXE hl_name)
${HDF5_LIBSH_TARGET}
)
set_target_properties (hl_${hl_name} PROPERTIES FOLDER test/hl)
-ENDMACRO (HL_ADD_SHEXE)
+ENDMACRO ()
HL_ADD_EXE (test_lite)
HL_ADD_EXE (test_image)
@@ -88,6 +88,6 @@ if (HDF5_BUILD_GENERATORS)
${HDF5_LIB_TARGET}
)
set_target_properties (hl_gen_test_ld PROPERTIES FOLDER test/hl/gen)
-endif (HDF5_BUILD_GENERATORS)
+endif ()
include (CMakeTests.cmake)
diff --git a/hl/test/CMakeTests.cmake b/hl/test/CMakeTests.cmake
index 228853d..e5eb58e 100644
--- a/hl/test/CMakeTests.cmake
+++ b/hl/test/CMakeTests.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
##############################################################################
##############################################################################
@@ -29,21 +40,34 @@ set (HL_REFERENCE_TEST_FILES
# --------------------------------------------------------------------
foreach (h5_file ${HL_REFERENCE_TEST_FILES})
HDFTEST_COPY_FILE("${HDF5_HL_TEST_SOURCE_DIR}/${h5_file}" "${HDF5_HL_TEST_BINARY_DIR}/${h5_file}" "hl_test_files")
-endforeach (h5_file ${HL_REFERENCE_TEST_FILES})
+endforeach ()
add_custom_target(hl_test_files ALL COMMENT "Copying files needed by hl_test tests" DEPENDS ${hl_test_files_list})
# --------------------------------------------------------------------
# Macro used to add a unit test
# --------------------------------------------------------------------
-MACRO (HL_ADD_TEST hl_name)
- add_test (NAME HL_${hl_name} COMMAND $<TARGET_FILE:hl_${hl_name}>)
+macro (HL_ADD_TEST hl_name)
+ if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME HL_${hl_name} COMMAND $<TARGET_FILE:hl_${hl_name}>)
+ else ()
+ add_test (NAME HL_${hl_name} COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:hl_${hl_name}>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=hl_${hl_name}.txt"
+ #-D "TEST_REFERENCE=hl_${hl_name}.out"
+ -D "TEST_FOLDER=${HDF5_HL_TEST_BINARY_DIR}"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+ endif ()
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (HL_${hl_name} PROPERTIES DEPENDS ${last_test}
ENVIRONMENT "srcdir=${HDF5_HL_TEST_BINARY_DIR}"
WORKING_DIRECTORY ${HDF5_HL_TEST_BINARY_DIR}
)
endif ()
-ENDMACRO (HL_ADD_TEST)
+endmacro ()
# Remove any output file left over from previous test run
add_test (
@@ -82,7 +106,7 @@ add_test (
)
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (HL_test-clear-objects PROPERTIES DEPENDS ${last_test})
-endif (NOT "${last_test}" STREQUAL "")
+endif ()
set (last_test "HL_test-clear-objects")
HL_add_test (test_lite )
diff --git a/hl/test/COPYING b/hl/test/COPYING
index 6903daf..6497ace 100644
--- a/hl/test/COPYING
+++ b/hl/test/COPYING
@@ -5,12 +5,9 @@
The files and subdirectories in this directory are part of HDF5.
The full HDF5 copyright notice, including terms governing use,
- modification, and redistribution, is contained in the files COPYING
- and Copyright.html. COPYING can be found at the root of the source
- code distribution tree; Copyright.html can be found at the root
- level of an installed copy of the electronic HDF5 document set and
- is linked from the top-level documents page. It can also be found
- at http://www.hdfgroup.org/HDF5/doc/Copyright.html. If you do not
- have access to either file, you may request a copy from
+ modification, and redistribution, is contained in the COPYING file
+ which can be found at the root of the source code distribution tree
+ or in https://support.hdfgroup.org/ftp/HDF5/releases. If you do
+ not have access to either file, you may request a copy from
help@hdfgroup.org.
diff --git a/hl/test/H5srcdir_str.h.in b/hl/test/H5srcdir_str.h.in
index d472124..bab1df3 100644
--- a/hl/test/H5srcdir_str.h.in
+++ b/hl/test/H5srcdir_str.h.in
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* If you are reading this file and it has a '.h' suffix, it was automatically
diff --git a/hl/test/Makefile.am b/hl/test/Makefile.am
index 3f21218..e16550f 100644
--- a/hl/test/Makefile.am
+++ b/hl/test/Makefile.am
@@ -4,12 +4,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
#
## Makefile.am
@@ -47,7 +45,8 @@ endif
CHECK_CLEANFILES+=combine_tables[1-2].h5 test_ds[1-9].h5 test_ds10.h5 \
test_image[1-3].h5 file_img[1-2].h5 test_lite[1-4].h5 test_table.h5 \
test_packet_table.h5 test_packet_compress.h5 test_detach.h5 \
- test_dectris.h5 test_append.h5
+ test_packet_table_vlen.h5 testfl_packet_table_vlen.h5 \
+ test_dectris.h5 test_append.h5
# Sources for test_packet executable
test_packet_SOURCES=test_packet.c test_packet_vlen.c
diff --git a/hl/test/dectris_hl_perf.c b/hl/test/dectris_hl_perf.c
index 008bd92..13cfac8 100644
--- a/hl/test/dectris_hl_perf.c
+++ b/hl/test/dectris_hl_perf.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/hl/test/gen_test_ds.c b/hl/test/gen_test_ds.c
index 2fc8140..f8f1d39 100644
--- a/hl/test/gen_test_ds.c
+++ b/hl/test/gen_test_ds.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/hl/test/gen_test_ld.c b/hl/test/gen_test_ld.c
index cf7b321..d717a47 100644
--- a/hl/test/gen_test_ld.c
+++ b/hl/test/gen_test_ld.c
@@ -1,3 +1,14 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "hdf5.h"
#include "H5LDprivate.h"
diff --git a/hl/test/h5hltest.h b/hl/test/h5hltest.h
index 9a5e702..f368b8e 100644
--- a/hl/test/h5hltest.h
+++ b/hl/test/h5hltest.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/hl/test/pal_rgb.h b/hl/test/pal_rgb.h
index 9610258..4b22bea 100644
--- a/hl/test/pal_rgb.h
+++ b/hl/test/pal_rgb.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
const unsigned char pal_rgb[256*3] = {255,255,255,
diff --git a/hl/test/test_ds.c b/hl/test/test_ds.c
index 091a98b..91f1505 100644
--- a/hl/test/test_ds.c
+++ b/hl/test/test_ds.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <stdlib.h>
diff --git a/hl/test/test_dset_append.c b/hl/test/test_dset_append.c
index a5c9abc..d890481 100644
--- a/hl/test/test_dset_append.c
+++ b/hl/test/test_dset_append.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
-* the files COPYING and Copyright.html. COPYING can be found at the root *
-* of the source code distribution tree; Copyright.html can be found at the *
-* root level of an installed copy of the electronic HDF5 document set and *
-* is linked from the top-level documents page. It can also be found at *
-* http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-* access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <stdlib.h>
diff --git a/hl/test/test_dset_opt.c b/hl/test/test_dset_opt.c
index b03ab44..4b5fa19 100644
--- a/hl/test/test_dset_opt.c
+++ b/hl/test/test_dset_opt.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
-* the files COPYING and Copyright.html. COPYING can be found at the root *
-* of the source code distribution tree; Copyright.html can be found at the *
-* root level of an installed copy of the electronic HDF5 document set and *
-* is linked from the top-level documents page. It can also be found at *
-* http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-* access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <stdlib.h>
@@ -28,19 +26,28 @@
#define FILE_NAME "test_dectris.h5"
+/* Datasets for Direct Write tests */
#define DATASETNAME1 "direct_write"
#define DATASETNAME2 "skip_one_filter"
#define DATASETNAME3 "skip_two_filters"
#define DATASETNAME4 "data_conv"
#define DATASETNAME5 "contiguous_dset"
#define DATASETNAME6 "invalid_argue"
+#define DATASETNAME7 "overwrite_chunk"
+/* Datasets for Direct Read tests */
+#define DATASETNAME8 "disabled_chunk_cache"
+#define DATASETNAME9 "flush_chunk_cache"
+#define DATASETNAME10 "read_w_valid_cache"
+#define DATASETNAME11 "unallocated_chunk"
+#define DATASETNAME12 "unfiltered_data"
+
#define RANK 2
#define NX 16
#define NY 16
#define CHUNK_NX 4
#define CHUNK_NY 4
-#define DEFLATE_SIZE_ADJUST(s) (ceil(((double)(s))*(double)1.001F)+12)
+#define DEFLATE_SIZE_ADJUST(s) (ceil(((double)(s))*1.001)+12)
/* Temporary filter IDs used for testing */
#define H5Z_FILTER_BOGUS1 305
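
The DEFLATE_SIZE_ADJUST tweak above only drops a stray float suffix; the macro still pads a chunk's byte count to zlib's worst-case compressed size (roughly n*1.001 + 12 bytes). A minimal sketch, not part of the patch, of how the tests use it to size the deflate output buffer before a direct chunk write; the local names chunk_bytes, zbuf and zlen are illustrative only:

    size_t  chunk_bytes = CHUNK_NX * CHUNK_NY * sizeof(int);
    uLongf  zlen = (uLongf)DEFLATE_SIZE_ADJUST(chunk_bytes);   /* worst-case bound */
    Bytef  *zbuf = (Bytef *)HDmalloc((size_t)zlen);

    /* compress2() updates zlen to the actual compressed size on success */
    if(Z_OK != compress2(zbuf, &zlen, (const Bytef *)direct_buf,
                         (uLong)chunk_bytes, 9))
        goto error;

    /* ...hand zbuf/zlen to H5DOwrite_chunk(), then HDfree(zbuf)... */
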
@@ -48,6 +55,13 @@
#define ADD_ON 7
#define FACTOR 3
+/* Constants for the overwrite test */
+#define OVERWRITE_NDIMS 3
+#define OVERWRITE_CHUNK_NX 3
+#define OVERWRITE_CHUNK_2NX 6
+#define OVERWRITE_CHUNK_NY 2
+#define OVERWRITE_VALUE 42
+
/* Local prototypes for filter functions */
static size_t filter_bogus1(unsigned int flags, size_t cd_nelmts,
const unsigned int *cd_values, size_t nbytes, size_t *buf_size, void **buf);
@@ -78,11 +92,10 @@ const H5Z_class2_t H5Z_BOGUS2[1] = {{
/*-------------------------------------------------------------------------
* Function: test_direct_chunk_write
*
- * Purpose: Test the basic functionality of H5DOwrite_chunk
- *
- * Return: Success: 0
+ * Purpose: Test the basic functionality of H5DOwrite_chunk
*
- * Failure: 1
+ * Return: Success: 0
+ * Failure: 1
*
* Programmer: Raymond Lu
* 30 November 2012
@@ -111,11 +124,11 @@ test_direct_chunk_write (hid_t file)
size_t buf_size = CHUNK_NX*CHUNK_NY*sizeof(int);
const Bytef *z_src = (const Bytef*)(direct_buf);
- Bytef *z_dst; /*destination buffer */
+ Bytef *z_dst = NULL; /*destination buffer */
uLongf z_dst_nbytes = (uLongf)DEFLATE_SIZE_ADJUST(buf_size);
uLong z_src_nbytes = (uLong)buf_size;
- int aggression = 9; /* Compression aggression setting */
- void *outbuf = NULL; /* Pointer to new buffer */
+ int aggression = 9; /* Compression aggression setting */
+ void *outbuf = NULL; /* Pointer to new buffer */
hsize_t start[2]; /* Start of hyperslab */
hsize_t stride[2]; /* Stride of hyperslab */
@@ -183,13 +196,13 @@ test_direct_chunk_write (hid_t file)
/* Check for various zlib errors */
if(Z_BUF_ERROR == ret) {
- fprintf(stderr, "overflow");
+ HDfprintf(stderr, "overflow");
goto error;
} else if(Z_MEM_ERROR == ret) {
- fprintf(stderr, "deflate memory error");
+ HDfprintf(stderr, "deflate memory error");
goto error;
} else if(Z_OK != ret) {
- fprintf(stderr, "other deflate error");
+ HDfprintf(stderr, "other deflate error");
goto error;
}
@@ -234,9 +247,9 @@ test_direct_chunk_write (hid_t file)
for(i = 0; i < CHUNK_NX; i++) {
for(j = 0; j < CHUNK_NY; j++) {
if(direct_buf[i][j] != check_chunk[i][j]) {
- printf(" 1. Read different values than written.");
- printf(" At index %d,%d\n", i, j);
- printf(" direct_buf=%d, check_chunk=%d\n", direct_buf[i][j], check_chunk[i][j]);
+ HDprintf(" 1. Read different values than written.");
+ HDprintf(" At index %d,%d\n", i, j);
+ HDprintf(" direct_buf=%d, check_chunk=%d\n", direct_buf[i][j], check_chunk[i][j]);
goto error;
}
}
@@ -256,13 +269,13 @@ test_direct_chunk_write (hid_t file)
/* Check for various zlib errors */
if(Z_BUF_ERROR == ret) {
- fprintf(stderr, "overflow");
+ HDfprintf(stderr, "overflow");
goto error;
} else if(Z_MEM_ERROR == ret) {
- fprintf(stderr, "deflate memory error");
+ HDfprintf(stderr, "deflate memory error");
goto error;
} else if(Z_OK != ret) {
- fprintf(stderr, "other deflate error");
+ HDfprintf(stderr, "other deflate error");
goto error;
}
@@ -298,9 +311,9 @@ test_direct_chunk_write (hid_t file)
for(i = 0; i < CHUNK_NX; i++) {
for(j = 0; j < CHUNK_NY; j++) {
if(direct_buf[i][j] != check_chunk[i][j]) {
- printf(" 2. Read different values than written.");
- printf(" At index %d,%d\n", i, j);
- printf(" direct_buf=%d, check_chunk=%d\n", direct_buf[i][j], check_chunk[i][j]);
+ HDprintf(" 2. Read different values than written.");
+ HDprintf(" At index %d,%d\n", i, j);
+ HDprintf(" direct_buf=%d, check_chunk=%d\n", direct_buf[i][j], check_chunk[i][j]);
goto error;
}
}
@@ -330,19 +343,125 @@ error:
if(outbuf)
HDfree(outbuf);
+ H5_FAILED();
return 1;
-}
+} /* test_direct_chunk_write() */
#endif /* H5_HAVE_FILTER_DEFLATE */
/*-------------------------------------------------------------------------
+ * Function: test_direct_chunk_overwrite_data
+ *
+ * Purpose: Test overwriting a chunk with new data.
+ *
+ * Return: Success: 0
+ * Failure: 1
+ *
+ * Programmer: Dana Robinson
+ * Spring 2017
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+test_direct_chunk_overwrite_data(hid_t fid)
+{
+ size_t buf_size = OVERWRITE_CHUNK_NX * OVERWRITE_CHUNK_NY * sizeof(int16_t);
+ int16_t data_buf[OVERWRITE_CHUNK_NY][OVERWRITE_CHUNK_NX];
+ int16_t overwrite_buf[OVERWRITE_CHUNK_NY][OVERWRITE_CHUNK_NX];
+ uint32_t filter_mask = 0;
+ hid_t tid = H5T_NATIVE_UINT16;
+ hid_t dcpl_id = -1;
+ hid_t sid = -1;
+ hid_t did = -1;
+ uint16_t fill_value = 0;
+ hsize_t dset_dims[] = {1, OVERWRITE_CHUNK_NY, OVERWRITE_CHUNK_2NX};
+ hsize_t dset_max_dims[] = {H5S_UNLIMITED, OVERWRITE_CHUNK_NY, OVERWRITE_CHUNK_2NX};
+ hsize_t chunk_dims[] = {1, OVERWRITE_CHUNK_NY, OVERWRITE_CHUNK_NX};
+ hsize_t offset[] = {0, 0, 0};
+ hsize_t i, j;
+ int16_t n;
+ int16_t read_buf[OVERWRITE_CHUNK_NY][OVERWRITE_CHUNK_2NX];
+
+ TESTING("overwriting existing data with H5DOwrite_chunk");
+
+ /* Create the dataset's data space */
+ if ((sid = H5Screate_simple(OVERWRITE_NDIMS, dset_dims, dset_max_dims)) < 0)
+ FAIL_STACK_ERROR
+
+    /* Set chunk size and fill value */
+ if ((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ FAIL_STACK_ERROR
+ if (H5Pset_fill_value(dcpl_id, tid, &fill_value) < 0)
+ FAIL_STACK_ERROR
+ if (H5Pset_chunk(dcpl_id, OVERWRITE_NDIMS, chunk_dims) < 0)
+ FAIL_STACK_ERROR
+
+ /* Create dataset */
+ if ((did = H5Dcreate2(fid, DATASETNAME7, tid, sid, H5P_DEFAULT, dcpl_id, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Initialize data buffers */
+ n = 0;
+ for (i = 0; i < OVERWRITE_CHUNK_NY; i++) {
+ for (j = 0; j < OVERWRITE_CHUNK_NX; j++) {
+ data_buf[i][j] = n++;
+ overwrite_buf[i][j] = OVERWRITE_VALUE;
+ }
+ }
+
+ /* Write chunk data using the direct write function. */
+ if (H5DOwrite_chunk(did, H5P_DEFAULT, filter_mask, offset, buf_size, data_buf) < 0)
+ FAIL_STACK_ERROR
+
+ /* Write second chunk. */
+ offset[2] = OVERWRITE_CHUNK_NX;
+ if (H5DOwrite_chunk(did, H5P_DEFAULT, filter_mask, offset, buf_size, data_buf) < 0)
+ FAIL_STACK_ERROR
+
+ /* Overwrite first chunk. */
+ offset[2] = 0;
+ if (H5DOwrite_chunk(did, H5P_DEFAULT, filter_mask, offset, buf_size, overwrite_buf) < 0)
+ FAIL_STACK_ERROR
+
+ /* Read the data back out */
+ if (H5Dread(did, tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, read_buf) < 0)
+ FAIL_STACK_ERROR
+
+ /* Ensure that the data are correct in chunk 1 */
+ for (i = 0; i < OVERWRITE_CHUNK_NY; i++)
+ for (j = 0; j < OVERWRITE_CHUNK_NX; j++) {
+ if (read_buf[i][j] != OVERWRITE_VALUE)
+ TEST_ERROR
+ }
+
+ if (H5Pclose(dcpl_id) < 0)
+ FAIL_STACK_ERROR
+ if (H5Sclose(sid) < 0)
+ FAIL_STACK_ERROR
+ if (H5Dclose(did) < 0)
+ FAIL_STACK_ERROR
+
+ PASSED();
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Pclose(dcpl_id);
+ H5Sclose(sid);
+ H5Dclose(did);
+ } H5E_END_TRY;
+
+ H5_FAILED();
+ return 1;
+} /* end test_direct_chunk_overwrite_data() */
+
+/*-------------------------------------------------------------------------
* Function: test_skip_compress_write1
*
* Purpose: Test skipping compression filter when it is the only filter
* for the dataset
*
* Return: Success: 0
- *
- * Failure: 1
+ * Failure: 1
*
* Programmer: Raymond Lu
* 30 November 2012
@@ -355,7 +474,7 @@ test_skip_compress_write1(hid_t file)
hid_t dataspace = -1, dataset = -1;
hid_t mem_space = -1;
hid_t cparms = -1, dxpl = -1;
- hsize_t dims[2] = {NX, NY};
+ hsize_t dims[2] = {NX, NY};
hsize_t maxdims[2] = {H5S_UNLIMITED, H5S_UNLIMITED};
hsize_t chunk_dims[2] ={CHUNK_NX, CHUNK_NY};
herr_t status;
@@ -368,12 +487,16 @@ test_skip_compress_write1(hid_t file)
size_t buf_size = CHUNK_NX*CHUNK_NY*sizeof(int);
int aggression = 9; /* Compression aggression setting */
+ unsigned read_filter_mask = 0; /* filter mask after direct read */
+ int read_direct_buf[CHUNK_NX][CHUNK_NY];
+ hsize_t read_buf_size = 0; /* buf size */
+
hsize_t start[2]; /* Start of hyperslab */
hsize_t stride[2]; /* Stride of hyperslab */
hsize_t count[2]; /* Block count */
hsize_t block[2]; /* Block sizes */
- TESTING("skipping compression filter for H5DOwrite_chunk");
+ TESTING("skipping compression filter for H5DOwrite_chunk/H5DOread_chunk");
/*
* Create the data space with unlimited dimensions.
@@ -410,11 +533,11 @@ test_skip_compress_write1(hid_t file)
/* Initialize data for one chunk */
for(i = n = 0; i < CHUNK_NX; i++)
for(j = 0; j < CHUNK_NY; j++) {
- direct_buf[i][j] = n++;
+ direct_buf[i][j] = n++;
}
- /* write the uncompressed chunk data repeatedly to dataset, using the direct writing function.
- * Indicate skipping the compression filter. */
+ /* write the uncompressed chunk data repeatedly to dataset, using the direct writing function.
+ * Indicate skipping the compression filter. */
offset[0] = CHUNK_NX;
offset[1] = CHUNK_NY;
@@ -423,7 +546,7 @@ test_skip_compress_write1(hid_t file)
if((status = H5DOwrite_chunk(dataset, dxpl, filter_mask, offset, buf_size, direct_buf)) < 0)
goto error;
- if(H5Fflush(dataset, H5F_SCOPE_LOCAL) < 0)
+ if(H5Fflush(dataset, H5F_SCOPE_LOCAL) < 0)
goto error;
if(H5Dclose(dataset) < 0)
@@ -450,9 +573,34 @@ test_skip_compress_write1(hid_t file)
for(i = 0; i < CHUNK_NX; i++) {
for(j = 0; j < CHUNK_NY; j++) {
if(direct_buf[i][j] != check_chunk[i][j]) {
- printf(" 1. Read different values than written.");
- printf(" At index %d,%d\n", i, j);
- printf(" direct_buf=%d, check_chunk=%d\n", direct_buf[i][j], check_chunk[i][j]);
+ HDprintf(" 1. Read different values than written.");
+ HDprintf(" At index %d,%d\n", i, j);
+ HDprintf(" direct_buf=%d, check_chunk=%d\n", direct_buf[i][j], check_chunk[i][j]);
+ goto error;
+ }
+ }
+ }
+
+ /* Query chunk storage size */
+ if((status = H5Dget_chunk_storage_size(dataset, offset, &read_buf_size)) < 0)
+ goto error;
+ if(read_buf_size != buf_size)
+ goto error;
+
+ /* Read the raw chunk back */
+ HDmemset(&read_direct_buf, 0, sizeof(read_direct_buf));
+ if((status = H5DOread_chunk(dataset, H5P_DEFAULT, offset, &read_filter_mask, read_direct_buf)) < 0)
+ goto error;
+ if(read_filter_mask != filter_mask)
+ goto error;
+
+ /* Check that the direct chunk read is the same as the chunk written */
+ for(i = 0; i < CHUNK_NX; i++) {
+ for(j = 0; j < CHUNK_NY; j++) {
+ if(direct_buf[i][j] != read_direct_buf[i][j]) {
+ HDprintf(" 1. Read different values than written.");
+ HDprintf(" At index %d,%d\n", i, j);
+ HDprintf(" direct_buf=%d, read_direct_buf=%d\n", direct_buf[i][j], read_direct_buf[i][j]);
goto error;
}
}
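
The lines added above follow the read-back pattern this file now uses throughout: query the on-disk chunk size first, then fetch the raw chunk and its filter mask. A condensed sketch of that sequence, under the same assumptions as the test (deflate skipped for this chunk); the local names nbytes, mask and raw_buf are illustrative, not from the patch:

    hsize_t  nbytes = 0;                     /* stored size of the chunk      */
    unsigned mask   = 0;                     /* filters skipped for the chunk */
    int      raw_buf[CHUNK_NX][CHUNK_NY];

    if(H5Dget_chunk_storage_size(dataset, offset, &nbytes) < 0)
        goto error;
    /* The deflate filter was skipped for this chunk, so the stored size
     * should equal the uncompressed chunk size. */
    if(nbytes != CHUNK_NX * CHUNK_NY * sizeof(int))
        goto error;

    if(H5DOread_chunk(dataset, H5P_DEFAULT, offset, &mask, raw_buf) < 0)
        goto error;
    /* mask should match the mask that was passed to H5DOwrite_chunk */
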
@@ -466,7 +614,7 @@ test_skip_compress_write1(hid_t file)
H5Sclose(dataspace);
H5Pclose(cparms);
H5Pclose(dxpl);
-
+
PASSED();
return 0;
@@ -479,15 +627,16 @@ error:
H5Pclose(dxpl);
} H5E_END_TRY;
+ H5_FAILED();
return 1;
-}
+} /* test_skip_compress_write1() */
/*-------------------------------------------------------------------------
* Function: filter_bogus1
*
- * Purpose: A bogus filte that adds ADD_ON to the original value
+ * Purpose: A bogus filter that adds ADD_ON to the original value
*
- * Return: Success: Data chunk size
+ * Return: Success: Data chunk size
*
* Programmer: Raymond Lu
* 30 November 2012
@@ -518,7 +667,7 @@ filter_bogus1(unsigned int flags, size_t H5_ATTR_UNUSED cd_nelmts,
} /* end else */
return nbytes;
-}
+} /* filter_bogus1() */
/*-------------------------------------------------------------------------
* Function: filter_bogus2
@@ -555,7 +704,7 @@ filter_bogus2(unsigned int flags, size_t H5_ATTR_UNUSED cd_nelmts,
} /* end else */
return nbytes;
-}
+} /* filter_bogus2() */
/*-------------------------------------------------------------------------
* Function: test_skip_compress_write2
@@ -564,8 +713,7 @@ filter_bogus2(unsigned int flags, size_t H5_ATTR_UNUSED cd_nelmts,
* for the dataset
*
* Return: Success: 0
- *
- * Failure: 1
+ * Failure: 1
*
* Programmer: Raymond Lu
* 30 November 2012
@@ -578,24 +726,28 @@ test_skip_compress_write2(hid_t file)
hid_t dataspace = -1, dataset = -1;
hid_t mem_space = -1;
hid_t cparms = -1, dxpl = -1;
- hsize_t dims[2] = {NX, NY};
+ hsize_t dims[2] = {NX, NY};
hsize_t maxdims[2] = {H5S_UNLIMITED, H5S_UNLIMITED};
hsize_t chunk_dims[2] ={CHUNK_NX, CHUNK_NY};
herr_t status;
int i, j, n;
- unsigned filter_mask = 0;
+ unsigned filter_mask = 0; /* orig filter mask */
int origin_direct_buf[CHUNK_NX][CHUNK_NY];
int direct_buf[CHUNK_NX][CHUNK_NY];
int check_chunk[CHUNK_NX][CHUNK_NY];
hsize_t offset[2] = {0, 0};
size_t buf_size = CHUNK_NX*CHUNK_NY*sizeof(int);
- int aggression = 9; /* Compression aggression setting */
+ int aggression = 9; /* Compression aggression setting */
- hsize_t start[2]; /* Start of hyperslab */
- hsize_t stride[2]; /* Stride of hyperslab */
- hsize_t count[2]; /* Block count */
- hsize_t block[2]; /* Block sizes */
+ unsigned read_filter_mask = 0; /* filter mask after direct read */
+ int read_direct_buf[CHUNK_NX][CHUNK_NY];
+ hsize_t read_buf_size = 0; /* buf size */
+
+ hsize_t start[2]; /* Start of hyperslab */
+ hsize_t stride[2]; /* Stride of hyperslab */
+ hsize_t count[2]; /* Block count */
+ hsize_t block[2]; /* Block sizes */
TESTING("skipping compression filters but keep two other filters");
@@ -619,22 +771,22 @@ test_skip_compress_write2(hid_t file)
goto error;
/* Register and enable first bogus filter */
- if(H5Zregister (H5Z_BOGUS1) < 0)
- goto error;
+ if(H5Zregister (H5Z_BOGUS1) < 0)
+ goto error;
- if(H5Pset_filter(cparms, H5Z_FILTER_BOGUS1, 0, (size_t)0, NULL) < 0)
- goto error;
+ if(H5Pset_filter(cparms, H5Z_FILTER_BOGUS1, 0, (size_t)0, NULL) < 0)
+ goto error;
/* Enable compression filter */
if((status = H5Pset_deflate( cparms, (unsigned) aggression)) < 0)
goto error;
/* Register and enable second bogus filter */
- if(H5Zregister (H5Z_BOGUS2) < 0)
- goto error;
+ if(H5Zregister (H5Z_BOGUS2) < 0)
+ goto error;
- if(H5Pset_filter(cparms, H5Z_FILTER_BOGUS2, 0, (size_t)0, NULL) < 0)
- goto error;
+ if(H5Pset_filter(cparms, H5Z_FILTER_BOGUS2, 0, (size_t)0, NULL) < 0)
+ goto error;
/*
* Create a new dataset within the file using cparms
@@ -650,12 +802,12 @@ test_skip_compress_write2(hid_t file)
/* Initialize data for one chunk. Apply operations of two bogus filters to the chunk */
for(i = n = 0; i < CHUNK_NX; i++)
for(j = 0; j < CHUNK_NY; j++) {
- origin_direct_buf[i][j] = n++;
- direct_buf[i][j] = (origin_direct_buf[i][j] + ADD_ON) * FACTOR;
- }
+ origin_direct_buf[i][j] = n++;
+ direct_buf[i][j] = (origin_direct_buf[i][j] + ADD_ON) * FACTOR;
+ }
- /* write the uncompressed chunk data repeatedly to dataset, using the direct writing function.
- * Indicate skipping the compression filter but keep the other two bogus filters */
+ /* write the uncompressed chunk data repeatedly to dataset, using the direct writing function.
+ * Indicate skipping the compression filter but keep the other two bogus filters */
offset[0] = CHUNK_NX;
offset[1] = CHUNK_NY;
@@ -665,7 +817,7 @@ test_skip_compress_write2(hid_t file)
if((status = H5DOwrite_chunk(dataset, dxpl, filter_mask, offset, buf_size, direct_buf)) < 0)
goto error;
- if(H5Fflush(dataset, H5F_SCOPE_LOCAL) < 0)
+ if(H5Fflush(dataset, H5F_SCOPE_LOCAL) < 0)
goto error;
if(H5Dclose(dataset) < 0)
@@ -692,9 +844,34 @@ test_skip_compress_write2(hid_t file)
for(i = 0; i < CHUNK_NX; i++) {
for(j = 0; j < CHUNK_NY; j++) {
if(origin_direct_buf[i][j] != check_chunk[i][j]) {
- printf(" 1. Read different values than written.");
- printf(" At index %d,%d\n", i, j);
- printf(" origin_direct_buf=%d, check_chunk=%d\n", origin_direct_buf[i][j], check_chunk[i][j]);
+ HDprintf(" 1. Read different values than written.");
+ HDprintf(" At index %d,%d\n", i, j);
+ HDprintf(" origin_direct_buf=%d, check_chunk=%d\n", origin_direct_buf[i][j], check_chunk[i][j]);
+ goto error;
+ }
+ }
+ }
+
+ /* Query chunk storage size */
+ if((status = H5Dget_chunk_storage_size(dataset, offset, &read_buf_size)) < 0)
+ goto error;
+ if(read_buf_size != buf_size)
+ goto error;
+
+ /* Read the raw chunk back */
+ HDmemset(&read_direct_buf, 0, sizeof(read_direct_buf));
+ if((status = H5DOread_chunk(dataset, H5P_DEFAULT, offset, &read_filter_mask, read_direct_buf)) < 0)
+ goto error;
+ if(read_filter_mask != filter_mask)
+ goto error;
+
+ /* Check that the direct chunk read is the same as the chunk written */
+ for(i = 0; i < CHUNK_NX; i++) {
+ for(j = 0; j < CHUNK_NY; j++) {
+ if(direct_buf[i][j] != read_direct_buf[i][j]) {
+ HDprintf(" 1. Read different values than written.");
+ HDprintf(" At index %d,%d\n", i, j);
+ HDprintf(" direct_buf=%d, read_direct_buf=%d\n", direct_buf[i][j], read_direct_buf[i][j]);
goto error;
}
}
@@ -708,7 +885,7 @@ test_skip_compress_write2(hid_t file)
H5Sclose(dataspace);
H5Pclose(cparms);
H5Pclose(dxpl);
-
+
PASSED();
return 0;
@@ -721,8 +898,9 @@ error:
H5Pclose(dxpl);
} H5E_END_TRY;
+ H5_FAILED();
return 1;
-}
+} /* test_skip_compress_write2() */
/*-------------------------------------------------------------------------
* Function: test_data_conv
@@ -730,8 +908,7 @@ error:
* Purpose: Test data conversion
*
* Return: Success: 0
- *
- * Failure: 1
+ * Failure: 1
*
* Programmer: Raymond Lu
* 30 November 2012
@@ -742,10 +919,10 @@ static int
test_data_conv(hid_t file)
{
typedef struct {
- int a, b, c[4], d, e;
+ int a, b, c[4], d, e;
} src_type_t;
typedef struct {
- int a, c[4], e;
+ int a, c[4], e;
} dst_type_t;
hid_t dataspace = -1, dataset = -1;
@@ -763,6 +940,8 @@ test_data_conv(hid_t file)
unsigned filter_mask = 0;
src_type_t direct_buf[CHUNK_NX][CHUNK_NY];
dst_type_t check_chunk[CHUNK_NX][CHUNK_NY];
+ src_type_t read_chunk[CHUNK_NX][CHUNK_NY]; /* For H5DOread_chunk */
+
hsize_t offset[2] = {0, 0};
size_t buf_size = CHUNK_NX*CHUNK_NY*sizeof(src_type_t);
@@ -771,7 +950,7 @@ test_data_conv(hid_t file)
hsize_t count[2]; /* Block count */
hsize_t block[2]; /* Block sizes */
- TESTING("data conversion for H5DOwrite_chunk");
+ TESTING("data conversion for H5DOwrite_chunk/H5DOread_chunk");
/*
* Create the data space with unlimited dimensions.
@@ -856,6 +1035,35 @@ test_data_conv(hid_t file)
if((dataset = H5Dopen2(file, DATASETNAME4, H5P_DEFAULT)) < 0)
goto error;
+ /* Use H5DOread_chunk() to read the uncompressed data */
+ if((status = H5DOread_chunk(dataset, dxpl, offset, &filter_mask, read_chunk)) < 0)
+ goto error;
+
+ /* Check that the values read are the same as the values written */
+ for(i = 0; i < CHUNK_NX; i++) {
+ for(j = 0; j < CHUNK_NY; j++) {
+ if ((direct_buf[i][j]).a != (read_chunk[i][j]).a ||
+ (direct_buf[i][j]).b != (read_chunk[i][j]).b ||
+ (direct_buf[i][j]).c[0] != (read_chunk[i][j]).c[0] ||
+ (direct_buf[i][j]).c[1] != (read_chunk[i][j]).c[1] ||
+ (direct_buf[i][j]).c[2] != (read_chunk[i][j]).c[2] ||
+ (direct_buf[i][j]).c[3] != (read_chunk[i][j]).c[3] ||
+ (direct_buf[i][j]).d != (read_chunk[i][j]).d ||
+ (direct_buf[i][j]).e != (read_chunk[i][j]).e) {
+ HDprintf(" 1. Read different values than written.");
+ HDprintf(" At index %d,%d\n", i, j);
+ HDprintf(" src={a=%d, b=%d, c=[%d,%d,%d,%d], d=%d, e=%d\n",
+ (direct_buf[i][j]).a, (direct_buf[i][j]).b, (direct_buf[i][j]).c[0], (direct_buf[i][j]).c[1],
+ (direct_buf[i][j]).c[2], (direct_buf[i][j]).c[3], (direct_buf[i][j]).d, (direct_buf[i][j]).e);
+ HDprintf(" dst={a=%d, b=%d, c=[%d,%d,%d,%d], d=%d, e=%d\n",
+ (read_chunk[i][j]).a, (read_chunk[i][j]).b, (read_chunk[i][j]).c[0], (read_chunk[i][j]).c[1],
+ (read_chunk[i][j]).c[2], (read_chunk[i][j]).c[3], (read_chunk[i][j]).d, (read_chunk[i][j]).e);
+
+ goto error;
+ }
+ }
+ }
+
/*
* Select hyperslab for the chunk just written in the file
*/
@@ -873,22 +1081,22 @@ test_data_conv(hid_t file)
/* Check that the values read are the same as the values written */
for(i = 0; i < CHUNK_NX; i++) {
for(j = 0; j < CHUNK_NY; j++) {
- if ((direct_buf[i][j]).a != (check_chunk[i][j]).a ||
- (direct_buf[i][j]).c[0] != (check_chunk[i][j]).c[0] ||
- (direct_buf[i][j]).c[1] != (check_chunk[i][j]).c[1] ||
- (direct_buf[i][j]).c[2] != (check_chunk[i][j]).c[2] ||
- (direct_buf[i][j]).c[3] != (check_chunk[i][j]).c[3] ||
- (direct_buf[i][j]).e != (check_chunk[i][j]).e) {
- printf(" 1. Read different values than written.");
- printf(" At index %d,%d\n", i, j);
- printf(" src={a=%d, b=%d, c=[%d,%d,%d,%d], d=%d, e=%d\n",
- (direct_buf[i][j]).a, (direct_buf[i][j]).b, (direct_buf[i][j]).c[0], (direct_buf[i][j]).c[1],
- (direct_buf[i][j]).c[2], (direct_buf[i][j]).c[3], (direct_buf[i][j]).d, (direct_buf[i][j]).e);
- printf(" dst={a=%d, c=[%d,%d,%d,%d], e=%d\n",
- (check_chunk[i][j]).a, (check_chunk[i][j]).c[0], (check_chunk[i][j]).c[1], (check_chunk[i][j]).c[2],
- (check_chunk[i][j]).c[3], (check_chunk[i][j]).e);
-
- goto error;
+ if ((direct_buf[i][j]).a != (check_chunk[i][j]).a ||
+ (direct_buf[i][j]).c[0] != (check_chunk[i][j]).c[0] ||
+ (direct_buf[i][j]).c[1] != (check_chunk[i][j]).c[1] ||
+ (direct_buf[i][j]).c[2] != (check_chunk[i][j]).c[2] ||
+ (direct_buf[i][j]).c[3] != (check_chunk[i][j]).c[3] ||
+ (direct_buf[i][j]).e != (check_chunk[i][j]).e) {
+ HDprintf(" 1. Read different values than written.");
+ HDprintf(" At index %d,%d\n", i, j);
+ HDprintf(" src={a=%d, b=%d, c=[%d,%d,%d,%d], d=%d, e=%d\n",
+ (direct_buf[i][j]).a, (direct_buf[i][j]).b, (direct_buf[i][j]).c[0], (direct_buf[i][j]).c[1],
+ (direct_buf[i][j]).c[2], (direct_buf[i][j]).c[3], (direct_buf[i][j]).d, (direct_buf[i][j]).e);
+ HDprintf(" dst={a=%d, c=[%d,%d,%d,%d], e=%d\n",
+ (check_chunk[i][j]).a, (check_chunk[i][j]).c[0], (check_chunk[i][j]).c[1], (check_chunk[i][j]).c[2],
+ (check_chunk[i][j]).c[3], (check_chunk[i][j]).e);
+
+ goto error;
}
}
}
@@ -904,7 +1112,6 @@ test_data_conv(hid_t file)
H5Tclose(st);
H5Tclose(dt);
-
PASSED();
return 0;
@@ -919,17 +1126,17 @@ error:
H5Tclose(dt);
} H5E_END_TRY;
+ H5_FAILED();
return 1;
-}
+} /* test_data_conv() */
/*-------------------------------------------------------------------------
* Function: test_invalid_parameters
*
- * Purpose: Test invalid parameters for H5DOwrite_chunk
+ * Purpose: Test invalid parameters for H5DOwrite_chunk and H5DOread_chunk
*
- * Return: Success: 0
- *
- * Failure: 1
+ * Return: Success: 0
+ * Failure: 1
*
* Programmer: Raymond Lu
* 30 November 2012
@@ -953,7 +1160,9 @@ test_invalid_parameters(hid_t file)
size_t buf_size = CHUNK_NX*CHUNK_NY*sizeof(int);
int aggression = 9; /* Compression aggression setting */
- TESTING("invalid parameters for H5DOwrite_chunk");
+ hsize_t chunk_nbytes; /* Chunk size */
+
+ TESTING("invalid parameters for H5DOwrite_chunk/H5DOread_chunk");
/*
* Create the data space with unlimited dimensions.
@@ -971,7 +1180,7 @@ test_invalid_parameters(hid_t file)
goto error;
/*
- * Create a new contiguous dataset to verify H5DOwrite_chunk doesn't work
+ * Create a new contiguous dataset to verify H5DOwrite_chunk/H5DOread_chunk don't work
*/
if((dataset = H5Dcreate2(file, DATASETNAME5, H5T_NATIVE_INT, dataspace, H5P_DEFAULT,
cparms, H5P_DEFAULT)) < 0)
@@ -983,8 +1192,8 @@ test_invalid_parameters(hid_t file)
/* Initialize data for one chunk */
for(i = n = 0; i < CHUNK_NX; i++)
for(j = 0; j < CHUNK_NY; j++) {
- direct_buf[i][j] = n++;
- }
+ direct_buf[i][j] = n++;
+ }
/* Try to write the chunk data to contiguous dataset. It should fail */
offset[0] = CHUNK_NX;
@@ -995,9 +1204,22 @@ test_invalid_parameters(hid_t file)
goto error;
} H5E_END_TRY;
+ /* Try to get chunk size for a contiguous dataset. It should fail */
+ H5E_BEGIN_TRY {
+ if((status = H5Dget_chunk_storage_size(dataset, offset, &chunk_nbytes)) != FAIL)
+ goto error;
+ } H5E_END_TRY;
+
+ /* Try to H5DOread_chunk from the contiguous dataset. It should fail */
+ H5E_BEGIN_TRY {
+ if((status = H5DOread_chunk(dataset, dxpl, offset, &filter_mask, direct_buf)) != FAIL)
+ goto error;
+ } H5E_END_TRY;
+
if(H5Dclose(dataset) < 0)
goto error;
+
/* Create a chunked dataset with compression filter */
if((status = H5Pset_chunk( cparms, RANK, chunk_dims)) < 0)
goto error;
@@ -1013,25 +1235,40 @@ test_invalid_parameters(hid_t file)
cparms, H5P_DEFAULT)) < 0)
goto error;
- /* Check invalid dataset ID */
+ /* Check invalid dataset ID for H5DOwrite_chunk and H5DOread_chunk */
H5E_BEGIN_TRY {
if((status = H5DOwrite_chunk((hid_t)-1, dxpl, filter_mask, offset, buf_size, direct_buf)) != FAIL)
goto error;
} H5E_END_TRY;
- /* Check invalid DXPL ID */
+ H5E_BEGIN_TRY {
+ if((status = H5DOread_chunk((hid_t)-1, dxpl, offset, &filter_mask, direct_buf)) != FAIL)
+ goto error;
+ } H5E_END_TRY;
+
+ /* Check invalid DXPL ID for H5DOwrite_chunk and H5DOread_chunk */
H5E_BEGIN_TRY {
if((status = H5DOwrite_chunk(dataset, (hid_t)-1, filter_mask, offset, buf_size, direct_buf)) != FAIL)
goto error;
} H5E_END_TRY;
- /* Check invalid OFFSET */
+ H5E_BEGIN_TRY {
+ if((status = H5DOread_chunk(dataset, (hid_t)-1, offset, &filter_mask, direct_buf)) != FAIL)
+ goto error;
+ } H5E_END_TRY;
+
+ /* Check invalid OFFSET for H5DOwrite_chunk and H5DOread_chunk */
H5E_BEGIN_TRY {
if((status = H5DOwrite_chunk(dataset, dxpl, filter_mask, NULL, buf_size, direct_buf)) != FAIL)
goto error;
} H5E_END_TRY;
- /* Check when OFFSET is out of dataset range */
+ H5E_BEGIN_TRY {
+ if((status = H5DOread_chunk(dataset, dxpl, NULL, &filter_mask, direct_buf)) != FAIL)
+ goto error;
+ } H5E_END_TRY;
+
+ /* Check when OFFSET is out of dataset range for H5DOwrite_chunk and H5DOread_chunk */
offset[0] = NX + 1;
offset[1] = NY;
H5E_BEGIN_TRY {
@@ -1039,7 +1276,12 @@ test_invalid_parameters(hid_t file)
goto error;
} H5E_END_TRY;
- /* Check when OFFSET is not on chunk boundary */
+ H5E_BEGIN_TRY {
+ if((status = H5DOread_chunk(dataset, dxpl, offset, &filter_mask, direct_buf)) != FAIL)
+ goto error;
+ } H5E_END_TRY;
+
+ /* Check when OFFSET is not on chunk boundary for H5DOwrite_chunk and H5DOread_chunk */
offset[0] = CHUNK_NX;
offset[1] = CHUNK_NY + 1;
H5E_BEGIN_TRY {
@@ -1047,7 +1289,12 @@ test_invalid_parameters(hid_t file)
goto error;
} H5E_END_TRY;
- /* Check invalid buffer size */
+ H5E_BEGIN_TRY {
+ if((status = H5DOread_chunk(dataset, dxpl, offset, &filter_mask, direct_buf)) != FAIL)
+ goto error;
+ } H5E_END_TRY;
+
+ /* Check invalid buffer size for H5DOwrite_chunk only */
offset[0] = CHUNK_NX;
offset[1] = CHUNK_NY;
buf_size = 0;
@@ -1056,13 +1303,18 @@ test_invalid_parameters(hid_t file)
goto error;
} H5E_END_TRY;
- /* Check invalid data buffer */
+ /* Check invalid data buffer for H5DOwrite_chunk and H5DOread_chunk */
buf_size = CHUNK_NX*CHUNK_NY*sizeof(int);
H5E_BEGIN_TRY {
if((status = H5DOwrite_chunk(dataset, dxpl, filter_mask, offset, buf_size, NULL)) != FAIL)
goto error;
} H5E_END_TRY;
+ H5E_BEGIN_TRY {
+ if((status = H5DOread_chunk(dataset, dxpl, offset, &filter_mask, NULL)) != FAIL)
+ goto error;
+ } H5E_END_TRY;
+
if(H5Dclose(dataset) < 0)
goto error;
@@ -1086,17 +1338,644 @@ error:
H5Pclose(dxpl);
} H5E_END_TRY;
+ H5_FAILED();
return 1;
-}
+} /* test_invalid_parameters() */
/*-------------------------------------------------------------------------
- * Function: Main function
+ * Function: test_direct_chunk_read_no_cache
*
- * Purpose: Test direct chunk write function H5DOwrite_chunk
+ * Purpose: Test the basic functionality of H5DOread_chunk with the
+ *              chunk cache disabled.
*
- * Return: Success: 0
+ * Return: Success: 0
+ * Failure: 1
+ *
+ * Programmer: Matthew Strong (GE Healthcare)
+ * 14 February 2016
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifdef H5_HAVE_FILTER_DEFLATE
+static int
+test_direct_chunk_read_no_cache (hid_t file)
+{
+ hid_t dataspace = -1, dataset = -1;
+ hid_t mem_space = -1;
+ hid_t cparms = -1, dxpl = -1, dapl = -1;
+ hsize_t dims[2] = {NX, NY};
+ hsize_t maxdims[2] = {H5S_UNLIMITED, H5S_UNLIMITED};
+ hsize_t chunk_dims[2] = {CHUNK_NX, CHUNK_NY};
+ herr_t status; /* status from H5 function calls */
+ int ret; /* deflate return status */
+ int data[NX][NY];
+ int i, j, k, l, n; /* local index variables */
+
+ unsigned filter_mask = 0; /* filter mask returned from H5DOread_chunk */
+ int direct_buf[CHUNK_NX][CHUNK_NY]; /* chunk read with H5DOread and manually decompressed */
+ int check_chunk[CHUNK_NX][CHUNK_NY]; /* chunk read with H5Dread */
+ hsize_t offset[2]; /* chunk offset used for H5DOread_chunk */
+ size_t buf_size = CHUNK_NX*CHUNK_NY*sizeof(int);
+
+ Bytef *z_src = NULL; /* source buffer */
+ uLongf z_src_nbytes = (uLongf)DEFLATE_SIZE_ADJUST(buf_size);
+ Bytef *z_dst = (Bytef*)(direct_buf);
+ uLong z_dst_nbytes = (uLong)buf_size;
+ int aggression = 9; /* Compression aggression setting */
+ void *outbuf = NULL; /* Pointer to new buffer */
+
+ hsize_t start[2]; /* Start of hyperslab */
+ hsize_t stride[2]; /* Stride of hyperslab */
+ hsize_t count[2]; /* Block count */
+ hsize_t block[2]; /* Block sizes */
+
+ TESTING("basic functionality of H5DOread_chunk (chunk cache disabled)");
+
+ /* Create the data space with unlimited dimensions. */
+ if((dataspace = H5Screate_simple(RANK, dims, maxdims)) < 0)
+ goto error;
+ if((mem_space = H5Screate_simple(RANK, chunk_dims, NULL)) < 0)
+ goto error;
+
+ /* Modify dataset creation properties, i.e. enable chunking and compression */
+ if((cparms = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ goto error;
+ if((status = H5Pset_chunk( cparms, RANK, chunk_dims)) < 0)
+ goto error;
+ if((status = H5Pset_deflate( cparms, (unsigned) aggression)) < 0)
+ goto error;
+ if((dapl = H5Pcreate(H5P_DATASET_ACCESS)) < 0)
+ goto error;
+
+ /* Disable chunk cache by setting number of slots to 0 */
+ if((status = H5Pset_chunk_cache(dapl, 0, H5D_CHUNK_CACHE_NBYTES_DEFAULT, H5D_CHUNK_CACHE_W0_DEFAULT)) < 0)
+ goto error;
+
+ /* Create a new dataset within the file using cparms creation properties. */
+ if((dataset = H5Dcreate2(file, DATASETNAME8, H5T_NATIVE_INT, dataspace, H5P_DEFAULT,
+ cparms, dapl)) < 0)
+ goto error;
+
+ /* Initialize the dataset */
+ for(i = n = 0; i < NX; i++)
+ for(j = 0; j < NY; j++)
+ data[i][j] = n++;
+
+ if((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0)
+ goto error;
+
+ /* Write the data for the dataset.
+ * Data will skip chunk cache and go directly to disk. */
+ if((status = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
+ dxpl, data)) < 0)
+ goto error;
+
+ /* Allocate output (compressed) buffer */
+ outbuf = HDmalloc(z_src_nbytes);
+ z_src = (Bytef *)outbuf;
+
+ /* For each chunk in the dataset, compare the result of H5Dread and H5DOread_chunk. */
+ for(i=0; i<NX/CHUNK_NX; i++) {
+ for(j=0; j<NY/CHUNK_NY; j++) {
+ /* Select hyperslab for one chunk in the file */
+ start[0] = (hsize_t)i * CHUNK_NX; start[1] = (hsize_t)j * CHUNK_NY;
+ stride[0] = 1; stride[1] = 1;
+ count[0] = 1; count[1] = 1;
+ block[0] = CHUNK_NX; block[1] = CHUNK_NY;
+
+ /* Hyperslab selection equals single chunk */
+ if((status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, start, stride, count, block)) < 0)
+ goto error;
+
+ /* Read the chunk back */
+ if((status = H5Dread(dataset, H5T_NATIVE_INT, mem_space, dataspace, H5P_DEFAULT, check_chunk)) < 0)
+ goto error;
+
+ offset[0] = (hsize_t)i * CHUNK_NX; offset[1] = (hsize_t)j * CHUNK_NY;
+ /* Read the compressed chunk back using the direct read function. */
+ if((status = H5DOread_chunk(dataset, dxpl, offset, &filter_mask, outbuf)) < 0)
+ goto error;
+
+ /* Check filter mask return value */
+ if(filter_mask != 0)
+ goto error;
+
+ /* Perform decompression from the source to the destination buffer */
+ ret = uncompress(z_dst, &z_dst_nbytes, z_src, z_src_nbytes);
+
+ /* Check for various zlib errors */
+ if(Z_BUF_ERROR == ret) {
+ HDfprintf(stderr, "overflow\n");
+ goto error;
+ } else if(Z_MEM_ERROR == ret) {
+ HDfprintf(stderr, "deflate memory error\n");
+ goto error;
+ } else if(Z_DATA_ERROR == ret) {
+ HDfprintf(stderr, "corrupted data\n");
+ goto error;
+ } else if(Z_OK != ret) {
+ HDfprintf(stderr, "other deflate error\n");
+ goto error;
+ }
+
+ /* Check that the decompressed values match those read from H5Dread */
+ for(k = 0; k < CHUNK_NX; k++) {
+ for(l = 0; l < CHUNK_NY; l++) {
+ if(direct_buf[k][l] != check_chunk[k][l]) {
+ HDprintf("\n 1. Read different values than written.");
+ HDprintf(" At index %d,%d\n", k, l);
+ HDprintf(" direct_buf=%d, check_chunk=%d\n", direct_buf[k][l], check_chunk[k][l]);
+ goto error;
+ }
+ }
+ }
+ }
+ }
+
+ /* Close/release resources. */
+ H5Dclose(dataset);
+ H5Sclose(mem_space);
+ H5Sclose(dataspace);
+ H5Pclose(cparms);
+ H5Pclose(dxpl);
+ H5Pclose(dapl);
+
+ if(outbuf)
+ HDfree(outbuf);
+
+ PASSED();
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Dclose(dataset);
+ H5Sclose(mem_space);
+ H5Sclose(dataspace);
+ H5Pclose(cparms);
+ H5Pclose(dxpl);
+ H5Pclose(dapl);
+ } H5E_END_TRY;
+
+ if(outbuf)
+ HDfree(outbuf);
+
+ H5_FAILED();
+ return 1;
+} /* test_direct_chunk_read_no_cache() */
+#endif /* H5_HAVE_FILTER_DEFLATE */
+
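
test_direct_chunk_read_no_cache() above forces every access to go to the file by disabling the raw-data chunk cache on the dataset access property list. The two property calls involved, pulled out as a sketch; dapl_id is an illustrative name, not taken from the patch:

    /* Zero cache slots disables the chunk cache entirely, so H5Dwrite and
     * H5Dread bypass it and hit the file directly. */
    hid_t dapl_id = H5Pcreate(H5P_DATASET_ACCESS);
    if(dapl_id < 0)
        goto error;
    if(H5Pset_chunk_cache(dapl_id, 0, H5D_CHUNK_CACHE_NBYTES_DEFAULT,
                          H5D_CHUNK_CACHE_W0_DEFAULT) < 0)
        goto error;
    /* ...pass dapl_id as the last argument of H5Dcreate2()/H5Dopen2()... */
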
+#ifdef H5_HAVE_FILTER_DEFLATE
+static int
+test_direct_chunk_read_cache (hid_t file, hbool_t flush)
+{
+ hid_t dataspace = -1, dataset = -1;
+ hid_t mem_space = -1;
+ hid_t cparms = -1, dxpl = -1;
+ hsize_t dims[2] = {NX, NY};
+ hsize_t maxdims[2] = {H5S_UNLIMITED, H5S_UNLIMITED};
+ hsize_t chunk_dims[2] = {CHUNK_NX, CHUNK_NY};
+ herr_t status; /* status from H5 function calls */
+ int ret; /* deflate return status */
+ int data[NX][NY];
+ int i, j, k, l, n; /* local index variables */
+
+ unsigned filter_mask = 0; /* filter mask returned from H5DOread_chunk */
+ int direct_buf[CHUNK_NX][CHUNK_NY]; /* chunk read with H5DOread and manually decompressed */
+ int check_chunk[CHUNK_NX][CHUNK_NY]; /* chunk read with H5Dread */
+ hsize_t offset[2]; /* chunk offset used for H5DOread_chunk */
+ size_t buf_size = CHUNK_NX*CHUNK_NY*sizeof(int);
+
+ Bytef *z_src = NULL; /* source buffer */
+ uLongf z_src_nbytes = (uLongf)DEFLATE_SIZE_ADJUST(buf_size);
+ Bytef *z_dst = (Bytef*)(direct_buf);
+ uLong z_dst_nbytes = (uLong)buf_size;
+ int aggression = 9; /* Compression aggression setting */
+ void *outbuf = NULL; /* Pointer to new buffer */
+ hsize_t read_buf_size = 0;
+
+ hsize_t start[2]; /* Start of hyperslab */
+ hsize_t stride[2]; /* Stride of hyperslab */
+ hsize_t count[2]; /* Block count */
+ hsize_t block[2]; /* Block sizes */
+
+ if(flush) {
+ TESTING("basic functionality of H5DOread_chunk (flush chunk cache)");
+ } else {
+ TESTING("basic functionality of H5DOread_chunk (does not flush chunk cache)");
+ }
+
+ /* Create the data space with unlimited dimensions. */
+ if((dataspace = H5Screate_simple(RANK, dims, maxdims)) < 0)
+ goto error;
+ if((mem_space = H5Screate_simple(RANK, chunk_dims, NULL)) < 0)
+ goto error;
+
+ /* Modify dataset creation properties, i.e. enable chunking and compression */
+ if((cparms = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ goto error;
+ if((status = H5Pset_chunk( cparms, RANK, chunk_dims)) < 0)
+ goto error;
+ if((status = H5Pset_deflate( cparms, (unsigned) aggression)) < 0)
+ goto error;
+
+ /* Create a new dataset within the file using cparms creation properties. */
+ if((dataset = H5Dcreate2(file, flush?DATASETNAME9:DATASETNAME10, H5T_NATIVE_INT, dataspace, H5P_DEFAULT,
+ cparms, H5P_DEFAULT)) < 0)
+ goto error;
+
+ /* Initialize the dataset */
+ for(i = n = 0; i < NX; i++)
+ for(j = 0; j < NY; j++)
+ data[i][j] = n++;
+
+ if((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0)
+ goto error;
+
+ /* Write the data for the dataset.
+ * It should stay in the chunk cache. */
+ if((status = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
+ dxpl, data)) < 0)
+ goto error;
+
+ if(flush) {
+ /* Flush the chunk cache to disk. Cache entry is not evicted. */
+ if(H5Fflush(dataset, H5F_SCOPE_LOCAL) < 0)
+ goto error;
+ }
+
+ /* Allocate output (compressed) buffer */
+ outbuf = HDmalloc(z_src_nbytes);
+ z_src = (Bytef *)outbuf;
+
+ /* For each chunk in the dataset, compare the result of H5Dread and H5DOread_chunk. */
+ for(i=0; i<NX/CHUNK_NX; i++) {
+ for(j=0; j<NY/CHUNK_NY; j++) {
+ /* Select hyperslab for one chunk in the file */
+ start[0] = (hsize_t)i * CHUNK_NX; start[1] = (hsize_t)j * CHUNK_NY;
+ stride[0] = 1; stride[1] = 1;
+ count[0] = 1; count[1] = 1;
+ block[0] = CHUNK_NX; block[1] = CHUNK_NY;
+
+ /* Hyperslab selection equals single chunk */
+ if((status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, start, stride, count, block)) < 0)
+ goto error;
+
+ /* Read the chunk back */
+ if((status = H5Dread(dataset, H5T_NATIVE_INT, mem_space, dataspace, H5P_DEFAULT, check_chunk)) < 0)
+ goto error;
+
+ offset[0] = (hsize_t)i * CHUNK_NX; offset[1] = (hsize_t)j * CHUNK_NY;
+
+ /* Query chunk storage size */
+ if((status = H5Dget_chunk_storage_size(dataset, offset, &read_buf_size)) < 0)
+ goto error;
+ if(read_buf_size == 0)
+ goto error;
+
+ /* Read the compressed chunk back using the direct read function. */
+ if((status = H5DOread_chunk(dataset, dxpl, offset, &filter_mask, outbuf)) < 0)
+ goto error;
+
+ /* Check filter mask return value */
+ if(filter_mask != 0)
+ goto error;
+
+ /* Perform decompression from the source to the destination buffer */
+ ret = uncompress(z_dst, &z_dst_nbytes, z_src, z_src_nbytes);
+
+ /* Check for various zlib errors */
+ if(Z_BUF_ERROR == ret) {
+ HDfprintf(stderr, "overflow\n");
+ goto error;
+ } else if(Z_MEM_ERROR == ret) {
+ HDfprintf(stderr, "deflate memory error\n");
+ goto error;
+ } else if(Z_DATA_ERROR == ret) {
+ HDfprintf(stderr, "corrupted data\n");
+ goto error;
+ } else if(Z_OK != ret) {
+ HDfprintf(stderr, "other deflate error\n");
+ goto error;
+ }
+
+ /* Check that the decompressed values match those read from H5Dread */
+ for(k = 0; k < CHUNK_NX; k++) {
+ for(l = 0; l < CHUNK_NY; l++) {
+ if(direct_buf[k][l] != check_chunk[k][l]) {
+ HDprintf("\n 1. Read different values than written.");
+ HDprintf(" At index %d,%d\n", k, l);
+ HDprintf(" direct_buf=%d, check_chunk=%d\n", direct_buf[k][l], check_chunk[k][l]);
+ goto error;
+ }
+ }
+ }
+ }
+ }
+
+ /* Close/release resources. */
+ H5Dclose(dataset);
+ H5Sclose(mem_space);
+ H5Sclose(dataspace);
+ H5Pclose(cparms);
+ H5Pclose(dxpl);
+
+ if(outbuf)
+ HDfree(outbuf);
+
+ PASSED();
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Dclose(dataset);
+ H5Sclose(mem_space);
+ H5Sclose(dataspace);
+ H5Pclose(cparms);
+ H5Pclose(dxpl);
+ } H5E_END_TRY;
+
+ if(outbuf)
+ HDfree(outbuf);
+
+ H5_FAILED();
+ return 1;
+} /* test_direct_chunk_read_cache() */
+#endif /* H5_HAVE_FILTER_DEFLATE */
+
+/*-------------------------------------------------------------------------
+ * Function: test_read_unfiltered_dset
+ *
+ * Purpose: Test the basic functionality of H5DOread_chunk on a dataset
+ *              with no filters applied.
*
- * Failure: 1
+ * Return: Success: 0
+ * Failure: 1
+ *
+ * Programmer: Matthew Strong (GE Healthcare)
+ * 30 November 2016
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+test_read_unfiltered_dset(hid_t file)
+{
+ hid_t dataspace = -1, dataset = -1;
+ hid_t mem_space = -1;
+ hid_t cparms = -1, dxpl = -1;
+ hsize_t dims[2] = {NX, NY};
+ hsize_t maxdims[2] = {H5S_UNLIMITED, H5S_UNLIMITED};
+ hsize_t chunk_dims[2] ={CHUNK_NX, CHUNK_NY};
+ herr_t status;
+ int data[NX][NY];
+ int i, j, k, l, n;
+
+ unsigned filter_mask = 0;
+ int direct_buf[CHUNK_NX][CHUNK_NY];
+ int check_chunk[CHUNK_NX][CHUNK_NY]; /* chunk read with H5Dread */
+ hsize_t offset[2] = {0, 0};
+ size_t buf_size = CHUNK_NX*CHUNK_NY*sizeof(int);
+ hsize_t read_buf_size = 0;
+
+ hsize_t start[2]; /* Start of hyperslab */
+ hsize_t stride[2]; /* Stride of hyperslab */
+ hsize_t count[2]; /* Block count */
+ hsize_t block[2]; /* Block sizes */
+
+ TESTING("basic functionality of H5DOread_chunk on unfiltered datasets");
+
+ /* Create the data space with unlimited dimensions. */
+ if((dataspace = H5Screate_simple(RANK, dims, maxdims)) < 0)
+ goto error;
+ if((mem_space = H5Screate_simple(RANK, chunk_dims, NULL)) < 0)
+ goto error;
+
+ /* Modify dataset creation properties, i.e. enable chunking, no compression */
+ if((cparms = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ goto error;
+ if((status = H5Pset_chunk( cparms, RANK, chunk_dims)) < 0)
+ goto error;
+
+ /* Create a new dataset within the file using cparms creation properties. */
+ if((dataset = H5Dcreate2(file, DATASETNAME12, H5T_NATIVE_INT, dataspace, H5P_DEFAULT,
+ cparms, H5P_DEFAULT)) < 0)
+ goto error;
+
+ /* Initialize the dataset */
+ for(i = n = 0; i < NX; i++)
+ for(j = 0; j < NY; j++)
+ data[i][j] = n++;
+
+ if((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0)
+ goto error;
+
+ /* Write the data for the dataset.
+ * It should stay in the chunk cache and will be evicted/flushed by
+ * the H5DOread_chunk function call. */
+ if((status = H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
+ dxpl, data)) < 0)
+ goto error;
+
+ if(H5Fflush(dataset, H5F_SCOPE_LOCAL) < 0)
+ goto error;
+
+ /* For each chunk in the dataset, compare the result of H5Dread and H5DOread_chunk. */
+ for(i=0; i<NX/CHUNK_NX; i++) {
+ for(j=0; j<NY/CHUNK_NY; j++) {
+ /* Select hyperslab for one chunk in the file */
+ start[0] = (hsize_t)i * CHUNK_NX; start[1] = (hsize_t)j * CHUNK_NY;
+ stride[0] = 1; stride[1] = 1;
+ count[0] = 1; count[1] = 1;
+ block[0] = CHUNK_NX; block[1] = CHUNK_NY;
+
+ /* Hyperslab selection equals single chunk */
+ if((status = H5Sselect_hyperslab(dataspace, H5S_SELECT_SET, start, stride, count, block)) < 0)
+ goto error;
+
+ /* Read the chunk back */
+ if((status = H5Dread(dataset, H5T_NATIVE_INT, mem_space, dataspace, H5P_DEFAULT, check_chunk)) < 0)
+ goto error;
+
+ /* Query chunk storage size */
+ if((status = H5Dget_chunk_storage_size(dataset, offset, &read_buf_size)) < 0)
+ goto error;
+
+ if(read_buf_size != buf_size )
+ goto error;
+
+ offset[0] = (hsize_t)i * CHUNK_NX; offset[1] = (hsize_t)j * CHUNK_NY;
+ /* Read the raw chunk back */
+ HDmemset(&direct_buf, 0, sizeof(direct_buf));
+ filter_mask = UINT_MAX;
+ if((status = H5DOread_chunk(dataset, dxpl, offset, &filter_mask, direct_buf)) < 0)
+ goto error;
+
+ /* Check filter mask return value */
+ if(filter_mask != 0)
+ goto error;
+
+ /* Check that the values read directly match those read with H5Dread */
+ for(k = 0; k < CHUNK_NX; k++) {
+ for(l = 0; l < CHUNK_NY; l++) {
+ if(direct_buf[k][l] != check_chunk[k][l]) {
+ HDprintf("\n 1. Read different values than written.");
+ HDprintf(" At index %d,%d\n", k, l);
+ HDprintf(" direct_buf=%d, check_chunk=%d\n", direct_buf[k][l], check_chunk[k][l]);
+ goto error;
+ }
+ }
+ }
+ }
+ }
+
+ /* Close/release resources. */
+ H5Dclose(dataset);
+ H5Sclose(mem_space);
+ H5Sclose(dataspace);
+ H5Pclose(cparms);
+ H5Pclose(dxpl);
+
+ PASSED();
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Dclose(dataset);
+ H5Sclose(mem_space);
+ H5Sclose(dataspace);
+ H5Pclose(cparms);
+ H5Pclose(dxpl);
+ } H5E_END_TRY;
+
+ H5_FAILED();
+ return 1;
+} /* test_read_unfiltered_dset() */
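For reference, the raw-chunk read this test compares against H5Dread reduces to a single H5DOread_chunk call. The following is a minimal sketch, not part of the patch, assuming an already-open chunked, unfiltered integer dataset id `dset` and the CHUNK_NX/CHUNK_NY macros used by the test above.

#include "hdf5.h"
#include "hdf5_hl.h"    /* H5DOread_chunk() lives in the high-level library */

/* Read the raw bytes of one chunk of an unfiltered int dataset.
 * Returns 0 on success, -1 on failure. */
static herr_t
read_one_raw_chunk(hid_t dset, hsize_t chunk_row, hsize_t chunk_col)
{
    int      buf[CHUNK_NX][CHUNK_NY];               /* raw chunk buffer */
    hsize_t  offset[2] = {chunk_row * CHUNK_NX,     /* logical offset of the */
                          chunk_col * CHUNK_NY};    /* chunk's first element */
    uint32_t filters = 0;                           /* filter mask (out) */

    if(H5DOread_chunk(dset, H5P_DEFAULT, offset, &filters, buf) < 0)
        return -1;

    /* For an unfiltered dataset the returned filter mask should be 0,
     * which is exactly what the test above verifies. */
    return (filters == 0) ? 0 : -1;
}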
+
+/*-------------------------------------------------------------------------
+ * Function: test_read_unallocated_chunk
+ *
+ * Purpose: Tests the H5DOread_chunk and H5Dget_chunk_storage_size with valid
+ * offets to chunks that have not been written to the dataset and are
+ * not allocated in the chunk storage on disk.
+ *
+ * Return: Success: 0
+ * Failure: 1
+ *
+ * Programmer: Matthew Strong (GE Healthcare)
+ * 30 November 2016
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+test_read_unallocated_chunk (hid_t file)
+{
+ hid_t dataspace = -1, dataset = -1;
+ hid_t mem_space = -1;
+ hid_t cparms = -1, dxpl = -1;
+ hsize_t dims[2] = {NX, NY};
+ hsize_t maxdims[2] = {H5S_UNLIMITED, H5S_UNLIMITED};
+ hsize_t chunk_dims[2] = {CHUNK_NX, CHUNK_NY};
+ hsize_t chunk_nbytes = CHUNK_NX*CHUNK_NY*sizeof(int);
+ hsize_t direct_chunk_nbytes = 0; /* size (bytes) of the on-disk chunk */
+ herr_t status; /* status from H5 function calls */
+ hsize_t i, j; /* local index variables */
+
+ unsigned filter_mask = 0; /* filter mask returned from H5DOread_chunk */
+ int direct_buf[CHUNK_NX][CHUNK_NY]; /* chunk read with H5DOread_chunk */
+ hsize_t offset[2]; /* chunk offset used for H5DOread_chunk */
+
+ TESTING("H5DOread_chunk with unallocated chunks");
+
+ /* Create the data space with unlimited dimensions. */
+ if((dataspace = H5Screate_simple(RANK, dims, maxdims)) < 0)
+ goto error;
+ if((mem_space = H5Screate_simple(RANK, chunk_dims, NULL)) < 0)
+ goto error;
+
+ /* Modify dataset creation properties, i.e. enable chunking, no compression */
+ if((cparms = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ goto error;
+ if((status = H5Pset_chunk( cparms, RANK, chunk_dims)) < 0)
+ goto error;
+
+ /* Create a new dataset within the file using cparms creation properties. */
+ if((dataset = H5Dcreate2(file, DATASETNAME11, H5T_NATIVE_INT, dataspace, H5P_DEFAULT,
+ cparms, H5P_DEFAULT)) < 0)
+ goto error;
+
+ if((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0)
+ goto error;
+
+ /* Write a single chunk to initialize the chunk storage */
+ HDmemset(&chunk_dims, 0, sizeof(chunk_dims));
+ offset[0] = 0; offset[1] = 0;
+
+ if(H5DOwrite_chunk(dataset, dxpl, filter_mask, offset, chunk_nbytes, &chunk_dims) < 0)
+ goto error;
+
+ /* Attempt to read each chunk in the dataset. The chunks are not allocated,
+ * so the H5DOread_chunk call is expected to fail. The chunk index starts
+ * at 1 because one chunk was written above to initialize the chunk storage. */
+ for(i=1; i<NX/CHUNK_NX; i++) {
+ for(j=0; j<NY/CHUNK_NY; j++) {
+
+ offset[0] = i * CHUNK_NX;
+ offset[1] = j * CHUNK_NY;
+
+ /* Read a non-existent chunk using the direct read function. */
+ H5E_BEGIN_TRY {
+ status = H5DOread_chunk(dataset, dxpl, offset, &filter_mask, &direct_buf);
+ } H5E_END_TRY;
+
+ /* Check that the chunk read call does not succeed. */
+ if(status != -1)
+ goto error;
+
+ /* Query the size of the non-existent chunk */
+ direct_chunk_nbytes = ULONG_MAX;
+ H5E_BEGIN_TRY {
+ status = H5Dget_chunk_storage_size(dataset, offset, &direct_chunk_nbytes);
+ } H5E_END_TRY;
+
+ /* Check that the chunk storage size call does not succeed. */
+ if(status != -1 )
+ goto error;
+ if(direct_chunk_nbytes != 0 )
+ goto error;
+
+ }
+ }
+
+ /* Close/release resources. */
+ H5Dclose(dataset);
+ H5Sclose(mem_space);
+ H5Sclose(dataspace);
+ H5Pclose(cparms);
+ H5Pclose(dxpl);
+
+ PASSED();
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Dclose(dataset);
+ H5Sclose(mem_space);
+ H5Sclose(dataspace);
+ H5Pclose(cparms);
+ H5Pclose(dxpl);
+ } H5E_END_TRY;
+
+ H5_FAILED();
+ return 1;
+} /* test_read_unallocated_chunk() */
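The probing pattern this test relies on can also be written as a small helper. The sketch below is illustrative only; `dset` is assumed to be an open chunked dataset. It wraps H5Dget_chunk_storage_size in H5E_BEGIN_TRY/H5E_END_TRY, mirroring the test's expectation that the query fails, or reports zero bytes, for a chunk that has never been written.

#include "hdf5.h"

/* Return 1 if a chunk has been allocated at `offset`, 0 otherwise. */
static int
chunk_is_allocated(hid_t dset, const hsize_t offset[2])
{
    hsize_t nbytes = 0;
    herr_t  ret;

    /* Suppress the error stack: a failed query is an expected outcome here. */
    H5E_BEGIN_TRY {
        ret = H5Dget_chunk_storage_size(dset, offset, &nbytes);
    } H5E_END_TRY;

    /* Mirrors test_read_unallocated_chunk(): an unwritten chunk yields either
     * a failing call or a reported size of zero bytes. */
    return (ret >= 0 && nbytes > 0) ? 1 : 0;
}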
+
+/*-------------------------------------------------------------------------
+ * Function: Main function
+ *
+ * Purpose: Test the direct chunk write function H5DOwrite_chunk and
+ * the direct chunk read function H5DOread_chunk
+ *
+ * Return: Success: 0
+ * Failure: 1
*
* Programmer: Raymond Lu
* 30 November 2012
@@ -1114,15 +1993,25 @@ int main( void )
if((file_id = H5Fcreate(FILE_NAME, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0)
goto error;
- /* Test direct chunk write */
+ /* Test direct chunk write and direct chunk read */
#ifdef H5_HAVE_FILTER_DEFLATE
nerrors += test_direct_chunk_write(file_id);
#endif /* H5_HAVE_FILTER_DEFLATE */
+ nerrors += test_direct_chunk_overwrite_data(file_id);
nerrors += test_skip_compress_write1(file_id);
nerrors += test_skip_compress_write2(file_id);
nerrors += test_data_conv(file_id);
nerrors += test_invalid_parameters(file_id);
+ /* Test direct chunk read */
+#ifdef H5_HAVE_FILTER_DEFLATE
+ nerrors += test_direct_chunk_read_no_cache(file_id);
+ nerrors += test_direct_chunk_read_cache(file_id, TRUE);
+ nerrors += test_direct_chunk_read_cache(file_id, FALSE);
+#endif /* H5_HAVE_FILTER_DEFLATE */
+ nerrors += test_read_unfiltered_dset(file_id);
+ nerrors += test_read_unallocated_chunk(file_id);
+
if(H5Fclose(file_id) < 0)
goto error;
@@ -1130,8 +2019,10 @@ int main( void )
if (nerrors)
goto error;
- return 0;
+ HDputs("All direct chunk read/write tests passed.");
+ return EXIT_SUCCESS;
error:
- return 1;
+ HDputs("*** TESTS FAILED ***");
+ return EXIT_FAILURE;
}
diff --git a/hl/test/test_file_image.c b/hl/test/test_file_image.c
index 6ff5bf4..a98642c 100644
--- a/hl/test/test_file_image.c
+++ b/hl/test/test_file_image.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
-* the files COPYING and Copyright.html. COPYING can be found at the root *
-* of the source code distribution tree; Copyright.html can be found at the *
-* root level of an installed copy of the electronic HDF5 document set and *
-* is linked from the top-level documents page. It can also be found at *
-* http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-* access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "h5hltest.h"
diff --git a/hl/test/test_image.c b/hl/test/test_image.c
index 8bc6f95..c30b572 100644
--- a/hl/test/test_image.c
+++ b/hl/test/test_image.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
-* the files COPYING and Copyright.html. COPYING can be found at the root *
-* of the source code distribution tree; Copyright.html can be found at the *
-* root level of an installed copy of the electronic HDF5 document set and *
-* is linked from the top-level documents page. It can also be found at *
-* http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-* access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <limits.h>
diff --git a/hl/test/test_ld.c b/hl/test/test_ld.c
index df721e6..12a226a 100644
--- a/hl/test/test_ld.c
+++ b/hl/test/test_ld.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
-* the files COPYING and Copyright.html. COPYING can be found at the root *
-* of the source code distribution tree; Copyright.html can be found at the *
-* root level of an installed copy of the electronic HDF5 document set and *
-* is linked from the top-level documents page. It can also be found at *
-* http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-* access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <stdlib.h>
diff --git a/hl/test/test_lite.c b/hl/test/test_lite.c
index 6cadd95..763fdb5 100644
--- a/hl/test/test_lite.c
+++ b/hl/test/test_lite.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
-* the files COPYING and Copyright.html. COPYING can be found at the root *
-* of the source code distribution tree; Copyright.html can be found at the *
-* root level of an installed copy of the electronic HDF5 document set and *
-* is linked from the top-level documents page. It can also be found at *
-* http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-* access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <stdlib.h>
diff --git a/hl/test/test_packet.c b/hl/test/test_packet.c
index f577947..7003b26 100644
--- a/hl/test/test_packet.c
+++ b/hl/test/test_packet.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <stdlib.h>
diff --git a/hl/test/test_packet_vlen.c b/hl/test/test_packet_vlen.c
index b152a2c..35bd43a 100644
--- a/hl/test/test_packet_vlen.c
+++ b/hl/test/test_packet_vlen.c
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "h5hltest.h"
diff --git a/hl/test/test_table.c b/hl/test/test_table.c
index c9c3c19..0215697 100644
--- a/hl/test/test_table.c
+++ b/hl/test/test_table.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
-* the files COPYING and Copyright.html. COPYING can be found at the root *
-* of the source code distribution tree; Copyright.html can be found at the *
-* root level of an installed copy of the electronic HDF5 document set and *
-* is linked from the top-level documents page. It can also be found at *
-* http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-* access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <stdlib.h>
diff --git a/hl/tools/CMakeLists.txt b/hl/tools/CMakeLists.txt
index 8384b59..9536517 100644
--- a/hl/tools/CMakeLists.txt
+++ b/hl/tools/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_HL_TOOLS C CXX)
add_subdirectory (${HDF5_HL_TOOLS_SOURCE_DIR}/gif2h5 ${HDF5_HL_TOOLS_BINARY_DIR}/gif2h5)
diff --git a/hl/tools/COPYING b/hl/tools/COPYING
index 6903daf..6497ace 100644
--- a/hl/tools/COPYING
+++ b/hl/tools/COPYING
@@ -5,12 +5,9 @@
The files and subdirectories in this directory are part of HDF5.
The full HDF5 copyright notice, including terms governing use,
- modification, and redistribution, is contained in the files COPYING
- and Copyright.html. COPYING can be found at the root of the source
- code distribution tree; Copyright.html can be found at the root
- level of an installed copy of the electronic HDF5 document set and
- is linked from the top-level documents page. It can also be found
- at http://www.hdfgroup.org/HDF5/doc/Copyright.html. If you do not
- have access to either file, you may request a copy from
+ modification, and redistribution, is contained in the COPYING file
+ which can be found at the root of the source code distribution tree
+ or in https://support.hdfgroup.org/ftp/HDF5/releases. If you do
+ not have access to either file, you may request a copy from
help@hdfgroup.org.
diff --git a/hl/tools/Makefile.am b/hl/tools/Makefile.am
index 090c4a6..5d814df 100644
--- a/hl/tools/Makefile.am
+++ b/hl/tools/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
diff --git a/hl/tools/gif2h5/CMakeLists.txt b/hl/tools/gif2h5/CMakeLists.txt
index 9e6e828..9d7a406 100644
--- a/hl/tools/gif2h5/CMakeLists.txt
+++ b/hl/tools/gif2h5/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_HL_TOOLS_GIF2H5)
#-----------------------------------------------------------------------------
@@ -50,19 +50,21 @@ if (BUILD_TESTING)
set_target_properties (hl_h52gifgentest PROPERTIES FOLDER generator/tools/hl)
# add_test (NAME hl_h52gifgentest COMMAND $<TARGET_FILE:hl_h52gifgentest>)
- endif (HDF5_BUILD_GENERATORS)
+ endif ()
include (CMakeTests.cmake)
-endif (BUILD_TESTING)
+endif ()
#-----------------------------------------------------------------------------
# Add file(s) to CMake Install
#-----------------------------------------------------------------------------
-install (
- TARGETS
- gif2h5
- h52gif
- EXPORT
- ${HDF5_EXPORTED_TARGETS}
- RUNTIME DESTINATION ${HDF5_INSTALL_BIN_DIR} COMPONENT hltoolsapplications
-)
+if (HDF5_EXPORTED_TARGETS)
+ install (
+ TARGETS
+ gif2h5
+ h52gif
+ EXPORT
+ ${HDF5_EXPORTED_TARGETS}
+ RUNTIME DESTINATION ${HDF5_INSTALL_BIN_DIR} COMPONENT hltoolsapplications
+ )
+endif ()
diff --git a/hl/tools/gif2h5/CMakeTests.cmake b/hl/tools/gif2h5/CMakeTests.cmake
index 8a52f50..61c004e 100644
--- a/hl/tools/gif2h5/CMakeTests.cmake
+++ b/hl/tools/gif2h5/CMakeTests.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
##############################################################################
##############################################################################
diff --git a/hl/tools/gif2h5/Makefile.am b/hl/tools/gif2h5/Makefile.am
index c82485e..d30d66a 100644
--- a/hl/tools/gif2h5/Makefile.am
+++ b/hl/tools/gif2h5/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
diff --git a/hl/tools/gif2h5/decompress.c b/hl/tools/gif2h5/decompress.c
index ef2d78a..453db12 100644
--- a/hl/tools/gif2h5/decompress.c
+++ b/hl/tools/gif2h5/decompress.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <stdio.h>
#include <stdlib.h>
diff --git a/hl/tools/gif2h5/gif.h b/hl/tools/gif2h5/gif.h
index f34a111..ed1cc81 100644
--- a/hl/tools/gif2h5/gif.h
+++ b/hl/tools/gif2h5/gif.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/hl/tools/gif2h5/gif2hdf.c b/hl/tools/gif2h5/gif2hdf.c
index 8a9e971..2e06d34 100644
--- a/hl/tools/gif2h5/gif2hdf.c
+++ b/hl/tools/gif2h5/gif2hdf.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <string.h>
#include <stdio.h>
diff --git a/hl/tools/gif2h5/gif2mem.c b/hl/tools/gif2h5/gif2mem.c
index 456a32d..ec029ea 100644
--- a/hl/tools/gif2h5/gif2mem.c
+++ b/hl/tools/gif2h5/gif2mem.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -220,7 +218,7 @@ Gif2Mem(BYTE *MemGif, GIFTOMEM *GifMemoryStruct)
PlainTextCount++;
if (PlainTextCount > PlainTextArray)
- PlainTextArray = (BYTE)((PlainTextArray << 1) + 1);
+ PlainTextArray = (BYTE)((PlainTextArray << 1) + 1);
if (!(gifPlainText = (GIFPLAINTEXT **)realloc(gifPlainText , sizeof(GIFPLAINTEXT *) * PlainTextArray))) {
printf("Out of memory!");
@@ -242,7 +240,7 @@ Gif2Mem(BYTE *MemGif, GIFTOMEM *GifMemoryStruct)
CommentCount++;
if (CommentCount > CommentArray)
- CommentArray = (BYTE)((CommentArray << 1) + 1);
+ CommentArray = (BYTE)((CommentArray << 1) + 1);
if (!(gifComment = (GIFCOMMENT **)realloc(gifComment , sizeof(GIFCOMMENT *) * CommentArray))) {
printf("Out of memory!");
@@ -307,7 +305,7 @@ Gif2Mem(BYTE *MemGif, GIFTOMEM *GifMemoryStruct)
ApplicationCount++;
if (ApplicationCount > ApplicationArray)
- ApplicationArray = (BYTE)((ApplicationArray << 1) + 1);
+ ApplicationArray = (BYTE)((ApplicationArray << 1) + 1);
if (!(gifApplication = (GIFAPPLICATION **)realloc(gifApplication , sizeof(GIFAPPLICATION *) * ApplicationArray))) {
printf("Out of memory!");
@@ -327,7 +325,7 @@ Gif2Mem(BYTE *MemGif, GIFTOMEM *GifMemoryStruct)
break;
default:
- printf("Unknown Extension Label: 0x%02x\n", Label);
+ printf("Unknown Extension Label: %#02x\n", Label);
break;
}
@@ -335,7 +333,7 @@ Gif2Mem(BYTE *MemGif, GIFTOMEM *GifMemoryStruct)
default:
fprintf(stderr,
- "Unknown Block Separator Character: 0x%02x\n", Identifier);
+ "Unknown Block Separator Character: %#02x\n", Identifier);
}
}
}
diff --git a/hl/tools/gif2h5/gifread.c b/hl/tools/gif2h5/gifread.c
index 2b4c129..948e112 100644
--- a/hl/tools/gif2h5/gifread.c
+++ b/hl/tools/gif2h5/gifread.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <stdio.h>
#include <stdlib.h>
diff --git a/hl/tools/gif2h5/h52gifgentst.c b/hl/tools/gif2h5/h52gifgentst.c
index 39e950b..655563c 100644
--- a/hl/tools/gif2h5/h52gifgentst.c
+++ b/hl/tools/gif2h5/h52gifgentst.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <assert.h>
diff --git a/hl/tools/gif2h5/h52giftest.sh.in b/hl/tools/gif2h5/h52giftest.sh.in
index 90931f2..5643e30 100644
--- a/hl/tools/gif2h5/h52giftest.sh.in
+++ b/hl/tools/gif2h5/h52giftest.sh.in
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# HDF Utilities Test script
diff --git a/hl/tools/gif2h5/hdf2gif.c b/hl/tools/gif2h5/hdf2gif.c
index 8ed05df..ae63e92 100644
--- a/hl/tools/gif2h5/hdf2gif.c
+++ b/hl/tools/gif2h5/hdf2gif.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <stdio.h>
diff --git a/hl/tools/gif2h5/hdfgifwr.c b/hl/tools/gif2h5/hdfgifwr.c
index 4c52f4c..6f5ab58 100644
--- a/hl/tools/gif2h5/hdfgifwr.c
+++ b/hl/tools/gif2h5/hdfgifwr.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/hl/tools/gif2h5/writehdf.c b/hl/tools/gif2h5/writehdf.c
index b4cca67..b656c36 100644
--- a/hl/tools/gif2h5/writehdf.c
+++ b/hl/tools/gif2h5/writehdf.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <string.h>
#include <stdlib.h>
diff --git a/hl/tools/h5watch/CMakeLists.txt b/hl/tools/h5watch/CMakeLists.txt
index e3a7e9d..fde40e3 100644
--- a/hl/tools/h5watch/CMakeLists.txt
+++ b/hl/tools/h5watch/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_HL_TOOLS_H5WATCH)
#-----------------------------------------------------------------------------
@@ -9,9 +9,9 @@ set (H5WATCH_SOURCES
)
#-- Add h5watch program
-INCLUDE_DIRECTORIES (${HDF5_TOOLS_DIR}/lib)
-INCLUDE_DIRECTORIES (${HDF5_HL_TOOLS_DIR}/src)
-INCLUDE_DIRECTORIES (${HDF5_HL_TOOLS_H5WATCH_SOURCE_DIR})
+include_directories (${HDF5_TOOLS_DIR}/lib)
+include_directories (${HDF5_HL_TOOLS_DIR}/src)
+include_directories (${HDF5_HL_TOOLS_H5WATCH_SOURCE_DIR})
add_executable (h5watch ${H5WATCH_SOURCES})
TARGET_NAMING (h5watch STATIC)
@@ -20,6 +20,8 @@ target_link_libraries (h5watch ${HDF5_HL_LIB_TARGET} ${HDF5_LIB_TARGET} ${HDF5_T
set_target_properties (h5watch PROPERTIES FOLDER tools/hl)
if (BUILD_TESTING)
+ include_directories (${HDF5_TEST_SRC_DIR})
+ include_directories(${HDF5_HL_SRC_DIR}/test)
#-- Add swmr_check_compat_vfd program
set (hl_swmr_check_compat_vfd_SOURCES
${HDF5_HL_TOOLS_H5WATCH_SOURCE_DIR}/swmr_check_compat_vfd.c
@@ -37,7 +39,7 @@ if (BUILD_TESTING)
add_executable (extend_dset ${extend_dset_SOURCES})
TARGET_NAMING (extend_dset STATIC)
TARGET_C_PROPERTIES (extend_dset STATIC " " " ")
- target_link_libraries (extend_dset ${HDF5_HL_LIB_TARGET} ${HDF5_LIB_TARGET} ${HDF5_TOOLS_LIB_TARGET})
+ target_link_libraries (extend_dset ${HDF5_HL_LIB_TARGET} ${HDF5_TEST_LIB_TARGET} ${HDF5_LIB_TARGET} ${HDF5_TOOLS_LIB_TARGET})
set_target_properties (extend_dset PROPERTIES FOLDER tools/hl)
add_executable (h5watchgentest ${HDF5_HL_TOOLS_H5WATCH_SOURCE_DIR}/h5watchgentest.c)
@@ -47,13 +49,17 @@ if (BUILD_TESTING)
set_target_properties (h5watchgentest PROPERTIES FOLDER generator/tools/hl)
include (CMakeTests.cmake)
-endif (BUILD_TESTING)
+endif ()
#-----------------------------------------------------------------------------
# Add file(s) to CMake Install
#-----------------------------------------------------------------------------
-install (
- TARGETS
- h5watch
- RUNTIME DESTINATION ${HDF5_INSTALL_BIN_DIR} COMPONENT hltoolsapplications
-)
+if (HDF5_EXPORTED_TARGETS)
+ install (
+ TARGETS
+ h5watch
+ EXPORT
+ ${HDF5_EXPORTED_TARGETS}
+ RUNTIME DESTINATION ${HDF5_INSTALL_BIN_DIR} COMPONENT hltoolsapplications
+ )
+endif ()
diff --git a/hl/tools/h5watch/CMakeTests.cmake b/hl/tools/h5watch/CMakeTests.cmake
index 3242a72..35e7829 100644
--- a/hl/tools/h5watch/CMakeTests.cmake
+++ b/hl/tools/h5watch/CMakeTests.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
##############################################################################
##############################################################################
@@ -56,7 +67,7 @@ foreach (h5watch_file ${H5WATCH_TEST_FILES})
COMMAND ${CMAKE_COMMAND}
ARGS -E copy_if_different ${HDF5_HL_TOOLS_DIR}/testfiles/${h5watch_file} ${dest}
)
-endforeach (h5watch_file ${H5WATCH_TEST_FILES})
+endforeach ()
##############################################################################
##############################################################################
@@ -64,21 +75,9 @@ endforeach (h5watch_file ${H5WATCH_TEST_FILES})
##############################################################################
##############################################################################
- MACRO (ADD_H5_TEST resultfile resultcode)
+ macro (ADD_H5_TEST resultfile resultcode)
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
add_test (
- NAME H5WATCH_ARGS-${resultfile}-clear-objects
- COMMAND ${CMAKE_COMMAND}
- -E remove
- ${resultfile}.out
- ${resultfile}.out.err
- )
- set_tests_properties (H5WATCH_ARGS-${resultfile}-clear-objects PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles")
- if (NOT "${last_test}" STREQUAL "")
- set_tests_properties (H5WATCH_ARGS-${resultfile}-clear-objects PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
- set (last_test "H5WATCH_ARGS-${resultfile}-clearall-objects")
- add_test (
NAME H5WATCH_ARGS-h5watch-${resultfile}
COMMAND "${CMAKE_COMMAND}"
-D "TEST_PROGRAM=$<TARGET_FILE:h5watch>"
@@ -89,20 +88,18 @@ endforeach (h5watch_file ${H5WATCH_TEST_FILES})
-D "TEST_REFERENCE=${resultfile}.ddl"
-P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
)
- set_tests_properties (H5WATCH_ARGS-h5watch-${resultfile} PROPERTIES DEPENDS H5WATCH_ARGS-${resultfile}-clear-objects)
- set (last_test "H5WATCH_ARGS-${resultfile}")
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO (ADD_H5_TEST resultfile resultcode resultoption)
+ set_tests_properties (H5WATCH_ARGS-h5watch-${resultfile} PROPERTIES DEPENDS ${last_test})
+ set (last_test "H5WATCH_ARGS-h5watch-${resultfile}")
+ endif ()
+ endmacro ()
- MACRO (ADD_H5_WATCH resultfile resultcode)
+ macro (ADD_H5_WATCH resultfile resultcode)
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
add_test (
NAME H5WATCH-${resultfile}-clear-objects
COMMAND ${CMAKE_COMMAND}
-E remove
${resultfile}.h5
- ${resultfile}.out
- ${resultfile}.out.err
)
set_tests_properties (H5WATCH-${resultfile}-clear-objects PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles")
add_test (
@@ -117,8 +114,8 @@ endforeach (h5watch_file ${H5WATCH_TEST_FILES})
-P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
)
set_tests_properties (H5WATCH-${resultfile} PROPERTIES DEPENDS H5WATCH-${resultfile}-clear-objects)
- endif (NOTHDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO (ADD_H5_WATCH resultfile resultcode)
+ endif ()
+ endmacro ()
##############################################################################
##############################################################################
@@ -130,7 +127,7 @@ endforeach (h5watch_file ${H5WATCH_TEST_FILES})
# supports SWMR.
set (SWMR_INCOMPAT ${hl_swmr_check_compat_vfd})
-IF (NOT SWMR_INCOMPAT)
+if (NOT SWMR_INCOMPAT)
# Remove any output file left over from previous test run
add_test (
NAME H5WATCH-clearall-objects
@@ -140,7 +137,7 @@ IF (NOT SWMR_INCOMPAT)
)
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (H5WATCH-clearall-objects PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
set (last_test "H5WATCH-clearall-objects")
#################################################################################################
@@ -169,23 +166,23 @@ IF (NOT SWMR_INCOMPAT)
set (last_test "H5WATCH-h5watchgentest")
# Test on --help options
- ADD_H5_TEST(w-help1 0 --help)
+ ADD_H5_TEST (w-help1 0 --help)
#
# Tests on expected failures
- ADD_H5_TEST(w-err-dset1 1 WATCH.h5)
- ADD_H5_TEST(w-err-dset2 1 WATCH.h5/group/DSET_CMPD)
- ADD_H5_TEST(w-err-dset-none 1 WATCH.h5/DSET_NONE)
- ADD_H5_TEST(w-err-dset-nomax 1 WATCH.h5/DSET_NOMAX)
- ADD_H5_TEST(w-err-file 1 ../WATCH.h5/DSET_CMPD)
- ADD_H5_TEST(w-err-width 1 --width=-8 WATCH.h5/DSET_ONE)
- ADD_H5_TEST(w-err-poll 1 --polling=-8 WATCH.h5/DSET_ONE)
- ADD_H5_TEST(w-err-poll0 1 --polling=0 WATCH.h5/DSET_ONE)
+ ADD_H5_TEST (w-err-dset1 1 WATCH.h5)
+ ADD_H5_TEST (w-err-dset2 1 WATCH.h5/group/DSET_CMPD)
+ ADD_H5_TEST (w-err-dset-none 1 WATCH.h5/DSET_NONE)
+ ADD_H5_TEST (w-err-dset-nomax 1 WATCH.h5/DSET_NOMAX)
+ ADD_H5_TEST (w-err-file 1 ../WATCH.h5/DSET_CMPD)
+ ADD_H5_TEST (w-err-width 1 --width=-8 WATCH.h5/DSET_ONE)
+ ADD_H5_TEST (w-err-poll 1 --polling=-8 WATCH.h5/DSET_ONE)
+ ADD_H5_TEST (w-err-poll0 1 --polling=0 WATCH.h5/DSET_ONE)
#
# Tests on invalid field names via --fields option for a compound typed dataset: DSET_CMPD
- ADD_H5_TEST(w-err-cmpd1 1 --fields=fieldx WATCH.h5/DSET_CMPD)
- ADD_H5_TEST(w-err-cmpd2 1 --fields=field1,field2. WATCH.h5/DSET_CMPD)
- ADD_H5_TEST(w-err-cmpd3 1 --fields=field1,field2, WATCH.h5/DSET_CMPD)
- ADD_H5_TEST(w-err-cmpd4 1 --fields=field1,field2.b.k WATCH.h5/DSET_CMPD)
- ADD_H5_TEST(w-err-cmpd5 1 --fields=field1 --fields=field2.b.k WATCH.h5/DSET_CMPD)
+ ADD_H5_TEST (w-err-cmpd1 1 --fields=fieldx WATCH.h5/DSET_CMPD)
+ ADD_H5_TEST (w-err-cmpd2 1 --fields=field1,field2. WATCH.h5/DSET_CMPD)
+ ADD_H5_TEST (w-err-cmpd3 1 --fields=field1,field2, WATCH.h5/DSET_CMPD)
+ ADD_H5_TEST (w-err-cmpd4 1 --fields=field1,field2.b.k WATCH.h5/DSET_CMPD)
+ ADD_H5_TEST (w-err-cmpd5 1 --fields=field1 --fields=field2.b.k WATCH.h5/DSET_CMPD)
#
-ENDIF (NOT SWMR_INCOMPAT)
+endif ()
diff --git a/hl/tools/h5watch/Makefile.am b/hl/tools/h5watch/Makefile.am
index a5891ef..c60fceb 100644
--- a/hl/tools/h5watch/Makefile.am
+++ b/hl/tools/h5watch/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
@@ -21,7 +19,7 @@
include $(top_srcdir)/config/commence.am
# Include src and tools/lib directories
-AM_CPPFLAGS+=-I$(top_srcdir)/src -I$(top_srcdir)/tools/lib -I$(top_srcdir)/hl/src
+AM_CPPFLAGS+=-I$(top_srcdir)/src -I$(top_srcdir)/tools/lib -I$(top_srcdir)/hl/src -I$(top_srcdir)/hl/test
# These are our main targets, the tools
TEST_SCRIPT=testh5watch.sh
@@ -37,6 +35,14 @@ h5watch_LDFLAGS = $(LT_STATIC_EXEC) $(AM_LDFLAGS)
# Programs all depend on the hdf5 library, the tools library, and the HL
# library.
LDADD=$(LIBH5_HL) $(LIBH5TOOLS) $(LIBHDF5)
+
+# Add extend_dset specific preprocessor flags here
+# (add the main test subdirectory to the include file path)
+extend_dset_CPPFLAGS=$(AM_CPPFLAGS) -I$(top_srcdir)/test
+# Add extend_dset specific library flags here
+# (add the main test library to the list of libraries)
+extend_dset_LDADD=$(LDADD) $(LIBH5TEST) $(LIBHDF5)
+
#
CHECK_CLEANFILES+=*.h5
DISTCLEANFILES=testh5watch.sh
diff --git a/hl/tools/h5watch/extend_dset.c b/hl/tools/h5watch/extend_dset.c
index 023e201..957db10 100644
--- a/hl/tools/h5watch/extend_dset.c
+++ b/hl/tools/h5watch/extend_dset.c
@@ -5,15 +5,13 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
-#include "H5HLprivate2.h"
+#include "h5hltest.h"
/*
* Extending datasets in WATCH.h5 generated by h5watchgentest.c
@@ -30,35 +28,12 @@
#define WRITER_MESSAGE "writer_message"
/* The message received from the test script to start extending dataset */
#define READER_MESSAGE "reader_message"
-/* Message timeout in seconds */
-#define MESSAGE_TIMEOUT 300
/* Size of data buffer */
#define TEST_BUF_SIZE 100
-/*
- * Test variations (incremental) for one-dimensional dataset:
- * Varies from 10->13->12->12->1->3
- */
-#define ONE_NTESTS 5
-int one_tests[ONE_NTESTS] = {3, -1, 0, -11, 2};
-
-/*
- * Test variations (incremental) for two-dimensional dataset:
- * Varies from {4,10}->{6,12}->{8,1}->{10,1}->
- * {3,3}->{2,2}->{1,2}->
- * {1,4}->{1,3}->{1,3}
- */
-#define TWO_NTESTS 9
-int two_tests[TWO_NTESTS][2] = { {2, 2}, {2, -11}, {2, 0},
- {-7, 2}, {-1, -1}, {-1, 0},
- {0, 2}, {0, -1}, {0, 0}
- };
-
-static herr_t extend_dset_two(const char *file, char *dname);
-static herr_t extend_dset_one(const char *file, char *dname);
-void send_message(const char *file);
-static herr_t wait_message(const char *file);
+static herr_t extend_dset_two(const char *file, char *dname, int action1, int action2);
+static herr_t extend_dset_one(const char *file, char *dname, int action);
/* Data structures for datasets with compound data type */
@@ -87,80 +62,15 @@ typedef struct set_t {
} set_t;
/*
- * To send a message by creating the file.
- */
-void
-send_message(const char *file)
-{
- FILE *id;
-
- id = HDfopen(file, "w+");
- HDfclose(id);
-} /* end send_message() */
-
-/*
- *
- * Repeatedly check for the message file.
- * It will stop when the file exists or exceeds the timeout limit.
- */
-static herr_t
-wait_message(const char *file)
-{
- FILE *id; /* File pointer */
- time_t t0, t1; /* Time info */
-
- /* Start timer */
- HDtime(&t0);
-
- /* Wait for message from test script to start work */
- while((id = HDfopen(file, "r")) == NULL) {
- /* Get current time */
- HDtime(&t1);
- /*
- * Determine time difference--
- * if waiting too long for the message, then it is
- * unlikely the message will get sent, then fail rather
- * than loop forever.
- */
- if(HDdifftime(t1, t0) > MESSAGE_TIMEOUT)
- goto error;
- } /* end while */
-
- HDfclose(id);
- HDunlink(file);
-
- return SUCCEED;
-
-error:
- return FAIL;
-} /* end wait_message() */
-
-/*
***********************************************************************
*
- * Extending a two-dimensional dataset:
- * dims[0] dims[1]
- * ------- -------
- * case #1: increase increase
- * case #2: increase decrease
- * case #3: increase same
- * case #4: decrease increase
- * case #5: decrease decrease (no action)
- * case #6: decrease same (no action)
- * case #7: same increase
- * case #8: same decrease (no action)
- * case #9: same same (no action)
- *
- * two_tests[TWO_NTESTS][2] = { {2,2}, {2,-11}, {2,0},
- * {-7,2}, {-1,-1}, {-1,0},
- * {0,2}, {0,-1}, {0,0} }
- * varies from {4,10}->{6,12}->{8,1}->{10,1}->
- * {3,3}->{2,2}->{1,2}->
- * {1,4}->{1,3}->{1,3}
+ * Extending a two-dimensional dataset by action1 and action2.
+ * --action1 and action2 can be a positive # or negative # or 0.
+ *
***********************************************************************
*/
static herr_t
-extend_dset_two(const char *file, char *dname)
+extend_dset_two(const char *file, char *dname, int action1, int action2)
{
hid_t fid = -1; /* file id */
hid_t fapl = -1; /* file access property list id */
@@ -198,7 +108,7 @@ extend_dset_two(const char *file, char *dname)
goto error;
/* Send message to the test script to start "h5watch" */
- send_message(WRITER_MESSAGE);
+ h5_send_message(WRITER_MESSAGE, NULL, NULL);
if((sid = H5Dget_space(did)) < 0)
goto error;
@@ -215,64 +125,60 @@ extend_dset_two(const char *file, char *dname)
goto error;
/* Wait for message from the test script to start extending dataset */
- if(wait_message(READER_MESSAGE) < 0)
+ if(h5_wait_message(READER_MESSAGE) < 0)
goto error;
- /* Loop through different variations of extending the dataset */
- for(i = 0; i < TWO_NTESTS; i++) {
+ /* sleep to emulate about 2 seconds of application operation */
+ HDsleep(2);
- /* sleep to emulate about 2 seconds of application operation */
- HDsleep(2);
+ /* Get current dimension sizes */
+ if(H5LDget_dset_dims(did, cur_dims) < 0)
+ goto error;
- /* Get current dimension sizes */
- if(H5LDget_dset_dims(did, cur_dims) < 0)
- goto error;
+ /* Set up the new extended dimension sizes */
+ ext_dims[0] = cur_dims[0] + (hsize_t)action1;
+ ext_dims[1] = cur_dims[1] + (hsize_t)action2;
- /* Set up the new extended dimension sizes */
- ext_dims[0] = cur_dims[0] + (hsize_t)two_tests[i][0];
- ext_dims[1] = cur_dims[1] + (hsize_t)two_tests[i][1];
+ /* Extend the dataset */
+ if(H5Dset_extent(did, ext_dims) < 0)
+ goto error;
- /* Extend the dataset */
- if(H5Dset_extent(did, ext_dims) < 0)
+ num_elmts = 1;
+ for(j = 0; j < (unsigned)ndims; j++)
+ num_elmts *= (unsigned)ext_dims[j];
+
+ /* Compound type */
+ if(!HDstrcmp(dname, DSET_CMPD_TWO)) {
+
+ HDmemset(cbuf, 0, TEST_BUF_SIZE * sizeof(set_t));
+ for(j = 0; j < num_elmts; j++) {
+ cbuf[j].field1 = action1;
+ cbuf[j].field2.a = action1;
+ cbuf[j].field2.c = action1;
+ cbuf[j].field2.b.a = action1;
+ cbuf[j].field2.b.b = action1;
+ cbuf[j].field2.b.c = action1;
+ cbuf[j].field3 = action1;
+ cbuf[j].field4.a = action1;
+ cbuf[j].field4.b = action1;
+ } /* end for */
+
+ /* Write to the dataset */
+ if(H5Dwrite(did, dtid, H5S_ALL, H5S_ALL, H5P_DEFAULT, cbuf) < 0)
goto error;
- num_elmts = 1;
- for(j = 0; j < (unsigned)ndims; j++)
- num_elmts *= (unsigned)ext_dims[j];
-
- /* Compound type */
- if(!HDstrcmp(dname, DSET_CMPD_TWO)) {
-
- HDmemset(cbuf, 0, TEST_BUF_SIZE * sizeof(set_t));
- for(j = 0; j < num_elmts; j++) {
- cbuf[j].field1 = i + 1;
- cbuf[j].field2.a = i + 1;
- cbuf[j].field2.c = i + 1;
- cbuf[j].field2.b.a = i + 1;
- cbuf[j].field2.b.b = i + 1;
- cbuf[j].field2.b.c = i + 1;
- cbuf[j].field3 = i + 1;
- cbuf[j].field4.a = i + 1;
- cbuf[j].field4.b = i + 1;
- } /* end for */
-
- /* Write to the dataset */
- if(H5Dwrite(did, dtid, H5S_ALL, H5S_ALL, H5P_DEFAULT, cbuf) < 0)
- goto error;
- } else { /* Integer type */
- HDmemset(ibuf, 0, TEST_BUF_SIZE * sizeof(int));
- for(j = 0; j < num_elmts; j++)
- ibuf[j] = (int)(i + 1);
-
- /* Write to the dataset */
- if(H5Dwrite(did, dtid, H5S_ALL, H5S_ALL, H5P_DEFAULT, ibuf) < 0)
- goto error;
- } /* end if-else */
+ } else { /* Integer type */
+ HDmemset(ibuf, 0, TEST_BUF_SIZE * sizeof(int));
+ for(j = 0; j < num_elmts; j++)
+ ibuf[j] = action1;
- if(H5Dflush(did) < 0)
+ /* Write to the dataset */
+ if(H5Dwrite(did, dtid, H5S_ALL, H5S_ALL, H5P_DEFAULT, ibuf) < 0)
goto error;
+ } /* end if-else */
- } /* end for TWO_NTESTS */
+ if(H5Dflush(did) < 0)
+ goto error;
/* Closing */
if(H5Tclose(dtid) < 0) goto error;
@@ -280,8 +186,8 @@ extend_dset_two(const char *file, char *dname)
if(H5Pclose(fapl) < 0) goto error;
if(H5Fclose(fid) < 0) goto error;
- HDfree(ibuf);
- HDfree(cbuf);
+ if(ibuf) HDfree(ibuf);
+ if(cbuf) HDfree(cbuf);
return SUCCEED;
@@ -305,21 +211,13 @@ error:
/*
***********************************************************************
*
- * Extending a one-dimensional dataset
- * Test cases:
- * #1: increase
- * #2: decrease
- * #3: same
- * #4: decrease
- * #5: increase
- *
- * one_tests[ONE_NTESTS] = {3, -1, 0, -11, 2}
- * varies from 10->13->12->12->1->3
+ * Extending a one-dimensional dataset by action:
+ * --action can be a positive # or negative # or 0.
*
***********************************************************************
*/
static herr_t
-extend_dset_one(const char *file, char *dname)
+extend_dset_one(const char *file, char *dname, int action)
{
hid_t fid = -1; /* file id */
hid_t fapl = -1; /* file access property list id */
@@ -355,7 +253,7 @@ extend_dset_one(const char *file, char *dname)
goto error;
/* Send message to the test script to start "h5watch" */
- send_message(WRITER_MESSAGE);
+ h5_send_message(WRITER_MESSAGE, NULL, NULL);
if((did = H5Dopen2(fid, dname, H5P_DEFAULT)) < 0)
goto error;
@@ -369,83 +267,78 @@ extend_dset_one(const char *file, char *dname)
goto error;
/* Wait for message from the test script to start extending dataset */
- if(wait_message(READER_MESSAGE) < 0)
+ if(h5_wait_message(READER_MESSAGE) < 0)
goto error;
- /* Loop through different variations of extending the dataset */
- for(i = 0; i < ONE_NTESTS; i++) {
+ /* sleep to emulate about 2 seconds of application operation */
+ HDsleep(2);
- /* sleep to emulate about 2 seconds of application operation */
- HDsleep(2);
+ /* Get current dimension sizes */
+ if(H5LDget_dset_dims(did, cur_dims) < 0)
+ goto error;
- /* Get current dimension sizes */
- if(H5LDget_dset_dims(did, cur_dims) < 0)
- goto error;
+ /* Set up the new extended dimension sizes */
+ ext_dims[0] = cur_dims[0] + (hsize_t)action;
- /* Set up the new extended dimension sizes */
- ext_dims[0] = cur_dims[0] + (hsize_t)one_tests[i];
+ /* Extend the dataset */
+ if(H5Dset_extent(did, ext_dims) < 0)
+ goto error;
- /* Extend the dataset */
- if(H5Dset_extent(did, ext_dims) < 0)
+ /* Write to the new appended region of the dataset */
+ if(action > 0) {
+
+ /* Select the extended region */
+ offset[0] = cur_dims[0];
+ count[0] = (hsize_t)action;
+ if((sid = H5Dget_space(did)) < 0)
+ goto error;
+ if(H5Sselect_hyperslab(sid, H5S_SELECT_SET, offset, NULL, count, NULL) < 0)
goto error;
- /* Write to the new appended region of the dataset */
- if(one_tests[i] > 0) {
+ /* Set up memory space and get dataset's datatype */
+ if((mid = H5Screate_simple(1, count, NULL)) < 0)
+ goto error;
- /* Select the extended region */
- offset[0] = cur_dims[0];
- count[0] = (hsize_t)one_tests[i];
- if((sid = H5Dget_space(did)) < 0)
- goto error;
- if(H5Sselect_hyperslab(sid, H5S_SELECT_SET, offset, NULL, count, NULL) < 0)
+ /* Initialize data for the extended region of the dataset */
+ /* Compound type */
+ if(!HDstrcmp(dname, DSET_CMPD) || !HDstrcmp(dname, DSET_CMPD_ESC)) {
+
+ HDmemset(cbuf, 0, TEST_BUF_SIZE * sizeof(set_t));
+ for(j = 0; j < (unsigned)action; j++) {
+ cbuf[j].field1 = j + 1;
+ cbuf[j].field2.a = j + 2;
+ cbuf[j].field2.b.a = j + 2;
+ cbuf[j].field2.b.b = j + 2;
+ cbuf[j].field2.b.c = j + 2;
+ cbuf[j].field2.c = j + 2;
+
+ cbuf[j].field3 = j + 3;
+
+ cbuf[j].field4.a = j + 4;
+ cbuf[j].field4.b = j + 4;
+ } /* end for */
+
+ /* Write to the extended region of the dataset */
+ if(H5Dwrite(did, dtid, mid, sid, H5P_DEFAULT, cbuf) < 0)
goto error;
+ } else { /* Integer type */
+
+ HDmemset(ibuf, 0, TEST_BUF_SIZE * sizeof(int));
+ for(j = 0; j < (unsigned)action; j++)
+ ibuf[j] = (int)j;
- /* Set up memory space and get dataset's datatype */
- if((mid = H5Screate_simple(1, count, NULL)) < 0)
+ /* Write to the extended region of the dataset */
+ if(H5Dwrite(did, dtid, mid, sid, H5P_DEFAULT, ibuf) < 0)
goto error;
+ } /* end if-else */
- /* Initialize data for the extended region of the dataset */
- /* Compound type */
- if(!HDstrcmp(dname, DSET_CMPD) || !HDstrcmp(dname, DSET_CMPD_ESC)) {
-
- HDmemset(cbuf, 0, TEST_BUF_SIZE * sizeof(set_t));
- for(j = 0; j < (unsigned)one_tests[i]; j++) {
- cbuf[j].field1 = j + 1;
- cbuf[j].field2.a = j + 2;
- cbuf[j].field2.b.a = j + 2;
- cbuf[j].field2.b.b = j + 2;
- cbuf[j].field2.b.c = j + 2;
- cbuf[j].field2.c = j + 2;
-
- cbuf[j].field3 = j + 3;
-
- cbuf[j].field4.a = j + 4;
- cbuf[j].field4.b = j + 4;
- } /* end for */
-
- /* Write to the extended region of the dataset */
- if(H5Dwrite(did, dtid, mid, sid, H5P_DEFAULT, cbuf) < 0)
- goto error;
- } else { /* Integer type */
-
- HDmemset(ibuf, 0, TEST_BUF_SIZE * sizeof(int));
- for(j = 0; j < (unsigned)one_tests[i]; j++)
- ibuf[j] = (int)j;
-
- /* Write to the extended region of the dataset */
- if(H5Dwrite(did, dtid, mid, sid, H5P_DEFAULT, ibuf) < 0)
- goto error;
- } /* end if-else */
-
- /* Closing */
- if(H5Sclose(sid) < 0) goto error;
- if(H5Sclose(mid) < 0) goto error;
- } /* end if */
-
- if(H5Dflush(did) < 0)
- goto error;
+ /* Closing */
+ if(H5Sclose(sid) < 0) goto error;
+ if(H5Sclose(mid) < 0) goto error;
+ } /* end if */
- } /* end for ONE_NTESTS */
+ if(H5Dflush(did) < 0)
+ goto error;
/* Closing */
if(H5Tclose(dtid) < 0) goto error;
@@ -453,8 +346,8 @@ extend_dset_one(const char *file, char *dname)
if(H5Pclose(fapl) < 0) goto error;
if(H5Fclose(fid) < 0) goto error;
- HDfree(ibuf);
- HDfree(cbuf);
+ if(ibuf) HDfree(ibuf);
+ if(cbuf) HDfree(cbuf);
return SUCCEED;
@@ -476,30 +369,44 @@ error:
return FAIL;
} /* end extend_dset_one() */
-/* Usage: extend_dset xx.h5 dname */
+
+/*
+ ***********************************************************************
+ *
+ * Usage: extend_dset xx.h5 dname action1 action2
+ * --action1 and action2 can each be a positive #, a negative #, or 0.
+ *
+ ***********************************************************************
+ */
int
main(int argc, const char *argv[])
{
char *dname = NULL;
char *fname = NULL;
+ int action1, action2;
- if(argc != 3) {
- HDfprintf(stderr, "Should have file name and dataset name to be extended...\n");
+ if(argc != 5) {
+ HDfprintf(stderr, "Should have file name, dataset name, and the extended amount...\n");
goto error;
} /* end if */
/* Get the dataset name to be extended */
fname = HDstrdup(argv[1]);
dname = HDstrdup(argv[2]);
+ action1 = HDatoi(argv[3]);
+ action2 = HDatoi(argv[4]);
if(!HDstrcmp(dname, DSET_CMPD) || !HDstrcmp(dname, DSET_CMPD_ESC)) {
- if(extend_dset_one(fname, dname) < 0)
+ if(extend_dset_one(fname, dname, action1) < 0)
goto error;
- } else if(!HDstrcmp(dname, DSET_ONE) || !HDstrcmp(dname, DSET_ALLOC_LATE) || !HDstrcmp(dname, DSET_ALLOC_EARLY)) {
- if(extend_dset_one(fname, dname) < 0)
+ } else if(!HDstrcmp(dname, DSET_ONE) ||
+ !HDstrcmp(dname, DSET_ALLOC_LATE) ||
+ !HDstrcmp(dname, DSET_ALLOC_EARLY)) {
+ if(extend_dset_one(fname, dname, action1) < 0)
goto error;
- } else if(!HDstrcmp(dname, DSET_TWO) || !HDstrcmp(dname, DSET_CMPD_TWO)) {
- if(extend_dset_two(fname, dname) < 0)
+ } else if(!HDstrcmp(dname, DSET_TWO) ||
+ !HDstrcmp(dname, DSET_CMPD_TWO)) {
+ if(extend_dset_two(fname, dname, action1, action2) < 0)
goto error;
} else {
HDfprintf(stdout, "Dataset cannot be extended...\n");
@@ -515,4 +422,3 @@ error:
HDfree(fname);
HDexit(EXIT_FAILURE);
} /* end main() */
-
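For readers following the extend_dset.c changes above: the rewritten extend_dset_one() now takes an `action` argument, grows (or shrinks) the dataset extent by that amount, and writes only into the newly appended region. Below is a minimal, hedged sketch of that flow for a 1-D integer dataset, written against the public HDF5 API; the helper name extend_by and the use of H5Sget_simple_extent_dims and H5T_NATIVE_INT (instead of the tool's H5LDget_dset_dims and the dataset's stored datatype) are illustrative simplifications, not part of the patch.

#include <stdlib.h>
#include "hdf5.h"

/* Sketch: grow a 1-D integer dataset by `action` elements (shrink if
 * negative) and, on growth, write 0..action-1 into the appended region. */
herr_t
extend_by(hid_t did, int action)
{
    hsize_t cur_dims[1], ext_dims[1], offset[1], count[1];
    hid_t   fsid = -1, msid = -1;
    int    *buf  = NULL;
    int     j;

    /* Current extent of the (1-D) dataset */
    if ((fsid = H5Dget_space(did)) < 0)
        goto error;
    if (H5Sget_simple_extent_dims(fsid, cur_dims, NULL) < 0)
        goto error;
    H5Sclose(fsid);
    fsid = -1;

    /* New extent: grows or shrinks depending on the sign of `action` */
    ext_dims[0] = cur_dims[0] + (hsize_t)action;
    if (H5Dset_extent(did, ext_dims) < 0)
        goto error;

    /* On growth, select the appended region in the file and write into it */
    if (action > 0) {
        offset[0] = cur_dims[0];
        count[0]  = (hsize_t)action;
        if ((fsid = H5Dget_space(did)) < 0)
            goto error;
        if (H5Sselect_hyperslab(fsid, H5S_SELECT_SET, offset, NULL, count, NULL) < 0)
            goto error;
        if ((msid = H5Screate_simple(1, count, NULL)) < 0)
            goto error;
        if ((buf = (int *)malloc((size_t)action * sizeof(int))) == NULL)
            goto error;
        for (j = 0; j < action; j++)
            buf[j] = j;
        if (H5Dwrite(did, H5T_NATIVE_INT, msid, fsid, H5P_DEFAULT, buf) < 0)
            goto error;
    }

    free(buf);
    if (msid >= 0) H5Sclose(msid);
    if (fsid >= 0) H5Sclose(fsid);
    return 0;

error:
    free(buf);
    if (msid >= 0) H5Sclose(msid);
    if (fsid >= 0) H5Sclose(fsid);
    return -1;
}

A caller opens the dataset with H5Dopen2() and passes the desired action, which is how extend_dset_one() is driven from main() above; a negative action exercises the shrink-only paths covered by the expected .ddl outputs.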
diff --git a/hl/tools/h5watch/h5watch.c b/hl/tools/h5watch/h5watch.c
index 97dca7a..11514b0 100644
--- a/hl/tools/h5watch/h5watch.c
+++ b/hl/tools/h5watch/h5watch.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "h5tools.h"
@@ -20,29 +18,31 @@
/*
* Note: This tool used private routine
*/
-#define PROGRAMNAME "h5watch" /* Name of tool */
-#define FIELD_SEP "," /* nested field separator */
-#define DEFAULT_RETRY 50 /* number of times to try opening the file */
+#define PROGRAMNAME "h5watch" /* Name of tool */
+#define FIELD_SEP "," /* nested field separator */
+#define DEFAULT_RETRY 50 /* number of times to try opening the file */
/*
* Note:(see comments in hl/src/H5LDprivate.h)
- * This tool uses private routines H5LD_construct_vector()and H5LD_clean_vector()
+ * This tool uses private routines H5LD_construct_vector() and H5LD_clean_vector()
* This tool uses H5LD_memb_t data structure declared in H5LDprivate.h
*/
-const char *progname = "h5watch"; /* tool name */
-static char *g_list_of_fields = NULL; /* command line input for "list_of_fields" */
-static char *g_dup_fields = NULL; /* copy of "list_of_fields" */
-static H5LD_memb_t **g_listv = NULL; /* vector info for "list_of_fields" */
+const char *progname = "h5watch"; /* tool name */
+static char *g_list_of_fields = NULL; /* command line input for "list_of_fields" */
+static char *g_dup_fields = NULL; /* copy of "list_of_fields" */
+static H5LD_memb_t **g_listv = NULL; /* vector info for "list_of_fields" */
static hbool_t g_monitor_size_only = FALSE; /* monitor changes in dataset dimension sizes */
static unsigned g_polling_interval = 1; /* polling interval to check appended data */
+
static hbool_t g_label = FALSE; /* label compound values */
-static int g_display_width = 80; /* output width in characters */
+static int g_display_width = 80; /* output width in characters */
static hbool_t g_simple_output = FALSE; /* make output more machine-readable */
static unsigned g_retry = DEFAULT_RETRY; /* # of times to try opening the file if somehow file is unstable */
-static hbool_t g_display_hex = FALSE; /* display data in hexadecimal format : LATER */
+static hbool_t g_display_hex = FALSE; /* display data in hexadecimal format : LATER */
+static hbool_t g_user_interrupt = FALSE; /* Flag to indicate that user interrupted execution */
static herr_t doprint(hid_t did, hsize_t *start, hsize_t *block, int rank);
static herr_t slicendump(hid_t did, hsize_t *prev_dims, hsize_t *cur_dims,
@@ -108,9 +108,9 @@ static struct long_options l_opts[] = {
* Function: doprint()
*
* Purpose: Prepare to print the dataset's appended data.
- * Call the tools library routine h5tools_dump_dset() to do the printing.
- * (This routine is mostly copied from dump_dataset_values() in tools/h5ls/h5ls.c
- * and modified accordingly).
+ * Call the tools library routine h5tools_dump_dset() to do the printing.
+ * (This routine is mostly copied from dump_dataset_values() in tools/h5ls/h5ls.c
+ * and modified accordingly).
*
* Return: 0 on success; negative on failure
*
@@ -233,17 +233,17 @@ doprint(hid_t did, hsize_t *start, hsize_t *block, int rank)
* Function: slicendump
*
* Purpose: To dump the slice for each dimension
- * For example: prev_dims[2] = {5, 4}; cur_dims[2] = {7, 8}
- * This routine will dump data as follows:
- * {0, 3} to {0, 7} (1x4 elements)
- * {1, 3} to {0, 7} (1x4 elements)
- * {2, 3} to {0, 7} (1x4 elements)
- * {3, 3} to {0, 7} (1x4 elements)
- * {4, 3} to {0, 7} (1x4 elements)
- * {5, 0} to {6, 7} (2x8 elements)
- *
- * Return: Non-negative on success
- * Negative on failure
+ * For example: prev_dims[2] = {5, 4}; cur_dims[2] = {7, 8}
+ * This routine will dump data as follows:
+ * {0, 3} to {0, 7} (1x4 elements)
+ * {1, 3} to {0, 7} (1x4 elements)
+ * {2, 3} to {0, 7} (1x4 elements)
+ * {3, 3} to {0, 7} (1x4 elements)
+ * {4, 3} to {0, 7} (1x4 elements)
+ * {5, 0} to {6, 7} (2x8 elements)
+ *
+ * Return: Non-negative on success
+ * Negative on failure
*
* Programmer: Vailin Choi; August 2010
*
@@ -252,9 +252,9 @@ doprint(hid_t did, hsize_t *start, hsize_t *block, int rank)
static herr_t
slicendump(hid_t did, hsize_t *prev_dims, hsize_t *cur_dims, hsize_t *start, hsize_t *block, int rank, int subrank)
{
- int i; /* Local index variable */
- int ind; /* Index for the current rank */
- herr_t ret_value = SUCCEED; /* Return value */
+ int i; /* Local index variable */
+ int ind; /* Index for the current rank */
+ herr_t ret_value = SUCCEED; /* Return value */
ind = rank - subrank;
@@ -293,13 +293,13 @@ done:
* Function: monitor_dataset
*
* Purpose: To poll a dataset periodically for changes in dimension sizes.
- * For dataset with unchanged and/or decreased dimension sizes:
- * it just prints the dimension size changes
- * For dataset with increase in at least one of its dimension sizes:
- * it will print the new appended data to the dataset
+ * For dataset with unchanged and/or decreased dimension sizes:
+ * it just prints the dimension size changes
+ * For dataset with increase in at least one of its dimension sizes:
+ * it will print the new appended data to the dataset
*
* Return: Non-negative on success: dataset can be monitored
- * Negative on failure: dataset cannot be monitored
+ * Negative on failure: dataset cannot be monitored
*
* Programmer: Vailin Choi; August 2010
*
@@ -320,94 +320,98 @@ monitor_dataset(hid_t fid, char *dsetname)
    /* Open the dataset for monitoring */
if((did = H5Dopen2(fid, dsetname, H5P_DEFAULT)) < 0) {
- error_msg("error in opening dataset \"%s\"\n", dsetname);
- ret_value = FAIL;
- goto done;
+ error_msg("error in opening dataset \"%s\"\n", dsetname);
+ ret_value = FAIL;
+ goto done;
}
if((sid = H5Dget_space(did)) < 0) {
- error_msg("error in getting dataspace id for dataset \"%s\"\n", dsetname);
- ret_value = FAIL;
- goto done;
+ error_msg("error in getting dataspace id for dataset \"%s\"\n", dsetname);
+ ret_value = FAIL;
+ goto done;
}
/* Get the dataset's dimension sizes */
if((ndims = H5Sget_simple_extent_dims(sid, prev_dims, NULL)) < 0) {
- error_msg("unable to get dimensions sizes for \"%s\"\n", dsetname);
- ret_value = FAIL;
- goto done;
+ error_msg("unable to get dimensions sizes for \"%s\"\n", dsetname);
+ ret_value = FAIL;
+ goto done;
}
+ HDfflush(stdout);
- while(1) {
+ /* Loop until an error occurs or the user interrupts execution */
+ while(!g_user_interrupt) {
- /* Refreshes the dataset */
- if(H5Drefresh(did) < 0) {
- ret_value = FAIL;
- goto done;
- }
+ /* Refreshes the dataset */
+ if(H5Drefresh(did) < 0) {
+ ret_value = FAIL;
+ goto done;
+ }
- /* Get the dataset's current dimension sizes */
- if(H5LDget_dset_dims(did, cur_dims) < 0) {
- error_msg("unable to get dimension sizes for \"%s\"\n", dsetname);
- ret_value = FAIL;
- goto done;
- }
+ /* Get the dataset's current dimension sizes */
+ if(H5LDget_dset_dims(did, cur_dims) < 0) {
+ error_msg("unable to get dimension sizes for \"%s\"\n", dsetname);
+ ret_value = FAIL;
+ goto done;
+ }
- /* Check the dimension sizes */
- for(i = 0; i < ndims; i++)
- if(cur_dims[i] != prev_dims[i])
- break;
-
- /* at least one dimension has changed */
- if(i != ndims) {
- /* Printing changes in dimension sizes */
- for(u = 0; u < ndims; u++) {
- HDfprintf(stdout, "dimension %u: %Hu->%Hu", (unsigned)u, prev_dims[u], cur_dims[u]);
- if(cur_dims[u] > prev_dims[u])
- HDfprintf(stdout, " (increases)\n");
- else if(cur_dims[u] < prev_dims[u])
- HDfprintf(stdout, " (decreases)\n");
- else
- HDfprintf(stdout, " (unchanged)\n");
- }
-
- /* Printing elements appended to the dataset if there is */
- if(!g_monitor_size_only) {
-
- /* See if at least one dimension size has increased */
- for(u = 0; u < ndims; u++) {
- int j;
- hsize_t start[H5S_MAX_RANK];
- hsize_t block[H5S_MAX_RANK];
-
- /* Print the new appended data to the dataset */
- if(cur_dims[u] > prev_dims[u]) {
- HDfprintf(stdout, " Data:\n");
-
- for(j = 0; j < ndims; j++) {
- start[j] = 0;
- block[j] = 1;
- }
-
- if((ret_value = slicendump(did, prev_dims, cur_dims, start, block, ndims, ndims)) < 0)
- goto done;
- break;
- }
- } /* end for */
- }
- HDfflush(stdout);
- }
+ /* Check the dimension sizes */
+ for(i = 0; i < ndims; i++)
+ if(cur_dims[i] != prev_dims[i])
+ break;
+
+ /* at least one dimension has changed */
+ if(i != ndims) {
+ /* Printing changes in dimension sizes */
+ for(u = 0; u < ndims; u++) {
+ HDfprintf(stdout, "dimension %u: %Hu->%Hu", (unsigned)u, prev_dims[u], cur_dims[u]);
+ if(cur_dims[u] > prev_dims[u])
+ HDfprintf(stdout, " (increases)\n");
+ else if(cur_dims[u] < prev_dims[u])
+ HDfprintf(stdout, " (decreases)\n");
+ else
+ HDfprintf(stdout, " (unchanged)\n");
+ }
+
+ /* Printing elements appended to the dataset if there is */
+ if(!g_monitor_size_only) {
+
+ /* See if at least one dimension size has increased */
+ for(u = 0; u < ndims; u++) {
+ int j;
+ hsize_t start[H5S_MAX_RANK];
+ hsize_t block[H5S_MAX_RANK];
+
+ /* Print the new appended data to the dataset */
+ if(cur_dims[u] > prev_dims[u]) {
+ HDfprintf(stdout, " Data:\n");
+
+ for(j = 0; j < ndims; j++) {
+ start[j] = 0;
+ block[j] = 1;
+ }
+
+ if((ret_value = slicendump(did, prev_dims, cur_dims, start, block, ndims, ndims)) < 0)
+ goto done;
+ break;
+ }
+ } /* end for */
+ }
+ HDfflush(stdout);
+ }
- /* Save the current dimension sizes */
- HDmemcpy(prev_dims, cur_dims, (size_t)ndims * sizeof(hsize_t));
+ /* Save the current dimension sizes */
+ HDmemcpy(prev_dims, cur_dims, (size_t)ndims * sizeof(hsize_t));
- /* Sleep before next monitor */
+ /* Sleep before next monitor */
HDsleep(g_polling_interval);
} /* end while */
+ HDfflush(stdout);
+
done:
/* Closing */
H5E_BEGIN_TRY
- H5Dclose(did);
+ H5Dclose(did);
H5E_END_TRY
return(ret_value);
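The monitor_dataset()/slicendump() pair above works by comparing the previous and current extents and dumping only the region that grew; the comment in slicendump() walks the {5, 4} -> {7, 8} case. A hedged, 1-D-only sketch of that idea follows: it reads just the elements appended between two polls. The helper name read_appended and the fixed H5T_NATIVE_INT memory type are illustrative; the real tool handles arbitrary rank and prints through h5tools_dump_dset().

#include <stdlib.h>
#include "hdf5.h"

/* Sketch: read only the elements appended to a 1-D integer dataset,
 * i.e. indices [prev, cur) after the extent grew. */
int *
read_appended(hid_t did, hsize_t prev, hsize_t cur)
{
    hsize_t offset[1], count[1];
    hid_t   fsid = -1, msid = -1;
    int    *buf  = NULL;

    if (cur <= prev)
        return NULL;                    /* nothing new to read */

    offset[0] = prev;
    count[0]  = cur - prev;

    if ((buf = (int *)malloc((size_t)count[0] * sizeof(int))) == NULL)
        goto error;
    if ((fsid = H5Dget_space(did)) < 0)
        goto error;
    if (H5Sselect_hyperslab(fsid, H5S_SELECT_SET, offset, NULL, count, NULL) < 0)
        goto error;
    if ((msid = H5Screate_simple(1, count, NULL)) < 0)
        goto error;
    if (H5Dread(did, H5T_NATIVE_INT, msid, fsid, H5P_DEFAULT, buf) < 0)
        goto error;

    H5Sclose(msid);
    H5Sclose(fsid);
    return buf;                         /* caller frees */

error:
    free(buf);
    if (msid >= 0) H5Sclose(msid);
    if (fsid >= 0) H5Sclose(fsid);
    return NULL;
}

For rank greater than one there is no single contiguous "new" block, which is why slicendump() recurses over the dimensions and issues several rectangular reads, as its worked example shows.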
@@ -417,7 +421,7 @@ done:
* Function: process_cmpd_fields
*
* Purpose: To check whether the fields selected in "g_list_of_fields"
- * are valid fields associated with the dataset.
+ * are valid fields associated with the dataset.
*
* Return: 0 on success; negative on failure
*
@@ -428,39 +432,39 @@ done:
static herr_t
process_cmpd_fields(hid_t fid, char *dsetname)
{
- hid_t did=-1; /* dataset id */
- hid_t dtid=-1, tid=-1; /* dataset's data type id */
- size_t len; /* number of comma-separated fields in "g_list_of_fields" */
- herr_t ret_value = SUCCEED; /* Return value */
+ hid_t did=-1; /* dataset id */
+ hid_t dtid=-1, tid=-1; /* dataset's data type id */
+ size_t len; /* number of comma-separated fields in "g_list_of_fields" */
+ herr_t ret_value = SUCCEED; /* Return value */
HDassert(g_list_of_fields && *g_list_of_fields);
/* Open the dataset */
if((did = H5Dopen2(fid, dsetname, H5P_DEFAULT)) < 0) {
- error_msg("error in opening dataset \"%s\"\n", dsetname);
- ret_value = FAIL;
- goto done;
+ error_msg("error in opening dataset \"%s\"\n", dsetname);
+ ret_value = FAIL;
+ goto done;
}
/* Get the dataset's datatype */
if(((dtid = H5Dget_type(did)) < 0) || (tid = H5Tget_native_type(dtid, H5T_DIR_DEFAULT)) < 0) {
- error_msg("error in getting dataset's datatype\n");
+ error_msg("error in getting dataset's datatype\n");
ret_value = FAIL;
goto done;
}
/* Check to make sure that the dataset's datatype is compound type */
if(H5Tget_class(dtid) != H5T_COMPOUND) {
- error_msg("dataset should be compound type for <list_of_fields>\n");
- ret_value = FAIL;
- goto done;
+ error_msg("dataset should be compound type for <list_of_fields>\n");
+ ret_value = FAIL;
+ goto done;
}
/* Make a copy of "g_list_of_fields" */
if((g_dup_fields = HDstrdup(g_list_of_fields)) == NULL) {
- error_msg("error in duplicating g_list_of_fields\n");
+ error_msg("error in duplicating g_list_of_fields\n");
ret_value = FAIL;
- goto done;
+ goto done;
}
/* Estimate the number of comma-separated fields in "g_list of_fields" */
@@ -468,15 +472,15 @@ process_cmpd_fields(hid_t fid, char *dsetname)
/* Allocate memory for a list vector of H5LD_memb_t structures to store "g_list_of_fields" info */
if((g_listv = (H5LD_memb_t **)HDcalloc(len, sizeof(H5LD_memb_t *))) == NULL) {
- error_msg("error in allocating memory for H5LD_memb_t\n");
+ error_msg("error in allocating memory for H5LD_memb_t\n");
ret_value = FAIL;
- goto done;
+ goto done;
}
/* Process and store info for "g_listv" */
if(H5LD_construct_vector(g_dup_fields, g_listv, tid) < 0) {
- error_msg("error in processing <list_of_fields>\n");
- ret_value = FAIL;
+ error_msg("error in processing <list_of_fields>\n");
+ ret_value = FAIL;
goto done;
}
@@ -484,9 +488,9 @@ process_cmpd_fields(hid_t fid, char *dsetname)
done:
/* Closing */
H5E_BEGIN_TRY
- H5Tclose(dtid);
- H5Tclose(tid);
- H5Dclose(did);
+ H5Tclose(dtid);
+ H5Tclose(tid);
+ H5Dclose(did);
H5E_END_TRY
return(ret_value);
} /* process_cmpd_fields() */
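process_cmpd_fields() above first confirms that a --fields selection is even meaningful, i.e. that the dataset's datatype is a compound type, before it duplicates the field list and builds the H5LD_memb_t vector with H5LD_construct_vector(). A hedged, stand-alone version of just that check (the helper name dataset_is_compound is illustrative):

#include "hdf5.h"

/* Sketch: return 1 if the named dataset has a compound datatype,
 * 0 if not, -1 on error. */
int
dataset_is_compound(hid_t fid, const char *dsetname)
{
    hid_t did = -1, dtid = -1;
    int   ret = 0;

    if ((did = H5Dopen2(fid, dsetname, H5P_DEFAULT)) < 0)
        return -1;
    if ((dtid = H5Dget_type(did)) < 0) {
        H5Dclose(did);
        return -1;
    }

    ret = (H5Tget_class(dtid) == H5T_COMPOUND) ? 1 : 0;

    H5Tclose(dtid);
    H5Dclose(did);
    return ret;
}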
@@ -496,15 +500,13 @@ done:
* Function: check_dataset
*
* Purpose: To check whether a dataset can be monitored:
- A chunked dataset with unlimited or max. dimension setting
+ * A chunked dataset with unlimited or max. dimension setting
*
* Return: Non-negative on success: dataset can be monitored
- * Negative on failure: dataset cannot be monitored
+ * Negative on failure: dataset cannot be monitored
*
* Programmer: Vailin Choi; August 2010
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
static herr_t
@@ -529,9 +531,9 @@ check_dataset(hid_t fid, char *dsetname)
/* Open the dataset */
if((did = H5Dopen2(fid, dsetname, H5P_DEFAULT)) < 0) {
- error_msg("unable to open dataset \"%s\"\n", dsetname);
- ret_value = FAIL;
- goto done;
+ error_msg("unable to open dataset \"%s\"\n", dsetname);
+ ret_value = FAIL;
+ goto done;
}
/* Get dataset's creation property list */
@@ -558,28 +560,28 @@ check_dataset(hid_t fid, char *dsetname)
/* Get dataset's dataspace */
if((sid = H5Dget_space(did)) < 0) {
- error_msg("can't get dataset's dataspace\"%s\"\n", dsetname);
- ret_value = FAIL;
- goto done;
+ error_msg("can't get dataset's dataspace\"%s\"\n", dsetname);
+ ret_value = FAIL;
+ goto done;
}
/* Get dimension size of dataset's dataspace */
if((ndims = H5Sget_simple_extent_dims(sid, cur_dims, max_dims)) < 0) {
- error_msg("can't get dataspace dimensions for dataset \"%s\"\n", dsetname);
- ret_value = FAIL;
- goto done;
+ error_msg("can't get dataspace dimensions for dataset \"%s\"\n", dsetname);
+ ret_value = FAIL;
+ goto done;
}
/* Check whether dataset has unlimited dimension or max. dimension setting */
for(u = 0; u < (unsigned)ndims; u++)
if(max_dims[u] == H5S_UNLIMITED || cur_dims[u] != max_dims[u]) {
- unlim_max_dims = TRUE;
+ unlim_max_dims = TRUE;
break;
}
if(!unlim_max_dims) {
- error_msg("\"%s\" should have unlimited or max. dimension setting\n", dsetname);
- ret_value = FAIL;
+ error_msg("\"%s\" should have unlimited or max. dimension setting\n", dsetname);
+ ret_value = FAIL;
}
done:
@@ -587,9 +589,9 @@ done:
/* Closing */
H5E_BEGIN_TRY
- H5Sclose(sid);
- H5Pclose(dcp);
- H5Dclose(did);
+ H5Sclose(sid);
+ H5Pclose(dcp);
+ H5Dclose(did);
H5E_END_TRY
return(ret_value);
@@ -605,7 +607,6 @@ done:
*
* Programmer: Vailin Choi; August 2010
*
- * Modifications:
*
*-------------------------------------------------------------------------
*/
@@ -627,7 +628,6 @@ leave(int ret)
*
* Programmer: Vailin Choi; August 2010
*
- * Modifications:
*-------------------------------------------------------------------------
*/
static void
@@ -683,8 +683,6 @@ usage(const char *prog)
*
* Programmer: Vailin Choi; August 2010
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
static void
@@ -704,7 +702,7 @@ parse_command_line(int argc, const char *argv[])
switch ((char)opt) {
case '?':
case 'h': /* --help */
- usage(h5tools_getprogname());
+ usage(h5tools_getprogname());
leave(EXIT_SUCCESS);
case 'V': /* --version */
@@ -713,63 +711,63 @@ parse_command_line(int argc, const char *argv[])
break;
case 'w': /* --width=N */
- g_display_width = (int)HDstrtol(opt_arg, NULL, 0);
- if(g_display_width < 0) {
- usage(h5tools_getprogname());
- leave(EXIT_FAILURE);
- }
+ g_display_width = (int)HDstrtol(opt_arg, NULL, 0);
+ if(g_display_width < 0) {
+ usage(h5tools_getprogname());
+ leave(EXIT_FAILURE);
+ }
break;
case 'd': /* --dim */
- g_monitor_size_only = TRUE;
+ g_monitor_size_only = TRUE;
break;
case 'S': /* --simple */
- g_simple_output = TRUE;
+ g_simple_output = TRUE;
break;
- case 'l': /* --label */
- g_label = TRUE;
+ case 'l': /* --label */
+ g_label = TRUE;
break;
case 'p': /* --polling=N */
- /* g_polling_interval = HDstrtod(opt_arg, NULL); */
- if((tmp = (int)HDstrtol(opt_arg, NULL, 10)) <= 0) {
- usage(h5tools_getprogname());
- leave(EXIT_FAILURE);
- }
- g_polling_interval = (unsigned)tmp;
+ /* g_polling_interval = HDstrtod(opt_arg, NULL); */
+ if((tmp = (int)HDstrtol(opt_arg, NULL, 10)) <= 0) {
+ usage(h5tools_getprogname());
+ leave(EXIT_FAILURE);
+ }
+ g_polling_interval = (unsigned)tmp;
break;
case 'f': /* --fields=<list_of_fields> */
- if(g_list_of_fields == NULL) {
- if((g_list_of_fields = HDstrdup(opt_arg)) == NULL) {
- error_msg("memory allocation failed (file %s:line %d)\n",
- __FILE__, __LINE__);
- leave(EXIT_FAILURE);
- }
- } else {
- char *str;
-
- if((str = HDstrdup(opt_arg)) == NULL) {
- error_msg("memory allocation failed (file %s:line %d)\n",
- __FILE__, __LINE__);
- leave(EXIT_FAILURE);
- }
- if((g_list_of_fields = (char *)HDrealloc(g_list_of_fields, HDstrlen(g_list_of_fields) + HDstrlen(str) + 2)) == NULL) {
- error_msg("memory allocation failed (file %s:line %d)\n",
- __FILE__, __LINE__);
- leave(EXIT_FAILURE);
-
- }
- HDstrcat(g_list_of_fields, FIELD_SEP);
- HDstrcat(g_list_of_fields, str);
- }
+ if(g_list_of_fields == NULL) {
+ if((g_list_of_fields = HDstrdup(opt_arg)) == NULL) {
+ error_msg("memory allocation failed (file %s:line %d)\n",
+ __FILE__, __LINE__);
+ leave(EXIT_FAILURE);
+ }
+ } else {
+ char *str;
+
+ if((str = HDstrdup(opt_arg)) == NULL) {
+ error_msg("memory allocation failed (file %s:line %d)\n",
+ __FILE__, __LINE__);
+ leave(EXIT_FAILURE);
+ }
+ if((g_list_of_fields = (char *)HDrealloc(g_list_of_fields, HDstrlen(g_list_of_fields) + HDstrlen(str) + 2)) == NULL) {
+ error_msg("memory allocation failed (file %s:line %d)\n",
+ __FILE__, __LINE__);
+ leave(EXIT_FAILURE);
+
+ }
+ HDstrcat(g_list_of_fields, FIELD_SEP);
+ HDstrcat(g_list_of_fields, str);
+ }
break;
default:
- usage(h5tools_getprogname());
+ usage(h5tools_getprogname());
leave(EXIT_FAILURE);
}
}
@@ -778,7 +776,7 @@ parse_command_line(int argc, const char *argv[])
/* check for object to be processed */
if (argc <= opt_ind) {
error_msg("missing dataset name\n");
- usage(h5tools_getprogname());
+ usage(h5tools_getprogname());
leave(EXIT_FAILURE);
}
} /* parse_command_line() */
@@ -788,7 +786,7 @@ parse_command_line(int argc, const char *argv[])
* Function: catch_signal
*
* Purpose: The signal handler to catch the signals:
- * SIGTERM and SIGINT and exit from h5watch
+ * SIGTERM and SIGINT and set flag to get out of the main loop
*
* Return: No return
*
@@ -798,9 +796,8 @@ parse_command_line(int argc, const char *argv[])
*/
static void catch_signal(int H5_ATTR_UNUSED signo)
{
- /* Exit from h5watch */
- leave(EXIT_SUCCESS);
-
+ /* Set the flag to get out of the main loop */
+ g_user_interrupt = TRUE;
} /* catch_signal() */
@@ -819,14 +816,14 @@ static void catch_signal(int H5_ATTR_UNUSED signo)
int
main(int argc, const char *argv[])
{
- char drivername[50];
- char *fname = NULL;
- char *dname = NULL;
- void *edata;
- H5E_auto2_t func;
- char *x;
- hid_t fid = -1;
- hid_t fapl = -1;
+ char drivername[50]; /* VFD name */
+ char *fname = NULL; /* File name */
+ char *dname = NULL; /* Dataset name */
+ void *edata; /* Error reporting */
+ H5E_auto2_t func; /* Error reporting */
+ char *x; /* Temporary string pointer */
+ hid_t fid = -1; /* File ID */
+ hid_t fapl = -1; /* File access property list */
/* Set up tool name and exit status */
h5tools_setprogname(PROGRAMNAME);
@@ -841,14 +838,14 @@ main(int argc, const char *argv[])
/* To exit from h5watch for SIGTERM signal */
if(HDsignal(SIGTERM, catch_signal) == SIG_ERR) {
- error_msg("An error occurred while setting a signal handler.\n");
- leave(EXIT_FAILURE);
+ error_msg("An error occurred while setting a signal handler.\n");
+ leave(EXIT_FAILURE);
}
/* To exit from h5watch for SIGINT signal */
if(HDsignal(SIGINT, catch_signal) == SIG_ERR) {
error_msg("An error occurred while setting a signal handler.\n");
- leave(EXIT_FAILURE);
+ leave(EXIT_FAILURE);
}
/* parse command line options */
@@ -856,7 +853,7 @@ main(int argc, const char *argv[])
if(argc <= opt_ind) {
error_msg("missing dataset name\n");
- usage(h5tools_getprogname());
+ usage(h5tools_getprogname());
leave(EXIT_FAILURE);
}
@@ -876,9 +873,9 @@ main(int argc, const char *argv[])
* doesn't exist).
*/
if((fname = HDstrdup(argv[opt_ind])) == NULL) {
- error_msg("memory allocation failed (file %s:line %d)\n",
- __FILE__, __LINE__);
- h5tools_setstatus(EXIT_FAILURE);
+ error_msg("memory allocation failed (file %s:line %d)\n",
+ __FILE__, __LINE__);
+ h5tools_setstatus(EXIT_FAILURE);
}
/* Create a copy of file access property list */
@@ -890,80 +887,80 @@ main(int argc, const char *argv[])
return -1;
do {
- while(fname && *fname) {
- fid = h5tools_fopen(fname, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl, NULL, drivername, sizeof drivername);
-
- if(fid >= 0) {
- HDfprintf(stdout, "Opened \"%s\" with %s driver.\n", fname, drivername);
- break; /*success*/
- } /* end if */
-
- /* Shorten the file name; lengthen the object name */
- x = dname;
- dname = HDstrrchr(fname, '/');
- if(x)
- *x = '/';
- if(!dname)
- break;
- *dname = '\0';
- } /* end while */
+ while(fname && *fname) {
+ fid = h5tools_fopen(fname, H5F_ACC_RDONLY|H5F_ACC_SWMR_READ, fapl, NULL, drivername, sizeof drivername);
+
+ if(fid >= 0) {
+ HDfprintf(stdout, "Opened \"%s\" with %s driver.\n", fname, drivername);
+ break; /*success*/
+ } /* end if */
+
+ /* Shorten the file name; lengthen the object name */
+ x = dname;
+ dname = HDstrrchr(fname, '/');
+ if(x)
+ *x = '/';
+ if(!dname)
+ break;
+ *dname = '\0';
+ } /* end while */
/* Try opening the file again if somehow unstable */
} while(g_retry-- > 0 && fid == FAIL);
if(fid < 0) {
- error_msg("unable to open file \"%s\"\n", fname);
- if(fname) HDfree(fname);
- if(fapl >= 0) H5Pclose(fapl);
- leave(EXIT_FAILURE);
+ error_msg("unable to open file \"%s\"\n", fname);
+ if(fname) HDfree(fname);
+ if(fapl >= 0) H5Pclose(fapl);
+ leave(EXIT_FAILURE);
}
if(!dname) {
- error_msg("no dataset specified\n");
- h5tools_setstatus(EXIT_FAILURE);
+ error_msg("no dataset specified\n");
+ h5tools_setstatus(EXIT_FAILURE);
} else {
- *dname = '/';
- x = dname;
- if((dname = HDstrdup(dname)) == NULL) {
- error_msg("memory allocation failed (file %s:line %d)\n",
- __FILE__, __LINE__);
- h5tools_setstatus(EXIT_FAILURE);
- } else {
- *x = '\0';
- /* Validate dataset */
- if(check_dataset(fid, dname) < 0)
- h5tools_setstatus(EXIT_FAILURE);
- /* Validate input "fields" */
- else if(g_list_of_fields && *g_list_of_fields)
- if(process_cmpd_fields(fid, dname) < 0)
- h5tools_setstatus(EXIT_FAILURE);
- }
+ *dname = '/';
+ x = dname;
+ if((dname = HDstrdup(dname)) == NULL) {
+ error_msg("memory allocation failed (file %s:line %d)\n",
+ __FILE__, __LINE__);
+ h5tools_setstatus(EXIT_FAILURE);
+ } else {
+ *x = '\0';
+ /* Validate dataset */
+ if(check_dataset(fid, dname) < 0)
+ h5tools_setstatus(EXIT_FAILURE);
+ /* Validate input "fields" */
+ else if(g_list_of_fields && *g_list_of_fields)
+ if(process_cmpd_fields(fid, dname) < 0)
+ h5tools_setstatus(EXIT_FAILURE);
+ }
}
    /* If everything is fine, start monitoring the dataset */
if(h5tools_getstatus() != EXIT_FAILURE)
- if(monitor_dataset(fid, dname) < 0)
- h5tools_setstatus(EXIT_FAILURE);
+ if(monitor_dataset(fid, dname) < 0)
+ h5tools_setstatus(EXIT_FAILURE);
/* Free spaces */
if(fname) HDfree(fname);
if(dname) HDfree(dname);
if(g_list_of_fields) HDfree(g_list_of_fields);
if(g_listv) {
- H5LD_clean_vector(g_listv);
- HDfree(g_listv);
+ H5LD_clean_vector(g_listv);
+ HDfree(g_listv);
}
if(g_dup_fields) HDfree(g_dup_fields);
/* Close the file access property list */
if(fapl >= 0 && H5Pclose(fapl) < 0) {
- error_msg("unable to close file access property list\n");
- h5tools_setstatus(EXIT_FAILURE);
+ error_msg("unable to close file access property list\n");
+ h5tools_setstatus(EXIT_FAILURE);
}
/* Close the file */
if(H5Fclose(fid) < 0) {
- error_msg("unable to close file\n");
- h5tools_setstatus(EXIT_FAILURE);
+ error_msg("unable to close file\n");
+ h5tools_setstatus(EXIT_FAILURE);
}
H5Eset_auto2(H5E_DEFAULT, func, edata);
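The behavioral change in this file is the interrupt handling: catch_signal() no longer calls leave() directly but sets g_user_interrupt, and the polling loop in monitor_dataset() tests that flag, so the cleanup code after the loop (flush, H5Dclose, the done: block) still runs. A minimal, self-contained sketch of the same pattern, with illustrative names only (h5watch uses an hbool_t flag where this sketch uses the async-signal-safe sig_atomic_t):

#include <signal.h>
#include <stdio.h>
#include <unistd.h>

/* Sketch: the handler only records the request; the loop notices it and
 * falls through to the normal cleanup path instead of exiting abruptly. */
static volatile sig_atomic_t stop_requested = 0;

static void
on_signal(int signo)
{
    (void)signo;                /* unused */
    stop_requested = 1;
}

int
main(void)
{
    if (signal(SIGINT, on_signal) == SIG_ERR ||
        signal(SIGTERM, on_signal) == SIG_ERR)
        return 1;

    while (!stop_requested) {
        /* poll for changes here, e.g. once per second */
        sleep(1);
    }

    /* normal shutdown: flush output, release resources */
    fflush(stdout);
    puts("interrupted, cleaning up");
    return 0;
}

Keeping the handler to a single flag assignment keeps it async-signal-safe and is what lets the new HDfflush(stdout) after the while loop, and the H5Dclose() in the done: block, execute when the user presses Ctrl-C.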
diff --git a/hl/tools/h5watch/h5watchgentest.c b/hl/tools/h5watch/h5watchgentest.c
index 12298ca..c5b76de 100644
--- a/hl/tools/h5watch/h5watchgentest.c
+++ b/hl/tools/h5watch/h5watchgentest.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "hdf5.h"
diff --git a/hl/tools/h5watch/swmr_check_compat_vfd.c b/hl/tools/h5watch/swmr_check_compat_vfd.c
index 87b87c4..7d11a73 100644
--- a/hl/tools/h5watch/swmr_check_compat_vfd.c
+++ b/hl/tools/h5watch/swmr_check_compat_vfd.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Purpose: This is a small program that checks if the HDF5_DRIVER
diff --git a/hl/tools/h5watch/testh5watch.sh.in b/hl/tools/h5watch/testh5watch.sh.in
index 89fbf95..05e66bf 100644
--- a/hl/tools/h5watch/testh5watch.sh.in
+++ b/hl/tools/h5watch/testh5watch.sh.in
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Tests for the h5watch tool
#
@@ -30,6 +28,10 @@ if [[ $rc != 0 ]] ; then
exit 0
fi
+#echo "h5watch tests are skipped temporarily."
+#echo
+#exit 0
+
H5WATCH=h5watch # The tool name
H5WATCH_BIN=`pwd`/$H5WATCH # The path of H5WATCH
EXTEND_DSET=extend_dset # Routine to extend the dataset when watching
@@ -42,6 +44,7 @@ GEN_TEST=h5watchgentest # Generate HDF5 file with various datasets
GEN_TEST_BIN=`pwd`/$GEN_TEST # Path of the binary GEN_TEST
WATCHFILE=`pwd`/WATCH.h5 # The HDF5 file generated to test h5watch
TESTFILE=TEST.h5 # The temporary file (a copy of WATCH.h5) used by tests
+TRY_MAX=10 # Maximum number of times to retry a failed test
#
# These 3 defines should be the same as the defines in ./extend_dset.c
WRITER_MESSAGE=writer_message # The message file created by the "extend" process
@@ -114,29 +117,30 @@ TOOLTEST() {
exitcode=$?
cat $actual_err >> $actual
if [ $exitcode -ne $retvalexpect ]; then
- $ECHO "*FAILED*"
- nerrors="`expr $nerrors + 1`"
- if [ yes = "$verbose" ]; then
- $ECHO "test returned with exit code $exitcode"
- $ECHO "test output: (up to $NLINES lines)"
- head -$NLINES $actual
- $ECHO "***end of test output***"
- $ECHO ""
- fi
- elif $CMP $expect $actual; then
- $ECHO " PASSED"
- else
$ECHO "*FAILED*"
- $ECHO " Expected result differs from actual result"
- nerrors="`expr $nerrors + 1`"
- test yes = "$verbose" && $DIFF $expect $actual |sed 's/^/ /'
- fi
+ nerrors="`expr $nerrors + 1`"
+ if [ yes = "$verbose" ]; then
+ $ECHO "test returned with exit code $exitcode"
+ $ECHO "test output: (up to $NLINES lines)"
+ head -$NLINES $actual
+ $ECHO "***end of test output***"
+ $ECHO ""
+ fi
+ elif $CMP $expect $actual; then
+ $ECHO " PASSED"
+ else
+ $ECHO "*FAILED*"
+ $ECHO " Expected result differs from actual result"
+ nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF $expect $actual |sed 's/^/ /'
+ fi
# Clean up output file
if test -z "$HDF5_NOCLEANUP"; then
rm -f $actual $actual_err
fi
}
+
#
#
#
@@ -146,8 +150,13 @@ TOOLTEST() {
# $1 -- the specified dataset to watch and to extend
# $2 -- the options to h5watch (can be NULL)
# $3 -- expected output from watching the object
+# $4 -- action 1
+# $5 -- action 2
#
TEST_WATCH() {
+try=0
+while [ $try -lt $TRY_MAX ] ;
+do
cp $WATCHFILE $TESTFILE # Copy the file being watched/extended to a temporary file
actual="../testfiles/`basename $3 .ddl`.out" # The actual output
expect="$srcdir/../testfiles/$3" # The expected output
@@ -161,7 +170,7 @@ TEST_WATCH() {
rm -f $WRITER_MESSAGE # Remove the file just to be sure
rm -f $READER_MESSAGE # Remove the file just to be sure
#
- $EXTEND_BIN $TESTFILE $1 & # Extend the dataset; put in background
+ $EXTEND_BIN $TESTFILE $1 $4 $5& # Extend the dataset; put in background
extend_pid=$! # Get "extend" process ID
#
# Wait for message from "extend_dset" process to start h5watch--
@@ -174,10 +183,10 @@ TEST_WATCH() {
do
t1=`date +%s` # Get current time in seconds
difft=`expr $t1 - $t0` # Calculate the time difference
- if [ -e $WRITER_MESSAGE ]; then # If message file is found:
+ if [ -e $WRITER_MESSAGE ]; then # If message file is found:
mexist=1 # indicate the message file is found
- rm $WRITER_MESSAGE # remove the message file
- break # get out of the while loop
+ rm $WRITER_MESSAGE # remove the message file
+ break # get out of the while loop
fi
done;
#
@@ -203,7 +212,7 @@ TEST_WATCH() {
extend_exit=$? # Collect "extend" process' exit code
sleep 1 # Sleep to make sure output is flushed
kill $watch_pid # Kill h5watch
- wait $watch_pid # Wait for "h5watch" process to complete
+ wait $watch_pid # Wait for "h5watch" process to complete
#
if [ $extend_exit -ne 0 ]; then # Error returned from "extend" process
$ECHO "*FAILED*"
@@ -216,13 +225,21 @@ TEST_WATCH() {
$ECHO ""
fi
elif $CMP $expect $actual; then # Compare actual output with expected output
+ try=$TRY_MAX
$ECHO " PASSED"
else
- $ECHO "*FAILED*" # Actual and expected outputs are different
- $ECHO " Expected result differs from actual result"
- nerrors="`expr $nerrors + 1`"
- if test yes = "$verbose"; then
- $DIFF $expect $actual |sed 's/^/ /'
+ try="`expr $try + 1`"
+ if [ $try -lt $TRY_MAX ]; then
+ $ECHO "*RETRY"
+ rm -f $actual
+ rm -f $TESTFILE
+ else
+ $ECHO "*FAILED*" # Actual and expected outputs are different
+ $ECHO " Expected result differs from actual result"
+ nerrors="`expr $nerrors + 1`"
+ if test yes = "$verbose"; then
+ $DIFF $expect $actual |sed 's/^/ /'
+ fi
fi
fi
#
@@ -232,45 +249,45 @@ TEST_WATCH() {
rm -f $actual
fi
fi
+done;
}
-
##############################################################################
##############################################################################
-### T H E T E S T S ###
+### T H E T E S T S ###
##############################################################################
##############################################################################
#
#
#################################################################################################
-# #
-# WATCH.h5: file with various types of datasets for testing-- #
-# The following datasets are chunked, H5D_ALLOC_TIME_INCR, max. dimensional setting: #
-# DSET_ONE: one-dimensional dataset #
-# DSET_TWO: two-dimensional dataset #
-# DSET_CMPD: one-dimensional dataset with compound type #
-# DSET_CMPD_ESC: one-dimensional dataset with compound type & escape/separator characters #
-# DSET_CMPD_TWO: two-dimensional dataset with compound type #
-# #
-# The following datasets are one-dimensional, chunked, max. dimension setting: #
-# DSET_ALLOC_EARLY: dataset with H5D_ALLOC_TIME_EARLY #
-# DSET_ALLOC_LATE: dataset H5D_ALLOC_TIME_LATE #
-# #
-# The following datasets are one-dimensional: #
-# DSET_NONE: fixed dimension setting, contiguous, H5D_ALLOC_TIME_LATE #
-# DSET_NOMAX: fixed dimension setting, chunked, H5D_ALLOC_TIME_INCR #
-# #
+# #
+# WATCH.h5: file with various types of datasets for testing-- #
+# The following datasets are chunked, H5D_ALLOC_TIME_INCR, max. dimensional setting: #
+# DSET_ONE: one-dimensional dataset #
+# DSET_TWO: two-dimensional dataset #
+# DSET_CMPD: one-dimensional dataset with compound type #
+# DSET_CMPD_ESC: one-dimensional dataset with compound type & escape/separator characters #
+# DSET_CMPD_TWO: two-dimensional dataset with compound type #
+# #
+# The following datasets are one-dimensional, chunked, max. dimension setting: #
+# DSET_ALLOC_EARLY: dataset with H5D_ALLOC_TIME_EARLY #
+# DSET_ALLOC_LATE: dataset H5D_ALLOC_TIME_LATE #
+# #
+# The following datasets are one-dimensional: #
+# DSET_NONE: fixed dimension setting, contiguous, H5D_ALLOC_TIME_LATE #
+# DSET_NOMAX: fixed dimension setting, chunked, H5D_ALLOC_TIME_INCR #
+# #
#################################################################################################
#
#
#################################################################################################
-# #
-# Tests on expected failures: #
-# Invalid file name #
-# Unable to find dataset, invalid dataset #
-# DSET_NONE and DSET_NOMAX #
-# Invalid input to options --width and --polling #
-# Invalid field names for -f option #
-# #
+# #
+# Tests on expected failures: #
+# Invalid file name #
+# Unable to find dataset, invalid dataset #
+# DSET_NONE and DSET_NOMAX #
+# Invalid input to options --width and --polling #
+# Invalid field names for -f option #
+# #
#################################################################################################
#
# Generate file with various types of datasets
@@ -315,13 +332,13 @@ $GEN_TEST_BIN
# TEST.h5/DSET_CMPD_TWO
# TEST.h5/DSET_CMPD_ESC
#
-TEST_WATCH DSET_ONE '' w-ext-one.ddl
-TEST_WATCH DSET_ALLOC_EARLY '' w-ext-early.ddl
-TEST_WATCH DSET_ALLOC_LATE '' w-ext-late.ddl
-TEST_WATCH DSET_CMPD '' w-ext-cmpd.ddl
-TEST_WATCH DSET_TWO '' w-ext-two.ddl
-TEST_WATCH DSET_CMPD_TWO '' w-ext-cmpd-two.ddl
-TEST_WATCH DSET_CMPD_ESC '' w-ext-cmpd-esc.ddl
+TEST_WATCH DSET_ONE '' w-ext-one.ddl 3 0 #Increase
+TEST_WATCH DSET_ALLOC_EARLY '' w-ext-early.ddl -1 0 #Decrease
+TEST_WATCH DSET_ALLOC_LATE '' w-ext-late.ddl 0 0 #Same
+TEST_WATCH DSET_CMPD '' w-ext-cmpd.ddl 3 0 #Increase
+TEST_WATCH DSET_CMPD_ESC '' w-ext-cmpd-esc.ddl -1 0 #Decrease
+TEST_WATCH DSET_TWO '' w-ext-two.ddl 2 2 #Increase, Increase
+TEST_WATCH DSET_CMPD_TWO '' w-ext-cmpd-two.ddl 2 -9 #Increase, Decrease
#
echo "DONE WITH 2nd SET OF TESTS"
#
@@ -335,25 +352,25 @@ echo "DONE WITH 2nd SET OF TESTS"
# TEST.h5/DSET_CMPD with --fields=field1,field2
# TEST.h5/DSET_CMPD with --fields=field2.b,field4
# TEST.h5/DSET_CMPD with --fields=field2.b.a --fields=field2.c
-TEST_WATCH DSET_CMPD --fields=field1,field2 w-ext-cmpd-f1.ddl
-TEST_WATCH DSET_CMPD --fields=field2.b,field4 w-ext-cmpd-f2.ddl
-TEST_WATCH DSET_CMPD '--fields=field2.b.a --fields=field2.c' w-ext-cmpd-ff3.ddl
+TEST_WATCH DSET_CMPD --fields=field1,field2 w-ext-cmpd-f1.ddl -9 0 #Decrease
+TEST_WATCH DSET_CMPD --fields=field2.b,field4 w-ext-cmpd-f2.ddl 3 0 #Increase
+TEST_WATCH DSET_CMPD '--fields=field2.b.a --fields=field2.c' w-ext-cmpd-ff3.ddl 0 0 #Same
#
#
# TEST.h5/DSET_CMP_TWO with --fields=field1,field2
# TEST.h5/DSET_CMPD_TWO with --fields=field2.b --fields=field4
# TEST.h5/DSET_CMPD_TWO with --fields=field2.b.a,field2.c
-TEST_WATCH DSET_CMPD_TWO --fields=field1,field2 w-ext-cmpd-two-f1.ddl
-TEST_WATCH DSET_CMPD_TWO '--fields=field2.b --fields=field4' w-ext-cmpd-two-ff2.ddl
-TEST_WATCH DSET_CMPD_TWO --fields=field2.b.a,field2.c w-ext-cmpd-two-f3.ddl
+TEST_WATCH DSET_CMPD_TWO --fields=field1,field2 w-ext-cmpd-two-f1.ddl 2 0 #Increase, Same
+TEST_WATCH DSET_CMPD_TWO '--fields=field2.b --fields=field4' w-ext-cmpd-two-ff2.ddl -1 2 #Decrease, Increase
+TEST_WATCH DSET_CMPD_TWO --fields=field2.b.a,field2.c w-ext-cmpd-two-f3.ddl -1 -3 #Decrease, Decrease
#
#
# TEST.h5/DSET_CMPD_ESC with --fields=field\,1,field2\.
# TEST.h5/DSET_CMPD_ESC with --fields=field2\..\,b --fields=field4\,
# TEST.h5/DSET_CMPD_ESC with --fields=field2\..\,b.a,field2\..\\K
-TEST_WATCH DSET_CMPD_ESC '--fields=field\,1,field2\.' w-ext-cmpd-esc-f1.ddl
-TEST_WATCH DSET_CMPD_ESC '--fields=field2\..\,b --fields=field4\,' w-ext-cmpd-esc-ff2.ddl
-TEST_WATCH DSET_CMPD_ESC '--fields=field2\..\,b.a,field2\..\\K' w-ext-cmpd-esc-f3.ddl
+TEST_WATCH DSET_CMPD_ESC '--fields=field\,1,field2\.' w-ext-cmpd-esc-f1.ddl 3 0 #Increase
+TEST_WATCH DSET_CMPD_ESC '--fields=field2\..\,b --fields=field4\,' w-ext-cmpd-esc-ff2.ddl -1 0 #Decrease
+TEST_WATCH DSET_CMPD_ESC '--fields=field2\..\,b.a,field2\..\\K' w-ext-cmpd-esc-f3.ddl 3 0 #Increase
#
#
echo "DONE WITH 3rd SET OF TESTS"
@@ -372,11 +389,11 @@ echo "DONE WITH 3rd SET OF TESTS"
# TEST.h5/DSET_TWO with --width=60 option
# TEST.h5/DSET_CMPD with --label option
# TEST.h5/DSET_ONE with --simple option
-TEST_WATCH DSET_ONE --dim w-ext-one-d.ddl
-TEST_WATCH DSET_TWO --dim w-ext-two-d.ddl
-TEST_WATCH DSET_TWO --width=30 w-ext-two-width.ddl
-TEST_WATCH DSET_CMPD --label w-ext-cmpd-label.ddl
-TEST_WATCH DSET_ONE --simple w-ext-one-simple.ddl
+TEST_WATCH DSET_ONE --dim w-ext-one-d.ddl 3 0 #Increase
+TEST_WATCH DSET_TWO --dim w-ext-two-d.ddl -2 0 #Decrease, Same
+TEST_WATCH DSET_TWO --width=30 w-ext-two-width.ddl 0 2 #Same, Increase
+TEST_WATCH DSET_CMPD --label w-ext-cmpd-label.ddl 3 0 #Increase
+TEST_WATCH DSET_ONE --simple w-ext-one-simple.ddl 2 0 #Increase
#
echo "DONE WITH 4th SET OF TESTS"
#
diff --git a/hl/tools/testfiles/w-ext-cmpd-esc-f1.ddl b/hl/tools/testfiles/w-ext-cmpd-esc-f1.ddl
index d85594d..09255cc 100644
--- a/hl/tools/testfiles/w-ext-cmpd-esc-f1.ddl
+++ b/hl/tools/testfiles/w-ext-cmpd-esc-f1.ddl
@@ -4,8 +4,3 @@ dimension 0: 10->13 (increases)
Data:
(10) {1, {2, {2, 2, 2}, 2}}, {2, {3, {3, 3, 3}, 3}},
(12) {3, {4, {4, 4, 4}, 4}}
-dimension 0: 13->12 (decreases)
-dimension 0: 12->1 (decreases)
-dimension 0: 1->3 (increases)
- Data:
- (1) {1, {2, {2, 2, 2}, 2}}, {2, {3, {3, 3, 3}, 3}}
diff --git a/hl/tools/testfiles/w-ext-cmpd-esc-f3.ddl b/hl/tools/testfiles/w-ext-cmpd-esc-f3.ddl
index ab6c294..00ed390 100644
--- a/hl/tools/testfiles/w-ext-cmpd-esc-f3.ddl
+++ b/hl/tools/testfiles/w-ext-cmpd-esc-f3.ddl
@@ -3,8 +3,3 @@ Monitoring dataset /DSET_CMPD_ESC...
dimension 0: 10->13 (increases)
Data:
(10) {{{2}}, {2}}, {{{3}}, {3}}, {{{4}}, {4}}
-dimension 0: 13->12 (decreases)
-dimension 0: 12->1 (decreases)
-dimension 0: 1->3 (increases)
- Data:
- (1) {{{2}}, {2}}, {{{3}}, {3}}
diff --git a/hl/tools/testfiles/w-ext-cmpd-esc-ff2.ddl b/hl/tools/testfiles/w-ext-cmpd-esc-ff2.ddl
index 039cbf3..9b9a971 100644
--- a/hl/tools/testfiles/w-ext-cmpd-esc-ff2.ddl
+++ b/hl/tools/testfiles/w-ext-cmpd-esc-ff2.ddl
@@ -1,11 +1,3 @@
Opened "TEST.h5" with sec2 driver.
Monitoring dataset /DSET_CMPD_ESC...
-dimension 0: 10->13 (increases)
- Data:
- (10) {{{2, 2, 2}}, {4, 4}}, {{{3, 3, 3}}, {5, 5}}, {{{4, 4, 4}}, {6,
- (12) 6}}
-dimension 0: 13->12 (decreases)
-dimension 0: 12->1 (decreases)
-dimension 0: 1->3 (increases)
- Data:
- (1) {{{2, 2, 2}}, {4, 4}}, {{{3, 3, 3}}, {5, 5}}
+dimension 0: 10->9 (decreases)
diff --git a/hl/tools/testfiles/w-ext-cmpd-esc.ddl b/hl/tools/testfiles/w-ext-cmpd-esc.ddl
index 900b3f2..9b9a971 100644
--- a/hl/tools/testfiles/w-ext-cmpd-esc.ddl
+++ b/hl/tools/testfiles/w-ext-cmpd-esc.ddl
@@ -1,13 +1,3 @@
Opened "TEST.h5" with sec2 driver.
Monitoring dataset /DSET_CMPD_ESC...
-dimension 0: 10->13 (increases)
- Data:
- (10) {1, {2, {2, 2, 2}, 2}, 3, {4, 4}},
- (11) {2, {3, {3, 3, 3}, 3}, 4, {5, 5}},
- (12) {3, {4, {4, 4, 4}, 4}, 5, {6, 6}}
-dimension 0: 13->12 (decreases)
-dimension 0: 12->1 (decreases)
-dimension 0: 1->3 (increases)
- Data:
- (1) {1, {2, {2, 2, 2}, 2}, 3, {4, 4}}, {2, {3, {3, 3, 3}, 3}, 4, {5,
- (2) 5}}
+dimension 0: 10->9 (decreases)
diff --git a/hl/tools/testfiles/w-ext-cmpd-f1.ddl b/hl/tools/testfiles/w-ext-cmpd-f1.ddl
index 7e0a066..029e4db 100644
--- a/hl/tools/testfiles/w-ext-cmpd-f1.ddl
+++ b/hl/tools/testfiles/w-ext-cmpd-f1.ddl
@@ -1,11 +1,3 @@
Opened "TEST.h5" with sec2 driver.
Monitoring dataset /DSET_CMPD...
-dimension 0: 10->13 (increases)
- Data:
- (10) {1, {2, {2, 2, 2}, 2}}, {2, {3, {3, 3, 3}, 3}},
- (12) {3, {4, {4, 4, 4}, 4}}
-dimension 0: 13->12 (decreases)
-dimension 0: 12->1 (decreases)
-dimension 0: 1->3 (increases)
- Data:
- (1) {1, {2, {2, 2, 2}, 2}}, {2, {3, {3, 3, 3}, 3}}
+dimension 0: 10->1 (decreases)
diff --git a/hl/tools/testfiles/w-ext-cmpd-f2.ddl b/hl/tools/testfiles/w-ext-cmpd-f2.ddl
index ea69222..362cc94 100644
--- a/hl/tools/testfiles/w-ext-cmpd-f2.ddl
+++ b/hl/tools/testfiles/w-ext-cmpd-f2.ddl
@@ -4,8 +4,3 @@ dimension 0: 10->13 (increases)
Data:
(10) {{{2, 2, 2}}, {4, 4}}, {{{3, 3, 3}}, {5, 5}}, {{{4, 4, 4}}, {6,
(12) 6}}
-dimension 0: 13->12 (decreases)
-dimension 0: 12->1 (decreases)
-dimension 0: 1->3 (increases)
- Data:
- (1) {{{2, 2, 2}}, {4, 4}}, {{{3, 3, 3}}, {5, 5}}
diff --git a/hl/tools/testfiles/w-ext-cmpd-ff3.ddl b/hl/tools/testfiles/w-ext-cmpd-ff3.ddl
index 84e2919..6a54b97 100644
--- a/hl/tools/testfiles/w-ext-cmpd-ff3.ddl
+++ b/hl/tools/testfiles/w-ext-cmpd-ff3.ddl
@@ -1,10 +1,2 @@
Opened "TEST.h5" with sec2 driver.
Monitoring dataset /DSET_CMPD...
-dimension 0: 10->13 (increases)
- Data:
- (10) {{{2}}, {2}}, {{{3}}, {3}}, {{{4}}, {4}}
-dimension 0: 13->12 (decreases)
-dimension 0: 12->1 (decreases)
-dimension 0: 1->3 (increases)
- Data:
- (1) {{{2}}, {2}}, {{{3}}, {3}}
diff --git a/hl/tools/testfiles/w-ext-cmpd-label.ddl b/hl/tools/testfiles/w-ext-cmpd-label.ddl
index 84534ea..394d5a6 100644
--- a/hl/tools/testfiles/w-ext-cmpd-label.ddl
+++ b/hl/tools/testfiles/w-ext-cmpd-label.ddl
@@ -8,11 +8,3 @@ dimension 0: 10->13 (increases)
(11) field4={a=5, b=5}},
(12) {field1=3, field2={a=4, b={a=4, b=4, c=4}, c=4}, field3=5,
(12) field4={a=6, b=6}}
-dimension 0: 13->12 (decreases)
-dimension 0: 12->1 (decreases)
-dimension 0: 1->3 (increases)
- Data:
- (1) {field1=1, field2={a=2, b={a=2, b=2, c=2}, c=2}, field3=3,
- (1) field4={a=4, b=4}},
- (2) {field1=2, field2={a=3, b={a=3, b=3, c=3}, c=3}, field3=4,
- (2) field4={a=5, b=5}}
diff --git a/hl/tools/testfiles/w-ext-cmpd-two-f1.ddl b/hl/tools/testfiles/w-ext-cmpd-two-f1.ddl
index 49abde9..9b4c235 100644
--- a/hl/tools/testfiles/w-ext-cmpd-two-f1.ddl
+++ b/hl/tools/testfiles/w-ext-cmpd-two-f1.ddl
@@ -1,47 +1,15 @@
Opened "TEST.h5" with sec2 driver.
Monitoring dataset /DSET_CMPD_TWO...
dimension 0: 4->6 (increases)
-dimension 1: 10->12 (increases)
- Data:
- (0,10) {1, {1, {1, 1, 1}, 1}}, {1, {1, {1, 1, 1}, 1}}
- (1,10) {1, {1, {1, 1, 1}, 1}}, {1, {1, {1, 1, 1}, 1}}
- (2,10) {1, {1, {1, 1, 1}, 1}}, {1, {1, {1, 1, 1}, 1}}
- (3,10) {1, {1, {1, 1, 1}, 1}}, {1, {1, {1, 1, 1}, 1}}
- (4,0) {1, {1, {1, 1, 1}, 1}}, {1, {1, {1, 1, 1}, 1}},
- (4,2) {1, {1, {1, 1, 1}, 1}}, {1, {1, {1, 1, 1}, 1}},
- (4,4) {1, {1, {1, 1, 1}, 1}}, {1, {1, {1, 1, 1}, 1}},
- (4,6) {1, {1, {1, 1, 1}, 1}}, {1, {1, {1, 1, 1}, 1}},
- (4,8) {1, {1, {1, 1, 1}, 1}}, {1, {1, {1, 1, 1}, 1}},
- (4,10) {1, {1, {1, 1, 1}, 1}}, {1, {1, {1, 1, 1}, 1}},
- (5,0) {1, {1, {1, 1, 1}, 1}}, {1, {1, {1, 1, 1}, 1}},
- (5,2) {1, {1, {1, 1, 1}, 1}}, {1, {1, {1, 1, 1}, 1}},
- (5,4) {1, {1, {1, 1, 1}, 1}}, {1, {1, {1, 1, 1}, 1}},
- (5,6) {1, {1, {1, 1, 1}, 1}}, {1, {1, {1, 1, 1}, 1}},
- (5,8) {1, {1, {1, 1, 1}, 1}}, {1, {1, {1, 1, 1}, 1}},
- (5,10) {1, {1, {1, 1, 1}, 1}}, {1, {1, {1, 1, 1}, 1}}
-dimension 0: 6->8 (increases)
-dimension 1: 12->1 (decreases)
- Data:
- (6,0) {2, {2, {2, 2, 2}, 2}},
- (7,0) {2, {2, {2, 2, 2}, 2}}
-dimension 0: 8->10 (increases)
-dimension 1: 1->1 (unchanged)
- Data:
- (8,0) {3, {3, {3, 3, 3}, 3}},
- (9,0) {3, {3, {3, 3, 3}, 3}}
-dimension 0: 10->3 (decreases)
-dimension 1: 1->3 (increases)
- Data:
- (0,1) {4, {4, {4, 4, 4}, 4}}, {4, {4, {4, 4, 4}, 4}}
- (1,1) {4, {4, {4, 4, 4}, 4}}, {4, {4, {4, 4, 4}, 4}}
- (2,1) {4, {4, {4, 4, 4}, 4}}, {4, {4, {4, 4, 4}, 4}}
-dimension 0: 3->2 (decreases)
-dimension 1: 3->2 (decreases)
-dimension 0: 2->1 (decreases)
-dimension 1: 2->2 (unchanged)
-dimension 0: 1->1 (unchanged)
-dimension 1: 2->4 (increases)
- Data:
- (0,2) {7, {7, {7, 7, 7}, 7}}, {7, {7, {7, 7, 7}, 7}}
-dimension 0: 1->1 (unchanged)
-dimension 1: 4->3 (decreases)
+dimension 1: 10->10 (unchanged)
+ Data:
+ (4,0) {2, {2, {2, 2, 2}, 2}}, {2, {2, {2, 2, 2}, 2}},
+ (4,2) {2, {2, {2, 2, 2}, 2}}, {2, {2, {2, 2, 2}, 2}},
+ (4,4) {2, {2, {2, 2, 2}, 2}}, {2, {2, {2, 2, 2}, 2}},
+ (4,6) {2, {2, {2, 2, 2}, 2}}, {2, {2, {2, 2, 2}, 2}},
+ (4,8) {2, {2, {2, 2, 2}, 2}}, {2, {2, {2, 2, 2}, 2}},
+ (5,0) {2, {2, {2, 2, 2}, 2}}, {2, {2, {2, 2, 2}, 2}},
+ (5,2) {2, {2, {2, 2, 2}, 2}}, {2, {2, {2, 2, 2}, 2}},
+ (5,4) {2, {2, {2, 2, 2}, 2}}, {2, {2, {2, 2, 2}, 2}},
+ (5,6) {2, {2, {2, 2, 2}, 2}}, {2, {2, {2, 2, 2}, 2}},
+ (5,8) {2, {2, {2, 2, 2}, 2}}, {2, {2, {2, 2, 2}, 2}}
diff --git a/hl/tools/testfiles/w-ext-cmpd-two-f3.ddl b/hl/tools/testfiles/w-ext-cmpd-two-f3.ddl
index 0878f35..94b5c99 100644
--- a/hl/tools/testfiles/w-ext-cmpd-two-f3.ddl
+++ b/hl/tools/testfiles/w-ext-cmpd-two-f3.ddl
@@ -1,41 +1,4 @@
Opened "TEST.h5" with sec2 driver.
Monitoring dataset /DSET_CMPD_TWO...
-dimension 0: 4->6 (increases)
-dimension 1: 10->12 (increases)
- Data:
- (0,10) {{{1}}, {1}}, {{{1}}, {1}}
- (1,10) {{{1}}, {1}}, {{{1}}, {1}}
- (2,10) {{{1}}, {1}}, {{{1}}, {1}}
- (3,10) {{{1}}, {1}}, {{{1}}, {1}}
- (4,0) {{{1}}, {1}}, {{{1}}, {1}}, {{{1}}, {1}}, {{{1}}, {1}},
- (4,4) {{{1}}, {1}}, {{{1}}, {1}}, {{{1}}, {1}}, {{{1}}, {1}},
- (4,8) {{{1}}, {1}}, {{{1}}, {1}}, {{{1}}, {1}}, {{{1}}, {1}},
- (5,0) {{{1}}, {1}}, {{{1}}, {1}}, {{{1}}, {1}}, {{{1}}, {1}},
- (5,4) {{{1}}, {1}}, {{{1}}, {1}}, {{{1}}, {1}}, {{{1}}, {1}},
- (5,8) {{{1}}, {1}}, {{{1}}, {1}}, {{{1}}, {1}}, {{{1}}, {1}}
-dimension 0: 6->8 (increases)
-dimension 1: 12->1 (decreases)
- Data:
- (6,0) {{{2}}, {2}},
- (7,0) {{{2}}, {2}}
-dimension 0: 8->10 (increases)
-dimension 1: 1->1 (unchanged)
- Data:
- (8,0) {{{3}}, {3}},
- (9,0) {{{3}}, {3}}
-dimension 0: 10->3 (decreases)
-dimension 1: 1->3 (increases)
- Data:
- (0,1) {{{4}}, {4}}, {{{4}}, {4}}
- (1,1) {{{4}}, {4}}, {{{4}}, {4}}
- (2,1) {{{4}}, {4}}, {{{4}}, {4}}
-dimension 0: 3->2 (decreases)
-dimension 1: 3->2 (decreases)
-dimension 0: 2->1 (decreases)
-dimension 1: 2->2 (unchanged)
-dimension 0: 1->1 (unchanged)
-dimension 1: 2->4 (increases)
- Data:
- (0,2) {{{7}}, {7}}, {{{7}}, {7}}
-dimension 0: 1->1 (unchanged)
-dimension 1: 4->3 (decreases)
+dimension 0: 4->3 (decreases)
+dimension 1: 10->7 (decreases)
diff --git a/hl/tools/testfiles/w-ext-cmpd-two-ff2.ddl b/hl/tools/testfiles/w-ext-cmpd-two-ff2.ddl
index 669547e..482da31 100644
--- a/hl/tools/testfiles/w-ext-cmpd-two-ff2.ddl
+++ b/hl/tools/testfiles/w-ext-cmpd-two-ff2.ddl
@@ -1,47 +1,8 @@
Opened "TEST.h5" with sec2 driver.
Monitoring dataset /DSET_CMPD_TWO...
-dimension 0: 4->6 (increases)
+dimension 0: 4->3 (decreases)
dimension 1: 10->12 (increases)
Data:
- (0,10) {{{1, 1, 1}}, {1, 1}}, {{{1, 1, 1}}, {1, 1}}
- (1,10) {{{1, 1, 1}}, {1, 1}}, {{{1, 1, 1}}, {1, 1}}
- (2,10) {{{1, 1, 1}}, {1, 1}}, {{{1, 1, 1}}, {1, 1}}
- (3,10) {{{1, 1, 1}}, {1, 1}}, {{{1, 1, 1}}, {1, 1}}
- (4,0) {{{1, 1, 1}}, {1, 1}}, {{{1, 1, 1}}, {1, 1}},
- (4,2) {{{1, 1, 1}}, {1, 1}}, {{{1, 1, 1}}, {1, 1}},
- (4,4) {{{1, 1, 1}}, {1, 1}}, {{{1, 1, 1}}, {1, 1}},
- (4,6) {{{1, 1, 1}}, {1, 1}}, {{{1, 1, 1}}, {1, 1}},
- (4,8) {{{1, 1, 1}}, {1, 1}}, {{{1, 1, 1}}, {1, 1}},
- (4,10) {{{1, 1, 1}}, {1, 1}}, {{{1, 1, 1}}, {1, 1}},
- (5,0) {{{1, 1, 1}}, {1, 1}}, {{{1, 1, 1}}, {1, 1}},
- (5,2) {{{1, 1, 1}}, {1, 1}}, {{{1, 1, 1}}, {1, 1}},
- (5,4) {{{1, 1, 1}}, {1, 1}}, {{{1, 1, 1}}, {1, 1}},
- (5,6) {{{1, 1, 1}}, {1, 1}}, {{{1, 1, 1}}, {1, 1}},
- (5,8) {{{1, 1, 1}}, {1, 1}}, {{{1, 1, 1}}, {1, 1}},
- (5,10) {{{1, 1, 1}}, {1, 1}}, {{{1, 1, 1}}, {1, 1}}
-dimension 0: 6->8 (increases)
-dimension 1: 12->1 (decreases)
- Data:
- (6,0) {{{2, 2, 2}}, {2, 2}},
- (7,0) {{{2, 2, 2}}, {2, 2}}
-dimension 0: 8->10 (increases)
-dimension 1: 1->1 (unchanged)
- Data:
- (8,0) {{{3, 3, 3}}, {3, 3}},
- (9,0) {{{3, 3, 3}}, {3, 3}}
-dimension 0: 10->3 (decreases)
-dimension 1: 1->3 (increases)
- Data:
- (0,1) {{{4, 4, 4}}, {4, 4}}, {{{4, 4, 4}}, {4, 4}}
- (1,1) {{{4, 4, 4}}, {4, 4}}, {{{4, 4, 4}}, {4, 4}}
- (2,1) {{{4, 4, 4}}, {4, 4}}, {{{4, 4, 4}}, {4, 4}}
-dimension 0: 3->2 (decreases)
-dimension 1: 3->2 (decreases)
-dimension 0: 2->1 (decreases)
-dimension 1: 2->2 (unchanged)
-dimension 0: 1->1 (unchanged)
-dimension 1: 2->4 (increases)
- Data:
- (0,2) {{{7, 7, 7}}, {7, 7}}, {{{7, 7, 7}}, {7, 7}}
-dimension 0: 1->1 (unchanged)
-dimension 1: 4->3 (decreases)
+ (0,10) {{{-1, -1, -1}}, {-1, -1}}, {{{-1, -1, -1}}, {-1, -1}}
+ (1,10) {{{-1, -1, -1}}, {-1, -1}}, {{{-1, -1, -1}}, {-1, -1}}
+ (2,10) {{{-1, -1, -1}}, {-1, -1}}, {{{-1, -1, -1}}, {-1, -1}}
diff --git a/hl/tools/testfiles/w-ext-cmpd-two.ddl b/hl/tools/testfiles/w-ext-cmpd-two.ddl
index a461226..57ffc2c 100644
--- a/hl/tools/testfiles/w-ext-cmpd-two.ddl
+++ b/hl/tools/testfiles/w-ext-cmpd-two.ddl
@@ -1,67 +1,7 @@
Opened "TEST.h5" with sec2 driver.
Monitoring dataset /DSET_CMPD_TWO...
dimension 0: 4->6 (increases)
-dimension 1: 10->12 (increases)
+dimension 1: 10->1 (decreases)
Data:
- (0,10) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
- (0,11) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}}
- (1,10) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
- (1,11) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}}
- (2,10) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
- (2,11) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}}
- (3,10) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
- (3,11) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}}
- (4,0) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
- (4,1) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
- (4,2) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
- (4,3) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
- (4,4) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
- (4,5) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
- (4,6) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
- (4,7) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
- (4,8) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
- (4,9) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
- (4,10) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
- (4,11) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
- (5,0) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
- (5,1) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
- (5,2) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
- (5,3) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
- (5,4) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
- (5,5) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
- (5,6) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
- (5,7) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
- (5,8) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
- (5,9) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
- (5,10) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}},
- (5,11) {1, {1, {1, 1, 1}, 1}, 1, {1, 1}}
-dimension 0: 6->8 (increases)
-dimension 1: 12->1 (decreases)
- Data:
- (6,0) {2, {2, {2, 2, 2}, 2}, 2, {2, 2}},
- (7,0) {2, {2, {2, 2, 2}, 2}, 2, {2, 2}}
-dimension 0: 8->10 (increases)
-dimension 1: 1->1 (unchanged)
- Data:
- (8,0) {3, {3, {3, 3, 3}, 3}, 3, {3, 3}},
- (9,0) {3, {3, {3, 3, 3}, 3}, 3, {3, 3}}
-dimension 0: 10->3 (decreases)
-dimension 1: 1->3 (increases)
- Data:
- (0,1) {4, {4, {4, 4, 4}, 4}, 4, {4, 4}},
- (0,2) {4, {4, {4, 4, 4}, 4}, 4, {4, 4}}
- (1,1) {4, {4, {4, 4, 4}, 4}, 4, {4, 4}},
- (1,2) {4, {4, {4, 4, 4}, 4}, 4, {4, 4}}
- (2,1) {4, {4, {4, 4, 4}, 4}, 4, {4, 4}},
- (2,2) {4, {4, {4, 4, 4}, 4}, 4, {4, 4}}
-dimension 0: 3->2 (decreases)
-dimension 1: 3->2 (decreases)
-dimension 0: 2->1 (decreases)
-dimension 1: 2->2 (unchanged)
-dimension 0: 1->1 (unchanged)
-dimension 1: 2->4 (increases)
- Data:
- (0,2) {7, {7, {7, 7, 7}, 7}, 7, {7, 7}},
- (0,3) {7, {7, {7, 7, 7}, 7}, 7, {7, 7}}
-dimension 0: 1->1 (unchanged)
-dimension 1: 4->3 (decreases)
+ (4,0) {2, {2, {2, 2, 2}, 2}, 2, {2, 2}},
+ (5,0) {2, {2, {2, 2, 2}, 2}, 2, {2, 2}}
diff --git a/hl/tools/testfiles/w-ext-cmpd.ddl b/hl/tools/testfiles/w-ext-cmpd.ddl
index 7e64a14..aac5be3 100644
--- a/hl/tools/testfiles/w-ext-cmpd.ddl
+++ b/hl/tools/testfiles/w-ext-cmpd.ddl
@@ -5,9 +5,3 @@ dimension 0: 10->13 (increases)
(10) {1, {2, {2, 2, 2}, 2}, 3, {4, 4}},
(11) {2, {3, {3, 3, 3}, 3}, 4, {5, 5}},
(12) {3, {4, {4, 4, 4}, 4}, 5, {6, 6}}
-dimension 0: 13->12 (decreases)
-dimension 0: 12->1 (decreases)
-dimension 0: 1->3 (increases)
- Data:
- (1) {1, {2, {2, 2, 2}, 2}, 3, {4, 4}}, {2, {3, {3, 3, 3}, 3}, 4, {5,
- (2) 5}}
diff --git a/hl/tools/testfiles/w-ext-early.ddl b/hl/tools/testfiles/w-ext-early.ddl
index 1d963d7..bb72104 100644
--- a/hl/tools/testfiles/w-ext-early.ddl
+++ b/hl/tools/testfiles/w-ext-early.ddl
@@ -1,10 +1,3 @@
Opened "TEST.h5" with sec2 driver.
Monitoring dataset /DSET_ALLOC_EARLY...
-dimension 0: 10->13 (increases)
- Data:
- (10) 0, 1, 2
-dimension 0: 13->12 (decreases)
-dimension 0: 12->1 (decreases)
-dimension 0: 1->3 (increases)
- Data:
- (1) 0, 1
+dimension 0: 10->9 (decreases)
diff --git a/hl/tools/testfiles/w-ext-late.ddl b/hl/tools/testfiles/w-ext-late.ddl
index f3a7cf2..a6f8265 100644
--- a/hl/tools/testfiles/w-ext-late.ddl
+++ b/hl/tools/testfiles/w-ext-late.ddl
@@ -1,10 +1,2 @@
Opened "TEST.h5" with sec2 driver.
Monitoring dataset /DSET_ALLOC_LATE...
-dimension 0: 10->13 (increases)
- Data:
- (10) 0, 1, 2
-dimension 0: 13->12 (decreases)
-dimension 0: 12->1 (decreases)
-dimension 0: 1->3 (increases)
- Data:
- (1) 0, 1
diff --git a/hl/tools/testfiles/w-ext-one-d.ddl b/hl/tools/testfiles/w-ext-one-d.ddl
index 249ff95..673370a 100644
--- a/hl/tools/testfiles/w-ext-one-d.ddl
+++ b/hl/tools/testfiles/w-ext-one-d.ddl
@@ -1,6 +1,3 @@
Opened "TEST.h5" with sec2 driver.
Monitoring dataset /DSET_ONE...
dimension 0: 10->13 (increases)
-dimension 0: 13->12 (decreases)
-dimension 0: 12->1 (decreases)
-dimension 0: 1->3 (increases)
diff --git a/hl/tools/testfiles/w-ext-one-simple.ddl b/hl/tools/testfiles/w-ext-one-simple.ddl
index 3bf0ef5..5df9dff 100644
--- a/hl/tools/testfiles/w-ext-one-simple.ddl
+++ b/hl/tools/testfiles/w-ext-one-simple.ddl
@@ -1,13 +1,6 @@
Opened "TEST.h5" with sec2 driver.
Monitoring dataset /DSET_ONE...
-dimension 0: 10->13 (increases)
- Data:
- 0
- 1
- 2
-dimension 0: 13->12 (decreases)
-dimension 0: 12->1 (decreases)
-dimension 0: 1->3 (increases)
+dimension 0: 10->12 (increases)
Data:
0
1
diff --git a/hl/tools/testfiles/w-ext-one.ddl b/hl/tools/testfiles/w-ext-one.ddl
index c4e75eb..9604020 100644
--- a/hl/tools/testfiles/w-ext-one.ddl
+++ b/hl/tools/testfiles/w-ext-one.ddl
@@ -3,8 +3,3 @@ Monitoring dataset /DSET_ONE...
dimension 0: 10->13 (increases)
Data:
(10) 0, 1, 2
-dimension 0: 13->12 (decreases)
-dimension 0: 12->1 (decreases)
-dimension 0: 1->3 (increases)
- Data:
- (1) 0, 1
diff --git a/hl/tools/testfiles/w-ext-two-d.ddl b/hl/tools/testfiles/w-ext-two-d.ddl
index b71a9a6..5c7af0a 100644
--- a/hl/tools/testfiles/w-ext-two-d.ddl
+++ b/hl/tools/testfiles/w-ext-two-d.ddl
@@ -1,18 +1,4 @@
Opened "TEST.h5" with sec2 driver.
Monitoring dataset /DSET_TWO...
-dimension 0: 4->6 (increases)
-dimension 1: 10->12 (increases)
-dimension 0: 6->8 (increases)
-dimension 1: 12->1 (decreases)
-dimension 0: 8->10 (increases)
-dimension 1: 1->1 (unchanged)
-dimension 0: 10->3 (decreases)
-dimension 1: 1->3 (increases)
-dimension 0: 3->2 (decreases)
-dimension 1: 3->2 (decreases)
-dimension 0: 2->1 (decreases)
-dimension 1: 2->2 (unchanged)
-dimension 0: 1->1 (unchanged)
-dimension 1: 2->4 (increases)
-dimension 0: 1->1 (unchanged)
-dimension 1: 4->3 (decreases)
+dimension 0: 4->2 (decreases)
+dimension 1: 10->10 (unchanged)
diff --git a/hl/tools/testfiles/w-ext-two-width.ddl b/hl/tools/testfiles/w-ext-two-width.ddl
index 52903de..9f09fde 100644
--- a/hl/tools/testfiles/w-ext-two-width.ddl
+++ b/hl/tools/testfiles/w-ext-two-width.ddl
@@ -1,41 +1,9 @@
Opened "TEST.h5" with sec2 driver.
Monitoring dataset /DSET_TWO...
-dimension 0: 4->6 (increases)
+dimension 0: 4->4 (unchanged)
dimension 1: 10->12 (increases)
Data:
- (0,10) 1, 1
- (1,10) 1, 1
- (2,10) 1, 1
- (3,10) 1, 1
- (4,0) 1, 1, 1, 1, 1,
- (4,5) 1, 1, 1, 1, 1,
- (4,10) 1, 1,
- (5,0) 1, 1, 1, 1, 1,
- (5,5) 1, 1, 1, 1, 1,
- (5,10) 1, 1
-dimension 0: 6->8 (increases)
-dimension 1: 12->1 (decreases)
- Data:
- (6,0) 2,
- (7,0) 2
-dimension 0: 8->10 (increases)
-dimension 1: 1->1 (unchanged)
- Data:
- (8,0) 3,
- (9,0) 3
-dimension 0: 10->3 (decreases)
-dimension 1: 1->3 (increases)
- Data:
- (0,1) 4, 4
- (1,1) 4, 4
- (2,1) 4, 4
-dimension 0: 3->2 (decreases)
-dimension 1: 3->2 (decreases)
-dimension 0: 2->1 (decreases)
-dimension 1: 2->2 (unchanged)
-dimension 0: 1->1 (unchanged)
-dimension 1: 2->4 (increases)
- Data:
- (0,2) 7, 7
-dimension 0: 1->1 (unchanged)
-dimension 1: 4->3 (decreases)
+ (0,10) 0, 0
+ (1,10) 0, 0
+ (2,10) 0, 0
+ (3,10) 0, 0
diff --git a/hl/tools/testfiles/w-ext-two.ddl b/hl/tools/testfiles/w-ext-two.ddl
index 31df8d5..28ff075 100644
--- a/hl/tools/testfiles/w-ext-two.ddl
+++ b/hl/tools/testfiles/w-ext-two.ddl
@@ -3,35 +3,9 @@ Monitoring dataset /DSET_TWO...
dimension 0: 4->6 (increases)
dimension 1: 10->12 (increases)
Data:
- (0,10) 1, 1
- (1,10) 1, 1
- (2,10) 1, 1
- (3,10) 1, 1
- (4,0) 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
- (5,0) 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
-dimension 0: 6->8 (increases)
-dimension 1: 12->1 (decreases)
- Data:
- (6,0) 2,
- (7,0) 2
-dimension 0: 8->10 (increases)
-dimension 1: 1->1 (unchanged)
- Data:
- (8,0) 3,
- (9,0) 3
-dimension 0: 10->3 (decreases)
-dimension 1: 1->3 (increases)
- Data:
- (0,1) 4, 4
- (1,1) 4, 4
- (2,1) 4, 4
-dimension 0: 3->2 (decreases)
-dimension 1: 3->2 (decreases)
-dimension 0: 2->1 (decreases)
-dimension 1: 2->2 (unchanged)
-dimension 0: 1->1 (unchanged)
-dimension 1: 2->4 (increases)
- Data:
- (0,2) 7, 7
-dimension 0: 1->1 (unchanged)
-dimension 1: 4->3 (decreases)
+ (0,10) 2, 2
+ (1,10) 2, 2
+ (2,10) 2, 2
+ (3,10) 2, 2
+ (4,0) 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2,
+ (5,0) 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2
diff --git a/java/CMakeLists.txt b/java/CMakeLists.txt
index 706b196..d37a409 100644
--- a/java/CMakeLists.txt
+++ b/java/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required(VERSION 3.1.0)
+cmake_minimum_required(VERSION 3.2.2)
PROJECT ( HDF5_JAVA C Java )
set (CMAKE_MODULE_PATH "${HDF_RESOURCES_DIR};${HDF_RESOURCES_EXT_DIR}")
@@ -15,9 +15,9 @@ INCLUDE_DIRECTORIES ( ${JNI_INCLUDE_DIRS} )
if (WIN32)
set (HDF_JRE_DIRECTORY "C:/Program Files/Java/jre8")
-else (WIN32)
+else ()
set (HDF_JRE_DIRECTORY "/usr/lib/jvm/jre")
-endif (WIN32)
+endif ()
#-----------------------------------------------------------------------------
# Include the main src and config directories
@@ -41,14 +41,14 @@ add_subdirectory (${HDF5_JAVA_SOURCE_DIR}/src ${HDF5_JAVA_BINARY_DIR}/src)
#-----------------------------------------------------------------------------
if (HDF5_BUILD_EXAMPLES)
add_subdirectory (${HDF5_JAVA_SOURCE_DIR}/examples ${HDF5_JAVA_BINARY_DIR}/examples)
-endif (HDF5_BUILD_EXAMPLES)
+endif ()
#-----------------------------------------------------------------------------
# Testing
#-----------------------------------------------------------------------------
if (BUILD_TESTING)
add_subdirectory (${HDF5_JAVA_SOURCE_DIR}/test ${HDF5_JAVA_BINARY_DIR}/test)
-endif (BUILD_TESTING)
+endif ()
#-----------------------------------------------------------------------------
# Add Required Jar(s)
@@ -72,4 +72,4 @@ if (HDF5_JAVA_PACK_JRE)
DESTINATION ${HDF5_INSTALL_BIN_DIR}
USE_SOURCE_PERMISSIONS
)
-endif (HDF5_JAVA_PACK_JRE)
+endif ()
diff --git a/java/COPYING b/java/COPYING
index 6903daf..6497ace 100644
--- a/java/COPYING
+++ b/java/COPYING
@@ -5,12 +5,9 @@
The files and subdirectories in this directory are part of HDF5.
The full HDF5 copyright notice, including terms governing use,
- modification, and redistribution, is contained in the files COPYING
- and Copyright.html. COPYING can be found at the root of the source
- code distribution tree; Copyright.html can be found at the root
- level of an installed copy of the electronic HDF5 document set and
- is linked from the top-level documents page. It can also be found
- at http://www.hdfgroup.org/HDF5/doc/Copyright.html. If you do not
- have access to either file, you may request a copy from
+ modification, and redistribution, is contained in the COPYING file
+ which can be found at the root of the source code distribution tree
+ or in https://support.hdfgroup.org/ftp/HDF5/releases. If you do
+ not have access to either file, you may request a copy from
help@hdfgroup.org.
diff --git a/java/Makefile.am b/java/Makefile.am
index 10b1e91..7d0e2f0 100644
--- a/java/Makefile.am
+++ b/java/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
#
# This makefile mostly just reinvokes make in the various subdirectories
diff --git a/java/examples/CMakeLists.txt b/java/examples/CMakeLists.txt
index 0430bdb..a1a7483 100644
--- a/java/examples/CMakeLists.txt
+++ b/java/examples/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDFJAVA_EXAMPLES)
add_subdirectory (${HDFJAVA_EXAMPLES_SOURCE_DIR}/datasets datasets)
diff --git a/java/examples/Makefile.am b/java/examples/Makefile.am
index e685e01..8ca49f2 100644
--- a/java/examples/Makefile.am
+++ b/java/examples/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
#
# This makefile mostly just reinvokes make in the various subdirectories
diff --git a/java/examples/datasets/CMakeLists.txt b/java/examples/datasets/CMakeLists.txt
index 3a69359..d698cfd 100644
--- a/java/examples/datasets/CMakeLists.txt
+++ b/java/examples/datasets/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDFJAVA_EXAMPLES_DATASETS Java)
set (CMAKE_VERBOSE_MAKEFILE 1)
@@ -31,16 +31,16 @@ set (HDF_JAVA_EXAMPLES
if (WIN32)
set (CMAKE_JAVA_INCLUDE_FLAG_SEP ";")
-else (WIN32)
+else ()
set (CMAKE_JAVA_INCLUDE_FLAG_SEP ":")
-endif (WIN32)
+endif ()
set (CMAKE_JAVA_INCLUDE_PATH "${HDF5_JAVA_JARS}")
set (CMAKE_JAVA_CLASSPATH ".")
foreach (CMAKE_INCLUDE_PATH ${CMAKE_JAVA_INCLUDE_PATH})
set (CMAKE_JAVA_CLASSPATH "${CMAKE_JAVA_CLASSPATH}${CMAKE_JAVA_INCLUDE_FLAG_SEP}${CMAKE_INCLUDE_PATH}")
-endforeach (CMAKE_INCLUDE_PATH)
+endforeach ()
foreach (example ${HDF_JAVA_EXAMPLES})
file (WRITE ${PROJECT_BINARY_DIR}/${example}_Manifest.txt
@@ -52,23 +52,23 @@ foreach (example ${HDF_JAVA_EXAMPLES})
# install_jar (${example} ${HJAVA_INSTALL_DATA_DIR}/examples examples)
get_target_property (${example}_CLASSPATH ${example} CLASSDIR)
add_dependencies (${example} ${HDF5_JAVA_HDF5_LIB_TARGET})
-endforeach (example ${HDF_JAVA_EXAMPLES})
+endforeach ()
set (CMAKE_JAVA_INCLUDE_PATH "${HDF5_JAVA_JARS};${HDF5_JAVA_LOGGING_JAR};${HDF5_JAVA_LOGGING_NOP_JAR}")
set (CMAKE_JAVA_CLASSPATH ".")
foreach (HDFJAVA_JAR ${CMAKE_JAVA_INCLUDE_PATH})
set (CMAKE_JAVA_CLASSPATH "${CMAKE_JAVA_CLASSPATH}${CMAKE_JAVA_INCLUDE_FLAG_SEP}${HDFJAVA_JAR}")
-endforeach (HDFJAVA_JAR)
+endforeach ()
MACRO (ADD_H5_TEST resultfile resultcode)
if (CMAKE_BUILD_TYPE MATCHES Debug)
if (WIN32)
set (CMD_ARGS "-Dhdf.hdf5lib.H5.loadLibraryName=hdf5_java_D;")
- else()
+ else ()
set (CMD_ARGS "-Dhdf.hdf5lib.H5.loadLibraryName=hdf5_java_debug;")
- endif()
- endif(CMAKE_BUILD_TYPE MATCHES Debug)
+ endif ()
+ endif ()
add_test (
NAME JAVA_datasets-${resultfile}
COMMAND "${CMAKE_COMMAND}"
@@ -85,16 +85,16 @@ MACRO (ADD_H5_TEST resultfile resultcode)
)
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (JAVA_datasets-${resultfile} PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
set (last_test "JAVA_datasets-${resultfile}")
-ENDMACRO (ADD_H5_TEST file)
+ENDMACRO ()
if (BUILD_TESTING)
# detect whether the encoder is present.
if (H5_HAVE_FILTER_DEFLATE)
set (USE_FILTER_DEFLATE "true")
- endif (H5_HAVE_FILTER_DEFLATE)
+ endif ()
if (H5_HAVE_FILTER_SZIP)
set (USE_FILTER_SZIP "true")
@@ -109,17 +109,17 @@ if (BUILD_TESTING)
${HDFJAVA_EXAMPLES_BINARY_DIR}/${example}.h5
${HDFJAVA_EXAMPLES_BINARY_DIR}/${example}.data
)
- else (${example} STREQUAL "H5Ex_D_External")
+ else ()
add_test (
NAME JAVA_datasets-${example}-clear-objects
COMMAND ${CMAKE_COMMAND}
-E remove
${HDFJAVA_EXAMPLES_BINARY_DIR}/${example}.h5
)
- endif (${example} STREQUAL "H5Ex_D_External")
+ endif ()
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (JAVA_datasets-${example}-clear-objects PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
add_test (
NAME JAVA_datasets-${example}-copy-objects
COMMAND ${CMAKE_COMMAND}
@@ -132,10 +132,10 @@ if (BUILD_TESTING)
if (${example} STREQUAL "H5Ex_D_Szip")
if (USE_FILTER_SZIP)
ADD_H5_TEST (${example} 0)
- endif (USE_FILTER_SZIP)
- else (${example} STREQUAL "H5Ex_D_Szip")
+ endif ()
+ else ()
ADD_H5_TEST (${example} 0)
- endif (${example} STREQUAL "H5Ex_D_Szip")
+ endif ()
- endforeach (example ${HDF_JAVA_EXAMPLES})
-endif (BUILD_TESTING)
+ endforeach ()
+endif ()
diff --git a/java/examples/datasets/H5Ex_D_Alloc.java b/java/examples/datasets/H5Ex_D_Alloc.java
index 69fee38..e40c042 100644
--- a/java/examples/datasets/H5Ex_D_Alloc.java
+++ b/java/examples/datasets/H5Ex_D_Alloc.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/datasets/H5Ex_D_Checksum.java b/java/examples/datasets/H5Ex_D_Checksum.java
index 3a2f98f..9de09be 100644
--- a/java/examples/datasets/H5Ex_D_Checksum.java
+++ b/java/examples/datasets/H5Ex_D_Checksum.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/datasets/H5Ex_D_Chunk.java b/java/examples/datasets/H5Ex_D_Chunk.java
index 7f02e5a..3d61e26 100644
--- a/java/examples/datasets/H5Ex_D_Chunk.java
+++ b/java/examples/datasets/H5Ex_D_Chunk.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
@@ -44,7 +42,7 @@ public class H5Ex_D_Chunk {
// Values for the status of space allocation
enum H5D_layout {
- H5D_LAYOUT_ERROR(-1), H5D_COMPACT(0), H5D_CONTIGUOUS(1), H5D_CHUNKED(2), H5D_NLAYOUTS(3);
+ H5D_LAYOUT_ERROR(-1), H5D_COMPACT(0), H5D_CONTIGUOUS(1), H5D_CHUNKED(2), H5D_VIRTUAL(3), H5D_NLAYOUTS(4);
private static final Map<Integer, H5D_layout> lookup = new HashMap<Integer, H5D_layout>();
static {
@@ -250,6 +248,9 @@ public class H5Ex_D_Chunk {
case H5D_CHUNKED:
System.out.println("H5D_CHUNKED");
break;
+ case H5D_VIRTUAL:
+ System.out.println("H5D_VIRTUAL");
+ break;
case H5D_LAYOUT_ERROR:
break;
case H5D_NLAYOUTS:
diff --git a/java/examples/datasets/H5Ex_D_Compact.java b/java/examples/datasets/H5Ex_D_Compact.java
index 4f1e2f0..17c09f5 100644
--- a/java/examples/datasets/H5Ex_D_Compact.java
+++ b/java/examples/datasets/H5Ex_D_Compact.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
@@ -38,7 +36,7 @@ public class H5Ex_D_Compact {
// Values for the status of space allocation
enum H5D_layout {
- H5D_LAYOUT_ERROR(-1), H5D_COMPACT(0), H5D_CONTIGUOUS(1), H5D_CHUNKED(2), H5D_NLAYOUTS(3);
+ H5D_LAYOUT_ERROR(-1), H5D_COMPACT(0), H5D_CONTIGUOUS(1), H5D_CHUNKED(2), H5D_VIRTUAL(3), H5D_NLAYOUTS(4);
private static final Map<Integer, H5D_layout> lookup = new HashMap<Integer, H5D_layout>();
static {
@@ -212,6 +210,9 @@ public class H5Ex_D_Compact {
case H5D_CHUNKED:
System.out.println("H5D_CHUNKED");
break;
+ case H5D_VIRTUAL:
+ System.out.println("H5D_VIRTUAL");
+ break;
case H5D_LAYOUT_ERROR:
break;
case H5D_NLAYOUTS:
diff --git a/java/examples/datasets/H5Ex_D_External.java b/java/examples/datasets/H5Ex_D_External.java
index 5fdc696..bf413ba 100644
--- a/java/examples/datasets/H5Ex_D_External.java
+++ b/java/examples/datasets/H5Ex_D_External.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/datasets/H5Ex_D_FillValue.java b/java/examples/datasets/H5Ex_D_FillValue.java
index 982d2cb..29cf4e1 100644
--- a/java/examples/datasets/H5Ex_D_FillValue.java
+++ b/java/examples/datasets/H5Ex_D_FillValue.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/datasets/H5Ex_D_Gzip.java b/java/examples/datasets/H5Ex_D_Gzip.java
index b813367..50f8835 100644
--- a/java/examples/datasets/H5Ex_D_Gzip.java
+++ b/java/examples/datasets/H5Ex_D_Gzip.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/datasets/H5Ex_D_Hyperslab.java b/java/examples/datasets/H5Ex_D_Hyperslab.java
index 482e2c0..88aa36e 100644
--- a/java/examples/datasets/H5Ex_D_Hyperslab.java
+++ b/java/examples/datasets/H5Ex_D_Hyperslab.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/datasets/H5Ex_D_Nbit.java b/java/examples/datasets/H5Ex_D_Nbit.java
index f74b675..0263659 100644
--- a/java/examples/datasets/H5Ex_D_Nbit.java
+++ b/java/examples/datasets/H5Ex_D_Nbit.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/datasets/H5Ex_D_ReadWrite.java b/java/examples/datasets/H5Ex_D_ReadWrite.java
index de94ccb..49bc2e5 100644
--- a/java/examples/datasets/H5Ex_D_ReadWrite.java
+++ b/java/examples/datasets/H5Ex_D_ReadWrite.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/datasets/H5Ex_D_Shuffle.java b/java/examples/datasets/H5Ex_D_Shuffle.java
index ac3c1b4..c7b7c53 100644
--- a/java/examples/datasets/H5Ex_D_Shuffle.java
+++ b/java/examples/datasets/H5Ex_D_Shuffle.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/datasets/H5Ex_D_Sofloat.java b/java/examples/datasets/H5Ex_D_Sofloat.java
index 26c8d49..f0a437d 100644
--- a/java/examples/datasets/H5Ex_D_Sofloat.java
+++ b/java/examples/datasets/H5Ex_D_Sofloat.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/datasets/H5Ex_D_Soint.java b/java/examples/datasets/H5Ex_D_Soint.java
index 7939883..fa4b416 100644
--- a/java/examples/datasets/H5Ex_D_Soint.java
+++ b/java/examples/datasets/H5Ex_D_Soint.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/datasets/H5Ex_D_Szip.java b/java/examples/datasets/H5Ex_D_Szip.java
index 5258234..8106557 100644
--- a/java/examples/datasets/H5Ex_D_Szip.java
+++ b/java/examples/datasets/H5Ex_D_Szip.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/datasets/H5Ex_D_Transform.java b/java/examples/datasets/H5Ex_D_Transform.java
index 1f289f3..ada488a 100644
--- a/java/examples/datasets/H5Ex_D_Transform.java
+++ b/java/examples/datasets/H5Ex_D_Transform.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/datasets/H5Ex_D_UnlimitedAdd.java b/java/examples/datasets/H5Ex_D_UnlimitedAdd.java
index ada8df0..7e8ffaa 100644
--- a/java/examples/datasets/H5Ex_D_UnlimitedAdd.java
+++ b/java/examples/datasets/H5Ex_D_UnlimitedAdd.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/datasets/H5Ex_D_UnlimitedGzip.java b/java/examples/datasets/H5Ex_D_UnlimitedGzip.java
index c08ceef..42a6efd 100644
--- a/java/examples/datasets/H5Ex_D_UnlimitedGzip.java
+++ b/java/examples/datasets/H5Ex_D_UnlimitedGzip.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/datasets/H5Ex_D_UnlimitedMod.java b/java/examples/datasets/H5Ex_D_UnlimitedMod.java
index 884cad3..b38b233 100644
--- a/java/examples/datasets/H5Ex_D_UnlimitedMod.java
+++ b/java/examples/datasets/H5Ex_D_UnlimitedMod.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/datasets/Makefile.am b/java/examples/datasets/Makefile.am
index 41ba6d1..18100a6 100644
--- a/java/examples/datasets/Makefile.am
+++ b/java/examples/datasets/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
diff --git a/java/examples/datasets/runExample.sh.in b/java/examples/datasets/runExample.sh.in
index 440de33..21c3439 100644
--- a/java/examples/datasets/runExample.sh.in
+++ b/java/examples/datasets/runExample.sh.in
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
top_builddir=@top_builddir@
@@ -161,6 +159,8 @@ CLEAN_DATAFILES_AND_BLDDIR()
{
$RM $BLDDIR/examples.datasets.H5Ex_D_*.txt
$RM $BLDDIR/H5Ex_D_*.out
+ $RM $BLDDIR/H5Ex_D_*.h5
+ $RM $BLDDIR/H5Ex_D_External.data
}
# Print a line-line message left justified in a field of 70 characters
diff --git a/java/examples/datatypes/CMakeLists.txt b/java/examples/datatypes/CMakeLists.txt
index 3eae115..8569b8b 100644
--- a/java/examples/datatypes/CMakeLists.txt
+++ b/java/examples/datatypes/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDFJAVA_EXAMPLES_DATATYPES Java)
set (CMAKE_VERBOSE_MAKEFILE 1)
@@ -31,16 +31,16 @@ set (HDF_JAVA_EXAMPLES
if (WIN32)
set (CMAKE_JAVA_INCLUDE_FLAG_SEP ";")
-else (WIN32)
+else ()
set (CMAKE_JAVA_INCLUDE_FLAG_SEP ":")
-endif (WIN32)
+endif ()
set (CMAKE_JAVA_INCLUDE_PATH "${HDF5_JAVA_JARS}")
set (CMAKE_JAVA_CLASSPATH ".")
foreach (CMAKE_INCLUDE_PATH ${CMAKE_JAVA_INCLUDE_PATH})
set (CMAKE_JAVA_CLASSPATH "${CMAKE_JAVA_CLASSPATH}${CMAKE_JAVA_INCLUDE_FLAG_SEP}${CMAKE_INCLUDE_PATH}")
-endforeach (CMAKE_INCLUDE_PATH)
+endforeach ()
foreach (example ${HDF_JAVA_EXAMPLES})
file (WRITE ${PROJECT_BINARY_DIR}/${example}_Manifest.txt
@@ -52,23 +52,23 @@ foreach (example ${HDF_JAVA_EXAMPLES})
# install_jar (${example} ${HJAVA_INSTALL_DATA_DIR}/examples examples)
get_target_property (${example}_CLASSPATH ${example} CLASSDIR)
add_dependencies (${example} ${HDF5_JAVA_HDF5_LIB_TARGET})
-endforeach (example ${HDF_JAVA_EXAMPLES})
+endforeach ()
set (CMAKE_JAVA_INCLUDE_PATH "${HDF5_JAVA_JARS};${HDF5_JAVA_LOGGING_JAR};${HDF5_JAVA_LOGGING_NOP_JAR}")
set (CMAKE_JAVA_CLASSPATH ".")
foreach (HDFJAVA_JAR ${CMAKE_JAVA_INCLUDE_PATH})
set (CMAKE_JAVA_CLASSPATH "${CMAKE_JAVA_CLASSPATH}${CMAKE_JAVA_INCLUDE_FLAG_SEP}${HDFJAVA_JAR}")
-endforeach (HDFJAVA_JAR)
+endforeach ()
MACRO (ADD_H5_TEST resultfile resultcode)
if (CMAKE_BUILD_TYPE MATCHES Debug)
if (WIN32)
set (CMD_ARGS "-Dhdf.hdf5lib.H5.loadLibraryName=hdf5_java_D;")
- else()
+ else ()
set (CMD_ARGS "-Dhdf.hdf5lib.H5.loadLibraryName=hdf5_java_debug;")
- endif()
- endif(CMAKE_BUILD_TYPE MATCHES Debug)
+ endif ()
+ endif ()
add_test (
NAME JAVA_datatypes-${resultfile}
COMMAND "${CMAKE_COMMAND}"
@@ -85,9 +85,9 @@ MACRO (ADD_H5_TEST resultfile resultcode)
)
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (JAVA_datatypes-${resultfile} PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
set (last_test "JAVA_datatypes-${resultfile}")
-ENDMACRO (ADD_H5_TEST file)
+ENDMACRO ()
if (BUILD_TESTING)
foreach (example ${HDF_JAVA_EXAMPLES})
@@ -99,7 +99,7 @@ if (BUILD_TESTING)
)
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (JAVA_datatypes-${example}-clear-objects PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
add_test (
NAME JAVA_datatypes-${example}-copy-objects
COMMAND ${CMAKE_COMMAND}
@@ -110,5 +110,5 @@ if (BUILD_TESTING)
set_tests_properties (JAVA_datatypes-${example}-copy-objects PROPERTIES DEPENDS JAVA_datatypes-${example}-clear-objects)
set (last_test "JAVA_datatypes-${example}-copy-objects")
ADD_H5_TEST (${example} 0)
- endforeach (example ${HDF_JAVA_EXAMPLES})
-endif (BUILD_TESTING)
+ endforeach ()
+endif ()
diff --git a/java/examples/datatypes/H5Ex_T_Array.java b/java/examples/datatypes/H5Ex_T_Array.java
index 7b7009a..f7f58d2 100644
--- a/java/examples/datatypes/H5Ex_T_Array.java
+++ b/java/examples/datatypes/H5Ex_T_Array.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/datatypes/H5Ex_T_ArrayAttribute.java b/java/examples/datatypes/H5Ex_T_ArrayAttribute.java
index ce97457..b571f0c 100644
--- a/java/examples/datatypes/H5Ex_T_ArrayAttribute.java
+++ b/java/examples/datatypes/H5Ex_T_ArrayAttribute.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/datatypes/H5Ex_T_Bit.java b/java/examples/datatypes/H5Ex_T_Bit.java
index f76c7d5..e46f3b2 100644
--- a/java/examples/datatypes/H5Ex_T_Bit.java
+++ b/java/examples/datatypes/H5Ex_T_Bit.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/datatypes/H5Ex_T_BitAttribute.java b/java/examples/datatypes/H5Ex_T_BitAttribute.java
index a5ab81b..43de4ea 100644
--- a/java/examples/datatypes/H5Ex_T_BitAttribute.java
+++ b/java/examples/datatypes/H5Ex_T_BitAttribute.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/datatypes/H5Ex_T_Commit.java b/java/examples/datatypes/H5Ex_T_Commit.java
index d4e43f9..4108979 100644
--- a/java/examples/datatypes/H5Ex_T_Commit.java
+++ b/java/examples/datatypes/H5Ex_T_Commit.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/datatypes/H5Ex_T_Compound.java b/java/examples/datatypes/H5Ex_T_Compound.java
index f270cb9..c021c18 100644
--- a/java/examples/datatypes/H5Ex_T_Compound.java
+++ b/java/examples/datatypes/H5Ex_T_Compound.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/datatypes/H5Ex_T_CompoundAttribute.java b/java/examples/datatypes/H5Ex_T_CompoundAttribute.java
index 25581d4..971939a 100644
--- a/java/examples/datatypes/H5Ex_T_CompoundAttribute.java
+++ b/java/examples/datatypes/H5Ex_T_CompoundAttribute.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/datatypes/H5Ex_T_Float.java b/java/examples/datatypes/H5Ex_T_Float.java
index 1b5fd9b..f677479 100644
--- a/java/examples/datatypes/H5Ex_T_Float.java
+++ b/java/examples/datatypes/H5Ex_T_Float.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/datatypes/H5Ex_T_FloatAttribute.java b/java/examples/datatypes/H5Ex_T_FloatAttribute.java
index de1dac7..2e706d9 100644
--- a/java/examples/datatypes/H5Ex_T_FloatAttribute.java
+++ b/java/examples/datatypes/H5Ex_T_FloatAttribute.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/datatypes/H5Ex_T_Integer.java b/java/examples/datatypes/H5Ex_T_Integer.java
index 2f365cd..56da623 100644
--- a/java/examples/datatypes/H5Ex_T_Integer.java
+++ b/java/examples/datatypes/H5Ex_T_Integer.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/datatypes/H5Ex_T_IntegerAttribute.java b/java/examples/datatypes/H5Ex_T_IntegerAttribute.java
index c153d99..9de517c 100644
--- a/java/examples/datatypes/H5Ex_T_IntegerAttribute.java
+++ b/java/examples/datatypes/H5Ex_T_IntegerAttribute.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/datatypes/H5Ex_T_ObjectReference.java b/java/examples/datatypes/H5Ex_T_ObjectReference.java
index 8a09f5b..8ce4f7b 100644
--- a/java/examples/datatypes/H5Ex_T_ObjectReference.java
+++ b/java/examples/datatypes/H5Ex_T_ObjectReference.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/datatypes/H5Ex_T_ObjectReferenceAttribute.java b/java/examples/datatypes/H5Ex_T_ObjectReferenceAttribute.java
index e366681..4dc3677 100644
--- a/java/examples/datatypes/H5Ex_T_ObjectReferenceAttribute.java
+++ b/java/examples/datatypes/H5Ex_T_ObjectReferenceAttribute.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/datatypes/H5Ex_T_Opaque.java b/java/examples/datatypes/H5Ex_T_Opaque.java
index eb45686..6b0dc63 100644
--- a/java/examples/datatypes/H5Ex_T_Opaque.java
+++ b/java/examples/datatypes/H5Ex_T_Opaque.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/datatypes/H5Ex_T_OpaqueAttribute.java b/java/examples/datatypes/H5Ex_T_OpaqueAttribute.java
index e42bfe8..6b8d1f8 100644
--- a/java/examples/datatypes/H5Ex_T_OpaqueAttribute.java
+++ b/java/examples/datatypes/H5Ex_T_OpaqueAttribute.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/datatypes/H5Ex_T_String.java b/java/examples/datatypes/H5Ex_T_String.java
index 469172d..7c190b7 100644
--- a/java/examples/datatypes/H5Ex_T_String.java
+++ b/java/examples/datatypes/H5Ex_T_String.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/datatypes/H5Ex_T_StringAttribute.java b/java/examples/datatypes/H5Ex_T_StringAttribute.java
index 49361bc..f9ec155 100644
--- a/java/examples/datatypes/H5Ex_T_StringAttribute.java
+++ b/java/examples/datatypes/H5Ex_T_StringAttribute.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/datatypes/H5Ex_T_VLString.java b/java/examples/datatypes/H5Ex_T_VLString.java
index c8892ba..39be3e5 100644
--- a/java/examples/datatypes/H5Ex_T_VLString.java
+++ b/java/examples/datatypes/H5Ex_T_VLString.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/datatypes/Makefile.am b/java/examples/datatypes/Makefile.am
index 7d95a56..71f7769 100644
--- a/java/examples/datatypes/Makefile.am
+++ b/java/examples/datatypes/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
diff --git a/java/examples/datatypes/runExample.sh.in b/java/examples/datatypes/runExample.sh.in
index 1b02774..32e9ade 100644
--- a/java/examples/datatypes/runExample.sh.in
+++ b/java/examples/datatypes/runExample.sh.in
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
top_builddir=@top_builddir@
@@ -158,6 +156,7 @@ CLEAN_DATAFILES_AND_BLDDIR()
{
$RM $BLDDIR/examples.datatypes.H5Ex_T_*.txt
$RM $BLDDIR/H5Ex_T_*.out
+ $RM $BLDDIR/H5Ex_T_*.h5
}
# Print a line-line message left justified in a field of 70 characters
diff --git a/java/examples/groups/CMakeLists.txt b/java/examples/groups/CMakeLists.txt
index 2cab211..bb5f80f 100644
--- a/java/examples/groups/CMakeLists.txt
+++ b/java/examples/groups/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDFJAVA_EXAMPLES_GROUPS Java)
set (CMAKE_VERBOSE_MAKEFILE 1)
@@ -20,16 +20,16 @@ set (HDF_JAVA_EXAMPLES
if (WIN32)
set (CMAKE_JAVA_INCLUDE_FLAG_SEP ";")
-else (WIN32)
+else ()
set (CMAKE_JAVA_INCLUDE_FLAG_SEP ":")
-endif (WIN32)
+endif ()
set (CMAKE_JAVA_INCLUDE_PATH "${HDF5_JAVA_JARS}")
set (CMAKE_JAVA_CLASSPATH ".")
foreach (CMAKE_INCLUDE_PATH ${CMAKE_JAVA_INCLUDE_PATH})
set (CMAKE_JAVA_CLASSPATH "${CMAKE_JAVA_CLASSPATH}${CMAKE_JAVA_INCLUDE_FLAG_SEP}${CMAKE_INCLUDE_PATH}")
-endforeach (CMAKE_INCLUDE_PATH)
+endforeach ()
foreach (example ${HDF_JAVA_EXAMPLES})
file (WRITE ${PROJECT_BINARY_DIR}/${example}_Manifest.txt
@@ -41,14 +41,14 @@ foreach (example ${HDF_JAVA_EXAMPLES})
# install_jar (${example} ${HJAVA_INSTALL_DATA_DIR}/examples examples)
get_target_property (${example}_CLASSPATH ${example} CLASSDIR)
add_dependencies (${example} ${HDF5_JAVA_HDF5_LIB_TARGET})
-endforeach (example ${HDF_JAVA_EXAMPLES})
+endforeach ()
set (CMAKE_JAVA_INCLUDE_PATH "${HDF5_JAVA_JARS};${HDF5_JAVA_LOGGING_JAR};${HDF5_JAVA_LOGGING_NOP_JAR}")
set (CMAKE_JAVA_CLASSPATH ".")
foreach (HDFJAVA_JAR ${CMAKE_JAVA_INCLUDE_PATH})
set (CMAKE_JAVA_CLASSPATH "${CMAKE_JAVA_CLASSPATH}${CMAKE_JAVA_INCLUDE_FLAG_SEP}${HDFJAVA_JAR}")
-endforeach (HDFJAVA_JAR)
+endforeach ()
set (HDF_JAVA_TEST_FILES
h5ex_g_iterate.h5
@@ -57,17 +57,17 @@ set (HDF_JAVA_TEST_FILES
foreach (h5_file ${HDF_JAVA_TEST_FILES})
HDFTEST_COPY_FILE("${PROJECT_SOURCE_DIR}/${h5_file}" "${PROJECT_BINARY_DIR}/${h5_file}" "H5Ex_G_Visit_files")
-endforeach (h5_file ${HDF_JAVA_TEST_FILES})
+endforeach ()
add_custom_target(H5Ex_G_Visit_files ALL COMMENT "Copying files needed by H5Ex_G_Visit tests" DEPENDS ${H5Ex_G_Visit_files_list})
MACRO (ADD_H5_TEST resultfile resultcode)
if (CMAKE_BUILD_TYPE MATCHES Debug)
if (WIN32)
set (CMD_ARGS "-Dhdf.hdf5lib.H5.loadLibraryName=hdf5_java_D;")
- else()
+ else ()
set (CMD_ARGS "-Dhdf.hdf5lib.H5.loadLibraryName=hdf5_java_debug;")
- endif()
- endif(CMAKE_BUILD_TYPE MATCHES Debug)
+ endif ()
+ endif ()
add_test (
NAME JAVA_groups-${resultfile}
COMMAND "${CMAKE_COMMAND}"
@@ -84,9 +84,9 @@ MACRO (ADD_H5_TEST resultfile resultcode)
)
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (JAVA_groups-${resultfile} PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
set (last_test "JAVA_groups-${resultfile}")
-ENDMACRO (ADD_H5_TEST file)
+ENDMACRO ()
if (BUILD_TESTING)
foreach (example ${HDF_JAVA_EXAMPLES})
@@ -99,19 +99,19 @@ if (BUILD_TESTING)
${HDFJAVA_EXAMPLES_BINARY_DIR}/${example}1.h5
${HDFJAVA_EXAMPLES_BINARY_DIR}/${example}2.h5
)
- else (${example} STREQUAL "H5Ex_G_Compact")
+ else ()
add_test (
NAME JAVA_groups-${example}-clear-h5s
COMMAND ${CMAKE_COMMAND}
-E remove
${HDFJAVA_EXAMPLES_BINARY_DIR}/${example}.h5
)
- endif (${example} STREQUAL "H5Ex_G_Compact")
+ endif ()
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (JAVA_groups-${example}-clear-h5s PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
set (last_test "JAVA_groups-${example}-clear-h5s")
- endif (NOT ${example} STREQUAL "H5Ex_G_Iterate" AND NOT ${example} STREQUAL "H5Ex_G_Visit")
+ endif ()
add_test (
NAME JAVA_groups-${example}-copy-objects
COMMAND ${CMAKE_COMMAND}
@@ -121,8 +121,8 @@ if (BUILD_TESTING)
)
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (JAVA_groups-${example}-copy-objects PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
set (last_test "JAVA_groups-${example}-copy-objects")
ADD_H5_TEST (${example} 0)
- endforeach (example ${HDF_JAVA_EXAMPLES})
-endif (BUILD_TESTING)
+ endforeach ()
+endif ()
diff --git a/java/examples/groups/H5Ex_G_Compact.java b/java/examples/groups/H5Ex_G_Compact.java
index ca9b6c8..7e20c2a 100644
--- a/java/examples/groups/H5Ex_G_Compact.java
+++ b/java/examples/groups/H5Ex_G_Compact.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/groups/H5Ex_G_Corder.java b/java/examples/groups/H5Ex_G_Corder.java
index 95790bf..53d0011 100644
--- a/java/examples/groups/H5Ex_G_Corder.java
+++ b/java/examples/groups/H5Ex_G_Corder.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
Creating a file with creation properties and traverse the
diff --git a/java/examples/groups/H5Ex_G_Create.java b/java/examples/groups/H5Ex_G_Create.java
index 1902d86..0e729d5 100644
--- a/java/examples/groups/H5Ex_G_Create.java
+++ b/java/examples/groups/H5Ex_G_Create.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/groups/H5Ex_G_Intermediate.java b/java/examples/groups/H5Ex_G_Intermediate.java
index a3d620b..f7d5a50 100644
--- a/java/examples/groups/H5Ex_G_Intermediate.java
+++ b/java/examples/groups/H5Ex_G_Intermediate.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/groups/H5Ex_G_Iterate.java b/java/examples/groups/H5Ex_G_Iterate.java
index d0b4ea4..3c9ca82 100644
--- a/java/examples/groups/H5Ex_G_Iterate.java
+++ b/java/examples/groups/H5Ex_G_Iterate.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/groups/H5Ex_G_Phase.java b/java/examples/groups/H5Ex_G_Phase.java
index f23d6f2..bfb775b 100644
--- a/java/examples/groups/H5Ex_G_Phase.java
+++ b/java/examples/groups/H5Ex_G_Phase.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/groups/H5Ex_G_Traverse.java b/java/examples/groups/H5Ex_G_Traverse.java
index b00fe97..2a2cba3 100644
--- a/java/examples/groups/H5Ex_G_Traverse.java
+++ b/java/examples/groups/H5Ex_G_Traverse.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/groups/H5Ex_G_Visit.java b/java/examples/groups/H5Ex_G_Visit.java
index c2367a6..f91c707 100644
--- a/java/examples/groups/H5Ex_G_Visit.java
+++ b/java/examples/groups/H5Ex_G_Visit.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/groups/Makefile.am b/java/examples/groups/Makefile.am
index be15b42..08721e0 100644
--- a/java/examples/groups/Makefile.am
+++ b/java/examples/groups/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
diff --git a/java/examples/groups/runExample.sh.in b/java/examples/groups/runExample.sh.in
index 665b307..3732e7f 100644
--- a/java/examples/groups/runExample.sh.in
+++ b/java/examples/groups/runExample.sh.in
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
top_builddir=@top_builddir@
@@ -153,6 +151,14 @@ CLEAN_DATAFILES_AND_BLDDIR()
{
$RM $BLDDIR/examples.groups.H5Ex_G_*.txt
$RM $BLDDIR/H5Ex_G_*.out
+ $RM $BLDDIR/H5Ex_G_*.h5
+ SDIR=`$DIRNAME $tstfile`
+ INODE_SDIR=`$LS -i -d $SDIR | $AWK -F' ' '{print $1}'`
+ INODE_DDIR=`$LS -i -d $BLDDIR | $AWK -F' ' '{print $1}'`
+ if [ "$INODE_SDIR" != "$INODE_DDIR" ]; then SDIR=`$DIRNAME $tstfile`
+ $RM $BLDDIR/h5ex_g_iterate.h5
+ $RM $BLDDIR/h5ex_g_visit.h5
+ fi
}
COPY_REFFILES="$LIST_REF_FILES"
diff --git a/java/examples/intro/CMakeLists.txt b/java/examples/intro/CMakeLists.txt
index 2d0c218..9dc4dd7 100644
--- a/java/examples/intro/CMakeLists.txt
+++ b/java/examples/intro/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDFJAVA_EXAMPLES_INTRO Java)
set (CMAKE_VERBOSE_MAKEFILE 1)
@@ -20,16 +20,16 @@ set (HDF_JAVA_EXAMPLES
if (WIN32)
set (CMAKE_JAVA_INCLUDE_FLAG_SEP ";")
-else (WIN32)
+else ()
set (CMAKE_JAVA_INCLUDE_FLAG_SEP ":")
-endif (WIN32)
+endif ()
set (CMAKE_JAVA_INCLUDE_PATH "${HDF5_JAVA_JARS}")
set (CMAKE_JAVA_CLASSPATH ".")
foreach (CMAKE_INCLUDE_PATH ${CMAKE_JAVA_INCLUDE_PATH})
set (CMAKE_JAVA_CLASSPATH "${CMAKE_JAVA_CLASSPATH}${CMAKE_JAVA_INCLUDE_FLAG_SEP}${CMAKE_INCLUDE_PATH}")
-endforeach (CMAKE_INCLUDE_PATH)
+endforeach ()
foreach (example ${HDF_JAVA_EXAMPLES})
file (WRITE ${PROJECT_BINARY_DIR}/${example}_Manifest.txt
@@ -41,14 +41,14 @@ foreach (example ${HDF_JAVA_EXAMPLES})
# install_jar (${example} ${HJAVA_INSTALL_DATA_DIR}/examples examples)
get_target_property (${example}_CLASSPATH ${example} CLASSDIR)
add_dependencies (${example} ${HDF5_JAVA_HDF5_LIB_TARGET})
-endforeach (example ${HDF_JAVA_EXAMPLES})
+endforeach ()
set (CMAKE_JAVA_INCLUDE_PATH "${HDF5_JAVA_JARS};${HDF5_JAVA_LOGGING_JAR};${HDF5_JAVA_LOGGING_NOP_JAR}")
set (CMAKE_JAVA_CLASSPATH ".")
foreach (HDFJAVA_JAR ${CMAKE_JAVA_INCLUDE_PATH})
set (CMAKE_JAVA_CLASSPATH "${CMAKE_JAVA_CLASSPATH}${CMAKE_JAVA_INCLUDE_FLAG_SEP}${HDFJAVA_JAR}")
-endforeach (HDFJAVA_JAR)
+endforeach ()
foreach (example ${HDF_JAVA_OBJECT_EXAMPLES})
file (WRITE ${PROJECT_BINARY_DIR}/${example}_Manifest.txt
@@ -60,16 +60,16 @@ foreach (example ${HDF_JAVA_OBJECT_EXAMPLES})
# install_jar (${example} ${HJAVA_INSTALL_DATA_DIR}/examples examples)
get_target_property (${example}_CLASSPATH ${example} CLASSDIR)
add_dependencies (${example} ${HDFJAVA_H5_LIB_TARGET})
-endforeach (example ${HDF_JAVA_OBJECT_EXAMPLES})
+endforeach ()
MACRO (ADD_H5_TEST resultfile resultcode)
if (CMAKE_BUILD_TYPE MATCHES Debug)
if (WIN32)
set (CMD_ARGS "-Dhdf.hdf5lib.H5.loadLibraryName=hdf5_java_D;")
- else()
+ else ()
set (CMD_ARGS "-Dhdf.hdf5lib.H5.loadLibraryName=hdf5_java_debug;")
- endif()
- endif(CMAKE_BUILD_TYPE MATCHES Debug)
+ endif ()
+ endif ()
add_test (
NAME JAVA_intro-${resultfile}
COMMAND "${CMAKE_COMMAND}"
@@ -86,9 +86,9 @@ MACRO (ADD_H5_TEST resultfile resultcode)
)
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (JAVA_intro-${resultfile} PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
set (last_test "JAVA_intro-${resultfile}")
-ENDMACRO (ADD_H5_TEST file)
+ENDMACRO ()
if (BUILD_TESTING)
@@ -101,7 +101,7 @@ if (BUILD_TESTING)
)
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (JAVA_intro-${example}-clear-objects PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
add_test (
NAME JAVA_intro-${example}-copy-objects
COMMAND ${CMAKE_COMMAND}
@@ -112,6 +112,6 @@ if (BUILD_TESTING)
set_tests_properties (JAVA_intro-${example}-copy-objects PROPERTIES DEPENDS JAVA_intro-${example}-clear-objects)
set (last_test "JAVA_intro-${example}-copy-objects")
ADD_H5_TEST (${example} 0)
- endforeach (example ${HDF_JAVA_EXAMPLES})
+ endforeach ()
-endif (BUILD_TESTING)
+endif ()
diff --git a/java/examples/intro/H5_CreateAttribute.java b/java/examples/intro/H5_CreateAttribute.java
index 16c53d6..68749b1 100644
--- a/java/examples/intro/H5_CreateAttribute.java
+++ b/java/examples/intro/H5_CreateAttribute.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/intro/H5_CreateDataset.java b/java/examples/intro/H5_CreateDataset.java
index a16cfe6..3572a31 100644
--- a/java/examples/intro/H5_CreateDataset.java
+++ b/java/examples/intro/H5_CreateDataset.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/intro/H5_CreateFile.java b/java/examples/intro/H5_CreateFile.java
index eb9f277..a8c87ea 100644
--- a/java/examples/intro/H5_CreateFile.java
+++ b/java/examples/intro/H5_CreateFile.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/intro/H5_CreateGroup.java b/java/examples/intro/H5_CreateGroup.java
index 36bd49a..9359605 100644
--- a/java/examples/intro/H5_CreateGroup.java
+++ b/java/examples/intro/H5_CreateGroup.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/intro/H5_CreateGroupAbsoluteRelative.java b/java/examples/intro/H5_CreateGroupAbsoluteRelative.java
index e0127cc..ddc069f 100644
--- a/java/examples/intro/H5_CreateGroupAbsoluteRelative.java
+++ b/java/examples/intro/H5_CreateGroupAbsoluteRelative.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/intro/H5_CreateGroupDataset.java b/java/examples/intro/H5_CreateGroupDataset.java
index 0607bbd..bdb0546 100644
--- a/java/examples/intro/H5_CreateGroupDataset.java
+++ b/java/examples/intro/H5_CreateGroupDataset.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/intro/H5_ReadWrite.java b/java/examples/intro/H5_ReadWrite.java
index 0d73884..5a7dabc 100644
--- a/java/examples/intro/H5_ReadWrite.java
+++ b/java/examples/intro/H5_ReadWrite.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/************************************************************
diff --git a/java/examples/intro/Makefile.am b/java/examples/intro/Makefile.am
index 3d5757a..95743d9 100644
--- a/java/examples/intro/Makefile.am
+++ b/java/examples/intro/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
diff --git a/java/examples/intro/runExample.sh.in b/java/examples/intro/runExample.sh.in
index a8e65ff..4533a1b 100644
--- a/java/examples/intro/runExample.sh.in
+++ b/java/examples/intro/runExample.sh.in
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
top_builddir=@top_builddir@
@@ -147,6 +145,7 @@ CLEAN_DATAFILES_AND_BLDDIR()
{
$RM $BLDDIR/examples.intro.H5_*.txt
$RM $BLDDIR/H5_*.out
+ $RM $BLDDIR/H5_*.h5
}
# Print a line-line message left justified in a field of 70 characters
diff --git a/java/src/CMakeLists.txt b/java/src/CMakeLists.txt
index ae78201..3f93f39 100644
--- a/java/src/CMakeLists.txt
+++ b/java/src/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required(VERSION 3.1.0)
+cmake_minimum_required(VERSION 3.2.2)
PROJECT ( HDF5_JAVA_SRC C Java )
#-----------------------------------------------------------------------------
diff --git a/java/src/Makefile.am b/java/src/Makefile.am
index 21aa5ec..64eef0c 100644
--- a/java/src/Makefile.am
+++ b/java/src/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
#
# This makefile mostly just reinvokes make in the various subdirectories
@@ -130,6 +128,7 @@ CLEANFILES = classhdf5_java.stamp $(jarfile) $(JAVAROOT)/$(pkgpath)/callbacks/*.
clean:
rm -rf $(JAVAROOT)/*
rm -f $(jarfile)
+ rm -rf javadoc
rm -f classhdf5_java.stamp
diff --git a/java/src/hdf/CMakeLists.txt b/java/src/hdf/CMakeLists.txt
index f465da9..84bda53 100644
--- a/java/src/hdf/CMakeLists.txt
+++ b/java/src/hdf/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_JAVA_HDF)
add_subdirectory (${HDF5_JAVA_HDF_SOURCE_DIR}/hdf5lib hdf5lib)
diff --git a/java/src/hdf/hdf5lib/CMakeLists.txt b/java/src/hdf/hdf5lib/CMakeLists.txt
index 5362c44..c856c04 100644
--- a/java/src/hdf/hdf5lib/CMakeLists.txt
+++ b/java/src/hdf/hdf5lib/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_JAVA_HDF_HDF5 Java)
set (CMAKE_VERBOSE_MAKEFILE 1)
@@ -105,7 +105,7 @@ add_jar (${HDF5_JAVA_HDF5_LIB_TARGET} OUTPUT_NAME "${HDF5_JAVA_HDF5_LIB_TARGET}-
install_jar (${HDF5_JAVA_HDF5_LIB_TARGET} LIBRARY DESTINATION ${HDF5_INSTALL_JAR_DIR} COMPONENT libraries)
#if (NOT WIN32)
# install_jni_symlink (${HDF5_JAVA_HDF5_LIB_TARGET} ${HDF5_INSTALL_JAR_DIR} libraries)
-#endif (NOT WIN32)
+#endif ()
get_target_property (${HDF5_JAVA_HDF5_LIB_TARGET}_JAR_FILE ${HDF5_JAVA_HDF5_LIB_TARGET} JAR_FILE)
SET_GLOBAL_VARIABLE (HDF5_JAVA_JARS_TO_EXPORT "${HDF5_JAVA_JARS_TO_EXPORT};${${HDF5_JAVA_HDF5_LIB_TARGET}_JAR_FILE}")
diff --git a/java/src/hdf/hdf5lib/H5.java b/java/src/hdf/hdf5lib/H5.java
index 6b2af4b..7b5b5c9 100644
--- a/java/src/hdf/hdf5lib/H5.java
+++ b/java/src/hdf/hdf5lib/H5.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
@@ -151,7 +149,7 @@ import hdf.hdf5lib.structs.H5O_info_t;
* layout of the source and destination, and the data for the array passed as a block of bytes, for instance,
*
* <pre>
- * herr_t H5Dread(int fid, int filetype, int memtype, int memspace,
+ * herr_t H5Dread(long fid, long filetype, long memtype, long memspace,
* void * data);
* </pre>
*
@@ -171,7 +169,7 @@ import hdf.hdf5lib.structs.H5O_info_t;
* library. So the function above would be declared:
*
* <pre>
- * public synchronized static native int H5Dread(int fid, int filetype, int memtype, int memspace, Object data);
+ * public synchronized static native int H5Dread(long fid, long filetype, long memtype, long memspace, Object data);
* </pre>
* OPEN_IDS.addElement(id);
@@ -192,7 +190,7 @@ import hdf.hdf5lib.structs.H5O_info_t;
* can be accessed as public variables of the Java class, such as:
*
* <pre>
- * int data_type = HDF5CDataTypes.JH5T_NATIVE_INT;
+ * long data_type = HDF5CDataTypes.JH5T_NATIVE_INT;
* </pre>
*
* The Java application uses both types of constants the same way, the only difference is that the
@@ -216,7 +214,7 @@ import hdf.hdf5lib.structs.H5O_info_t;
* exception handlers to print out the HDF-5 error stack.
* <hr>
*
- * @version HDF5 1.9 <BR>
+ * @version HDF5 1.11.0 <BR>
* <b>See also: <a href ="./hdf.hdf5lib.HDFArray.html"> hdf.hdf5lib.HDFArray</a> </b><BR>
* <a href ="./hdf.hdf5lib.HDF5Constants.html"> hdf.hdf5lib.HDF5Constants</a><BR>
* <a href ="./hdf.hdf5lib.HDF5CDataTypes.html"> hdf.hdf5lib.HDF5CDataTypes</a><BR>
@@ -239,7 +237,7 @@ public class H5 implements java.io.Serializable {
*
* Make sure to update the versions number when a different library is used.
*/
- public final static int LIB_VERSION[] = { 1, 9, 9999 };
+ public final static int LIB_VERSION[] = { 1, 11, 0 };
public final static String H5PATH_PROPERTY_KEY = "hdf.hdf5lib.H5.hdf5lib";
@@ -1798,6 +1796,9 @@ public class H5 implements java.io.Serializable {
return H5Dread_short(dataset_id, mem_type_id, mem_space_id, file_space_id, xfer_plist_id, buf, true);
}
+ public synchronized static native int H5DreadVL(long dataset_id, long mem_type_id, long mem_space_id,
+ long file_space_id, long xfer_plist_id, Object[] buf) throws HDF5LibraryException, NullPointerException;
+
public synchronized static native int H5Dread_string(long dataset_id, long mem_type_id, long mem_space_id,
long file_space_id, long xfer_plist_id, String[] buf) throws HDF5LibraryException, NullPointerException;
@@ -2892,7 +2893,8 @@ public class H5 implements java.io.Serializable {
* @param file_id
* IN: Identifier of the target file.
*
- * @param mdc_logging_status, the status
+ * @param mdc_logging_status
+ * the status
* mdc_logging_status[0] = is_enabled, whether logging is enabled
* mdc_logging_status[1] = is_currently_logging, whether events are currently being logged
*
@@ -5356,39 +5358,24 @@ public class H5 implements java.io.Serializable {
throws HDF5LibraryException, IllegalArgumentException;
/**
- * H5Pset_file_space sets the file space management strategy for the file associated with fcpl_id to strategy.
+ * H5Pset_file_space_strategy sets the file space management strategy for the file associated with fcpl_id to strategy.
* There are four strategies that applications can select and they are described in the Parameters section.
*
* @param fcpl_id
* IN: File creation property list identifier
* @param strategy
* IN: The strategy for file space management.
- * Passing a value of zero (0) indicates that the value of strategy is not to be modified.
- * H5F_FILE_SPACE_ALL_PERSIST
- * With this strategy, the free-space managers track the free space that results from the
- * manipulation of HDF5 objects in the HDF5 file. The free space information is saved when the
- * file is closed, and reloaded when the file is reopened. When space is needed for file metadata
- * or raw data, the HDF5 library first requests space from the library's free-space managers.
- * If the request is not satisfied, the library requests space from the aggregators. If the request
- * is still not satisfied, the library requests space from the virtual file driver. That is, the
- * library will use all of the mechanisms for allocating space.
- * H5F_FILE_SPACE_ALL (Default file space management strategy)
- * With this strategy, the free-space managers track the free space that results from the manipulation
- * of HDF5 objects in the HDF5 file. The free space information is NOT saved when the file is closed
- * and the free space that exists upon file closing becomes unaccounted space in the file.
- * Like the previous strategy, the library will try all of the mechanisms for allocating space. When
- * space is needed for file metadata or raw data, the library first requests space from the free-space
- * managers. If the request is not satisfied, the library requests space from the aggregators. If the
- * request is still not satisfied, the library requests space from the virtual file driver.
- * H5F_FILE_SPACE_AGGR_VFD
- * With this strategy, the library does not track free space that results from the manipulation of HDF5
- * obejcts in the HDF5 file and the free space becomes unaccounted space in the file.
- * When space is needed for file metadata or raw data, the library first requests space from the
- * aggregators. If the request is not satisfied, the library requests space from the virtual file driver.
- * H5F_FILE_SPACE_VFD
- * With this strategy, the library does not track free space that results from the manipulation of HDF5
- * obejcts in the HDF5 file and the free space becomes unaccounted space in the file.
- * When space is needed for file metadata or raw data, the library requests space from the virtual file driver.
+ * H5F_FSPACE_STRATEGY_FSM_AGGR
+ * Mechanisms: free-space managers, aggregators, and virtual file drivers
+ * This is the library default when not set.
+ * H5F_FSPACE_STRATEGY_PAGE
+ * Mechanisms: free-space managers with embedded paged aggregation and virtual file drivers
+ * H5F_FSPACE_STRATEGY_AGGR
+ * Mechanisms: aggregators and virtual file drivers
+ * H5F_FSPACE_STRATEGY_NONE
+ * Mechanisms: virtual file drivers
+ * @param persist
+ * IN: True to persist free-space.
* @param threshold
* IN: The free-space section threshold. The library default is 1, which is to track all free-space sections.
* Passing a value of zero (0) indicates that the value of threshold is not to be modified.
@@ -5399,28 +5386,100 @@ public class H5 implements java.io.Serializable {
* - Invalid values of max_list and min_btree.
*
**/
- public synchronized static native void H5Pset_file_space(long fcpl_id, int strategy, long threshold)
+ public synchronized static native void H5Pset_file_space_strategy(long fcpl_id, int strategy, boolean persist, long threshold)
throws HDF5LibraryException, IllegalArgumentException;
/**
- * H5Pget_file_space provides the means for applications to manage the HDF5 file's file space for their specific needs.
+ * H5Pget_file_space_strategy retrieves the file space management strategy, free-space persistence, and free-space section threshold for the file associated with fcpl_id.
*
* @param fcpl_id
* IN: File creation property list identifier
- * @param strategy
- * IN/OUT: The current file space management strategy in use for the file. NULL, strategy not queried.
+ * @param persist
+ * IN/OUT: The current free-space persistence. NULL, persist not queried.
* @param threshold
* IN/OUT: The current free-space section threshold. NULL, threshold not queried.
*
+ * @return the current free-space strategy.
+ *
+ * @exception HDF5LibraryException
+ * - Error from the HDF-5 Library.
+ * @exception IllegalArgumentException
+ * - Invalid values of max_list and min_btree.
+ *
+ **/
+ public synchronized static native int H5Pget_file_space_strategy(long fcpl_id, boolean[] persist, long[] threshold)
+ throws HDF5LibraryException, IllegalArgumentException;
+
+ /**
+ * H5Pget_file_space_strategy_persist retrieves the free-space persistence setting for the file associated with fcpl_id.
+ *
+ * @param fcpl_id
+ * IN: File creation property list identifier
+ *
+ * @return the current free-space persistence.
+ *
+ * @exception HDF5LibraryException
+ * - Error from the HDF-5 Library.
+ * @exception IllegalArgumentException
+ * - Invalid values of max_list and min_btree.
+ *
+ **/
+ public synchronized static native boolean H5Pget_file_space_strategy_persist(long fcpl_id)
+ throws HDF5LibraryException, IllegalArgumentException;
+
+ /**
+ * H5Pget_file_space_strategy_threshold retrieves the free-space section threshold for the file associated with fcpl_id.
+ *
+ * @param fcpl_id
+ * IN: File creation property list identifier
+ *
+ * @return the current free-space section threshold.
+ *
* @exception HDF5LibraryException
* - Error from the HDF-5 Library.
* @exception IllegalArgumentException
* - Invalid values of max_list and min_btree.
*
**/
- public synchronized static native void H5Pget_file_space(long fcpl_id, int[] strategy, long[] threshold)
+ public synchronized static native long H5Pget_file_space_strategy_threshold(long fcpl_id)
throws HDF5LibraryException, IllegalArgumentException;
+ /**
+ * H5Pset_file_space_page_size sets the file space page size used for paged aggregation of small metadata and raw data.
+ *
+ * @param fcpl_id
+ * IN: File creation property list identifier
+ * @param page_size
+ * IN: The file space page size.
+ *
+ *
+ * @exception HDF5LibraryException
+ * - Error from the HDF-5 Library.
+ * @exception IllegalArgumentException
+ * - Invalid values of max_list and min_btree.
+ *
+ **/
+ public synchronized static native void H5Pset_file_space_page_size(long fcpl_id, long page_size)
+ throws HDF5LibraryException, IllegalArgumentException;
+
+ /**
+ * H5Pget_file_space_page_size retrieves the file space page size for paged aggregation.
+ *
+ * @param fcpl_id
+ * IN: File creation property list identifier
+ *
+ * @return the current file space page size.
+ *
+ * @exception HDF5LibraryException
+ * - Error from the HDF-5 Library.
+ * @exception IllegalArgumentException
+ * - Invalid values of max_list and min_btree.
+ *
+ **/
+ public synchronized static native long H5Pget_file_space_page_size(long fcpl_id)
+ throws HDF5LibraryException, IllegalArgumentException;
+
+
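The paired page-size calls follow the same pattern; a short sketch, reusing the fcpl id from the example above, with 4096 bytes as an arbitrary illustrative page size rather than a library default:

// Sketch: configure and read back the file space page size on an existing FCPL.
H5.H5Pset_file_space_page_size(fcpl, 4096L);
long pageSize = H5.H5Pget_file_space_page_size(fcpl);   // expected to read back 4096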
// File access property list (FAPL) routines
/**
@@ -5753,7 +5812,8 @@ public class H5 implements java.io.Serializable {
*
* @param fapl_id
* IN: File access property list identifier
- * @param mdc_log_options, the options
+ * @param mdc_log_options
+ * the options
* mdc_logging_options[0] = is_enabled, whether logging is enabled
* mdc_logging_options[1] = start_on_access, whether the logging begins as soon as the file is opened or created
*
@@ -7145,6 +7205,88 @@ public class H5 implements java.io.Serializable {
**/
public synchronized static native int H5PLget_loading_state() throws HDF5LibraryException;
+ /**
+ * H5PLappend inserts the plugin path at the end of the table.
+ *
+ * @param plugin_path
+ * IN: Path for location of filter plugin libraries.
+ *
+ * @exception HDF5LibraryException
+ * - Error from the HDF-5 Library.
+ **/
+ public synchronized static native void H5PLappend(String plugin_path) throws HDF5LibraryException;
+
+ /**
+ * H5PLprepend inserts the plugin path at the beginning of the table.
+ *
+ * @param plugin_path
+ * IN: Path for location of filter plugin libraries.
+ *
+ * @exception HDF5LibraryException
+ * - Error from the HDF-5 Library.
+ **/
+ public synchronized static native void H5PLprepend(String plugin_path) throws HDF5LibraryException;
+
+ /**
+ * H5PLreplace replaces the plugin path at the specified index.
+ *
+ * @param plugin_path
+ * IN: Path for location of filter plugin libraries.
+ * @param index
+ * IN: The table index (0-based).
+ *
+ * @exception HDF5LibraryException
+ * - Error from the HDF-5 Library.
+ **/
+ public synchronized static native void H5PLreplace(String plugin_path, int index) throws HDF5LibraryException;
+
+ /**
+ * H5PLinsert inserts the plugin path at the specified index.
+ *
+ * @param plugin_path
+ * IN: Path for location of filter plugin libraries.
+ * @param index
+ * IN: The table index (0-based).
+ *
+ * @exception HDF5LibraryException
+ * - Error from the HDF-5 Library.
+ **/
+ public synchronized static native void H5PLinsert(String plugin_path, int index) throws HDF5LibraryException;
+
+ /**
+ * H5PLremove removes the plugin path at the specified index.
+ *
+ * @param index
+ * IN: The table index (0-based).
+ *
+ * @exception HDF5LibraryException
+ * - Error from the HDF-5 Library.
+ **/
+ public synchronized static native void H5PLremove(int index) throws HDF5LibraryException;
+
+ /**
+ * H5PLget retrieves the plugin path at the specified index.
+ *
+ * @param index
+ * IN: The table index (0-based).
+ *
+ * @return the current path at the index in plugin path table
+ *
+ * @exception HDF5LibraryException
+ * - Error from the HDF-5 Library.
+ **/
+ public synchronized static native String H5PLget(int index) throws HDF5LibraryException;
+
+ /**
+ * H5PLsize retrieves the size of the current list of plugin paths.
+ *
+ * @return the current number of paths in the plugin path table
+ *
+ * @exception HDF5LibraryException
+ * - Error from the HDF-5 Library.
+ **/
+ public synchronized static native int H5PLsize() throws HDF5LibraryException;
+
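A brief sketch of how the new plugin-path table routines combine; both directory paths are placeholders chosen for illustration, not defaults shipped with the library:

// Sketch: grow, inspect, and shrink the filter plugin search table.
H5.H5PLappend("/opt/hdf5/plugins");          // searched after the existing entries
H5.H5PLprepend("/usr/local/hdf5/plugins");   // searched before the existing entries
for (int i = 0; i < H5.H5PLsize(); i++)
    System.out.println(i + ": " + H5.H5PLget(i));
H5.H5PLremove(0);                            // drop the entry that was just prepended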
// ////////////////////////////////////////////////////////////
// //
// H5R: HDF5 1.8 Reference API Functions //
diff --git a/java/src/hdf/hdf5lib/HDF5Constants.java b/java/src/hdf/hdf5lib/HDF5Constants.java
index acd3eb5..8a3d6c8 100644
--- a/java/src/hdf/hdf5lib/HDF5Constants.java
+++ b/java/src/hdf/hdf5lib/HDF5Constants.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
@@ -219,12 +217,12 @@ public class HDF5Constants {
public static final int H5F_SCOPE_GLOBAL = H5F_SCOPE_GLOBAL();
public static final int H5F_SCOPE_LOCAL = H5F_SCOPE_LOCAL();
public static final int H5F_UNLIMITED = H5F_UNLIMITED();
- public static final int H5F_FILE_SPACE_DEFAULT = H5F_FILE_SPACE_DEFAULT();
- public static final int H5F_FILE_SPACE_ALL_PERSIST = H5F_FILE_SPACE_ALL_PERSIST();
- public static final int H5F_FILE_SPACE_ALL = H5F_FILE_SPACE_ALL();
- public static final int H5F_FILE_SPACE_AGGR_VFD = H5F_FILE_SPACE_AGGR_VFD();
- public static final int H5F_FILE_SPACE_VFD = H5F_FILE_SPACE_VFD();
- public static final int H5F_FILE_SPACE_NTYPES = H5F_FILE_SPACE_NTYPES();
+
+ public static final int H5F_FSPACE_STRATEGY_FSM_AGGR = H5F_FSPACE_STRATEGY_FSM_AGGR();
+ public static final int H5F_FSPACE_STRATEGY_AGGR = H5F_FSPACE_STRATEGY_AGGR();
+ public static final int H5F_FSPACE_STRATEGY_PAGE = H5F_FSPACE_STRATEGY_PAGE();
+ public static final int H5F_FSPACE_STRATEGY_NONE = H5F_FSPACE_STRATEGY_NONE();
+ public static final int H5F_FSPACE_STRATEGY_NTYPES = H5F_FSPACE_STRATEGY_NTYPES();
public static final long H5FD_CORE = H5FD_CORE();
public static final long H5FD_DIRECT = H5FD_DIRECT();
@@ -1024,17 +1022,15 @@ public class HDF5Constants {
private static native final int H5F_UNLIMITED();
- private static native final int H5F_FILE_SPACE_DEFAULT();
-
- private static native final int H5F_FILE_SPACE_ALL_PERSIST();
+ private static native final int H5F_FSPACE_STRATEGY_FSM_AGGR();
- private static native final int H5F_FILE_SPACE_ALL();
+ private static native final int H5F_FSPACE_STRATEGY_AGGR();
- private static native final int H5F_FILE_SPACE_AGGR_VFD();
+ private static native final int H5F_FSPACE_STRATEGY_PAGE();
- private static native final int H5F_FILE_SPACE_VFD();
+ private static native final int H5F_FSPACE_STRATEGY_NONE();
- private static native final int H5F_FILE_SPACE_NTYPES();
+ private static native final int H5F_FSPACE_STRATEGY_NTYPES();
private static native final long H5FD_CORE();
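A short sketch of interpreting the value returned by H5Pget_file_space_strategy against the renamed constants; fcpl is assumed to be a valid file creation property list id, and passing null skips the persist and threshold queries as described in the javadoc above:

// Sketch: map the returned strategy code onto the H5F_FSPACE_STRATEGY_* constants.
int strategy = H5.H5Pget_file_space_strategy(fcpl, null, null);
if (strategy == HDF5Constants.H5F_FSPACE_STRATEGY_PAGE)
    System.out.println("paged aggregation");
else if (strategy == HDF5Constants.H5F_FSPACE_STRATEGY_FSM_AGGR)
    System.out.println("free-space managers with aggregators");
else if (strategy == HDF5Constants.H5F_FSPACE_STRATEGY_AGGR)
    System.out.println("aggregators only");
else if (strategy == HDF5Constants.H5F_FSPACE_STRATEGY_NONE)
    System.out.println("no free-space tracking");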
diff --git a/java/src/hdf/hdf5lib/HDF5GroupInfo.java b/java/src/hdf/hdf5lib/HDF5GroupInfo.java
index fa33fec..a45cb7c 100644
--- a/java/src/hdf/hdf5lib/HDF5GroupInfo.java
+++ b/java/src/hdf/hdf5lib/HDF5GroupInfo.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
diff --git a/java/src/hdf/hdf5lib/HDFArray.java b/java/src/hdf/hdf5lib/HDFArray.java
index 55c19e4..529aecb 100644
--- a/java/src/hdf/hdf5lib/HDFArray.java
+++ b/java/src/hdf/hdf5lib/HDFArray.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
diff --git a/java/src/hdf/hdf5lib/HDFNativeData.java b/java/src/hdf/hdf5lib/HDFNativeData.java
index 66929fb..9637f62 100644
--- a/java/src/hdf/hdf5lib/HDFNativeData.java
+++ b/java/src/hdf/hdf5lib/HDFNativeData.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib;
diff --git a/java/src/hdf/hdf5lib/callbacks/Callbacks.java b/java/src/hdf/hdf5lib/callbacks/Callbacks.java
index 5757eef..9fc961a 100644
--- a/java/src/hdf/hdf5lib/callbacks/Callbacks.java
+++ b/java/src/hdf/hdf5lib/callbacks/Callbacks.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.callbacks;
diff --git a/java/src/hdf/hdf5lib/callbacks/H5A_iterate_cb.java b/java/src/hdf/hdf5lib/callbacks/H5A_iterate_cb.java
index fa390a2..988c8fb 100644
--- a/java/src/hdf/hdf5lib/callbacks/H5A_iterate_cb.java
+++ b/java/src/hdf/hdf5lib/callbacks/H5A_iterate_cb.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.callbacks;
diff --git a/java/src/hdf/hdf5lib/callbacks/H5A_iterate_t.java b/java/src/hdf/hdf5lib/callbacks/H5A_iterate_t.java
index 4816f8f..51d67d5 100644
--- a/java/src/hdf/hdf5lib/callbacks/H5A_iterate_t.java
+++ b/java/src/hdf/hdf5lib/callbacks/H5A_iterate_t.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.callbacks;
diff --git a/java/src/hdf/hdf5lib/callbacks/H5D_append_cb.java b/java/src/hdf/hdf5lib/callbacks/H5D_append_cb.java
index 1b66e34..ead8f73 100644
--- a/java/src/hdf/hdf5lib/callbacks/H5D_append_cb.java
+++ b/java/src/hdf/hdf5lib/callbacks/H5D_append_cb.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.callbacks;
diff --git a/java/src/hdf/hdf5lib/callbacks/H5D_append_t.java b/java/src/hdf/hdf5lib/callbacks/H5D_append_t.java
index dcbd331..8bf6410 100644
--- a/java/src/hdf/hdf5lib/callbacks/H5D_append_t.java
+++ b/java/src/hdf/hdf5lib/callbacks/H5D_append_t.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.callbacks;
diff --git a/java/src/hdf/hdf5lib/callbacks/H5D_iterate_cb.java b/java/src/hdf/hdf5lib/callbacks/H5D_iterate_cb.java
index bac3dd9..a911a1c 100644
--- a/java/src/hdf/hdf5lib/callbacks/H5D_iterate_cb.java
+++ b/java/src/hdf/hdf5lib/callbacks/H5D_iterate_cb.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.callbacks;
diff --git a/java/src/hdf/hdf5lib/callbacks/H5D_iterate_t.java b/java/src/hdf/hdf5lib/callbacks/H5D_iterate_t.java
index 03bcc20..d049711 100644
--- a/java/src/hdf/hdf5lib/callbacks/H5D_iterate_t.java
+++ b/java/src/hdf/hdf5lib/callbacks/H5D_iterate_t.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.callbacks;
diff --git a/java/src/hdf/hdf5lib/callbacks/H5E_walk_cb.java b/java/src/hdf/hdf5lib/callbacks/H5E_walk_cb.java
index 672d151..afc04ae 100644
--- a/java/src/hdf/hdf5lib/callbacks/H5E_walk_cb.java
+++ b/java/src/hdf/hdf5lib/callbacks/H5E_walk_cb.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.callbacks;
diff --git a/java/src/hdf/hdf5lib/callbacks/H5E_walk_t.java b/java/src/hdf/hdf5lib/callbacks/H5E_walk_t.java
index f3f8a39..0be8977 100644
--- a/java/src/hdf/hdf5lib/callbacks/H5E_walk_t.java
+++ b/java/src/hdf/hdf5lib/callbacks/H5E_walk_t.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.callbacks;
diff --git a/java/src/hdf/hdf5lib/callbacks/H5L_iterate_cb.java b/java/src/hdf/hdf5lib/callbacks/H5L_iterate_cb.java
index 5218311..ec71911 100644
--- a/java/src/hdf/hdf5lib/callbacks/H5L_iterate_cb.java
+++ b/java/src/hdf/hdf5lib/callbacks/H5L_iterate_cb.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.callbacks;
diff --git a/java/src/hdf/hdf5lib/callbacks/H5L_iterate_t.java b/java/src/hdf/hdf5lib/callbacks/H5L_iterate_t.java
index b7c25f4..28ffb8a 100644
--- a/java/src/hdf/hdf5lib/callbacks/H5L_iterate_t.java
+++ b/java/src/hdf/hdf5lib/callbacks/H5L_iterate_t.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.callbacks;
diff --git a/java/src/hdf/hdf5lib/callbacks/H5O_iterate_cb.java b/java/src/hdf/hdf5lib/callbacks/H5O_iterate_cb.java
index 630205f..89cf206 100644
--- a/java/src/hdf/hdf5lib/callbacks/H5O_iterate_cb.java
+++ b/java/src/hdf/hdf5lib/callbacks/H5O_iterate_cb.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.callbacks;
diff --git a/java/src/hdf/hdf5lib/callbacks/H5O_iterate_t.java b/java/src/hdf/hdf5lib/callbacks/H5O_iterate_t.java
index da64f19..1491b09 100644
--- a/java/src/hdf/hdf5lib/callbacks/H5O_iterate_t.java
+++ b/java/src/hdf/hdf5lib/callbacks/H5O_iterate_t.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.callbacks;
diff --git a/java/src/hdf/hdf5lib/callbacks/H5P_cls_close_func_cb.java b/java/src/hdf/hdf5lib/callbacks/H5P_cls_close_func_cb.java
index dcd4ed2..e77d386 100644
--- a/java/src/hdf/hdf5lib/callbacks/H5P_cls_close_func_cb.java
+++ b/java/src/hdf/hdf5lib/callbacks/H5P_cls_close_func_cb.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.callbacks;
diff --git a/java/src/hdf/hdf5lib/callbacks/H5P_cls_close_func_t.java b/java/src/hdf/hdf5lib/callbacks/H5P_cls_close_func_t.java
index 222bd26..0d5ad9e 100644
--- a/java/src/hdf/hdf5lib/callbacks/H5P_cls_close_func_t.java
+++ b/java/src/hdf/hdf5lib/callbacks/H5P_cls_close_func_t.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.callbacks;
diff --git a/java/src/hdf/hdf5lib/callbacks/H5P_cls_copy_func_cb.java b/java/src/hdf/hdf5lib/callbacks/H5P_cls_copy_func_cb.java
index 0dc8a94..139f877 100644
--- a/java/src/hdf/hdf5lib/callbacks/H5P_cls_copy_func_cb.java
+++ b/java/src/hdf/hdf5lib/callbacks/H5P_cls_copy_func_cb.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.callbacks;
diff --git a/java/src/hdf/hdf5lib/callbacks/H5P_cls_copy_func_t.java b/java/src/hdf/hdf5lib/callbacks/H5P_cls_copy_func_t.java
index eed29bb..12c2601 100644
--- a/java/src/hdf/hdf5lib/callbacks/H5P_cls_copy_func_t.java
+++ b/java/src/hdf/hdf5lib/callbacks/H5P_cls_copy_func_t.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.callbacks;
diff --git a/java/src/hdf/hdf5lib/callbacks/H5P_cls_create_func_cb.java b/java/src/hdf/hdf5lib/callbacks/H5P_cls_create_func_cb.java
index 777e302..e64ec6f 100644
--- a/java/src/hdf/hdf5lib/callbacks/H5P_cls_create_func_cb.java
+++ b/java/src/hdf/hdf5lib/callbacks/H5P_cls_create_func_cb.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.callbacks;
diff --git a/java/src/hdf/hdf5lib/callbacks/H5P_cls_create_func_t.java b/java/src/hdf/hdf5lib/callbacks/H5P_cls_create_func_t.java
index 78973f0..73646f1 100644
--- a/java/src/hdf/hdf5lib/callbacks/H5P_cls_create_func_t.java
+++ b/java/src/hdf/hdf5lib/callbacks/H5P_cls_create_func_t.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.callbacks;
diff --git a/java/src/hdf/hdf5lib/callbacks/H5P_iterate_cb.java b/java/src/hdf/hdf5lib/callbacks/H5P_iterate_cb.java
index 0d98325..5ecb88d 100644
--- a/java/src/hdf/hdf5lib/callbacks/H5P_iterate_cb.java
+++ b/java/src/hdf/hdf5lib/callbacks/H5P_iterate_cb.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.callbacks;
diff --git a/java/src/hdf/hdf5lib/callbacks/H5P_iterate_t.java b/java/src/hdf/hdf5lib/callbacks/H5P_iterate_t.java
index dbbf80d..2e320b4 100644
--- a/java/src/hdf/hdf5lib/callbacks/H5P_iterate_t.java
+++ b/java/src/hdf/hdf5lib/callbacks/H5P_iterate_t.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.callbacks;
diff --git a/java/src/hdf/hdf5lib/callbacks/H5P_prp_close_func_cb.java b/java/src/hdf/hdf5lib/callbacks/H5P_prp_close_func_cb.java
index 3ea44ac..103fe5f 100644
--- a/java/src/hdf/hdf5lib/callbacks/H5P_prp_close_func_cb.java
+++ b/java/src/hdf/hdf5lib/callbacks/H5P_prp_close_func_cb.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.callbacks;
diff --git a/java/src/hdf/hdf5lib/callbacks/H5P_prp_compare_func_cb.java b/java/src/hdf/hdf5lib/callbacks/H5P_prp_compare_func_cb.java
index 0daaca5..46477b9 100644
--- a/java/src/hdf/hdf5lib/callbacks/H5P_prp_compare_func_cb.java
+++ b/java/src/hdf/hdf5lib/callbacks/H5P_prp_compare_func_cb.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.callbacks;
diff --git a/java/src/hdf/hdf5lib/callbacks/H5P_prp_copy_func_cb.java b/java/src/hdf/hdf5lib/callbacks/H5P_prp_copy_func_cb.java
index 4e560e2..57994bb5 100644
--- a/java/src/hdf/hdf5lib/callbacks/H5P_prp_copy_func_cb.java
+++ b/java/src/hdf/hdf5lib/callbacks/H5P_prp_copy_func_cb.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.callbacks;
diff --git a/java/src/hdf/hdf5lib/callbacks/H5P_prp_create_func_cb.java b/java/src/hdf/hdf5lib/callbacks/H5P_prp_create_func_cb.java
index dc4fee1..8791c22 100644
--- a/java/src/hdf/hdf5lib/callbacks/H5P_prp_create_func_cb.java
+++ b/java/src/hdf/hdf5lib/callbacks/H5P_prp_create_func_cb.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.callbacks;
diff --git a/java/src/hdf/hdf5lib/callbacks/H5P_prp_delete_func_cb.java b/java/src/hdf/hdf5lib/callbacks/H5P_prp_delete_func_cb.java
index 9aa27ab..46cd097 100644
--- a/java/src/hdf/hdf5lib/callbacks/H5P_prp_delete_func_cb.java
+++ b/java/src/hdf/hdf5lib/callbacks/H5P_prp_delete_func_cb.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.callbacks;
diff --git a/java/src/hdf/hdf5lib/callbacks/H5P_prp_get_func_cb.java b/java/src/hdf/hdf5lib/callbacks/H5P_prp_get_func_cb.java
index bfc7b37..04599a0 100644
--- a/java/src/hdf/hdf5lib/callbacks/H5P_prp_get_func_cb.java
+++ b/java/src/hdf/hdf5lib/callbacks/H5P_prp_get_func_cb.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.callbacks;
diff --git a/java/src/hdf/hdf5lib/callbacks/H5P_prp_set_func_cb.java b/java/src/hdf/hdf5lib/callbacks/H5P_prp_set_func_cb.java
index c1e9b00..6ac750f 100644
--- a/java/src/hdf/hdf5lib/callbacks/H5P_prp_set_func_cb.java
+++ b/java/src/hdf/hdf5lib/callbacks/H5P_prp_set_func_cb.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.callbacks;
diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5AtomException.java b/java/src/hdf/hdf5lib/exceptions/HDF5AtomException.java
index 3309223..850044c 100644
--- a/java/src/hdf/hdf5lib/exceptions/HDF5AtomException.java
+++ b/java/src/hdf/hdf5lib/exceptions/HDF5AtomException.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.exceptions;
diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5AttributeException.java b/java/src/hdf/hdf5lib/exceptions/HDF5AttributeException.java
index 8c1a7b8..87b075b 100644
--- a/java/src/hdf/hdf5lib/exceptions/HDF5AttributeException.java
+++ b/java/src/hdf/hdf5lib/exceptions/HDF5AttributeException.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.exceptions;
diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5BtreeException.java b/java/src/hdf/hdf5lib/exceptions/HDF5BtreeException.java
index dc82fd9..5118254 100644
--- a/java/src/hdf/hdf5lib/exceptions/HDF5BtreeException.java
+++ b/java/src/hdf/hdf5lib/exceptions/HDF5BtreeException.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.exceptions;
diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5DataFiltersException.java b/java/src/hdf/hdf5lib/exceptions/HDF5DataFiltersException.java
index 17fd9db..14ae43f 100644
--- a/java/src/hdf/hdf5lib/exceptions/HDF5DataFiltersException.java
+++ b/java/src/hdf/hdf5lib/exceptions/HDF5DataFiltersException.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.exceptions;
diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5DataStorageException.java b/java/src/hdf/hdf5lib/exceptions/HDF5DataStorageException.java
index f22c9ec..721d3ef 100644
--- a/java/src/hdf/hdf5lib/exceptions/HDF5DataStorageException.java
+++ b/java/src/hdf/hdf5lib/exceptions/HDF5DataStorageException.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.exceptions;
diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5DatasetInterfaceException.java b/java/src/hdf/hdf5lib/exceptions/HDF5DatasetInterfaceException.java
index e39709d..3c55a6b 100644
--- a/java/src/hdf/hdf5lib/exceptions/HDF5DatasetInterfaceException.java
+++ b/java/src/hdf/hdf5lib/exceptions/HDF5DatasetInterfaceException.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.exceptions;
diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5DataspaceInterfaceException.java b/java/src/hdf/hdf5lib/exceptions/HDF5DataspaceInterfaceException.java
index 531202c..c0182ee 100644
--- a/java/src/hdf/hdf5lib/exceptions/HDF5DataspaceInterfaceException.java
+++ b/java/src/hdf/hdf5lib/exceptions/HDF5DataspaceInterfaceException.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.exceptions;
diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5DatatypeInterfaceException.java b/java/src/hdf/hdf5lib/exceptions/HDF5DatatypeInterfaceException.java
index 3c0c144..4da074b 100644
--- a/java/src/hdf/hdf5lib/exceptions/HDF5DatatypeInterfaceException.java
+++ b/java/src/hdf/hdf5lib/exceptions/HDF5DatatypeInterfaceException.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.exceptions;
diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5Exception.java b/java/src/hdf/hdf5lib/exceptions/HDF5Exception.java
index 8a83ca1..1b55437 100644
--- a/java/src/hdf/hdf5lib/exceptions/HDF5Exception.java
+++ b/java/src/hdf/hdf5lib/exceptions/HDF5Exception.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.exceptions;
diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5ExternalFileListException.java b/java/src/hdf/hdf5lib/exceptions/HDF5ExternalFileListException.java
index 62c5dd8..28f5437 100644
--- a/java/src/hdf/hdf5lib/exceptions/HDF5ExternalFileListException.java
+++ b/java/src/hdf/hdf5lib/exceptions/HDF5ExternalFileListException.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.exceptions;
diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5FileInterfaceException.java b/java/src/hdf/hdf5lib/exceptions/HDF5FileInterfaceException.java
index 0b63b41..c8dbcea 100644
--- a/java/src/hdf/hdf5lib/exceptions/HDF5FileInterfaceException.java
+++ b/java/src/hdf/hdf5lib/exceptions/HDF5FileInterfaceException.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.exceptions;
diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5FunctionArgumentException.java b/java/src/hdf/hdf5lib/exceptions/HDF5FunctionArgumentException.java
index c984aae..e7f20e0 100644
--- a/java/src/hdf/hdf5lib/exceptions/HDF5FunctionArgumentException.java
+++ b/java/src/hdf/hdf5lib/exceptions/HDF5FunctionArgumentException.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.exceptions;
diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5FunctionEntryExitException.java b/java/src/hdf/hdf5lib/exceptions/HDF5FunctionEntryExitException.java
index 5dad930..26e836f 100644
--- a/java/src/hdf/hdf5lib/exceptions/HDF5FunctionEntryExitException.java
+++ b/java/src/hdf/hdf5lib/exceptions/HDF5FunctionEntryExitException.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.exceptions;
diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5HeapException.java b/java/src/hdf/hdf5lib/exceptions/HDF5HeapException.java
index dfe4ced..a32e2a1 100644
--- a/java/src/hdf/hdf5lib/exceptions/HDF5HeapException.java
+++ b/java/src/hdf/hdf5lib/exceptions/HDF5HeapException.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.exceptions;
diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5InternalErrorException.java b/java/src/hdf/hdf5lib/exceptions/HDF5InternalErrorException.java
index 45a836c..25ac572 100644
--- a/java/src/hdf/hdf5lib/exceptions/HDF5InternalErrorException.java
+++ b/java/src/hdf/hdf5lib/exceptions/HDF5InternalErrorException.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.exceptions;
diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5JavaException.java b/java/src/hdf/hdf5lib/exceptions/HDF5JavaException.java
index 2db79ef..7c61194 100644
--- a/java/src/hdf/hdf5lib/exceptions/HDF5JavaException.java
+++ b/java/src/hdf/hdf5lib/exceptions/HDF5JavaException.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.exceptions;
diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5LibraryException.java b/java/src/hdf/hdf5lib/exceptions/HDF5LibraryException.java
index 8b21ff8..5ae977d 100644
--- a/java/src/hdf/hdf5lib/exceptions/HDF5LibraryException.java
+++ b/java/src/hdf/hdf5lib/exceptions/HDF5LibraryException.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.exceptions;
diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5LowLevelIOException.java b/java/src/hdf/hdf5lib/exceptions/HDF5LowLevelIOException.java
index e0a95d0..6d792c3 100644
--- a/java/src/hdf/hdf5lib/exceptions/HDF5LowLevelIOException.java
+++ b/java/src/hdf/hdf5lib/exceptions/HDF5LowLevelIOException.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.exceptions;
diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5MetaDataCacheException.java b/java/src/hdf/hdf5lib/exceptions/HDF5MetaDataCacheException.java
index cbc3b75..02f2d33 100644
--- a/java/src/hdf/hdf5lib/exceptions/HDF5MetaDataCacheException.java
+++ b/java/src/hdf/hdf5lib/exceptions/HDF5MetaDataCacheException.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.exceptions;
diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5ObjectHeaderException.java b/java/src/hdf/hdf5lib/exceptions/HDF5ObjectHeaderException.java
index 0cdd0d1..2bb6861 100644
--- a/java/src/hdf/hdf5lib/exceptions/HDF5ObjectHeaderException.java
+++ b/java/src/hdf/hdf5lib/exceptions/HDF5ObjectHeaderException.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.exceptions;
diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5PropertyListInterfaceException.java b/java/src/hdf/hdf5lib/exceptions/HDF5PropertyListInterfaceException.java
index aa9f668..b1baaad 100644
--- a/java/src/hdf/hdf5lib/exceptions/HDF5PropertyListInterfaceException.java
+++ b/java/src/hdf/hdf5lib/exceptions/HDF5PropertyListInterfaceException.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.exceptions;
diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5ReferenceException.java b/java/src/hdf/hdf5lib/exceptions/HDF5ReferenceException.java
index 2c8c93b..ea4a89a 100644
--- a/java/src/hdf/hdf5lib/exceptions/HDF5ReferenceException.java
+++ b/java/src/hdf/hdf5lib/exceptions/HDF5ReferenceException.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.exceptions;
diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5ResourceUnavailableException.java b/java/src/hdf/hdf5lib/exceptions/HDF5ResourceUnavailableException.java
index f1c0c60..fc92578 100644
--- a/java/src/hdf/hdf5lib/exceptions/HDF5ResourceUnavailableException.java
+++ b/java/src/hdf/hdf5lib/exceptions/HDF5ResourceUnavailableException.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.exceptions;
diff --git a/java/src/hdf/hdf5lib/exceptions/HDF5SymbolTableException.java b/java/src/hdf/hdf5lib/exceptions/HDF5SymbolTableException.java
index 87384d1..b90ce64 100644
--- a/java/src/hdf/hdf5lib/exceptions/HDF5SymbolTableException.java
+++ b/java/src/hdf/hdf5lib/exceptions/HDF5SymbolTableException.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.exceptions;
diff --git a/java/src/hdf/hdf5lib/structs/H5AC_cache_config_t.java b/java/src/hdf/hdf5lib/structs/H5AC_cache_config_t.java
index 2128d8a..20863dc 100644
--- a/java/src/hdf/hdf5lib/structs/H5AC_cache_config_t.java
+++ b/java/src/hdf/hdf5lib/structs/H5AC_cache_config_t.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.structs;
diff --git a/java/src/hdf/hdf5lib/structs/H5A_info_t.java b/java/src/hdf/hdf5lib/structs/H5A_info_t.java
index b04a757..a46a495 100644
--- a/java/src/hdf/hdf5lib/structs/H5A_info_t.java
+++ b/java/src/hdf/hdf5lib/structs/H5A_info_t.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.structs;
diff --git a/java/src/hdf/hdf5lib/structs/H5E_error2_t.java b/java/src/hdf/hdf5lib/structs/H5E_error2_t.java
index b7cd60a..257ad05 100644
--- a/java/src/hdf/hdf5lib/structs/H5E_error2_t.java
+++ b/java/src/hdf/hdf5lib/structs/H5E_error2_t.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.structs;
diff --git a/java/src/hdf/hdf5lib/structs/H5F_info2_t.java b/java/src/hdf/hdf5lib/structs/H5F_info2_t.java
index 8fbde68..4664b5b 100644
--- a/java/src/hdf/hdf5lib/structs/H5F_info2_t.java
+++ b/java/src/hdf/hdf5lib/structs/H5F_info2_t.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.structs;
diff --git a/java/src/hdf/hdf5lib/structs/H5G_info_t.java b/java/src/hdf/hdf5lib/structs/H5G_info_t.java
index daa6808..56c876e 100644
--- a/java/src/hdf/hdf5lib/structs/H5G_info_t.java
+++ b/java/src/hdf/hdf5lib/structs/H5G_info_t.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.structs;
diff --git a/java/src/hdf/hdf5lib/structs/H5L_info_t.java b/java/src/hdf/hdf5lib/structs/H5L_info_t.java
index d43853f..3bbb189 100644
--- a/java/src/hdf/hdf5lib/structs/H5L_info_t.java
+++ b/java/src/hdf/hdf5lib/structs/H5L_info_t.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.structs;
diff --git a/java/src/hdf/hdf5lib/structs/H5O_hdr_info_t.java b/java/src/hdf/hdf5lib/structs/H5O_hdr_info_t.java
index 86cf883..a39c1ed 100644
--- a/java/src/hdf/hdf5lib/structs/H5O_hdr_info_t.java
+++ b/java/src/hdf/hdf5lib/structs/H5O_hdr_info_t.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.structs;
diff --git a/java/src/hdf/hdf5lib/structs/H5O_info_t.java b/java/src/hdf/hdf5lib/structs/H5O_info_t.java
index a89aaf0..ac32f6a 100644
--- a/java/src/hdf/hdf5lib/structs/H5O_info_t.java
+++ b/java/src/hdf/hdf5lib/structs/H5O_info_t.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.structs;
diff --git a/java/src/hdf/hdf5lib/structs/H5_ih_info_t.java b/java/src/hdf/hdf5lib/structs/H5_ih_info_t.java
index 450f548..ea36d85 100644
--- a/java/src/hdf/hdf5lib/structs/H5_ih_info_t.java
+++ b/java/src/hdf/hdf5lib/structs/H5_ih_info_t.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package hdf.hdf5lib.structs;
diff --git a/java/src/jni/CMakeLists.txt b/java/src/jni/CMakeLists.txt
index 9da31ee..9e6c099 100644
--- a/java/src/jni/CMakeLists.txt
+++ b/java/src/jni/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_JAVA_JNI C CXX)
set (HDF5_JAVA_JNI_CSRCS
@@ -67,8 +67,8 @@ if (WIN32)
COMMENT "Copying ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/${CMAKE_CFG_INTDIR}/${HDF5_JAVA_JNI_DLL_NAME}${CMAKE_SHARED_LIBRARY_SUFFIX} to ${CMAKE_RUNTIME_OUTPUT_DIRECTORY}/"
)
add_dependencies (HDF5_JAVA_JNI-Test-Copy ${HDF5_JAVA_JNI_LIB_TARGET})
- endif (BUILD_TESTING)
-endif (WIN32)
+ endif ()
+endif ()
#-----------------------------------------------------------------------------
# Add Target(s) to CMake Install for import into other projects
@@ -79,11 +79,11 @@ if (HDF5_EXPORTED_TARGETS)
TARGETS
${HDF5_JAVA_JNI_LIB_TARGET}
EXPORT
- ${HDF5_JAVA_EXPORTED_TARGETS}
+ ${HDF5_EXPORTED_TARGETS}
LIBRARY DESTINATION ${HDF5_INSTALL_LIB_DIR} COMPONENT libraries
ARCHIVE DESTINATION ${HDF5_INSTALL_LIB_DIR} COMPONENT libraries
RUNTIME DESTINATION ${HDF5_INSTALL_LIB_DIR} COMPONENT libraries
FRAMEWORK DESTINATION ${HDF5_INSTALL_FWRK_DIR} COMPONENT libraries
INCLUDES DESTINATION include
)
-endif (HDF5_EXPORTED_TARGETS)
+endif ()
diff --git a/java/src/jni/Makefile.am b/java/src/jni/Makefile.am
index 4667407..6ded371 100644
--- a/java/src/jni/Makefile.am
+++ b/java/src/jni/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
@@ -31,7 +29,7 @@ AM_CPPFLAGS+=-I$(top_srcdir)/src -I$(top_srcdir)/java/src/jni $(JNIFLAGS)
lib_LTLIBRARIES=libhdf5_java.la
# Add libtool numbers to the HDF5 Java (JNI) library (from config/lt_vers.am)
-libhdf5_java_la_LDFLAGS= -version-info $(LT_JAVA_VERS_INTERFACE):$(LT_JAVA_VERS_REVISION):$(LT_JAVA_VERS_AGE) $(AM_LDFLAGS)
+libhdf5_java_la_LDFLAGS = -avoid-version -shared -export-dynamic -version-info $(LT_JAVA_VERS_INTERFACE):$(LT_JAVA_VERS_REVISION):$(LT_JAVA_VERS_AGE) $(AM_LDFLAGS)
# Source files for the library
libhdf5_java_la_SOURCES=exceptionImp.c h5Constants.c nativeData.c h5util.c h5Imp.c \
diff --git a/java/src/jni/exceptionImp.c b/java/src/jni/exceptionImp.c
index 28eec21..05c193f 100644
--- a/java/src/jni/exceptionImp.c
+++ b/java/src/jni/exceptionImp.c
@@ -5,17 +5,15 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
* For details of the HDF libraries, see the HDF Documentation at:
- * http://hdfdfgroup.org/HDF5/doc/
+ * http://hdfgroup.org/HDF5/doc/
*
*/
diff --git a/java/src/jni/exceptionImp.h b/java/src/jni/exceptionImp.h
index a9b812d..cb74602 100644
--- a/java/src/jni/exceptionImp.h
+++ b/java/src/jni/exceptionImp.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <jni.h>
diff --git a/java/src/jni/h5Constants.c b/java/src/jni/h5Constants.c
index 976a224..cd1c2f0 100644
--- a/java/src/jni/h5Constants.c
+++ b/java/src/jni/h5Constants.c
@@ -5,17 +5,15 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
* For details of the HDF libraries, see the HDF Documentation at:
- * http://hdfdfgroup.org/HDF5/doc/
+ * http://hdfgroup.org/HDF5/doc/
*
*/
@@ -28,396 +26,394 @@ extern "C" {
#pragma GCC diagnostic ignored "-Wmissing-prototypes"
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5_1QUARTER_1HADDR_1MAX(JNIEnv *env, jclass cls) { return (hsize_t)HADDR_MAX/4; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5_1SZIP_1ALLOW_1K13_1OPTION_1MASK(JNIEnv *env, jclass cls) { return H5_SZIP_ALLOW_K13_OPTION_MASK; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5_1SZIP_1CHIP_1OPTION_1MASK(JNIEnv *env, jclass cls) { return H5_SZIP_CHIP_OPTION_MASK; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5_1SZIP_1EC_1OPTION_1MASK(JNIEnv *env, jclass cls) { return H5_SZIP_EC_OPTION_MASK; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5_1SZIP_1MAX_1PIXELS_1PER_1BLOCK(JNIEnv *env, jclass cls) { return H5_SZIP_MAX_PIXELS_PER_BLOCK; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5_1SZIP_1NN_1OPTION_1MASK(JNIEnv *env, jclass cls) { return H5_SZIP_NN_OPTION_MASK; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5_1INDEX_1UNKNOWN(JNIEnv *env, jclass cls) { return H5_INDEX_UNKNOWN; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5_1INDEX_1NAME(JNIEnv *env, jclass cls) { return H5_INDEX_NAME; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5_1INDEX_1CRT_1ORDER(JNIEnv *env, jclass cls) { return H5_INDEX_CRT_ORDER; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5_1INDEX_1N(JNIEnv *env, jclass cls) { return H5_INDEX_N; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5_1ITER_1UNKNOWN(JNIEnv *env, jclass cls) { return H5_ITER_UNKNOWN; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5_1ITER_1INC(JNIEnv *env, jclass cls) { return H5_ITER_INC; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5_1ITER_1DEC(JNIEnv *env, jclass cls) { return H5_ITER_DEC; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5_1ITER_1NATIVE(JNIEnv *env, jclass cls) { return H5_ITER_NATIVE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5_1ITER_1N(JNIEnv *env, jclass cls) { return H5_ITER_N; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5AC_1CURR_1CACHE_1CONFIG_1VERSION(JNIEnv *env, jclass cls) { return H5AC__CURR_CACHE_CONFIG_VERSION; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5AC_1MAX_1TRACE_1FILE_1NAME_1LEN(JNIEnv *env, jclass cls) { return H5AC__MAX_TRACE_FILE_NAME_LEN; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5AC_1METADATA_1WRITE_1STRATEGY_1PROCESS_1ZERO_1ONLY(JNIEnv *env, jclass cls) { return H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5AC_1METADATA_1WRITE_1STRATEGY_1DISTRIBUTED(JNIEnv *env, jclass cls) { return H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5C_1incr_1off(JNIEnv *env, jclass cls) { return H5C_incr__off; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5C_1incr_1threshold(JNIEnv *env, jclass cls) { return H5C_incr__threshold; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5C_1flash_1incr_1off(JNIEnv *env, jclass cls) { return H5C_flash_incr__off; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5C_1flash_1incr_1add_1space(JNIEnv *env, jclass cls) { return H5C_flash_incr__add_space; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5C_1decr_1off(JNIEnv *env, jclass cls) { return H5C_decr__off; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5C_1decr_1threshold(JNIEnv *env, jclass cls) { return H5C_decr__threshold; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5C_1decr_1age_1out(JNIEnv *env, jclass cls) { return H5C_decr__age_out; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5C_1decr_1age_1out_1with_1threshold(JNIEnv *env, jclass cls) { return H5C_decr__age_out_with_threshold; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5D_1CHUNK_1IDX_1BTREE(JNIEnv *env, jclass cls) { return H5D_CHUNK_IDX_BTREE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5D_1ALLOC_1TIME_1DEFAULT(JNIEnv *env, jclass cls) { return H5D_ALLOC_TIME_DEFAULT; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5D_1ALLOC_1TIME_1EARLY(JNIEnv *env, jclass cls) { return H5D_ALLOC_TIME_EARLY; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5D_1ALLOC_1TIME_1ERROR(JNIEnv *env, jclass cls) { return H5D_ALLOC_TIME_ERROR; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5D_1ALLOC_1TIME_1INCR(JNIEnv *env, jclass cls) { return H5D_ALLOC_TIME_INCR; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5D_1ALLOC_1TIME_1LATE(JNIEnv *env, jclass cls) { return H5D_ALLOC_TIME_LATE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5D_1FILL_1TIME_1ERROR(JNIEnv *env, jclass cls) { return H5D_FILL_TIME_ERROR; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5D_1FILL_1TIME_1ALLOC(JNIEnv *env, jclass cls) { return H5D_FILL_TIME_ALLOC; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5D_1FILL_1TIME_1NEVER(JNIEnv *env, jclass cls) { return H5D_FILL_TIME_NEVER; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5D_1FILL_1TIME_1IFSET(JNIEnv *env, jclass cls) { return H5D_FILL_TIME_IFSET; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5D_1FILL_1VALUE_1DEFAULT(JNIEnv *env, jclass cls) { return H5D_FILL_VALUE_DEFAULT; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5D_1FILL_1VALUE_1ERROR(JNIEnv *env, jclass cls) { return H5D_FILL_VALUE_ERROR; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5D_1FILL_1VALUE_1UNDEFINED(JNIEnv *env, jclass cls) { return H5D_FILL_VALUE_UNDEFINED; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5D_1FILL_1VALUE_1USER_1DEFINED(JNIEnv *env, jclass cls) { return H5D_FILL_VALUE_USER_DEFINED; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5D_1LAYOUT_1ERROR(JNIEnv *env, jclass cls) { return H5D_LAYOUT_ERROR; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5D_1CHUNKED(JNIEnv *env, jclass cls) { return H5D_CHUNKED; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5D_1COMPACT(JNIEnv *env, jclass cls) { return H5D_COMPACT; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5D_1CONTIGUOUS(JNIEnv *env, jclass cls) { return H5D_CONTIGUOUS; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5D_1VIRTUAL(JNIEnv *env, jclass cls) { return H5D_VIRTUAL; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5D_1NLAYOUTS(JNIEnv *env, jclass cls) { return H5D_NLAYOUTS; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5D_1SPACE_1STATUS_1ALLOCATED(JNIEnv *env, jclass cls) { return H5D_SPACE_STATUS_ALLOCATED; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5D_1SPACE_1STATUS_1ERROR(JNIEnv *env, jclass cls) { return H5D_SPACE_STATUS_ERROR; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5D_1SPACE_1STATUS_1NOT_1ALLOCATED(JNIEnv *env, jclass cls) { return H5D_SPACE_STATUS_NOT_ALLOCATED; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5D_1SPACE_1STATUS_1PART_1ALLOCATED(JNIEnv *env, jclass cls) { return H5D_SPACE_STATUS_PART_ALLOCATED; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5D_1VDS_1ERROR(JNIEnv *env, jclass cls) { return H5D_VDS_ERROR; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5D_1VDS_1FIRST_1MISSING(JNIEnv *env, jclass cls) { return H5D_VDS_FIRST_MISSING; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5D_1VDS_1LAST_1AVAILABLE(JNIEnv *env, jclass cls) { return H5D_VDS_LAST_AVAILABLE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1ALIGNMENT(JNIEnv *env, jclass cls) { return H5E_ALIGNMENT; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1ALREADYEXISTS(JNIEnv *env, jclass cls) { return H5E_ALREADYEXISTS; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1ALREADYINIT(JNIEnv *env, jclass cls) { return H5E_ALREADYINIT; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1ARGS(JNIEnv *env, jclass cls) { return H5E_ARGS; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1ATOM(JNIEnv *env, jclass cls) { return H5E_ATOM; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1ATTR(JNIEnv *env, jclass cls) { return H5E_ATTR; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1BADATOM(JNIEnv *env, jclass cls) { return H5E_BADATOM; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1BADFILE(JNIEnv *env, jclass cls) { return H5E_BADFILE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1BADGROUP(JNIEnv *env, jclass cls) { return H5E_BADGROUP; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1BADMESG(JNIEnv *env, jclass cls) { return H5E_BADMESG; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1BADRANGE(JNIEnv *env, jclass cls) { return H5E_BADRANGE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1BADSELECT(JNIEnv *env, jclass cls) { return H5E_BADSELECT; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1BADSIZE(JNIEnv *env, jclass cls) { return H5E_BADSIZE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1BADTYPE(JNIEnv *env, jclass cls) { return H5E_BADTYPE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1BADVALUE(JNIEnv *env, jclass cls) { return H5E_BADVALUE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1BTREE(JNIEnv *env, jclass cls) { return H5E_BTREE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1CACHE(JNIEnv *env, jclass cls) { return H5E_CACHE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1CALLBACK(JNIEnv *env, jclass cls) { return H5E_CALLBACK; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1CANAPPLY(JNIEnv *env, jclass cls) { return H5E_CANAPPLY; }
-/*JNIEXPORT jlong JNICALL
+/*JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1CANTALLOC(JNIEnv *env, jclass cls) { return H5E_CANTALLOC; }*/
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1CANTCLIP(JNIEnv *env, jclass cls) { return H5E_CANTCLIP; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1CANTCLOSEFILE(JNIEnv *env, jclass cls) { return H5E_CANTCLOSEFILE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1CANTCONVERT(JNIEnv *env, jclass cls) { return H5E_CANTCONVERT; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1CANTCOPY(JNIEnv *env, jclass cls) { return H5E_CANTCOPY; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1CANTCOUNT(JNIEnv *env, jclass cls) { return H5E_CANTCOUNT; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1CANTCREATE(JNIEnv *env, jclass cls) { return H5E_CANTCREATE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1CANTDEC(JNIEnv *env, jclass cls) { return H5E_CANTDEC; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1CANTDECODE(JNIEnv *env, jclass cls) { return H5E_CANTDECODE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1CANTDELETE(JNIEnv *env, jclass cls) { return H5E_CANTDELETE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1CANTENCODE(JNIEnv *env, jclass cls) { return H5E_CANTENCODE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1CANTFLUSH(JNIEnv *env, jclass cls) { return H5E_CANTFLUSH; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1CANTFREE(JNIEnv *env, jclass cls) { return H5E_CANTFREE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1CANTGET(JNIEnv *env, jclass cls) { return H5E_CANTGET; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1CANTINC(JNIEnv *env, jclass cls) { return H5E_CANTINC; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1CANTINIT(JNIEnv *env, jclass cls) { return H5E_CANTINIT; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1CANTINSERT(JNIEnv *env, jclass cls) { return H5E_CANTINSERT; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1CANTLIST(JNIEnv *env, jclass cls) { return H5E_CANTLIST; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1CANTLOAD(JNIEnv *env, jclass cls) { return H5E_CANTLOAD; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1CANTLOCK(JNIEnv *env, jclass cls) { return H5E_CANTLOCK; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1CANTNEXT(JNIEnv *env, jclass cls) { return H5E_CANTNEXT; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1CANTOPENFILE(JNIEnv *env, jclass cls) { return H5E_CANTOPENFILE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1CANTOPENOBJ(JNIEnv *env, jclass cls) { return H5E_CANTOPENOBJ; }
-/*JNIEXPORT jlong JNICALL
+/*JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1CANTRECV(JNIEnv *env, jclass cls) { return H5E_CANTRECV; }*/
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1CANTREGISTER(JNIEnv *env, jclass cls) { return H5E_CANTREGISTER; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1CANTRELEASE(JNIEnv *env, jclass cls) { return H5E_CANTRELEASE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1CANTSELECT(JNIEnv *env, jclass cls) { return H5E_CANTSELECT; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1CANTSET(JNIEnv *env, jclass cls) { return H5E_CANTSET; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1CANTSPLIT(JNIEnv *env, jclass cls) { return H5E_CANTSPLIT; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1CANTUNLOCK(JNIEnv *env, jclass cls) { return H5E_CANTUNLOCK; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1CLOSEERROR(JNIEnv *env, jclass cls) { return H5E_CLOSEERROR; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1COMPLEN(JNIEnv *env, jclass cls) { return H5E_COMPLEN; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1DATASET(JNIEnv *env, jclass cls) { return H5E_DATASET; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1DATASPACE(JNIEnv *env, jclass cls) { return H5E_DATASPACE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1DATATYPE(JNIEnv *env, jclass cls) { return H5E_DATATYPE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1DEFAULT(JNIEnv *env, jclass cls) { return H5E_DEFAULT; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1DUPCLASS(JNIEnv *env, jclass cls) { return H5E_DUPCLASS; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1EFL(JNIEnv *env, jclass cls) { return H5E_EFL; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1EXISTS(JNIEnv *env, jclass cls) { return H5E_EXISTS; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1FCNTL(JNIEnv *env, jclass cls) { return H5E_FCNTL; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1FILE(JNIEnv *env, jclass cls) { return H5E_FILE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1FILEEXISTS(JNIEnv *env, jclass cls) { return H5E_FILEEXISTS; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1FILEOPEN(JNIEnv *env, jclass cls) { return H5E_FILEOPEN; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1FUNC(JNIEnv *env, jclass cls) { return H5E_FUNC; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1HEAP(JNIEnv *env, jclass cls) { return H5E_HEAP; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1INTERNAL(JNIEnv *env, jclass cls) { return H5E_INTERNAL; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1IO(JNIEnv *env, jclass cls) { return H5E_IO; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1LINK(JNIEnv *env, jclass cls) { return H5E_LINK; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1LINKCOUNT(JNIEnv *env, jclass cls) { return H5E_LINKCOUNT; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1MAJOR(JNIEnv *env, jclass cls) { return H5E_MAJOR; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1MINOR(JNIEnv *env, jclass cls) { return H5E_MINOR; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1MOUNT(JNIEnv *env, jclass cls) { return H5E_MOUNT; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1MPI(JNIEnv *env, jclass cls) { return H5E_MPI; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1MPIERRSTR(JNIEnv *env, jclass cls) { return H5E_MPIERRSTR; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1NOFILTER(JNIEnv *env, jclass cls) { return H5E_NOFILTER; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1NOIDS(JNIEnv *env, jclass cls) { return H5E_NOIDS; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1NONE_1MAJOR(JNIEnv *env, jclass cls) { return H5E_NONE_MAJOR; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1NONE_1MINOR(JNIEnv *env, jclass cls) { return H5E_NONE_MINOR; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1NOSPACE(JNIEnv *env, jclass cls) { return H5E_NOSPACE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1NOTCACHED(JNIEnv *env, jclass cls) { return H5E_NOTCACHED; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1NOTFOUND(JNIEnv *env, jclass cls) { return H5E_NOTFOUND; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1NOTHDF5(JNIEnv *env, jclass cls) { return H5E_NOTHDF5; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1OHDR(JNIEnv *env, jclass cls) { return H5E_OHDR; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1OVERFLOW(JNIEnv *env, jclass cls) { return H5E_OVERFLOW; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1PLINE(JNIEnv *env, jclass cls) { return H5E_PLINE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1PLIST(JNIEnv *env, jclass cls) { return H5E_PLIST; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1PROTECT(JNIEnv *env, jclass cls) { return H5E_PROTECT; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1READERROR(JNIEnv *env, jclass cls) { return H5E_READERROR; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1REFERENCE(JNIEnv *env, jclass cls) { return H5E_REFERENCE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1RESOURCE(JNIEnv *env, jclass cls) { return H5E_RESOURCE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1RS(JNIEnv *env, jclass cls) { return H5E_RS; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1SEEKERROR(JNIEnv *env, jclass cls) { return H5E_SEEKERROR; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1SETLOCAL(JNIEnv *env, jclass cls) { return H5E_SETLOCAL; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1STORAGE(JNIEnv *env, jclass cls) { return H5E_STORAGE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1SYM(JNIEnv *env, jclass cls) { return H5E_SYM; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1TRUNCATED(JNIEnv *env, jclass cls) { return H5E_TRUNCATED; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1TST(JNIEnv *env, jclass cls) { return H5E_TST; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1UNINITIALIZED(JNIEnv *env, jclass cls) { return H5E_UNINITIALIZED; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1UNSUPPORTED(JNIEnv *env, jclass cls) { return H5E_UNSUPPORTED; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1VERSION(JNIEnv *env, jclass cls) { return H5E_VERSION; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1VFL(JNIEnv *env, jclass cls) { return H5E_VFL; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1WALK_1DOWNWARD(JNIEnv *env, jclass cls) { return H5E_WALK_DOWNWARD; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1WALK_1UPWARD(JNIEnv *env, jclass cls) { return H5E_WALK_UPWARD; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5E_1WRITEERROR(JNIEnv *env, jclass cls) { return H5E_WRITEERROR; }
/* Java does not have unsigned native types */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wsign-conversion"
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5F_1ACC_1CREAT(JNIEnv *env, jclass cls) { return H5F_ACC_CREAT; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5F_1ACC_1EXCL(JNIEnv *env, jclass cls) { return H5F_ACC_EXCL; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5F_1ACC_1RDONLY(JNIEnv *env, jclass cls) { return H5F_ACC_RDONLY; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5F_1ACC_1RDWR(JNIEnv *env, jclass cls) { return H5F_ACC_RDWR; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5F_1ACC_1TRUNC(JNIEnv *env, jclass cls) { return H5F_ACC_TRUNC; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5F_1ACC_1DEFAULT(JNIEnv *env, jclass cls) { return H5F_ACC_DEFAULT; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5F_1ACC_1SWMR_1READ(JNIEnv *env, jclass cls) { return H5F_ACC_SWMR_READ; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5F_1ACC_1SWMR_1WRITE(JNIEnv *env, jclass cls) { return H5F_ACC_SWMR_WRITE; }
#pragma GCC diagnostic pop
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5F_1CLOSE_1DEFAULT(JNIEnv *env, jclass cls) { return H5F_CLOSE_DEFAULT; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5F_1CLOSE_1SEMI(JNIEnv *env, jclass cls) { return H5F_CLOSE_SEMI; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5F_1CLOSE_1STRONG(JNIEnv *env, jclass cls) { return H5F_CLOSE_STRONG; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5F_1CLOSE_1WEAK(JNIEnv *env, jclass cls) { return H5F_CLOSE_WEAK; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5F_1LIBVER_1EARLIEST(JNIEnv *env, jclass cls){return H5F_LIBVER_EARLIEST;}
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5F_1LIBVER_1LATEST(JNIEnv *env, jclass cls){return H5F_LIBVER_LATEST;}
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5F_1OBJ_1ALL(JNIEnv *env, jclass cls) { return H5F_OBJ_ALL; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5F_1OBJ_1ATTR(JNIEnv *env, jclass cls) { return H5F_OBJ_ATTR; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5F_1OBJ_1DATASET(JNIEnv *env, jclass cls) { return H5F_OBJ_DATASET; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5F_1OBJ_1DATATYPE(JNIEnv *env, jclass cls) { return H5F_OBJ_DATATYPE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5F_1OBJ_1FILE(JNIEnv *env, jclass cls) { return H5F_OBJ_FILE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5F_1OBJ_1GROUP(JNIEnv *env, jclass cls) { return H5F_OBJ_GROUP; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5F_1OBJ_1LOCAL(JNIEnv *env, jclass cls) { return H5F_OBJ_LOCAL; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5F_1SCOPE_1GLOBAL(JNIEnv *env, jclass cls) { return H5F_SCOPE_GLOBAL; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5F_1SCOPE_1LOCAL(JNIEnv *env, jclass cls) { return H5F_SCOPE_LOCAL; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5F_1UNLIMITED(JNIEnv *env, jclass cls) { return (jint)H5F_UNLIMITED; }
-JNIEXPORT jint JNICALL
-Java_hdf_hdf5lib_HDF5Constants_H5F_1FILE_1SPACE_1DEFAULT(JNIEnv *env, jclass cls) { return H5F_FILE_SPACE_DEFAULT; }
-JNIEXPORT jint JNICALL
-Java_hdf_hdf5lib_HDF5Constants_H5F_1FILE_1SPACE_1ALL_1PERSIST(JNIEnv *env, jclass cls) { return H5F_FILE_SPACE_ALL_PERSIST; }
-JNIEXPORT jint JNICALL
-Java_hdf_hdf5lib_HDF5Constants_H5F_1FILE_1SPACE_1ALL(JNIEnv *env, jclass cls) { return H5F_FILE_SPACE_ALL; }
-JNIEXPORT jint JNICALL
-Java_hdf_hdf5lib_HDF5Constants_H5F_1FILE_1SPACE_1AGGR_1VFD(JNIEnv *env, jclass cls) { return H5F_FILE_SPACE_AGGR_VFD; }
-JNIEXPORT jint JNICALL
-Java_hdf_hdf5lib_HDF5Constants_H5F_1FILE_1SPACE_1VFD(JNIEnv *env, jclass cls) { return H5F_FILE_SPACE_VFD; }
-JNIEXPORT jint JNICALL
-Java_hdf_hdf5lib_HDF5Constants_H5F_1FILE_1SPACE_1NTYPES(JNIEnv *env, jclass cls) { return H5F_FILE_SPACE_NTYPES; }
+JNIEXPORT jint JNICALL
+Java_hdf_hdf5lib_HDF5Constants_H5F_1FSPACE_1STRATEGY_1FSM_1AGGR(JNIEnv *env, jclass cls) { return H5F_FSPACE_STRATEGY_FSM_AGGR; }
+JNIEXPORT jint JNICALL
+Java_hdf_hdf5lib_HDF5Constants_H5F_1FSPACE_1STRATEGY_1AGGR(JNIEnv *env, jclass cls) { return H5F_FSPACE_STRATEGY_AGGR; }
+JNIEXPORT jint JNICALL
+Java_hdf_hdf5lib_HDF5Constants_H5F_1FSPACE_1STRATEGY_1PAGE(JNIEnv *env, jclass cls) { return H5F_FSPACE_STRATEGY_PAGE; }
+JNIEXPORT jint JNICALL
+Java_hdf_hdf5lib_HDF5Constants_H5F_1FSPACE_1STRATEGY_1NONE(JNIEnv *env, jclass cls) { return H5F_FSPACE_STRATEGY_NONE; }
+JNIEXPORT jint JNICALL
+Java_hdf_hdf5lib_HDF5Constants_H5F_1FSPACE_1STRATEGY_1NTYPES(JNIEnv *env, jclass cls) { return H5F_FSPACE_STRATEGY_NTYPES; }
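This hunk swaps the old H5F_FILE_SPACE_* wrappers for the H5F_FSPACE_STRATEGY_* values introduced with the HDF5 1.10 file-space management API. As a minimal illustrative sketch (not part of the patch, and assuming the 1.10 C API with H5Pset_file_space_strategy on a file-creation property list), the strategy enum these wrappers expose to Java is consumed on the native side roughly like this:

    #include <hdf5.h>

    /* Create a file whose free-space manager uses paged aggregation.
     * The strategy argument is one of the H5F_FSPACE_STRATEGY_* values
     * returned by the JNI wrappers above. */
    static int create_paged_file(const char *name)
    {
        hid_t fcpl = H5Pcreate(H5P_FILE_CREATE);
        if (fcpl < 0)
            return -1;

        /* persist free-space metadata; 1-byte free-space section threshold */
        if (H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 1, (hsize_t)1) < 0) {
            H5Pclose(fcpl);
            return -1;
        }

        hid_t file = H5Fcreate(name, H5F_ACC_TRUNC, fcpl, H5P_DEFAULT);
        H5Pclose(fcpl);
        if (file < 0)
            return -1;
        return (int)H5Fclose(file);
    }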
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1CORE(JNIEnv *env, jclass cls) { return H5FD_CORE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1DIRECT(JNIEnv *env, jclass cls) {
#ifdef H5_HAVE_DIRECT
return H5FD_DIRECT;
@@ -425,19 +421,19 @@ Java_hdf_hdf5lib_HDF5Constants_H5FD_1DIRECT(JNIEnv *env, jclass cls) {
return -1;
#endif
}
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1FAMILY(JNIEnv *env, jclass cls) { return H5FD_FAMILY; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1LOG(JNIEnv *env, jclass cls) { return H5FD_LOG; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1MPIO(JNIEnv *env, jclass cls) { return H5FD_MPIO; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1MULTI(JNIEnv *env, jclass cls) { return H5FD_MULTI; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1SEC2(JNIEnv *env, jclass cls) { return H5FD_SEC2; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1STDIO(JNIEnv *env, jclass cls) { return H5FD_STDIO; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1WINDOWS(JNIEnv *env, jclass cls) {
#ifdef H5_HAVE_WINDOWS
return H5FD_WINDOWS;
@@ -445,84 +441,84 @@ Java_hdf_hdf5lib_HDF5Constants_H5FD_1WINDOWS(JNIEnv *env, jclass cls) {
return -1;
#endif
}
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1LOG_1LOC_1READ(JNIEnv *env, jclass cls) { return H5FD_LOG_LOC_READ; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1LOG_1LOC_1WRITE(JNIEnv *env, jclass cls) { return H5FD_LOG_LOC_WRITE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1LOG_1LOC_1SEEK(JNIEnv *env, jclass cls) { return H5FD_LOG_LOC_SEEK; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1LOG_1LOC_1IO(JNIEnv *env, jclass cls) { return H5FD_LOG_LOC_IO; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1LOG_1FILE_1READ(JNIEnv *env, jclass cls) { return H5FD_LOG_FILE_READ; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1LOG_1FILE_1WRITE(JNIEnv *env, jclass cls) { return H5FD_LOG_FILE_WRITE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1LOG_1FILE_1IO(JNIEnv *env, jclass cls) { return H5FD_LOG_FILE_IO; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1LOG_1FLAVOR(JNIEnv *env, jclass cls) { return H5FD_LOG_FLAVOR; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1LOG_1NUM_1READ(JNIEnv *env, jclass cls) { return H5FD_LOG_NUM_READ; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1LOG_1NUM_1WRITE(JNIEnv *env, jclass cls) { return H5FD_LOG_NUM_WRITE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1LOG_1NUM_1SEEK(JNIEnv *env, jclass cls) { return H5FD_LOG_NUM_SEEK; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1LOG_1NUM_1TRUNCATE(JNIEnv *env, jclass cls) { return H5FD_LOG_NUM_TRUNCATE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1LOG_1NUM_1IO(JNIEnv *env, jclass cls) { return H5FD_LOG_NUM_IO; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1LOG_1TIME_1OPEN(JNIEnv *env, jclass cls) { return H5FD_LOG_TIME_OPEN; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1LOG_1TIME_1STAT(JNIEnv *env, jclass cls) { return H5FD_LOG_TIME_STAT; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1LOG_1TIME_1READ(JNIEnv *env, jclass cls) { return H5FD_LOG_TIME_READ; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1LOG_1TIME_1WRITE(JNIEnv *env, jclass cls) { return H5FD_LOG_TIME_WRITE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1LOG_1TIME_1SEEK(JNIEnv *env, jclass cls) { return H5FD_LOG_TIME_SEEK; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1LOG_1TIME_1CLOSE(JNIEnv *env, jclass cls) { return H5FD_LOG_TIME_CLOSE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1LOG_1TIME_1IO(JNIEnv *env, jclass cls) { return H5FD_LOG_TIME_IO; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1LOG_1ALLOC(JNIEnv *env, jclass cls) { return H5FD_LOG_ALLOC; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1LOG_1ALL(JNIEnv *env, jclass cls) { return H5FD_LOG_ALL; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1MEM_1NOLIST(JNIEnv *env, jclass cls) { return H5FD_MEM_NOLIST; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1MEM_1DEFAULT(JNIEnv *env, jclass cls) { return H5FD_MEM_DEFAULT; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1MEM_1SUPER(JNIEnv *env, jclass cls) { return H5FD_MEM_SUPER; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1MEM_1BTREE(JNIEnv *env, jclass cls) { return H5FD_MEM_BTREE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1MEM_1DRAW(JNIEnv *env, jclass cls) { return H5FD_MEM_DRAW; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1MEM_1GHEAP(JNIEnv *env, jclass cls) { return H5FD_MEM_GHEAP; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1MEM_1LHEAP(JNIEnv *env, jclass cls) { return H5FD_MEM_LHEAP; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1MEM_1OHDR(JNIEnv *env, jclass cls) { return H5FD_MEM_OHDR; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1MEM_1NTYPES(JNIEnv *env, jclass cls) { return H5FD_MEM_NTYPES; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1DEFAULT_1HADDR_1SIZE(JNIEnv *env, jclass cls) { return (hsize_t)(HADDR_MAX/H5FD_MEM_NTYPES); }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1MEM_1DEFAULT_1SIZE(JNIEnv *env, jclass cls) { return (hsize_t)0; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1MEM_1DEFAULT_1SUPER_1SIZE(JNIEnv *env, jclass cls) { return (hsize_t)0; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1MEM_1DEFAULT_1BTREE_1SIZE(JNIEnv *env, jclass cls) { return (hsize_t)(1 * (HADDR_MAX / (H5FD_MEM_NTYPES-1))); }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1MEM_1DEFAULT_1DRAW_1SIZE(JNIEnv *env, jclass cls) { return (hsize_t)(2 * (HADDR_MAX / (H5FD_MEM_NTYPES-1))); }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1MEM_1DEFAULT_1GHEAP_1SIZE(JNIEnv *env, jclass cls) { return (hsize_t)(3 * (HADDR_MAX / (H5FD_MEM_NTYPES-1))); }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1MEM_1DEFAULT_1LHEAP_1SIZE(JNIEnv *env, jclass cls) { return (hsize_t)(4 * (HADDR_MAX / (H5FD_MEM_NTYPES-1))); }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5FD_1MEM_1DEFAULT_1OHDR_1SIZE(JNIEnv *env, jclass cls) { return (hsize_t)(5 * (HADDR_MAX / (H5FD_MEM_NTYPES-1))); }
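The H5FD_1MEM_1DEFAULT_*_1SIZE wrappers above return hard-coded address offsets (typically used as default member addresses for the multi file driver): the address space is divided into H5FD_MEM_NTYPES-1 equal regions of size HADDR_MAX / (H5FD_MEM_NTYPES - 1), and successive allocation types start at consecutive multiples of that size (superblock at 0, B-tree at 1x, raw data at 2x, and so on). A minimal sketch, assuming only the public HDF5 headers, that reproduces the same boundaries:

    #include <stdio.h>
    #include <hdf5.h>

    int main(void)
    {
        /* HADDR_MAX and H5FD_MEM_NTYPES come from the public headers.
         * Region n starts at n * (HADDR_MAX / (H5FD_MEM_NTYPES - 1)),
         * matching the constants returned by the wrappers above. */
        haddr_t region = HADDR_MAX / (H5FD_MEM_NTYPES - 1);
        for (int n = 0; n < H5FD_MEM_NTYPES - 1; n++)
            printf("region %d -> default start address %llu\n",
                   n, (unsigned long long)(n * region));
        return 0;
    }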
@@ -532,769 +528,769 @@ Java_hdf_hdf5lib_HDF5Constants_H5FD_1MEM_1DEFAULT_1OHDR_1SIZE(JNIEnv *env, jclas
*/
#ifndef H5_NO_DEPRECATED_SYMBOLS
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5G_1DATASET(JNIEnv *env, jclass cls) { return H5G_DATASET; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5G_1GROUP(JNIEnv *env, jclass cls) { return H5G_GROUP; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5G_1LINK(JNIEnv *env, jclass cls) { return H5G_LINK; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5G_1UDLINK(JNIEnv *env, jclass cls) { return H5G_UDLINK; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5G_1RESERVED_15(JNIEnv *env, jclass cls) { return H5G_RESERVED_5; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5G_1RESERVED_16(JNIEnv *env, jclass cls) { return H5G_RESERVED_6; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5G_1RESERVED_17(JNIEnv *env, jclass cls) { return H5G_RESERVED_7; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5G_1TYPE(JNIEnv *env, jclass cls) { return H5G_TYPE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5G_1UNKNOWN(JNIEnv *env, jclass cls) { return H5G_UNKNOWN; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5G_1LINK_1ERROR(JNIEnv *env, jclass cls) { return H5G_LINK_ERROR; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5G_1LINK_1HARD(JNIEnv *env, jclass cls) { return H5G_LINK_HARD; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5G_1LINK_1SOFT(JNIEnv *env, jclass cls) { return H5G_LINK_SOFT; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5G_1NLIBTYPES(JNIEnv *env, jclass cls) { return H5G_NLIBTYPES; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5G_1NTYPES(JNIEnv *env, jclass cls) { return H5G_NTYPES; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5G_1NUSERTYPES(JNIEnv *env, jclass cls) { return H5G_NUSERTYPES; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5G_1SAME_1LOC(JNIEnv *env, jclass cls) { return H5G_SAME_LOC; }
#endif /* H5_NO_DEPRECATED_SYMBOLS */
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5G_1STORAGE_1TYPE_1UNKNOWN(JNIEnv *env, jclass cls){ return H5G_STORAGE_TYPE_UNKNOWN; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5G_1STORAGE_1TYPE_1SYMBOL_1TABLE(JNIEnv *env, jclass cls){ return H5G_STORAGE_TYPE_SYMBOL_TABLE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5G_1STORAGE_1TYPE_1COMPACT(JNIEnv *env, jclass cls){ return H5G_STORAGE_TYPE_COMPACT; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5G_1STORAGE_1TYPE_1DENSE(JNIEnv *env, jclass cls){ return H5G_STORAGE_TYPE_DENSE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5I_1UNINIT(JNIEnv *env, jclass cls) { return H5I_UNINIT; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5I_1BADID(JNIEnv *env, jclass cls) { return H5I_BADID; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5I_1FILE(JNIEnv *env, jclass cls) { return H5I_FILE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5I_1GROUP(JNIEnv *env, jclass cls) { return H5I_GROUP; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5I_1DATATYPE(JNIEnv *env, jclass cls) { return H5I_DATATYPE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5I_1DATASPACE(JNIEnv *env, jclass cls) { return H5I_DATASPACE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5I_1DATASET(JNIEnv *env, jclass cls) { return H5I_DATASET; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5I_1ATTR(JNIEnv *env, jclass cls) { return H5I_ATTR; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5I_1REFERENCE(JNIEnv *env, jclass cls) { return H5I_REFERENCE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5I_1VFL(JNIEnv *env, jclass cls) { return H5I_VFL; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5I_1INVALID_1HID(JNIEnv *env, jclass cls) { return H5I_INVALID_HID; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5I_1GENPROP_1CLS(JNIEnv *env, jclass cls) { return H5I_GENPROP_CLS; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5I_1GENPROP_1LST(JNIEnv *env, jclass cls) { return H5I_GENPROP_LST; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5I_1ERROR_1CLASS(JNIEnv *env, jclass cls) { return H5I_ERROR_CLASS; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5I_1ERROR_1MSG(JNIEnv *env, jclass cls) { return H5I_ERROR_MSG; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5I_1ERROR_1STACK(JNIEnv *env, jclass cls) { return H5I_ERROR_STACK; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5I_1NTYPES(JNIEnv *env, jclass cls) { return H5I_NTYPES; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5L_1TYPE_1ERROR(JNIEnv *env, jclass cls) { return H5L_TYPE_ERROR; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5L_1TYPE_1HARD(JNIEnv *env, jclass cls) { return H5L_TYPE_HARD; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5L_1TYPE_1SOFT(JNIEnv *env, jclass cls) { return H5L_TYPE_SOFT; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5L_1TYPE_1EXTERNAL(JNIEnv *env, jclass cls) { return H5L_TYPE_EXTERNAL; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5L_1TYPE_1MAX(JNIEnv *env, jclass cls) { return H5L_TYPE_MAX; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5O_1COPY_1SHALLOW_1HIERARCHY_1FLAG(JNIEnv *env, jclass cls){return H5O_COPY_SHALLOW_HIERARCHY_FLAG; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5O_1COPY_1EXPAND_1SOFT_1LINK_1FLAG(JNIEnv *env, jclass cls){return H5O_COPY_EXPAND_SOFT_LINK_FLAG; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5O_1COPY_1EXPAND_1EXT_1LINK_1FLAG(JNIEnv *env, jclass cls){return H5O_COPY_EXPAND_EXT_LINK_FLAG; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5O_1COPY_1EXPAND_1REFERENCE_1FLAG(JNIEnv *env, jclass cls){return H5O_COPY_EXPAND_REFERENCE_FLAG; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5O_1COPY_1WITHOUT_1ATTR_1FLAG(JNIEnv *env, jclass cls){return H5O_COPY_WITHOUT_ATTR_FLAG; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5O_1COPY_1PRESERVE_1NULL_1FLAG(JNIEnv *env, jclass cls){return H5O_COPY_PRESERVE_NULL_FLAG; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5O_1SHMESG_1NONE_1FLAG(JNIEnv *env, jclass cls){return H5O_SHMESG_NONE_FLAG; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5O_1SHMESG_1SDSPACE_1FLAG(JNIEnv *env, jclass cls){return H5O_SHMESG_SDSPACE_FLAG; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5O_1SHMESG_1DTYPE_1FLAG(JNIEnv *env, jclass cls){return H5O_SHMESG_DTYPE_FLAG; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5O_1SHMESG_1FILL_1FLAG(JNIEnv *env, jclass cls){return H5O_SHMESG_FILL_FLAG; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5O_1SHMESG_1PLINE_1FLAG(JNIEnv *env, jclass cls){return H5O_SHMESG_PLINE_FLAG; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5O_1SHMESG_1ATTR_1FLAG(JNIEnv *env, jclass cls){return H5O_SHMESG_ATTR_FLAG; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5O_1SHMESG_1ALL_1FLAG(JNIEnv *env, jclass cls){return H5O_SHMESG_ALL_FLAG; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5O_1TYPE_1UNKNOWN(JNIEnv *env, jclass cls) { return H5O_TYPE_UNKNOWN; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5O_1TYPE_1GROUP(JNIEnv *env, jclass cls) { return H5O_TYPE_GROUP; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5O_1TYPE_1DATASET(JNIEnv *env, jclass cls) { return H5O_TYPE_DATASET; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5O_1TYPE_1NAMED_1DATATYPE(JNIEnv *env, jclass cls) { return H5O_TYPE_NAMED_DATATYPE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5O_1TYPE_1NTYPES(JNIEnv *env, jclass cls) { return H5O_TYPE_NTYPES; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5P_1ROOT(JNIEnv *env, jclass cls){return H5P_ROOT;}
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5P_1OBJECT_1CREATE(JNIEnv *env, jclass cls){return H5P_OBJECT_CREATE;}
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5P_1FILE_1CREATE(JNIEnv *env, jclass cls){return H5P_FILE_CREATE;}
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5P_1FILE_1ACCESS(JNIEnv *env, jclass cls){return H5P_FILE_ACCESS;}
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5P_1DATASET_1CREATE(JNIEnv *env, jclass cls){return H5P_DATASET_CREATE;}
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5P_1DATASET_1ACCESS(JNIEnv *env, jclass cls){return H5P_DATASET_ACCESS;}
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5P_1DATASET_1XFER(JNIEnv *env, jclass cls){return H5P_DATASET_XFER;}
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5P_1FILE_1MOUNT(JNIEnv *env, jclass cls){return H5P_FILE_MOUNT;}
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5P_1GROUP_1CREATE(JNIEnv *env, jclass cls){return H5P_GROUP_CREATE;}
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5P_1GROUP_1ACCESS(JNIEnv *env, jclass cls){return H5P_GROUP_ACCESS;}
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5P_1DATATYPE_1CREATE(JNIEnv *env, jclass cls){return H5P_DATATYPE_CREATE;}
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5P_1DATATYPE_1ACCESS(JNIEnv *env, jclass cls){return H5P_DATATYPE_ACCESS;}
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5P_1STRING_1CREATE(JNIEnv *env, jclass cls){return H5P_STRING_CREATE;}
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5P_1ATTRIBUTE_1CREATE(JNIEnv *env, jclass cls){return H5P_ATTRIBUTE_CREATE;}
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5P_1ATTRIBUTE_1ACCESS(JNIEnv *env, jclass cls){return H5P_ATTRIBUTE_ACCESS;}
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5P_1OBJECT_1COPY(JNIEnv *env, jclass cls){return H5P_OBJECT_COPY;}
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5P_1LINK_1CREATE(JNIEnv *env, jclass cls){return H5P_LINK_CREATE;}
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5P_1LINK_1ACCESS(JNIEnv *env, jclass cls){return H5P_LINK_ACCESS;}
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5P_1FILE_1CREATE_1DEFAULT(JNIEnv *env, jclass cls){return H5P_FILE_CREATE_DEFAULT;}
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5P_1FILE_1ACCESS_1DEFAULT(JNIEnv *env, jclass cls){return H5P_FILE_ACCESS_DEFAULT;}
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5P_1DATASET_1CREATE_1DEFAULT(JNIEnv *env, jclass cls){return H5P_DATASET_CREATE_DEFAULT;}
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5P_1DATASET_1ACCESS_1DEFAULT(JNIEnv *env, jclass cls){return H5P_DATASET_ACCESS_DEFAULT;}
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5P_1DATASET_1XFER_1DEFAULT(JNIEnv *env, jclass cls){return H5P_DATASET_XFER_DEFAULT;}
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5P_1FILE_1MOUNT_1DEFAULT(JNIEnv *env, jclass cls){return H5P_FILE_MOUNT_DEFAULT;}
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5P_1GROUP_1CREATE_1DEFAULT(JNIEnv *env, jclass cls){return H5P_GROUP_CREATE_DEFAULT;}
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5P_1GROUP_1ACCESS_1DEFAULT(JNIEnv *env, jclass cls){return H5P_GROUP_ACCESS_DEFAULT;}
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5P_1DATATYPE_1CREATE_1DEFAULT(JNIEnv *env, jclass cls){return H5P_DATATYPE_CREATE_DEFAULT;}
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5P_1DATATYPE_1ACCESS_1DEFAULT(JNIEnv *env, jclass cls){return H5P_DATATYPE_ACCESS_DEFAULT;}
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5P_1ATTRIBUTE_1CREATE_1DEFAULT(JNIEnv *env, jclass cls){return H5P_ATTRIBUTE_CREATE_DEFAULT;}
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5P_1ATTRIBUTE_1ACCESS_1DEFAULT(JNIEnv *env, jclass cls){return H5P_ATTRIBUTE_ACCESS_DEFAULT;}
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5P_1OBJECT_1COPY_1DEFAULT(JNIEnv *env, jclass cls){return H5P_OBJECT_COPY_DEFAULT;}
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5P_1LINK_1CREATE_1DEFAULT(JNIEnv *env, jclass cls){return H5P_LINK_CREATE_DEFAULT;}
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5P_1LINK_1ACCESS_1DEFAULT(JNIEnv *env, jclass cls){return H5P_LINK_ACCESS_DEFAULT;}
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5P_1CRT_1ORDER_1TRACKED(JNIEnv *env, jclass cls){return H5P_CRT_ORDER_TRACKED;}
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5P_1CRT_1ORDER_1INDEXED(JNIEnv *env, jclass cls){return H5P_CRT_ORDER_INDEXED;}
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5P_1DEFAULT(JNIEnv *env, jclass cls) { return H5P_DEFAULT; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5P_1NO_1CLASS(JNIEnv *env, jclass cls) { return H5P_ROOT; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5PL_1TYPE_1ERROR(JNIEnv *env, jclass cls) { return H5PL_TYPE_ERROR; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5PL_1TYPE_1FILTER(JNIEnv *env, jclass cls) { return H5PL_TYPE_FILTER; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5PL_1FILTER_1PLUGIN(JNIEnv *env, jclass cls) { return H5PL_FILTER_PLUGIN; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5PL_1ALL_1PLUGIN(JNIEnv *env, jclass cls) { return H5PL_ALL_PLUGIN; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5R_1BADTYPE(JNIEnv *env, jclass cls) { return H5R_BADTYPE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5R_1MAXTYPE(JNIEnv *env, jclass cls) { return H5R_MAXTYPE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5R_1OBJ_1REF_1BUF_1SIZE(JNIEnv *env, jclass cls) { return H5R_OBJ_REF_BUF_SIZE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5R_1DSET_1REG_1REF_1BUF_1SIZE(JNIEnv *env, jclass cls) { return H5R_DSET_REG_REF_BUF_SIZE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5R_1OBJECT(JNIEnv *env, jclass cls) { return H5R_OBJECT; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5R_1DATASET_1REGION(JNIEnv *env, jclass cls) { return H5R_DATASET_REGION; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5S_1ALL(JNIEnv *env, jclass cls) { return H5S_ALL; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5S_1MAX_1RANK(JNIEnv *env, jclass cls) { return H5S_MAX_RANK; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5S_1NO_1CLASS(JNIEnv *env, jclass cls) { return H5S_NO_CLASS; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5S_1NULL(JNIEnv *env, jclass cls) { return H5S_NULL; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5S_1SCALAR(JNIEnv *env, jclass cls) { return H5S_SCALAR; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5S_1SEL_1ALL(JNIEnv *env, jclass cls) { return H5S_SEL_ALL; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5S_1SEL_1ERROR(JNIEnv *env, jclass cls) { return H5S_SEL_ERROR; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5S_1SEL_1HYPERSLABS(JNIEnv *env, jclass cls) { return H5S_SEL_HYPERSLABS; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5S_1SEL_1N(JNIEnv *env, jclass cls) { return H5S_SEL_N; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5S_1SEL_1NONE(JNIEnv *env, jclass cls) { return H5S_SEL_NONE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5S_1SEL_1POINTS(JNIEnv *env, jclass cls) { return H5S_SEL_POINTS; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5S_1SELECT_1AND(JNIEnv *env, jclass cls) { return H5S_SELECT_AND; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5S_1SELECT_1APPEND(JNIEnv *env, jclass cls) { return H5S_SELECT_APPEND; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5S_1SELECT_1INVALID(JNIEnv *env, jclass cls) { return H5S_SELECT_INVALID; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5S_1SELECT_1NOOP(JNIEnv *env, jclass cls) { return H5S_SELECT_NOOP; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5S_1SELECT_1NOTA(JNIEnv *env, jclass cls) { return H5S_SELECT_NOTA; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5S_1SELECT_1NOTB(JNIEnv *env, jclass cls) { return H5S_SELECT_NOTB; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5S_1SELECT_1OR(JNIEnv *env, jclass cls) { return H5S_SELECT_OR; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5S_1SELECT_1PREPEND(JNIEnv *env, jclass cls) { return H5S_SELECT_PREPEND; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5S_1SELECT_1SET(JNIEnv *env, jclass cls) { return H5S_SELECT_SET; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5S_1SELECT_1XOR(JNIEnv *env, jclass cls) { return H5S_SELECT_XOR; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5S_1SIMPLE(JNIEnv *env, jclass cls) { return H5S_SIMPLE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5S_1UNLIMITED(JNIEnv *env, jclass cls) { return (jint)H5S_UNLIMITED; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1ALPHA_1B16(JNIEnv *env, jclass cls) { return H5T_ALPHA_B16; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1ALPHA_1B32(JNIEnv *env, jclass cls) { return H5T_ALPHA_B32; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1ALPHA_1B64(JNIEnv *env, jclass cls) { return H5T_ALPHA_B64; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1ALPHA_1B8(JNIEnv *env, jclass cls) { return H5T_ALPHA_B8; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1ALPHA_1F32(JNIEnv *env, jclass cls) { return H5T_ALPHA_F32; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1ALPHA_1F64(JNIEnv *env, jclass cls) { return H5T_ALPHA_F64; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1ALPHA_1I16(JNIEnv *env, jclass cls) { return H5T_ALPHA_I16; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1ALPHA_1I32(JNIEnv *env, jclass cls) { return H5T_ALPHA_I32; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1ALPHA_1I64(JNIEnv *env, jclass cls) { return H5T_ALPHA_I64; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1ALPHA_1I8(JNIEnv *env, jclass cls) { return H5T_ALPHA_I8; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1ALPHA_1U16(JNIEnv *env, jclass cls) { return H5T_ALPHA_U16; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1ALPHA_1U32(JNIEnv *env, jclass cls) { return H5T_ALPHA_U32; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1ALPHA_1U64(JNIEnv *env, jclass cls) { return H5T_ALPHA_U64; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1ALPHA_1U8(JNIEnv *env, jclass cls) { return H5T_ALPHA_U8; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1ARRAY(JNIEnv *env, jclass cls) { return H5T_ARRAY; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1BITFIELD(JNIEnv *env, jclass cls) { return H5T_BITFIELD; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1BKG_1NO(JNIEnv *env, jclass cls) { return H5T_BKG_NO; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1BKG_1YES(JNIEnv *env, jclass cls) { return H5T_BKG_YES; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1C_1S1(JNIEnv *env, jclass cls) { return H5T_C_S1; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1COMPOUND(JNIEnv *env, jclass cls) { return H5T_COMPOUND; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1CONV_1CONV(JNIEnv *env, jclass cls) { return H5T_CONV_CONV; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1CONV_1FREE(JNIEnv *env, jclass cls) { return H5T_CONV_FREE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1CONV_1INIT(JNIEnv *env, jclass cls) { return H5T_CONV_INIT; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1CSET_1ERROR(JNIEnv *env, jclass cls) { return H5T_CSET_ERROR; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1CSET_1ASCII(JNIEnv *env, jclass cls) { return H5T_CSET_ASCII; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1CSET_1UTF8(JNIEnv *env, jclass cls) { return H5T_CSET_UTF8; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1CSET_1RESERVED_110(JNIEnv *env, jclass cls) { return H5T_CSET_RESERVED_10; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1CSET_1RESERVED_111(JNIEnv *env, jclass cls) { return H5T_CSET_RESERVED_11; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1CSET_1RESERVED_112(JNIEnv *env, jclass cls) { return H5T_CSET_RESERVED_12; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1CSET_1RESERVED_113(JNIEnv *env, jclass cls) { return H5T_CSET_RESERVED_13; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1CSET_1RESERVED_114(JNIEnv *env, jclass cls) { return H5T_CSET_RESERVED_14; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1CSET_1RESERVED_115(JNIEnv *env, jclass cls) { return H5T_CSET_RESERVED_15; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1CSET_1RESERVED_12(JNIEnv *env, jclass cls) { return H5T_CSET_RESERVED_2; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1CSET_1RESERVED_13(JNIEnv *env, jclass cls) { return H5T_CSET_RESERVED_3; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1CSET_1RESERVED_14(JNIEnv *env, jclass cls) { return H5T_CSET_RESERVED_4; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1CSET_1RESERVED_15(JNIEnv *env, jclass cls) { return H5T_CSET_RESERVED_5; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1CSET_1RESERVED_16(JNIEnv *env, jclass cls) { return H5T_CSET_RESERVED_6; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1CSET_1RESERVED_17(JNIEnv *env, jclass cls) { return H5T_CSET_RESERVED_7; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1CSET_1RESERVED_18(JNIEnv *env, jclass cls) { return H5T_CSET_RESERVED_8; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1CSET_1RESERVED_19(JNIEnv *env, jclass cls) { return H5T_CSET_RESERVED_9; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1DIR_1ASCEND(JNIEnv *env, jclass cls) { return H5T_DIR_ASCEND; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1DIR_1DEFAULT(JNIEnv *env, jclass cls) { return H5T_DIR_DEFAULT; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1DIR_1DESCEND(JNIEnv *env, jclass cls) { return H5T_DIR_DESCEND; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1ENUM(JNIEnv *env, jclass cls) { return H5T_ENUM; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1FLOAT(JNIEnv *env, jclass cls) { return H5T_FLOAT; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1FORTRAN_1S1(JNIEnv *env, jclass cls) { return H5T_FORTRAN_S1; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1IEEE_1F32BE(JNIEnv *env, jclass cls) { return H5T_IEEE_F32BE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1IEEE_1F32LE(JNIEnv *env, jclass cls) { return H5T_IEEE_F32LE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1IEEE_1F64BE(JNIEnv *env, jclass cls) { return H5T_IEEE_F64BE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1IEEE_1F64LE(JNIEnv *env, jclass cls) { return H5T_IEEE_F64LE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1INTEGER(JNIEnv *env, jclass cls) { return H5T_INTEGER; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1INTEL_1B16(JNIEnv *env, jclass cls) { return H5T_INTEL_B16; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1INTEL_1B32(JNIEnv *env, jclass cls) { return H5T_INTEL_B32; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1INTEL_1B64(JNIEnv *env, jclass cls) { return H5T_INTEL_B64; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1INTEL_1B8(JNIEnv *env, jclass cls) { return H5T_INTEL_B8; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1INTEL_1F32(JNIEnv *env, jclass cls) { return H5T_INTEL_F32; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1INTEL_1F64(JNIEnv *env, jclass cls) { return H5T_INTEL_F64; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1INTEL_1I16(JNIEnv *env, jclass cls) { return H5T_INTEL_I16; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1INTEL_1I32(JNIEnv *env, jclass cls) { return H5T_INTEL_I32; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1INTEL_1I64(JNIEnv *env, jclass cls) { return H5T_INTEL_I64; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1INTEL_1I8(JNIEnv *env, jclass cls) { return H5T_INTEL_I8; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1INTEL_1U16(JNIEnv *env, jclass cls) { return H5T_INTEL_U16; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1INTEL_1U32(JNIEnv *env, jclass cls) { return H5T_INTEL_U32; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1INTEL_1U64(JNIEnv *env, jclass cls) { return H5T_INTEL_U64; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1INTEL_1U8(JNIEnv *env, jclass cls) { return H5T_INTEL_U8; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1MIPS_1B16(JNIEnv *env, jclass cls) { return H5T_MIPS_B16; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1MIPS_1B32(JNIEnv *env, jclass cls) { return H5T_MIPS_B32; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1MIPS_1B64(JNIEnv *env, jclass cls) { return H5T_MIPS_B64; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1MIPS_1B8(JNIEnv *env, jclass cls) { return H5T_MIPS_B8; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1MIPS_1F32(JNIEnv *env, jclass cls) { return H5T_MIPS_F32; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1MIPS_1F64(JNIEnv *env, jclass cls) { return H5T_MIPS_F64; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1MIPS_1I16(JNIEnv *env, jclass cls) { return H5T_MIPS_I16; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1MIPS_1I32(JNIEnv *env, jclass cls) { return H5T_MIPS_I32; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1MIPS_1I64(JNIEnv *env, jclass cls) { return H5T_MIPS_I64; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1MIPS_1I8(JNIEnv *env, jclass cls) { return H5T_MIPS_I8; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1MIPS_1U16(JNIEnv *env, jclass cls) { return H5T_MIPS_U16; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1MIPS_1U32(JNIEnv *env, jclass cls) { return H5T_MIPS_U32; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1MIPS_1U64(JNIEnv *env, jclass cls) { return H5T_MIPS_U64; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1MIPS_1U8(JNIEnv *env, jclass cls) { return H5T_MIPS_U8; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1B16(JNIEnv *env, jclass cls) { return H5T_NATIVE_B16; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1B32(JNIEnv *env, jclass cls) { return H5T_NATIVE_B32; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1B64(JNIEnv *env, jclass cls) { return H5T_NATIVE_B64; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1B8(JNIEnv *env, jclass cls) { return H5T_NATIVE_B8; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1CHAR(JNIEnv *env, jclass cls) { return H5T_NATIVE_CHAR; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1DOUBLE(JNIEnv *env, jclass cls) { return H5T_NATIVE_DOUBLE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1FLOAT(JNIEnv *env, jclass cls) { return H5T_NATIVE_FLOAT; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1HADDR(JNIEnv *env, jclass cls) { return H5T_NATIVE_HADDR; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1HBOOL(JNIEnv *env, jclass cls) { return H5T_NATIVE_HBOOL; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1HERR(JNIEnv *env, jclass cls) { return H5T_NATIVE_HERR; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1HSIZE(JNIEnv *env, jclass cls) { return H5T_NATIVE_HSIZE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1HSSIZE(JNIEnv *env, jclass cls) { return H5T_NATIVE_HSSIZE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1INT(JNIEnv *env, jclass cls) { return H5T_NATIVE_INT; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1INT_1FAST16(JNIEnv *env, jclass cls) { return H5T_NATIVE_INT_FAST16; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1INT_1FAST32(JNIEnv *env, jclass cls) { return H5T_NATIVE_INT_FAST32; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1INT_1FAST64(JNIEnv *env, jclass cls) { return H5T_NATIVE_INT_FAST64; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1INT_1FAST8(JNIEnv *env, jclass cls) { return H5T_NATIVE_INT_FAST8; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1INT_1LEAST16(JNIEnv *env, jclass cls) { return H5T_NATIVE_INT_LEAST16; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1INT_1LEAST32(JNIEnv *env, jclass cls) { return H5T_NATIVE_INT_LEAST32; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1INT_1LEAST64(JNIEnv *env, jclass cls) { return H5T_NATIVE_INT_LEAST64; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1INT_1LEAST8(JNIEnv *env, jclass cls) { return H5T_NATIVE_INT_LEAST8; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1INT16(JNIEnv *env, jclass cls) { return H5T_NATIVE_INT16; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1INT32(JNIEnv *env, jclass cls) { return H5T_NATIVE_INT32; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1INT64(JNIEnv *env, jclass cls) { return H5T_NATIVE_INT64; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1INT8(JNIEnv *env, jclass cls) { return H5T_NATIVE_INT8; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1LDOUBLE(JNIEnv *env, jclass cls) { return H5T_NATIVE_LDOUBLE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1LLONG(JNIEnv *env, jclass cls) { return H5T_NATIVE_LLONG; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1LONG(JNIEnv *env, jclass cls) { return H5T_NATIVE_LONG; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1OPAQUE(JNIEnv *env, jclass cls) { return H5T_NATIVE_OPAQUE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1SCHAR(JNIEnv *env, jclass cls) { return H5T_NATIVE_SCHAR; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1SHORT(JNIEnv *env, jclass cls) { return H5T_NATIVE_SHORT; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1UCHAR(JNIEnv *env, jclass cls) { return H5T_NATIVE_UCHAR; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1UINT(JNIEnv *env, jclass cls) { return H5T_NATIVE_UINT; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1UINT_1FAST16(JNIEnv *env, jclass cls) { return H5T_NATIVE_UINT_FAST16; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1UINT_1FAST32(JNIEnv *env, jclass cls) { return H5T_NATIVE_UINT_FAST32; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1UINT_1FAST64(JNIEnv *env, jclass cls) { return H5T_NATIVE_UINT_FAST64; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1UINT_1FAST8(JNIEnv *env, jclass cls) { return H5T_NATIVE_UINT_FAST8; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1UINT_1LEAST16(JNIEnv *env, jclass cls) { return H5T_NATIVE_UINT_LEAST16; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1UINT_1LEAST32(JNIEnv *env, jclass cls) { return H5T_NATIVE_UINT_LEAST32; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1UINT_1LEAST64(JNIEnv *env, jclass cls) { return H5T_NATIVE_UINT_LEAST64; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1UINT_1LEAST8(JNIEnv *env, jclass cls) { return H5T_NATIVE_UINT_LEAST8; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1UINT16(JNIEnv *env, jclass cls) { return H5T_NATIVE_UINT16; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1UINT32(JNIEnv *env, jclass cls) { return H5T_NATIVE_UINT32; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1UINT64(JNIEnv *env, jclass cls) { return H5T_NATIVE_UINT64; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1UINT8(JNIEnv *env, jclass cls) { return H5T_NATIVE_UINT8; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1ULLONG(JNIEnv *env, jclass cls) { return H5T_NATIVE_ULLONG; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1ULONG(JNIEnv *env, jclass cls) { return H5T_NATIVE_ULONG; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NATIVE_1USHORT(JNIEnv *env, jclass cls) { return H5T_NATIVE_USHORT; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NCLASSES(JNIEnv *env, jclass cls) { return H5T_NCLASSES; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NO_1CLASS(JNIEnv *env, jclass cls) { return H5T_NO_CLASS; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NORM_1ERROR(JNIEnv *env, jclass cls) { return H5T_NORM_ERROR; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NORM_1IMPLIED(JNIEnv *env, jclass cls) { return H5T_NORM_IMPLIED; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NORM_1MSBSET(JNIEnv *env, jclass cls) { return H5T_NORM_MSBSET; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NORM_1NONE(JNIEnv *env, jclass cls) { return H5T_NORM_NONE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NPAD(JNIEnv *env, jclass cls) { return H5T_NPAD; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1NSGN(JNIEnv *env, jclass cls) { return H5T_NSGN; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1OPAQUE(JNIEnv *env, jclass cls) { return H5T_OPAQUE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1OPAQUE_1TAG_1MAX(JNIEnv *env, jclass cls) { return H5T_OPAQUE_TAG_MAX; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1ORDER_1BE(JNIEnv *env, jclass cls) { return H5T_ORDER_BE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1ORDER_1ERROR(JNIEnv *env, jclass cls) { return H5T_ORDER_ERROR; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1ORDER_1LE(JNIEnv *env, jclass cls) { return H5T_ORDER_LE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1ORDER_1NONE(JNIEnv *env, jclass cls) { return H5T_ORDER_NONE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1ORDER_1VAX(JNIEnv *env, jclass cls) { return H5T_ORDER_VAX; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1PAD_1BACKGROUND(JNIEnv *env, jclass cls) { return H5T_PAD_BACKGROUND; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1PAD_1ERROR(JNIEnv *env, jclass cls) { return H5T_PAD_ERROR; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1PAD_1ONE(JNIEnv *env, jclass cls) { return H5T_PAD_ONE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1PAD_1ZERO(JNIEnv *env, jclass cls) { return H5T_PAD_ZERO; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1PERS_1DONTCARE(JNIEnv *env, jclass cls) { return H5T_PERS_DONTCARE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1PERS_1HARD(JNIEnv *env, jclass cls) { return H5T_PERS_HARD; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1PERS_1SOFT(JNIEnv *env, jclass cls) { return H5T_PERS_SOFT; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1REFERENCE(JNIEnv *env, jclass cls) { return H5T_REFERENCE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1SGN_12(JNIEnv *env, jclass cls) { return H5T_SGN_2; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1SGN_1ERROR(JNIEnv *env, jclass cls) { return H5T_SGN_ERROR; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1SGN_1NONE(JNIEnv *env, jclass cls) { return H5T_SGN_NONE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STD_1B16BE(JNIEnv *env, jclass cls) { return H5T_STD_B16BE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STD_1B16LE(JNIEnv *env, jclass cls) { return H5T_STD_B16LE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STD_1B32BE(JNIEnv *env, jclass cls) { return H5T_STD_B32BE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STD_1B32LE(JNIEnv *env, jclass cls) { return H5T_STD_B32LE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STD_1B64BE(JNIEnv *env, jclass cls) { return H5T_STD_B64BE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STD_1B64LE(JNIEnv *env, jclass cls) { return H5T_STD_B64LE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STD_1B8BE(JNIEnv *env, jclass cls) { return H5T_STD_B8BE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STD_1B8LE(JNIEnv *env, jclass cls) { return H5T_STD_B8LE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STD_1I16BE(JNIEnv *env, jclass cls) { return H5T_STD_I16BE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STD_1I16LE(JNIEnv *env, jclass cls) { return H5T_STD_I16LE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STD_1I32BE(JNIEnv *env, jclass cls) { return H5T_STD_I32BE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STD_1I32LE(JNIEnv *env, jclass cls) { return H5T_STD_I32LE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STD_1I64BE(JNIEnv *env, jclass cls) { return H5T_STD_I64BE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STD_1I64LE(JNIEnv *env, jclass cls) { return H5T_STD_I64LE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STD_1I8BE(JNIEnv *env, jclass cls) { return H5T_STD_I8BE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STD_1I8LE(JNIEnv *env, jclass cls) { return H5T_STD_I8LE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STD_1REF_1DSETREG(JNIEnv *env, jclass cls) { return H5T_STD_REF_DSETREG; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STD_1REF_1OBJ(JNIEnv *env, jclass cls) { return H5T_STD_REF_OBJ; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STD_1U16BE(JNIEnv *env, jclass cls) { return H5T_STD_U16BE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STD_1U16LE(JNIEnv *env, jclass cls) { return H5T_STD_U16LE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STD_1U32BE(JNIEnv *env, jclass cls) { return H5T_STD_U32BE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STD_1U32LE(JNIEnv *env, jclass cls) { return H5T_STD_U32LE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STD_1U64BE(JNIEnv *env, jclass cls) { return H5T_STD_U64BE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STD_1U64LE(JNIEnv *env, jclass cls) { return H5T_STD_U64LE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STD_1U8BE(JNIEnv *env, jclass cls) { return H5T_STD_U8BE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STD_1U8LE(JNIEnv *env, jclass cls) { return H5T_STD_U8LE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STR_1ERROR(JNIEnv *env, jclass cls) { return H5T_STR_ERROR; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STR_1NULLPAD(JNIEnv *env, jclass cls) { return H5T_STR_NULLPAD; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STR_1NULLTERM(JNIEnv *env, jclass cls) { return H5T_STR_NULLTERM; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STR_1RESERVED_110(JNIEnv *env, jclass cls) { return H5T_STR_RESERVED_10; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STR_1RESERVED_111(JNIEnv *env, jclass cls) { return H5T_STR_RESERVED_11; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STR_1RESERVED_112(JNIEnv *env, jclass cls) { return H5T_STR_RESERVED_12; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STR_1RESERVED_113(JNIEnv *env, jclass cls) { return H5T_STR_RESERVED_13; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STR_1RESERVED_114(JNIEnv *env, jclass cls) { return H5T_STR_RESERVED_14; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STR_1RESERVED_115(JNIEnv *env, jclass cls) { return H5T_STR_RESERVED_15; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STR_1RESERVED_13(JNIEnv *env, jclass cls) { return H5T_STR_RESERVED_3; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STR_1RESERVED_14(JNIEnv *env, jclass cls) { return H5T_STR_RESERVED_4; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STR_1RESERVED_15(JNIEnv *env, jclass cls) { return H5T_STR_RESERVED_5; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STR_1RESERVED_16(JNIEnv *env, jclass cls) { return H5T_STR_RESERVED_6; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STR_1RESERVED_17(JNIEnv *env, jclass cls) { return H5T_STR_RESERVED_7; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STR_1RESERVED_18(JNIEnv *env, jclass cls) { return H5T_STR_RESERVED_8; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STR_1RESERVED_19(JNIEnv *env, jclass cls) { return H5T_STR_RESERVED_9; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STR_1SPACEPAD(JNIEnv *env, jclass cls) { return H5T_STR_SPACEPAD; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1STRING(JNIEnv *env, jclass cls) { return H5T_STRING; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1TIME(JNIEnv *env, jclass cls) { return H5T_TIME; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1UNIX_1D32BE(JNIEnv *env, jclass cls) { return H5T_UNIX_D32BE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1UNIX_1D32LE(JNIEnv *env, jclass cls) { return H5T_UNIX_D32LE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1UNIX_1D64BE(JNIEnv *env, jclass cls) { return H5T_UNIX_D64BE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1UNIX_1D64LE(JNIEnv *env, jclass cls) { return H5T_UNIX_D64LE; }
-JNIEXPORT jlong JNICALL
+JNIEXPORT jlong JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1VARIABLE(JNIEnv *env, jclass cls) { return (int)H5T_VARIABLE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5T_1VLEN(JNIEnv *env, jclass cls) { return H5T_VLEN; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1CB_1CONT(JNIEnv *env, jclass cls) { return H5Z_CB_CONT; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1CB_1ERROR(JNIEnv *env, jclass cls) { return H5Z_CB_ERROR; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1CB_1FAIL(JNIEnv *env, jclass cls) { return H5Z_CB_FAIL; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1CB_1NO(JNIEnv *env, jclass cls) { return H5Z_CB_NO; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1DISABLE_1EDC(JNIEnv *env, jclass cls) { return H5Z_DISABLE_EDC; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1ENABLE_1EDC(JNIEnv *env, jclass cls) { return H5Z_ENABLE_EDC; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1ERROR_1EDC(JNIEnv *env, jclass cls) { return H5Z_ERROR_EDC; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1FILTER_1CONFIG_1DECODE_1ENABLED(JNIEnv *env, jclass cls) { return H5Z_FILTER_CONFIG_DECODE_ENABLED; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1FILTER_1CONFIG_1ENCODE_1ENABLED(JNIEnv *env, jclass cls) { return H5Z_FILTER_CONFIG_ENCODE_ENABLED; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1FILTER_1DEFLATE(JNIEnv *env, jclass cls) { return H5Z_FILTER_DEFLATE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1FILTER_1ERROR(JNIEnv *env, jclass cls) { return H5Z_FILTER_ERROR; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1FILTER_1FLETCHER32(JNIEnv *env, jclass cls) { return H5Z_FILTER_FLETCHER32; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1FILTER_1MAX(JNIEnv *env, jclass cls) { return H5Z_FILTER_MAX; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1FILTER_1NBIT(JNIEnv *env, jclass cls) {return H5Z_FILTER_NBIT; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1FILTER_1NONE(JNIEnv *env, jclass cls) { return H5Z_FILTER_NONE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1FILTER_1RESERVED(JNIEnv *env, jclass cls) { return H5Z_FILTER_RESERVED; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1FILTER_1SCALEOFFSET(JNIEnv *env, jclass cls){ return H5Z_FILTER_SCALEOFFSET; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1FILTER_1SHUFFLE(JNIEnv *env, jclass cls) { return H5Z_FILTER_SHUFFLE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1FILTER_1SZIP(JNIEnv *env, jclass cls) { return H5Z_FILTER_SZIP; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1FLAG_1DEFMASK(JNIEnv *env, jclass cls) { return H5Z_FLAG_DEFMASK; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1FLAG_1INVMASK(JNIEnv *env, jclass cls) { return H5Z_FLAG_INVMASK; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1FLAG_1MANDATORY(JNIEnv *env, jclass cls) { return H5Z_FLAG_MANDATORY; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1FLAG_1OPTIONAL(JNIEnv *env, jclass cls) { return H5Z_FLAG_OPTIONAL; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1FLAG_1REVERSE(JNIEnv *env, jclass cls) { return H5Z_FLAG_REVERSE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1FLAG_1SKIP_1EDC(JNIEnv *env, jclass cls) { return H5Z_FLAG_SKIP_EDC; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1MAX_1NFILTERS(JNIEnv *env, jclass cls) { return H5Z_MAX_NFILTERS; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1NO_1EDC(JNIEnv *env, jclass cls) { return H5Z_NO_EDC; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1SO_1INT_1MINBITS_1DEFAULT(JNIEnv *env, jclass cls) { return H5Z_SO_INT_MINBITS_DEFAULT; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1SO_1FLOAT_1DSCALE(JNIEnv *env, jclass cls){return H5Z_SO_FLOAT_DSCALE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1SO_1FLOAT_1ESCALE(JNIEnv *env, jclass cls){return H5Z_SO_FLOAT_ESCALE; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1SO_1INT(JNIEnv *env, jclass cls){return H5Z_SO_INT; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1SHUFFLE_1USER_1NPARMS(JNIEnv *env, jclass cls) { return H5Z_SHUFFLE_USER_NPARMS; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1SHUFFLE_1TOTAL_1NPARMS(JNIEnv *env, jclass cls) { return H5Z_SHUFFLE_TOTAL_NPARMS; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1SZIP_1USER_1NPARMS(JNIEnv *env, jclass cls) { return H5Z_SZIP_USER_NPARMS; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1SZIP_1TOTAL_1NPARMS(JNIEnv *env, jclass cls) { return H5Z_SZIP_TOTAL_NPARMS; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1SZIP_1PARM_1MASK(JNIEnv *env, jclass cls) { return H5Z_SZIP_PARM_MASK; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1SZIP_1PARM_1PPB(JNIEnv *env, jclass cls) { return H5Z_SZIP_PARM_PPB; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1SZIP_1PARM_1BPP(JNIEnv *env, jclass cls) { return H5Z_SZIP_PARM_BPP; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1SZIP_1PARM_1PPS(JNIEnv *env, jclass cls) { return H5Z_SZIP_PARM_PPS; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1NBIT_1USER_1NPARMS(JNIEnv *env, jclass cls) { return H5Z_NBIT_USER_NPARMS; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1SCALEOFFSET_1USER_1NPARMS(JNIEnv *env, jclass cls) { return H5Z_SCALEOFFSET_USER_NPARMS; }
-JNIEXPORT jint JNICALL
+JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_HDF5Constants_H5Z_1FILTER_1ALL(JNIEnv *env, jclass cls) { return H5Z_FILTER_ALL; }
#pragma GCC diagnostic pop
diff --git a/java/src/jni/h5Imp.c b/java/src/jni/h5Imp.c
index 7a24b6f..2eeb075 100644
--- a/java/src/jni/h5Imp.c
+++ b/java/src/jni/h5Imp.c
@@ -5,17 +5,15 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
* For details of the HDF libraries, see the HDF Documentation at:
- * http://hdfdfgroup.org/HDF5/doc/
+ * http://hdfgroup.org/HDF5/doc/
*
*/
diff --git a/java/src/jni/h5Imp.h b/java/src/jni/h5Imp.h
index 79ff3e4..756b8bb 100644
--- a/java/src/jni/h5Imp.h
+++ b/java/src/jni/h5Imp.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <jni.h>
diff --git a/java/src/jni/h5aImp.c b/java/src/jni/h5aImp.c
index 5af8aae..26ec4fc 100644
--- a/java/src/jni/h5aImp.c
+++ b/java/src/jni/h5aImp.c
@@ -5,17 +5,15 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
* For details of the HDF libraries, see the HDF Documentation at:
- * http://hdfdfgroup.org/HDF5/doc/
+ * http://hdfgroup.org/HDF5/doc/
*
*/
diff --git a/java/src/jni/h5aImp.h b/java/src/jni/h5aImp.h
index 5362ab7..62769fd 100644
--- a/java/src/jni/h5aImp.h
+++ b/java/src/jni/h5aImp.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <jni.h>
diff --git a/java/src/jni/h5dImp.c b/java/src/jni/h5dImp.c
index ed1db41..66efed0 100644
--- a/java/src/jni/h5dImp.c
+++ b/java/src/jni/h5dImp.c
@@ -5,17 +5,15 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
* For details of the HDF libraries, see the HDF Documentation at:
- * http://hdfdfgroup.org/HDF5/doc/
+ * http://hdfgroup.org/HDF5/doc/
*
*/
@@ -55,6 +53,7 @@ extern jobject visit_callback;
/* Local Prototypes */
/********************/
+static herr_t H5DreadVL_asstr (JNIEnv *env, hid_t did, hid_t tid, hid_t mem_sid, hid_t file_sid, hid_t xfer_plist_id, jobjectArray buf);
static herr_t H5DreadVL_str (JNIEnv *env, hid_t did, hid_t tid, hid_t mem_sid, hid_t file_sid, hid_t xfer_plist_id, jobjectArray buf);
static herr_t H5DreadVL_array (JNIEnv *env, hid_t did, hid_t tid, hid_t mem_sid, hid_t file_sid, hid_t xfer_plist_id, jobjectArray buf);
static herr_t H5DwriteVL_str (JNIEnv *env, hid_t did, hid_t tid, hid_t mem_sid, hid_t file_sid, hid_t xfer_plist_id, jobjectArray buf);
@@ -994,6 +993,96 @@ Java_hdf_hdf5lib_H5_H5Dwrite_1double
/*
* Class: hdf_hdf5lib_H5
+ * Method: H5DreadVL
+ * Signature: (JJJJJ[Ljava/lang/String;)I
+ */
+JNIEXPORT jint JNICALL
+Java_hdf_hdf5lib_H5_H5DreadVL
+ (JNIEnv *env, jclass clss, jlong dataset_id, jlong mem_type_id, jlong mem_space_id,
+ jlong file_space_id, jlong xfer_plist_id, jobjectArray buf)
+{
+ herr_t status = -1;
+ htri_t isVlenStr=0;
+
+ if (buf == NULL) {
+ h5nullArgument(env, "H5DreadVL: buf is NULL");
+ } /* end if */
+ else {
+ isVlenStr = H5Tdetect_class((hid_t)mem_type_id, H5T_STRING);
+
+ if (isVlenStr)
+ h5badArgument(env, "H5DreadVL: type is not variable length non-string");
+ else
+ status = H5DreadVL_asstr(env, (hid_t)dataset_id, (hid_t)mem_type_id,
+ (hid_t)mem_space_id, (hid_t)file_space_id,
+ (hid_t)xfer_plist_id, buf);
+ } /* end else */
+
+ return (jint)status;
+} /* end Java_hdf_hdf5lib_H5_H5DreadVL */
+
+herr_t
+H5DreadVL_asstr
+ (JNIEnv *env, hid_t did, hid_t tid, hid_t mem_sid, hid_t file_sid, hid_t xfer_plist_id, jobjectArray buf)
+{
+ jint i;
+ jint n;
+ jstring jstr;
+ h5str_t h5str;
+ hvl_t *rdata;
+ size_t size;
+ size_t max_len = 0;
+ herr_t status = -1;
+
+ n = ENVPTR->GetArrayLength(ENVPAR buf);
+ rdata = (hvl_t*)HDcalloc((size_t)n, sizeof(hvl_t));
+ if (rdata == NULL) {
+ h5JNIFatalError(env, "H5DreadVL_notstr: failed to allocate buff for read");
+ } /* end if */
+ else {
+ status = H5Dread(did, tid, mem_sid, file_sid, xfer_plist_id, rdata);
+
+ if (status < 0) {
+ H5Dvlen_reclaim(tid, mem_sid, xfer_plist_id, rdata);
+ HDfree(rdata);
+ h5JNIFatalError(env, "H5DreadVL_notstr: failed to read data");
+ } /* end if */
+ else {
+ max_len = 1;
+ for (i=0; i < n; i++) {
+ if ((rdata + i)->len > max_len)
+ max_len = (rdata + i)->len;
+ }
+
+ size = H5Tget_size(tid) * max_len;
+ HDmemset(&h5str, 0, sizeof(h5str_t));
+ h5str_new(&h5str, 4 * size);
+
+ if (h5str.s == NULL) {
+ H5Dvlen_reclaim(tid, mem_sid, xfer_plist_id, rdata);
+ HDfree(rdata);
+ h5JNIFatalError(env, "H5DreadVL_notstr: failed to allocate buf");
+ } /* end if */
+ else {
+ for (i=0; i < n; i++) {
+ h5str.s[0] = '\0';
+ h5str_sprintf(&h5str, did, tid, rdata+i, 0);
+ jstr = ENVPTR->NewStringUTF(ENVPAR h5str.s);
+ ENVPTR->SetObjectArrayElement(ENVPAR buf, i, jstr);
+ } /* end for */
+ h5str_free(&h5str);
+
+ H5Dvlen_reclaim(tid, mem_sid, xfer_plist_id, rdata);
+ HDfree(rdata);
+ } /* end else */
+ } /* end else */
+ } /* end else */
+
+ return status;
+}
+
+/*
+ * Class: hdf_hdf5lib_H5
* Method: H5Dread_string
* Signature: (JJJJJ[Ljava/lang/String;)I
*/
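The h5dImp.c hunk above adds a Java_hdf_hdf5lib_H5_H5DreadVL entry point that reads non-string variable-length data and hands each element back to Java as a formatted string (via the new H5DreadVL_asstr helper). A minimal usage sketch follows; the Java-side declaration is assumed to mirror the JNI signature comment (JJJJJ[Ljava/lang/String;)I, and the file and dataset names are hypothetical.

    // Sketch only: assumes hdf.hdf5lib.H5 declares
    //   public static native int H5DreadVL(long did, long tid, long msid, long fsid, long plist, String[] buf);
    // matching the (JJJJJ[Ljava/lang/String;)I signature added in this patch.
    import hdf.hdf5lib.H5;
    import hdf.hdf5lib.HDF5Constants;

    public class ReadVLSketch {
        public static void main(String[] args) throws Exception {
            long fid = H5.H5Fopen("vl.h5", HDF5Constants.H5F_ACC_RDONLY,
                                  HDF5Constants.H5P_DEFAULT);              // hypothetical file
            long did = H5.H5Dopen(fid, "/vl_data", HDF5Constants.H5P_DEFAULT);
            long tid = H5.H5Dget_type(did);
            long sid = H5.H5Dget_space(did);
            int  n   = (int) H5.H5Sget_simple_extent_npoints(sid);

            String[] out = new String[n];                                  // one string per VL element
            H5.H5DreadVL(did, tid, HDF5Constants.H5S_ALL, HDF5Constants.H5S_ALL,
                         HDF5Constants.H5P_DEFAULT, out);
            for (String s : out)
                System.out.println(s);

            H5.H5Sclose(sid);
            H5.H5Tclose(tid);
            H5.H5Dclose(did);
            H5.H5Fclose(fid);
        }
    }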
diff --git a/java/src/jni/h5dImp.h b/java/src/jni/h5dImp.h
index 2a91334..3cf24fe 100644
--- a/java/src/jni/h5dImp.h
+++ b/java/src/jni/h5dImp.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <jni.h>
@@ -206,6 +204,15 @@ Java_hdf_hdf5lib_H5_H5Dwrite_1double
/*
* Class: hdf_hdf5lib_H5
+ * Method: H5DreadVL
+ * Signature: (JJJJJ[Ljava/lang/String;)I
+ */
+JNIEXPORT jint JNICALL
+Java_hdf_hdf5lib_H5_H5DreadVL
+(JNIEnv*, jclass, jlong, jlong, jlong, jlong, jlong, jobjectArray);
+
+/*
+ * Class: hdf_hdf5lib_H5
* Method: H5Dread_string
* Signature: (JJJJJ[Ljava/lang/String;)I
*/
@@ -353,7 +360,7 @@ Java_hdf_hdf5lib_H5_H5Diterate
* Method: H5Dflush
* Signature: (J)V
*/
-JNIEXPORT void JNICALL
+JNIEXPORT void JNICALL
Java_hdf_hdf5lib_H5_H5Dflush
(JNIEnv*, jclass, jlong);
@@ -362,7 +369,7 @@ Java_hdf_hdf5lib_H5_H5Dflush
* Method: H5Drefresh
* Signature: (J)V
*/
-JNIEXPORT void JNICALL
+JNIEXPORT void JNICALL
Java_hdf_hdf5lib_H5_H5Drefresh
(JNIEnv*, jclass, jlong);
diff --git a/java/src/jni/h5eImp.c b/java/src/jni/h5eImp.c
index 6e05515..24ddcbc 100644
--- a/java/src/jni/h5eImp.c
+++ b/java/src/jni/h5eImp.c
@@ -5,17 +5,15 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
* For details of the HDF libraries, see the HDF Documentation at:
- * http://hdfdfgroup.org/HDF5/doc/
+ * http://hdfgroup.org/HDF5/doc/
*
*/
diff --git a/java/src/jni/h5eImp.h b/java/src/jni/h5eImp.h
index 88bbdc1..051b2ed 100644
--- a/java/src/jni/h5eImp.h
+++ b/java/src/jni/h5eImp.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <jni.h>
diff --git a/java/src/jni/h5fImp.c b/java/src/jni/h5fImp.c
index 058ba26..8cf5252 100644
--- a/java/src/jni/h5fImp.c
+++ b/java/src/jni/h5fImp.c
@@ -5,17 +5,15 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
* For details of the HDF libraries, see the HDF Documentation at:
- * http://hdfdfgroup.org/HDF5/doc/
+ * http://hdfgroup.org/HDF5/doc/
*
*/
diff --git a/java/src/jni/h5fImp.h b/java/src/jni/h5fImp.h
index 7aff835..fcfdedf 100644
--- a/java/src/jni/h5fImp.h
+++ b/java/src/jni/h5fImp.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <jni.h>
@@ -217,7 +215,7 @@ Java_hdf_hdf5lib_H5_H5Fclear_1elink_1file_1cache
* Method: H5Fstart_swmr_write
* Signature: (J)V
*/
-JNIEXPORT void JNICALL
+JNIEXPORT void JNICALL
Java_hdf_hdf5lib_H5_H5Fstart_1swmr_1write
(JNIEnv *, jclass, jlong);
@@ -226,7 +224,7 @@ Java_hdf_hdf5lib_H5_H5Fstart_1swmr_1write
* Method: H5Fstart_mdc_logging
* Signature: (J)V
*/
-JNIEXPORT void JNICALL
+JNIEXPORT void JNICALL
Java_hdf_hdf5lib_H5_H5Fstart_1mdc_1logging
(JNIEnv *, jclass, jlong);
@@ -235,7 +233,7 @@ Java_hdf_hdf5lib_H5_H5Fstart_1mdc_1logging
* Method: H5Fstop_mdc_logging
* Signature: (J)V
*/
-JNIEXPORT void JNICALL
+JNIEXPORT void JNICALL
Java_hdf_hdf5lib_H5_H5Fstop_1mdc_1logging
(JNIEnv *, jclass, jlong);
@@ -244,7 +242,7 @@ Java_hdf_hdf5lib_H5_H5Fstop_1mdc_1logging
* Method: H5Fget_mdc_logging_status
* Signature: (J[Z)V
*/
-JNIEXPORT void JNICALL
+JNIEXPORT void JNICALL
Java_hdf_hdf5lib_H5_H5Fget_1mdc_1logging_1status
(JNIEnv *, jclass, jlong, jbooleanArray);
diff --git a/java/src/jni/h5gImp.c b/java/src/jni/h5gImp.c
index c40ed64..41ec382 100644
--- a/java/src/jni/h5gImp.c
+++ b/java/src/jni/h5gImp.c
@@ -5,17 +5,15 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
* For details of the HDF libraries, see the HDF Documentation at:
- * http://hdfdfgroup.org/HDF5/doc/
+ * http://hdfgroup.org/HDF5/doc/
*
*/
diff --git a/java/src/jni/h5gImp.h b/java/src/jni/h5gImp.h
index 3113689..a411658 100644
--- a/java/src/jni/h5gImp.h
+++ b/java/src/jni/h5gImp.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <jni.h>
@@ -102,7 +100,7 @@ Java_hdf_hdf5lib_H5_H5Gget_1info_1by_1idx
* Method: H5Gflush
* Signature: (J)V
*/
-JNIEXPORT void JNICALL
+JNIEXPORT void JNICALL
Java_hdf_hdf5lib_H5_H5Gflush
(JNIEnv*, jclass, jlong);
@@ -111,7 +109,7 @@ Java_hdf_hdf5lib_H5_H5Gflush
* Method: H5Grefresh
* Signature: (J)V
*/
-JNIEXPORT void JNICALL
+JNIEXPORT void JNICALL
Java_hdf_hdf5lib_H5_H5Grefresh
(JNIEnv*, jclass, jlong);
diff --git a/java/src/jni/h5iImp.c b/java/src/jni/h5iImp.c
index 71e1b71..47574ca 100644
--- a/java/src/jni/h5iImp.c
+++ b/java/src/jni/h5iImp.c
@@ -5,17 +5,15 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
* For details of the HDF libraries, see the HDF Documentation at:
- * http://hdfdfgroup.org/HDF5/doc/
+ * http://hdfgroup.org/HDF5/doc/
*
*/
diff --git a/java/src/jni/h5iImp.h b/java/src/jni/h5iImp.h
index 2bf839bc..2c854e6 100644
--- a/java/src/jni/h5iImp.h
+++ b/java/src/jni/h5iImp.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <jni.h>
diff --git a/java/src/jni/h5jni.h b/java/src/jni/h5jni.h
index 9414d31..2970e14 100644
--- a/java/src/jni/h5jni.h
+++ b/java/src/jni/h5jni.h
@@ -5,17 +5,15 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
* For details of the HDF libraries, see the HDF Documentation at:
- * http://hdfdfgroup.org/HDF5/doc/
+ * http://hdfgroup.org/HDF5/doc/
*
*/
diff --git a/java/src/jni/h5lImp.c b/java/src/jni/h5lImp.c
index 473b1c7..bf2d7b1 100644
--- a/java/src/jni/h5lImp.c
+++ b/java/src/jni/h5lImp.c
@@ -5,17 +5,15 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
* For details of the HDF libraries, see the HDF Documentation at:
- * http://hdfdfgroup.org/HDF5/doc/
+ * http://hdfgroup.org/HDF5/doc/
*
*/
diff --git a/java/src/jni/h5lImp.h b/java/src/jni/h5lImp.h
index 6b2f3e1..3a7d8ea 100644
--- a/java/src/jni/h5lImp.h
+++ b/java/src/jni/h5lImp.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <jni.h>
diff --git a/java/src/jni/h5oImp.c b/java/src/jni/h5oImp.c
index 24f6988..e8abead 100644
--- a/java/src/jni/h5oImp.c
+++ b/java/src/jni/h5oImp.c
@@ -5,17 +5,15 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
* For details of the HDF libraries, see the HDF Documentation at:
- * http://hdfdfgroup.org/HDF5/doc/
+ * http://hdfgroup.org/HDF5/doc/
*
*/
diff --git a/java/src/jni/h5oImp.h b/java/src/jni/h5oImp.h
index 293dc2e..224f298 100644
--- a/java/src/jni/h5oImp.h
+++ b/java/src/jni/h5oImp.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <jni.h>
@@ -191,7 +189,7 @@ Java_hdf_hdf5lib_H5__1H5Oopen_1by_1idx
* Method: H5Oflush
* Signature: (J)V
*/
-JNIEXPORT void JNICALL
+JNIEXPORT void JNICALL
Java_hdf_hdf5lib_H5_H5Oflush
(JNIEnv*, jclass, jlong);
@@ -200,7 +198,7 @@ Java_hdf_hdf5lib_H5_H5Oflush
* Method: H5Orefresh
* Signature: (J)V
*/
-JNIEXPORT void JNICALL
+JNIEXPORT void JNICALL
Java_hdf_hdf5lib_H5_H5Orefresh
(JNIEnv*, jclass, jlong);
diff --git a/java/src/jni/h5pImp.c b/java/src/jni/h5pImp.c
index 1368139..df8b3c9 100644
--- a/java/src/jni/h5pImp.c
+++ b/java/src/jni/h5pImp.c
@@ -5,17 +5,15 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
* For details of the HDF libraries, see the HDF Documentation at:
- * http://hdfdfgroup.org/HDF5/doc/
+ * http://hdfgroup.org/HDF5/doc/
*
*/
@@ -4883,61 +4881,130 @@ Java_hdf_hdf5lib_H5_H5Pset_1virtual_1printf_1gap
/*
* Class: hdf_hdf5lib_H5
- * Method: H5Pget_file_space
- * Signature: (J[I[J)V
+ * Method: H5Pget_file_space_strategy
+ * Signature: (J[Z[J)I
*/
-JNIEXPORT void JNICALL
-Java_hdf_hdf5lib_H5_H5Pget_1file_1space
- (JNIEnv *env, jclass clss, jlong fcpl_id, jintArray strategy, jlongArray threshold)
+JNIEXPORT jint JNICALL
+Java_hdf_hdf5lib_H5_H5Pget_1file_1space_1strategy
+ (JNIEnv *env, jclass clss, jlong fcpl_id, jbooleanArray persist, jlongArray threshold)
{
herr_t status = -1;
- jint *thestrategyArray = NULL;
+ H5F_fspace_strategy_t thestrategy = H5F_FSPACE_STRATEGY_FSM_AGGR; /* Library default */
jlong *thethresholdArray = NULL;
+ jboolean *thepersistArray = NULL;
jboolean isCopy;
- if (strategy) {
- thestrategyArray = (jint*)ENVPTR->GetIntArrayElements(ENVPAR strategy, &isCopy);
- if (thestrategyArray == NULL) {
- h5JNIFatalError(env, "H5Pget_file_space: strategy not pinned");
- return;
+ if (persist) {
+ thepersistArray = (jboolean*)ENVPTR->GetBooleanArrayElements(ENVPAR persist, &isCopy);
+ if (thepersistArray == NULL) {
+ h5JNIFatalError(env, "H5Pget_file_space: persist not pinned");
+ return -1;
}
}
if (threshold) {
thethresholdArray = (jlong*)ENVPTR->GetLongArrayElements(ENVPAR threshold, &isCopy);
if (thethresholdArray == NULL) {
- if (strategy) ENVPTR->ReleaseIntArrayElements(ENVPAR strategy, thestrategyArray, JNI_ABORT);
+ if (persist) ENVPTR->ReleaseBooleanArrayElements(ENVPAR persist, thepersistArray, JNI_ABORT);
h5JNIFatalError(env, "H5Pget_file_space: threshold not pinned");
- return;
+ return -1;
} /* end if */
} /* end if */
- status = H5Pget_file_space((hid_t)fcpl_id, (H5F_file_space_type_t*)thestrategyArray, (hsize_t*)thethresholdArray);
+ status = H5Pget_file_space_strategy((hid_t)fcpl_id, &thestrategy, (hbool_t*)thepersistArray, (hsize_t*)thethresholdArray);
if (status < 0) {
- if (strategy) ENVPTR->ReleaseIntArrayElements(ENVPAR strategy, thestrategyArray, JNI_ABORT);
+ if (persist) ENVPTR->ReleaseBooleanArrayElements(ENVPAR persist, thepersistArray, JNI_ABORT);
if (threshold) ENVPTR->ReleaseLongArrayElements(ENVPAR threshold, thethresholdArray, JNI_ABORT);
h5libraryError(env);
} /* end if */
else {
- if (strategy) ENVPTR->ReleaseIntArrayElements(ENVPAR strategy, thestrategyArray, 0);
+ if (persist) ENVPTR->ReleaseBooleanArrayElements(ENVPAR persist, thepersistArray, 0);
if (threshold) ENVPTR->ReleaseLongArrayElements(ENVPAR threshold, thethresholdArray, 0);
} /* end else */
-} /* end Java_hdf_hdf5lib_H5_H5Pget_1file_1space */
+ return (jint)thestrategy;
+} /* end Java_hdf_hdf5lib_H5_H5Pget_1file_1space_1strategy */
+
+/*
+ * Class: hdf_hdf5lib_H5
+ * Method: H5Pget_file_space_strategy_persist
+ * Signature: (J)Z
+ */
+JNIEXPORT jboolean JNICALL
+Java_hdf_hdf5lib_H5_H5Pget_1file_1space_1strategy_1persist
+ (JNIEnv *env, jclass clss, jlong fcpl_id)
+{
+ herr_t status = -1;
+ hbool_t thepersist = FALSE;
+
+ status = H5Pget_file_space_strategy((hid_t)fcpl_id, NULL, &thepersist, NULL);
+
+ if (status < 0)
+ h5libraryError(env);
+
+ return (jboolean)thepersist;
+} /* end Java_hdf_hdf5lib_H5_H5Pget_1file_1space_1strategy_1persist */
+
+/*
+ * Class: hdf_hdf5lib_H5
+ * Method: H5Pget_file_space_strategy_threshold
+ * Signature: (J)J
+ */
+JNIEXPORT jlong JNICALL
+Java_hdf_hdf5lib_H5_H5Pget_1file_1space_1strategy_1threshold
+ (JNIEnv *env, jclass clss, jlong fcpl_id)
+{
+ herr_t status = -1;
+ hsize_t thethreshold;
+
+ status = H5Pget_file_space_strategy((hid_t)fcpl_id, NULL, NULL, &thethreshold);
+
+ if (status < 0)
+ h5libraryError(env);
+
+ return (jlong)thethreshold;
+} /* end Java_hdf_hdf5lib_H5_H5Pget_1file_1space_1strategy_1threshold */
/*
* Class: hdf_hdf5lib_H5
- * Method: H5Pset_file_space
- * Signature: (JIJ)V
+ * Method: H5Pset_file_space_strategy
+ * Signature: (JIZJ)V
*/
JNIEXPORT void JNICALL
-Java_hdf_hdf5lib_H5_H5Pset_1file_1space
- (JNIEnv *env, jclass clss, jlong fcpl_id, jint strategy, jlong threshold)
+Java_hdf_hdf5lib_H5_H5Pset_1file_1space_1strategy
+ (JNIEnv *env, jclass clss, jlong fcpl_id, jint strategy, jboolean persist, jlong threshold)
{
- if (H5Pset_file_space((hid_t)fcpl_id, (H5F_file_space_type_t)strategy, (hsize_t)threshold) < 0)
+ if (H5Pset_file_space_strategy((hid_t)fcpl_id, (H5F_fspace_strategy_t)strategy, (hbool_t)persist, (hsize_t)threshold) < 0)
h5libraryError(env);
-} /* end Java_hdf_hdf5lib_H5_H5Pset_1file_1space */
+} /* end Java_hdf_hdf5lib_H5_H5Pset_1file_1space_1strategy */
+/*
+ * Class: hdf_hdf5lib_H5
+ * Method: H5Pset_file_space_page_size
+ * Signature: (JJ)V
+ */
+JNIEXPORT void JNICALL
+Java_hdf_hdf5lib_H5_H5Pset_1file_1space_1page_1size
+ (JNIEnv *env, jclass clss, jlong fcpl_id, jlong fsp_size)
+{
+ if (H5Pset_file_space_page_size((hid_t)fcpl_id, (hsize_t)fsp_size) < 0)
+ h5libraryError(env);
+}
+
+/*
+ * Class: hdf_hdf5lib_H5
+ * Method: H5Pget_file_space_page_size
+ * Signature: (J)J
+ */
+JNIEXPORT jlong JNICALL
+Java_hdf_hdf5lib_H5_H5Pget_1file_1space_1page_1size
+ (JNIEnv *env, jclass clss, jlong fcpl_id)
+{
+ hsize_t fsp_size = 0;
+ if (H5Pget_file_space_page_size((hid_t)fcpl_id, &fsp_size) < 0)
+ h5libraryError(env);
+ return (jlong)fsp_size;
+}
static herr_t
H5P_cls_create_cb
diff --git a/java/src/jni/h5pImp.h b/java/src/jni/h5pImp.h
index 927501e..d413f69 100644
--- a/java/src/jni/h5pImp.h
+++ b/java/src/jni/h5pImp.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <jni.h>
@@ -1385,22 +1383,57 @@ Java_hdf_hdf5lib_H5_H5Pset_1virtual_1printf_1gap
/*
* Class: hdf_hdf5lib_H5
- * Method: H5Pget_file_space
- * Signature: (J[I[J)V
+ * Method: H5Pget_file_space_strategy
+ * Signature: (J[Z[J)I
+ */
+JNIEXPORT jint JNICALL
+Java_hdf_hdf5lib_H5_H5Pget_1file_1space_1strategy
+(JNIEnv *, jclass, jlong, jbooleanArray, jlongArray);
+
+/*
+ * Class: hdf_hdf5lib_H5
+ * Method: H5Pget_file_space_strategy_persist
+ * Signature: (J)Z
+ */
+JNIEXPORT jboolean JNICALL
+Java_hdf_hdf5lib_H5_H5Pget_1file_1space_1strategy_1persist
+(JNIEnv *, jclass, jlong);
+
+/*
+ * Class: hdf_hdf5lib_H5
+ * Method: H5Pget_file_space_strategy_threshold
+ * Signature: (J)J
+ */
+JNIEXPORT jlong JNICALL
+Java_hdf_hdf5lib_H5_H5Pget_1file_1space_1strategy_1threshold
+(JNIEnv *, jclass, jlong);
+
+/*
+ * Class: hdf_hdf5lib_H5
+ * Method: H5Pset_file_space_strategy
+ * Signature: (JIZJ)V
*/
JNIEXPORT void JNICALL
-Java_hdf_hdf5lib_H5_H5Pget_1file_1space
-(JNIEnv *, jclass, jlong, jintArray, jlongArray);
+Java_hdf_hdf5lib_H5_H5Pset_1file_1space_1strategy
+(JNIEnv *, jclass, jlong, jint, jboolean, jlong);
+/*
+ * Class: hdf_hdf5lib_H5
+ * Method: H5Pget_file_space_page_size
+ * Signature: (J)J
+ */
+JNIEXPORT jlong JNICALL
+Java_hdf_hdf5lib_H5_H5Pget_1file_1space_1page_1size
+(JNIEnv *, jclass, jlong);
/*
* Class: hdf_hdf5lib_H5
- * Method: H5Pset_file_space
- * Signature: (JIJ)V
+ * Method: H5Pset_file_space_page_size
+ * Signature: (JJ)V
*/
JNIEXPORT void JNICALL
-Java_hdf_hdf5lib_H5_H5Pset_1file_1space
-(JNIEnv *, jclass, jlong, jint, jlong);
+Java_hdf_hdf5lib_H5_H5Pset_1file_1space_1page_1size
+(JNIEnv *, jclass, jlong, jlong);
/*
* Class: hdf_hdf5lib_H5
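Taken together, the h5pImp.c and h5pImp.h changes above replace the old H5Pget/set_file_space wrappers with the 1.10 file-space API: H5Pset_file_space_strategy (strategy, persist flag, threshold), per-field getters for persist and threshold, and the file-space page-size pair. A hedged Java-side sketch follows, with method shapes inferred from the JNI signature comments ((JIZJ)V, (J[Z[J)I, (JJ)V, (J)J); the strategy constant name and page size are assumptions.

    // Sketch only: assumes hdf.hdf5lib.H5 exposes the wrappers added above and that
    // HDF5Constants defines the H5F_FSPACE_STRATEGY_* values (an assumption here).
    import hdf.hdf5lib.H5;
    import hdf.hdf5lib.HDF5Constants;

    public class FileSpaceSketch {
        public static void main(String[] args) throws Exception {
            long fcpl = H5.H5Pcreate(HDF5Constants.H5P_FILE_CREATE);

            // strategy (int), persist free-space tracking (boolean), threshold (long): (JIZJ)V
            H5.H5Pset_file_space_strategy(fcpl,
                    HDF5Constants.H5F_FSPACE_STRATEGY_PAGE,  // assumed constant name
                    true, 1L);
            H5.H5Pset_file_space_page_size(fcpl, 4096L);     // (JJ)V

            // read the settings back: (J[Z[J)I returns the strategy and fills the arrays
            boolean[] persist   = new boolean[1];
            long[]    threshold = new long[1];
            int  strategy = H5.H5Pget_file_space_strategy(fcpl, persist, threshold);
            long pageSize = H5.H5Pget_file_space_page_size(fcpl);   // (J)J

            System.out.println("strategy=" + strategy + " persist=" + persist[0]
                    + " threshold=" + threshold[0] + " pageSize=" + pageSize);

            H5.H5Pclose(fcpl);
        }
    }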
diff --git a/java/src/jni/h5plImp.c b/java/src/jni/h5plImp.c
index 59de3cf..f064634 100644
--- a/java/src/jni/h5plImp.c
+++ b/java/src/jni/h5plImp.c
@@ -5,17 +5,15 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
* For details of the HDF libraries, see the HDF Documentation at:
- * http://hdfdfgroup.org/HDF5/doc/
+ * http://hdfgroup.org/HDF5/doc/
*
*/
@@ -61,6 +59,165 @@ Java_hdf_hdf5lib_H5_H5PLget_1loading_1state
return (jint)plugin_type;
} /* end Java_hdf_hdf5lib_H5_H5PLget_1loading_1state */
+/*
+ * Class: hdf_hdf5lib_H5
+ * Method: H5PLappend
+ * Signature: (Ljava/lang/String;)V
+ */
+JNIEXPORT void JNICALL
+Java_hdf_hdf5lib_H5_H5PLappend
+ (JNIEnv *env, jclass clss, jobjectArray plugin_path)
+{
+ char *aName;
+ herr_t retVal = -1;
+
+ PIN_JAVA_STRING(plugin_path, aName);
+ if (aName != NULL) {
+ retVal = H5PLappend(aName);
+
+ UNPIN_JAVA_STRING(plugin_path, aName);
+
+ if (retVal < 0)
+ h5libraryError(env);
+ }
+} /* end Java_hdf_hdf5lib_H5_H5PLappend */
+/*
+ * Class: hdf_hdf5lib_H5
+ * Method: H5PLprepend
+ * Signature: (Ljava/lang/String;)V
+ */
+JNIEXPORT void JNICALL
+Java_hdf_hdf5lib_H5_H5PLprepend
+ (JNIEnv *env, jclass clss, jobjectArray plugin_path)
+{
+ char *aName;
+ herr_t retVal = -1;
+
+ PIN_JAVA_STRING(plugin_path, aName);
+ if (aName != NULL) {
+ retVal = H5PLprepend(aName);
+
+ UNPIN_JAVA_STRING(plugin_path, aName);
+
+ if (retVal < 0)
+ h5libraryError(env);
+ }
+} /* end Java_hdf_hdf5lib_H5_H5PLprepend */
+
+/*
+ * Class: hdf_hdf5lib_H5
+ * Method: H5PLreplace
+ * Signature: (Ljava/lang/String;I)V
+ */
+JNIEXPORT void JNICALL
+Java_hdf_hdf5lib_H5_H5PLreplace
+ (JNIEnv *env, jclass clss, jobjectArray plugin_path, jint index)
+{
+ char *aName;
+ herr_t retVal = -1;
+
+ PIN_JAVA_STRING(plugin_path, aName);
+ if (aName != NULL) {
+ retVal = H5PLreplace(aName, index);
+
+ UNPIN_JAVA_STRING(plugin_path, aName);
+
+ if (retVal < 0)
+ h5libraryError(env);
+ }
+} /* end Java_hdf_hdf5lib_H5_H5PLreplace */
+
+/*
+ * Class: hdf_hdf5lib_H5
+ * Method: H5PLinsert
+ * Signature: (Ljava/lang/String;I)V
+ */
+JNIEXPORT void JNICALL
+Java_hdf_hdf5lib_H5_H5PLinsert
+ (JNIEnv *env, jclass clss, jobjectArray plugin_path, jint index)
+{
+ char *aName;
+ herr_t retVal = -1;
+
+ PIN_JAVA_STRING(plugin_path, aName);
+ if (aName != NULL) {
+ retVal = H5PLinsert(aName, index);
+
+ UNPIN_JAVA_STRING(plugin_path, aName);
+
+ if (retVal < 0)
+ h5libraryError(env);
+ }
+} /* end Java_hdf_hdf5lib_H5_H5PLinsert */
+
+/*
+ * Class: hdf_hdf5lib_H5
+ * Method: H5PLremove
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL
+Java_hdf_hdf5lib_H5_H5PLremove
+ (JNIEnv *env, jclass clss, jint index)
+{
+ if (H5PLremove(index) < 0)
+ h5libraryError(env);
+} /* end Java_hdf_hdf5lib_H5_H5PLremove */
+
+/*
+ * Class: hdf_hdf5lib_H5
+ * Method: H5PLget
+ * Signature: (I)Ljava/lang/String;
+ */
+JNIEXPORT jstring JNICALL
+Java_hdf_hdf5lib_H5_H5PLget
+ (JNIEnv *env, jclass clss, jint index)
+{
+ char *aName;
+ jstring str = NULL;
+ ssize_t buf_size;
+
+ /* get the length of the name */
+ buf_size = H5PLget(index, NULL, 0);
+
+ if (buf_size <= 0) {
+ h5badArgument(env, "H5PLget: buf_size <= 0");
+ } /* end if */
+ else {
+ buf_size++; /* add extra space for the null terminator */
+ aName = (char*)HDmalloc(sizeof(char) * (size_t)buf_size);
+ if (aName == NULL) {
+ h5outOfMemory(env, "H5PLget: malloc failed");
+ } /* end if */
+ else {
+ buf_size = H5PLget(index, aName, (size_t)buf_size);
+ if (buf_size < 0) {
+ h5libraryError(env);
+ } /* end if */
+ else {
+ str = ENVPTR->NewStringUTF(ENVPAR aName);
+ }
+ HDfree(aName);
+ }
+ }
+ return str;
+} /* end Java_hdf_hdf5lib_H5_H5PLget */
+
+/*
+ * Class: hdf_hdf5lib_H5
+ * Method: H5PLsize
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL
+Java_hdf_hdf5lib_H5_H5PLsize
+ (JNIEnv *env, jclass clss)
+{
+ unsigned int listsize = 0;
+ if (H5PLsize(&listsize) < 0) {
+ h5libraryError(env);
+ }
+ return (jint)listsize;
+} /* end Java_hdf_hdf5lib_H5_H5PLsize */
+
#ifdef __cplusplus
} /* end extern "C" */
#endif /* __cplusplus */
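The h5plImp.c additions above back a set of new static methods on hdf.hdf5lib.H5 for managing the dynamic-plugin search path. A minimal usage sketch, assuming a build of the hdf5_java library that contains this change; the method names and signatures come from the JNI prototypes in this diff, while the directory paths are purely illustrative:

import hdf.hdf5lib.H5;

public class PluginPathSketch {
    public static void main(String[] args) throws Exception {
        int original = H5.H5PLsize();               // entries currently in the plugin search table
        H5.H5PLappend("/opt/hdf5/plugins");         // add a path at the end
        H5.H5PLprepend("/usr/local/hdf5/plugins");  // add a path at the front
        H5.H5PLinsert("/tmp/plugins", original);    // insert at a specific index
        H5.H5PLreplace("/srv/plugins", original);   // overwrite the entry at that index
        System.out.println("entry " + original + " = " + H5.H5PLget(original));
        H5.H5PLremove(original);                    // drop that entry again
        System.out.println("table size now " + H5.H5PLsize());
    }
}

As the TestH5PLpaths case further down does, checking H5.H5PLsize() after each call is the simplest way to confirm the table changed as expected.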
diff --git a/java/src/jni/h5plImp.h b/java/src/jni/h5plImp.h
index d0507a8..5336621 100644
--- a/java/src/jni/h5plImp.h
+++ b/java/src/jni/h5plImp.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <jni.h>
@@ -41,6 +39,69 @@ JNIEXPORT jint JNICALL
Java_hdf_hdf5lib_H5_H5PLget_1loading_1state
(JNIEnv *, jclass);
+/*
+ * Class: hdf_hdf5lib_H5
+ * Method: H5PLappend
+ * Signature: (Ljava/lang/String;)V
+ */
+JNIEXPORT void JNICALL
+Java_hdf_hdf5lib_H5_H5PLappend
+ (JNIEnv *, jclass, jobjectArray);
+
+/*
+ * Class: hdf_hdf5lib_H5
+ * Method: H5PLprepend
+ * Signature: (Ljava/lang/String;)V
+ */
+JNIEXPORT void JNICALL
+Java_hdf_hdf5lib_H5_H5PLprepend
+ (JNIEnv *, jclass, jobjectArray);
+
+/*
+ * Class: hdf_hdf5lib_H5
+ * Method: H5PLreplace
+ * Signature: (Ljava/lang/String;I)V
+ */
+JNIEXPORT void JNICALL
+Java_hdf_hdf5lib_H5_H5PLreplace
+ (JNIEnv *, jclass, jobjectArray, jint);
+
+/*
+ * Class: hdf_hdf5lib_H5
+ * Method: H5PLinsert
+ * Signature: (Ljava/lang/String;I)V
+ */
+JNIEXPORT void JNICALL
+Java_hdf_hdf5lib_H5_H5PLinsert
+ (JNIEnv *, jclass, jobjectArray, jint);
+
+/*
+ * Class: hdf_hdf5lib_H5
+ * Method: H5PLremove
+ * Signature: (I)V
+ */
+JNIEXPORT void JNICALL
+Java_hdf_hdf5lib_H5_H5PLremove
+ (JNIEnv *, jclass, jint);
+
+/*
+ * Class: hdf_hdf5lib_H5
+ * Method: H5PLget
+ * Signature: (I)Ljava/lang/String;
+ */
+JNIEXPORT jstring JNICALL
+Java_hdf_hdf5lib_H5_H5PLget
+ (JNIEnv *, jclass, jint);
+
+/*
+ * Class: hdf_hdf5lib_H5
+ * Method: H5PLsize
+ * Signature: ()I
+ */
+JNIEXPORT jint JNICALL
+Java_hdf_hdf5lib_H5_H5PLsize
+ (JNIEnv *, jclass);
+
#ifdef __cplusplus
} /* end extern "C" */
#endif /* __cplusplus */
diff --git a/java/src/jni/h5rImp.c b/java/src/jni/h5rImp.c
index 647f973..b250550 100644
--- a/java/src/jni/h5rImp.c
+++ b/java/src/jni/h5rImp.c
@@ -5,17 +5,15 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
* For details of the HDF libraries, see the HDF Documentation at:
- * http://hdfdfgroup.org/HDF5/doc/
+ * http://hdfgroup.org/HDF5/doc/
*
*/
#ifdef __cplusplus
diff --git a/java/src/jni/h5rImp.h b/java/src/jni/h5rImp.h
index 1f8d206..e28329b 100644
--- a/java/src/jni/h5rImp.h
+++ b/java/src/jni/h5rImp.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <jni.h>
diff --git a/java/src/jni/h5sImp.c b/java/src/jni/h5sImp.c
index d996ae5..c578ab8 100644
--- a/java/src/jni/h5sImp.c
+++ b/java/src/jni/h5sImp.c
@@ -5,17 +5,15 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
* For details of the HDF libraries, see the HDF Documentation at:
- * http://hdfdfgroup.org/HDF5/doc/
+ * http://hdfgroup.org/HDF5/doc/
*
*/
diff --git a/java/src/jni/h5sImp.h b/java/src/jni/h5sImp.h
index fd2184d..141e504 100644
--- a/java/src/jni/h5sImp.h
+++ b/java/src/jni/h5sImp.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <jni.h>
diff --git a/java/src/jni/h5tImp.c b/java/src/jni/h5tImp.c
index 1467b41..7b4af56 100644
--- a/java/src/jni/h5tImp.c
+++ b/java/src/jni/h5tImp.c
@@ -5,17 +5,15 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
* For details of the HDF libraries, see the HDF Documentation at:
- * http://hdfdfgroup.org/HDF5/doc/
+ * http://hdfgroup.org/HDF5/doc/
*
*/
diff --git a/java/src/jni/h5tImp.h b/java/src/jni/h5tImp.h
index e614082..374d992 100644
--- a/java/src/jni/h5tImp.h
+++ b/java/src/jni/h5tImp.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <jni.h>
@@ -613,7 +611,7 @@ Java_hdf_hdf5lib_H5_H5Tconvert
* Method: H5Tflush
* Signature: (J)V
*/
-JNIEXPORT void JNICALL
+JNIEXPORT void JNICALL
Java_hdf_hdf5lib_H5_H5Tflush
(JNIEnv*, jclass, jlong);
@@ -622,7 +620,7 @@ Java_hdf_hdf5lib_H5_H5Tflush
* Method: H5Trefresh
* Signature: (J)V
*/
-JNIEXPORT void JNICALL
+JNIEXPORT void JNICALL
Java_hdf_hdf5lib_H5_H5Trefresh
(JNIEnv*, jclass, jlong);
diff --git a/java/src/jni/h5util.c b/java/src/jni/h5util.c
index 0d2999a..bd9fc0f 100644
--- a/java/src/jni/h5util.c
+++ b/java/src/jni/h5util.c
@@ -5,17 +5,15 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
* For details of the HDF libraries, see the HDF Documentation at:
- * http://hdfdfgroup.org/HDF5/doc/
+ * http://hdfgroup.org/HDF5/doc/
*
*/
@@ -352,7 +350,7 @@ h5str_sprintf
this_str = (char*)HDmalloc(4 * (nll + 1));
if (1 == nll) {
- sprintf(this_str, "0x%02x", ucptr[0]);
+ sprintf(this_str, "%#02x", ucptr[0]);
}
else {
for (i = 0; i < (int)nll; i++)
@@ -483,7 +481,7 @@ h5str_sprintf
this_str = (char*)HDmalloc(4 * (nll + 1));
if (1 == nll) {
- sprintf(this_str, "0x%02x", ucptr[0]);
+ sprintf(this_str, "%#02x", ucptr[0]);
}
else {
for (i = 0; i < (int)nll; i++)
@@ -1053,8 +1051,20 @@ h5str_get_little_endian_type
p_type=H5Tcopy(H5T_IEEE_F64LE);
break;
- case H5T_TIME:
case H5T_BITFIELD:
+ {
+ if ( size == 1)
+ p_type=H5Tcopy(H5T_STD_B8LE);
+ else if ( size == 2)
+ p_type=H5Tcopy(H5T_STD_B16LE);
+ else if ( size == 4)
+ p_type=H5Tcopy(H5T_STD_B32LE);
+ else if ( size == 8)
+ p_type=H5Tcopy(H5T_STD_B64LE);
+ }
+ break;
+
+ case H5T_TIME:
case H5T_OPAQUE:
case H5T_STRING:
case H5T_COMPOUND:
@@ -1124,8 +1134,20 @@ h5str_get_big_endian_type
p_type=H5Tcopy(H5T_IEEE_F64BE);
break;
- case H5T_TIME:
case H5T_BITFIELD:
+ {
+ if ( size == 1)
+ p_type=H5Tcopy(H5T_STD_B8BE);
+ else if ( size == 2)
+ p_type=H5Tcopy(H5T_STD_B16BE);
+ else if ( size == 4)
+ p_type=H5Tcopy(H5T_STD_B32BE);
+ else if ( size == 8)
+ p_type=H5Tcopy(H5T_STD_B64BE);
+ }
+ break;
+
+ case H5T_TIME:
case H5T_OPAQUE:
case H5T_STRING:
case H5T_COMPOUND:
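The two h5util.c hunks above stop lumping H5T_BITFIELD in with the unhandled cases and instead map it to the standard bitfield type of matching width. A rough Java-side rendering of that selection logic, shown only to illustrate the mapping; the helper class and method are hypothetical, and only the little-endian branch is spelled out (the big-endian branch is identical with the ...BE constants):

import hdf.hdf5lib.H5;
import hdf.hdf5lib.HDF5Constants;

final class BitfieldTypeSketch {
    /* Copy the standard little-endian bitfield type whose size (in bytes) matches,
       as the C helper above does; the caller must H5Tclose() the returned id. */
    static long littleEndianBitfield(long size) throws Exception {
        if (size == 1) return H5.H5Tcopy(HDF5Constants.H5T_STD_B8LE);
        if (size == 2) return H5.H5Tcopy(HDF5Constants.H5T_STD_B16LE);
        if (size == 4) return H5.H5Tcopy(HDF5Constants.H5T_STD_B32LE);
        if (size == 8) return H5.H5Tcopy(HDF5Constants.H5T_STD_B64LE);
        return -1; // no standard bitfield type of this width
    }
}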
diff --git a/java/src/jni/h5util.h b/java/src/jni/h5util.h
index f690e8b..434a107 100644
--- a/java/src/jni/h5util.h
+++ b/java/src/jni/h5util.h
@@ -5,17 +5,15 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
* For details of the HDF libraries, see the HDF Documentation at:
- * http://hdfdfgroup.org/HDF5/doc/
+ * http://hdfgroup.org/HDF5/doc/
*
*/
diff --git a/java/src/jni/h5zImp.c b/java/src/jni/h5zImp.c
index 2c8d8c3..a5e6cd8 100644
--- a/java/src/jni/h5zImp.c
+++ b/java/src/jni/h5zImp.c
@@ -5,17 +5,15 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
* For details of the HDF libraries, see the HDF Documentation at:
- * http://hdfdfgroup.org/HDF5/doc/
+ * http://hdfgroup.org/HDF5/doc/
*
*/
diff --git a/java/src/jni/h5zImp.h b/java/src/jni/h5zImp.h
index bbdc506..bffe9d8 100644
--- a/java/src/jni/h5zImp.h
+++ b/java/src/jni/h5zImp.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <jni.h>
diff --git a/java/src/jni/nativeData.c b/java/src/jni/nativeData.c
index 8388c99..33f4953 100644
--- a/java/src/jni/nativeData.c
+++ b/java/src/jni/nativeData.c
@@ -5,17 +5,15 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
* For details of the HDF libraries, see the HDF Documentation at:
- * http://hdfdfgroup.org/HDF5/doc/
+ * http://hdfgroup.org/HDF5/doc/
*
*/
/*
diff --git a/java/src/jni/nativeData.h b/java/src/jni/nativeData.h
index f8df4ce..e9e1a5d 100644
--- a/java/src/jni/nativeData.h
+++ b/java/src/jni/nativeData.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <jni.h>
diff --git a/java/test/CMakeLists.txt b/java/test/CMakeLists.txt
index b3a996b..8912b3f 100644
--- a/java/test/CMakeLists.txt
+++ b/java/test/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_JAVA_TEST Java)
set (CMAKE_VERBOSE_MAKEFILE 1)
@@ -70,31 +70,31 @@ set (HDF_JAVA_TEST_FILES
foreach (h5_file ${HDF_JAVA_TEST_FILES})
HDFTEST_COPY_FILE("${PROJECT_SOURCE_DIR}/${h5_file}" "${PROJECT_BINARY_DIR}/${h5_file}" "${HDF5_JAVA_TEST_LIB_TARGET}_files")
-endforeach (h5_file ${HDF_JAVA_TEST_FILES})
+endforeach ()
HDFTEST_COPY_FILE("${PROJECT_SOURCE_DIR}/h5ex_g_iterate.orig" "${PROJECT_BINARY_DIR}/h5ex_g_iterate.hdf" "${HDF5_JAVA_TEST_LIB_TARGET}_files")
add_custom_target(${HDF5_JAVA_TEST_LIB_TARGET}_files ALL COMMENT "Copying files needed by ${HDF5_JAVA_TEST_LIB_TARGET} tests" DEPENDS ${${HDF5_JAVA_TEST_LIB_TARGET}_files_list})
if (WIN32)
set (CMAKE_JAVA_INCLUDE_FLAG_SEP ";")
-else (WIN32)
+else ()
set (CMAKE_JAVA_INCLUDE_FLAG_SEP ":")
-endif (WIN32)
+endif ()
set (CMAKE_JAVA_CLASSPATH ".")
foreach (CMAKE_INCLUDE_PATH ${CMAKE_JAVA_INCLUDE_PATH})
set (CMAKE_JAVA_CLASSPATH "${CMAKE_JAVA_CLASSPATH}${CMAKE_JAVA_INCLUDE_FLAG_SEP}${CMAKE_INCLUDE_PATH}")
-endforeach (CMAKE_INCLUDE_PATH)
+endforeach ()
set (CMAKE_JAVA_CLASSPATH "${CMAKE_JAVA_CLASSPATH}${CMAKE_JAVA_INCLUDE_FLAG_SEP}${${HDF5_JAVA_TEST_LIB_TARGET}_JAR_FILE}")
set (testfilter "OK (598 tests)")
if (CMAKE_BUILD_TYPE MATCHES Debug)
if (WIN32)
set (CMD_ARGS "-Dhdf.hdf5lib.H5.loadLibraryName=hdf5_java_D;")
- else()
+ else ()
set (CMD_ARGS "-Dhdf.hdf5lib.H5.loadLibraryName=hdf5_java_debug;")
- endif()
-endif(CMAKE_BUILD_TYPE MATCHES Debug)
+ endif ()
+endif ()
add_test (
NAME JUnit-interface-clearall-objects
diff --git a/java/test/JUnit-interface.txt b/java/test/JUnit-interface.txt
index 7fd80f8..ab2f3b1 100644
--- a/java/test/JUnit-interface.txt
+++ b/java/test/JUnit-interface.txt
@@ -416,6 +416,7 @@ JUnit version 4.11
.testH5P_layout
.testH5Pget_link_creation_order
.testH5Pset_shared_mesg_nindexes_InvalidHIGHnindexes
+.testH5P_file_space_page_size
.testH5Pget_shared_mesg_index_Invalid_indexnum
.testH5Pset_data_transform_NullExpression
.testH5Pset_elink_prefix_null
@@ -438,9 +439,9 @@ JUnit version 4.11
.testH5Pset_shared_mesg_index
.testH5Pset_copy_object
.testH5Pset_link_creation_order_trackedPLUSindexed
+.testH5P_file_space_strategy
.testH5Pset_copy_object_invalidobject
.testH5Pset_est_link_info_InvalidValues
-.testH5P_file_space
.testH5Pset_local_heap_size_hint
.testH5Pget_est_link_info
.testH5Pset_scaleoffset
@@ -530,6 +531,7 @@ JUnit version 4.11
.testH5Pvirtual_storage
.testH5Pget_selection_source_dataset
.testH5Pget_source_filename
+.testH5Pset_get_virtual_printf_gap
.testH5Pget_virtual_count
.testH5Pset_get_virtual_view
.testH5Pget_mapping_parameters
@@ -631,13 +633,14 @@ JUnit version 4.11
.testH5Ocomment_clear
.testH5Ocopy_cur_not_exists
.TestH5PLplugins
+.TestH5PLpaths
.testH5Zfilter_avail
.testH5Zunregister_predefined
.testH5Zget_filter_info
Time: XXXX
-OK (635 tests)
+OK (638 tests)
HDF5-DIAG: Error detected in HDF5 (version (number)) thread (IDs):
#000: (file name) line (number) in H5Fopen(): can't set access and transfer property lists
diff --git a/java/test/Makefile.am b/java/test/Makefile.am
index 92ac12d..b7409cf 100644
--- a/java/test/Makefile.am
+++ b/java/test/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
diff --git a/java/test/TestAll.java b/java/test/TestAll.java
index a4b44c3..de5d333 100644
--- a/java/test/TestAll.java
+++ b/java/test/TestAll.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package test;
diff --git a/java/test/TestH5.java b/java/test/TestH5.java
index 1a78bea..413e58d 100644
--- a/java/test/TestH5.java
+++ b/java/test/TestH5.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package test;
@@ -164,7 +162,7 @@ public class TestH5 {
*/
@Test
public void testH5get_libversion() {
- int libversion[] = { 1, 9, 0 };
+ int libversion[] = { 1, 11, 0 };
try {
H5.H5get_libversion(libversion);
@@ -186,7 +184,7 @@ public class TestH5 {
*/
@Test
public void testH5check_version() {
- int majnum = 1, minnum = 9, relnum = 0;
+ int majnum = 1, minnum = 11, relnum = 0;
try {
H5.H5check_version(majnum, minnum, relnum);
diff --git a/java/test/TestH5A.java b/java/test/TestH5A.java
index fb7b31a..5e1e3f6 100644
--- a/java/test/TestH5A.java
+++ b/java/test/TestH5A.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package test;
diff --git a/java/test/TestH5D.java b/java/test/TestH5D.java
index fa051db..372fdba 100644
--- a/java/test/TestH5D.java
+++ b/java/test/TestH5D.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package test;
diff --git a/java/test/TestH5Dparams.java b/java/test/TestH5Dparams.java
index 2cdd121..a3618f2 100644
--- a/java/test/TestH5Dparams.java
+++ b/java/test/TestH5Dparams.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package test;
diff --git a/java/test/TestH5Dplist.java b/java/test/TestH5Dplist.java
index eb1669f..6feaa23 100644
--- a/java/test/TestH5Dplist.java
+++ b/java/test/TestH5Dplist.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package test;
diff --git a/java/test/TestH5E.java b/java/test/TestH5E.java
index 028369d..9a36365 100644
--- a/java/test/TestH5E.java
+++ b/java/test/TestH5E.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package test;
@@ -543,8 +541,8 @@ public class TestH5E {
}
assertFalse("testH5Ewalk:H5Ewalk2 ",((H5E_walk_data)walk_data).walkdata.isEmpty());
assertTrue("testH5Ewalk:H5Ewalk2 "+((H5E_walk_data)walk_data).walkdata.size(),((H5E_walk_data)walk_data).walkdata.size()==3);
- assertTrue("testH5Ewalk:H5Ewalk2 "+((wdata)((H5E_walk_data)walk_data).walkdata.get(0)).line,((wdata)((H5E_walk_data)walk_data).walkdata.get(0)).line==3767);
- assertTrue("testH5Ewalk:H5Ewalk2 "+((wdata)((H5E_walk_data)walk_data).walkdata.get(1)).line,((wdata)((H5E_walk_data)walk_data).walkdata.get(1)).line==5506);
+ assertTrue("testH5Ewalk:H5Ewalk2 "+((wdata)((H5E_walk_data)walk_data).walkdata.get(0)).line,((wdata)((H5E_walk_data)walk_data).walkdata.get(0)).line==3765);
+ assertTrue("testH5Ewalk:H5Ewalk2 "+((wdata)((H5E_walk_data)walk_data).walkdata.get(1)).line,((wdata)((H5E_walk_data)walk_data).walkdata.get(1)).line==5504);
assertTrue("testH5Ewalk:H5Ewalk2 "+((wdata)((H5E_walk_data)walk_data).walkdata.get(1)).func_name,((wdata)((H5E_walk_data)walk_data).walkdata.get(1)).func_name.compareToIgnoreCase("H5P_verify_apl_and_dxpl")==0);
assertTrue("testH5Ewalk:H5Ewalk2 "+((wdata)((H5E_walk_data)walk_data).walkdata.get(0)).err_desc,((wdata)((H5E_walk_data)walk_data).walkdata.get(0)).err_desc.compareToIgnoreCase("not a property list")==0);
assertTrue("testH5Ewalk:H5Ewalk2 "+((wdata)((H5E_walk_data)walk_data).walkdata.get(1)).err_desc,((wdata)((H5E_walk_data)walk_data).walkdata.get(1)).err_desc.compareToIgnoreCase("not the required access property list")==0);
diff --git a/java/test/TestH5Edefault.java b/java/test/TestH5Edefault.java
index 510aa2f..835ccba 100644
--- a/java/test/TestH5Edefault.java
+++ b/java/test/TestH5Edefault.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package test;
diff --git a/java/test/TestH5Eregister.java b/java/test/TestH5Eregister.java
index 13e0ca2..742b47f 100644
--- a/java/test/TestH5Eregister.java
+++ b/java/test/TestH5Eregister.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package test;
diff --git a/java/test/TestH5F.java b/java/test/TestH5F.java
index 3451187..ac2f70d 100644
--- a/java/test/TestH5F.java
+++ b/java/test/TestH5F.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package test;
diff --git a/java/test/TestH5Fbasic.java b/java/test/TestH5Fbasic.java
index c08daff..faf0bf7 100644
--- a/java/test/TestH5Fbasic.java
+++ b/java/test/TestH5Fbasic.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package test;
diff --git a/java/test/TestH5Fparams.java b/java/test/TestH5Fparams.java
index 2d67f3d..fffded1 100644
--- a/java/test/TestH5Fparams.java
+++ b/java/test/TestH5Fparams.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package test;
diff --git a/java/test/TestH5Fswmr.java b/java/test/TestH5Fswmr.java
index b65ebf2..5ca1a97 100644
--- a/java/test/TestH5Fswmr.java
+++ b/java/test/TestH5Fswmr.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package test;
diff --git a/java/test/TestH5G.java b/java/test/TestH5G.java
index 32329bb..1a67990 100644
--- a/java/test/TestH5G.java
+++ b/java/test/TestH5G.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package test;
diff --git a/java/test/TestH5Gbasic.java b/java/test/TestH5Gbasic.java
index b049e15..6ff7d03 100644
--- a/java/test/TestH5Gbasic.java
+++ b/java/test/TestH5Gbasic.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package test;
diff --git a/java/test/TestH5Giterate.java b/java/test/TestH5Giterate.java
index 17f594e..06c59e7 100644
--- a/java/test/TestH5Giterate.java
+++ b/java/test/TestH5Giterate.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package test;
diff --git a/java/test/TestH5Lbasic.java b/java/test/TestH5Lbasic.java
index c35519e..9e832f3 100644
--- a/java/test/TestH5Lbasic.java
+++ b/java/test/TestH5Lbasic.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package test;
diff --git a/java/test/TestH5Lcreate.java b/java/test/TestH5Lcreate.java
index 2fbd9e3..dcb076d 100644
--- a/java/test/TestH5Lcreate.java
+++ b/java/test/TestH5Lcreate.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package test;
diff --git a/java/test/TestH5Lparams.java b/java/test/TestH5Lparams.java
index 9a2c204..c8d5f5d 100644
--- a/java/test/TestH5Lparams.java
+++ b/java/test/TestH5Lparams.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package test;
diff --git a/java/test/TestH5Obasic.java b/java/test/TestH5Obasic.java
index b564089..923d2b1 100644
--- a/java/test/TestH5Obasic.java
+++ b/java/test/TestH5Obasic.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package test;
diff --git a/java/test/TestH5Ocopy.java b/java/test/TestH5Ocopy.java
index 62dd886..e730b9f 100644
--- a/java/test/TestH5Ocopy.java
+++ b/java/test/TestH5Ocopy.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package test;
@@ -239,6 +237,7 @@ public class TestH5Ocopy {
try {H5.H5Pclose(ocp_plist_id);} catch (Exception ex) {}
try {H5.H5Fclose(H5fid2);} catch (Exception ex) {}
}
+ _deleteFile("copy.h5");
}
@Test
diff --git a/java/test/TestH5Ocreate.java b/java/test/TestH5Ocreate.java
index 0edecba..559e12b 100644
--- a/java/test/TestH5Ocreate.java
+++ b/java/test/TestH5Ocreate.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package test;
diff --git a/java/test/TestH5Oparams.java b/java/test/TestH5Oparams.java
index 9398940..2af190f 100644
--- a/java/test/TestH5Oparams.java
+++ b/java/test/TestH5Oparams.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package test;
diff --git a/java/test/TestH5P.java b/java/test/TestH5P.java
index b8a4376..5aafb5b 100644
--- a/java/test/TestH5P.java
+++ b/java/test/TestH5P.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package test;
@@ -1198,25 +1196,40 @@ public class TestH5P {
}
@Test
- public void testH5P_file_space() {
+ public void testH5P_file_space_strategy() {
long[] threshold = {0};
- int[] strategy = {0};
+ boolean[] persist = {false};
+ int strategy = 0;
try {
- H5.H5Pget_file_space(fcpl_id, strategy, threshold);
- assertTrue("strategy: "+strategy[0], strategy[0] == HDF5Constants.H5F_FILE_SPACE_ALL);
+ strategy = H5.H5Pget_file_space_strategy(fcpl_id, persist, threshold);
+ assertTrue("strategy(default): "+strategy, strategy == HDF5Constants.H5F_FSPACE_STRATEGY_FSM_AGGR);
+ assertTrue("persist(default): "+persist[0], persist[0] == false);
+ assertTrue("theshold(default): "+threshold[0], threshold[0] == 1);
+ H5.H5Pset_file_space_strategy(fcpl_id, HDF5Constants.H5F_FSPACE_STRATEGY_PAGE, true, 1);
+ strategy = H5.H5Pget_file_space_strategy(fcpl_id, persist, threshold);
+ assertTrue("strategy: "+strategy, strategy == HDF5Constants.H5F_FSPACE_STRATEGY_PAGE);
+ assertTrue("persist: "+persist[0], persist[0] == true);
assertTrue("theshold: "+threshold[0], threshold[0] == 1);
- H5.H5Pset_file_space(fcpl_id, HDF5Constants.H5F_FILE_SPACE_ALL_PERSIST, 10);
- H5.H5Pget_file_space(fcpl_id, strategy, threshold);
- assertTrue("strategy: "+strategy[0], strategy[0] == HDF5Constants.H5F_FILE_SPACE_ALL_PERSIST);
- assertTrue("theshold: "+threshold[0], threshold[0] == 10);
- H5.H5Pset_file_space(fcpl_id, HDF5Constants.H5F_FILE_SPACE_VFD, 0);
- H5.H5Pget_file_space(fcpl_id, strategy, threshold);
- assertTrue("strategy: "+strategy[0], strategy[0] == HDF5Constants.H5F_FILE_SPACE_VFD);
- assertTrue("theshold: "+threshold[0], threshold[0] == 10);
}
catch (Throwable err) {
err.printStackTrace();
- fail("testH5P_file_space: " + err);
+ fail("testH5P_file_space_strategy: " + err);
+ }
+ }
+
+ @Test
+ public void testH5P_file_space_page_size() {
+ long page_size = 0;
+ try {
+ page_size = H5.H5Pget_file_space_page_size(fcpl_id);
+ assertTrue("page_size(default): "+page_size, page_size == 4096);
+ H5.H5Pset_file_space_page_size(fcpl_id, 512);
+ page_size = H5.H5Pget_file_space_page_size(fcpl_id);
+ assertTrue("page_size: "+page_size, page_size == 512);
+ }
+ catch (Throwable err) {
+ err.printStackTrace();
+ fail("testH5P_file_space_page_size: " + err);
}
}
}
diff --git a/java/test/TestH5PData.java b/java/test/TestH5PData.java
index 7fc154d..c414d67 100644
--- a/java/test/TestH5PData.java
+++ b/java/test/TestH5PData.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package test;
diff --git a/java/test/TestH5PL.java b/java/test/TestH5PL.java
index 9f1876c..aa59478 100644
--- a/java/test/TestH5PL.java
+++ b/java/test/TestH5PL.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package test;
@@ -69,6 +67,36 @@ public class TestH5PL {
}
}
+ @Test
+ public void TestH5PLpaths() {
+ try {
+ int original_entries = H5.H5PLsize();
+ H5.H5PLappend("path_one");
+ int plugin_entries = H5.H5PLsize();
+ assertTrue("H5.H5PLsize: "+plugin_entries, (original_entries+1) == plugin_entries);
+ H5.H5PLprepend("path_two");
+ plugin_entries = H5.H5PLsize();
+ assertTrue("H5.H5PLsize: "+plugin_entries, (original_entries+2) == plugin_entries);
+ H5.H5PLinsert("path_three", original_entries);
+ plugin_entries = H5.H5PLsize();
+ assertTrue("H5.H5PLsize: "+plugin_entries, (original_entries+3) == plugin_entries);
+ String first_path = H5.H5PLget(original_entries);
+ assertTrue("First path was : "+first_path + " ",first_path.compareToIgnoreCase("path_three")==0);
+ H5.H5PLreplace("path_four", original_entries);
+ first_path = H5.H5PLget(original_entries);
+ assertTrue("First path changed to : "+first_path + " ",first_path.compareToIgnoreCase("path_four")==0);
+ H5.H5PLremove(original_entries);
+ first_path = H5.H5PLget(original_entries);
+ assertTrue("First path now : "+first_path + " ",first_path.compareToIgnoreCase("path_two")==0);
+ plugin_entries = H5.H5PLsize();
+ assertTrue("H5.H5PLsize: "+plugin_entries, (original_entries+2) == plugin_entries);
+ }
+ catch (Throwable err) {
+ err.printStackTrace();
+ fail("TestH5PLpaths " + err);
+ }
+ }
+
@Ignore
public void TestH5PLdlopen() {
long file_id = -1;
diff --git a/java/test/TestH5Pfapl.java b/java/test/TestH5Pfapl.java
index e888e20..d4a2231 100644
--- a/java/test/TestH5Pfapl.java
+++ b/java/test/TestH5Pfapl.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package test;
diff --git a/java/test/TestH5Plist.java b/java/test/TestH5Plist.java
index 024237a..e318cc9 100644
--- a/java/test/TestH5Plist.java
+++ b/java/test/TestH5Plist.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package test;
diff --git a/java/test/TestH5Pvirtual.java b/java/test/TestH5Pvirtual.java
index 9372ae1..40cfbac 100644
--- a/java/test/TestH5Pvirtual.java
+++ b/java/test/TestH5Pvirtual.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package test;
@@ -220,6 +218,7 @@ public class TestH5Pvirtual {
try {H5.H5Dclose(H5did);} catch (Exception ex) {}
}
assertTrue("testH5Pget_virtual_count: "+num_map, num_map >= 0);
+ assertEquals(3, num_map);
}
@Test
@@ -406,7 +405,7 @@ public class TestH5Pvirtual {
}
}
- @Ignore
+ @Test
public void testH5Pset_get_virtual_printf_gap() {
long ret_val = -1;
H5did = _createDataset(H5fid, H5dsid, "VDS", H5dcplid, H5dapl_id);
@@ -415,7 +414,7 @@ public class TestH5Pvirtual {
assertTrue("H5Pget_virtual_printf_gap", ret_val >= 0);
assertEquals(0, ret_val);
H5.H5Pset_virtual_printf_gap(H5dapl_id, 2);
- ret_val = H5.H5Pget_virtual_view(H5dapl_id);
+ ret_val = H5.H5Pget_virtual_printf_gap(H5dapl_id);
assertTrue("H5Pget_virtual_printf_gap", ret_val >= 0);
assertEquals(2, ret_val);
}
diff --git a/java/test/TestH5R.java b/java/test/TestH5R.java
index 72e0bfb..5349855 100644
--- a/java/test/TestH5R.java
+++ b/java/test/TestH5R.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package test;
diff --git a/java/test/TestH5S.java b/java/test/TestH5S.java
index 909ab02..985342b 100644
--- a/java/test/TestH5S.java
+++ b/java/test/TestH5S.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package test;
diff --git a/java/test/TestH5Sbasic.java b/java/test/TestH5Sbasic.java
index 2731a06..3007495 100644
--- a/java/test/TestH5Sbasic.java
+++ b/java/test/TestH5Sbasic.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package test;
diff --git a/java/test/TestH5T.java b/java/test/TestH5T.java
index e03b97f..0c68d2e 100644
--- a/java/test/TestH5T.java
+++ b/java/test/TestH5T.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package test;
diff --git a/java/test/TestH5Tbasic.java b/java/test/TestH5Tbasic.java
index 950f1c7..3c2500b 100644
--- a/java/test/TestH5Tbasic.java
+++ b/java/test/TestH5Tbasic.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package test;
diff --git a/java/test/TestH5Tparams.java b/java/test/TestH5Tparams.java
index 4ebeea7..53d3a37 100644
--- a/java/test/TestH5Tparams.java
+++ b/java/test/TestH5Tparams.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package test;
diff --git a/java/test/TestH5Z.java b/java/test/TestH5Z.java
index bdf3f1d..31adf22 100644
--- a/java/test/TestH5Z.java
+++ b/java/test/TestH5Z.java
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
package test;
diff --git a/java/test/junit.sh.in b/java/test/junit.sh.in
index a9a71cb..32b0832 100644
--- a/java/test/junit.sh.in
+++ b/java/test/junit.sh.in
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
top_builddir=@top_builddir@
@@ -183,7 +181,16 @@ CLEAN_DATAFILES_AND_BLDDIR()
$RM $BLDDIR/JUnit-interface.out
$RM $BLDDIR/JUnit-interface.err
$RM $BLDDIR/JUnit-interface.ext
+      # skip rm if srcdir is the same as destdir;
+      # this occurs when the build/test is performed in the source dir
+      # and the cp commands in the Makefile would fail
+ SDIR=`$DIRNAME $tstfile`
+ INODE_SDIR=`$LS -i -d $SDIR | $AWK -F' ' '{print $1}'`
+ INODE_DDIR=`$LS -i -d $BLDDIR | $AWK -F' ' '{print $1}'`
+ if [ "$INODE_SDIR" != "$INODE_DDIR" ]; then
+ $RM $BLDDIR/JUnit-interface.ert
$RM $BLDDIR/JUnit-interface.txt
+ fi
}
# Print a line-line message left justified in a field of 70 characters
diff --git a/m4/aclocal_cxx.m4 b/m4/aclocal_cxx.m4
index 9d47193..29a0607 100644
--- a/m4/aclocal_cxx.m4
+++ b/m4/aclocal_cxx.m4
@@ -6,12 +6,10 @@ dnl All rights reserved.
dnl
dnl This file is part of HDF5. The full HDF5 copyright notice, including
dnl terms governing use, modification, and redistribution, is contained in
-dnl the files COPYING and Copyright.html. COPYING can be found at the root
-dnl of the source code distribution tree; Copyright.html can be found at the
-dnl root level of an installed copy of the electronic HDF5 document set and
-dnl is linked from the top-level documents page. It can also be found at
-dnl http://hdf.ncsa.uiuc.edu/HDF5/doc/Copyright.html. If you do not have
-dnl access to either file, you may request a copy from hdfhelp@ncsa.uiuc.edu.
+dnl the COPYING file, which can be found at the root of the source code
+dnl distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+dnl If you do not have access to either file, you may request a copy from
+dnl help@hdfgroup.org
dnl
dnl -------------------------------------------------------------------------
dnl -------------------------------------------------------------------------
diff --git a/m4/aclocal_fc.f90 b/m4/aclocal_fc.f90
index ddbe9c7..4c58e4e 100644
--- a/m4/aclocal_fc.f90
+++ b/m4/aclocal_fc.f90
@@ -1,3 +1,16 @@
+! COPYRIGHT
+! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+! Copyright by The HDF Group. *
+! All rights reserved. *
+! *
+! This file is part of HDF5. The full HDF5 copyright notice, including *
+! terms governing use, modification, and redistribution, is contained in *
+! the COPYING file, which can be found at the root of the source code *
+! distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+! If you do not have access to either file, you may request a copy from *
+! help@hdfgroup.org. *
+! * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+!
! This file contains all the configure test programs
! used by autotools and cmake. This avoids having to
! duplicate code for both cmake and autotool tests.
diff --git a/m4/aclocal_fc.m4 b/m4/aclocal_fc.m4
index 2213a7c..0bf3cb1 100644
--- a/m4/aclocal_fc.m4
+++ b/m4/aclocal_fc.m4
@@ -6,12 +6,10 @@ dnl All rights reserved.
dnl
dnl This file is part of HDF5. The full HDF5 copyright notice, including
dnl terms governing use, modification, and redistribution, is contained in
-dnl the files COPYING and Copyright.html. COPYING can be found at the root
-dnl of the source code distribution tree; Copyright.html can be found at the
-dnl root level of an installed copy of the electronic HDF5 document set and
-dnl is linked from the top-level documents page. It can also be found at
-dnl http://hdf.ncsa.uiuc.edu/HDF5/doc/Copyright.html. If you do not have
-dnl access to either file, you may request a copy from hdfhelp@ncsa.uiuc.edu.
+dnl the COPYING file, which can be found at the root of the source code
+dnl distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+dnl If you do not have access to either file, you may request a copy from
+dnl help@hdfgroup.org
dnl
dnl -------------------------------------------------------------------------
dnl -------------------------------------------------------------------------
diff --git a/release_docs/COPYING b/release_docs/COPYING
index 6903daf..6497ace 100644
--- a/release_docs/COPYING
+++ b/release_docs/COPYING
@@ -5,12 +5,9 @@
The files and subdirectories in this directory are part of HDF5.
The full HDF5 copyright notice, including terms governing use,
- modification, and redistribution, is contained in the files COPYING
- and Copyright.html. COPYING can be found at the root of the source
- code distribution tree; Copyright.html can be found at the root
- level of an installed copy of the electronic HDF5 document set and
- is linked from the top-level documents page. It can also be found
- at http://www.hdfgroup.org/HDF5/doc/Copyright.html. If you do not
- have access to either file, you may request a copy from
+ modification, and redistribution, is contained in the COPYING file
+ which can be found at the root of the source code distribution tree
+ or in https://support.hdfgroup.org/ftp/HDF5/releases. If you do
+ not have access to either file, you may request a copy from
help@hdfgroup.org.
diff --git a/release_docs/HISTORY-1_10.txt b/release_docs/HISTORY-1_10.txt
new file mode 100644
index 0000000..52eb273
--- /dev/null
+++ b/release_docs/HISTORY-1_10.txt
@@ -0,0 +1,2101 @@
+HDF5 History
+============
+
+This file contains development history of the HDF5 1.10 branch
+
+03. Release Information for hdf5-1.10.1
+02. Release Information for hdf5-1.10.0-patch1
+01. Release Information for hdf5-1.10.0
+
+[Search on the string '%%%%' for section breaks of each release.]
+
+%%%%1.10.1%%%%
+
+HDF5 version 1.10.1 released on 2017-04-27
+================================================================================
+
+INTRODUCTION
+
+This document describes the differences between HDF5-1.10.0-patch1 and
+HDF5 1.10.1, and contains information on the platforms tested and known
+problems in HDF5-1.10.1. For more details check the HISTORY*.txt files
+in the HDF5 source.
+
+Links to HDF5 1.10.1 source code, documentation, and additional materials can
+be found on The HDF5 web page at:
+
+ https://support.hdfgroup.org/HDF5/
+
+The HDF5 1.10.1 release can be obtained from:
+
+ https://support.hdfgroup.org/HDF5/release/obtain5.html
+
+User documentation for this release can be accessed directly at this location:
+
+ https://support.hdfgroup.org/HDF5/doc/
+
+New features in the HDF5-1.10.x release series, including brief general
+descriptions of some new and modified APIs, are described in the "New Features
+in HDF5 Release 1.10" document:
+
+ https://support.hdfgroup.org/HDF5/docNewFeatures/index.html
+
+All new and modified APIs are listed in detail in the "HDF5 Software Changes
+from Release to Release" document, in the section "Release 1.10.1 (current
+release) versus Release 1.10.0":
+
+ https://support.hdfgroup.org/HDF5/doc/ADGuide/Changes.html
+
+If you have any questions or comments, please send them to the HDF Help Desk:
+
+ help@hdfgroup.org
+
+
+CONTENTS
+
+- Major New Features Introduced in HDF5 1.10.1
+- Other New Features and Enhancements
+- Support for New Platforms, Languages, and Compilers
+- Bug Fixes since HDF5-1.10.0-patch1
+- Supported Platforms
+- Tested Configuration Features Summary
+- More Tested Platforms
+- Known Problems
+
+
+Major New Features Introduced in HDF5 1.10.1
+============================================
+
+For links to the RFCs and documentation in this section please view
+https://support.hdfgroup.org/HDF5/docNewFeatures in a web browser.
+
+________________________________________
+Metadata Cache Image
+________________________________________
+
+ HDF5 metadata is typically small, and scattered throughout the HDF5 file.
+ This can affect performance, particularly on large HPC systems. The
+ Metadata Cache Image feature can improve performance by writing the
+ metadata cache in a single block on file close, and then populating the
+ cache with the contents of this block on file open, thus avoiding the many
+ small I/O operations that would otherwise be required on file open and
+ close. See the RFC for complete details regarding this feature. Also,
+ see the Fine Tuning the Metadata Cache documentation.
+
+ At present, metadata cache images may not be generated by parallel
+ applications. Parallel applications can read files with metadata cache
+ images, but since this is a collective operation, a deadlock is possible
+ if one or more processes do not participate.
+
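+    An illustrative C sketch of requesting a cache image through a file
+    access property list (the file name is an arbitrary example, and the
+    H5AC_cache_image_config_t usage below is an assumption to be checked
+    against H5ACpublic.h in this release):
+
+        #include "hdf5.h"
+
+        hid_t create_with_cache_image(const char *name)
+        {
+            hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
+            H5AC_cache_image_config_t cfg;
+
+            /* fetch the current (default) cache image configuration */
+            cfg.version = H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION;
+            H5Pget_mdc_image_config(fapl, &cfg);
+
+            /* request that a cache image be written on file close */
+            cfg.generate_image = 1;
+            H5Pset_mdc_image_config(fapl, &cfg);
+
+            hid_t fid = H5Fcreate(name, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+            H5Pclose(fapl);
+            return fid;
+        }
+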
+________________________________________
+Metadata Cache Evict on Close
+________________________________________
+
+ The HDF5 library's metadata cache is fairly conservative about holding on
+ to HDF5 object metadata (object headers, chunk index structures, etc.),
+ which can cause the cache size to grow, resulting in memory pressure on
+ an application or system. The "evict on close" property will cause all
+ metadata for an object to be evicted from the cache as long as metadata
+ is not referenced from any other open object. See the Fine Tuning the
+ Metadata Cache documentation for information on the APIs.
+
+ At present, evict on close is disabled in parallel builds.
+
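+    An illustrative C sketch of enabling evict-on-close on a file access
+    property list (the file name is an arbitrary example):
+
+        #include "hdf5.h"
+
+        hid_t open_with_evict_on_close(const char *name)
+        {
+            hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
+
+            /* evict an object's cached metadata when its last handle closes */
+            H5Pset_evict_on_close(fapl, 1);
+
+            hid_t fid = H5Fopen(name, H5F_ACC_RDWR, fapl);
+            H5Pclose(fapl);
+            return fid;
+        }
+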
+________________________________________
+Paged Aggregation
+________________________________________
+
+ The current HDF5 file space allocation accumulates small pieces of metadata
+ and raw data in aggregator blocks which are not page aligned and vary
+ widely in sizes. The paged aggregation feature was implemented to provide
+ efficient paged access of these small pieces of metadata and raw data.
+ See the RFC for details. Also, see the File Space Management documentation.
+
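+    An illustrative C sketch of creating a file that uses the paged
+    aggregation strategy (file name, page size, and threshold are arbitrary
+    examples; the same calls appear as Java and C++ wrappers elsewhere in
+    this release):
+
+        #include "hdf5.h"
+
+        hid_t create_paged_file(const char *name)
+        {
+            hid_t fcpl = H5Pcreate(H5P_FILE_CREATE);
+
+            /* strategy, persist free-space tracking, free-space threshold */
+            H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 0, 1);
+            H5Pset_file_space_page_size(fcpl, 4096);
+
+            hid_t fid = H5Fcreate(name, H5F_ACC_TRUNC, fcpl, H5P_DEFAULT);
+            H5Pclose(fcpl);
+            return fid;
+        }
+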
+________________________________________
+Page Buffering
+________________________________________
+
+ Small and random I/O accesses on parallel file systems result in poor
+ performance for applications. Page buffering in conjunction with paged
+ aggregation can improve performance by giving an application control of
+ minimizing HDF5 I/O requests to a specific granularity and alignment.
+ See the RFC for details. Also, see the Page Buffering documentation.
+
+ At present, page buffering is disabled in parallel builds.
+
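+    An illustrative C sketch of enabling a page buffer when opening a file
+    that was created with paged aggregation (buffer size and percentages are
+    arbitrary examples):
+
+        #include "hdf5.h"
+
+        hid_t open_with_page_buffer(const char *name)
+        {
+            hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
+
+            /* 1 MiB page buffer; no minimum share of the buffer is
+             * reserved for metadata or raw data pages                */
+            H5Pset_page_buffer_size(fapl, 1024 * 1024, 0, 0);
+
+            hid_t fid = H5Fopen(name, H5F_ACC_RDWR, fapl);
+            H5Pclose(fapl);
+            return fid;
+        }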
+
+
+Other New Features and Enhancements
+===================================
+
+ Library
+ -------
+ - Added a mechanism for disabling the SWMR file locking scheme.
+
+ The file locking calls used in HDF5 1.10.0 (including patch1)
+ will fail when the underlying file system does not support file
+ locking or where locks have been disabled. To disable all file
+ locking operations, an environment variable named
+ HDF5_USE_FILE_LOCKING can be set to the five-character string
+ 'FALSE'. This does not fundamentally change HDF5 library
+ operation (aside from initial file open/create, SWMR is lock-free),
+ but users will have to be more careful about opening files
+      to avoid the problematic access patterns (e.g., multiple writers)
+      that the file locking was designed to prevent. (A brief usage sketch
+      follows at the end of this Library list.)
+
+ Additionally, the error message that is emitted when file lock
+ operations set errno to ENOSYS (typical when file locking has been
+ disabled) has been updated to describe the problem and potential
+ resolution better.
+
+ (DER, 2016/10/26, HDFFV-9918)
+
+ - The return type of H5Pget_driver_info() has been changed from void *
+ to const void *.
+
+ The pointer returned by this function points to internal library
+ memory and should not be freed by the user.
+
+ (DER, 2016/11/04, HDFFV-10017)
+
+ - The direct I/O VFD has been removed from the list of VFDs that
+ support SWMR.
+
+ This configuration was never officially tested and several SWMR
+ tests fail when this VFD is set.
+
+ (DER, 2016/11/03, HDFFV-10169)
+
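+    A brief illustrative C sketch covering the file-locking and
+    H5Pget_driver_info() items above (setenv() is POSIX; Windows code would
+    use an equivalent such as _putenv_s()):
+
+        #include <stdlib.h>
+        #include "hdf5.h"
+
+        hid_t open_without_locking(const char *name)
+        {
+            /* must be set before the file is opened */
+            setenv("HDF5_USE_FILE_LOCKING", "FALSE", 1);
+
+            hid_t fid  = H5Fopen(name, H5F_ACC_RDONLY, H5P_DEFAULT);
+            hid_t fapl = H5Fget_access_plist(fid);
+
+            /* the driver info pointer is const and points to internal
+             * library memory; it must not be freed by the caller      */
+            const void *drv_info = H5Pget_driver_info(fapl);
+            (void)drv_info;
+
+            H5Pclose(fapl);
+            return fid;
+        }
+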
+ Configuration:
+ --------------
+ - The minimum version of CMake required to build HDF5 is now 3.2.2.
+
+ (ADB, 2017/01/10)
+
+ - An --enable/disable-developer-warnings option has been added to
+ configure.
+
+ This disables warnings that do not indicate poor code quality such
+ as -Winline and gcc's -Wsuggest-attribute. Developer warnings are
+ disabled by default.
+
+ (DER, 2017/01/10)
+
+ - A bin/restore.sh script was added that reverts autogen.sh processing.
+
+ (DER, 2016/11/08)
+
+ - CMake: Added NAMESPACE hdf5:: to package configuration files to allow
+ projects using installed HDF5 binaries built with CMake to link with
+ them without specifying the HDF5 library location via IMPORTED_LOCATION.
+
+ (ABD, 2016/10/17, HDFFV-10003)
+
+ - CMake: Changed the CTEST_BUILD_CONFIGURATION option to
+ CTEST_CONFIGURATION_TYPE as recommended by the CMake documentation.
+
+ (ABD, 2016/10/17, HDFFV-9971)
+
+
+ Fortran Library:
+ ----------------
+
+ - The HDF5 Fortran library can now be compiled with the NAG compiler.
+
+ (MSB, 2017/2/10, HDFFV-9973)
+
+
+ C++ Library:
+ ------------
+
+ - The following C++ API wrappers have been added to the C++ Library:
+
+ // Sets/Gets the strategy and the threshold value that the library
+ // will employ in managing file space.
+ FileCreatPropList::setFileSpaceStrategy - H5Pset_file_space_strategy
+ FileCreatPropList::getFileSpaceStrategy - H5Pget_file_space_strategy
+
+ // Sets/Gets the file space page size for paged aggregation.
+ FileCreatPropList::setFileSpacePagesize - H5Pset_file_space_page_size
+ FileCreatPropList::getFileSpacePagesize - H5Pget_file_space_page_size
+
+ // Checks if the given ID is valid.
+ IdComponent::isValid - H5Iis_valid
+
+ // Sets/Gets the number of soft or user-defined links that can be
+ // traversed before a failure occurs.
+ LinkAccPropList::setNumLinks - H5Pset_nlinks
+ LinkAccPropList::getNumLinks - H5Pget_nlinks
+
+ // Returns a copy of the creation property list of a datatype.
+ DataType::getCreatePlist - H5Tget_create_plist
+
+ // Opens/Closes an object within a group or a file, regardless of object
+ // type
+ Group::getObjId - H5Oopen
+ Group::closeObjId - H5Oclose
+
+ // Maps elements of a virtual dataset to elements of the source dataset.
+ DSetCreatPropList::setVirtual - H5Pset_virtual
+
+ // Gets general information about this file.
+ H5File::getFileInfo - H5Fget_info2
+
+ // Returns the number of members in a type.
+ IdComponent::getNumMembers - H5Inmembers
+
+ // Determines if an element type exists.
+ IdComponent::typeExists - H5Itype_exists
+
+ // Determines if an object exists.
+ H5Location::exists - H5Lexists.
+
+ // Returns the header version of an HDF5 object.
+ H5Object::objVersion - H5Oget_info for version
+
+ (BMR, 2017/03/20, HDFFV-10004, HDFFV-10139, HDFFV-10145)
+
+ - New exception: ObjHeaderIException for H5O interface.
+
+ (BMR, 2017/03/15, HDFFV-10145)
+
+ - New class LinkAccPropList for link access property list, to be used by
+ wrappers of H5Lexists.
+
+ (BMR, 2017/01/04, HDFFV-10145)
+
+ - New constructors to open datatypes in ArrayType, CompType, DataType,
+ EnumType, FloatType, IntType, StrType, and VarLenType.
+
+ (BMR, 2016/12/26, HDFFV-10056)
+
+ - New member functions:
+
+ DSetCreatPropList::setNbit() to setup N-bit compression for a dataset.
+
+ ArrayType::getArrayNDims() const
+ ArrayType::getArrayDims() const
+ both to replace the non-const versions.
+
+ (BMR, 2016/04/25, HDFFV-8623, HDFFV-9725)
+
+
+ Tools:
+ ------
+ - The following options have been added to h5clear:
+ -s: clear the status_flags field in the file's superblock
+ -m: Remove the metadata cache image from the file
+
+ (QAK, 2017/03/22, PR#361)
+
+
+ High-Level APIs:
+ ---------------
+ - Added New Fortran 2003 API for h5tbmake_table_f.
+
+ (MSB, 2017/02/10, HDFFV-8486)
+
+
+
+Support for New Platforms, Languages, and Compilers
+===================================================
+
+ - Added NAG compiler
+
+
+
+Bug Fixes since HDF5-1.10.0-patch1 release
+==========================================
+
+ Library
+ -------
+    - An outdated data structure was used in H5D_CHUNK_DEBUG blocks, causing
+      compilation errors when H5D_CHUNK_DEBUG was defined. This is fixed.
+
+ (BMR, 2017/04/04, HDFFV-8089)
+
+ - SWMR implementation in the HDF5 1.10.0 and 1.10.0-patch1 releases has a
+ broken metadata flush dependency that manifested itself with the following
+ error at the end of the HDF5 error stack:
+
+ H5Dint.c line 846 in H5D__swmr_setup(): dataspace chunk index must be 0
+ for SWMR access, chunkno = 1
+ major: Dataset
+ minor: Bad value
+
+ It was also reported at https://github.com/areaDetector/ADCore/issues/203
+
+ The flush dependency is fixed in this release.
+
+ - Changed the plugins dlopen option from RTLD_NOW to RTLD_LAZY
+
+ (ABD, 2016/12/12, PR#201)
+
+ - A number of issues were fixed when reading/writing from/to corrupted
+ files to ensure that the library fails gracefully in these cases:
+
+ * Writing to a corrupted file that has an object message which is
+ incorrectly marked as sharable on disk results in a buffer overflow /
+ invalid write instead of a clean error message.
+
+ * Decoding data from a corrupted file with a dataset encoded with the
+ H5Z_NBIT decoding can result in a code execution vulnerability under
+ the context of the application using the HDF5 library.
+
+ * When decoding an array datatype from a corrupted file, the HDF5 library
+ fails to return an error in production if the number of dimensions
+ decoded is greater than the maximum rank.
+
+ * When decoding an "old style" array datatype from a corrupted file, the
+ HDF5 library fails to return an error in production if the number of
+ dimensions decoded is greater than the maximum rank.
+
+ (NAF, 2016/10/06, HDFFV-9950, HDFFV-9951, HDFFV-9992, HDFFV-9993)
+
+ - Fixed an error that would occur when copying an object with an attribute
+ which is a compound datatype consisting of a variable length string.
+
+ (VC, 2016/08/24, HDFFV-7991)
+
+ - H5DOappend will no longer fail if a dataset has no append callback
+ registered.
+
+ (VC, 2016/08/14, HDFFV-9960)
+
+ - Fixed an issue where H5Pset_alignment could result in misaligned blocks
+ with some input combinations, causing an assertion failure in debug mode.
+
+ (NAF, 2016/08/11, HDFFV-9948)
+
+ - Fixed a problem where a plugin compiled into a DLL in the default plugin
+ directory could not be found by the HDF5 library at runtime on Windows
+ when the HDF5_PLUGIN_PATH environment variable was not set.
+
+ (ABD, 2016/08/01, HDFFV-9706)
+
+ - Fixed an error that would occur when calling H5Adelete on an attribute
+ which is attached to an externally linked object in the target file and
+ whose datatype is a committed datatype in the main file.
+
+ (VC, 2016/07/06, HDFFV-9940)
+
+    - (a) Throw an error instead of an assertion failure when the v1 B-tree
+        level hits the 1-byte limit.
+      (b) Improved error recovery handling when conversion by
+        h5format_convert fails.
+
+ (VC, 2016/05/29, HDFFV-9434)
+
+ - Fixed a memory leak where an array used by the library to track SWMR
+ read retries was unfreed.
+
+ The leaked memory was small (on the order of a few tens of ints) and
+ allocated per-file. The memory was allocated (and lost) only when a
+ file was opened for SWMR access.
+
+ (DER, 2016/04/27, HDFFV-9786)
+
+ - Fixed a memory leak that could occur when opening a file for the first
+ time (including creating) and the call fails.
+
+ This occurred when the file-driver-specific info was not cleaned up.
+ The amount of memory leaked varied with the file driver, but would
+ normally be less than 1 kB.
+
+ (DER, 2016/12/06, HDFFV-10168)
+
+ - Fixed a failure in collective metadata writes.
+
+ This failure only appeared when collective metadata writes
+ were enabled (via H5Pset_coll_metadata_write()).
+
+ (JRM, 2017/04/10, HDFFV-10055)
+
+
+ Parallel Library
+ ----------------
+ - Fixed a bug that could occur when allocating a chunked dataset in parallel
+ with an alignment set and an alignment threshold greater than the chunk
+ size but less than or equal to the raw data aggregator size.
+
+ (NAF, 2016/08/11, HDFFV-9969)
+
+
+ Configuration
+ -------------
+    - Configuration now checks for the strtoll and strtoull functions
+      before using alternatives.
+
+ (ABD, 2017/03/17, PR#340)
+
+ - CMake uses a Windows pdb directory variable if available and
+ will generate both static and shared pdb files.
+
+ (ABD, 2017/02/06, HDFFV-9875)
+
+ - CMake now builds shared versions of tools.
+
+ (ABD, 2017/02/01, HDFFV-10123)
+
+ - Makefiles and test scripts have been updated to correctly remove files
+ created when running "make check" and to avoid removing any files under
+ source control. In-source builds followed by "make clean" and "make
+ distclean" should result in the original source files.
+ (LRK, 2017/01/17, HDFFV-10099)
+
+ - The tools directory has been divided into two separate source and test
+ directories. This resolves a build dependency and, as a result,
+ 'make check' will no longer fail in the tools directory if 'make' was
+ not executed first.
+
+ (ABD, 2016/10/27, HDFFV-9719)
+
+ - CMake: Fixed a timeout error that would occasionally occur when running
+ the virtual file driver tests simultaneously due to test directory
+ and file name collisions.
+
+ (ABD, 2016/09/19, HDFFV-9431)
+
+ - CMake: Fixed a command length overflow error by converting custom
+ commands inside CMakeTest.cmake files into regular dependencies and
+ targets.
+
+ (ABD, 2016/07/12, HDFFV-9939)
+
+    - Fixed a problem that prevented HDF5 from being built on 32-bit Cygwin
+      by condensing the Cygwin configuration files into a single file and
+      removing outdated compiler settings.
+
+ (ABD, 2016/07/12, HDFFV-9946)
+
+
+ Fortran
+ --------
+ - Changed H5S_ALL_F from INTEGER to INTEGER(HID_T)
+
+ (MSB, 2016/10/14, HDFFV-9987)
+
+
+ Tools
+ -----
+    - h5diff now correctly ignores the string padding (strpad) setting when
+      comparing strings.
+
+ (ABD, 2017/03/03, HDFFV-10128)
+
+ - h5repack now correctly parses the command line filter options.
+
+ (ABD, 2017/01/24, HDFFV-10046)
+
+ - h5diff now correctly returns an error when it cannot read data due
+ to an unavailable filter plugin.
+
+      (ADB, 2017/01/18, HDFFV-9994)
+
+ - Fixed an error in the compiler wrapper scripts (h5cc, h5fc, et al.)
+ in which they would erroneously drop the file argument specified via
+ the -o flag when the -o flag was specified before the -c flag on the
+ command line, resulting in a failure to compile.
+
+ (LRK, 2016/11/04, HDFFV-9938, HDFFV-9530)
+
+    - h5repack User Defined (UD) filter parameters were not parsed correctly.
+
+      The parsing code was reworked to read the correct parameter values and
+      to verify the number of parameters.
+
+ (ABD, 2016/10/19, HDFFV-9996, HDFFV-9974, HDFFV-9515, HDFFV-9039)
+
+ - h5repack allows the --enable-error-stack option on the command line.
+
+ (ADB, 2016/08/08, HDFFV-9775)
+
+
+ C++ APIs
+ --------
+    - The member function H5Location::getNumObjs() was moved to class Group
+      because objects exist only in a group or a file, and
+      H5Object::getNumAttrs() was moved to H5Location so that the number of
+      attributes at a given location can be queried.
+
+ (BMR, 2017/03/17, PR#466)
+
+ - Due to the change in the C API, the overloaded functions of
+ PropList::setProperty now need const for some arguments. They are
+ planned for deprecation and are replaced by new versions with proper
+ consts.
+
+ (BMR, 2017/03/17, PR#344)
+
+ - The high-level API Packet Table (PT) did not write data correctly when
+ the datatype is a compound type that has string type as one of the
+ members. This problem started in 1.8.15, after the fix of HDFFV-9042
+ was applied, which caused the Packet Table to use native type to access
+ the data. It should be up to the application to specify whether the
+ buffer to be read into memory is in the machine's native architecture.
+ Thus, the PT is fixed to not use native type but to make a copy of the
+ user's provided datatype during creation or the packet table's datatype
+ during opening. If an application wishes to use native type to read the
+ data, then the application will request that. However, the Packet Table
+ doesn't provide a way to specify memory datatype in this release. This
+ feature will be available in future releases.
+
+ (BMR, 2016/10/27, HDFFV-9758)
+
+ - The obsolete macros H5_NO_NAMESPACE and H5_NO_STD have been removed from
+ the HDF5 C++ API library.
+
+ (BMR, 2016/10/23, HDFFV-9532)
+
+    - Fixed the problem where a user-defined function could not access both
+      an attribute and a dataset through a single argument.
+
+ (BMR, 2016/10/11, HDFFV-9920)
+
+ - In-memory array information, ArrayType::rank and
+ ArrayType::dimensions, were removed. This is an implementation
+ detail and should not affect applications.
+
+ (BMR, 2016/04/25, HDFFV-9725)
+
+
+ Testing
+ -------
+ - Fixed a problem that caused tests using SWMR to occasionally fail when
+ running "make check" using parallel make.
+
+ (LRK, 2016/03/22, PR#338, PR#346, PR#358)
+
+
+Supported Platforms
+===================
+
+ Linux 2.6.32-573.18.1.el6.ppc64 gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-4)
+ #1 SMP ppc64 GNU/Linux g++ (GCC) 4.4.7 20120313 (Red Hat 4.4.7-4)
+ (ostrich) GNU Fortran (GCC) 4.4.7 20120313
+ (Red Hat 4.4.7-4)
+ IBM XL C/C++ V13.1
+ IBM XL Fortran V15.1
+
+ Linux 3.10.0-327.10.1.el7 GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP x86_64 GNU/Linux compilers:
+ (kituo/moohan) Version 4.8.5 20150623 (Red Hat 4.8.5-4)
+ Version 4.9.3, Version 5.2.0
+ Intel(R) C (icc), C++ (icpc), Fortran (icc)
+ compilers:
+ Version 15.0.3.187 Build 20150407
+ MPICH 3.1.4 compiled with GCC 4.9.3
+
+ SunOS 5.11 32- and 64-bit Sun C 5.12 SunOS_sparc
+ (emu) Sun Fortran 95 8.6 SunOS_sparc
+ Sun C++ 5.12 SunOS_sparc
+
+ Windows 7 Visual Studio 2012 w/ Intel Fortran 15 (cmake)
+ Visual Studio 2013 w/ Intel Fortran 15 (cmake)
+ Visual Studio 2015 w/ Intel Fortran 16 (cmake)
+
+ Windows 7 x64 Visual Studio 2012 w/ Intel Fortran 15 (cmake)
+ Visual Studio 2013 w/ Intel Fortran 15 (cmake)
+ Visual Studio 2015 w/ Intel Fortran 16 (cmake)
+ Visual Studio 2015 w/ MSMPI 8 (cmake)
+ Cygwin(CYGWIN_NT-6.1 2.8.0(0.309/5/3)
+ gcc and gfortran compilers (GCC 5.4.0)
+ (cmake and autotools)
+
+ Windows 10 Visual Studio 2015 w/ Intel Fortran 16 (cmake)
+ Cygwin(CYGWIN_NT-6.1 2.8.0(0.309/5/3)
+ gcc and gfortran compilers (GCC 5.4.0)
+ (cmake and autotools)
+
+ Windows 10 x64 Visual Studio 2015 w/ Intel Fortran 16 (cmake)
+
+ Mac OS X Mt. Lion 10.8.5 Apple clang/clang++ version 5.1 from Xcode 5.1
+ 64-bit gfortran GNU Fortran (GCC) 4.8.2
+ (swallow/kite) Intel icc/icpc/ifort version 15.0.3
+
+ Mac OS X Mavericks 10.9.5 Apple clang/clang++ version 6.0 from Xcode 6.2
+ 64-bit gfortran GNU Fortran (GCC) 4.9.2
+ (wren/quail) Intel icc/icpc/ifort version 15.0.3
+
+ Mac OS X Yosemite 10.10.5 Apple clang/clang++ version 6.1 from Xcode 7.0
+ 64-bit gfortran GNU Fortran (GCC) 4.9.2
+ (osx1010dev/osx1010test) Intel icc/icpc/ifort version 15.0.3
+
+ Mac OS X El Capitan 10.11.6 Apple clang/clang++ version 7.3 from Xcode 7.3
+ 64-bit gfortran GNU Fortran (GCC) 5.2.0
+ (osx1010dev/osx1010test) Intel icc/icpc/ifort version 16.0.2
+
+
+Tested Configuration Features Summary
+=====================================
+
+ In the tables below
+ y = tested
+ n = not tested in this release
+ C = Cluster
+ W = Workstation
+ x = not working in this release
+ dna = does not apply
+ ( ) = footnote appears below second table
+ <blank> = testing incomplete on this feature or platform
+
+Platform C F90/ F90 C++ zlib SZIP
+ parallel F2003 parallel
+Solaris2.11 32-bit n y/y n y y y
+Solaris2.11 64-bit n y/n n y y y
+Windows 7 y y/y n y y y
+Windows 7 x64 y y/y y y y y
+Windows 7 Cygwin n y/n n y y y
+Windows 7 x64 Cygwin n y/n n y y y
+Windows 10 y y/y n y y y
+Windows 10 x64 y y/y n y y y
+Mac OS X Mountain Lion 10.8.5 64-bit n y/y n y y y
+Mac OS X Mavericks 10.9.5 64-bit n y/y n y y y
+Mac OS X Yosemite 10.10.5 64-bit n y/y n y y y
+Mac OS X El Capitan 10.11.6 64-bit n y/y n y y y
+CentOS 7.2 Linux 2.6.32 x86_64 PGI n y/y n y y y
+CentOS 7.2 Linux 2.6.32 x86_64 GNU y y/y y y y y
+CentOS 7.2 Linux 2.6.32 x86_64 Intel n y/y n y y y
+Linux 2.6.32-573.18.1.el6.ppc64 n y/y n y y y
+
+
+Platform Shared Shared Shared Thread-
+ C libs F90 libs C++ libs safe
+Solaris2.11 32-bit y y y y
+Solaris2.11 64-bit y y y y
+Windows 7 y y y y
+Windows 7 x64 y y y y
+Windows 7 Cygwin n n n y
+Windows 7 x64 Cygwin n n n y
+Windows 10 y y y y
+Windows 10 x64 y y y y
+Mac OS X Mountain Lion 10.8.5 64-bit y n y y
+Mac OS X Mavericks 10.9.5 64-bit y n y y
+Mac OS X Yosemite 10.10.5 64-bit y n y y
+Mac OS X El Capitan 10.11.6 64-bit y n y y
+CentOS 7.2 Linux 2.6.32 x86_64 PGI y y y n
+CentOS 7.2 Linux 2.6.32 x86_64 GNU y y y y
+CentOS 7.2 Linux 2.6.32 x86_64 Intel y y y n
+Linux 2.6.32-573.18.1.el6.ppc64 y y y n
+
+Compiler versions for each platform are listed in the preceding
+"Supported Platforms" table.
+
+
+More Tested Platforms
+=====================
+
+The following platforms are not supported but have been tested for this release.
+
+ Linux 2.6.32-573.22.1.el6 GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP x86_64 GNU/Linux compilers:
+ (mayll/platypus) Version 4.4.7 20120313
+ Version 4.8.4
+ PGI C, Fortran, C++ for 64-bit target on
+ x86-64;
+ Version 16.10-0
+ Intel(R) C (icc), C++ (icpc), Fortran (icc)
+ compilers:
+ Version 15.0.3.187 (Build 20150407)
+ MPICH 3.1.4 compiled with GCC 4.9.3
+
+ Linux 3.10.0-327.18.2.el7 GNU C (gcc) and C++ (g++) compilers
+ #1 SMP x86_64 GNU/Linux Version 4.8.5 20150623 (Red Hat 4.8.5-4)
+ (jelly) with NAG Fortran Compiler Release 6.1(Tozai)
+ Intel(R) C (icc) and C++ (icpc) compilers
+ Version 15.0.3.187 (Build 20150407)
+ with NAG Fortran Compiler Release 6.1(Tozai)
+
+ Linux 2.6.32-573.18.1.el6.ppc64 MPICH mpich 3.1.4 compiled with
+ #1 SMP ppc64 GNU/Linux IBM XL C/C++ for Linux, V13.1
+ (ostrich) and IBM XL Fortran for Linux, V15.1
+
+ Debian 8.4 3.16.0-4-amd64 #1 SMP Debian 3.16.36-1 x86_64 GNU/Linux
+ gcc, g++ (Debian 4.9.2-10) 4.9.2
+ GNU Fortran (Debian 4.9.2-10) 4.9.2
+ (cmake and autotools)
+
+ Fedora 24 4.7.2-201.fc24.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux
+ gcc, g++ (GCC) 6.1.1 20160621
+ (Red Hat 6.1.1-3)
+ GNU Fortran (GCC) 6.1.1 20160621
+ (Red Hat 6.1.1-3)
+ (cmake and autotools)
+
+ Ubuntu 16.04.1 4.4.0-38-generic #57-Ubuntu SMP x86_64 GNU/Linux
+ gcc, g++ (Ubuntu 5.4.0-6ubuntu1~16.04.2)
+ 5.4.0 20160609
+ GNU Fortran (Ubuntu 5.4.0-6ubuntu1~16.04.2)
+ 5.4.0 20160609
+ (cmake and autotools)
+
+
+Known Problems
+==============
+
+ At present, metadata cache images may not be generated by parallel
+ applications. Parallel applications can read files with metadata cache
+ images, but since this is a collective operation, a deadlock is possible
+ if one or more processes do not participate.
+
+ Known problems in previous releases can be found in the HISTORY*.txt files
+ in the HDF5 source. Please report any new problems found to
+ help@hdfgroup.org.
+
+
+%%%%1.10.0-patch1%%%%
+
+
+HDF5 version 1.10.0-patch1 released on 2016-05-23
+================================================================================
+
+INTRODUCTION
+
+This document describes the differences between HDF5-1.8 series and
+HDF5 1.10.0 releases, and contains information on the platforms
+tested.
+
+Links to HDF5 1.10.0 source code can be found on The HDF Group's
+development FTP server at the following location:
+
+ https://www.hdfgroup.org/HDF5/release/obtain5110.html
+
+User documentation can be accessed directly at this location:
+
+ https://www.hdfgroup.org/HDF5/docNewFeatures/
+
+For more information, see the HDF5 home page:
+
+ https://www.hdfgroup.org/HDF5/
+
+If you have any questions or comments, please send them to the HDF
+Help Desk:
+
+ help@hdfgroup.org
+
+
+
+CONTENTS
+
+- New Features
+- Issues Addressed in this Release
+- Supported Platforms
+- Tested Configuration Features Summary
+- More Tested Platforms
+- Known Problems and Limitations
+
+
+
+New Features
+============
+This release supports the following features:
+
+ Configuration
+ -------------
+ - API Compatibility with HDF5 1.8 Flag Was Added
+
+ The 1.10 version of the HDF5 Library can be configured to operate
+ identically to the 1.8 library with the --with-default-api-version=v18
+ configure flag. This allows existing code to be compiled with the 1.10
+ library without requiring immediate changes to the application source
+      code. For additional configuration options and other details, see
+ "API Compatibility Macros in HDF5" at
+ https://www.hdfgroup.org/HDF5/doc/RM/APICompatMacros.html.
+
+ - Autotools Configuration Has Been Extensively Reworked
+
+ The autotools configuration options have been updated to allow more
+ fine-grained control of the build options and to correct some bugs.
+ See configure --help for comprehensive information on each option.
+
+ Specific changes:
+
+ * --enable-debug and --enable-production are no longer accepted.
+ Use --enable-build-mode=(debug | production) instead. These set
+ appropriate defaults for symbols, optimizations, and other
+ configuration options. These defaults can be overridden by the
+ user.
+
+ * Extra debug output messages are no longer enabled with
+ --enable-debug=<package list>. Use --enable-internal-debug=<pkg list>
+ instead.
+
+ * A new --enable-symbols option allows symbols to be generated
+ independently of the build mode. --disable-symbols can be used
+ to strip symbols from the binary.
+
+ * A new --enable-asserts option sets/unsets NDEBUG. This is
+ independent of the build mode. This also enables some extra
+ low-overhead debug checks in the library.
+
+ * A new --enable-profiling option sets profiling flags. This is
+ independent of the build mode.
+
+ * A new --enable-optimization option sets the optimization level.
+ This is independent of the build mode.
+
+ * Many of these options can take a flags string that will be used
+ to build the library. This can be useful for specifying custom
+ optimization flags such as -Os and -Ofast.
+
+ * gnu C++ and Fortran use configure sub-files that update the
+ build flags and turn on warnings. The increase in warnings when
+ building these wrapper libraries is due to these flag changes
+ and not to a decrease in code quality.
+
+ * The option to clear file buffers has been removed. Any buffer that
+ will eventually be written to disk will now always be memset
+ to zero. This prevents the previous contents of the buffer from
+ being written to the disk if the buffer contents are not
+ completely overwritten, which has security implications.
+
+ - LFS Changes
+
+ The way the autotools handle large file support (LFS) has been
+ overhauled in this release.
+
+ * We assume ftello and fseeko exist
+
+ * We no longer explicitly use the *64 I/O functions. Instead, we
+ rely on a mapping provided by _FILE_OFFSET_BITS or its equivalent.
+
+ * _LARGEFILE(64)_SOURCE is no longer exported via AM_CPPFLAGS.
+
+
+
+ Parallel Library
+ -----------------
+ - Collective Metadata I/O
+
+ Calls for HDF5 metadata can result in many small reads and writes.
+ On metadata reads, collective metadata I/O can improve performance
+ by allowing the library to perform optimizations when reading the
+ metadata by having one rank read the data and broadcasting it to
+ all other ranks.
+
+ Collective metadata I/O improves metadata write performance through
+ the construction of an MPI derived datatype that is then written
+ collectively in a single call. For more information, see
+ https://www.hdfgroup.org/HDF5/docNewFeatures/NewFeaturesCollectiveMetadataIoDocs.html.
+
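+      An illustrative C sketch of enabling collective metadata reads and
+      writes on a file access property list (requires a parallel build; the
+      communicator and info arguments are whatever the application already
+      uses):
+
+          #include <mpi.h>
+          #include "hdf5.h"
+
+          hid_t create_parallel_fapl(MPI_Comm comm, MPI_Info info)
+          {
+              hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
+
+              H5Pset_fapl_mpio(fapl, comm, info);
+              H5Pset_all_coll_metadata_ops(fapl, 1); /* collective reads  */
+              H5Pset_coll_metadata_write(fapl, 1);   /* collective writes */
+
+              return fapl;
+          }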
+
+
+ Library
+ --------
+    - Concurrent Access to HDF5 Files - Single Writer/Multiple Reader (SWMR)
+
+      The Single Writer/Multiple Reader (SWMR) feature enables users to
+ read data concurrently while writing it. Communications between the
+ processes and file locking are not required. The processes can run
+ on the same or on different platforms as long as they share a common
+ file system that is POSIX compliant. For more information, see the
+ Single-Writer/Multiple-Reader (SWMR) documentation at
+ https://www.hdfgroup.org/HDF5/docNewFeatures/NewFeaturesSwmrDocs.html.
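+      (A brief C usage sketch for SWMR and VDS follows this feature list.)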
+
+ - Virtual Dataset (VDS)
+
+ The VDS feature enables data to be accessed across HDF5 files
+ using standard HDF5 objects such as groups and datasets without
+ rewriting or rearranging the data. An HDF5 virtual dataset (VDS)
+ is an HDF5 dataset that is composed of source HDF5 datasets in
+ a predefined mapping. VDS can be used with the SWMR feature. For
+ documentation, check
+ https://www.hdfgroup.org/HDF5/docNewFeatures/NewFeaturesVirtualDatasetDocs.html.
+
+ - Persistent Free File Space Tracking
+
+ Usage patterns when working with an HDF5 file sometimes result in
+ wasted space within the file. This can also impair access times
+ when working with the resulting files. The new file space management
+ feature provides strategies for managing space in a file to improve
+ performance in both of these areas. For more information, see
+ https://www.hdfgroup.org/HDF5/docNewFeatures/NewFeaturesFileSpaceMgmtDocs.html.
+
+ - Version 3 Metadata Cache
+
+ The version 3 metadata cache moves management of metadata I/O from
+ the clients to the metadata cache proper. This change is essential for
+ SWMR and other features that have yet to be released.
+
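+      An illustrative C sketch of the SWMR and VDS features described above
+      (file, dataset, and dataspace identifiers are arbitrary examples and
+      error checking is omitted):
+
+          #include "hdf5.h"
+
+          /* SWMR: the writer creates the file with the latest file format,
+           * then switches to SWMR-write mode; readers open with SWMR-read. */
+          void swmr_writer(void)
+          {
+              hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
+              H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
+
+              hid_t fid = H5Fcreate("swmr.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+              /* ... create the datasets that will be written ... */
+              H5Fstart_swmr_write(fid);
+              /* a reader would open the same file with:
+               * H5Fopen("swmr.h5", H5F_ACC_RDONLY | H5F_ACC_SWMR_READ, fapl); */
+
+              H5Fclose(fid);
+              H5Pclose(fapl);
+          }
+
+          /* VDS: map the full extent of one source dataset into a virtual
+           * dataset that has an identical dataspace.                      */
+          hid_t make_vds_dcpl(hid_t vspace, hid_t src_space)
+          {
+              hid_t dcpl = H5Pcreate(H5P_DATASET_CREATE);
+              H5Pset_virtual(dcpl, vspace, "source.h5", "/source_dset",
+                             src_space);
+              return dcpl;
+          }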
+
+
+ C++ Library
+ ------------
+ - New Member Function Added to H5::ArrayType
+
+ The assignment operator ArrayType::operator= was added because
+ ArrayType has pointer data members.
+
+ (BMR - 2016/03/07, HDFFV-9562)
+
+
+
+ Tools
+ ------
+ - h5watch
+
+ The h5watch tool allows users to output new records appended to
+ a dataset under SWMR access as it grows. The functionality is
+ similar to the Unix user command "tail" with the follow option,
+ which outputs appended data as the file grows. For more
+ information, see
+ https://www.hdfgroup.org/HDF5/docNewFeatures/NewFeaturesSwmrDocs.html#Tools.
+
+ - h5format_convert
+
+      The h5format_convert tool allows users to convert the indexing type
+      of a chunked dataset created with a 1.10.x version of the HDF5
+      Library using the latest file format to the 1.8.x version 1 B-tree
+      indexing type. For example, datasets created with SWMR access can be
+      converted so that they can be accessed by the HDF5 1.8 library and
+      tools. The tool does not rewrite raw data, but it does rewrite HDF5
+      metadata.
+
+
+
+ High-Level APIs
+ ----------------
+ - H5DOappend
+
+ The function appends data to a dataset along a specified dimension.
+
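+      An illustrative C sketch of H5DOappend (it assumes a chunked,
+      extendible 1-D integer dataset named "/records" already exists;
+      H5DOappend is part of the high-level library, so hdf5_hl.h and the HL
+      library are required):
+
+          #include "hdf5.h"
+          #include "hdf5_hl.h"
+
+          void append_one_record(hid_t fid)
+          {
+              int value = 42;
+              hid_t dset = H5Dopen2(fid, "/records", H5P_DEFAULT);
+
+              /* extend dimension 0 by one element and write 'value' there */
+              H5DOappend(dset, H5P_DEFAULT, 0, 1, H5T_NATIVE_INT, &value);
+
+              H5Dclose(dset);
+          }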
+
+ C Packet Table API
+ ------------------
+ - Replacement of a Public Function with H5PTcreate
+
+      The existing function H5PTcreate_fl limits applications to using
+      deflate compression only. The public function H5PTcreate has been
+      added to replace H5PTcreate_fl; it takes a property list identifier
+      to provide flexibility on creation properties (see the sketch at the
+      end of this section).
+
+ (BMR - 2016/03/04, HDFFV-8623)
+
+ - New Public Functions: H5PTget_dataset and H5PTget_type
+
+ Two accessor functions have been added. H5PTget_dataset returns
+ the identifier of the dataset associated with the packet table,
+ and H5PTget_type returns the identifier of the datatype used by
+ the packet table.
+
+ (BMR, 2016/03/04, HDFFV-8623)
+
+ - Regarding #ifdef VLPT_REMOVED
+
+ The #ifdef VLPT_REMOVED blocks have been removed from the packet
+ table (PT) library source except for the following functions:
+ + H5PTis_varlen() has been made available again
+ + H5PTfree_vlen_readbuff() is now H5PTfree_vlen_buff()
+
+ (BMR - 2016/03/04, HDFFV-442)
+
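+      An illustrative C sketch tying the Packet Table changes above together
+      (table name, record type, chunk size, and compression level are
+      arbitrary examples):
+
+          #include "hdf5.h"
+          #include "hdf5_hl.h"
+
+          void packet_table_example(hid_t fid)
+          {
+              /* any creation property list may now be supplied, for example
+               * one that enables deflate compression                       */
+              hid_t dcpl = H5Pcreate(H5P_DATASET_CREATE);
+              H5Pset_deflate(dcpl, 6);
+
+              hid_t ptable = H5PTcreate(fid, "packets", H5T_NATIVE_INT,
+                                        (hsize_t)128, dcpl);
+
+              int recs[2] = {1, 2};
+              H5PTappend(ptable, 2, recs);
+
+              /* new accessors added in this release */
+              hid_t dset  = H5PTget_dataset(ptable);
+              hid_t dtype = H5PTget_type(ptable);
+              (void)dset; (void)dtype;
+
+              H5PTclose(ptable);
+              H5Pclose(dcpl);
+          }
+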
+ C++ Packet Table API
+ --------------------
+ - New Constructor Added to FL_PacketTable
+
+ An overloaded constructor has been added to FL_PacketTable and
+ takes a property list identifier to provide flexibility on
+ creation properties.
+
+ (BMR - 2016/03/08, HDFFV-8623)
+
+ - New Public Functions
+
+ Two accessor wrappers are added to class PacketTable.
+ PacketTable::GetDataset() returns the identifier of the dataset
+ associated with the packet table, and PacketTable::GetDatatype()
+ returns the identifier of the datatype that the packet table uses.
+
+ (BMR - 2016/03/04, HDFFV-8623)
+
+ - Member Functions with "char*" as an Argument
+
+ Overloaded functions were added to provide the "const char*"
+ argument; the existing version will be deprecated in future
+ releases.
+
+ (BMR - 2016/03/04, HDFFV-8623)
+
+ - Regarding #ifdef VLPT_REMOVED
+
+ The #ifdef VLPT_REMOVED blocks have been removed from the packet
+ table library source code except for the following functions:
+ + VL_PacketTable::IsVariableLength() was moved to PacketTable
+ + VL_PacketTable::FreeReadBuff() is now PacketTable::FreeBuff()
+
+ (BMR - 2016/03/04, HDFFV-442)
+
+
+
+ Java Wrapper Library
+ --------------------
+
+ The Java HDF5 JNI library has been integrated into the HDF5 repository.
+ The configure option is "--enable-java", and the CMake option is
+ HDF5_BUILD_JAVA:BOOL=ON. The package hierarchy has changed from the
+ HDF5 1.8 JNI, which was "ncsa.hdf.hdflib.hdf5", to HDF5 1.10,
+ "hdf.hdflib.hdf5".
+
+ A number of new APIs were added including some for VDS and SWMR.
+
+
+
+ Other Important Changes
+ -----------------------
+
+    The hid_t type was changed from a 32-bit to a 64-bit value.
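+
+      One practical consequence, sketched below with an illustrative file
+      name: identifiers should be kept in hid_t variables, since code that
+      previously stored them in a 32-bit int may now truncate the value.
+
+          #include "hdf5.h"
+
+          /* Keep identifiers in hid_t (now a 64-bit type), not int. */
+          static void open_close(void)
+          {
+              hid_t fid = H5Fopen("example.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
+              /* int fid = ...;  would risk truncating the 64-bit value */
+              H5Fclose(fid);
+          }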
+
+
+
+Issues Addressed in this Release Since 1.10.0
+=============================================
+
+    - h5diff would return abnormally from comparing attributes if one of the
+      datatypes was a variable-length (vlen) type. This resulted in a memory
+      leak as well as an incorrect report of the attribute comparison.
+
+ Fixed.
+ (ADB - 2016/04/26, HDFFV-9784)
+
+    - The JUnit-interface test may fail on Solaris platforms. The test that
+      verifies the content of the error stack written to stdout produces its
+      output in a different order on Solaris than on other platforms.
+
+      This test is skipped on Solaris.
+ (ADB - 2016/04/21, HDFFV-9734)
+
+    - When building HDF5 with Java using CMake and specifying Debug for
+      CMAKE_BUILD_TYPE, a command argument was missing for the tests of
+      the examples.
+
+ Fixed.
+ (ADB - 2016/04/21, HDFFV-9743)
+
+    - Changed h5diff to print a warning when a dataset is virtual so that
+      the data can still be compared. In addition, h5repack failed to copy
+      the data of a virtual dataset to the new file. The function
+      H5D__get_space_status was changed to correctly determine the
+      H5D_space_status_t allocation value.
+
+      The CMake build added the Fixed Array indexing tests that were
+      previously only in the autotools test scripts.
+
+      Fixed, and tests were added for the VDS issues.
+ (ADB,NAF - 2016/04/21, HDFFV-9756)
+
+    - The CMake build added the h5format_convert tool and tests that were
+      previously only in the autotools build and test scripts. The autotools
+      test script was reworked to allow CMake to execute the test suite in
+      parallel.
+
+      Also, the h5clear tool and its tests were added to the misc folder.
+
+ Fixed.
+ (ADB - 2016/04/21, HDFFV-9766)
+
+    - The CMake build added the h5watch tool and argument tests that were
+      previously only in the autotools build and test scripts. The
+      POSIX-only tests were not added to CMake.
+
+      The CMake HL tools files were refactored to move the CMake test
+      scripts into each tool folder.
+
+ Fixed.
+ (ADB - 2016/04/21, HDFFV-9770)
+
+    - Configure fails to detect valid real KINDs on FreeBSD 9.3 (i386) with
+      Fortran enabled.
+
+      Fixed. Added the exponential option to SELECTED_REAL_KIND to
+      distinguish KINDs of the same precision.
+      (MSB - 2016/05/14, HDFFV-9912)
+
+
+ - Corrected the f90 H5AWRITE_F integer interface's buf to be INTENT(IN).
+ (MSB - 2016/05/14)
+
+ - Configure fails in sed command on FreeBSD 9.3 (i386) with Fortran enabled.
+
+ Fixed.
+ (MSB - 2016/05/14,HDFFV-9912)
+
+ - Compile time error in H5f90global.F90 with IBM XL Fortran 14.1.0.13 on BG/Q with Fortran
+ enabled.
+
+ Fixed.
+ (MSB - 2016/05/16,HDFFV-9917)
+
+    - A CMake build with Fortran enabled does not install the module
+      h5fortkit.
+
+ Fixed.
+ (MSB - 2016/05/23,HDFFV-9923)
+
+
+Issues Addressed in this Release Since alpha1
+=============================================
+
+ - H5Pget_virtual_printf_gap, H5Pget_virtual_view, H5Pget_efile_prefix
+
+ The correct access property list settings from the
+ H5Pget_virtual_printf_gap, H5Pget_virtual_view, and
+ H5Pget_efile_prefix function calls could not be retrieved
+ using H5Dget_access_plist().
+
+ Fixed.
+
+ (DER and NAF - 2016/03/14, HDFFV-9716)
+
+ - h5dump
+
+ When h5dump was provided with the name of a non-existing file or
+ when optional arguments were the last option on the command line,
+ h5dump would segfault.
+
+ Fixed.
+
+ (ADB 2016/02/28 HDFFV-9639, HDFFV-9684)
+
+ - No Error Message for Corrupt Metadata
+
+ The HDF5 Library did not propagate an error when it encountered
+ corrupt metadata in an HDF5 file. The issue was fixed for a
+ specific file provided by a user. If you still see the problem,
+ please contact help@hdfgroup.org
+
+ Fixed.
+
+ (MC - 2016/02/18, HDFFV-9670)
+
+ - Problem Reading Chunked Datasets with a String Datatype Larger
+ Than the Chunk Size in Bytes
+
+ When the latest file format was used and when a chunked dataset
+ was created with a datatype with the size bigger than a chunk
+ size, the data could not be read back. The issue was reported
+ for chunked datasets with a string datatype and was confirmed
+ for other datatypes with the sizes bigger than the chunk size in
+ bytes.
+
+ Fixed.
+
+ (JM - 2016/02/13, HDFFV-9672)
+
+ - Control over the Location of External Files
+
+ Users were unable to specify the locations of external files.
+
+ Two APIs - H5Pget_efile_prefix and H5Pset_efile_prefix - were
+ added so that users could specify the locations of external files.
+
+ (DER - 2016/02/04, HDFFV-8740)
+
+
+
+Issues Addressed in this Release Since alpha0
+=============================================
+ - h5format_convert
+
+ The h5format_convert tool did not downgrade the version of the
+ superblock.
+
+ Fixed. The tool now will downgrade the version of the superblock.
+
+ (EIP 2016/01/11)
+
+ - Crashes with multiple threads: invalid pointers
+
+ It was reported that alpha0 crashed when used with multiple
+ threads. The issue exists in the HDF5 Library versions 1.8 and
+ 1.9. The problem is related to a shared file pointer used in some
+ miscellaneous data structures. The thread-safe library exposed
+ paths in the library where a file pointer became invalid.
+
+ The alpha1 release contains the fixes for the specific use case
+ as described in HDFFV-9643. We will keep working on identifying
+ and fixing other paths in the library with similar problems.
+
+ (EIP - 2016/01/15, HDFFV-9643)
+
+
+
+Supported Platforms
+===================
+The following platforms are supported and have been tested for this release.
+They are built with the configure process unless specified otherwise.
+
+ AIX 6.1 xlc/xlc_r 10.1.0.5
+ (NASA G-ADA) xlC/xlC_r 10.1.0.5
+ xlf90/xlf90_r 12.1.0.6
+
+ Linux 2.6.32-573.22.1.el6 GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP x86_64 GNU/Linux compilers:
+ (mayll/platypus) Version 4.4.7 20120313 (Red Hat 4.4.7-16)
+ Version 4.9.3, Version 5.2.0
+ PGI C, Fortran, C++ for 64-bit target on
+ x86-64;
+ Version 15.7-0
+ Intel(R) C (icc), C++ (icpc), Fortran (icc)
+ compilers:
+ Version 15.0.3.187 Build 20150407
+ MPICH 3.1.4 compiled with GCC 4.9.3
+
+ Linux 2.6.32-573.18.1.el6.ppc64 gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-11)
+ #1 SMP ppc64 GNU/Linux g++ (GCC) 4.4.7 20120313 (Red Hat 4.4.7-11)
+ (ostrich) GNU Fortran (GCC) 4.4.7 20120313 (Red Hat 4.4.7-11)
+ IBM XL C/C++ V13.1
+ IBM XL Fortran V15.1
+
+ Linux 3.10.0-327.10.1.el7 GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP x86_64 GNU/Linux compilers:
+ (kituo/moohan) Version 4.8.5 20150623 (Red Hat 4.8.5-4)
+ Version 4.9.3, Version 5.2.0
+ Intel(R) C (icc), C++ (icpc), Fortran (icc)
+ compilers:
+ Version 15.0.3.187 Build 20150407
+ MPICH 3.1.4 compiled with GCC 4.9.3
+
+ SunOS 5.11 32- and 64-bit Sun C 5.12 SunOS_sparc
+ (emu) Sun Fortran 95 8.6 SunOS_sparc
+ Sun C++ 5.12 SunOS_sparc
+
+ Windows 7 Visual Studio 2012 w/ Intel Fortran 15 (cmake)
+ Visual Studio 2013 w/ Intel Fortran 15 (cmake)
+ Visual Studio 2015 w/ Intel Fortran 16 (cmake)
+ Cygwin(CYGWIN_NT-6.1 2.2.1(0.289/5/3) gcc(4.9.3) compiler and gfortran)
+ (cmake and autotools)
+
+ Windows 7 x64 Visual Studio 2012 w/ Intel Fortran 15 (cmake)
+ Visual Studio 2013 w/ Intel Fortran 15 (cmake)
+ Visual Studio 2015 w/ Intel Fortran 16 (cmake)
+
+ Windows 8.1 Visual Studio 2012 w/ Intel Fortran 15 (cmake)
+ Visual Studio 2013 w/ Intel Fortran 15 (cmake)
+
+ Windows 8.1 x64 Visual Studio 2012 w/ Intel Fortran 15 (cmake)
+ Visual Studio 2013 w/ Intel Fortran 15 (cmake)
+
+ Mac OS X Mt. Lion 10.8.5 Apple clang/clang++ version 5.1 from Xcode 5.1
+ 64-bit gfortran GNU Fortran (GCC) 4.8.2
+ (swallow/kite) Intel icc/icpc/ifort version 15.0.3
+
+ Mac OS X Mavericks 10.9.5 Apple clang/clang++ version 6.0 from Xcode 6.2
+ 64-bit gfortran GNU Fortran (GCC) 4.9.2
+ (wren/quail) Intel icc/icpc/ifort version 15.0.3
+
+ Mac OS X Yosemite 10.10.5 Apple clang/clang++ version 6.0 from Xcode 7.0
+ 64-bit gfortran GNU Fortran (GCC) 4.9.2
+ (osx1010dev/osx1010test) Intel icc/icpc/ifort version 15.0.3
+
+ Mac OS X El Capitan 10.11.4 Apple clang/clang++ version 7.3.0 from Xcode 7.3
+ 64-bit gfortran GNU Fortran (GCC) 5.2.0
+ (osx1010dev/osx1010test) Intel icc/icpc/ifort version 15.0.3
+
+
+
+Tested Configuration Features Summary
+=====================================
+
+ In the tables below
+ y = tested
+ n = not tested in this release
+ C = Cluster
+ W = Workstation
+ x = not working in this release
+ dna = does not apply
+ ( ) = footnote appears below second table
+ <blank> = testing incomplete on this feature or platform
+
+Platform C F90/ F90 C++ zlib SZIP
+ parallel F2003 parallel
+SunOS 5.11 32-bit n y/y n y y y
+SunOS 5.11 64-bit n y/y n y y y
+Windows 7 y y/y n y y y
+Windows 7 x64 y y/y n y y y
+Windows 7 Cygwin n y/y n y y n
+Windows 8.1 n y/y n y y y
+Windows 8.1 x64 n y/y n y y y
+Mac OS X Mountain Lion 10.8.5 64-bit n y/y n y y y
+Mac OS X Mavericks 10.9.5 64-bit n y/y n y y y
+Mac OS X Yosemeti 10.10.5 64-bit n y/y n y y y
+AIX 6.1 32- and 64-bit n y/n n y y y
+CentOS 6.7 Linux 2.6.32 x86_64 GNU y y/y y y y y
+CentOS 6.7 Linux 2.6.32 x86_64 Intel n y/y n y y y
+CentOS 6.7 Linux 2.6.32 x86_64 PGI n y/y n y y y
+CentOS 7.1 Linux 3.10.0 x86_64 GNU y y/y y y y y
+CentOS 7.1 Linux 3.10.0 x86_64 Intel n y/y n y y y
+Linux 2.6.32-431.11.2.el6.ppc64 n y/n n y y y
+
+Platform Shared Shared Shared Thread-
+ C libs F90 libs C++ libs safe
+SunOS 5.11 32-bit y y y y
+SunOS 5.11 64-bit y y y y
+Windows 7 y y y y
+Windows 7 x64 y y y y
+Windows 7 Cygwin n n n y
+Windows 8.1 y y y y
+Windows 8.1 x64 y y y y
+Mac OS X Mountain Lion 10.8.5 64-bit y n y y
+Mac OS X Mavericks 10.9.5 64-bit y n y y
+Mac OS X Yosemeti 10.10.5 64-bit y n y y
+AIX 6.1 32- and 64-bit y n n y
+CentOS 6.7 Linux 2.6.32 x86_64 GNU y y y y
+CentOS 6.7 Linux 2.6.32 x86_64 Intel y y y y
+CentOS 6.7 Linux 2.6.32 x86_64 PGI y y y y
+CentOS 7.1 Linux 3.10.0 x86_64 GNU y y y y
+CentOS 7.1 Linux 3.10.0 x86_64 Intel y y y y
+Linux 2.6.32-431.11.2.el6.ppc64 y y y y
+
+Compiler versions for each platform are listed in the preceding
+"Supported Platforms" table.
+
+
+
+More Tested Platforms
+=====================
+The following platforms are not supported but have been tested for this release.
+
+ Linux 2.6.18-431.11.2.el6 g95 (GCC 4.0.3 (g95 0.94!)
+ #1 SMP x86_64 GNU/Linux
+ (platypus)
+
+ Windows 7 Visual Studio 2008 (cmake)
+
+ Windows 7 x64 Visual Studio 2008 (cmake)
+
+ Windows 7 x64 Visual Studio 2010 (cmake) with SWMR using GPFS
+
+ Windows 10 Visual Studio 2013 w/ Intel Fortran 15 (cmake)
+
+ Windows 10 x64 Visual Studio 2013 w/ Intel Fortran 15 (cmake)
+
+ Debian7.5.0 3.2.0-4-amd64 #1 SMP Debian 3.2.51-1 x86_64 GNU/Linux
+ gcc (Debian 4.7.2-5) 4.7.2
+ GNU Fortran (Debian 4.7.2-5) 4.7.2
+ (cmake and autotools)
+
+ Fedora20 3.15.3-200.fc20.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux
+ gcc (GCC) 4.8.3 20140624 (Red Hat 4.8.3-1)
+ GNU Fortran (GCC) 4.8.3 20140624 (Red Hat 4.8.3-1)
+ (cmake and autotools)
+
+ SUSE 13.1 3.11.10-17-desktop #1 SMP PREEMPT x86_64 x86_64 x86_64 GNU/Linux
+ gcc (SUSE Linux) 4.8.1
+ GNU Fortran (SUSE Linux) 4.8.1
+ (cmake and autotools)
+
+ Ubuntu 14.04 3.13.0-35-generic #62-Ubuntu SMP x86_64 GNU/Linux
+ gcc (Ubuntu/Linaro 4.9.1-0ubuntu1) 4.9.1
+ GNU Fortran (Ubuntu/Linaro 4.9.1-0ubuntu1) 4.9.1
+ (cmake and autotools)
+
+ hopper.nersc.gov PrgEnv-gnu/5.2.40
+ gcc (GCC) 4.9.2 20141030 (Cray Inc.)
+ GNU Fortran (GCC) 4.9.2 20141030 (Cray Inc.)
+ g++ (GCC) 4.9.2 20141030 (Cray Inc.)
+
+
+
+Known Problems and Limitations
+==============================
+This section contains the list of known problems and limitations introduced
+in this release of HDF5.
+
+Note: this list is not exhaustive of all known issues discovered in HDF5
+software to date. For a list of significant problems and known workarounds
+identified in past releases, please refer to:
+
+https://www.hdfgroup.org/HDF5/release/known_problems/
+
+The HDF Group also maintains a JIRA issue-tracking database which is used to
+capture all known issues which are too numerous to reasonably list in this
+document. The HDF Group is taking steps to make our JIRA issue database
+open to the public, and this section will refer to that database in a future
+release. In the meantime, please contact help@hdfgroup.org if you come across
+an issue not listed here or at the link above, and we will provide any
+information about known workarounds that we have or add it to our list of
+known issues if it is a new issue.
+
+ - The flush/refresh test occasionally fails on OS X platforms. This is
+ being investigated but no fix or workaround is available at this time.
+ (DER - 2016/03/22, HDFFV-9731)
+
+ - The VDS/SWMR test will fail with a segmentation fault if the library
+      is built with --enable-using-memchecker. This is due to a VDS shutdown
+ procedure freeing a shared resource too early when the memory
+ checker changes are built. This problem does not arise when the
+ memory checker changes are not used since the internal library free
+ lists behave differently. The memory checker configure option should
+ normally only be used under special circumstances so this should not
+ affect most users. Users should be aware that the --enable-using-memchecker
+ + VDS combination may cause a segfault, however, so Valgrind et al. may
+ have to be used with an HDF5 library built without the feature if this
+ proves to be a problem.
+ (DER - 2016/03/21, HDFFV-9732)
+
+ - SWMR feature limitations
+ The SWMR feature will only work if an HDF5 file under SWMR access resides
+ on a file system that obeys POSIX write() ordering semantics. Because of
+ this, SWMR will not work on network file systems such as NFS or SMB/Windows
+      file shares since those systems do not guarantee write ordering. SWMR
+ regression tests are likely to fail if run on a network file system. SWMR
+ is currently not tested on Windows though it can be tested manually
+ (some of the SWMR test programs are built by CMake), and there are no
+ obvious reasons for it to not work on NTFS or GPFS.
+ (EIP - 2016/03/20, HDFFV-9733)
+
+ - VDS feature limitation
+ Currently, the path to a VDS source file is interpreted as relative to the
+ directory where the executable program runs and not to the HDF5 file with
+ the VDS dataset unless a full path to the source file is specified during
+ the mapping.
+ (EIP - 2016/03/20, HDFFV-9724)
+
+ - The H5Lexists API changed behavior in HDF5-1.10 when used with a file handle
+ and root group name ("/"):
+
+ H5Lexists(fileid, "/")
+
+ In HDF5-1.8 it returns false (0) and in HDF5-1.10 it returns true (1).
+ The documentation will be updated with information regarding this change.
+ (LRK - 2016/03/30, HDFFV-8746)
+
+
+%%%%1.10.0%%%%
+
+HDF5 version 1.10.0 released on 2016-03-30
+================================================================================
+
+
+
+INTRODUCTION
+
+This document describes the differences between HDF5-1.8 series and
+HDF5 1.10.0 releases, and contains information on the platforms
+tested.
+
+Links to HDF5 1.10.0 source code can be found on The HDF Group's
+development FTP server at the following location:
+
+ https://www.hdfgroup.org/HDF5/release/obtain5110.html
+
+User documentation can be accessed directly at this location:
+
+ https://www.hdfgroup.org/HDF5/docNewFeatures/
+
+For more information, see the HDF5 home page:
+
+ https://www.hdfgroup.org/HDF5/
+
+If you have any questions or comments, please send them to the HDF
+Help Desk:
+
+ help@hdfgroup.org
+
+
+
+CONTENTS
+
+- New Features
+- Issues Addressed in this Release
+- Supported Platforms
+- Tested Configuration Features Summary
+- More Tested Platforms
+- Known Problems and Limitations
+
+
+
+New Features
+============
+This release supports the following features:
+
+ Configuration
+ -------------
+ - API Compatibility with HDF5 1.8 Flag Was Added
+
+ The 1.10 version of the HDF5 Library can be configured to operate
+ identically to the 1.8 library with the --with-default-api-version=v18
+ configure flag. This allows existing code to be compiled with the 1.10
+ library without requiring immediate changes to the application source
+      code. For additional configuration options and other details, see
+ "API Compatibility Macros in HDF5" at
+ https://www.hdfgroup.org/HDF5/doc/RM/APICompatMacros.html.
+
+ - Autotools Configuration Has Been Extensively Reworked
+
+ The autotools configuration options have been updated to allow more
+ fine-grained control of the build options and to correct some bugs.
+ See configure --help for comprehensive information on each option.
+
+ Specific changes:
+
+ * --enable-debug and --enable-production are no longer accepted.
+ Use --enable-build-mode=(debug | production) instead. These set
+ appropriate defaults for symbols, optimizations, and other
+ configuration options. These defaults can be overridden by the
+ user.
+
+ * Extra debug output messages are no longer enabled with
+ --enable-debug=<package list>. Use --enable-internal-debug=<pkg list>
+ instead.
+
+ * A new --enable-symbols option allows symbols to be generated
+ independently of the build mode. --disable-symbols can be used
+ to strip symbols from the binary.
+
+ * A new --enable-asserts option sets/unsets NDEBUG. This is
+ independent of the build mode. This also enables some extra
+ low-overhead debug checks in the library.
+
+ * A new --enable-profiling option sets profiling flags. This is
+ independent of the build mode.
+
+ * A new --enable-optimization option sets the optimization level.
+ This is independent of the build mode.
+
+ * Many of these options can take a flags string that will be used
+ to build the library. This can be useful for specifying custom
+ optimization flags such as -Os and -Ofast.
+
+ * gnu C++ and Fortran use configure sub-files that update the
+ build flags and turn on warnings. The increase in warnings when
+ building these wrapper libraries is due to these flag changes
+ and not to a decrease in code quality.
+
+ * The option to clear file buffers has been removed. Any buffer that
+ will eventually be written to disk will now always be memset
+ to zero. This prevents the previous contents of the buffer from
+ being written to the disk if the buffer contents are not
+ completely overwritten, which has security implications.
+
+ - LFS Changes
+
+ The way the autotools handle large file support (LFS) has been
+ overhauled in this release.
+
+ * We assume ftello and fseeko exist
+
+ * We no longer explicitly use the *64 I/O functions. Instead, we
+ rely on a mapping provided by _FILE_OFFSET_BITS or its equivalent.
+
+ * _LARGEFILE(64)_SOURCE is no longer exported via AM_CPPFLAGS.
+
+
+
+ Parallel Library
+ -----------------
+ - Collective Metadata I/O
+
+      Calls for HDF5 metadata can result in many small reads and writes.
+      On metadata reads, collective metadata I/O can improve performance
+      by allowing the library to have one rank read the metadata and
+      broadcast it to all other ranks.
+
+ Collective metadata I/O improves metadata write performance through
+ the construction of an MPI derived datatype that is then written
+ collectively in a single call. For more information, see
+ https://www.hdfgroup.org/HDF5/docNewFeatures/NewFeaturesCollectiveMetadataIoDocs.html.
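+
+      A minimal sketch of enabling both settings on a file access property
+      list, assuming an MPI-enabled build of the library (error checking
+      omitted):
+
+          #include "hdf5.h"
+          #include <mpi.h>
+
+          /* Build a parallel FAPL with collective metadata reads/writes. */
+          static hid_t make_parallel_fapl(MPI_Comm comm, MPI_Info info)
+          {
+              hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
+
+              H5Pset_fapl_mpio(fapl, comm, info);
+              H5Pset_all_coll_metadata_ops(fapl, 1); /* collective reads  */
+              H5Pset_coll_metadata_write(fapl, 1);   /* collective writes */
+              return fapl;             /* caller closes with H5Pclose() */
+          }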
+
+
+
+ Library
+ --------
+    - Concurrent Access to HDF5 Files - Single Writer/Multiple Reader (SWMR)
+
+      The Single Writer/Multiple Reader (SWMR) feature enables users to
+      read data concurrently while writing it. Communications between the
+ processes and file locking are not required. The processes can run
+ on the same or on different platforms as long as they share a common
+ file system that is POSIX compliant. For more information, see the
+ Single-Writer/Multiple-Reader (SWMR) documentation at
+ https://www.hdfgroup.org/HDF5/docNewFeatures/NewFeaturesSwmrDocs.html.
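+
+      A minimal writer-side sketch (the file name is a placeholder and
+      error checking is omitted); readers would open the same file with
+      H5F_ACC_RDONLY | H5F_ACC_SWMR_READ:
+
+          #include "hdf5.h"
+
+          static void swmr_writer_sketch(void)
+          {
+              hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
+              hid_t fid;
+
+              /* SWMR requires the latest file format. */
+              H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
+              fid = H5Fcreate("swmr.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+
+              /* ... create datasets here, before enabling SWMR writing ... */
+
+              H5Fstart_swmr_write(fid);
+
+              /* ... append data and flush so that readers can see it ... */
+
+              H5Fclose(fid);
+              H5Pclose(fapl);
+          }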
+
+ - Virtual Dataset (VDS)
+
+ The VDS feature enables data to be accessed across HDF5 files
+ using standard HDF5 objects such as groups and datasets without
+ rewriting or rearranging the data. An HDF5 virtual dataset (VDS)
+ is an HDF5 dataset that is composed of source HDF5 datasets in
+ a predefined mapping. VDS can be used with the SWMR feature. For
+ documentation, check
+ https://www.hdfgroup.org/HDF5/docNewFeatures/NewFeaturesVirtualDatasetDocs.html.
+
+ - Persistent Free File Space Tracking
+
+ Usage patterns when working with an HDF5 file sometimes result in
+ wasted space within the file. This can also impair access times
+ when working with the resulting files. The new file space management
+ feature provides strategies for managing space in a file to improve
+ performance in both of these areas. For more information, see
+ https://www.hdfgroup.org/HDF5/docNewFeatures/NewFeaturesFileSpaceMgmtDocs.html.
+
+ - Version 3 Metadata Cache
+
+ The version 3 metadata cache moves management of metadata I/O from
+ the clients to the metadata cache proper. This change is essential for
+ SWMR and other features that have yet to be released.
+
+
+
+ C++ Library
+ ------------
+ - New Member Function Added to H5::ArrayType
+
+ The assignment operator ArrayType::operator= was added because
+ ArrayType has pointer data members.
+
+ (BMR - 2016/03/07, HDFFV-9562)
+
+
+
+ Tools
+ ------
+ - h5watch
+
+ The h5watch tool allows users to output new records appended to
+ a dataset under SWMR access as it grows. The functionality is
+ similar to the Unix user command "tail" with the follow option,
+ which outputs appended data as the file grows. For more
+ information, see
+ https://www.hdfgroup.org/HDF5/docNewFeatures/NewFeaturesSwmrDocs.html#Tools.
+
+ - h5format_convert
+
+      The h5format_convert tool allows users to convert the indexing
+      type of a chunked dataset created with a 1.10.x version of the
+      HDF5 Library using the latest file format to the 1.8.x version 1
+      B-tree indexing type. For example, datasets created under SWMR
+      access can be converted so that they can be accessed by the HDF5
+      1.8 library and tools. The tool does not rewrite raw data, but it
+      does rewrite HDF5 metadata.
+
+
+
+ High-Level APIs
+ ----------------
+ - H5DOappend
+
+ The function appends data to a dataset along a specified dimension.
+
+
+ C Packet Table API
+ ------------------
+ - Replacement of a Public Function with H5PTcreate
+
+      The existing function H5PTcreate_fl limits applications to using
+      only the deflate compression. The public function
+ H5PTcreate has been added to replace H5PTcreate_fl. H5PTcreate
+ takes a property list identifier to provide flexibility on
+ creation properties.
+
+ (BMR - 2016/03/04, HDFFV-8623)
+
+ - New Public Functions: H5PTget_dataset and H5PTget_type
+
+ Two accessor functions have been added. H5PTget_dataset returns
+ the identifier of the dataset associated with the packet table,
+ and H5PTget_type returns the identifier of the datatype used by
+ the packet table.
+
+ (BMR, 2016/03/04, HDFFV-8623)
+
+ - Regarding #ifdef VLPT_REMOVED
+
+ The #ifdef VLPT_REMOVED blocks have been removed from the packet
+ table (PT) library source except for the following functions:
+ + H5PTis_varlen() has been made available again
+ + H5PTfree_vlen_readbuff() is now H5PTfree_vlen_buff()
+
+ (BMR - 2016/03/04, HDFFV-442)
+
+ C++ Packet Table API
+ --------------------
+ - New Constructor Added to FL_PacketTable
+
+ An overloaded constructor has been added to FL_PacketTable and
+ takes a property list identifier to provide flexibility on
+ creation properties.
+
+ (BMR - 2016/03/08, HDFFV-8623)
+
+ - New Public Functions
+
+ Two accessor wrappers are added to class PacketTable.
+ PacketTable::GetDataset() returns the identifier of the dataset
+ associated with the packet table, and PacketTable::GetDatatype()
+ returns the identifier of the datatype that the packet table uses.
+
+ (BMR - 2016/03/04, HDFFV-8623)
+
+ - Member Functions with "char*" as an Argument
+
+ Overloaded functions were added to provide the "const char*"
+ argument; the existing version will be deprecated in future
+ releases.
+
+ (BMR - 2016/03/04, HDFFV-8623)
+
+ - Regarding #ifdef VLPT_REMOVED
+
+ The #ifdef VLPT_REMOVED blocks have been removed from the packet
+ table library source code except for the following functions:
+ + VL_PacketTable::IsVariableLength() was moved to PacketTable
+ + VL_PacketTable::FreeReadBuff() is now PacketTable::FreeBuff()
+
+ (BMR - 2016/03/04, HDFFV-442)
+
+
+
+ Java Wrapper Library
+ --------------------
+
+ The Java HDF5 JNI library has been integrated into the HDF5 repository.
+ The configure option is "--enable-java", and the CMake option is
+ HDF5_BUILD_JAVA:BOOL=ON. The package hierarchy has changed from the
+ HDF5 1.8 JNI, which was "ncsa.hdf.hdflib.hdf5", to HDF5 1.10,
+ "hdf.hdflib.hdf5".
+
+ A number of new APIs were added including some for VDS and SWMR.
+
+
+
+ Other Important Changes
+ -----------------------
+
+    The hid_t type was changed from a 32-bit to a 64-bit value.
+
+
+
+Issues Addressed in this Release Since alpha1
+=============================================
+
+ - H5Pget_virtual_printf_gap, H5Pget_virtual_view, H5Pget_efile_prefix
+
+ The correct access property list settings from the
+ H5Pget_virtual_printf_gap, H5Pget_virtual_view, and
+ H5Pget_efile_prefix function calls could not be retrieved
+ using H5Dget_access_plist().
+
+ Fixed.
+
+ (DER and NAF - 2016/03/14, HDFFV-9716)
+
+ - h5dump
+
+ When h5dump was provided with the name of a non-existing file or
+ when optional arguments were the last option on the command line,
+ h5dump would segfault.
+
+ Fixed.
+
+ (ADB 2016/02/28 HDFFV-9639, HDFFV-9684)
+
+ - No Error Message for Corrupt Metadata
+
+ The HDF5 Library did not propagate an error when it encountered
+ corrupt metadata in an HDF5 file. The issue was fixed for a
+ specific file provided by a user. If you still see the problem,
+ please contact help@hdfgroup.org
+
+ Fixed.
+
+ (MC - 2016/02/18, HDFFV-9670)
+
+ - Problem Reading Chunked Datasets with a String Datatype Larger
+ Than the Chunk Size in Bytes
+
+ When the latest file format was used and when a chunked dataset
+ was created with a datatype with the size bigger than a chunk
+ size, the data could not be read back. The issue was reported
+ for chunked datasets with a string datatype and was confirmed
+ for other datatypes with the sizes bigger than the chunk size in
+ bytes.
+
+ Fixed.
+
+ (JM - 2016/02/13, HDFFV-9672)
+
+ - Control over the Location of External Files
+
+ Users were unable to specify the locations of external files.
+
+ Two APIs - H5Pget_efile_prefix and H5Pset_efile_prefix - were
+ added so that users could specify the locations of external files.
+
+ (DER - 2016/02/04, HDFFV-8740)
+
+
+
+Issues Addressed in this Release Since alpha0
+=============================================
+ - h5format_convert
+
+ The h5format_convert tool did not downgrade the version of the
+ superblock.
+
+ Fixed. The tool now will downgrade the version of the superblock.
+
+ (EIP 2016/01/11)
+
+ - Crashes with multiple threads: invalid pointers
+
+ It was reported that alpha0 crashed when used with multiple
+ threads. The issue exists in the HDF5 Library versions 1.8 and
+ 1.9. The problem is related to a shared file pointer used in some
+ miscellaneous data structures. The thread-safe library exposed
+ paths in the library where a file pointer became invalid.
+
+ The alpha1 release contains the fixes for the specific use case
+ as described in HDFFV-9643. We will keep working on identifying
+ and fixing other paths in the library with similar problems.
+
+ (EIP - 2016/01/15, HDFFV-9643)
+
+
+
+Supported Platforms
+===================
+The following platforms are supported and have been tested for this release.
+They are built with the configure process unless specified otherwise.
+
+ AIX 6.1 xlc/xlc_r 10.1.0.5
+ (NASA G-ADA) xlC/xlC_r 10.1.0.5
+ xlf90/xlf90_r 12.1.0.6
+
+ Linux 2.6.32-573.18.1.el6 GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP x86_64 GNU/Linux compilers:
+ (mayll/platypus) Version 4.4.7 20120313 (Red Hat 4.4.7-16)
+ Version 4.9.3, Version 5.2.0
+ PGI C, Fortran, C++ for 64-bit target on
+ x86-64;
+ Version 15.7-0
+ Intel(R) C (icc), C++ (icpc), Fortran (icc)
+ compilers:
+ Version 15.0.3.187 Build 20150407
+ MPICH 3.1.4 compiled with GCC 4.9.3
+
+ Linux 2.6.32-504.8.1.el6.ppc64 gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-11)
+ #1 SMP ppc64 GNU/Linux g++ (GCC) 4.4.7 20120313 (Red Hat 4.4.7-11)
+ (ostrich) GNU Fortran (GCC) 4.4.7 20120313 (Red Hat 4.4.7-11)
+ IBM XL C/C++ V13.1
+ IBM XL Fortran V15.1
+
+ Linux 3.10.0-327.10.1.el7 GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP x86_64 GNU/Linux compilers:
+ (kituo/moohan) Version 4.8.5 20150623 (Red Hat 4.8.5-4)
+ Version 4.9.3, Version 5.2.0
+ Intel(R) C (icc), C++ (icpc), Fortran (icc)
+ compilers:
+ Version 15.0.3.187 Build 20150407
+ MPICH 3.1.4 compiled with GCC 4.9.3
+
+ SunOS 5.11 32- and 64-bit Sun C 5.12 SunOS_sparc
+ (emu) Sun Fortran 95 8.6 SunOS_sparc
+ Sun C++ 5.12 SunOS_sparc
+
+ Windows 7 Visual Studio 2012 w/ Intel Fortran 15 (cmake)
+ Visual Studio 2013 w/ Intel Fortran 15 (cmake)
+ Visual Studio 2015 w/ Intel Fortran 16 (cmake)
+ Cygwin(CYGWIN_NT-6.1 2.2.1(0.289/5/3) gcc(4.9.3) compiler and gfortran)
+ (cmake and autotools)
+
+ Windows 7 x64 Visual Studio 2012 w/ Intel Fortran 15 (cmake)
+ Visual Studio 2013 w/ Intel Fortran 15 (cmake)
+ Visual Studio 2015 w/ Intel Fortran 16 (cmake)
+
+ Windows 8.1 Visual Studio 2012 w/ Intel Fortran 15 (cmake)
+ Visual Studio 2013 w/ Intel Fortran 15 (cmake)
+
+ Windows 8.1 x64 Visual Studio 2012 w/ Intel Fortran 15 (cmake)
+ Visual Studio 2013 w/ Intel Fortran 15 (cmake)
+
+ Mac OS X Mt. Lion 10.8.5 Apple clang/clang++ version 5.1 from Xcode 5.1
+ 64-bit gfortran GNU Fortran (GCC) 4.8.2
+ (swallow/kite) Intel icc/icpc/ifort version 15.0.3
+
+ Mac OS X Mavericks 10.9.5 Apple clang/clang++ version 6.0 from Xcode 6.2.0
+ 64-bit gfortran GNU Fortran (GCC) 4.9.2
+ (wren/quail) Intel icc/icpc/ifort version 15.0.3
+
+ Mac OS X Yosemite 10.10.5 Apple clang/clang++ version 6.0 from Xcode 7.0.0
+ 64-bit gfortran GNU Fortran (GCC) 4.9.2
+ (osx1010dev/osx1010test) Intel icc/icpc/ifort version 15.0.3
+
+ Mac OS X El Capitan 10.11.3 Apple clang/clang++ version 7.0.2 from Xcode 7.0.2
+ 64-bit gfortran GNU Fortran (GCC) 5.2.0
+ (osx1010dev/osx1010test) Intel icc/icpc/ifort version 15.0.3
+
+
+
+Tested Configuration Features Summary
+=====================================
+
+ In the tables below
+ y = tested
+ n = not tested in this release
+ C = Cluster
+ W = Workstation
+ x = not working in this release
+ dna = does not apply
+ ( ) = footnote appears below second table
+ <blank> = testing incomplete on this feature or platform
+
+Platform C F90/ F90 C++ zlib SZIP
+ parallel F2003 parallel
+SunOS 5.11 32-bit n y/y n y y y
+SunOS 5.11 64-bit n y/y n y y y
+Windows 7 y y/y n y y y
+Windows 7 x64 y y/y n y y y
+Windows 7 Cygwin n y/y n y y n
+Windows 8.1 n y/y n y y y
+Windows 8.1 x64 n y/y n y y y
+Mac OS X Mountain Lion 10.8.5 64-bit n y/y n y y y
+Mac OS X Mavericks 10.9.5 64-bit n y/y n y y y
+Mac OS X Yosemeti 10.10.5 64-bit n y/y n y y y
+AIX 6.1 32- and 64-bit n y/n n y y y
+CentOS 6.7 Linux 2.6.32 x86_64 GNU y y/y y y y y
+CentOS 6.7 Linux 2.6.32 x86_64 Intel n y/y n y y y
+CentOS 6.7 Linux 2.6.32 x86_64 PGI n y/y n y y y
+CentOS 7.1 Linux 3.10.0 x86_64 GNU y y/y y y y y
+CentOS 7.1 Linux 3.10.0 x86_64 Intel n y/y n y y y
+Linux 2.6.32-431.11.2.el6.ppc64 n y/n n y y y
+
+Platform Shared Shared Shared Thread-
+ C libs F90 libs C++ libs safe
+SunOS 5.11 32-bit y y y y
+SunOS 5.11 64-bit y y y y
+Windows 7 y y y y
+Windows 7 x64 y y y y
+Windows 7 Cygwin n n n y
+Windows 8.1 y y y y
+Windows 8.1 x64 y y y y
+Mac OS X Mountain Lion 10.8.5 64-bit y n y y
+Mac OS X Mavericks 10.9.5 64-bit y n y y
+Mac OS X Yosemeti 10.10.5 64-bit y n y y
+AIX 6.1 32- and 64-bit y n n y
+CentOS 6.7 Linux 2.6.32 x86_64 GNU y y y y
+CentOS 6.7 Linux 2.6.32 x86_64 Intel y y y y
+CentOS 6.7 Linux 2.6.32 x86_64 PGI y y y y
+CentOS 7.1 Linux 3.10.0 x86_64 GNU y y y y
+CentOS 7.1 Linux 3.10.0 x86_64 Intel y y y y
+Linux 2.6.32-431.11.2.el6.ppc64 y y y y
+
+Compiler versions for each platform are listed in the preceding
+"Supported Platforms" table.
+
+
+
+More Tested Platforms
+=====================
+The following platforms are not supported but have been tested for this release.
+
+ Linux 2.6.18-431.11.2.el6 g95 (GCC 4.0.3 (g95 0.94!)
+ #1 SMP x86_64 GNU/Linux
+ (platypus)
+
+ Windows 7 Visual Studio 2008 (cmake)
+
+ Windows 7 x64 Visual Studio 2008 (cmake)
+
+ Windows 7 x64 Visual Studio 2010 (cmake) with SWMR using GPFS
+
+ Windows 10 Visual Studio 2013 w/ Intel Fortran 15 (cmake)
+
+ Windows 10 x64 Visual Studio 2013 w/ Intel Fortran 15 (cmake)
+
+ Debian7.5.0 3.2.0-4-amd64 #1 SMP Debian 3.2.51-1 x86_64 GNU/Linux
+ gcc (Debian 4.7.2-5) 4.7.2
+ GNU Fortran (Debian 4.7.2-5) 4.7.2
+ (cmake and autotools)
+
+ Fedora20 3.15.3-200.fc20.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux
+ gcc (GCC) 4.8.3 20140624 (Red Hat 4.8.3-1)
+ GNU Fortran (GCC) 4.8.3 20140624 (Red Hat 4.8.3-1)
+ (cmake and autotools)
+
+ SUSE 13.1 3.11.10-17-desktop #1 SMP PREEMPT x86_64 x86_64 x86_64 GNU/Linux
+ gcc (SUSE Linux) 4.8.1
+ GNU Fortran (SUSE Linux) 4.8.1
+ (cmake and autotools)
+
+ Ubuntu 14.04 3.13.0-35-generic #62-Ubuntu SMP x86_64 GNU/Linux
+ gcc (Ubuntu/Linaro 4.9.1-0ubuntu1) 4.9.1
+ GNU Fortran (Ubuntu/Linaro 4.9.1-0ubuntu1) 4.9.1
+ (cmake and autotools)
+
+ hopper.nersc.gov PrgEnv-gnu/5.2.40
+ gcc (GCC) 4.9.2 20141030 (Cray Inc.)
+ GNU Fortran (GCC) 4.9.2 20141030 (Cray Inc.)
+ g++ (GCC) 4.9.2 20141030 (Cray Inc.)
+
+
+
+Known Problems and Limitations
+==============================
+This section contains the list of known problems and limitations introduced
+in this release of HDF5.
+
+Note: this list is not exhaustive of all known issues discovered in HDF5
+software to date. For a list of significant problems and known workarounds
+identified in past releases, please refer to:
+
+https://www.hdfgroup.org/HDF5/release/known_problems/
+
+The HDF Group also maintains a JIRA issue-tracking database which is used to
+capture all known issues which are too numerous to reasonably list in this
+document. The HDF Group is taking steps to make our JIRA issue database
+open to the public, and this section will refer to that database in a future
+release. In the meantime, please contact help@hdfgroup.org if you come across
+an issue not listed here or at the link above, and we will provide any
+information about known workarounds that we have or add it to our list of
+known issues if it is a new issue.
+
+    - The JUnit-interface test may fail on Solaris platforms. The test that
+      verifies the content of the error stack written to stdout produces its
+      output in a different order on Solaris than on other platforms. Use the
+      make -i option to continue testing beyond the java/test folder.
+ (ADB - 2016/03/22, HDFFV-9734)
+
+ - The flush/refresh test occasionally fails on OS X platforms. This is
+ being investigated but no fix or workaround is available at this time.
+ (DER - 2016/03/22, HDFFV-9731)
+
+ - The VDS/SWMR test will fail with a segmentation fault if the library
+      is built with --enable-using-memchecker. This is due to a VDS shutdown
+ procedure freeing a shared resource too early when the memory
+ checker changes are built. This problem does not arise when the
+ memory checker changes are not used since the internal library free
+ lists behave differently. The memory checker configure option should
+ normally only be used under special circumstances so this should not
+ affect most users. Users should be aware that the --enable-using-memchecker
+ + VDS combination may cause a segfault, however, so Valgrind et al. may
+ have to be used with an HDF5 library built without the feature if this
+ proves to be a problem.
+ (DER - 2016/03/21, HDFFV-9732)
+
+ - SWMR feature limitations
+ The SWMR feature will only work if an HDF5 file under SWMR access resides
+ on a file system that obeys POSIX write() ordering semantics. Because of
+ this, SWMR will not work on network file systems such as NFS or SMB/Windows
+      file shares since those systems do not guarantee write ordering. SWMR
+ regression tests are likely to fail if run on a network file system. SWMR
+ is currently not tested on Windows though it can be tested manually
+ (some of the SWMR test programs are built by CMake), and there are no
+ obvious reasons for it to not work on NTFS or GPFS.
+ (EIP - 2016/03/20, HDFFV-9733)
+
+ - VDS feature limitation
+ Currently, the path to a VDS source file is interpreted as relative to the
+ directory where the executable program runs and not to the HDF5 file with
+ the VDS dataset unless a full path to the source file is specified during
+ the mapping.
+ (EIP - 2016/03/20, HDFFV-9724)
+
+    - When building HDF5 with Java using CMake and specifying Debug for
+      CMAKE_BUILD_TYPE, a command argument is missing for the tests of
+      the examples.
+
+      This error can be avoided by not building Java (HDF5_BUILD_JAVA:BOOL=OFF)
+      or by not building the examples (HDF5_BUILD_EXAMPLES:BOOL=OFF).
+ (LRK - 2016/03/30, HDFFV-9743)
+
+ - The H5Lexists API changed behavior in HDF5-1.10 when used with a file handle
+ and root group name ("/"):
+
+ H5Lexists(fileid, "/")
+
+ In HDF5-1.8 it returns false (0) and in HDF5-1.10 it returns true (1).
+ The documentation will be updated with information regarding this change.
+ (LRK - 2016/03/30, HDFFV-8746)
+
diff --git a/release_docs/HISTORY-1_8.txt b/release_docs/HISTORY-1_8.txt
new file mode 100644
index 0000000..4465d06
--- /dev/null
+++ b/release_docs/HISTORY-1_8.txt
@@ -0,0 +1,12344 @@
+HDF5 History
+============
+
+This file contains development history of HDF5 1.8 branch
+
+19. Release Information for hdf5-1.8.17
+18. Release Information for hdf5-1.8.16
+17. Release Information for hdf5-1.8.15
+16. Release Information for hdf5-1.8.14
+15. Release Information for hdf5-1.8.13
+14. Release Information for hdf5-1.8.12
+13. Release Information for hdf5-1.8.11
+12. Release Information for hdf5-1.8.10-patch1
+11. Release Information for hdf5-1.8.10
+10. Release Information for hdf5-1.8.9
+09. Release Information for hdf5-1.8.8
+08. Release Information for hdf5-1.8.7
+07. Release Information for hdf5-1.8.6
+06. Release Information for hdf5-1.8.5
+05. Release Information for hdf5-1.8.4
+04. Release Information for hdf5-1.8.3
+03. Release Information for hdf5-1.8.2
+02. Release Information for hdf5-1.8.1
+01. Release Information for hdf5-1.8.0
+
+[Search on the string '%%%%' for section breaks of each release.]
+
+%%%%1.8.17%%%%
+
+
+HDF5 version 1.8.17 released on 2016-05-10
+================================================================================
+
+INTRODUCTION
+============
+
+This document describes the differences between HDF5-1.8.16 and
+HDF5-1.8.17-*, and contains information on the platforms tested and
+known problems in HDF5-1.8.17-*.
+For more details, see the files HISTORY-1_0-1_8_0_rc3.txt
+and HISTORY-1_8.txt in the release_docs/ directory of the HDF5 source.
+
+Links to the HDF5 1.8.17 source code, documentation, and additional materials
+can be found on the HDF5 web page at:
+
+ http://www.hdfgroup.org/products/hdf5/
+
+The HDF5 1.8.17 release can be obtained from:
+
+ http://www.hdfgroup.org/HDF5/release/obtain5.html
+
+User documentation for 1.8.17 can be accessed directly at this location:
+
+ http://www.hdfgroup.org/HDF5/doc/
+
+New features in the HDF5-1.8.x release series, including brief general
+descriptions of some new and modified APIs, are described in the "What's New
+in 1.8.0?" document:
+
+ http://www.hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html
+
+All new and modified APIs are listed in detail in the "HDF5 Software Changes
+from Release to Release" document, in the section "Release 1.8.17 (current
+release) versus Release 1.8.16":
+
+ http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html
+
+If you have any questions or comments, please send them to the HDF Help Desk:
+
+ help@hdfgroup.org
+
+
+
+CONTENTS
+========
+
+- New Features
+- Support for New Platforms, Languages, and Compilers
+- Bug Fixes since HDF5-1.8.16
+- Supported Platforms
+- Supported Configuration Features Summary
+- More Tested Platforms
+- Known Problems
+
+
+
+New Features
+============
+
+ Configuration
+ -------------
+ - Cmakehdf5: Added Ability to Run Multiple Make Commands
+
+      Added the option --njobs to specify the maximum number of jobs to
+      launch during the build (cmake) and testing (ctest).
+
+ (AKC - 2015/12/13, HDFFV-9612)
+
+ - Cmakehdf5: Added Szip Support and Verbose Option
+
+ Added --with-szlib to support the Szip library; and
+ --enable/disable-verbose to display all CMake process output.
+
+ (AKC - 2015/11/16, HDFFV-8932 and DAILYTEST-195)
+
+ - CMake minimum is now 3.1.0. (ADB - 2015/11/14)
+
+ - Large File System (LFS) Support has Changed in the Autotools
+
+ We assume that fseeko and ftello exist.
+
+ The *64 I/O functions and types are no longer explicitly used.
+ We now rely on a mapping provided by _FILE_OFFSET_BITS (or its
+ equivalent).
+
+ _LARGEFILE(64)_SOURCE is no longer exposed via AM_CPPFLAGS.
+
+ (DER - 2016/03/29, HDFFV-9626 and HDFFV-9541)
+
+
+
+ Library
+ -------
+ - New API Calls for Searching for External Dataset Storage
+
+      The H5Pset_efile_prefix() and H5Pget_efile_prefix() API calls were
+      added to the library. These functions give control over the search
+      path for dataset external storage that has been configured with
+      H5Pset_external().
+
+ Additionally, the HDF5_EXTFILE_PREFIX environment variable can be
+ used to control the search path.
+
+ (DER - 2016/04/20, HDFFV-8740)
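+
+      A minimal sketch, assuming file_id is an open file; the prefix and
+      dataset name are placeholders and error checking is omitted:
+
+          #include "hdf5.h"
+
+          /* Point the external-storage search path at a chosen directory
+           * via the dataset access property list. */
+          static hid_t open_with_prefix(hid_t file_id)
+          {
+              hid_t dapl = H5Pcreate(H5P_DATASET_ACCESS);
+              hid_t dset;
+
+              H5Pset_efile_prefix(dapl, "/data/external");
+              dset = H5Dopen2(file_id, "/dset_with_external_storage", dapl);
+
+              H5Pclose(dapl);
+              return dset;             /* caller closes with H5Dclose() */
+          }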
+
+
+
+ Parallel Library
+ ----------------
+ - None
+
+
+
+ Tools
+ -----
+ - None
+
+
+
+ High-Level APIs
+ ---------------
+
+ C Packet Table API
+ ------------------
+ - Replacement of a Public Function with H5PTcreate
+
+      The existing function H5PTcreate_fl limits applications to using
+      only the deflate compression. The public function
+ H5PTcreate has been added to replace H5PTcreate_fl. H5PTcreate
+ takes a property list identifier to provide flexibility on
+ creation properties. This also removes the following warning:
+ "deprecated conversion from string constant to "char*"
+ [-Wwrite-strings]".
+
+ (BMR - 2016/04/25, HDFFV-9708, HDFFV-8615)
+
+ - New Public Functions: H5PTget_dataset and H5PTget_type
+
+ Two accessor functions have been added. H5PTget_dataset returns
+ the identifier of the dataset associated with the packet table,
+ and H5PTget_type returns the identifier of the datatype used by
+ the packet table.
+
+ (BMR - 2016/04/25, HDFFV-8623 patch 3)
+
+ - Regarding #ifdef VLPT_REMOVED
+
+ The #ifdef VLPT_REMOVED blocks have been removed from the packet
+ table (PT) library source except for the following functions:
+ + H5PTis_varlen() has been made available again
+ + H5PTfree_vlen_readbuff() is now H5PTfree_vlen_buff()
+
+ (BMR - 2016/04/25, HDFFV-442)
+
+ C++ Packet Table API
+ --------------------
+ - New Constructor in FL_PacketTable
+
+ An overloaded constructor has been added to FL_PacketTable and
+ takes a property list identifier to provide flexibility on
+ creation properties such as compression.
+
+ FL_PacketTable(hid_t fileID, const char* name, hid_t dtypeID,
+ hsize_t chunkSize = 0, hid_t plistID = H5P_DEFAULT)
+
+ (BMR - 2016/04/25, HDFFV-8623 patch 5)
+
+ - New Member Functions in PacketTable
+
+ Two accessor wrappers were added to class PacketTable.
+
+ PacketTable::GetDataset() returns the identifier of the dataset
+ associated with the packet table, and PacketTable::GetDatatype()
+ returns the identifier of the datatype that the packet table uses.
+
+ (BMR - 2016/04/25, HDFFV-8623 patch 4)
+
+ - New Member Functions with "char*" as an Argument
+
+ Overloaded functions were added to provide the "const char*"
+ argument; the existing version will be deprecated in future
+ releases. This also removes the following warning:
+ "deprecated conversion from string constant to "char*"
+ [-Wwrite-strings]".
+
+ (BMR - 2016/04/25, HDFFV-8623 patch 1, HDFFV-8615)
+
+ - Regarding #ifdef VLPT_REMOVED
+
+ The #ifdef VLPT_REMOVED blocks have been removed from the packet
+ table library source code except for the following functions:
+ + VL_PacketTable::IsVariableLength() was moved to PacketTable
+ + VL_PacketTable::FreeReadBuff() is now PacketTable::FreeBuff()
+
+ (BMR - 2016/04/25, HDFFV-442)
+
+
+
+ Fortran API
+ -----------
+ - None
+
+
+
+ C++ API
+ -------
+ - New Member Function in DSetCreatPropList
+
+ DSetCreatPropList::setNbit() was added to setup N-bit compression for
+ a dataset.
+
+ (BMR - 2016/04/25, HDFFV-8623 patch 7)
+
+ - New Overloaded "const" Member Functions in ArrayType
+
+ The two following functions were added:
+ ArrayType::getArrayNDims() const
+ ArrayType::getArrayDims() const
+      to provide const versions, and the non-const versions were marked
+ deprecated. In-memory array information, ArrayType::rank and
+ ArrayType::dimensions, were removed. This is an implementation
+ detail and should not affect applications.
+
+ (BMR, 2016/04/25, HDFFV-9725)
+
+ - New member function added
+
+ The assignment operator ArrayType::operator= is added because ArrayType
+ has pointer data members.
+
+ (BMR, 2016/03/07, HDFFV-9562)
+
+
+Support for New Platforms, Languages, and Compilers
+===================================================
+ - Mac OS X El Capitan 10.11.4 with compilers Apple clang/clang++
+ version 7.3.0 from Xcode 7.3, gfortran GNU Fortran (GCC) 5.2.0
+ and Intel icc/icpc/ifort version 16.0.2
+
+
+
+Bug Fixes since HDF5-1.8.16
+===========================
+
+ Configuration
+ -------------
+ - Updated Linux Language Level Flags to Match the Autotools. Removed
+ Linux-specific Flags from OS X.
+
+      In addition to the flags simply being out of sync with the Autotools,
+ the Linux flags were used on OS X builds which led to symbols not being
+ found. Although this was non-fatal and compilation continued (implicit
+ definitions were used by the compiler and the symbols resolved at link
+ time), a large number of warnings were raised.
+
+ Linux changes:
+
+ * CHANGED: _POSIX_C_SOURCE (from 199605 to 200112L)
+ * ADDED: _GNU_SOURCE
+ * REMOVED: _BSD_SOURCE
+ * REMOVED: _DEFAULT_SOURCE
+
+ (DER - 2015/12/08, HDFFV-9627)
+
+ - The --enable-clear-file-buffers configure Option was Non-functional
+ so the Feature was Always Enabled (its default value).
+
+ Regardless of the configure flag, the setting was always enabled when
+ the Autotools were used to configure HDF5. This was due to the "no"
+ option being processed after the "*" option in configure.ac so "*"
+ matched first. CMake was unaffected.
+
+ The option now works correctly.
+
+ NOTE that builders are always advised to leave this option enabled.
+ When disabled, buffers that are written to disk may contain the
+ memory's previous contents, which may include secure information.
+ The performance overhead of the feature (a single memset call per
+ allocation) is minimal.
+
+ (DER - 2016/02/03, HDFFV-9676)
+
+    - Added a patch to remove stray double-quote characters from arguments
+      for MPI compilers that were causing errors compiling H5lib_settings.c
+      with SGI MPT.
+
+ (LRK - 2016/04/20, HDFFV-9439)
+
+ Library
+ -------
+ - Fixed shared file pointer problem which caused a crash when running a
+ program provided by a user.
+
+ (VC - 2016/04/01, HDFFV-9469)
+
+ - Fixed some format string warnings that prevent compiling with
+ -Werror=format-security on gcc.
+
+ These only appeared in error messages and would not cause problems
+ under normal operation.
+
+ (DER - 2016/01/13, HDFFV-9640)
+
+ - Fixed a library segmentation fault when accessing a corrupted
+ file provided by a user.
+
+ (MSC - 2016/02/19, HDFFV-9670)
+
+
+
+ Parallel Library
+ ----------------
+ - None
+
+
+
+ Performance
+ -------------
+ - None
+
+
+
+ Tools
+ -----
+ - h5dump: Sub-setting Fixed for Dimensions Greater than Two
+
+ When a dataset has more than two dimensions, sub-setting would
+ incorrectly calculate the data that needed to be displayed.
+ Added in block and stride calculations that account for dimensions
+ greater than two. NOTE: lines that have line breaks inserted
+ because of display length calculations may have index info that
+ is incorrect until the next dimension break.
+
+ (ADB - 2016/03/07, HDFFV-9698)
+
+ - h5dump: Issue with Argument Segmentation Fault
+
+ When an argument with an optional value was at the end of the command
+ line with a value, h5dump would crash. Reworked check for remaining
+ arguments.
+
+ (ADB - 2016/03/07, HDFFV-9570, HDFFV-9684)
+
+ - h5dump: Issue with Default Fill Value
+
+ Added all default cases of fill value to the display of fill value.
+
+      (ADB - 2016/03/07, HDFFV-9241)
+
+ - h5dump: Clarified Help
+
+ Clarified usage of -O F option in h5dump utility help.
+
+ (ADB - 2016/03/07, HDFFV-9066)
+
+ - h5dump: Issue with Double Free Fault
+
+ Added a check for filename not null before calling free().
+
+ (ADB - 2016/01/27, HDFFV-9639)
+
+ - VS2015 Release Changed how Timezone was Handled
+
+ Created a function, HDget_timezone, in H5system.c. Replaced
+ timezone variable usage with function call.
+
+ (ADB - 2015/11/02, HDFFV-9550)
+
+
+
+ Fortran API
+ -----------
+ - None
+
+
+
+ C++ API
+ -------
+ - Removal of Obsolete Methods
+
+      The overloaded methods with parameters that should have been const,
+      but were not, have been removed.
+
+ (BMR - 2016/01/13, HDFFV-9789)
+
+
+
+ High-Level APIs:
+ ---------------
+ - Fixed Memory Leak in Packet Table API
+
+ Applied user's patch to fix memory leak in the creation of a
+ packet table.
+
+ (BMR - 2016/04/25, HDFFV-9700)
+
+
+
+ Fortran High-Level APIs:
+ ------------------------
+ - None
+
+
+
+ Testing
+ -------
+ - None
+
+
+
+Supported Platforms
+===================
+The following platforms are supported and have been tested for this release.
+They are built with the configure process unless specified otherwise.
+
+ AIX 6.1 xlc/xlc_r 10.1.0.5
+ (NASA G-ADA) xlC/xlC_r 10.1.0.5
+ xlf90/xlf90_r 12.1.0.6
+
+ Linux 2.6.32-573.22.1.el6 GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP x86_64 GNU/Linux compilers:
+ (platypus) Version 4.4.7 20120313
+ Version 4.9.3, Version 5.2.0
+ PGI C, Fortran, C++ for 64-bit target on
+ x86-64;
+ Version 15.7-0
+ Intel(R) C (icc), C++ (icpc), Fortran (icc)
+ compilers:
+ Version 15.0.3.187 Build 20150407
+ MPICH 3.1.4 compiled with GCC 4.9.3
+
+ Linux 2.6.32-504.8.1.el6.ppc64 gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-16)
+ #1 SMP ppc64 GNU/Linux g++ (GCC) 4.4.7 20120313 (Red Hat 4.4.7-16)
+ (ostrich) GNU Fortran (GCC) 4.4.7 20120313 (Red Hat 4.4.7-16)
+ IBM XL C/C++ V13.1
+ IBM XL Fortran V15.1
+
+ Linux 3.10.0-229.14.1.el7 GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP x86_64 GNU/Linux compilers:
+ (kituo/moohan) Version 4.8.5 20150623 (Red Hat 4.8.5-4)
+ Version 5.2.0
+ Intel(R) C (icc), C++ (icpc), Fortran (icc)
+ compilers:
+ Version 15.0.3.187 Build 20150407
+ MPICH 3.1.4 compiled with GCC 4.9.3
+
+ SunOS 5.11 32- and 64-bit Sun C 5.12 SunOS_sparc
+ (emu) Sun Fortran 95 8.6 SunOS_sparc
+ Sun C++ 5.12 SunOS_sparc
+
+ Windows 7 Visual Studio 2012 w/ Intel Fortran 15 (cmake)
+ Visual Studio 2013 w/ Intel Fortran 15 (cmake)
+ Visual Studio 2015 (cmake)
+ Cygwin(CYGWIN_NT-6.1 2.2.1(0.289/5/3) gcc(4.9.3) compiler and gfortran)
+ (cmake and autotools)
+
+ Windows 7 x64 Visual Studio 2012 w/ Intel Fortran 15 (cmake)
+ Visual Studio 2013 w/ Intel Fortran 15 (cmake)
+ Visual Studio 2015 (cmake)
+
+ Windows 8.1 Visual Studio 2012 w/ Intel Fortran 15 (cmake)
+ Visual Studio 2013 w/ Intel Fortran 15 (cmake)
+
+ Windows 8.1 x64 Visual Studio 2012 w/ Intel Fortran 15 (cmake)
+ Visual Studio 2013 w/ Intel Fortran 15 (cmake)
+
+ Mac OS X Mt. Lion 10.8.5 Apple clang/clang++ version 5.1 from Xcode 5.1
+ 64-bit gfortran GNU Fortran (GCC) 4.8.2
+ (swallow/kite) Intel icc/icpc/ifort version 15.0.3
+
+ Mac OS X Mavericks 10.9.5 Apple clang/clang++ version 6.0 from Xcode 6.2
+ 64-bit gfortran GNU Fortran (GCC) 4.9.2
+ (wren/quail) Intel icc/icpc/ifort version 15.0.3
+
+ Mac OS X Yosemite 10.10.5 Apple clang/clang++ version 6.1 from Xcode 7.0
+ 64-bit gfortran GNU Fortran (GCC) 4.9.2
+ (osx1010dev/osx1010test) Intel icc/icpc/ifort version 15.0.3
+
+ Mac OS X El Capitan 10.11.4 Apple clang/clang++ version 7.3.0 from Xcode 7.3
+ 64-bit gfortran GNU Fortran (GCC) 5.2.0
+ (osx1011dev/ox1011test) Intel icc/icpc/ifort version 16.0.2
+
+
+Tested Configuration Features Summary
+=====================================
+
+ In the tables below
+ y = tested
+ n = not tested in this release
+ C = Cluster
+ W = Workstation
+ x = not working in this release
+ dna = does not apply
+ ( ) = footnote appears below second table
+ <blank> = testing incomplete on this feature or platform
+
+Platform C F90/ F90 C++ zlib SZIP
+ parallel F2003 parallel
+SunOS 5.11 32-bit n y/y n y y y
+SunOS 5.11 64-bit n y/y n y y y
+Windows 7 y y/y n y y y
+Windows 7 x64 y y/y n y y y
+Windows 7 Cygwin n y/y n y y n
+Windows 8.1 n y/y n y y y
+Windows 8.1 x64 n y/y n y y y
+Mac OS X Mountain Lion 10.8.5 64-bit n y/y n y y y
+Mac OS X Mavericks 10.9.5 64-bit n y/y n y y y
+Mac OS X Yosemeti 10.10.5 64-bit n y/y n y y y
+AIX 6.1 32- and 64-bit n y/n n y y y
+CentOS 6.7 Linux 2.6.32 x86_64 GNU y y/y y y y y
+CentOS 6.7 Linux 2.6.32 x86_64 Intel n y/y n y y y
+CentOS 6.7 Linux 2.6.32 x86_64 PGI n y/y n y y y
+CentOS 7.1 Linux 3.10.0 x86_64 GNU y y/y y y y y
+CentOS 7.1 Linux 3.10.0 x86_64 Intel n y/y n y y y
+Linux 2.6.32-431.11.2.el6.ppc64 n y/n n y y y
+
+Platform Shared Shared Shared Thread-
+ C libs F90 libs C++ libs safe
+SunOS 5.11 32-bit y y y y
+SunOS 5.11 64-bit y y y y
+Windows 7 y y y y
+Windows 7 x64 y y y y
+Windows 7 Cygwin n n n y
+Windows 8.1 y y y y
+Windows 8.1 x64 y y y y
+Mac OS X Mountain Lion 10.8.5 64-bit y n y y
+Mac OS X Mavericks 10.9.5 64-bit y n y y
+Mac OS X Yosemeti 10.10.5 64-bit y n y y
+AIX 6.1 32- and 64-bit y n n y
+CentOS 6.7 Linux 2.6.32 x86_64 GNU y y y y
+CentOS 6.7 Linux 2.6.32 x86_64 Intel y y y y
+CentOS 6.7 Linux 2.6.32 x86_64 PGI y y y y
+CentOS 7.1 Linux 3.10.0 x86_64 GNU y y y y
+CentOS 7.1 Linux 3.10.0 x86_64 Intel y y y y
+Linux 2.6.32-431.11.2.el6.ppc64 y y y y
+
+Compiler versions for each platform are listed in the preceding
+"Supported Platforms" table.
+
+
+More Tested Platforms
+=====================
+The following platforms are not supported but have been tested for this release.
+
+ Linux 2.6.18-431.11.2.el6 g95 (GCC 4.0.3 (g95 0.94!)
+ #1 SMP x86_64 GNU/Linux
+ (platypus)
+
+ Windows 7 Visual Studio 2008 (cmake)
+
+ Windows 7 x64 Visual Studio 2008 (cmake)
+
+ Windows 10 Visual Studio 2013 w/ Intel Fortran 15 (cmake)
+
+ Windows 10 x64 Visual Studio 2013 w/ Intel Fortran 15 (cmake)
+
+ Debian7.5.0 3.2.0-4-amd64 #1 SMP Debian 3.2.51-1 x86_64 GNU/Linux
+ gcc (Debian 4.7.2-5) 4.7.2
+ GNU Fortran (Debian 4.7.2-5) 4.7.2
+ (cmake and autotools)
+
+ Fedora20 3.15.3-200.fc20.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux
+ gcc (GCC) 4.8.3 20140624 (Red Hat 4.8.3-1)
+ GNU Fortran (GCC) 4.8.3 20140624 (Red Hat 4.8.3-1)
+ (cmake and autotools)
+
+ SUSE 13.1 3.11.10-17-desktop #1 SMP PREEMPT x86_64 x86_64 x86_64 GNU/Linux
+ gcc (SUSE Linux) 4.8.1
+ GNU Fortran (SUSE Linux) 4.8.1
+ (cmake and autotools)
+
+ Ubuntu 14.04 3.13.0-35-generic #62-Ubuntu SMP x86_64 GNU/Linux
+ gcc (Ubuntu/Linaro 4.9.1-0ubuntu1) 4.9.1
+ GNU Fortran (Ubuntu/Linaro 4.9.1-0ubuntu1) 4.9.1
+ (cmake and autotools)
+
+ hopper.nersc.gov PrgEnv-gnu/5.2.40
+ gcc (GCC) 4.9.2 20141030 (Cray Inc.)
+ GNU Fortran (GCC) 4.9.2 20141030 (Cray Inc.)
+ g++ (GCC) 4.9.2 20141030 (Cray Inc.)
+
+
+Known Problems
+==============
+* On windows platforms in debug configurations, the VFD flush1 tests will fail
+ with the split and multi VFD drivers. These tests will display a modal debug
+ dialog which must be answered or wait for the test timeout to expire.
+ (ADB - 2014/06/23 - HDFFV-8851)
+
+* CLANG compiler with the options -fcatch-undefined-behavior and -ftrapv
+ catches some undefined behavior in the alignment algorithm of the macro DETECT_I
+ in H5detect.c (Issue 8147). Since the algorithm is trying to detect the alignment
+ of integers, ideally the flag -fcatch-undefined-behavior shouldn't be used for
+ H5detect.c. In the future, we can separate flags for H5detect.c from the rest of
+ the library. (SLU - 2013/10/16)
+
+* Make provided by Solaris fails in "make check". Solaris users should use
+ gmake to build and install the HDF5 software. (AKC - 2013/10/08 - HDFFV-8534)
+
+* The C++ and FORTRAN bindings are not currently working on FreeBSD with the
+ native release 8.2 compilers (4.2.1), but are working with gcc 4.6 from the
+ ports (and probably gcc releases after that).
+ (QAK - 2012/10/19)
+
+* The following h5dump test case fails in BG/P machines (and potentially other
+ machines that use a command script to launch executables):
+
+ h5dump --no-compact-subset -d "AHFINDERDIRECT::ah_centroid_t[0] it=0 tl=0"
+ tno-subset.h5
+
+ This is due to the embedded spaces in the dataset name being interpreted
+ by the command script launcher as meta-characters, thus passing three
+ arguments to h5dump's -d flag. The command passes if run by hand, just
+ not via the test script.
+ (AKC - 2012/05/03)
+
+* The STDIO VFD does not work on some architectures, possibly due to 32/64
+ bit or large file issues. The basic STDIO VFD test is known to fail on
+ 64-bit SunOS 5.10 on SPARC when built with -m64 and 32-bit OS X/Darwin
+ 10.7.0. The STDIO VFD test has been disabled while we investigate and
+ a fix should appear in a future release.
+ (DER - 2011/10/14 - HDFFV-8235)
+
+* h5diff can report inconsistent results when comparing datasets of enum type
+ that contain invalid values. This is due to how enum types are handled in
+ the library and will be addressed in a future release.
+ (DER - 2011/10/14 - HDFFV-7527)
+
+* The links test can fail under the stdio VFD due to some issues with external
+ links. This will be investigated and fixed in a future release.
+ (DER - 2011/10/14 - HDFFV-7768)
+
+* After the shared library support was fixed for some bugs, it was discovered
+ that "make prefix=XXX install" no longer works for shared libraries. It
+ still works correctly for static libraries. Therefore, if you want to
+ install the HDF5 shared libraries in a location such as /usr/local/hdf5,
+ you need to specify the location via the --prefix option during configure
+ time. E.g, ./configure --prefix=/usr/local/hdf5 ...
+ (AKC - 2011/05/07 - HDFFV-7583)
+
+* The parallel test, t_shapesame, in testpar/, may run for a long time and may
+ be terminated by the alarm signal. If that happens, one can increase the
+ alarm seconds (default is 1200 seconds = 20 minutes) by setting the
+ environment variable, $HDF5_ALARM_SECONDS, to a larger value such as 3600
+ (60 minutes). Note that the t_shapesame test may fail in some systems
+ (see the "While working on the 1.8.6 release..." problem below). If
+ it does, it will waste more time if $HDF5_ALARM_SECONDS is set
+ to a larger value.
+ (AKC - 2011/05/07)
+
+* Shared Fortran libraries are not quite working on AIX. While they are
+ generated when --enable-shared is specified, the Fortran and HL/Fortran
+ tests fail. HL and C++ shared libraries should now be working as
+ intended, however.
+ (MAM - 2011/04/20)
+
+* While working on the 1.8.6 release of HDF5, a bug was discovered that can
+ occur when reading from a dataset in parallel shortly after it has been
+ written to collectively. The issue was exposed by a new test in the parallel
+ HDF5 test suite, but had existed before that. We believe the problem lies with
+ certain MPI implementations and/or file systems.
+
+ We have provided a pure MPI test program, as well as a standalone HDF5
+ program, that can be used to determine if this is an issue on your system.
+ They should be run across multiple nodes with a varying number of processes.
+ These programs can be found at:
+ http://www.hdfgroup.org/ftp/HDF5/examples/known_problems/
+ (NAF - 2011/01/19)
+
+* Not all of the VFL drivers are backward compatible. In H5FDpublic.h, the
+ structure H5FD_class_t changed in 1.8: a new parameter was added to the
+ get_eoa and set_eoa callback functions, a new callback function
+ get_type_map was added, and the public function H5FDrealloc was removed
+ in 1.8. The problem only happens when users define their own driver
+ for 1.6 and try to plug it into the 1.8 library. Because only one user
+ has complained about it, we (Elena, Quincey, and I) decided to leave it
+ as it is (see bug report #1279). Quincey will make a plan for 1.10.
+ (SLU - 2010/02/02)
+
+* The --enable-static-exec configure flag will only statically link libraries
+ if the static version of that library is present. If only the shared version
+ of a library exists (i.e., most system libraries on Solaris, AIX, and Mac,
+ for example, only have shared versions), the flag should still result in a
+ successful compilation, but note that the installed executables will not be
+ fully static. Thus, the only guarantee on these systems is that the
+ executable is statically linked with just the HDF5 library.
+ (MAM - 2009/11/04)
+
+* A dataset created or rewritten with a v1.6.3 library or after cannot be read
+ with the v1.6.2 library or before when the Fletcher32 EDC filter is enabled.
+ There was a bug in the calculation of the Fletcher32 checksum in the
+ library before v1.6.3; the checksum value was not consistent between big-
+ endian and little-endian systems. This bug was fixed in Release 1.6.3.
+ However, after fixing the bug, the checksum value was no longer the same as
+ before on little-endian systems. Library releases after 1.6.4 can still read
+ datasets created or rewritten with an HDF5 library of v1.6.2 or before.
+ (SLU - 2005/06/30)
+
+
+%%%%1.8.16%%%%
+
+
+HDF5 version 1.8.16 released on 2015-11-10
+================================================================================
+
+INTRODUCTION
+============
+
+This document describes the differences between HDF5-1.8.15 and
+HDF5-1.8.16, and contains information on the platforms tested and
+known problems in HDF5-1.8.16.
+For more details, see the files HISTORY-1_0-1_8_0_rc3.txt
+and HISTORY-1_8.txt in the release_docs/ directory of the HDF5 source.
+
+Links to the HDF5 1.8.16 source code, documentation, and additional materials
+can be found on the HDF5 web page at:
+
+ http://www.hdfgroup.org/products/hdf5/
+
+The HDF5 1.8.16 release can be obtained from:
+
+ http://www.hdfgroup.org/HDF5/release/obtain5.html
+
+User documentation for 1.8.16 can be accessed directly at this location:
+
+ http://www.hdfgroup.org/HDF5/doc/
+
+New features in the HDF5-1.8.x release series, including brief general
+descriptions of some new and modified APIs, are described in the "What's New
+in 1.8.0?" document:
+
+ http://www.hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html
+
+All new and modified APIs are listed in detail in the "HDF5 Software Changes
+from Release to Release" document, in the section "Release 1.8.16 (current
+release) versus Release 1.8.15":
+
+ http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html
+
+If you have any questions or comments, please send them to the HDF Help Desk:
+
+ help@hdfgroup.org
+
+
+CONTENTS
+========
+
+- New Features
+- Support for New Platforms, Languages, and Compilers
+- Bug Fixes since HDF5-1.8.15
+- Supported Platforms
+- Supported Configuration Features Summary
+- More Tested Platforms
+- Known Problems
+
+New Features
+============
+
+ Configuration and Build
+ -------------
+ - The thread-safety + high-level library combination has been marked
+ as "unsupported" in the Autotools
+
+ The global lock used by the thread-safety feature has never been
+ raised to the high-level library level, making it possible that
+ the library state could change if a context switch were to occur in
+ a high-level library call. Because of this, the combination of
+ thread-safety and high-level library is officially unsupported by
+ The HDF Group.
+
+ In the past, although this combination has never been supported, this
+ was not enforced by the build systems. These changes will cause an
+ Autotools configure step to fail if --enable-threadsafe and
+ --enable-hl are combined unless additional options are specified.
+ Since the high-level library is built by default, this means that
+ these extra configuration options will need to be used any time
+ --enable-threadsafe is selected.
+
+ To build with --enable-threadsafe, either:
+
+ 1) Use --disable-hl to disable the high-level library (recommended)
+
+ 2) Use --enable-unsupported to build the high-level library with
+ the thread-safety feature.
+
+ (DER - 2015/09/10 HDFFV-8719)
+
+ - Using CMake now builds both static and shared libraries.
+
+ The CMake files have been updated to build both static and shared
+ libraries, with tools only built statically. The packaging of the
+ libraries and tools will include cmake-config files that allow
+ projects to choose either shared or static (default) libraries
+ to be found with the find_package command using the COMPONENTS
+ keyword and a list of components. The imported libraries will
+ include any interface-specific settings and dependent libraries.
+
+ The default setting for BUILD_SHARED_LIBS has changed from OFF
+ to ON, which builds both static and shared libraries. The static
+ libraries are always built because the tools require them.
+
+ (ADB - 2015/08/24 HDFFV-5881)
+
+ - Inline functions are now correctly annotated in Autotools builds.
+
+ The method used to detect the appropriate inline markup scheme was
+ nonfunctional in Autotools builds. The Autotools have been modified
+ to correctly detect the compiler's inline markup symbol and apply it
+ to the source. Note that only a very small number of internal
+ functions are marked inline so this was not a very big change or
+ likely to significantly affect performance.
+
+ As a part of this change, the H5_inline symbol no longer appears in
+ H5pubconf.h.
+
+ (DER - 2015/08/13 HDFFV-9119, HDFFV-9421)
+
+ - Removed obsolete/unmaintained files from config/
+
+ Several files were removed from the config directory. These files
+ represent old operating systems, were no longer necessary, and/or
+ were no longer maintained. configure.ac was updated to reflect the
+ removed files.
+
+ Removed:
+
+ craynv
+ dec-flags
+ hpux11.23
+ ia64-linux-gnu
+ nec-superux14.1
+ sv1-cray
+ x86_64-redstorm-linux-gnu
+ powerpc-ibm-aix5.x
+
+ As a part of this work, a few lines that deal with locating the
+ sys/fpu.h header on SGI machines and some OSF/1 configure lines
+ were also removed. The Solaris config was also renamed to not have
+ a version number since the version number was ignored by configure
+ as it applies to all Solaris versions.
+
+ (DER - 2015/09/04 HDFFV-9116)
+
+ - Removed the FP_TO_INTEGER_OVERFLOW_WORKS macro/defines from the library
+
+ This was for working around bugs in the Cray X1 compiler, which is no
+ longer supported.
+
+ (DER - 2015/09/09 HDFFV-9191)
+
+ - Removed the H5_SW_ULONG_TO_FP_BOTTOM_BIT_WORKS and
+ H5_FP_TO_ULLONG_BOTTOM_BIT_WORKS symbols and associated code.
+
+ H5_SW_ULONG_TO_FP_BOTTOM_BIT_WORKS was a work-around on old 64-bit
+ SGI and Solaris systems.
+
+ H5_FP_TO_ULLONG_BOTTOM_BIT_WORKS was a work-around for old PGI
+ compilers on Linux.
+
+ Neither of these were used in any current library code and only appeared
+ in the dt_arith test.
+
+ (DER - 2015/09/09 HDFFV-9187)
+
+ - Removed CONVERT_DENORMAL_FLOAT symbol and associated code from the
+ library.
+
+ This was only set in configure files for Cray and NEC computers. These
+ config files no longer exist so there is no effect on currently
+ supported platforms.
+
+ (DER - 2015/09/09 HDFFV-9188)
+
+ - Removed _BSD_SOURCE and _DEFAULT_SOURCE from configure.ac
+
+ These are old BSD-compatibility symbols that are no longer needed by
+ the library.
+
+ (DER - 2015/09/10 HDFFV-9079)
+
+ - Removed HW_FP_TO_LLONG_NOT_WORKS symbol and associated code from the
+ library.
+
+ This was part of a work-around for the VS.NET 2003 compiler, which is
+ no longer supported.
+
+ (DER - 2015/09/10 HDFFV-9189)
+
+ - Removed the BAD_LOG2_CODE_GENERATED symbol and associated code from the
+ library.
+
+ This was an IRIX work-around.
+
+ (DER - 2015/09/11 HDFFV-9195)
+
+ - Decoupled shared object version numbers for wrapper libraries from the
+ shared object version number for the HDF5 library. These will be
+ maintained on an individual basis according to the interface changes
+ specific to these wrapper libraries.
+
+ For HDF5 1.8.16 the shared object version numbers were changed from
+ 10.0.1 to 10.1.0 for the HDF5 library due to added APIs. For the C++
+ wrapper library they were changed from 10.0.1 to 11.0.0 due to changes
+ in existing APIs. For all other wrapper libraries the versions were
+ changed from 10.0.1 to 10.0.2 because, while their APIs did not change,
+ there were code changes that did not affect their interfaces.
+
+ (LRK - 2015/10/28)
+
+ Library
+ -------
+
+ - H5F_ACC_DEBUG flag for H5Fopen/create: functionality removed
+
+ The symbol was used to emit some extra debugging information
+ for HDF Group developers in the multi VFD. The underlying
+ functionality has been removed due to disuse. The symbol
+ remains defined since it was visible in H5Fpublic.h but it
+ has been set to zero and has no effect anywhere in the library.
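+
+ For example, existing code that still passes the flag compiles and
+ behaves as if the flag were absent, since H5F_ACC_DEBUG now expands
+ to zero (the file name below is illustrative only):
+
+     #include "hdf5.h"
+
+     hid_t fid = H5Fopen("example.h5", H5F_ACC_RDWR | H5F_ACC_DEBUG,
+                         H5P_DEFAULT);   /* same as H5F_ACC_RDWR alone */
+     if (fid >= 0)
+         H5Fclose(fid);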
+
+ (DER - 2015-05-02, HDFFV-1074)
+
+ - New public API call: H5is_library_threadsafe()
+
+ This API call indicates if the library was built with thread-
+ safety enabled.
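+
+ A minimal usage sketch, assuming the call takes a pointer to an
+ hbool_t and returns a negative value on failure:
+
+     #include <stdio.h>
+     #include "hdf5.h"
+
+     hbool_t is_ts = 0;
+
+     if (H5is_library_threadsafe(&is_ts) >= 0)
+         printf("Thread-safe HDF5 library: %s\n", is_ts ? "yes" : "no");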
+
+ (DER - 2015-09-01, HDFFV-9496)
+
+ Parallel Library
+ ----------------
+ - None
+
+ Tools
+ -----
+ - None
+
+ High-Level APIs
+ ---------------
+ - None
+
+ Fortran API
+ -----------
+ - None
+
+
+ C++ API
+ -------
+ - Class H5::ObjCreatPropList is added for the object creation property
+ list class.
+
+ Class H5::ObjCreatPropList is derived from H5::PropList and is a
+ baseclass of H5::DSetCreatPropList. Additional property list classes
+ will be derived from H5::ObjCreatPropList when they are added to the
+ library in future releases.
+
+ (BMR, 2015/10/13, Part of HDFFV-9169)
+
+ - New Wrappers for C Functions H5P[s/g]et_attr_phase_change and
+ H5P[s/g]et_attr_creation_order.
+
+ Wrappers were added to class H5::ObjCreatPropList for the C Functions
+ H5Pset_attr_phase_change: H5::ObjCreatPropList::setAttrPhaseChange
+ H5Pget_attr_phase_change: H5::ObjCreatPropList::getAttrPhaseChange
+ H5Pset_attr_creation_order: H5::ObjCreatPropList::setAttrCrtOrder
+ H5Pget_attr_creation_order: H5::ObjCreatPropList::getAttrCrtOrder
+
+ (BMR, 2015/10/13, Part of HDFFV-9167 and HDFFV-9169)
+
+
+Support for New Platforms, Languages, and Compilers
+===================================================
+ - Added VS2015 with Intel Fortran 16 to supported Windows 7 platforms
+
+
+Bug Fixes since HDF5-1.8.15
+===========================
+
+ Configuration
+ -------------
+
+ - CMake test for long long printf format improved
+
+ The CMake configuration test for determining the printf format string
+ for printing a long long integer was fixed. The test would crash
+ when executed with VS2015.
+
+ (ADB - 2015-10-21 HDFFV-9488)
+
+ Library
+ -------
+ - VS2015 removed global variable timezone
+
+ The usage of the global variable timezone was modified for VS2015 by
+ adding an alias to Windows builds.
+
+ (ADB - 2015-10-23 HDFFV-9550)
+
+ - Fix potential error in H5Iclear_type
+
+ If the ID type's close callback could close another ID of the same type,
+ H5Iclear_type could occasionally run into problems due to the inner
+ workings of the skip list package. This could potentially cause an
+ error at library shutdown even when H5Iclear_type was never called
+ directly. This issue has been fixed.
+
+ (NAF - 2015-08-12)
+
+ - Fix uninitialized memory in dataspace selection code
+
+ When creating a dataspace with H5Screate and setting the extent with
+ H5Sextent_copy, the selection offset was not initialized, potentially
+ causing invalid I/O. There may be other cases where this happened.
+ Modified the library to always initialize the offset.
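+
+ For reference, the pattern described above looks roughly like the
+ following sketch (the rank and dimensions are illustrative only):
+
+     hsize_t dims[1] = {10};
+     hid_t   src = H5Screate_simple(1, dims, NULL);
+     hid_t   dst = H5Screate(H5S_SIMPLE);
+
+     H5Sextent_copy(dst, src);   /* dst's selection offset is now always
+                                    initialized by the library          */
+     H5Sclose(dst);
+     H5Sclose(src);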
+
+ (NAF - 2015-09-08)
+
+ - Truncate file in H5Fflush() if EOA != EOF to avoid file
+ corruption in certain scenarios
+
+ In the following scenario, the resulting HDF5 file would end up
+ corrupted because the truncate operation had at some point been
+ wrongly moved out of the flush operation (a code sketch follows
+ the list):
+ - Create a new file with a single dataset.
+ - Write parts of the dataset (make sure that some values at
+ the end of the dataset are not initialized).
+ - Flush the file.
+ - Crash the program.
+ - Try to open the file with h5dump or h5debug, but the
+ resulting file is corrupted.
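+
+ In code, the scenario above reduces to an ordinary create/write/flush
+ sequence; with the fix, the flush call truncates the file so that EOF
+ matches EOA (names and sizes below are illustrative only):
+
+     hsize_t dims[1]  = {1000};
+     hid_t   space_id = H5Screate_simple(1, dims, NULL);
+     hid_t   file_id  = H5Fcreate("f.h5", H5F_ACC_TRUNC,
+                                  H5P_DEFAULT, H5P_DEFAULT);
+     hid_t   dset_id  = H5Dcreate2(file_id, "d", H5T_NATIVE_INT, space_id,
+                                   H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+
+     /* ... write only part of the dataset ... */
+
+     H5Fflush(file_id, H5F_SCOPE_GLOBAL);   /* the file stays readable
+                                               even if the program
+                                               crashes after this point */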
+
+ (MSC - 2015-06-15 HDFFV-9418)
+
+
+ Parallel Library
+ ----------------
+ - None
+
+ Performance
+ -------------
+ - None
+
+ Tools
+ -----
+ - VS2015 changed the default format for printing of exponents
+
+ VS2015 default format for exponents changed with the elimination
+ of the leading '0'. CMake now tests for the VS2015 compiler and
+ adjusts which reference files are used by tests.
+
+ (ADB - 2015-10-23 HDFFV-9550)
+
+ - Fixed h5repack with user-defined filters
+
+ h5repack would throw a buffer overrun exception on Windows when
+ parsing a user-defined filter ID of 5 digits. A local variable in
+ the parse routine was not of sufficient size.
+
+ (ADB - 2015/09/01 HDFFV-9515)
+
+ Fortran API
+ ------------
+ - None
+
+
+ C++ API
+ ------
+ - Removed memory leaks
+
+ The static global constant objects were changed to constant references
+ referencing dynamically allocated objects. This ensures that the clean-up
+ process in the C++ library occurs before the termination of the C library
+ and prevents memory leaks because the previous global constants were not
+ properly deleted before the C library termination.
+
+ (BMR, 2015/10/13, HDFFV-9529)
+
+ - Fixed the problem of identifiers being closed prematurely.
+
+ The C++ library needs to increment the ID's reference counter when it is
+ duplicated in the form of C++ objects, but not when the ID is obtained
+ from a C function. With this approach, both problems, premature
+ closing of IDs and memory leaks due to IDs not being closed, should be
+ eliminated.
+
+ (BMR, 2015/10/15, HDFFV-7947)
+
+
+ High-Level APIs:
+ ------
+ - None
+
+
+ Fortran High-Level APIs:
+ ------------------------
+ - None
+
+
+ Testing
+ -------
+ - None
+
+
+Supported Platforms
+===================
+The following platforms are supported and have been tested for this release.
+They are built with the configure process unless specified otherwise.
+
+ AIX 6.1 xlc/xlc_r 10.1.0.5
+ (NASA G-ADA) xlC/xlC_r 10.1.0.5
+ xlf90/xlf90_r 12.1.0.6
+
+ Linux 2.6.32-573.3.1.el6 GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP x86_64 GNU/Linux compilers:
+ (platypus) Version 4.4.7 20120313
+ Version 4.8.4, Version 5.2.0
+ PGI C, Fortran, C++ for 64-bit target on
+ x86-64;
+ Version 15.7-0
+ Intel(R) C (icc), C++ (icpc), Fortran (icc)
+ compilers:
+ Version 15.0.3.187 Build 20150407
+ MPICH 3.1.4 compiled with GCC 4.9.3
+
+ Linux 2.6.32-504.8.1.el6.ppc64 gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-11)
+ #1 SMP ppc64 GNU/Linux g++ (GCC) 4.4.7 20120313 (Red Hat 4.4.7-11)
+ (ostrich) GNU Fortran (GCC) 4.4.7 20120313 (Red Hat 4.4.7-11)
+ IBM XL C/C++ V13.1
+ IBM XL Fortran V15.1
+
+ Linux 3.10.0-229.14.1.el7 GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP x86_64 GNU/Linux compilers:
+ (kituo/moohan) Version 4.8.3 20140911 (Red Hat 4.8.3-9)
+ Version 5.2.0
+ Intel(R) C (icc), C++ (icpc), Fortran (icc)
+ compilers:
+ Version 15.0.3.187 Build 20150407
+ MPICH 3.1.4 compiled with GCC 4.9.3
+
+ SunOS 5.11 32- and 64-bit Sun C 5.12 SunOS_sparc
+ (emu) Sun Fortran 95 8.6 SunOS_sparc
+ Sun C++ 5.12 SunOS_sparc
+
+ Windows 7 Visual Studio 2012 w/ Intel Fortran 15 (cmake)
+ Visual Studio 2013 w/ Intel Fortran 15 (cmake)
+ Visual Studio 2015 w/ Intel Fortran 16 (cmake)
+ Cygwin(CYGWIN_NT-6.1 2.2.1(0.289/5/3) gcc(4.9.3) compiler and gfortran)
+ (cmake and autotools)
+
+ Windows 7 x64 Visual Studio 2012 w/ Intel Fortran 15 (cmake)
+ Visual Studio 2013 w/ Intel Fortran 15 (cmake)
+ Visual Studio 2015 w/ Intel Fortran 16 (cmake)
+
+ Windows 8.1 Visual Studio 2012 w/ Intel Fortran 15 (cmake)
+ Visual Studio 2013 w/ Intel Fortran 15 (cmake)
+
+ Windows 8.1 x64 Visual Studio 2012 w/ Intel Fortran 15 (cmake)
+ Visual Studio 2013 w/ Intel Fortran 15 (cmake)
+
+ Mac OS X Mt. Lion 10.8.5 Apple clang/clang++ version 5.1 from Xcode 5.1
+ 64-bit gfortran GNU Fortran (GCC) 4.8.2
+ (swallow/kite) Intel icc/icpc/ifort version 15.0.3
+
+ Mac OS X Mavericks 10.9.5 Apple clang/clang++ version 6.0 from Xcode 6.2.0
+ 64-bit gfortran GNU Fortran (GCC) 4.9.2
+ (wren/quail) Intel icc/icpc/ifort version 15.0.3
+
+ Mac OS X Yosemite 10.10.5 Apple clang/clang++ version 6.0 from Xcode 7.0.0
+ 64-bit gfortran GNU Fortran (GCC) 4.9.2
+ (osx1010dev/osx1010test) Intel icc/icpc/ifort version 15.0.3
+
+Tested Configuration Features Summary
+=====================================
+
+ In the tables below
+ y = tested
+ n = not tested in this release
+ C = Cluster
+ W = Workstation
+ x = not working in this release
+ dna = does not apply
+ ( ) = footnote appears below second table
+ <blank> = testing incomplete on this feature or platform
+
+Platform C F90/ F90 C++ zlib SZIP
+ parallel F2003 parallel
+SunOS 5.11 32-bit n y/y n y y y
+SunOS 5.11 64-bit n y/y n y y y
+Windows 7 y y/y n y y y
+Windows 7 x64 y y/y n y y y
+Windows 7 Cygwin n y/y n y y n
+Windows 8.1 n y/y n y y y
+Windows 8.1 x64 n y/y n y y y
+Mac OS X Mountain Lion 10.8.5 64-bit n y/y n y y y
+Mac OS X Mavericks 10.9.5 64-bit n y/y n y y y
+Mac OS X Yosemeti 10.10.5 64-bit n y/y n y y y
+AIX 6.1 32- and 64-bit n y/n n y y y
+CentOS 6.7 Linux 2.6.32 x86_64 GNU y y/y y y y y
+CentOS 6.7 Linux 2.6.32 x86_64 Intel n y/y n y y y
+CentOS 6.7 Linux 2.6.32 x86_64 PGI n y/y n y y y
+CentOS 7.1 Linux 3.10.0 x86_64 GNU y y/y y y y y
+CentOS 7.1 Linux 3.10.0 x86_64 Intel n y/y n y y y
+Linux 2.6.32-431.11.2.el6.ppc64 n y/n n y y y
+
+Platform Shared Shared Shared Thread-
+ C libs F90 libs C++ libs safe
+SunOS 5.11 32-bit y y y y
+SunOS 5.11 64-bit y y y y
+Windows 7 y y y y
+Windows 7 x64 y y y y
+Windows 7 Cygwin n n n y
+Windows 8.1 y y y y
+Windows 8.1 x64 y y y y
+Mac OS X Mountain Lion 10.8.5 64-bit y n y y
+Mac OS X Mavericks 10.9.5 64-bit y n y y
+Mac OS X Yosemeti 10.10.5 64-bit y n y y
+AIX 6.1 32- and 64-bit y n n y
+CentOS 6.7 Linux 2.6.32 x86_64 GNU y y y y
+CentOS 6.7 Linux 2.6.32 x86_64 Intel y y y y
+CentOS 6.7 Linux 2.6.32 x86_64 PGI y y y y
+CentOS 7.1 Linux 3.10.0 x86_64 GNU y y y y
+CentOS 7.1 Linux 3.10.0 x86_64 Intel y y y y
+Linux 2.6.32-431.11.2.el6.ppc64 y y y y
+
+Compiler versions for each platform are listed in the preceding
+"Supported Platforms" table.
+
+
+More Tested Platforms
+=====================
+The following platforms are not supported but have been tested for this release.
+
+ Linux 2.6.18-431.11.2.el6 g95 (GCC 4.0.3 (g95 0.94!)
+ #1 SMP x86_64 GNU/Linux
+ (platypus)
+
+ Mac OS X El Capitan 10.11 Apple clang/clang++ version 7.0.0 from Xcode 7.0.1
+ 64-bit gfortran GNU Fortran (GCC) 5.2.0
+ (VM)
+
+ Windows 7 Visual Studio 2008 (cmake)
+
+ Windows 7 x64 Visual Studio 2008 (cmake)
+
+ Windows 10 Visual Studio 2013 w/ Intel Fortran 15 (cmake)
+
+ Windows 10 x64 Visual Studio 2013 w/ Intel Fortran 15 (cmake)
+
+ Debian7.5.0 3.2.0-4-amd64 #1 SMP Debian 3.2.51-1 x86_64 GNU/Linux
+ gcc (Debian 4.7.2-5) 4.7.2
+ GNU Fortran (Debian 4.7.2-5) 4.7.2
+ (cmake and autotools)
+
+ Fedora20 3.15.3-200.fc20.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux
+ gcc (GCC) 4.8.3 20140624 (Red Hat 4.8.3-1)
+ GNU Fortran (GCC) 4.8.3 20140624 (Red Hat 4.8.3-1)
+ (cmake and autotools)
+
+ SUSE 13.1 3.11.10-17-desktop #1 SMP PREEMPT x86_64 x86_64 x86_64 GNU/Linux
+ gcc (SUSE Linux) 4.8.1
+ GNU Fortran (SUSE Linux) 4.8.1
+ (cmake and autotools)
+
+ Ubuntu 14.04 3.13.0-35-generic #62-Ubuntu SMP x86_64 GNU/Linux
+ gcc (Ubuntu/Linaro 4.9.1-0ubuntu1) 4.9.1
+ GNU Fortran (Ubuntu/Linaro 4.9.1-0ubuntu1) 4.9.1
+ (cmake and autotools)
+
+ hopper.nersc.gov PrgEnv-gnu/5.2.40
+ gcc (GCC) 4.9.2 20141030 (Cray Inc.)
+ GNU Fortran (GCC) 4.9.2 20141030 (Cray Inc.)
+ g++ (GCC) 4.9.2 20141030 (Cray Inc.)
+
+
+Known Problems
+==============
+* On Windows platforms in debug configurations, the VFD flush1 tests will fail
+ with the split and multi VFD drivers. These tests will display a modal debug
+ dialog which must be answered or wait for the test timeout to expire.
+ (ADB - 2014/06/23 - HDFFV-8851)
+
+* CLANG compiler with the options -fcatch-undefined-behavior and -ftrapv
+ catches some undefined behavior in the alignment algorithm of the macro DETECT_I
+ in H5detect.c (Issue 8147). Since the algorithm is trying to detect the alignment
+ of integers, ideally the flag -fcatch-undefined-behavior shouldn't be used for
+ H5detect.c. In the future, we can separate flags for H5detect.c from the rest of
+ the library. (SLU - 2013/10/16)
+
+* Make provided by Solaris fails in "make check". Solaris users should use
+ gmake to build and install the HDF5 software. (AKC - 2013/10/08 - HDFFV-8534)
+
+* The C++ and FORTRAN bindings are not currently working on FreeBSD with the
+ native release 8.2 compilers (4.2.1), but are working with gcc 4.6 from the
+ ports (and probably gcc releases after that).
+ (QAK - 2012/10/19)
+
+* The following h5dump test case fails in BG/P machines (and potentially other
+ machines that use a command script to launch executables):
+
+ h5dump --no-compact-subset -d "AHFINDERDIRECT::ah_centroid_t[0] it=0 tl=0"
+ tno-subset.h5
+
+ This is due to the embedded spaces in the dataset name being interpreted
+ by the command script launcher as meta-characters, thus passing three
+ arguments to h5dump's -d flag. The command passes if run by hand, just
+ not via the test script.
+ (AKC - 2012/05/03)
+
+* The STDIO VFD does not work on some architectures, possibly due to 32/64
+ bit or large file issues. The basic STDIO VFD test is known to fail on
+ 64-bit SunOS 5.10 on SPARC when built with -m64 and 32-bit OS X/Darwin
+ 10.7.0. The STDIO VFD test has been disabled while we investigate and
+ a fix should appear in a future release.
+ (DER - 2011/10/14 - HDFFV-8235)
+
+* h5diff can report inconsistent results when comparing datasets of enum type
+ that contain invalid values. This is due to how enum types are handled in
+ the library and will be addressed in a future release.
+ (DER - 2011/10/14 - HDFFV-7527)
+
+* The links test can fail under the stdio VFD due to some issues with external
+ links. This will be investigated and fixed in a future release.
+ (DER - 2011/10/14 - HDFFV-7768)
+
+* After the shared library support was fixed for some bugs, it was discovered
+ that "make prefix=XXX install" no longer works for shared libraries. It
+ still works correctly for static libraries. Therefore, if you want to
+ install the HDF5 shared libraries in a location such as /usr/local/hdf5,
+ you need to specify the location via the --prefix option during configure
+ time. E.g, ./configure --prefix=/usr/local/hdf5 ...
+ (AKC - 2011/05/07 - HDFFV-7583)
+
+* The parallel test, t_shapesame, in testpar/, may run for a long time and may
+ be terminated by the alarm signal. If that happens, one can increase the
+ alarm seconds (default is 1200 seconds = 20 minutes) by setting the
+ environment variable, $HDF5_ALARM_SECONDS, to a larger value such as 3600
+ (60 minutes). Note that the t_shapesame test may fail in some systems
+ (see the "While working on the 1.8.6 release..." problem below). If
+ it does, it will waste more time if $HDF5_ALARM_SECONDS is set
+ to a larger value.
+ (AKC - 2011/05/07)
+
+* Shared Fortran libraries are not quite working on AIX. While they are
+ generated when --enable-shared is specified, the Fortran and HL/Fortran
+ tests fail. HL and C++ shared libraries should now be working as intended,
+ however.
+ (MAM - 2011/04/20)
+
+* While working on the 1.8.6 release of HDF5, a bug was discovered that can
+ occur when reading from a dataset in parallel shortly after it has been
+ written to collectively. The issue was exposed by a new test in the parallel
+ HDF5 test suite, but had existed before that. We believe the problem lies with
+ certain MPI implementations and/or file systems.
+
+ We have provided a pure MPI test program, as well as a standalone HDF5
+ program, that can be used to determine if this is an issue on your system.
+ They should be run across multiple nodes with a varying number of processes.
+ These programs can be found at:
+ http://www.hdfgroup.org/ftp/HDF5/examples/known_problems/
+ (NAF - 2011/01/19)
+
+* Not all of the VFL drivers are backward compatible. In H5FDpublic.h, the
+ structure H5FD_class_t changed in 1.8: a new parameter was added to the
+ get_eoa and set_eoa callback functions, a new callback function
+ get_type_map was added, and the public function H5FDrealloc was removed
+ in 1.8. The problem only happens when users define their own driver
+ for 1.6 and try to plug it into the 1.8 library. Because only one user
+ has complained about it, we (Elena, Quincey, and I) decided to leave it
+ as it is (see bug report #1279). Quincey will make a plan for 1.10.
+ (SLU - 2010/02/02)
+
+* The --enable-static-exec configure flag will only statically link libraries
+ if the static version of that library is present. If only the shared version
+ of a library exists (i.e., most system libraries on Solaris, AIX, and Mac,
+ for example, only have shared versions), the flag should still result in a
+ successful compilation, but note that the installed executables will not be
+ fully static. Thus, the only guarantee on these systems is that the
+ executable is statically linked with just the HDF5 library.
+ (MAM - 2009/11/04)
+
+* A dataset created or rewritten with a v1.6.3 library or after cannot be read
+ with the v1.6.2 library or before when the Fletcher32 EDC filter is enabled.
+ There was a bug in the calculation of the Fletcher32 checksum in the
+ library before v1.6.3; the checksum value was not consistent between big-
+ endian and little-endian systems. This bug was fixed in Release 1.6.3.
+ However, after fixing the bug, the checksum value was no longer the same as
+ before on little-endian systems. Library releases after 1.6.4 can still read
+ datasets created or rewritten with an HDF5 library of v1.6.2 or before.
+ (SLU - 2005/06/30)
+
+
+%%%%1.8.15%%%%
+
+
+HDF5 version 1.8.15 released on 2015-05-04
+================================================================================
+
+INTRODUCTION
+============
+
+This document describes the differences between HDF5-1.8.14 and
+HDF5-1.8.15, and contains information on the platforms tested and
+known problems in HDF5-1.8.15.
+
+Links to the HDF5 source code, documentation, and additional materials
+can be found on the HDF5 web page at:
+
+ http://www.hdfgroup.org/products/hdf5/
+
+The HDF5 release can be obtained from:
+
+ http://www.hdfgroup.org/HDF5/release/obtain5.html
+
+User documentation for HDF5 can be accessed directly at this location:
+
+ http://www.hdfgroup.org/HDF5/doc/
+
+All new and modified APIs are listed in detail in the "HDF5 Software Changes
+from Release to Release" document at this location:
+
+ http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html
+
+If you have any questions or comments, please send them to the HDF Help Desk:
+
+ help@hdfgroup.org
+
+
+CONTENTS
+========
+
+- New Features
+- Support for New Platforms, Languages, and Compilers
+- Bug Fixes since HDF5-1.8.14
+- Supported Platforms
+- Supported Configuration Features Summary
+- More Tested Platforms
+- Known Problems
+
+
+New Features
+============
+
+ Configuration
+ -------------
+ - CMake
+
+ Improvements made to the CMake build system.
+
+ The default options were changed to align with the Autotools configure
+ defaults. CMake configure files now support components when packaged
+ with CPack. Windows CPack supports WiX packaging, and will look for
+ WiX and NSIS in the standard locations.
+
+ The CMake minimum has been changed to 3.1.
+
+ (ADB - 2015/04/01 HDFFV-8074, 8968, 9006)
+
+ - cmakehdf5 for CMake building.
+ Added configure options to support the building of the Fortran or CXX
+ API and to enable/disable testing. Use "cmakehdf5 --help" for details.
+ (AKC - 2014/12/09 HDFFV-8932)
+
+ - Building Shared and Parallel Made Explicit
+
+ When --enable-parallel is specified, configure used to disable
+ shared by default.
+
+ Removed the restriction for building shared when parallel is
+ enabled. --disable-shared has to be used explicitly if the user
+ wishes to disable shared libraries.
+
+ (MSC - 2015/02/19 HDFFV-9069)
+
+ - Inferring Parallel Compilers
+
+ configure tried to infer whether a compiler is a parallel compiler
+ with MPI support and would enable parallel HDF5 even if the user did
+ not explicitly request it. This should not happen.
+
+ Disabled inferring parallel compilers to enable parallel HDF5
+ build. --enable-parallel has to be used explicitly to build
+ parallel HDF5 regardless of the compiler type being used.
+
+ (MSC - 2015/02/19 HDFFV-9068)
+
+ - Large File Support Configuration Option
+
+ Removed the option to enable or disable large file support. It will
+ always be enabled.
+
+ (MSC - 2015/02/19 HDFFV-9097)
+
+ - Removed Configuration Feature
+
+ When configure detected that the CodeWarrior compiler was being used it
+ would define a symbol that caused a test in test/tfile.c to be skipped
+ due to a broken CodeWarrior open() command.
+
+ Since this only masks the problem instead of fixing it and we don't
+ support CodeWarrior anyway, this functionality was removed.
+
+ (DER - 2015/02/21, HDFFV-9080)
+
+ - VMS Build/Test Files Have Been Removed
+
+ HDF5 no longer supports VMS, and the files were getting out of date.
+ Since we have no access to a VMS machine, there is no way for us to
+ maintain them.
+
+ A Subversion tag was created at:
+
+ https://svn.hdfgroup.uiuc.edu/tags/vms_last_support_1_8
+
+ immediately before removing the files.
+
+ (DER - 2015-02-26, HDFFV-9147)
+
+ - Removal of --with-default-vfd configure Option
+
+ In theory, this option was intended to allow setting a default
+ VFD that would be used by the library. In practice, the feature
+ only accepted the POSIX (SEC2) VFD (already the default) and
+ the stdio VFD (a demo VFD not intended for production use). The
+ inability to pass key VFD parameters at configure time limits the
+ full implementation of this feature, so it was retired.
+
+ (DER - 2015-02-26, HDFFV-9081)
+
+ - Direct VFD configure Behavior
+
+ The configure options for Linux now allow the Direct VFD to build
+ without passing additional compiler options/defines like _GNU_SOURCE.
+ Passing --enable-direct-vfd is now all that is needed to enable
+ the feature.
+
+ The Direct VFD is now disabled by default since it is intended for
+ specialized audiences. It was previously enabled by default, but the
+ configure script did not set correct POSIX levels, etc., making this
+ a moot point.
+
+ Note that the Direct VFD can only be configured on Linux when
+ the O_DIRECT flag to open()/create() and posix_memalign() function
+ are available. This is unchanged from previous behavior.
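+
+ Once the library has been configured with --enable-direct-vfd, the VFD
+ is selected through a file access property list as usual. A sketch,
+ assuming the H5Pset_fapl_direct(fapl, alignment, block_size, cbuf_size)
+ call declared in H5FDdirect.h (the sizes and file name are
+ illustrative only):
+
+     hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
+
+     /* alignment, block size, and copy-buffer size, in bytes */
+     H5Pset_fapl_direct(fapl, 4096, 4096, 16 * 4096);
+
+     hid_t file = H5Fcreate("direct.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);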
+
+ (DER - 2015-02-26, HDFFV-9057, 7567, 9088, 7566)
+
+ - _POSIX_C_SOURCE, _GNU_SOURCE, and _BSD_SOURCE No Longer Exported
+ to h5cc and Other Compiler Wrappers
+
+ The _POSIX_C_SOURCE, _GNU_SOURCE, and _BSD_SOURCE definitions are
+ not required for using API functions and may conflict with user
+ code requirements.
+
+ (DER - 2015-03-08, HDFFV-9152)
+
+ - Removed the --enable-filters Option from configure
+
+ This option allowed the user to disable selected internal filters,
+ presumably to make the library smaller. It has been removed since
+ it saved little space (the internal filters are small with respect
+ to the overall library size) and was not generally extendible to
+ the library at large due to the large number of #ifdefs that would
+ be required.
+
+ Note that this feature applied to internal filters such as shuffle
+ and n-bit and not external filters like gzip or Szip. Those are still
+ enabled or disabled via their own configure options.
+
+ (DER - 2015-03-08, HDFFV-9086)
+
+ - Removed Obsolete Time Functionality from configure and the C Library
+
+ The library contained some residual functionality from obsolete
+ time zone handling code. This has been removed, and the configure
+ checks for the time functions have been cleaned up.
+
+ * Lumped all the time functionality together in configure.ac.
+ This was previously more spread out due to Solaris issues
+ with the ordering of certain checks.
+
+ * Removed processing that handles __tm_gmtoff members of struct
+ tm. (libc-4)
+
+ * Removed BSDgettimeofday(). (IRIX 5.3)
+
+ * Removed timezone struct handling in gettimeofday() (considered
+ harmful).
+
+ Note that the HDF5 Library stores timestamps in a platform-independent
+ manner, so old files can still be read. This only affects converting
+ system time to HDF5 timestamps.
+
+ The library currently uses the tm_gmtoff member of the tm struct
+ (preferred, if available) or the timezone global variable to
+ construct HDF5 timestamps.
+
+ (DER - 2015-03-09, HDFFV-9083 and 9085)
+
+ - Added -D_DEFAULT_SOURCE to CPPFLAGS on Linux Systems
+
+ This is the replacement for -D_BSD_SOURCE in versions of glibc since 2.19.
+ Since both are defined, it should work for all versions of glibc. Defining
+ both suppresses the warning about defining _BSD_SOURCE.
+
+ (NAF - 2015-04-02, HDFFV-9079)
+
+ Library
+ -------
+ - Added Memory Allocation Functions that Use the Library's Allocator
+
+ HDF5 filters may need to allocate or resize the buffer that is passed
+ to them from the library. If the filter has been compiled separately
+ from the library, it and the library may use different memory
+ allocation libraries for the (re)allocation and free calls. This can
+ cause heap corruption and crashes. This is particularly a problem on
+ Windows since each C run-time library is implemented as a separate
+ shared library, but can also show up on POSIX systems when debug or
+ high-performance allocation libraries are in use.
+
+ Two new functions (H5allocate_memory() and H5resize_memory()) were
+ added to the HDF5 C library. These functions have the same semantics as
+ malloc/calloc and realloc, respectively. Their primary purpose is to
+ allow filter authors to allocate or resize memory using the same
+ memory allocation library as the HDF5 library. Filter authors are
+ highly encouraged to use these new functions in place of malloc,
+ calloc, and realloc. They should also use the H5free_memory() call when
+ freeing memory.
+
+ Note that the filters provided with the library (zlib, szip, etc.) do
+ not experience the problems that these new functions are intended to
+ fix. This work only applies to third-party filters that are compiled
+ separately from the library.
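+
+ A minimal sketch of how a third-party filter might use the new calls
+ in place of malloc/realloc/free; nbytes, buf_size, and buf are assumed
+ to be the standard H5Z_func_t callback parameters, and the sizes are
+ illustrative only:
+
+     size_t out_size = nbytes * 2;
+     void  *outbuf   = H5allocate_memory(out_size, 0);   /* like malloc  */
+
+     if (NULL == outbuf)
+         return 0;                                        /* filter error */
+
+     /* ... produce the filtered data in outbuf; grow it if needed ... */
+     outbuf = H5resize_memory(outbuf, out_size * 2);      /* like realloc */
+
+     H5free_memory(*buf);      /* free the old buffer with the library's
+                                  allocator, never with plain free()     */
+     *buf      = outbuf;
+     *buf_size = out_size * 2;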
+
+ (DER - 2015-04-01, HDFFV-9100)
+
+ - H5Pset_istore_k and H5Pset_sym_k
+
+ These two functions didn't check the value of the input parameter "ik".
+ When 2*ik exceeded 2 bytes of storage, data was lost in the file;
+ for example, some chunks would be overwritten.
+
+ Added validation of "ik" to not exceed the max v1 btree entries (2 bytes)
+ to these two routines.
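+
+ With this check in place, an out-of-range "ik" is rejected instead of
+ silently corrupting the file. A sketch (40000 is just an example of an
+ ik for which 2*ik no longer fits in 2 bytes):
+
+     #include <stdio.h>
+     #include "hdf5.h"
+
+     hid_t fcpl = H5Pcreate(H5P_FILE_CREATE);
+
+     if (H5Pset_istore_k(fcpl, 40000) < 0)       /* 2*ik > 65535 */
+         fprintf(stderr, "ik rejected by the new validation\n");
+
+     H5Pclose(fcpl);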
+
+ (VC - 2015-03-24, HDFFV-9173)
+
+ - Added Functions to Control the Value of H5PL_no_plugin_g without
+ Using an Environment Variable
+
+ Sometimes it is necessary for an application to disable the use of
+ dynamically loaded plugin libraries without requiring the library to
+ be built with plugin support disabled or to set an environment
+ variable to disable plugin support globally.
+
+ Two new functions (H5PLset_loading_state() and H5PLget_loading_state())
+ were added to the HDF5 C Library. These functions require a parameter
+ that indicates which type of dynamically loaded plugin is enabled or
+ disabled.
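+
+ A usage sketch, assuming the plugin-type masks (H5PL_FILTER_PLUGIN,
+ H5PL_ALL_PLUGIN) defined in H5PLpublic.h:
+
+     unsigned int state = 0;
+
+     H5PLset_loading_state(0);                /* disable all plugin types */
+
+     /* ... code that must not load dynamic filter plugins ... */
+
+     H5PLset_loading_state(H5PL_ALL_PLUGIN);  /* re-enable everything     */
+     H5PLget_loading_state(&state);           /* query the current mask   */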
+
+ (ADB - 2015-03-17, HDFFV-8520)
+
+ Parallel Library
+ ----------------
+ - MPI_Finalize and HDF5 Library Shutdown
+
+ Calling HDF5 routines after MPI_Finalize has been called should
+ not be done, since those routines might call MPI functions that
+ cannot be used after the MPI library has been finalized.
+
+ Attached an attribute destroy callback to MPI_COMM_SELF that
+ shuts down the HDF5 library when MPI_COMM_SELF is destroyed,
+ in other words, on MPI_Finalize. This should fix several issues
+ that users see when they forget to close HDF5 objects before
+ calling MPI_Finalize().
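+
+ The recommended ordering in application code is still to close HDF5
+ objects before finalizing MPI. A sketch for a parallel (MPI) build of
+ the library; the file name is illustrative only:
+
+     #include <mpi.h>
+     #include "hdf5.h"
+
+     /* ... after MPI_Init() ... */
+
+     hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
+     H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
+
+     hid_t file = H5Fcreate("pfile.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+
+     /* ... collective I/O ... */
+
+     H5Pclose(fapl);
+     H5Fclose(file);   /* close all HDF5 objects first */
+     MPI_Finalize();   /* then shut down MPI           */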
+
+ (MSC - 2015/02/25, HDFFV-883)
+
+ Tools
+ -----
+ - None
+
+ High-Level APIs
+ ---------------
+ - None
+
+ Fortran API
+ -----------
+ - Added Global Variables
+
+ These new global variables are equivalent to the C definitions
+ without the '_F':
+
+ H5G_UDLINK_F
+ H5G_SAME_LOC_F
+ H5O_TYPE_UNKNOWN_F
+ H5O_TYPE_GROUP_F
+ H5O_TYPE_DATASET_F
+ H5O_NAMED_DATATYPE_F
+ H5O_TYPE_NTYPES_F
+
+ (MSB - 2015/02/03, HDFFV-9040)
+
+
+ C++ API
+ -------
+ - New Wrappers for C Functions H5P[s/g]et_libver_bounds
+
+ Wrappers were added to class H5::FileAccPropList for the
+ C Functions H5Pget_libver_bounds and H5Pset_libver_bounds.
+
+ (BMR, 2015/04/06, Part of HDFFV-9167)
+
+ - New Wrappers to Get the Object Header's Version
+
+ The following wrappers are added to class H5::CommonFG
+ Returns the object header version of an object in a file or group,
+ given the object's name.
+
+ unsigned childObjVersion(const char* objname) const;
+ unsigned childObjVersion(const H5std_string& objname) const;
+
+ (BMR, 2015/04/06)
+
+ - New DataType Constructor
+
+ Added a DataType constructor that takes a PredType object, and this
+ constructor will invoke H5Tcopy to generate another datatype id
+ from a predefined datatype.
+
+ (BMR, 2015/04/06)
+
+
+Support for New Platforms, Languages, and Compilers
+===================================================
+ - Support for Linux 3.10.0-123.20.1.el7 added (LK - 2015/04/01)
+ - Support for Mac OS X Yosemite 10.10 added (AKC - 2015/03/04, HDFFV-9007)
+ - Support for AIX 6.1 added and AIX 5.3 is retired. (AKC - 2015/01/09)
+
+Bug Fixes since HDF5-1.8.14
+===========================
+
+ Configuration
+ -------------
+ - Make uninstall generated "test: argument expected".
+ The error was due to $EXAMPLETOPDIR being used without setting a value
+ first.
+
+ Fixed by assigning it the proper value.
+
+ (AKC - 2015/04/29, HDFFV-9298)
+
+ - Windows Installer Incorrect Display of PATH Environment Variable
+
+ In the Windows installer, the dialog box where the user can elect to
+ add the product's bin path to the %PATH% environment variable displayed
+ an incorrect path. This path was missing the C:\Program Files part
+ and used the POSIX file separator '/' before the bin (<path>/bin,
+ instead of <path>\bin).
+
+ The dialog box text was changed to simply say that the product's bin
+ path would be added instead of explicitly displaying the path.
+ This is in line with most installers. The reason for not fixing the
+ displayed path instead is that it is difficult to pass the correct
+ path from CPack to the NSIS installer for display.
+
+ Note that this was never a code issue - it was just a display
+ problem. The installer always did the right thing when updating the
+ environment variable.
+
+ (DER - 2014/11/14, HDFFV-9016)
+
+ Library
+ -------
+ - Incorrect Usage of List in CMake COMPILE_DEFINITIONS set_property
+
+ The CMake command set_property with COMPILE_DEFINITIONS property
+ needs a quoted semi-colon separated list of values. CMake will
+ transform the list to a series of -D{value} for the compile.
+
+ (ADB - 2014/12/09, HDFFV-9041)
+
+ - Fixed Compile Errors on Windows w/ Visual Studio and CMake When
+ UNICODE is Defined
+
+ The HDF5 Library could not be built on Windows with Visual Studio when
+ UNICODE was defined. This was due to the incorrect use of the TEXT()
+ macro and some Win32 API functions that take TCHAR parameters. The faulty
+ code was a part of the filter plugin functionality. This was a
+ compile-time error that only affected users who build HDF5 from source
+ and define UNICODE, usually when HDF5 is being built as a part of a
+ larger product. There were no run-time effects.
+
+ These errors caused no problems when UNICODE was not defined. HDF5 is
+ normally not built with UNICODE defined and the binaries were
+ unaffected.
+
+ The fix was to remove the TEXT() macro and explicitly use the
+ 'A' form of the Win32 API calls, which expect char strings instead of
+ wchar_t strings.
+
+ Note that HDF5 currently does not support Unicode file paths on Windows.
+
+ (DER - 2015/02/22, HDFFV-8927)
+
+ - Addition of Error Tracing Functionality to Several C API Calls
+
+ A bug in a text processing script caused API calls that return a
+ pointer to not receive error tracing macros/functionality.
+
+ The bug has been corrected and error tracing functionality has been
+ added to the affected API calls. These functions will now correctly
+ print trace information when library errors are encountered.
+
+ (DER - 2015/02/26, HDFFV-9141)
+
+ - H5Rdereference Now Checks for HADDR_UNDEF or Uninitialized References
+
+ When passed HADDR_UNDEF or uninitialized references, the previous
+ behavior of H5Rdereference was to continue to process the reference
+ as a valid address.
+
+ H5Rdereference was changed to return immediately (with an error
+ message) if the references are HADDR_UNDEF or uninitialized.
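+
+ Callers should therefore check the returned identifier. A sketch using
+ the 1.8 object-reference form of the call; dset_id and ref are assumed
+ to come from the surrounding application:
+
+     hobj_ref_t ref;
+
+     /* ... read ref from a reference-typed dataset or attribute ... */
+
+     hid_t obj = H5Rdereference(dset_id, H5R_OBJECT, &ref);
+     if (obj < 0) {
+         /* HADDR_UNDEF or an uninitialized reference was rejected */
+     }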
+
+ (MSB - 2015/3/10, HDFFV-7959)
+
+ - Fixed Bugs in H5Sextent_copy
+
+ H5Sextent_copy would not free the previous extent, resulting in a memory
+ leak. Also, H5Sextent_copy would not update the number of elements
+ selected if the selection was "all", causing various problems. These
+ issues have been fixed.
+
+ (NAF - 2015/04/02)
+
+
+ Parallel Library
+ ----------------
+ - Fixed a Potential Memory Error
+
+ Fixed a potential memory error when performing parallel I/O on a
+ dataset with a single chunk, and at least one process has nothing
+ to do.
+
+ (NAF - 2015/02/16)
+
+ - Parallel Test Problem Fixed
+
+ Fixed problem with parallel tests where they failed beyond a
+ certain number of ranks. All tests should work for any arbitrary
+ number of ranks.
+
+ (MSC - 2014/11/06, HDFFV-1027,8962,8963)
+
+ - MPE Support
+
+ Enabling MPE was causing the HDF5 build to fail. Support for it was
+ dropped at some point in time.
+
+ Fixed problem with enabling MPE. Users should use the community
+ maintained MPE on github (http://git.mpich.org/mpe.git/).
+
+ (MSC - 2015/02/20, HDFFV-9135)
+
+ Performance
+ -------------
+ - None
+
+ Tools
+ -----
+ - h5repack crashed on enumerated 8-bit type.
+
+ The previous version, 1.8.14, introduced an error that caused the
+ reading of an enumerated 8-bit type nested in a compound type to fail.
+
+ Fixed the library code responsible for reading the particular type.
+ (AKC - 2015/03/31, HDFFV-8667)
+
+ - h52gif crashed on non-8-bit images.
+
+ h52gif crashed if instructed to convert images other than 8-bit images.
+
+ h52gif could handle only 8-bit images. Added code to detect non-8-bit
+ images and flag them as failures. Updated the tool document page to
+ reflect the limit.
+ (AKC - 2015/03/31, HDFFV-8957)
+
+ - perform/benchpar.c retired.
+
+ benchpar.c has not been built for a long time and its original purpose
+ is not needed any more.
+ (AKC - 2014/12/19, HDFFV-8156)
+
+ - Source perform/ directory moved to tools/perform.
+ The perform directory is moved to tools/perform for easier maintenance.
+ (AKC - 2014/12/17, HDFFV-9046)
+
+ Fortran API
+ ------------
+ - Fortran Fails with --enable-fortran2003 and Intel 15.x Compilers
+
+ Added BIND(C) to the offending APIs.
+
+ The Fortran Library (--enable-fortran2003) now works using Intel 15.x
+ without the need for any additional compilers flags.
+
+ (MSB - 2015/1/26, HDFFV-9049)
+
+ - h5tenum_insert_f Does Not Work with Default 8 Byte Integers
+ (xlf compiler)
+
+ In the Fortran 90 API, 'value' is no longer cast into the C int type.
+ Therefore, if h5tenum_insert_f is passed an 8 byte integer (via -i8)
+ then 'value' is written as the same type as the default Fortran
+ integer type (which can be 8 bytes).
+
+ A new Fortran 2003 API was added which is more in line with the C
+ API and users are strongly encouraged to use the Fortran 2003 API
+ instead of the Fortran 90 API.
+
+ SUBROUTINE h5tenum_insert_f(type_id, name, value, hdferr)
+ INTEGER(HID_T) , INTENT(IN) :: type_id
+ CHARACTER(LEN=*), INTENT(IN) :: name
+ TYPE(C_PTR) , INTENT(IN) :: value
+ INTEGER, INTENT(OUT) :: hdferr
+
+ (MSB - 2015/2/19, HDFFV-8908)
+
+ - Some Fortran APIs Never Returned the Error State
+
+ Some Fortran APIs never returned the error state: they
+ would always return a positive number. The APIs include
+ the following:
+
+ h5fget_file_image_f
+ h5lget_name_by_idx_f
+ h5oget_comment_by_name_f
+
+ They were corrected to return a negative number as described in
+ the Reference Manual if an error occurred.
+
+ (MSB - 2015/3/19, HDF5-239)
+
+ - Fixed h5pget_class_f
+
+ h5pget_class_f never correlated the class identifier to the property
+ list class name as indicated in the HDF5 Reference Manual; it instead
+ returned a property list class identifier as an INTEGER. The INTEGER
+ needed to be of type INTEGER(HID_T) to be correct.
+
+ The h5pget_class_f API was changed to return an INTEGER(HID_T)
+ property list class identifier instead of an INTEGER. This mimics the
+ intended behavior of the C API.
+
+ (MSB - 2015/3/16, HDFFV-9162)
+
+ C++ API
+ ------
+ - Combined Two H5File::getObjCount Overloaded Methods
+
+ The following two methods
+
+ ssize_t getObjCount(unsigned types) const;
+ ssize_t getObjCount() const;
+
+ were combined into one:
+
+ ssize_t getObjCount(unsigned types = H5F_OBJ_ALL) const;
+
+ (BMR - 2015/04/06)
+
+ - Many Warnings Were Removed
+
+ Many warnings such as conversion, unused variables, missing base
+ class initialization, and initializing base classes in wrong order
+ were removed.
+
+ (BMR, 2015/04/06)
+
+ - Functionality Deprecation
+
+ The following two constructors of classes AbstractDs, IdComponent,
+ H5Location, and H5Object are no longer appropriate after the data member
+ "id" had been moved from IdComponent to the sub-classes in previous
+ releases.
+
+ <Classname>(const hid_t h5_id);
+ <Classname>(const <Classname>& original);
+
+ The copy constructors were no-ops and were removed in 1.8.15. The other
+ constructors will be removed from the 1.10 release, and then from 1.8.17
+ if their removal does not cause any problems.
+
+ (BMR, 2015-04-06)
+
+
+ High-Level APIs:
+ ------
+ - Suppress Warnings from Flex/Bison-generated Code
+
+ Warning suppression #pragmas, etc. have been added to H5LTparse.c and
+ H5LTanalyze.c. We have no control over this code since it's created by
+ a generator.
+
+ (DER - 2015/03/08 - HDFFV-9149)
+
+ - Changed hdf5_hl.h to Include the HDF5 Main Library "hdf5.h"
+
+ Users no longer need to include both hdf5_hl.h and hdf5.h.
+
+ (MSB - 2015/2/14, HDFFV-8685)
+
+
+ - H5PTcreate_fl Does Not Convert to Memory Datatype
+
+ H5PTcreate_fl now converts to the table's native memory datatype
+ to fix the problem of handling BE and LE packet tables.
+
+ (MSB - 2015/2/26 - HDFFV-9042)
+
+ - Fix for H5LT Attribute Functions
+
+ H5LT attribute functions failed to create attributes whose name
+ is a substring of an existing attribute.
+
+ H5LT attribute functions can now create attributes whose name
+ is a substring of an existing attribute.
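+
+ For example, the following sketch now works even though "temp" is a
+ substring of the existing attribute name "temperature" (names, values,
+ and file_id are illustrative only; requires hdf5_hl.h):
+
+     int v1 = 1, v2 = 2;
+
+     H5LTset_attribute_int(file_id, "/dset", "temperature", &v1, 1);
+     H5LTset_attribute_int(file_id, "/dset", "temp",        &v2, 1);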
+
+ (MSB - 2015/2/24, HDFFV-9132)
+
+
+ Fortran High-Level APIs:
+ ------------------------
+
+ - Internal Library Fix for Missing Argument Declaration
+
+ In the interface block for h5tbmake_table_c, "max_char_size_field_names"
+ is listed as an input, but in the argument definitions it is
+ "INTEGER :: max_char_size". This caused no known problems with the
+ Fortran HL API.
+
+ Fixed missing argument definition.
+
+ (MSB - 2015/2/18, HDFFV-8559)
+
+
+ Testing
+ -------
+ - None
+
+
+Supported Platforms
+===================
+The following platforms are supported and have been tested for this release.
+They are built with the configure process unless specified otherwise.
+
+ AIX 6.1 xlc/xlc_r 10.1.0.5
+ (NASA G-ADA) xlC/xlC_r 10.1.0.5
+ xlf90/xlf90_r 12.1.0.6
+
+ Linux 2.6.18-308.13.1.el5PAE GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP i686 i686 i386 compilers for 32-bit applications;
+ (jam) Version 4.1.2 20080704 (Red Hat 4.1.2-55)
+ Version 4.8.4, 4.9.2
+ PGI C, Fortran, C++ Compilers for 32-bit
+ applications;
+ Version 14.10-0
+ Intel(R) C, C++, Fortran Compiler for 32-bit
+ applications;
+ Version 15.0.1.133 (Build 20141023)
+
+ Linux 2.6.18-371.6.1.el5 GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP x86_64 GNU/Linux compilers for 64-bit applications;
+ (koala) Version 4.1.2 20080704 (Red Hat 4.1.2-55)
+ Version 4.8.4, 4.9.2
+ Intel(R) C, C++, Fortran Compilers for
+ applications running on Intel(R) 64;
+ Version 15.0.1.133 Build 20141023
+
+ Linux 2.6.32-431.11.2.el6 GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP x86_64 GNU/Linux compilers:
+ (platypus) Version 4.4.7 20120313
+ Version 4.8.2, Version 4.9.2
+ PGI C, Fortran, C++ for 64-bit target on
+ x86-64;
+ Version 14.10-0
+ Intel(R) C (icc), C++ (icpc), Fortran (icc)
+ compilers:
+ Version 15.0.1.133 Build 20141023
+
+ Linux 3.10.0-123.20.1.el7 GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP x86_64 GNU/Linux compilers:
+ (moohan) Version 4.8.2 20140120 (Red Hat 4.8.2-16)
+ Intel(R) C Intel(R) 64 Compiler XE for
+ applications running on Intel(R) 64,
+ Version 15.0.1.133 Build 20141023
+
+ Linux 2.6.32-431.29.2.el6.ppc64 gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-4)
+ #1 SMP ppc64 GNU/Linux g++ (GCC) 4.4.7 20120313 (Red Hat 4.4.7-4)
+ (ostrich) GNU Fortran (GCC) 4.4.7 20120313 (Red Hat 4.4.7-4)
+ IBM XL C/C++ V13.1
+ IBM XL Fortran V15.1
+
+ SunOS 5.11 32- and 64-bit Sun C 5.12 SunOS_sparc
+ (emu) Sun Fortran 95 8.6 SunOS_sparc
+ Sun C++ 5.12 SunOS_sparc
+
+ Windows 7 Visual Studio 2008 (cmake)
+ Visual Studio 2010 w/ Intel Fortran 14 (cmake)
+ Visual Studio 2012 w/ Intel Fortran 15 (cmake)
+ Visual Studio 2013 w/ Intel Fortran 15 (cmake)
+ Cygwin(CYGWIN_NT-6.1 1.7.34(0.285/5/3) gcc(4.9.2) compiler and gfortran)
+ (cmake and autotools)
+
+ Windows 7 x64 Visual Studio 2008 (cmake)
+ Visual Studio 2010 w/ Intel Fortran 14 (cmake)
+ Visual Studio 2012 w/ Intel Fortran 15 (cmake)
+ Visual Studio 2013 w/ Intel Fortran 15 (cmake)
+
+ Windows 8.1 Visual Studio 2012 w/ Intel Fortran 15 (cmake)
+ Visual Studio 2013 w/ Intel Fortran 15 (cmake)
+
+ Windows 8.1 x64 Visual Studio 2012 w/ Intel Fortran 15 (cmake)
+ Visual Studio 2013 w/ Intel Fortran 15 (cmake)
+
+ Mac OS X Mt. Lion 10.8.5 Apple clang/clang++ version 5.1 from Xcode 5.1
+ 64-bit gfortran GNU Fortran (GCC) 4.8.2
+ (swallow/kite) Intel icc/icpc/ifort version 14.0.2
+
+ Mac OS X Mavericks 10.9.5 Apple clang/clang++ version 6.0 from Xcode 6.1.1
+ 64-bit gfortran GNU Fortran (GCC) 4.8.2
+ (wren/quail) Intel icc/icpc/ifort version 14.0.2
+
+ Mac OS X Yosemite 10.10.2 Apple clang/clang++ version 6.0 from Xcode 6.1.1
+ 64-bit gfortran GNU Fortran (GCC) 4.9.2
+ (osx1010dev/osx1010test) Intel icc/icpc/ifort version 15.0.1
+
+
+Tested Configuration Features Summary
+=====================================
+
+ In the tables below
+ y = tested
+ n = not tested in this release
+ C = Cluster
+ W = Workstation
+ x = not working in this release
+ dna = does not apply
+ ( ) = footnote appears below second table
+ <blank> = testing incomplete on this feature or platform
+
+Platform C F90/ F90 C++ zlib SZIP
+ parallel F2003 parallel
+SunOS 5.11 32-bit n y/y n y y y
+SunOS 5.11 64-bit n y/y n y y y
+Windows 7 y y/y n y y y
+Windows 7 x64 y y/y n y y y
+Windows 7 Cygwin n y/y n y y n
+Windows 8.1 n y/y n y y y
+Windows 8.1 x64 n y/y n y y y
+Mac OS X Mountain Lion 10.8.5 64-bit n y/y n y y y
+Mac OS X Mavericks 10.9.5 64-bit n y/y n y y y
+Mac OS X Yosemeti 10.10.2 64-bit n y/y n y y y
+AIX 6.1 32- and 64-bit n y/n n y y y
+CentOS 5.9 Linux 2.6.18-308 i686 GNU y y/y y y y y
+CentOS 5.9 Linux 2.6.18-308 i686 Intel n y/y n y y y
+CentOS 5.9 Linux 2.6.18-308 i686 PGI n y/y n y y y
+CentOS 5.9 Linux 2.6.18 x86_64 GNU n y/y n y y y
+CentOS 5.9 Linux 2.6.18 x86_64 Intel n y/y n y y y
+CentOS 6.4 Linux 2.6.32 x86_64 GNU y y/y y y y y
+CentOS 6.4 Linux 2.6.32 x86_64 Intel n y/y n y y y
+CentOS 6.4 Linux 2.6.32 x86_64 PGI n y/y n y y y
+CentOS 7.0 Linux 3.10.0 x86_64 GNU y y/y y y y y
+CentOS 7.0 Linux 3.10.0 x86_64 Intel n y/y n y y y
+Linux 2.6.32-431.11.2.el6.ppc64 n y/n n y y y
+
+Platform Shared Shared Shared Thread-
+ C libs F90 libs C++ libs safe
+SunOS 5.11 32-bit y y y y
+SunOS 5.11 64-bit y y y y
+Windows 7 y y y y
+Windows 7 x64 y y y y
+Windows 7 Cygwin n n n y
+Windows 8.1 y y y y
+Windows 8.1 x64 y y y y
+Mac OS X Mountain Lion 10.8.5 64-bit y n y y
+Mac OS X Mavericks 10.9.5 64-bit y n y y
+Mac OS X Yosemite 10.10.2 64-bit y n y y
+AIX 6.1 32- and 64-bit y n n y
+CentOS 5.9 Linux 2.6.18-308 i686 GNU y y y y
+CentOS 5.9 Linux 2.6.18-308 i686 Intel y y y n
+CentOS 5.9 Linux 2.6.18-308 i686 PGI y y y n
+CentOS 5.9 Linux 2.6.18 x86_64 GNU y y y y
+CentOS 5.9 Linux 2.6.18 x86_64 Intel y y y n
+CentOS 6.4 Linux 2.6.32 x86_64 GNU y y y n
+CentOS 6.4 Linux 2.6.32 x86_64 Intel y y y n
+CentOS 6.4 Linux 2.6.32 x86_64 PGI y y y n
+CentOS 7.0 Linux 3.10.0 x86_64 GNU y y y n
+CentOS 7.0 Linux 3.10.0 x86_64 Intel y y y n
+Linux 2.6.32-431.11.2.el6.ppc64 y y y n
+
+Compiler versions for each platform are listed in the preceding
+"Supported Platforms" table.
+
+
+More Tested Platforms
+=====================
+The following platforms are not supported but have been tested for this release.
+
+ Linux 2.6.18-308.13.1.el5PAE MPICH mpich 3.1.3 compiled with
+ #1 SMP i686 i686 i386 gcc 4.9.2 and gfortran 4.9.2
+ (jam) g95 (GCC 4.0.3 (g95 0.94!)
+
+ Linux 2.6.18-431.11.2.el6 MPICH mpich 3.1.3 compiled with
+ #1 SMP x86_64 GNU/Linux gcc 4.9.2 and gfortran 4.9.2
+ (platypus) g95 (GCC 4.0.3 (g95 0.94!)
+
+ FreeBSD 8.2-STABLE i386 gcc 4.5.4 [FreeBSD] 20110526
+ (loyalty) gcc 4.6.1 20110527
+ g++ 4.6.1 20110527
+ gfortran 4.6.1 20110527
+
+ FreeBSD 8.2-STABLE amd64 gcc 4.5.4 [FreeBSD] 20110526
+ (freedom) gcc 4.6.1 20110527
+ g++ 4.6.1 20110527
+ gfortran 4.6.1 20110527
+
+ Debian7.5.0 3.2.0-4-amd64 #1 SMP Debian 3.2.51-1 x86_64 GNU/Linux
+ gcc (Debian 4.7.2-5) 4.7.2
+ GNU Fortran (Debian 4.7.2-5) 4.7.2
+ (cmake and autotools)
+
+ Fedora20 3.15.3-200.fc20.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux
+ gcc (GCC) 4.8.3 20140624 (Red Hat 4.8.3-1)
+ GNU Fortran (GCC) 4.8.3 20140624 (Red Hat 4.8.3-1)
+ (cmake and autotools)
+
+ SUSE 13.1 3.11.10-17-desktop #1 SMP PREEMPT x86_64 x86_64 x86_64 GNU/Linux
+ gcc (SUSE Linux) 4.8.1
+ GNU Fortran (SUSE Linux) 4.8.1
+ (cmake and autotools)
+
+ Ubuntu 14.04 3.13.0-35-generic #62-Ubuntu SMP x86_64 GNU/Linux
+ gcc (Ubuntu/Linaro 4.9.1-0ubuntu1) 4.9.1
+ GNU Fortran (Ubuntu/Linaro 4.9.1-0ubuntu1) 4.9.1
+ (cmake and autotools)
+
+ hopper.nersc.gov PrgEnv-gnu/5.2.40
+ gcc (GCC) 4.9.2 20141030 (Cray Inc.)
+ GNU Fortran (GCC) 4.9.2 20141030 (Cray Inc.)
+ g++ (GCC) 4.9.2 20141030 (Cray Inc.)
+
+
+Known Problems
+==============
+* On Windows platforms in debug configurations, the VFD flush1 tests will fail
+ with the split and multi VFD drivers. These tests display a modal debug
+ dialog which must be answered, or the test will wait until its timeout
+ expires.
+ (ADB - 2014/06/23 - HDFFV-8851)
+
+* CLANG compiler with the options -fcatch-undefined-behavior and -ftrapv
+ catches some undefined behavior in the alignment algorithm of the macro DETECT_I
+ in H5detect.c (Issue 8147). Since the algorithm is trying to detect the alignment
+ of integers, ideally the flag -fcatch-undefined-behavior shouldn't be used for
+ H5detect.c. In the future, we can separate flags for H5detect.c from the rest of
+ the library. (SLU - 2013/10/16)
+
+* Make provided by Solaris fails in "make check". Solaris users should use
+ gmake to build and install the HDF5 software. (AKC - 2013/10/08 - HDFFV-8534)
+
+* The C++ and FORTRAN bindings are not currently working on FreeBSD with the
+ native release 8.2 compilers (4.2.1), but are working with gcc 4.6 from the
+ ports (and probably gcc releases after that).
+ (QAK - 2012/10/19)
+
+* The following h5dump test case fails in BG/P machines (and potentially other
+ machines that use a command script to launch executables):
+
+ h5dump --no-compact-subset -d "AHFINDERDIRECT::ah_centroid_t[0] it=0 tl=0"
+ tno-subset.h5
+
+ This is due to the embedded spaces in the dataset name being interpreted
+ by the command script launcher as meta-characters, thus passing three
+ arguments to h5dump's -d flag. The command passes if run by hand, just
+ not via the test script.
+ (AKC - 2012/05/03)
+
+* The STDIO VFD does not work on some architectures, possibly due to 32/64
+ bit or large file issues. The basic STDIO VFD test is known to fail on
+ 64-bit SunOS 5.10 on SPARC when built with -m64 and 32-bit OS X/Darwin
+ 10.7.0. The STDIO VFD test has been disabled while we investigate and
+ a fix should appear in a future release.
+ (DER - 2011/10/14 - HDFFV-8235)
+
+* h5diff can report inconsistent results when comparing datasets of enum type
+ that contain invalid values. This is due to how enum types are handled in
+ the library and will be addressed in a future release.
+ (DER - 2011/10/14 - HDFFV-7527)
+
+* The links test can fail under the stdio VFD due to some issues with external
+ links. This will be investigated and fixed in a future release.
+ (DER - 2011/10/14 - HDFFV-7768)
+
+* After the shared library support was fixed for some bugs, it was discovered
+ that "make prefix=XXX install" no longer works for shared libraries. It
+ still works correctly for static libraries. Therefore, if you want to
+ install the HDF5 shared libraries in a location such as /usr/local/hdf5,
+ you need to specify the location via the --prefix option during configure
+ time. E.g., ./configure --prefix=/usr/local/hdf5 ...
+ (AKC - 2011/05/07 - HDFFV-7583)
+
+* The parallel test, t_shapesame, in testpar/, may run for a long time and may
+ be terminated by the alarm signal. If that happens, one can increase the
+ alarm seconds (default is 1200 seconds = 20 minutes) by setting the
+ environment variable, $HDF5_ALARM_SECONDS, to a larger value such as 3600
+ (60 minutes). Note that the t_shapesame test may fail in some systems
+ (see the "While working on the 1.8.6 release..." problem below). If
+ it does, it will waste more time if $HDF5_ALARM_SECONDS is set
+ to a larger value.
+ (AKC - 2011/05/07)
+
+* Shared Fortran libraries are not quite working on AIX. While they are
+ generated when --enable-shared is specified, the fortran and hl/fortran
+ tests fail. We are looking into the issue. HL and C++ shared libraries
+ should now be working as intended, however.
+ (MAM - 2011/04/20)
+
+* While working on the 1.8.6 release of HDF5, a bug was discovered that can
+ occur when reading from a dataset in parallel shortly after it has been
+ written to collectively. The issue was exposed by a new test in the parallel
+ HDF5 test suite, but had existed before that. We believe the problem lies with
+ certain MPI implementations and/or file systems.
+
+ We have provided a pure MPI test program, as well as a standalone HDF5
+ program, that can be used to determine if this is an issue on your system.
+ They should be run across multiple nodes with a varying number of processes.
+ These programs can be found at:
+ http://www.hdfgroup.org/ftp/HDF5/examples/known_problems/
+ (NAF - 2011/01/19)
+
+* All the VFL drivers aren't backward compatible. In H5FDpublic.h, the
+ structure H5FD_class_t changed in 1.8. There is a new parameter added to
+ the get_eoa and set_eoa callback functions. A new callback function
+ get_type_map was added. The public function H5FDrealloc was taken
+ out in 1.8. The problem only happens when users define their own driver
+ for 1.6 and try to plug in 1.8 library. Because there's only one user
+ complaining about it, we (Elena, Quincey, and I) decided to leave it as
+ it is (see bug report #1279). Quincey will make a plan for 1.10.
+ (SLU - 2010/02/02)
+
+* The --enable-static-exec configure flag will only statically link libraries
+ if the static version of that library is present. If only the shared version
+ of a library exists (i.e., most system libraries on Solaris, AIX, and Mac,
+ for example, only have shared versions), the flag should still result in a
+ successful compilation, but note that the installed executables will not be
+ fully static. Thus, the only guarantee on these systems is that the
+ executable is statically linked with just the HDF5 library.
+ (MAM - 2009/11/04)
+
+* A dataset created or rewritten with a v1.6.3 library or after cannot be read
+ with the v1.6.2 library or before when the Fletcher32 EDC filter is enabled.
+ There was a bug in the calculation of the Fletcher32 checksum in the
+ library before v1.6.3; the checksum value was not consistent between big-
+ endian and little-endian systems. This bug was fixed in Release 1.6.3.
+ However, after fixing the bug, the checksum value was no longer the same as
+ before on little-endian system. Library releases after 1.6.4 can still read
+ datasets created or rewritten with an HDF5 library of v1.6.2 or before.
+ (SLU - 2005/06/30)
+
+
+%%%%1.8.14%%%%
+
+
+HDF5 version 1.8.14 released on 2014-11-12
+================================================================================
+
+INTRODUCTION
+============
+
+This document describes the differences between HDF5-1.8.13 and
+HDF5-1.8.14, and contains information on the platforms tested and
+known problems in HDF5-1.8.14.
+
+All new and modified APIs are listed in the "HDF5 Software Changes
+from Release to Release" document along with details about previous
+releases at:
+
+ http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html
+
+Links to the HDF5 1.8.14 source code, documentation, and additional materials
+can be found on the HDF5 web page at:
+
+ http://www.hdfgroup.org/products/hdf5/
+
+The HDF5 1.8.14 release can be obtained from:
+
+ http://www.hdfgroup.org/HDF5/release/obtain5.html
+
+User documentation for 1.8.14 can be accessed directly at this location:
+
+ http://www.hdfgroup.org/HDF5/doc/
+
+If you have any questions or comments, please send them to the HDF Help Desk:
+
+ help@hdfgroup.org
+
+
+CONTENTS
+========
+
+- New Features
+- Support for New Platforms, Languages, and Compilers
+- Bug Fixes since HDF5-1.8.13
+- Supported Platforms
+- Supported Configuration Features Summary
+- More Tested Platforms
+- Known Problems
+
+
+New Features
+============
+
+ Configuration
+ -------------
+ - bin/cmakehdf5 configures, builds, and installs the C, C++, Fortran, and
+ High-level APIs. (It used to build the C API only.)
+ (AKC 2014/10/17 HDFFV-8932).
+
+ Library
+ -------
+ - None
+
+ Parallel Library
+ ----------------
+ - Chunk Fill Writes Changed to Collective
+
+ Chunk fill writes were slow because, previously, chunk fills were
+ written independently by rank 0, one block at a time.
+
+ The chunk fill write algorithm was optimized so that all chunk fill
+ values are written collectively in a single MPI-IO call. This should
+ greatly improve performance when creating chunked datasets in parallel,
+ especially when the chunk dimensions are fairly small.
+
+ (MSC - 2014/08/22, HDFFV-8878)
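+
+      As an illustration (a minimal sketch only, not code from the library;
+      it assumes an MPI-enabled HDF5 build, and the file and dataset names
+      are arbitrary), creating a chunked dataset with a fill value in
+      parallel exercises this code path:
+
+          #include <mpi.h>
+          #include "hdf5.h"
+
+          int main(int argc, char *argv[])
+          {
+              MPI_Init(&argc, &argv);
+
+              /* Open the file through the MPI-IO driver */
+              hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
+              H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
+              hid_t file = H5Fcreate("chunked.h5", H5F_ACC_TRUNC,
+                                     H5P_DEFAULT, fapl);
+
+              /* Chunked dataset with small chunks and a fill value; the
+               * fill values for all chunks are now written in a single
+               * collective MPI-IO call at creation time. */
+              hsize_t dims[2]  = {1024, 1024};
+              hsize_t chunk[2] = {16, 16};
+              hid_t   space = H5Screate_simple(2, dims, NULL);
+              hid_t   dcpl  = H5Pcreate(H5P_DATASET_CREATE);
+              H5Pset_chunk(dcpl, 2, chunk);
+              int fill = -1;
+              H5Pset_fill_value(dcpl, H5T_NATIVE_INT, &fill);
+
+              hid_t dset = H5Dcreate2(file, "data", H5T_NATIVE_INT, space,
+                                      H5P_DEFAULT, dcpl, H5P_DEFAULT);
+
+              H5Dclose(dset); H5Pclose(dcpl); H5Sclose(space);
+              H5Fclose(file); H5Pclose(fapl);
+              MPI_Finalize();
+              return 0;
+          }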
+
+ Tools
+ -----
+ - None
+
+ High-level APIs
+ ---------------
+ - None
+
+ Fortran API
+ -----------
+ - None
+
+ C++ API
+ -------
+ - Initialization of Object IDs
+
+ The data member "id" in classes that represent HDF5 objects were
+ initialized to 0, which caused problem for some users.
+
+ Replaced 0 with H5I_INVALID_HID to initialize these "id"s. For the
+ PropList class, H5P_DEFAULT is used instead of H5I_INVALID_HID.
+
+ (BMR - 2014/09/30, HDFFV-4259)
+
+
+
+Support for New Platforms, Languages, and Compilers
+===================================================
+ - None
+
+Bug Fixes since HDF5-1.8.13
+===========================
+
+ Configuration
+ -------------
+ - CMake and SVN URLs
+
+ The SVN URLs will be different when the HDF Group domain name changes.
+
+ Removed the SVN URL references in the cacheinit.cmake and release_docs files.
+
+ (ADB - 2014/10/27, HDFFV-8953)
+
+ - CMake Packaging
+
+ A Fortran module was not generated if the compiler was not F2003
+ compliant.
+
+ Removed the module name from the package list of Fortran modules because
+ that module was never generated. This was only an issue for Fortran
+ compilers that are not F2003 compatible.
+
+ (ADB - 2014/10/16, HDFFV-8932)
+
+ - Shared Library Interface Version Number (soname)
+
+ In order to increase the maintainability of HDF5, an architectural
+ change was made which required the renaming of several public symbols in
+ H5Ppublic.h.
+
+ The shared library interface version number ("soname") has been increased
+ on account of these changes. For a full list of the changed symbols, see
+ the interface compatibility report, which is available as a link off of
+ the 'HDF5 Software Changes from Release to Release' document:
+
+ http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html
+
+ (AKC - 2014/10/03, HDFFV-8937)
+
+ - Configure Settings for Mac OSX Need Defaults for PROD_XXX, DEBUG_XXX,
+ and PROFILE_XXX
+
+ The configure setting files for Mac OSX (config/apple) did not
+ have the default settings of PROD_XXX, DEBUG_XXX, PROFILE_XXX.
+
+ Added the default settings. Mac platforms now build the library with
+ "-O3" optimization when the default clang compiler is used.
+
+ (AKC - 2014/10/01, HDFFV-8933)
+
+ - CMake ConfigureChecks
+
+ Two include files were missing from two C tests.
+
+ Propagated the configure test changes to H5_LDOUBLE_TO_INTEGER_WORKS_TEST
+ and H5_ULLONG_TO_LDOUBLE_PRECISION_TEST to ConfigureChecks.cmake (added
+ stdlib.h and string.h in the HDFTests.c file).
+
+ (ADB - 2014/09/02 HDFFV-8845)
+
+ - CMake Parallel Test Missing
+
+ The source file was removed in the previous release but the parallel
+ test t_posix_compliant was not.
+
+ Removed the t_posix_compliant parallel test from the library.
+
+ (ADB - 2014/8/14 HDFFV-8880)
+
+ - Autotools Reconfigure. Bison. Flex.
+
+ The Bison and Flex files were out of date.
+
+ Bison was upgraded to 2.7, and Flex was upgraded to 2.5.37. The
+ bin/reconfigure script will now execute Bison and Flex and update
+ the hl/src files.
+
+ (ADB - 2014/06/16 HDFFV-8709)
+
+ - Autotools Reconfigure. m4.
+
+ The m4 macro processor was out of date.
+
+ Reconfigured Autotools with m4 upgraded to 1.4.17.
+
+ (ADB - 2014/06/12 HDFFV-8743)
+
+ - Autotools: Modified configure to add an entry at the beginning of AM_LDFLAGS
+ for the hdf5 install directory. Without this entry the relink commands
+ invoked by "make install" to create libraries dependent on libhdf5.so added
+ a dependency on the first libhdf5.so found in any directory in AM_LDFLAGS
+ regardless of its version. (LRK - 2014/10/17 HDFFV-8944)
+
+ - Changed Autotools Build Behavior. Fortran High-level Library.
+
+ The Fortran high-level (HL) library did not compile if the default
+ size of a REAL is DOUBLE PRECISION; the build would fail during
+ compilation.
+
+ Configure now checks whether REAL is DOUBLE PRECISION, Fortran is
+ enabled, and the HL library is enabled. If all three are true, configure
+ stops with an error message.
+
+ (MSB - 2014/8/11, HDFFV-8883/HDFFV-889)
+
+
+
+ Library
+ -------
+ - Fixed Identifier Management Code
+
+ Opening an object returns an identifier; closing the object should
+ free up the identifier. A problem was found where the identifiers
+ were not being freed up correctly.
+
+ Fixed the problem so that identifiers that have been used can be
+ used again after their object has been closed.
+
+ (QAK - 2014/10/16, HDFFV-8930)
+
+ - Removal of DllMain() from Static Windows Builds
+
+ A DllMain() function was added in HDF5 1.8.13 in order to handle
+ win32 thread cleanup. The preprocessor #ifdefs around the DllMain
+ function allowed it to be compiled when the static library is built,
+ which is incorrect behavior that can cause linkage problems in
+ clients.
+
+ The fix was to change the preprocessor #ifdefs to exclude compiling
+ DllMain() in static builds. Our DllMain function is now only
+ compiled when the shared, thread-safe library is built on Windows.
+
+ (DER - 2014/06/13, HDFFV-8837)
+
+ - Enforce Constraint on page_size Parameter in H5Pset_core_write_tracking()
+
+ The reference manual states that the page_size parameter cannot be
+ zero.
+
+ This change checks the page_size parameter to see if it is zero and
+ returns an error code if it is.
+
+ (DER - 2014/08/11, HDFFV-8891)
+
+ - H5Ldelete_by_idx() Seg Fault on Non-existent Group Name
+
+ If a non-existent group name was used by H5Ldelete_by_idx(), a
+ segmentation fault would result.
+
+ The bug has been fixed.
+
+ (MSC - 2014/07/31, HDFFV-8888)
+
+ - Bug in Test When Building Parallel HDF5 on PVFS2
+
+ There was a bug in a test when building Parallel HDF5 on PVFS2.
+
+ The build now uses MPI_File_get_size() instead of stat().
+
+ (MSC - 2014/07/14, HDFFV-8856)
+
+ - MPI-IO Driver Tried to Allocate Space for Zero-length Dataset
+
+ The MPI-IO driver tried to allocate space for a zero-length dataset
+ and failed an assertion.
+
+ Fixed the driver and added a regression test.
+
+ (MSC - 2014/07/03, HDFFV-8761)
+
+
+ Parallel Library
+ ----------------
+ - None
+
+ Performance
+ -------------
+ - None
+
+ Tools
+ -----
+ - None
+
+ Fortran API
+ -------
+ - SIZEOF Replaced by C_SIZEOF and STORAGE_SIZE.
+
+ The intrinsic function SIZEOF is non-standard and should be replaced with a
+ standard intrinsic function.
+
+ If the F2008 intrinsics C_SIZEOF and STORAGE_SIZE are available, they will
+ be used instead of the non-standard SIZEOF intrinsic, even when the SIZEOF
+ function is available.
+
+ (MSB - 2014/6/16, HDFFV-8653)
+
+ - Non-functional API: h5pget_fill_value_f
+
+ The Fortran wrapper h5pget_fill_value_f was calling the wrong C API.
+
+ The correct C API, H5Pget_fill_value, is now called by the Fortran
+ wrapper.
+
+ (MSB - 2014/9/25, HDFFV-8879)
+
+ - Interoperability with C HDF5: H5Literate and h5literate_f
+
+ h5literate_f assumes the return value for the callback function to
+ be of type int (or int_f in C). However, in the C wrapper the return
+ value of H5Literate is type herr_t, and this could cause
+ interoperability issues.
+
+ The callback function should be declared INTEGER(C_INT) for
+ portability. The tests were updated accordingly.
+
+ (MSB - 2014/9/26, HDFFV-8909)
+
+ - Interoperability with C HDF5: Constant INTEGER Parameters with the
+ H5FD Interface
+
+ Wrong type cast of constant Fortran INTEGER parameters was used.
+
+ The following parameter constant types were changed from INTEGER to
+ INTEGER(HID_T) to match the C types: H5FD_CORE, H5FD_FAMILY, H5FD_LOG,
+ H5FD_MPIO, H5FD_MULTI, H5FD_SEC2, and H5FD_STDIO.
+
+ Other internal 'int' types were changed to 'hid_t'; these are
+ transparent to the user.
+
+ (MSB - 2014/7/18, HDFFV-8748)
+
+ C++ API
+ ------
+ - Memory Leaks
+
+ There were several potential memory leaks in the library due to
+ dynamically allocated strings not being freed when failure occurs.
+
+ Applied user's patches to remove these potential memory leaks.
+
+ (BMR - 2014/09/30, HDFFV-8928)
+
+ - Disallow H5F_ACC_CREAT
+
+ H5F_ACC_CREAT was included in the C++ API but the C library does not
+ allow it at this time.
+
+ Removed this flag from the functions in H5File class.
+
+ (BMR - 2014/09/29, HDFFV-8852)
+
+ - Missing Flags in Documentation: H5F_ACC_RDONLY and H5F_ACC_RDWR
+
+ The H5F_ACC_RDONLY and H5F_ACC_RDWR flags were missing from the
+ documentation of the H5File constructors.
+
+ These two flags are now included in the documentation for opening
+ files.
+
+ (BMR - 2014/09/29, HDFFV-8852)
+
+ High-level APIs:
+ ------
+ - Seg Faults in H5TBread_field_name and H5TBread_field_name_f
+
+ When H5TBread_field_name or H5TBread_field_name_f was used to read a
+ field and the name of the field was wrong, a segmentation fault would
+ result.
+
+ Both the C and Fortran APIs were fixed so they no longer seg fault if
+ the name of the field is wrong; both now return a negative value in
+ that case.
+
+ (MSB - 2014/09/29, HDFFV-8912)
+
+ - Possible Buffer Overflow in High-level (HL) APIs
+
+ Multiple HL APIs (H5DSis_scale is one example) had issues:
+ (1) The datatype from the file was re-used as the memory datatype,
+ and
+ (2) No effort was made to ensure that strings were actually
+ null-terminated.
+
+ All of the HL routines now check for NULL pointers, check that strings
+ are null-terminated, and check that string buffers cannot overflow. The
+ minimum length of the two buffers is used in strncmp to avoid reading
+ past the end of either buffer.
+
+ (MSB - 2014/9/29, HDFFV-8670)
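+
+      The general pattern (a sketch of the technique described above, not
+      the library code itself; the function and parameter names here are
+      illustrative) is to bound the comparison by the smaller of the two
+      buffer sizes:
+
+          #include <string.h>
+
+          /* Compare at most the smaller of the two buffer lengths so the
+           * comparison can never read past the end of either buffer.
+           * (A full equality test would also need to compare lengths.) */
+          static int bounded_name_cmp(const char *a, size_t a_len,
+                                      const char *b, size_t b_len)
+          {
+              size_t n = (a_len < b_len) ? a_len : b_len;
+              return strncmp(a, b, n);
+          }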
+
+ - Behavior Change of H5LTdtype_to_text
+
+ If a user buffer was passed in to H5LTdtype_to_text along with the
+ length, then the function would not truncate at the end of the
+ buffer, but would exceed the end of the user buffer.
+
+ H5LTdtype_to_text was changed to truncate the string if the user
+ buffer is too small.
+
+ (MSB - 2014/9/29, HDFFV-8855)
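+
+      Typical usage (a sketch; "dtype" is assumed to be any open datatype
+      identifier) is to query the required length first and then pass a
+      buffer of that size:
+
+          #include <stdio.h>
+          #include <stdlib.h>
+          #include "hdf5.h"
+          #include "hdf5_hl.h"
+
+          void print_dtype_text(hid_t dtype)
+          {
+              size_t len = 0;
+
+              /* A NULL buffer makes the call return the required length */
+              H5LTdtype_to_text(dtype, NULL, H5LT_DDL, &len);
+
+              char *text = (char *)malloc(len);
+              /* If the buffer were too small, the text would now be
+               * truncated instead of overrunning the user buffer. */
+              H5LTdtype_to_text(dtype, text, H5LT_DDL, &len);
+              printf("%s\n", text);
+              free(text);
+          }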
+
+ Fortran High-level APIs:
+ ------
+ - See entry for HDFFV-8912 above.
+
+ Testing
+ -------
+ - A subtest in parallel h5diff (ph5diff) testing was bypassed for the
+ local 32-bit Linux machine due to an unknown issue in the previous
+ version of MPICH. The failure no longer exists in the current MPICH,
+ so the bypass has been removed. (AKC - 2014/11/03 HDFFV-8954)
+
+ - Fixed Incorrect Exit Code Values in Testframe
+
+ The testframe code, which is commonly used by several test programs,
+ had some incorrect exit code values (they were -1). The incorrect exit
+ code values have been fixed. (AKC - 2014/07/22, HDFFV-8881)
+
+Supported Platforms
+===================
+The following platforms are supported and have been tested for this release.
+They are built with the configure process unless specified otherwise.
+
+ AIX 5.3 xlc 10.1.0.5
+ (NASA G-ADA) xlC 10.1.0.5
+ xlf90 12.1.0.6
+
+ Linux 2.6.18-308.13.1.el5PAE GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP i686 i686 i386 compilers for 32-bit applications;
+ (jam) Version 4.1.2 20080704 (Red Hat 4.1.2-54)
+ Version 4.8.2
+ PGI C, Fortran, C++ Compilers for 32-bit
+ applications;
+ Version 13.7-0
+ Intel(R) C, C++, Fortran Compiler for 32-bit
+ applications;
+ Version 14.0.2 (Build 20140120)
+
+ Linux 2.6.18-371.6.1.el5 GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP x86_64 GNU/Linux compilers for 64-bit applications;
+ (koala) Version 4.1.2 20080704 (Red Hat 4.1.2-54)
+ Version 4.8.2
+ Intel(R) C, C++, Fortran Compilers for
+ applications running on Intel(R) 64;
+ Version 14.0.2 (Build 20140120)
+
+ Linux 2.6.32-431.11.2.el6 GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP x86_64 GNU/Linux compilers:
+ (platypus) Version 4.4.7 20120313
+ Version 4.8.2
+ PGI C, Fortran, C++ for 64-bit target on
+ x86-64;
+ Version 13.7-0
+ Intel(R) C (icc), C++ (icpc), Fortran (ifort)
+ compilers:
+ Version 14.0.2 (Build 20140120)
+
+ Linux 2.6.32-431.29.2.el6.ppc64 gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-4)
+ #1 SMP ppc64 GNU/Linux g++ (GCC) 4.4.7 20120313 (Red Hat 4.4.7-4)
+ (ostrich) GNU Fortran (GCC) 4.4.7 20120313 (Red Hat 4.4.7-4)
+ IBM XL C/C++ V13.1
+ IBM XL Fortran V15.1
+
+ SunOS 5.11 32- and 64-bit Sun C 5.12 SunOS_sparc
+ (emu) Sun Fortran 95 8.6 SunOS_sparc
+ Sun C++ 5.12 SunOS_sparc
+
+ Windows 7 Visual Studio 2008 (cmake)
+ Visual Studio 2010 w/ Intel Fortran 14 (cmake)
+ Visual Studio 2012 w/ Intel Fortran 14 (cmake)
+ Visual Studio 2013 w/ Intel Fortran 14 (cmake)
+ Cygwin(CYGWIN_NT-6.1 1.7.32(0.274/5/3) gcc(4.8.3) compiler and gfortran)
+ (cmake and autotools)
+
+ Windows 7 x64 Visual Studio 2008 (cmake)
+ Visual Studio 2010 w/ Intel Fortran 14 (cmake)
+ Visual Studio 2012 w/ Intel Fortran 14 (cmake)
+ Visual Studio 2013 w/ Intel Fortran 14 (cmake)
+
+ Windows 8.1 Visual Studio 2012 w/ Intel Fortran 14 (cmake)
+ Visual Studio 2013 w/ Intel Fortran 14 (cmake)
+
+ Windows 8.1 x64 Visual Studio 2012 w/ Intel Fortran 14 (cmake)
+ Visual Studio 2013 w/ Intel Fortran 14 (cmake)
+
+ Mac OS X Lion 10.7.5 Apple clang/clang++ version 3.0 from Xcode 4.6.1
+ 64-bit gfortran GNU Fortran (GCC) 4.8.2
+ (duck) Intel icc/icpc/ifort version 13.0.3
+
+ Mac OS X Mt. Lion 10.8.5 Apple clang/clang++ version 5.1 from Xcode 5.1
+ 64-bit gfortran GNU Fortran (GCC) 4.8.2
+ (swallow/kite) Intel icc/icpc/ifort version 14.0.2
+
+ Mac OS X Mavericks 10.9.5 Apple clang/clang++ version 6.0 from Xcode 6.0.1
+ 64-bit gfortran GNU Fortran (GCC) 4.8.2
+ (wren) Intel icc/icpc/ifort version 14.0.2
+
+
+Tested Configuration Features Summary
+=====================================
+
+ In the tables below
+ y = tested
+ n = not tested in this release
+ C = Cluster
+ W = Workstation
+ x = not working in this release
+ dna = does not apply
+ ( ) = footnote appears below second table
+ <blank> = testing incomplete on this feature or platform
+
+Platform C F90/ F90 C++ zlib SZIP
+ parallel F2003 parallel
+Solaris2.11 32-bit n y/y n y y y
+Solaris2.11 64-bit n y/y n y y y
+Windows 7 y y/y n y y y
+Windows 7 x64 y y/y n y y y
+Windows 7 Cygwin n y/y n y y n
+Windows 8.1 n y/y n y y y
+Windows 8.1 x64 n y/y n y y y
+Mac OS X Lion 10.7.5 64-bit n y/y n y y y
+Mac OS X Mountain Lion 10.8.5 64-bit n y/y n y y y
+Mac OS X Mavericks 10.9.5 64-bit n y/y n y y ?
+AIX 5.3 32- and 64-bit n y/n n y y y
+CentOS 5.9 Linux 2.6.18-308 i686 GNU y y/y y y y y
+CentOS 5.9 Linux 2.6.18-308 i686 Intel n y/y n y y y
+CentOS 5.9 Linux 2.6.18-308 i686 PGI n y/y n y y y
+CentOS 5.9 Linux 2.6.18 x86_64 GNU n y/y n y y y
+CentOS 5.9 Linux 2.6.18 x86_64 Intel n y/y n y y y
+CentOS 6.4 Linux 2.6.32 x86_64 GNU y y/y y y y y
+CentOS 6.4 Linux 2.6.32 x86_64 Intel n y/y n y y y
+CentOS 6.4 Linux 2.6.32 x86_64 PGI n y/y n y y y
+Linux 2.6.32-431.11.2.el6.ppc64 n y/n n y y y
+
+Platform Shared Shared Shared Thread-
+ C libs F90 libs C++ libs safe
+Solaris2.11 32-bit y y y y
+Solaris2.11 64-bit y y y y
+Windows 7 y y y y
+Windows 7 x64 y y y y
+Windows 7 Cygwin n n n y
+Windows 8.1 y y y y
+Windows 8.1 x64 y y y y
+Mac OS X Lion 10.7.5 64-bit y n y y
+Mac OS X Mountain Lion 10.8.5 64-bit y n y y
+Mac OS X Mavericks 10.9.5 64-bit y n y y
+AIX 5.3 32- and 64-bit y n n y
+CentOS 5.9 Linux 2.6.18-308 i686 GNU y y y y
+CentOS 5.9 Linux 2.6.18-308 i686 Intel y y y n
+CentOS 5.9 Linux 2.6.18-308 i686 PGI y y y n
+CentOS 5.9 Linux 2.6.18 x86_64 GNU y y y y
+CentOS 5.9 Linux 2.6.18 x86_64 Intel y y y n
+CentOS 6.4 Linux 2.6.32 x86_64 GNU y y y n
+CentOS 6.4 Linux 2.6.32 x86_64 Intel y y y n
+CentOS 6.4 Linux 2.6.32 x86_64 PGI y y y n
+Linux 2.6.32-431.11.2.el6.ppc64 y y y n
+
+Compiler versions for each platform are listed in the preceding
+"Supported Platforms" table.
+
+
+More Tested Platforms
+=====================
+The following platforms are not supported but have been tested for this release.
+
+ Linux 2.6.18-308.13.1.el5PAE MPICH mpich 3.1.2 compiled with
+ #1 SMP i686 i686 i386 gcc 4.9.1 and gfortran 4.9.1
+ (jam) g95 (GCC 4.0.3 (g95 0.94!)
+
+ Linux 2.6.18-431.11.2.el6 MPICH mpich 3.1.2 compiled with
+ #1 SMP x86_64 GNU/Linux gcc 4.9.1 and gfortran 4.9.1
+ (platypus) g95 (GCC 4.0.3 (g95 0.94!)
+
+ FreeBSD 8.2-STABLE i386 gcc 4.5.4 [FreeBSD] 20110526
+ (loyalty) gcc 4.6.1 20110527
+ g++ 4.6.1 20110527
+ gfortran 4.6.1 20110527
+
+ FreeBSD 8.2-STABLE amd64 gcc 4.5.4 [FreeBSD] 20110526
+ (freedom) gcc 4.6.1 20110527
+ g++ 4.6.1 20110527
+ gfortran 4.6.1 20110527
+
+ Debian7.5.0 3.2.0-4-amd64 #1 SMP Debian 3.2.51-1 x86_64 GNU/Linux
+ gcc (Debian 4.7.2-5) 4.7.2
+ GNU Fortran (Debian 4.7.2-5) 4.7.2
+ (cmake and autotools)
+
+ Fedora20 3.15.3-200.fc20.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux
+ gcc (GCC) 4.8.3 20140624 (Red Hat 4.8.3-1)
+ GNU Fortran (GCC) 4.8.3 20140624 (Red Hat 4.8.3-1)
+ (cmake and autotools)
+
+ SUSE 13.1 3.11.10-17-desktop #1 SMP PREEMPT x86_64 x86_64 x86_64 GNU/Linux
+ gcc (SUSE Linux) 4.8.1
+ GNU Fortran (SUSE Linux) 4.8.1
+ (cmake and autotools)
+
+ Ubuntu 14.04 3.13.0-35-generic #62-Ubuntu SMP x86_64 GNU/Linux
+ gcc (Ubuntu/Linaro 4.9.1-0ubuntu1) 4.9.1
+ GNU Fortran (Ubuntu/Linaro 4.9.1-0ubuntu1) 4.9.1
+ (cmake and autotools)
+
+ Cray Linux Environment (CLE) PrgEnv-pgi/4.2.34
+ hopper.nersc.gov pgcc 13.6-0 64-bit target on x86-64 Linux -tp istanbul
+ pgf90 13.6-0 64-bit target on x86-64 Linux -tp istanbul
+ pgCC 13.6-0 64-bit target on x86-64 Linux -tp istanbul
+
+
+Known Problems
+==============
+* On Cygwin platforms, the feature to load dynamic filter libraries only looks
+ for libraries with the .so extension. Support for Cygwin cygxxx.dll libraries
+ is planned for the next release.
+ (ADB - 2014/11/04 - HDFFV-8736)
+
+* On Windows platforms in debug configurations, the VFD flush1 tests will fail
+ with the split and multi VFD drivers. These tests display a modal debug
+ dialog which must be answered, or the test will wait until its timeout
+ expires.
+ The flush1 and flush2 tests will be skipped under debug for this release.
+ (ADB - 2014/06/23 - HDFFV-8851)
+
+* CLANG compiler with the options -fcatch-undefined-behavior and -ftrapv
+ catches some undefined behavior in the alignment algorithm of the macro DETECT_I
+ in H5detect.c (Issue 8147). Since the algorithm is trying to detect the alignment
+ of integers, ideally the flag -fcatch-undefined-behavior shouldn't be used for
+ H5detect.c. In the future, we can separate flags for H5detect.c from the rest of
+ the library. (SLU - 2013/10/16)
+
+* Make provided by Solaris fails in "make check". Solaris users should use
+ gmake to build and install the HDF5 software. (AKC - 2013/10/08 - HDFFV-8534)
+
+* The h5dump and h5diff utilities occasionally produce different output
+ between Linux and Windows systems. This is caused by lower-level library
+ routines that fail to write fill values to the user's buffer when reading
+ unallocated chunks from datasets that have a fill value set to
+ H5D_FILL_VALUE_DEFAULT. Due to platform differences the return of
+ spurious data values has only been encountered on Windows 32-bit systems.
+ (Issue HDFFV-8247; JP - 2013/03/27)
+
+* The C++ and FORTRAN bindings are not currently working on FreeBSD with the
+ native release 8.2 compilers (4.2.1), but are working with gcc 4.6 from the
+ ports (and probably gcc releases after that).
+ (QAK - 2012/10/19)
+
+* The STDIO VFD does not work on some architectures, possibly due to 32/64
+ bit or large file issues. The basic STDIO VFD test is known to fail on
+ 64-bit SunOS 5.10 on SPARC when built with -m64 and 32-bit OS X/Darwin
+ 10.7.0. The STDIO VFD test has been disabled while we investigate and
+ a fix should appear in a future release.
+ (DER - 2011/10/14 - HDFFV-8235)
+
+* h5diff can report inconsistent results when comparing datasets of enum type
+ that contain invalid values. This is due to how enum types are handled in
+ the library and will be addressed in a future release.
+ (DER - 2011/10/14 - HDFFV-7527)
+
+* The links test can fail under the stdio VFD due to some issues with external
+ links. This will be investigated and fixed in a future release.
+ (DER - 2011/10/14 - HDFFV-7768)
+
+* After the shared library support was fixed for some bugs, it was discovered
+ that "make prefix=XXX install" no longer works for shared libraries. It
+ still works correctly for static libraries. Therefore, if you want to
+ install the HDF5 shared libraries in a location such as /usr/local/hdf5,
+ you need to specify the location via the --prefix option during configure
+ time. E.g., ./configure --prefix=/usr/local/hdf5 ...
+ (AKC - 2011/05/07 - HDFFV-7583)
+
+* The parallel test, t_shapesame, in testpar/, may run for a long time and may
+ be terminated by the alarm signal. If that happens, one can increase the
+ alarm seconds (default is 1200 seconds = 20 minutes) by setting the
+ environment variable, $HDF5_ALARM_SECONDS, to a larger value such as 3600
+ (60 minutes). Note that the t_shapesame test may fail in some systems
+ (see the "While working on the 1.8.6 release..." problem below). If
+ it does, it will waste more time if $HDF5_ALARM_SECONDS is set
+ to a larger value.
+ (AKC - 2011/05/07)
+
+* Shared Fortran libraries are not quite working on AIX. While they are
+ generated when --enable-shared is specified, the fortran and hl/fortran
+ tests fail. We are looking into the issue. HL and C++ shared libraries
+ should now be working as intended, however.
+ (MAM - 2011/04/20)
+
+* The --with-mpe configure option does not work with Mpich2.
+ (AKC - 2011/03/10)
+
+* While working on the 1.8.6 release of HDF5, a bug was discovered that can
+ occur when reading from a dataset in parallel shortly after it has been
+ written to collectively. The issue was exposed by a new test in the parallel
+ HDF5 test suite, but had existed before that. We believe the problem lies with
+ certain MPI implementations and/or file systems.
+
+ We have provided a pure MPI test program, as well as a standalone HDF5
+ program, that can be used to determine if this is an issue on your system.
+ They should be run across multiple nodes with a varying number of processes.
+ These programs can be found at:
+ http://www.hdfgroup.org/ftp/HDF5/examples/known_problems/
+ (NAF - 2011/01/19)
+
+* All the VFL drivers aren't backward compatible. In H5FDpublic.h, the
+ structure H5FD_class_t changed in 1.8. There is a new parameter added to
+ the get_eoa and set_eoa callback functions. A new callback function
+ get_type_map was added. The public function H5FDrealloc was taken
+ out in 1.8. The problem only happens when users define their own driver
+ for 1.6 and try to plug in 1.8 library. Because there's only one user
+ complaining about it, we (Elena, Quincey, and I) decided to leave it as
+ it is (see bug report #1279). Quincey will make a plan for 1.10.
+ (SLU - 2010/02/02)
+
+* The --enable-static-exec configure flag will only statically link libraries
+ if the static version of that library is present. If only the shared version
+ of a library exists (i.e., most system libraries on Solaris, AIX, and Mac,
+ for example, only have shared versions), the flag should still result in a
+ successful compilation, but note that the installed executables will not be
+ fully static. Thus, the only guarantee on these systems is that the
+ executable is statically linked with just the HDF5 library.
+ (MAM - 2009/11/04)
+
+* Parallel tests failed with 16 processes with data inconsistency at testphdf5
+ / dataset_readAll. Parallel tests also failed with 32 and 64 processes with
+ collective abort of all ranks at t_posix_compliant / allwrite_allread_blocks
+ with MPI IO.
+ (CMC - 2009/04/28)
+
+* A dataset created or rewritten with a v1.6.3 library or after cannot be read
+ with the v1.6.2 library or before when the Fletcher32 EDC filter is enabled.
+ There was a bug in the calculation of the Fletcher32 checksum in the
+ library before v1.6.3; the checksum value was not consistent between big-
+ endian and little-endian systems. This bug was fixed in Release 1.6.3.
+ However, after fixing the bug, the checksum value was no longer the same as
+ before on little-endian system. Library releases after 1.6.4 can still read
+ datasets created or rewritten with an HDF5 library of v1.6.2 or before.
+ (SLU - 2005/06/30)
+
+
+%%%%1.8.13%%%%
+
+
+HDF5 version 1.8.13 released on 2014-05-05
+================================================================================
+
+INTRODUCTION
+============
+
+This document describes the differences between HDF5-1.8.12 and
+HDF5-1.8.13, and contains information on the platforms tested and
+known problems in HDF5-1.8.13.
+For more details, see the files HISTORY-1_0-1_8_0_rc3.txt
+and HISTORY-1_8.txt in the release_docs/ directory of the HDF5 source.
+
+Links to the HDF5 1.8.13 source code, documentation, and additional materials
+can be found on the HDF5 web page at:
+
+ http://www.hdfgroup.org/products/hdf5/
+
+The HDF5 1.8.13 release can be obtained from:
+
+ http://www.hdfgroup.org/HDF5/release/obtain5.html
+
+User documentation for 1.8.13 can be accessed directly at this location:
+
+ http://www.hdfgroup.org/HDF5/doc/
+
+New features in the HDF5-1.8.x release series, including brief general
+descriptions of some new and modified APIs, are described in the "What's New
+in 1.8.0?" document:
+
+ http://www.hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html
+
+All new and modified APIs are listed in detail in the "HDF5 Software Changes
+from Release to Release" document, in the section "Release 1.8.13 (current
+release) versus Release 1.8.12":
+
+ http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html
+
+If you have any questions or comments, please send them to the HDF Help Desk:
+
+ help@hdfgroup.org
+
+
+CONTENTS
+========
+
+- New Features
+- Support for New Platforms, Languages, and Compilers
+- Bug Fixes since HDF5-1.8.12
+- Supported Platforms
+- Supported Configuration Features Summary
+- More Tested Platforms
+- Known Problems
+
+
+New Features
+============
+
+ Configuration
+ -------------
+ - Autotools: Automake updated to 1.14.1 (ADB - 2014/04/08)
+
+ - CMake: Moved minimum CMake version to 2.8.11 which enables better library
+ include processing. (ADB - 2014/03/26)
+
+ - When configuring a thread-safe HDF5 Library it is no longer necessary
+ to specify --enable-threadsafe with --with-pthreads if the Pthreads
+ library is in a standard location. (DER - 2014/04/11 HDFFV-8693)
+
+ Library
+ -------
+ - Added an H5free_memory API call. This should be used to free memory
+ that has been allocated by HDF5 API calls. H5Tget_member_name and
+ H5Pget_class_name are two examples. The main motivation for this call
+ is Windows, where it is common for application code and the HDF5 Library
+ to be using different C run-time libraries (CRT). Using the new call
+ ensures that the same CRT handles both the allocation and free. This
+ new function can also be useful in any case where the library uses a
+ different memory manager than the application, such as when a debug
+ memory manager is in use or when the HDF5 Library is wrapped for use
+ in a managed language like Python or Java. Fixes HDFFV-7710, 8519,
+ and 8851. (DER - 2014/04/11)
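+
+      For example (a minimal sketch; "dtype" is assumed to be an open
+      compound datatype identifier):
+
+          #include <stdio.h>
+          #include "hdf5.h"
+
+          void print_first_member(hid_t dtype)
+          {
+              /* The string is allocated by the HDF5 Library ... */
+              char *name = H5Tget_member_name(dtype, 0);
+              printf("first member: %s\n", name);
+
+              /* ... so it should be released by the library as well, which
+               * matters when the application and the library use different
+               * C run-time libraries. */
+              H5free_memory(name);
+          }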
+
+ - The Core VFD (aka Memory VFD) can now be configured to track dirty
+ regions in the file and only write out the changed regions on
+ flush/close. Additionally, a "page aggregation" size can be set that
+ will aggregate small writes into larger writes. For example, setting
+ a 1 MiB page aggregation size will logically partition the
+ in-memory file into 1 MiB pages that will be written out in their
+ entirety if even a single byte is dirtied. The feature is controlled
+ via the new H5Pset/get_core_write_tracking() API call. A new
+ "core_paged" target has been added to the check-vfd target in
+ test/Makefile.am that exercises the feature over all HDF5 VFD-aware
+ tests. (DER - 2014/04/12)
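+
+      A minimal sketch of enabling the feature (the 1 MiB values are only
+      example settings):
+
+          hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
+
+          /* Core (in-memory) VFD with a backing store on disk */
+          H5Pset_fapl_core(fapl, 1024 * 1024, 1 /* backing_store */);
+
+          /* Track dirty regions and aggregate writes into 1 MiB pages */
+          H5Pset_core_write_tracking(fapl, 1 /* enable */, 1024 * 1024);
+
+          hid_t file = H5Fcreate("core.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);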
+
+ Parallel Library
+ ----------------
+ - Removed MPI-POSIX VFD, as it wasn't helping anyone and was just
+ generating support questions. Application developers performing
+ parallel I/O should always use the MPI-IO VFD.
+ (QAK - 2014/03/28 HDFFV-8659)
+
+ - Improved parallel I/O support to allow collective I/O on point
+ selections. (QAK - 2014/03/15)
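+
+      A sketch of the now-supported combination (assumes an MPI build; "dset"
+      is an open dataset in a file opened with the MPI-IO driver and
+      "mpi_rank" is the rank of the calling process):
+
+          /* Two disjoint points per rank in a 1-D dataset */
+          hsize_t coords[2] = {2 * (hsize_t)mpi_rank, 2 * (hsize_t)mpi_rank + 1};
+          hid_t   fspace = H5Dget_space(dset);
+          H5Sselect_elements(fspace, H5S_SELECT_SET, 2, coords);
+
+          hsize_t nelem  = 2;
+          hid_t   mspace = H5Screate_simple(1, &nelem, NULL);
+
+          /* Request collective transfer for the point selection */
+          hid_t dxpl = H5Pcreate(H5P_DATASET_XFER);
+          H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
+
+          int values[2] = {1, 2};
+          H5Dwrite(dset, H5T_NATIVE_INT, mspace, fspace, dxpl, values);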
+
+ Tools
+ -----
+ - None
+
+ High-Level APIs
+ ---------------
+ - None
+
+ Fortran API
+ -----------
+ - Wrappers h5pset_file_image_f and h5pget_file_image_f were added to the
+ library. (MSB - 2014/1/2014)
+
+ C++ API
+ -------
+ - The following new features are added:
+ + Wrappers to class H5Object to get an object's name (HDFFV-8548).
+ ssize_t getObjName(char *obj_name, size_t buf_size = 0)
+ ssize_t getObjName(H5std_string& obj_name, size_t len = 0)
+ H5std_string getObjName()
+ + Wrappers to class H5CommonFG to get a child object's type from a
+ group or file (HDFFV-8367).
+ H5O_type_t childObjType(const H5std_string& objname)
+ H5O_type_t childObjType(const char* objname)
+ H5O_type_t childObjType(hsize_t index,
+ H5_index_t index_type=H5_INDEX_NAME,
+ H5_iter_order_t order=H5_ITER_INC, const char* objname=".")
+ + Wrappers to class DSetMemXferPropList for setting/getting a transform
+ property list (HDFFV-7907).
+ DSetMemXferPropList(const char* expression);
+ void setDataTransform(const char* expression)
+ void setDataTransform(const H5std_string& expression)
+ ssize_t getDataTransform(char* exp, size_t buf_size=0)
+ H5std_string getDataTransform()
+ + Wrapper to CompType for setting size to compound datatype (HDFFV-8642).
+ void setSize(size_t size)
+ + Overloaded functions to provide prototypes that declare constant
+ arguments const (HDFFV-3384). These include:
+ DataSet::fillMemBuf
+ DataSet::getVlenBufSize
+ DataSpace::extentCopy
+ DataType::commit
+ FileAccPropList::setSplit
+ H5File::getVFDHandle
+ + Additional overload to class H5Location to get a comment as a char*
+ ssize_t getComment(const char* name, size_t buf_size, char* comment)
+ + Additional overloads to class Attribute to get an attribute's name for
+ convenience:
+ ssize_t getName(char* attr_name, size_t buf_size=0)
+ ssize_t getName(H5std_string& attr_name, size_t buf_size=0)
+ (BMR, 2014/04/15)
+ + A static wrapper to Exception for printing the error stack without an
+ instance of Exception
+ static void printErrorStack(FILE* stream = stderr,
+ hid_t err_stack = H5E_DEFAULT);
+ (BMR, 2014/04/25)
+
+
+Support for New Platforms, Languages, and Compilers
+===================================================
+ Mac OS X 10.6 Snow Leopard is not supported by Apple any more. In view of
+ the added support of Mac OS X 10.9, Mac OS X 10.6 is retired from HDF5
+ supported platforms. (AKC - 2014/03/14 HDFFV-8704)
+
+ Mac OS X 10.9 Mavericks is supported. (AKC - 2014/03/04 HDFFV-8694)
+
+
+Bug Fixes since HDF5-1.8.12
+===========================
+
+ Configuration
+ -------------
+ - CMake: When CMake commands are executed individually on the command line
+ and the external filters are being built, the CMAKE_BUILD_TYPE define
+ must be set to the same value as the configuration
+ (-DCMAKE_BUILD_TYPE:STRING=Release if using -C Release). This is needed
+ by the szip and zlib filter build commands. (ADB - HDFFV-8695)
+
+ - CMake: Removed use of the XLATE_UTILITY program.
+ (ADB - 2014/03/28 HDFFV-8640)
+
+ - CMake: Added missing quotes in setting the CMAKE_EXE_LINKER_FLAGS for the
+ MPI option. (ADB - 2014/02/27 HDFFV-8674)
+
+ - CMake: Configuration of the HDF5 C++ or Fortran libraries with the
+ thread-safety feature.
+
+ C++ and/or Fortran + thread-safe is enforced as a non-supported
+ configuration. This matches the autotools. (DER - 2014/04/11)
+
+ - CMake: Configuration of static HDF5 C library with the thread-safety
+ feature.
+
+ Static + thread-safe + Win32 threads is not a supported configuration
+ due to the inability to automatically clean up thread-local storage.
+ This is expected to be fixed in a future release. In the meantime, a
+ work-around that uses internal functionality may allow the combination
+ to be used without resource leaks. Contact the help desk for more
+ information. (DER - 2014/04/11)
+
+ - Autotools: Several changes were done to configure and installcheck.
+
+ An export of LD_LIBRARY_PATH=<szip library location> was
+ removed from configure; make installcheck was revised to run
+ scripts installed in share/hdf5_examples to use the installed h5cc, etc.
+ to compile and run example source files also installed there.
+
+ Make installcheck will now fail when a shared szip or other external lib
+ file cannot be found in the same manner that executables compiled and
+ linked with h5cc will fail to run when those lib files cannot be found
+ after install. Make installcheck should pass after setting
+ LD_LIBRARY_PATH to the szip location. (LRK - 2014/04/16)
+
+ Library
+ -------
+ - A GNU Make directive (.NOTPARALLEL) was added to fortran/test/Makefile.
+
+ AIX native make does not support this directive and will fail if
+ parallel make (e.g. make -j4) is used to build the library. AIX users
+ should either not use parallel make or install GNU Make to build the
+ library.
+ (AKC 2014/04/08 HDFFV-8738)
+
+ - H5R.c: H5Rget_name gave an assertion failure if the "name" parameter
+ was NULL.
+
+ Fixed H5Rget_name to return the size of the buffer needed to read a
+ name of the referenced object in this case. The size doesn't include
+ the NULL terminator. H5Rget_name returns negative on failure.
+ (MSB - 2014/01/22 HDFFV-8620)
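+
+      The usual calling pattern (a sketch; "file" is an open file identifier
+      and "ref" an object reference of type hobj_ref_t obtained elsewhere)
+      is therefore:
+
+          /* A NULL name buffer queries the needed size; the returned size
+           * does not include the NUL terminator. */
+          ssize_t size = H5Rget_name(file, H5R_OBJECT, &ref, NULL, 0);
+          if (size < 0) {
+              /* handle the error */
+          }
+
+          char *name = (char *)malloc((size_t)size + 1);
+          H5Rget_name(file, H5R_OBJECT, &ref, name, (size_t)size + 1);
+          printf("referenced object: %s\n", name);
+          free(name);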
+
+ - H5Z.c: H5Zfilter_avail didn't check if a filter was available as a
+ dynamically loaded filter. The error manifested itself in the h5repack
+ tool when removing a user-defined, dynamically loaded filter.
+
+ Added code to search for the filter among the dynamically loaded filters
+ after the function fails to find it among the registered filters.
+ (ADB - 2014/03/03 HDFFV-8629)
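+
+      For example (a sketch; MY_FILTER_ID stands for the numeric ID under
+      which a hypothetical user-defined, dynamically loaded filter was
+      registered):
+
+          #define MY_FILTER_ID 257   /* hypothetical filter ID */
+
+          htri_t avail = H5Zfilter_avail(MY_FILTER_ID);
+          if (avail > 0) {
+              /* filter is registered or can be dynamically loaded */
+          } else if (avail == 0) {
+              /* filter is not available */
+          } else {
+              /* error while checking */
+          }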
+
+ - Memory leak: a memory leak was observed in conjunction with the
+ H5TS_errstk_key_g thread-local variable allocated in the H5E_get_stack
+ function in H5E.c.
+
+ The shared HDF5 thread-safe library now no longer leaks thread-local
+ storage resources on Windows with Win32 threads. Currently, there is
+ no solution for this problem when HDF5 is statically built. We
+ disabled the build of the static HDF5 thread-safe library with
+ Win32 threads. (DER - 2014/04/11 HDFFV-8518)
+
+ - H5Dio.c: Improved handling of NULL pointers to H5Dread/H5Dwrite
+ calls. Credit to Jason Newton (nevion@gmail.com) for the original patch.
+
+ H5Dwrite/read failed when a NULL pointer was passed for a data buffer
+ and 0 elements were selected. Fixed. (QAK - 2014/04/16 HDFFV-8705)
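+
+      In other words, a call of the following form (a sketch; "dset" is an
+      open dataset identifier) now succeeds instead of failing:
+
+          hid_t fspace = H5Dget_space(dset);
+          H5Sselect_none(fspace);              /* zero elements in the file */
+
+          hid_t mspace = H5Scopy(fspace);
+          H5Sselect_none(mspace);              /* zero elements in memory */
+
+          /* A NULL buffer is accepted because no data will be transferred */
+          H5Dwrite(dset, H5T_NATIVE_INT, mspace, fspace, H5P_DEFAULT, NULL);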
+
+ - Deprecated API (1_6 API): Improved handling of closing the library and
+ re-accessing it with a deprecated routine.
+
+ When a program used a deprecated API (for example, H5Gcreate1),
+ closed the library, and reopened it again to access a group, dataset,
+ datatype, dataspace, attribute, or property list, HDF5 failed to
+ provide an identifier for the object. Fixed.
+ (NAF, QAK - 2014/04/16 HDFFV-8232)
+
+ Parallel Library
+ ----------------
+ - Fixed a missing H5F_Provisional module in HDF5mpio.f90
+ (MSB - 2014/2/7 HDFFV-8651)
+
+ Performance
+ -------------
+ - None
+
+ Tools
+ -----
+ - The h5diff tool would report that a datafile compared with an exact
+ copy of the same datafile had differences. This was due to the issue
+ below of reading unwritten chunks. This problem is also fixed.
+ (AKC - 2014/05/01 HDFFV-8637)
+
+ - The h5dump and h5diff utilities occasionally produced different output
+ between Linux and Windows systems. This has been fixed.
+
+ This happened to datasets that used chunked storage, with default fill
+ values, and some of the chunks had not been written.
+ When the dataset was read, the library failed to write the default fill
+ values to parts of the user buffer that were associated with the unwritten
+ chunks. (JP - 2014/05/01 HDFFV-8247)
+
+ - The compress option is retired from bin/release.
+ (AKC - 2014/04/25 HDFFV-8755)
+
+ - bin/release has a new option "zip" that produces a release zip file for
+ the Windows platform. (AKC - 2014/04/24 HDFFV-8433)
+
+ - h5diff: Several failures relating to handling of strings attributes
+ are fixed.
+
+ The tool crashed or gave an error message when one of the strings had a
+ fixed-size type and the other a variable-length type. h5diff now flags such
+ strings as "not comparable". We plan to enhance the tool to handle
+ strings of different types in future releases.
+ (AKC - 2014/04/18 HDFFV-8625, 8639, 8745)
+
+ - h5repack: h5repack would not remove user-defined filters.
+ Fixed by modifying h5repack to check if the filter is registered or
+ can be dynamically loaded. (ADB - 2014/03/03 HDFFV-8629)
+
+ F90 API
+ -------
+ - H5D_CHUNK_CACHE_NSLOTS_DFLT_F and H5D_CHUNK_CACHE_NBYTES_DFLT_F were
+ changed from the default KIND for INTEGER to INTEGER of KIND size_t.
+ (MSB - 2014/3/31 HDFFV-8689)
+
+ C++ API
+ ------
+ - Added throw() to all exception destructors. Credit to Jason Newton
+ (nevion@gmail.com) for the patch. (BMR - 2014/4/15 HDFFV-8623)
+ - Changed the default value for H5Location::getComment from 256 to 0
+ to conform to the C function and because it makes more sense.
+ (BMR - 2014/4/15)
+
+ High-Level APIs:
+ ------
+ - None
+
+ Fortran High-Level APIs:
+ ------
+ - None
+
+ Testing
+ -------
+ - testhdf5 now exits with EXIT_SUCCESS(0) if no errors, else
+ EXIT_FAILURE(1). (AKC - 2014/01/27 HDFFV-8572)
+
+ - The big test now pays attention to the HDF5_DRIVER environment variable.
+ Previously, it would run all tests with the family, stdio, and sec2
+ virtual file drivers (VFDs) for each VFD in the check-vfd make target,
+ regardless of the variable setting. It now checks the variable and
+ either runs the appropriate VFD-specific tests or skips as needed.
+ This saves much testing time. Fixes HDFFV-8554. (DER - 2014/04/11)
+
+Supported Platforms
+===================
+The following platforms are supported and have been tested for this release.
+They are built with the configure process unless specified otherwise.
+
+ AIX 5.3 xlc 10.1.0.5
+ (NASA G-ADA) xlC 10.1.0.5
+ xlf90 12.1.0.6
+ gmake v3.82
+
+ Linux 2.6.18-308.13.1.el5PAE GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP i686 i686 i386 compilers for 32-bit applications;
+ (jam) Version 4.1.2 20080704 (Red Hat 4.1.2-54)
+ Version 4.8.2
+ PGI C, Fortran, C++ Compilers for 32-bit
+ applications;
+ Version 13.7-0
+ Intel(R) C, C++, Fortran Compiler for 32-bit
+ applications;
+ Version 14.0.2 (Build 20140120)
+
+ Linux 2.6.18-371.6.1.el5 GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP x86_64 GNU/Linux compilers for 64-bit applications;
+ (koala) Version 4.1.2 20080704 (Red Hat 4.1.2-54)
+ Version 4.8.2
+ PGI C, Fortran, C++ for 64-bit target on
+ x86-64;
+ Version 13.7-0
+ Intel(R) C, C++, Fortran Compilers for
+ applications running on Intel(R) 64;
+ Version 14.0.2 (Build 20140120)
+
+ Linux 2.6.32-431.11.2.el6 GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP x86_64 GNU/Linux compilers:
+ (platypus) Version 4.4.7 20120313
+ Version 4.8.2
+ PGI C, Fortran, C++ for 64-bit target on
+ x86-64;
+ Version 13.7-0
+ Intel(R) C (icc), C++ (icpc), Fortran (ifort)
+ compilers:
+ Version 14.0.2 (Build 20140120)
+
+ Linux 2.6.32-431.11.2.el6.ppc64 gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-3)
+ #1 SMP ppc64 GNU/Linux g++ (GCC) 4.4.7 20120313 (Red Hat 4.4.7-3)
+ (ostrich) GNU Fortran (GCC) 4.4.7 20120313 (Red Hat 4.4.7-3)
+ IBM XL C/C++ V11.1
+ IBM XL Fortran V13.1
+
+ SunOS 5.11 32- and 64-bit Sun C 5.12 SunOS_sparc
+ (emu) Sun Fortran 95 8.6 SunOS_sparc
+ Sun C++ 5.12 SunOS_sparc
+
+ Windows 7 Visual Studio 2008 w/ Intel Fortran 14 (cmake)
+ Visual Studio 2010 w/ Intel Fortran 14 (cmake)
+ Visual Studio 2012 w/ Intel Fortran 14 (cmake)
+ Cygwin(CYGWIN_NT-6.1 1.7.28(0.271/5/3) gcc(4.8.2) compiler and gfortran)
+ (cmake and autotools)
+
+ Windows 7 x64 Visual Studio 2008 w/ Intel Fortran 14 (cmake)
+ Visual Studio 2010 w/ Intel Fortran 14 (cmake)
+ Visual Studio 2012 w/ Intel Fortran 14 (cmake)
+
+ Windows 8.1 Visual Studio 2012 w/ Intel Fortran 14 (cmake)
+
+ Windows 8.1 x64 Visual Studio 2012 w/ Intel Fortran 14 (cmake)
+
+ Mac OS X Lion 10.7.3 Apple clang/clang++ version 3.0 from Xcode 4.6.1
+ 64-bit gfortran GNU Fortran (GCC) 4.6.2
+ (duck) Intel icc/icpc/ifort version 13.0.3
+
+ Mac OS X Mt. Lion 10.8.5 Apple clang/clang++ version 5.0 from Xcode 5.0.2
+ 64-bit gfortran GNU Fortran (GCC) 4.8.2
+ (swallow/kite) Intel icc/icpc/ifort version 14.0.2
+
+ Mac OS X Mavericks 10.9.2 Apple clang/clang++ version 5.1 from Xcode 5.1
+ 64-bit gfortran GNU Fortran (GCC) 4.8.2
+ (wren/quail) Intel icc/icpc/ifort version 14.0.2
+
+
+Tested Configuration Features Summary
+=====================================
+
+ In the tables below
+ y = tested
+ n = not tested in this release
+ C = Cluster
+ W = Workstation
+ x = not working in this release
+ dna = does not apply
+ ( ) = footnote appears below second table
+ <blank> = testing incomplete on this feature or platform
+
+Platform C F90/ F90 C++ zlib SZIP
+ parallel F2003 parallel
+Solaris2.11 32-bit n y/y n y y y
+Solaris2.11 64-bit n y/y n y y y
+Windows 7 y y/y n y y y
+Windows 7 x64 y y/y n y y y
+Windows 7 Cygwin n y/y n y y y
+Windows 8.1 n y/y n y y y
+Windows 8.1 x64 n y/y n y y y
+Mac OS X Lion 10.7.3 64-bit n y/y n y y y
+Mac OS X Mountain Lion 10.8.1 64-bit n y/y n y y y
+Mac OS X Mavericks 10.9.1 64-bit n y/y n y y y
+AIX 5.3 32- and 64-bit n y/n n y y y
+CentOS 5.9 Linux 2.6.18-308 i686 GNU y y/y y y y y
+CentOS 5.9 Linux 2.6.18-308 i686 Intel n y/y n y y y
+CentOS 5.9 Linux 2.6.18-308 i686 PGI n y/y n y y y
+CentOS 5.9 Linux 2.6.18 x86_64 GNU n y/y n y y y
+CentOS 5.9 Linux 2.6.18 x86_64 Intel n y/y n y y y
+CentOS 6.4 Linux 2.6.32 x86_64 GNU y y/y y y y y
+CentOS 6.4 Linux 2.6.32 x86_64 Intel n y/y n y y y
+CentOS 6.4 Linux 2.6.32 x86_64 PGI n y/y n y y y
+Linux 2.6.32-431.11.2.el6.ppc64 n y/n n y y y
+OpenVMS IA64 V8.4 n y/n n y y n
+
+Platform Shared Shared Shared Thread-
+ C libs F90 libs C++ libs safe
+Solaris2.11 32-bit y y y y
+Solaris2.11 64-bit y y y y
+Windows 7 y y y y
+Windows 7 x64 y y y y
+Windows 7 Cygwin n n n y
+Windows 8.1 y y y y
+Windows 8.1 x64 y y y y
+Mac OS X Lion 10.7.3 64-bit y n y y
+Mac OS X Mountain Lion 10.8.1 64-bit y n y y
+Mac OS X Mavericks 10.9.1 64-bit y n y y
+AIX 5.3 32- and 64-bit y n n y
+CentOS 5.9 Linux 2.6.18-308 i686 GNU y y y y
+CentOS 5.9 Linux 2.6.18-308 i686 Intel y y y n
+CentOS 5.9 Linux 2.6.18-308 i686 PGI y y y n
+CentOS 5.9 Linux 2.6.18 x86_64 GNU y y y y
+CentOS 5.9 Linux 2.6.18 x86_64 Intel y y y n
+CentOS 6.4 Linux 2.6.32 x86_64 GNU y y y n
+CentOS 6.4 Linux 2.6.32 x86_64 Intel y y y n
+CentOS 6.4 Linux 2.6.32 x86_64 PGI y y y n
+Linux 2.6.32-431.11.2.el6.ppc64 y y y n
+OpenVMS IA64 V8.4 n n n n
+
+Compiler versions for each platform are listed in the preceding
+"Supported Platforms" table.
+
+
+More Tested Platforms
+=====================
+The following platforms are not supported but have been tested for this release.
+
+ Linux 2.6.18-308.13.1.el5PAE MPICH mpich 3.1 compiled with
+ #1 SMP i686 i686 i386 gcc 4.8.2 and gfortran 4.8.2
+ (jam) g95 (GCC 4.0.3 (g95 0.94!)
+
+ Linux 2.6.18-431.11.2.el6 MPICH mpich 3.1 compiled with
+ #1 SMP x86_64 GNU/Linux gcc 4.8.2 and gfortran 4.8.2
+ (platypus) g95 (GCC 4.0.3 (g95 0.94!)
+
+ FreeBSD 8.2-STABLE i386 gcc 4.5.4 [FreeBSD] 20110526
+ (loyalty) gcc 4.6.1 20110527
+ g++ 4.6.1 20110527
+ gfortran 4.6.1 20110527
+
+ FreeBSD 8.2-STABLE amd64 gcc 4.5.4 [FreeBSD] 20110526
+ (freedom) gcc 4.6.1 20110527
+ g++ 4.6.1 20110527
+ gfortran 4.6.1 20110527
+
+ Debian7.1.0 3.2.0-4-amd64 #1 SMP Debian 3.2.51-1 x86_64 GNU/Linux
+ gcc (Debian 4.7.2-5) 4.7.2
+ GNU Fortran (Debian 4.7.2-5) 4.7.2
+ (cmake and autotools)
+
+ Fedora20 3.11.10-301.fc20.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux
+ gcc (GCC) 4.8.2 20131212 (Red Hat 4.8.2-7)
+ GNU Fortran (GCC) 4.8.2 20130603 (Red Hat 4.8.2-7)
+ (cmake and autotools)
+
+ SUSE 13.1 3.11.6-4-desktop #1 SMP PREEMPT x86_64 x86_64 x86_64 GNU/Linux
+ gcc (SUSE Linux) 4.8.1
+ GNU Fortran (SUSE Linux) 4.8.1
+ (cmake and autotools)
+
+ Ubuntu 13.10 3.11.0-13-generic #20-Ubuntu SMP x86_64 GNU/Linux
+ gcc (Ubuntu/Linaro 4.8.1-10ubuntu8) 4.8.1
+ GNU Fortran (Ubuntu/Linaro 4.8.1-10ubuntu8) 4.8.1
+ (cmake and autotools)
+
+ Cray Linux Environment (CLE) PrgEnv-pgi/4.0.46
+ hopper.nersc.gov pgcc 12.5-0 64-bit target on x86-64 Linux -tp shanghai
+ pgf90 12.5-0 64-bit target on x86-64 Linux -tp shanghai
+ pgCC 12.5-0 64-bit target on x86-64 Linux -tp shanghai
+
+
+Known Problems
+==============
+* When reading or writing a dataset (H5Dread/H5Dwrite) with a large selection
+ size (e.g., 2GB, roughly 500 million 4-byte integers or floating point
+ numbers), some I/O systems may not be able to process it correctly.
+ We advise users to find out system limits before using large selections. If
+ I/O size limits exist, applications should use the HDF5 partial I/O
+ capabilities (e.g., H5Sselect_hyperslab(...)) to divide large requests into
+ smaller sizes, as sketched below. In this case we also advise users not to
+ use chunk storage sizes larger than 2GB, since the HDF5 library performs I/O
+ on the entire chunk. We will work on the HDF5 library to divide large data
+ requests into smaller I/O requests.
+ (AKC 2014/05/02 HDFFV-8479)
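+
+  A sketch of that approach (it assumes a 1-D dataset "dset" of N_TOTAL
+  native ints and a matching in-memory buffer "buf"; the block size is only
+  an example value):
+
+      #define BLOCK_ELEMS (64 * 1024 * 1024)    /* ~256 MB of ints per write */
+
+      hid_t   fspace = H5Dget_space(dset);
+      hsize_t block  = BLOCK_ELEMS;
+      hid_t   mspace = H5Screate_simple(1, &block, NULL);
+      hsize_t zero   = 0;
+      hsize_t start, count;
+
+      for (start = 0; start < N_TOTAL; start += BLOCK_ELEMS) {
+          count = (N_TOTAL - start < BLOCK_ELEMS) ? (N_TOTAL - start)
+                                                  : BLOCK_ELEMS;
+          /* Select the next slab in the file and a matching slab in memory */
+          H5Sselect_hyperslab(fspace, H5S_SELECT_SET, &start, NULL, &count, NULL);
+          H5Sselect_hyperslab(mspace, H5S_SELECT_SET, &zero,  NULL, &count, NULL);
+          H5Dwrite(dset, H5T_NATIVE_INT, mspace, fspace, H5P_DEFAULT,
+                   buf + start);
+      }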
+
+* Due to an Intel compiler bug introduced in version 14.0.1, the HDF5 FORTRAN
+ wrappers do not work with configure option --enable-fortran2003.
+ However, the option --enable-fortran works with Intel 14.0.1. The compiler
+ bug was fixed in Intel version 14.0.2 and resolved the issue.
+ (MSB - 2014/4/15)
+
+* Due to a PGI compiler bug introduced in versions before 13.3 and versions
+ after 14.2, the FORTRAN test 'Testing get file image' will fail.
+ (MSB - 2014/4/15)
+
+* On CYGWIN, when building the library dynamically, testing will fail on
+ dynamically loaded filters. The test process will build dynamic filter
+ libraries with the *.dll.a extension, and the HDF5 Library will be looking
+ for *.so libraries. Entered as issue HDFFV-8736. (ADB - 2014/04/14)
+
+* A GNU Make directive (.NOTPARALLEL) was added to fortran/test/Makefile.
+ AIX native make does not support this directive and will fail if
+ parallel make (e.g. make -j4) is used to build the library. AIX users
+ should either not use parallel make or install GNU Make to build the
+ library.
+ (AKC 2014/04/08 HDFFV-8738)
+
+* The CLANG compiler with the options -fcatch-undefined-behavior and -ftrapv
+  catches some undefined behavior in the alignment algorithm of the macro
+  DETECT_I in H5detect.c. Since the algorithm is trying to detect the
+  alignment of integers, ideally the flag -fcatch-undefined-behavior should
+  not be used for H5detect.c. In the future, we can separate the flags for
+  H5detect.c from the rest of the library. (SLU - 2013/10/16 HDFFV-8147)
+
+* Make provided by Solaris fails in "make check". Solaris users should use
+ gmake to build and install the HDF5 software. (AKC - 2013/10/08 - HDFFV-8534)
+
+* On OpenVMS, two soft conversion functions (H5T__conv_i_f and H5T__conv_f_i)
+ have bugs. They convert data between floating-point numbers and integers.
+ But the library's default is hard conversion. The user should avoid
+ explicitly enabling soft conversion between floating-point numbers and
+ integers. (Issue VMS-8; SLU - 2013/09/19)
+
+* On OpenVMS, the ZLIB 1.2.8 library doesn't work properly, while ZLIB 1.2.5
+  works fine. Please use ZLIB 1.2.5 to build the HDF5 library. (Issue VMS-5;
+  SLU 2013/09/19)
+
+* When building with the Cray compilers on Cray machines, HDF5
+  configure mistakenly thinks the compiler is an Intel compiler and
+  sets the -std=c99 flag, which breaks configure on Cray. To build HDF5
+  properly on a Cray machine, please consult the instructions in
+  INSTALL_parallel for building on Hopper.
+  (MSC - 2013/04/26 - HDFFV-8429)
+
+* The 5.9 C++ compiler on Sun failed to compile a C++ test ttypes.cpp. It
+ complains with this message:
+ "/home/hdf5/src/H5Vprivate.h", line 130: Error: __func__ is not defined.
+
+ The reason is that __func__ is a predefined identifier in C99 standard. The
+ HDF5 C library uses it in H5private.h. The test ttypes.cpp includes
+ H5private.h (H5Tpkg.h<-H5Fprivate.h<-H5Vprivate.h<-H5private.h). Sun's 5.9
+ C++ compiler doesn't support __func__, thus fails to compile the C++ test.
+ But Sun's 5.11 C++ compiler does. To check whether your Sun C++ compiler
+ knows this identifier, try to compile the following simple C++ program:
+ #include<stdio.h>
+
+ int main(void)
+ {
+ printf("%s\n", __func__);
+ return 0;
+ }
+ (SLU - 2012/11/5)
+
+* The C++ and FORTRAN bindings are not currently working on FreeBSD with the
+ native release 8.2 compilers (4.2.1), but are working with gcc 4.6 from the
+ ports (and probably gcc releases after that).
+ (QAK - 2012/10/19)
+
+* The following h5dump test case fails in BG/P machines (and potentially other
+ machines that use a command script to launch executables):
+
+ h5dump --no-compact-subset -d "AHFINDERDIRECT::ah_centroid_t[0] it=0 tl=0"
+ tno-subset.h5
+
+ This is due to the embedded spaces in the dataset name being interpreted
+ by the command script launcher as meta-characters, thus passing three
+ arguments to h5dump's -d flag. The command passes if run by hand, just
+ not via the test script.
+ (AKC - 2012/05/03)
+
+* On hopper, the build failed when RUNSERIAL and RUNPARALLEL are set
+ to aprun -np X, because the H5lib_settings.c file was not generated
+ properly. Not setting those environment variables works, because
+ configure was able to automatically detect that it's a Cray system
+ and used the proper launch commands when necessary.
+ (MSC - 2012/04/18)
+
+* The data conversion test dt_arith.c fails in "long double" to integer
+ conversion on Ubuntu 11.10 (3.0.0.13 kernel) with GCC 4.6.1 if the library
+  is built with optimization -O3 or -O2. The older GCC (4.5) or newer kernel
+ (3.2.2 on Fedora) doesn't have the problem. Users should lower the
+ optimization level (-O1 or -O0) by defining CFLAGS in the command line of
+ "configure" like:
+
+ CFLAGS=-O1 ./configure
+
+ This will overwrite the library's default optimization level.
+ (SLU - 2012/02/07 - HDFFV-7829)
+ This issue is no longer present on Ubuntu 12.10 (3.5.0 kernel) with
+ gcc 4.7.2.
+
+* The STDIO VFD does not work on some architectures, possibly due to 32/64
+ bit or large file issues. The basic STDIO VFD test is known to fail on
+ 64-bit SunOS 5.10 on SPARC when built with -m64 and 32-bit OS X/Darwin
+ 10.7.0. The STDIO VFD test has been disabled while we investigate and
+ a fix should appear in a future release.
+ (DER - 2011/10/14 - HDFFV-8235)
+
+* h5diff can report inconsistent results when comparing datasets of enum type
+ that contain invalid values. This is due to how enum types are handled in
+ the library and will be addressed in a future release.
+ (DER - 2011/10/14 - HDFFV-7527)
+
+* The links test can fail under the stdio VFD due to some issues with external
+ links. This will be investigated and fixed in a future release.
+ (DER - 2011/10/14 - HDFFV-7768)
+
+* After the shared library support was fixed for some bugs, it was discovered
+ that "make prefix=XXX install" no longer works for shared libraries. It
+ still works correctly for static libraries. Therefore, if you want to
+ install the HDF5 shared libraries in a location such as /usr/local/hdf5,
+ you need to specify the location via the --prefix option during configure
+  time. E.g., ./configure --prefix=/usr/local/hdf5 ...
+ (AKC - 2011/05/07 - HDFFV-7583)
+
+* The parallel test, t_shapesame, in testpar/, may run for a long time and may
+ be terminated by the alarm signal. If that happens, one can increase the
+ alarm seconds (default is 1200 seconds = 20 minutes) by setting the
+ environment variable, $HDF5_ALARM_SECONDS, to a larger value such as 3600
+ (60 minutes). Note that the t_shapesame test may fail in some systems
+ (see the "While working on the 1.8.6 release..." problem below). If
+ it does, it will waste more time if $HDF5_ALARM_SECONDS is set
+ to a larger value.
+ (AKC - 2011/05/07)
+
+* The C++ and FORTRAN bindings are not currently working on FreeBSD.
+ (QAK - 2011/04/26)
+
+* Shared Fortran libraries are not quite working on AIX. While they are
+ generated when --enable-shared is specified, the fortran and hl/fortran
+ tests fail. We are looking into the issue. HL and C++ shared libraries
+ should now be working as intended, however.
+ (MAM - 2011/04/20)
+
+* The --with-mpe configure option does not work with Mpich2.
+ (AKC - 2011/03/10)
+
+* While working on the 1.8.6 release of HDF5, a bug was discovered that can
+ occur when reading from a dataset in parallel shortly after it has been
+ written to collectively. The issue was exposed by a new test in the parallel
+ HDF5 test suite, but had existed before that. We believe the problem lies with
+ certain MPI implementations and/or file systems.
+
+ We have provided a pure MPI test program, as well as a standalone HDF5
+ program, that can be used to determine if this is an issue on your system.
+ They should be run across multiple nodes with a varying number of processes.
+ These programs can be found at:
+ http://www.hdfgroup.org/ftp/HDF5/examples/known_problems/
+ (NAF - 2011/01/19)
+
+* The VFL drivers are not backward compatible. In H5FDpublic.h, the
+  structure H5FD_class_t changed in 1.8: a new parameter was added to the
+  get_eoa and set_eoa callback functions, a new callback function
+  get_type_map was added, and the public function H5FDrealloc was taken
+  out in 1.8. The problem only happens when users define their own driver
+  for 1.6 and try to plug it into the 1.8 library. Because there's only one
+  user complaining about it, we (Elena, Quincey, and I) decided to leave it
+  as it is (see bug report #1279). Quincey will make a plan for 1.10.
+  (SLU - 2010/02/02)
+
+* The --enable-static-exec configure flag will only statically link libraries
+ if the static version of that library is present. If only the shared version
+ of a library exists (i.e., most system libraries on Solaris, AIX, and Mac,
+ for example, only have shared versions), the flag should still result in a
+ successful compilation, but note that the installed executables will not be
+ fully static. Thus, the only guarantee on these systems is that the
+ executable is statically linked with just the HDF5 library.
+ (MAM - 2009/11/04)
+
+* Parallel tests failed with 16 processes with data inconsistency at testphdf5
+ / dataset_readAll. Parallel tests also failed with 32 and 64 processes with
+ collective abort of all ranks at t_posix_compliant / allwrite_allread_blocks
+ with MPI IO.
+ (CMC - 2009/04/28)
+
+* On an Intel 64 Linux cluster (RH 4, Linux 2.6.9) with Intel 10.0 compilers,
+ use -mp -O1 compilation flags to build the libraries. A higher level of
+ optimization causes failures in several HDF5 library tests.
+
+* A dataset created or rewritten with a v1.6.3 library or after cannot be read
+ with the v1.6.2 library or before when the Fletcher32 EDC filter is enabled.
+ There was a bug in the calculation of the Fletcher32 checksum in the
+ library before v1.6.3; the checksum value was not consistent between big-
+ endian and little-endian systems. This bug was fixed in Release 1.6.3.
+ However, after fixing the bug, the checksum value was no longer the same as
+  before on little-endian systems. Library releases after 1.6.4 can still read
+ datasets created or rewritten with an HDF5 library of v1.6.2 or before.
+ (SLU - 2005/06/30)
+
+
+%%%%1.8.12%%%%
+
+
+HDF5 version 1.8.12 released on 2013-11-04
+================================================================================
+
+INTRODUCTION
+============
+
+This document describes the differences between HDF5-1.8.11 and
+HDF5-1.8.12, and contains information on the platforms tested and
+known problems in HDF5-1.8.12.
+For more details, see the files HISTORY-1_0-1_8_0_rc3.txt
+and HISTORY-1_8.txt in the release_docs/ directory of the HDF5 source.
+
+Links to the HDF5 1.8.12 source code, documentation, and additional materials
+can be found on the HDF5 web page at:
+
+ http://www.hdfgroup.org/products/hdf5/
+
+The HDF5 1.8.12 release can be obtained from:
+
+ http://www.hdfgroup.org/HDF5/release/obtain5.html
+
+User documentation for 1.8.12 can be accessed directly at this location:
+
+ http://www.hdfgroup.org/HDF5/doc/
+
+New features in the HDF5-1.8.x release series, including brief general
+descriptions of some new and modified APIs, are described in the "What's New
+in 1.8.0?" document:
+
+ http://www.hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html
+
+All new and modified APIs are listed in detail in the "HDF5 Software Changes
+from Release to Release" document, in the section "Release 1.8.12 (current
+release) versus Release 1.8.11":
+
+ http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html
+
+If you have any questions or comments, please send them to the HDF Help Desk:
+
+ help@hdfgroup.org
+
+
+CONTENTS
+========
+
+- New Features
+- Support for New Platforms, Languages, and Compilers
+- Bug Fixes since HDF5-1.8.11
+- Supported Platforms
+- Supported Configuration Features Summary
+- More Tested Platforms
+- Known Problems
+
+
+New Features
+============
+
+ Configuration
+ -------------
+ - Added a configuration option to change the default plugin path.
+ The configure option is --with-default-plugindir=location.
+ The cmake option is -DH5_DEFAULT_PLUGINDIR:PATH=location.
+ HDFFV-8513. (ADB 2013/09/04)
+ - Renamed FFLAGS to FCFLAGS in configure. (ADB 2013/08/13)
+ - CMake can now package a compressed examples file, the default for
+ Windows binaries from HDF Group. (ADB - 2013/07/22)
+
+ Library
+ -------
+ - None
+
+ Parallel Library
+ ----------------
+ - None
+
+ Tools
+ -----
+ - h5repack: Added the ability to use plugin filters to read and write
+ files. The option uses the filter number. HDFFV-8345
+ (ADB - 2013/09/04).
+ - h5dump: Added the option -N --any_path, which searches the file for
+ paths that match the search path. HDFFV-7989 (ADB - 2013/08/12).
+ - h5dump: Added the optional arg 0 to -A, which excludes attributes
+ from display. HDFFV-8134 (ADB - 2013/08/01).
+
+ High-Level APIs
+ ---------------
+ - None
+
+ Fortran API
+ -----------
+ - None
+
+ C++ API
+ -------
+ - Added tutorial examples to C++/examples. They can be installed by
+ "make install-examples" and, in the installed directory, they can be
+ executed by running the script file run-c++-ex.sh. (BMR - 2013/09/28)
+    - A new class, H5::H5Location, is added to represent the location concept
+      in the C library. It is a base class of H5::H5File and H5::H5Object,
+      whose member functions are moved into H5::H5Location. H5::H5File can
+      now inherit those functions. As a result, an H5::H5File object can have
+      an attribute. (BMR - 2013/09/27)
+    - Added wrappers for H5Rget_obj_type2 to retrieve the type of the object
+      that an object reference points to. (BMR - 2013/09/27)
+      H5O_type_t H5Location::getRefObjType(void *ref, H5R_type_t ref_type)
+    - Added wrappers for H5Aexist to check whether an attribute exists given
+      a name; see the sketch after this list. (BMR - 2013/09/27)
+      bool H5::H5Location::attrExists(const char* name)
+      bool H5::H5Location::attrExists(const H5std_string& name)
+ - Added a number of overloaded functions for convenience. (BMR - 2013/09/27)
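+
+    The sketch below illustrates the effect of the H5::H5Location change:
+    attrExists() can now be called directly on an H5::H5File object. It is
+    an illustration only, not one of the installed tutorial examples; the
+    file name and attribute name are placeholders, and exception handling
+    is omitted.
+
+      #include <iostream>
+      #include "H5Cpp.h"
+      using namespace H5;
+
+      int main()
+      {
+          // "example.h5" and "schema_version" are placeholder names.
+          H5File file("example.h5", H5F_ACC_RDONLY);
+
+          // attrExists() is inherited from the new H5Location base class.
+          if (file.attrExists("schema_version"))
+              std::cout << "file-level attribute found" << std::endl;
+
+          return 0;
+      }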
+
+
+Support for New Platforms, Languages, and Compilers
+===================================================
+ - None
+
+Bug Fixes since HDF5-1.8.11
+===========================
+
+ Configuration
+ -------------
+ - Modified H5detect.c to scan floating point types for padding bits before
+ analyzing the type further. This should fix problems with gcc 4.8.
+ (NAF - 2013/09/19 - HDFFV-8523/HDFFV-8500)
+ - HDF5 rpaths are no longer encoded in the library files when configured
+ with --disable-sharedlib-rpath. (LRK-2013-09-23 - HDFFV-8276)
+
+ Library
+ -------
+ - Added const qualifier to source buffer parameters in H5Dgather and
+ H5D_scatter_func_t (H5Dscatter callback). (NAF - 2013/7/09)
+
+ - CMake now creates *.so.{lt_version} files with the same version as
+ configure. (ADB - 2013/06/05 HDFFV-8437)
+
+ Parallel Library
+ ----------------
+ - None
+
+ Performance
+ -------------
+ - None
+
+ Tools
+ -----
+ - h5dump: Added the option -N --any_path, which searches the file for
+ paths that match the search path. HDFFV-7989 (ADB - 2013/08/12).
+ - h5dump: Added the optional arg 0 to -A, which excludes attributes
+ from display. HDFFV-8134 (ADB - 2013/08/01).
+ - h5dump correctly exports subsetted data to a file, using the --output
+ option. (ADB - 2013/06/07 HDFFV-8447)
+ - h5cc and other compile scripts now default to linking shared libraries
+ when HDF5 is configured with the --disable-static option.
+ (LRK - 2013-09-23 - HDFFV-8141)
+
+ F90 API
+ -------
+ - None
+
+ C++ API
+ ------
+ - None
+
+ High-Level APIs:
+ ------
+ - None
+
+ Fortran High-Level APIs:
+ ------
+ - None
+
+ Testing
+ -------
+    - test/big sometimes failed with the message "file selection+offset not
+      within extent". This has been fixed. (AKC - 2013/09/28 HDFFV-8271)
+ - tools/h5diff/testh5diff.sh is run in every "make check", even after it
+ has passed in the previous run. It should not run again if there are no
+ code changes. Fixed. (AKC - 2013/07/19 HDFFV-8392)
+
+Supported Platforms
+===================
+The following platforms are supported and have been tested for this release.
+They are built with the configure process unless specified otherwise.
+
+ AIX 5.3 xlc 10.1.0.5
+ (NASA G-ADA) xlC 10.1.0.5
+ xlf90 12.1.0.6
+
+ Linux 2.6.18-308.13.1.el5PAE GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP i686 i686 i386 compilers for 32-bit applications;
+ (jam) Version 4.1.2 20080704 (Red Hat 4.1.2-54)
+ Version 4.8.1
+ PGI C, Fortran, C++ Compilers for 32-bit
+ applications;
+ Version 13.7-0
+ Intel(R) C, C++, Fortran Compiler for 32-bit
+ applications;
+ Version 13.1.3 (Build 20130607)
+
+ Linux 2.6.18-308.16.1.el5 GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP x86_64 GNU/Linux compilers for 64-bit applications;
+ (koala) Version 4.1.2 20080704 (Red Hat 4.1.2-54)
+ Version 4.8.1
+ PGI C, Fortran, C++ for 64-bit target on
+ x86-64;
+ Version 13.7-0
+ Intel(R) C, C++, Fortran Compilers for
+ applications running on Intel(R) 64;
+ Version 13.1.3 (Build 20130607)
+
+ Linux 2.6.32-358.18.1.el6 GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP x86_64 GNU/Linux compilers:
+ (platypus) Version 4.4.7 20120313
+ Version 4.8.1
+ PGI C, Fortran, C++ for 64-bit target on
+ x86-64;
+ Version 13.7-0
+ Intel(R) C (icc), C++ (icpc), Fortran (icc)
+ compilers:
+ Version 13.1.3 (Build 20130607)
+
+ Linux 2.6.32-358.18.1.el6.ppc64 gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-3)
+ #1 SMP ppc64 GNU/Linux g++ (GCC) 4.4.7 20120313 (Red Hat 4.4.7-3)
+ (ostrich) GNU Fortran (GCC) 4.4.7 20120313 (Red Hat 4.4.7-3)
+ IBM XL C/C++ V11.1
+ IBM XL Fortran V13.1
+
+ SunOS 5.11 32- and 64-bit Sun C 5.12 SunOS_sparc
+ (emu) Sun Fortran 95 8.6 SunOS_sparc
+ Sun C++ 5.12 SunOS_sparc
+
+ Windows 7 Visual Studio 2008 w/ Intel Fortran 11 (cmake)
+ Visual Studio 2010 w/ Intel Fortran 12 (cmake)
+ Visual Studio 2012 w/ Intel Fortran 13 (cmake)
+ Cygwin(CYGWIN_NT-6.1 1.7.15(0.260/5/3) gcc(4.5.3) compiler and gfortran)
+ (cmake and autotools)
+
+ Windows 7 x64 Visual Studio 2008 w/ Intel Fortran 11 (cmake)
+ Visual Studio 2010 w/ Intel Fortran 12 (cmake)
+ Visual Studio 2012 w/ Intel Fortran 13 (cmake)
+
+ Windows 8.1 Visual Studio 2012 w/ Intel Fortran 13 (cmake)
+
+ Windows 8.1 x64 Visual Studio 2012 w/ Intel Fortran 13 (cmake)
+
+ Mac OS X Snow Leopard 10.6.8 gcc i686-apple-darwin11-llvm-gcc-4.2 (GCC) 4.2.1 from Xcode 3.2.6
+ Darwin Kernel Version 10.8.0 g++ i686-apple-darwin11-llvm-g++-4.2 (GCC) 4.2.1 from Xcode 3.2.6
+ 64-bit gfortran GNU Fortran (GCC) 4.6.2
+ (fred) Intel C (icc), Fortran (ifort), C++ (icpc)
+ 12.1 Build 20120928
+
+ Mac OS X Lion 10.7.3 Apple clang/clang++ version 3.0 from Xcode 4.6.1
+ 64-bit gfortran GNU Fortran (GCC) 4.6.2
+ (duck) Intel icc/icpc/ifort version 13.0.3
+
+ Mac OS X Mountain Lion 10.8.1 Apple clang/clang++ version 4.2 from Xcode 4.6.1
+ 64-bit gfortran GNU Fortran (GCC) 4.6.2
+ (wren) Intel icc/icpc/ifort version 13.0.3
+
+ OpenVMS IA64 V8.4 HP C V7.3-018
+ HP Fortran V8.2-104939-50H96
+ HP C++ V7.4-004
+
+Tested Configuration Features Summary
+=====================================
+
+ In the tables below
+ y = tested
+ n = not tested in this release
+ C = Cluster
+ W = Workstation
+ x = not working in this release
+ dna = does not apply
+ ( ) = footnote appears below second table
+ <blank> = testing incomplete on this feature or platform
+
+Platform C F90/ F90 C++ zlib SZIP
+ parallel F2003 parallel
+Solaris2.11 32-bit n y/y n y y y
+Solaris2.11 64-bit n y/y n y y y
+Windows 7 y y/y n y y y
+Windows 7 x64 y y/y n y y y
+Windows 7 Cygwin n y/n n y y y
+Windows 8.1 n y/y n y y y
+Windows 8.1 x64 n y/y n y y y
+Mac OS X Snow Leopard 10.6.8 64-bit n y/y n y y y
+Mac OS X Lion 10.7.3 64-bit n y/y n y y y
+Mac OS X Mountain Lion 10.8.1 64-bit n y/y n y y y
+AIX 5.3 32- and 64-bit n y/n n y y y
+CentOS 5.9 Linux 2.6.18-308 i686 GNU y y/y y y y y
+CentOS 5.9 Linux 2.6.18-308 i686 Intel n y/y n y y y
+CentOS 5.9 Linux 2.6.18-308 i686 PGI n y/y n y y y
+CentOS 5.9 Linux 2.6.18 x86_64 GNU y y/y y y y y
+CentOS 5.9 Linux 2.6.18 x86_64 Intel n y/y n y y y
+CentOS 5.9 Linux 2.6.18 x86_64 PGI n y/y n y y y
+CentOS 6.4 Linux 2.6.32 x86_64 GNU n y/y n y y y
+CentOS 6.4 Linux 2.6.32 x86_64 Intel n y/y n y y y
+Linux 2.6.32-358.2.1.el6.ppc64 n y/n n y y y
+OpenVMS IA64 V8.4 n y/n n y y n
+
+Platform Shared Shared Shared Thread-
+ C libs F90 libs C++ libs safe
+Solaris2.11 32-bit y y y y
+Solaris2.11 64-bit y y y y
+Windows 7 y y y y
+Windows 7 x64 y y y y
+Windows 7 Cygwin n n n y
+Windows 8.1 y y y y
+Windows 8.1 x64 y y y y
+Mac OS X Snow Leopard 10.6.8 64-bit y n y n
+Mac OS X Lion 10.7.3 64-bit y n y y
+Mac OS X Mountain Lion 10.8.1 64-bit y n y y
+AIX 5.3 32- and 64-bit y n n y
+CentOS 5.9 Linux 2.6.18-308 i686 GNU y y y y
+CentOS 5.9 Linux 2.6.18-308 i686 Intel y y y n
+CentOS 5.9 Linux 2.6.18-308 i686 PGI y y y n
+CentOS 5.9 Linux 2.6.18 x86_64 GNU y y y y
+CentOS 5.9 Linux 2.6.18 x86_64 Intel y y y n
+CentOS 5.9 Linux 2.6.18 x86_64 PGI y y y n
+CentOS 6.4 Linux 2.6.32 x86_64 GNU y y y n
+CentOS 6.4 Linux 2.6.32 x86_64 Intel y y y n
+Linux 2.6.32-358.2.1.el6.ppc64 y y y n
+OpenVMS IA64 V8.4 n n n n
+
+Compiler versions for each platform are listed in the preceding
+"Supported Platforms" table.
+
+
+More Tested Platforms
+=====================
+The following platforms are not supported but have been tested for this release.
+
+ Linux 2.6.18-308.13.1.el5PAE MPICH mpich2-1.4.1p1 compiled with
+ #1 SMP i686 i686 i386 gcc 4.1.2 and gfortran 4.1.2
+ (jam) g95 (GCC 4.0.3 (g95 0.94!)
+
+ Linux 2.6.18-308.16.1.el5 MPICH mpich2-1.4.1p1 compiled with
+ #1 SMP x86_64 GNU/Linux gcc 4.1.2 and gfortran 4.1.2
+ (koala) g95 (GCC 4.0.3 (g95 0.94!)
+
+ FreeBSD 8.2-STABLE i386 gcc 4.5.4 [FreeBSD] 20110526
+ (loyalty) gcc 4.6.1 20110527
+ g++ 4.6.1 20110527
+ gfortran 4.6.1 20110527
+
+ FreeBSD 8.2-STABLE amd64 gcc 4.5.4 [FreeBSD] 20110526
+ (freedom) gcc 4.6.1 20110527
+ g++ 4.6.1 20110527
+ gfortran 4.6.1 20110527
+
+ Debian7.1.0 3.2.0-4-amd64 #1 SMP Debian 3.2.46-1 x86_64 GNU/Linux
+ gcc (Debian 4.7.2-5) 4.7.2
+ GNU Fortran (Debian 4.7.2-5) 4.7.2
+ (cmake and autotools)
+
+ Fedora19 3.11.1-200.fc19.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux
+ gcc (GCC) 4.8.1 20130603 (Red Hat 4.8.1-1)
+ GNU Fortran (GCC) 4.8.1 20130603 (Red Hat 4.8.1-1)
+ (cmake and autotools)
+
+ SUSE 12.3 3.7.10-1.16-desktop #1 SMP PREEMPT x86_64 x86_64 x86_64 GNU/Linux
+ gcc (SUSE Linux) 4.7.2
+ GNU Fortran (SUSE Linux) 4.7.2
+ (cmake and autotools)
+
+ Ubuntu 13.04 3.8.0-30-generic #44-Ubuntu SMP x86_64 GNU/Linux
+ gcc (Ubuntu/Linaro 4.7.3-1ubuntu1) 4.7.3
+ GNU Fortran (Ubuntu/Linaro 4.7.3-1ubuntu1) 4.7.3
+ (cmake and autotools)
+
+ Cray Linux Environment (CLE) PrgEnv-pgi/4.0.46
+ hopper.nersc.gov pgcc 12.5-0 64-bit target on x86-64 Linux -tp shanghai
+ pgf90 12.5-0 64-bit target on x86-64 Linux -tp shanghai
+ pgCC 12.5-0 64-bit target on x86-64 Linux -tp shanghai
+
+
+Known Problems
+==============
+* Several HDF5 command-line tools and tests leave behind generated files
+  that are not cleaned up with "make clean" or "make distclean" when the
+  software is built in place. The issue will be addressed in the 1.8.13
+  release. We recommend using a separate build directory to compile and test
+  HDF5, as described in the INSTALL file, section 4.2.
+
+* Source directory names with spaces in them will cause failures in configure
+ or make on Mac (HDFFV-8152), Linux, and probably all other platforms. If a
+ configure command with a space is run from a build directory, it will exit
+ with an error message: "checking whether build environment is sane...
+ configure: error: unsafe srcdir value: '/scr/lrknox/hdf5 v1.8.12'". If
+ configure is run inside or below the directory with the space in the name,
+ libtool will get the directory path from the system, put the part of the
+ path before the space in the libdir variable in .../src/libhdf5.la, and
+ then fail to find the nonexistent directory. This is a known libtool issue
+ and the suggested workaround is to rename the directory without spaces.
+ (LRK - 2013/10/22)
+
+* CLANG compiler with the options -fcatch-undefined-behavior and -ftrapv
+ catches some undefined behavior in the alignment algorithm of the macro
+ DETECT_I in H5detect.c (HDFFV-8147). This issue will be addressed in the
+ next release. (SLU - 2013/10/16)
+
+* Running "make check" for the tools can fail in the tool tests if "make" was
+  not run beforehand. The tool tests invoke other tools, so all of the tools
+  should be built before the tools are tested. (ADB - 2013/10/09)
+
+* Make provided by Solaris fails in "make check". Solaris users should use
+ gmake to build and install HDF5 software. (AKC - 2013/10/08 - HDFFV-8534)
+
+* On OpenVMS, two soft conversion functions (H5T__conv_i_f and H5T__conv_f_i)
+ have bugs. They convert data between floating-point numbers and integers.
+ But the library's default is hard conversion. The user should avoid
+ explicitly enabling soft conversion between floating-point numbers and
+ integers. (Issue VMS-8; SLU - 2013/09/19)
+
+* On OpenVMS, ZLIB 1.2.8 library doesn't work properly. ZLIB 1.2.5 works
+ fine. So please use ZLIB 1.2.5 to build HDF5 library. (Issue VMS-5;
+ SLU 2013/09/19)
+
+* When building with the Cray compilers on Cray machines, HDF5
+  configure mistakenly thinks the compiler is an Intel compiler and
+  sets the -std=c99 flag, which breaks configure on Cray. To build HDF5
+  properly on a Cray machine, please consult the instructions in
+  INSTALL_parallel for building on Hopper.
+  (MSC - 2013/04/26 - HDFFV-8429)
+
+* The h5dump and h5diff utilities occasionally produce different output
+ between Linux and Windows systems. This is caused by lower-level library
+ routines that fail to write fill values to the user's buffer when reading
+ unallocated chunks from datasets that have a fill value set to
+ H5D_FILL_VALUE_DEFAULT. Due to platform differences the return of
+ spurious data values has only been encountered on Windows 32-bit systems.
+ (Issue HDFFV-8247; JP - 2013/03/27)
+
+* The 5.9 C++ compiler on Sun failed to compile a C++ test ttypes.cpp. It
+ complains with this message:
+ "/home/hdf5/src/H5Vprivate.h", line 130: Error: __func__ is not defined.
+
+ The reason is that __func__ is a predefined identifier in C99 standard. The
+ HDF5 C library uses it in H5private.h. The test ttypes.cpp includes
+ H5private.h (H5Tpkg.h<-H5Fprivate.h<-H5Vprivate.h<-H5private.h). Sun's 5.9
+ C++ compiler doesn't support __func__, thus fails to compile the C++ test.
+ But Sun's 5.11 C++ compiler does. To check whether your Sun C++ compiler
+ knows this identifier, try to compile the following simple C++ program:
+ #include<stdio.h>
+
+ int main(void)
+ {
+ printf("%s\n", __func__);
+ return 0;
+ }
+ (SLU - 2012/11/5)
+
+* The C++ and FORTRAN bindings are not currently working on FreeBSD with the
+ native release 8.2 compilers (4.2.1), but are working with gcc 4.6 from the
+ ports (and probably gcc releases after that).
+ (QAK - 2012/10/19)
+
+* The following h5dump test case fails in BG/P machines (and potentially other
+ machines that use a command script to launch executables):
+
+ h5dump --no-compact-subset -d "AHFINDERDIRECT::ah_centroid_t[0] it=0 tl=0"
+ tno-subset.h5
+
+ This is due to the embedded spaces in the dataset name being interpreted
+ by the command script launcher as meta-characters, thus passing three
+ arguments to h5dump's -d flag. The command passes if run by hand, just
+ not via the test script.
+ (AKC - 2012/05/03)
+
+* On hopper, the build failed when RUNSERIAL and RUNPARALLEL are set
+ to aprun -np X, because the H5lib_settings.c file was not generated
+ properly. Not setting those environment variables works, because
+ configure was able to automatically detect that it is a Cray system
+ and used the proper launch commands when necessary.
+ (MSC - 2012/04/18)
+
+* The data conversion test dt_arith.c fails in "long double" to integer
+ conversion on Ubuntu 11.10 (3.0.0.13 kernel) with GCC 4.6.1 if the library
+ is built with optimization -O3 or -O2. The older GCC (4.5) or newer kernel
+ (3.2.2 on Fedora) do not have the problem. Users should lower the
+ optimization level (-O1 or -O0) by defining CFLAGS in the command line of
+ "configure" like:
+
+ CFLAGS=-O1 ./configure
+
+ This will overwrite the library's default optimization level.
+ (SLU - 2012/02/07 - HDFFV-7829)
+ This issue is no longer present on Ubuntu 12.10 (3.5.0 kernel) with
+ gcc 4.7.2.
+
+* The STDIO VFD does not work on some architectures, possibly due to 32/64
+ bit or large file issues. The basic STDIO VFD test is known to fail on
+ 64-bit SunOS 5.10 on SPARC when built with -m64 and 32-bit OS X/Darwin
+ 10.7.0. The STDIO VFD test has been disabled while we investigate and
+ a fix should appear in a future release.
+ (DER - 2011/10/14 - HDFFV-8235)
+
+* h5diff can report inconsistent results when comparing datasets of enum type
+ that contain invalid values. This is due to how enum types are handled in
+ the library and will be addressed in a future release.
+ (DER - 2011/10/14 - HDFFV-7527)
+
+* The links test can fail under the stdio VFD due to some issues with external
+ links. This will be investigated and fixed in a future release.
+ (DER - 2011/10/14 - HDFFV-7768)
+
+* After the shared library support was fixed for some bugs, it was discovered
+ that "make prefix=XXX install" no longer works for shared libraries. It
+ still works correctly for static libraries. Therefore, if you want to
+ install the HDF5 shared libraries in a location such as /usr/local/hdf5,
+ you need to specify the location via the --prefix option during configure
+  time. E.g., ./configure --prefix=/usr/local/hdf5 ...
+ (AKC - 2011/05/07 - HDFFV-7583)
+
+* The parallel test, t_shapesame, in testpar/, may run for a long time and may
+ be terminated by the alarm signal. If that happens, one can increase the
+ alarm seconds (default is 1200 seconds = 20 minutes) by setting the
+ environment variable, $HDF5_ALARM_SECONDS, to a larger value such as 3600
+ (60 minutes). Note that the t_shapesame test may fail in some systems
+ (see the "While working on the 1.8.6 release..." problem below). If
+ it does, it will waste more time if $HDF5_ALARM_SECONDS is set
+ to a larger value.
+ (AKC - 2011/05/07)
+
+* The C++ and FORTRAN bindings are not currently working on FreeBSD.
+ (QAK - 2011/04/26)
+
+* Shared Fortran libraries are not quite working on AIX. While they are
+ generated when --enable-shared is specified, the fortran and hl/fortran
+ tests fail. We are looking into the issue. HL and C++ shared libraries
+ should now be working as intended, however.
+ (MAM - 2011/04/20)
+
+* The --with-mpe configure option does not work with Mpich2.
+ (AKC - 2011/03/10)
+
+* While working on the 1.8.6 release of HDF5, a bug was discovered that can
+ occur when reading from a dataset in parallel shortly after it has been
+ written to collectively. The issue was exposed by a new test in the parallel
+ HDF5 test suite, but had existed before that. We believe the problem lies with
+ certain MPI implementations and/or file systems.
+
+ We have provided a pure MPI test program, as well as a standalone HDF5
+ program, that can be used to determine if this is an issue on your system.
+ They should be run across multiple nodes with a varying number of processes.
+ These programs can be found at:
+ http://www.hdfgroup.org/ftp/HDF5/examples/known_problems/
+ (NAF - 2011/01/19)
+
+* The VFL drivers are not backward compatible. In H5FDpublic.h, the
+  structure H5FD_class_t changed in 1.8: a new parameter was added to the
+  get_eoa and set_eoa callback functions, a new callback function
+  get_type_map was added, and the public function H5FDrealloc was taken
+  out in 1.8. The problem only happens when users define their own driver
+  for 1.6 and try to plug it into the 1.8 library. Because there's only one
+  user complaining about it, we (Elena, Quincey, and I) decided to leave it
+  as it is (see bug report #1279). Quincey will make a plan for 1.10.
+  (SLU - 2010/02/02)
+
+* The --enable-static-exec configure flag will only statically link libraries
+ if the static version of that library is present. If only the shared version
+ of a library exists (i.e., most system libraries on Solaris, AIX, and Mac,
+ for example, only have shared versions), the flag should still result in a
+ successful compilation, but note that the installed executables will not be
+ fully static. Thus, the only guarantee on these systems is that the
+ executable is statically linked with just the HDF5 library.
+ (MAM - 2009/11/04)
+
+* Parallel tests failed with 16 processes with data inconsistency at testphdf5
+ / dataset_readAll. Parallel tests also failed with 32 and 64 processes with
+ collective abort of all ranks at t_posix_compliant / allwrite_allread_blocks
+ with MPI IO.
+ (CMC - 2009/04/28)
+
+* On an Intel 64 Linux cluster (RH 4, Linux 2.6.9) with Intel 10.0 compilers,
+ use -mp -O1 compilation flags to build the libraries. A higher level of
+ optimization causes failures in several HDF5 library tests.
+
+* A dataset created or rewritten with a v1.6.3 library or after cannot be read
+ with the v1.6.2 library or before when the Fletcher32 EDC filter is enabled.
+ There was a bug in the calculation of the Fletcher32 checksum in the
+ library before v1.6.3; the checksum value was not consistent between big-
+ endian and little-endian systems. This bug was fixed in Release 1.6.3.
+ However, after fixing the bug, the checksum value was no longer the same as
+  before on little-endian systems. Library releases after 1.6.4 can still read
+ datasets created or rewritten with an HDF5 library of v1.6.2 or before.
+ (SLU - 2005/06/30)
+
+
+%%%%1.8.11%%%%
+
+
+HDF5 version 1.8.11 released on 2013-05-08
+================================================================================
+
+INTRODUCTION
+============
+
+This document describes the differences between HDF5-1.8.10 and
+HDF5-1.8.11-*, and contains information on the platforms tested and
+known problems in HDF5-1.8.11-*.
+For more details, see the files HISTORY-1_0-1_8_0_rc3.txt
+and HISTORY-1_8.txt in the release_docs/ directory of the HDF5 source.
+
+Links to the HDF5 1.8.11 source code, documentation, and additional materials
+can be found on the HDF5 web page at:
+
+ http://www.hdfgroup.org/products/hdf5/
+
+The HDF5 1.8.11 release can be obtained from:
+
+ http://www.hdfgroup.org/HDF5/release/obtain5.html
+
+User documentation for 1.8.11 can be accessed directly at this location:
+
+ http://www.hdfgroup.org/HDF5/doc/
+
+New features in the HDF5-1.8.x release series, including brief general
+descriptions of some new and modified APIs, are described in the "What's New
+in 1.8.0?" document:
+
+ http://www.hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html
+
+All new and modified APIs are listed in detail in the "HDF5 Software Changes
+from Release to Release" document, in the section "Release 1.8.11 (current
+release) versus Release 1.8.10":
+
+ http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html
+
+If you have any questions or comments, please send them to the HDF Help Desk:
+
+ help@hdfgroup.org
+
+
+CONTENTS
+========
+
+- New Features
+- Support for New Platforms, Languages, and Compilers
+- Bug Fixes since HDF5-1.8.10
+- Supported Platforms
+- Supported Configuration Features Summary
+- More Tested Platforms
+- Known Problems
+
+
+New Features
+============
+
+ Configuration
+ -------------
+ - Libtool version number is changed to 8.0.0 because there are API
+ changes. See below for details. (AKC - 2013/05/07 HDFFV-8435)
+    - Mac OS X 10.7 (Lion) and 10.8 (Mountain Lion) use clang/clang++ as the
+      default C and C++ compilers. (AKC - 2013/04/19 HDFFV-8245)
+ - CMake minimum is now 2.8.10. (ADB 2013/1/14)
+    - A new tool, cmakehdf5, a build command script similar to buildhdf5,
+      was added and is available in the bin directory.
+      (AKC - 2013/01/16 HDFFV-8336)
+
+ Library
+ -------
+    - The library can load filter libraries dynamically during runtime. Users
+      can set the search path through the environment variable
+      HDF5_PLUGIN_PATH and call H5Pset_filter to enable a dynamic filter;
+      see the sketch after this list. (SLU - 2013/04/08)
+    - Added new API functions H5Dscatter and H5Dgather to scatter data to,
+      and gather data from, a selection within a memory buffer.
+      (NAF - 2013/02/05)
+ - The library now supports the data conversion from enumeration to numeric
+ (integer and floating-point number) datatypes. See Issue HDFFV-8221.
+ (SLU - 2012/10/23)
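+
+    A minimal sketch of the dynamic filter feature follows. It is an
+    illustration only: the filter ID 32000, the parameter value, and the
+    file and dataset names are placeholders, it assumes a matching filter
+    plugin is installed in a directory listed in HDF5_PLUGIN_PATH, and
+    error checking is omitted.
+
+      #include "hdf5.h"
+
+      int main(void)
+      {
+          hsize_t        dims[1]      = {1024};
+          hsize_t        chunk[1]     = {256};
+          const unsigned cd_values[1] = {9};   /* placeholder parameter */
+
+          hid_t space = H5Screate_simple(1, dims, NULL);
+          hid_t dcpl  = H5Pcreate(H5P_DATASET_CREATE);
+
+          H5Pset_chunk(dcpl, 1, chunk);        /* filters need chunked layout */
+
+          /* 32000 stands in for the ID of a registered third-party filter. */
+          H5Pset_filter(dcpl, (H5Z_filter_t)32000, H5Z_FLAG_MANDATORY,
+                        1, cd_values);
+
+          hid_t file = H5Fcreate("plugin_demo.h5", H5F_ACC_TRUNC,
+                                 H5P_DEFAULT, H5P_DEFAULT);
+          hid_t dset = H5Dcreate2(file, "filtered", H5T_NATIVE_INT, space,
+                                  H5P_DEFAULT, dcpl, H5P_DEFAULT);
+
+          H5Dclose(dset); H5Pclose(dcpl); H5Sclose(space); H5Fclose(file);
+          return 0;
+      }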
+
+ Parallel Library
+ ----------------
+ - None
+
+ Tools
+ -----
+ - h5dump: added new option -O or -ddl to output the ddl text to a file. This
+ is a complement to the -o or --output option, which redirects the data to
+ a file. HDFFV-8229 (ADB - 2013/2/25)
+
+ High-Level APIs
+ ---------------
+    - A new API function, H5DOwrite_chunk, was added. This function writes a
+      data chunk directly into a file, bypassing hyperslab selection, data
+      conversion, and the filter pipeline; see the sketch below. The user must
+      be careful with this function and clearly understand the I/O process of
+      the library. (SLU - 2013/2/11)
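+
+      The sketch below shows the intended calling pattern. It is an
+      illustration only: the file and dataset names are placeholders, error
+      checking is omitted, and it assumes the buffer being written matches
+      the dataset's chunk size exactly.
+
+        #include "hdf5.h"
+        #include "hdf5_hl.h"
+
+        int main(void)
+        {
+            hsize_t dims[1]   = {1024};
+            hsize_t chunk[1]  = {256};
+            hsize_t offset[1] = {0};          /* chunk origin in the dataset */
+            int     data[256];
+
+            for (int i = 0; i < 256; i++)
+                data[i] = i;
+
+            hid_t space = H5Screate_simple(1, dims, NULL);
+            hid_t dcpl  = H5Pcreate(H5P_DATASET_CREATE);
+            H5Pset_chunk(dcpl, 1, chunk);
+
+            hid_t file = H5Fcreate("chunked.h5", H5F_ACC_TRUNC,
+                                   H5P_DEFAULT, H5P_DEFAULT);
+            hid_t dset = H5Dcreate2(file, "d", H5T_NATIVE_INT, space,
+                                    H5P_DEFAULT, dcpl, H5P_DEFAULT);
+
+            /* Write one raw chunk; a filter mask of 0 means no filters. */
+            H5DOwrite_chunk(dset, H5P_DEFAULT, 0, offset,
+                            sizeof(data), data);
+
+            H5Dclose(dset); H5Pclose(dcpl); H5Sclose(space); H5Fclose(file);
+            return 0;
+        }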
+
+ Fortran API
+ -----------
+ - New API functions added (MSB - 2013/3/23):
+
+ h5odecr_refcount_f, h5oexists_by_name_f, h5oget_comment_f,
+ h5oget_comment_by_name_f, h5oincr_refcount_f, h5oopen_by_idx_f,
+       h5oset_comment_f, h5oset_comment_by_name_f
+
+ F2003: h5oget_info_f, h5oget_info_by_idx_f, h5ovisit_by_name_f
+
+
+ C++ API
+ -------
+ - None
+
+
+Support for New Platforms, Languages, and Compilers
+===================================================
+ - SunOS 5.11 (emu) 32-bit and 64-bit with Sun C/C++ 5.12 compiler and
+ Sun Fortran 95 8.6 compiler.
+ - Visual Studio 2012 w/ Intel Fortran 13 on Windows 7
+    - g95 released a new version recently, and it is tested in this release.
+
+Bug Fixes since HDF5-1.8.10
+===========================
+
+ Configuration
+ -------------
+ - Fixed Thread-safe configure failure for the AIX platform.
+ (AKC - 2013/04/19 HDFFV-8390)
+ - Configure will check the result of header searches before searching for
+ the library.
+ Fixes HDFFV-8257 (ADB 2013/03/04)
+ - HDF does not support building SHARED Fortran libraries on OSX. Added
+ CMake code to check for this condition.
+ Fixes HDFFV-8227 (ADB 2013/03/04)
+ - CMake builds on Windows will no longer use legacy naming for libraries.
+ The "dll" tag will no longer be added to the name of *.lib and *.dll.
+ The option HDF_LEGACY_NAMING is now OFF by default.
+ Fixes HDFFV-8292 (ADB 2013/01/30)
+
+ Library
+ -------
+ - The library now behaves correctly when performing large I/O operations
+ on Mac OS-X. Previously, single I/O operations > 2 GB would fail
+ since the Darwin read/write calls cannot handle the number of bytes
+ that their parameter types imply.
+ Fixes HDFFV-7975 and HDFFV-8240 (DER 2013/01/07)
+ - Fixed a bug in the core VFD that causes failures when opening files
+ > 2 GB.
+ Fixes HDFFV-8124 and HDFFV-8158 (DER 2013/01/07)
+    - Fixed a bug where uninitialized memory was read during variable-length
+ type conversion. This caused segfaults in netCDF. Fixes HDFFV-8159.
+ (DER 2013/03/30)
+ - Removed the H5Pset/get_dxpl_multi functions from the library. The
+ intended functionality for them was never fully implemented, and they
+ have always been fundamentally broken. NOTE: This does not affect
+ setting the multi VFD or any expected VFD functionality. Multi VFD
+ usage remains unchanged.
+ Fixes HDFFV-8296. (DER 2013/03/30)
+
+ Parallel Library
+ ----------------
+ - None
+
+ Performance
+ -------------
+ - None
+
+ Tools
+ -----
+    - h5redeploy is changed to use these installation directories by default:
+        prefix      architecture-independent files.
+        exec_prefix architecture-dependent files, default is <prefix>.
+        libdir      libraries, default is <exec_prefix>/lib.
+        includedir  header files, default is <prefix>/include.
+      This allows users to change just the first line, prefix=<...>, and the
+      change will carry through to libdir and includedir as well.
+      (AKC 2013/04/05 HDFFV-8358)
+ - h5repack: Fixed failure to convert the layout of a small chunked
+ dataset (size < 1K) to contiguous layout. HDFFV-8214 (JKM 2013/03/26)
+ - h5dump: Fixed displaying compression ratio for unknown or user-defined
+ filters. HDFFV-8344 (XCAO 2013/03/19)
+ - h5dump: Changed UNKNOWN_FILTER to USER_DEFINED_FILTER for user defined
+ filter. HDFFV-8346 (XCAO 2013/03/19)
+ - h5diff: Fixed to return the correct exit code 1 when the program
+ detects a unique extra attribute. Prior to this fix, h5diff returned
+ exit code 0 indicating the two files are identical.
+ HDFFV-7643 (JKM 2013/02/15)
+ - h5dump: Fixed writing nulls to a binary file when exporting a dataset
+ with compound string datatype. HDFFV-8169 (ADB 2013/1/31)
+ - The following h5stat test case failed in BG/P machines (and potentially
+ other machines that display extra output if an MPI task returns with a
+ non-zero code.)
+ Testing h5stat notexist.h5
+ The test script was fixed to ignore the extra output. HDFFV-8233
+ (AKC - 2012/11/30)
+ - h5diff: Improved speed when comparing HDF5 files with lots of
+ attributes. Much slower performance was identified with release versions
+ from 1.8.7 to 1.8.10 compared to 1.8.6. (JKM 2012/10/19)
+
+ F90 API
+ -------
+ - The integer type of the 'offset' argument in h5pset_external_f and
+ h5pget_external_f was changed to INTEGER(KIND=OFF_T) to support 8-byte
+ integers, matching the C type definition of off_t. (MSB - 2013/3/23)
+ - h5fc updated to recognize .f95, .f03 and .f08 file extensions.
+
+ C++ API
+ ------
+    - The C++ wrappers DSetMemXferPropList::setMulti/getMulti were removed
+      because the C functions H5Pset/get_dxpl_multi were removed from the
+      library. Fixes HDFFV-8296 by DER. (BMR 2013/03/30)
+    - An exception thrown by an internal function was not propagating to the
+      test program during stack unwinding, so it couldn't be caught by the
+      test, and the program terminated "without an active exception." It
+      seemed that the problem happened when c_str() was used to generate
+      an equivalent const char* from a std::string and the resulting string
+      was passed to the internal function. As a workaround, we added a
+      try/catch around the call to the internal function, and when the
+      exception is caught there, it is re-thrown. Fixes HDFFV-8067.
+      (BMR 2013/03/30)
+
+ High-Level APIs:
+ ------
+    - Fixed a problem with H5DSget_scale_name including the NULL terminator
+      in the size calculation returned by the function. The API was changed
+      to NOT include the NULL terminator in the size of the name returned.
+      (MSB - 2013/2/10)
+
+ Fortran High-Level APIs:
+ ------
+ - None
+
+ Testing
+ -------
+ - In some Mac systems, testlibinfo.sh failed with this error:
+ Check file ../src/.libs/libhdf5.7.dylib
+ strings: object: ../src/.libs/libhdf5.7.dylib malformed object \
+ (unknown load command 15)
+ The strings command of Mac systems inspects library files, and older
+ versions of strings may not know newer library formats, resulting
+ in errors. Fixed by sending the library file as stdin to the strings
+ command to avoid this problem. (AKC - 2013/03/08 HDFFV-8305)
+    - Fixed a typo in the ERROR macro in test/testhdf5.h. It caused a
+      segmentation fault when used previously. (AKC - 2013/02/12 HDFFV-8267)
+
+Supported Platforms
+===================
+The following platforms are supported and have been tested for this release.
+They are built with the configure process unless specified otherwise.
+
+ AIX 5.3 xlc 10.1.0.5
+ (NASA G-ADA) xlC 10.1.0.5
+ xlf90 12.1.0.6
+
+ Linux 2.6.18-308.13.1.el5PAE GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP i686 i686 i386 compilers for 32-bit applications;
+ (jam) Version 4.1.2 20080704 (Red Hat 4.1.2-54)
+ Version 4.6.3
+ PGI C, Fortran, C++ Compilers for 32-bit
+ applications;
+ Version 11.9-0
+ Version 12.5-0
+ Intel(R) C, C++, Fortran Compiler for 32-bit
+ applications;
+ Version 12.1 (Build 20110811)
+ Version 12.1 (Build 20120212)
+
+ Linux 2.6.18-308.16.1.el5 GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP x86_64 GNU/Linux compilers for 64-bit applications;
+ (koala) Version 4.1.2 20080704 (Red Hat 4.1.2-52)
+ Version 4.6.3
+ PGI C, Fortran, C++ for 64-bit target on
+ x86-64;
+ Version 11.9-0
+ Version 12.5-0
+ Intel(R) C, C++, Fortran Compilers for
+ applications running on Intel(R) 64;
+ Version 12.1 (Build 20110811)
+ Version 12.1 (Build 20120212)
+
+ Linux 2.6.32-358.2.1.el6 GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP x86_64 GNU/Linux compilers:
+ (platypus) Version 4.4.7 20120313
+ Intel(R) C (icc), C++ (icpc), Fortran (icc)
+ compilers:
+ Version 12.1 20120212
+
+ Linux 2.6.32-358.2.1.el6.ppc64 gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-3)
+ #1 SMP ppc64 GNU/Linux g++ (GCC) 4.4.7 20120313 (Red Hat 4.4.7-3)
+ (ostrich) GNU Fortran (GCC) 4.4.7 20120313 (Red Hat 4.4.7-3)
+ IBM XL C/C++ V11.1
+ IBM XL Fortran V13.1
+
+ SunOS 5.11 32- and 64-bit Sun C 5.12 SunOS_sparc
+ (emu) Sun Fortran 95 8.6 SunOS_sparc
+ Sun C++ 5.12 SunOS_sparc
+
+ Windows 7 Visual Studio 2008 w/ Intel Fortran 11 (cmake)
+ Visual Studio 2010 w/ Intel Fortran 12 (cmake)
+ Visual Studio 2012 w/ Intel Fortran 13 (cmake)
+ Cygwin(CYGWIN_NT-6.1 1.7.15(0.260/5/3) gcc(4.5.3) compiler and gfortran)
+ (cmake and autotools)
+
+ Windows 7 x64 Visual Studio 2008 w/ Intel Fortran 11 (cmake)
+ Visual Studio 2010 w/ Intel Fortran 12 (cmake)
+ Visual Studio 2012 w/ Intel Fortran 13 (cmake)
+ Cygwin(CYGWIN_NT-6.1 1.7.15(0.260/5/3) gcc(4.5.3) compiler and gfortran)
+ (cmake and autotools)
+
+ Mac OS X Snow Leopard 10.6.8 gcc i686-apple-darwin11-llvm-gcc-4.2 (GCC) 4.2.1 from Xcode 3.2.6
+ Darwin Kernel Version 10.8.0 g++ i686-apple-darwin11-llvm-g++-4.2 (GCC) 4.2.1 from Xcode 3.2.6
+ 64-bit gfortran GNU Fortran (GCC) 4.6.2
+ (fred) Intel C (icc), Fortran (ifort), C++ (icpc)
+ 12.1 Build 20120928
+
+ Mac OS X Lion 10.7.3 Apple clang/clang++ version 3.0 from Xcode 4.6.1
+ 64-bit gfortran GNU Fortran (GCC) 4.6.2
+ (duck) Intel icc/icpc/ifort version 13.0
+
+ Mac OS X Mountain Lion 10.8.1 Apple clang/clang++ version 4.2 from Xcode 4.6.1
+ 64-bit gfortran GNU Fortran (GCC) 4.6.2
+ (wren) Intel icc/icpc/ifort version 13.0.1.119
+
+
+Tested Configuration Features Summary
+=====================================
+
+ In the tables below
+ y = tested
+ n = not tested in this release
+ C = Cluster
+ W = Workstation
+ x = not working in this release
+ dna = does not apply
+ ( ) = footnote appears below second table
+ <blank> = testing incomplete on this feature or platform
+
+Platform C F90/ F90 C++ zlib SZIP
+ parallel F2003 parallel
+Solaris2.11 32-bit n y/y n y y y
+Solaris2.11 64-bit n y/n n y y y
+Windows 7 y y/y n y y y
+Windows 7 x64 y y/y n y y y
+Windows 7 Cygwin n y/n n y y y
+Windows 7 x64 Cygwin n y/n n y y y
+Mac OS X Snow Leopard 10.6.8 64-bit n y/y n y y y
+Mac OS X Lion 10.7.3 64-bit n y/y n y y y
+Mac OS X Mountain Lion 10.8.1 64-bit n y/y n y y y
+AIX 5.3 32- and 64-bit n y/n n y y y
+CentOS 5.9 Linux 2.6.18-308 i686 GNU y y/y y y y y
+CentOS 5.9 Linux 2.6.18-308 i686 Intel n y/y n y y y
+CentOS 5.9 Linux 2.6.18-308 i686 PGI n y/y n y y y
+CentOS 5.9 Linux 2.6.18 x86_64 GNU y y/y y y y y
+CentOS 5.9 Linux 2.6.18 x86_64 Intel n y/y n y y y
+CentOS 5.9 Linux 2.6.18 x86_64 PGI n y/y n y y y
+CentOS 6.4 Linux 2.6.32 x86_64 GNU n y/n n y y y
+CentOS 6.4 Linux 2.6.32 x86_64 Intel n y/y n y y y
+Linux 2.6.32-358.2.1.el6.ppc64 n y/n n y y y
+
+
+Platform Shared Shared Shared Thread-
+ C libs F90 libs C++ libs safe
+Solaris2.11 32-bit y y y y
+Solaris2.11 64-bit y y y y
+Windows 7 y y y y
+Windows 7 x64 y y y y
+Windows 7 Cygwin n n n y
+Windows 7 x64 Cygwin n n n y
+Mac OS X Snow Leopard 10.6.8 64-bit y n y n
+Mac OS X Lion 10.7.3 64-bit y n y y
+Mac OS X Mountain Lion 10.8.1 64-bit y n y y
+AIX 5.3 32- and 64-bit y n n y
+CentOS 5.9 Linux 2.6.18-308 i686 GNU y y y y
+CentOS 5.9 Linux 2.6.18-308 i686 Intel y y y n
+CentOS 5.9 Linux 2.6.18-308 i686 PGI y y y n
+CentOS 5.9 Linux 2.6.18 x86_64 GNU y y y y
+CentOS 5.9 Linux 2.6.18 x86_64 Intel y y y n
+CentOS 5.9 Linux 2.6.18 x86_64 PGI y y y n
+CentOS 6.4 Linux 2.6.32 x86_64 GNU y y y n
+CentOS 6.4 Linux 2.6.32 x86_64 Intel y y y n
+Linux 2.6.32-358.2.1.el6.ppc64 y y y n
+
+Compiler versions for each platform are listed in the preceding
+"Supported Platforms" table.
+
+
+More Tested Platforms
+=====================
+The following platforms are not supported but have been tested for this release.
+
+ Linux 2.6.18-308.13.1.el5PAE MPICH mpich2-1.4.1p1 compiled with
+ #1 SMP i686 i686 i386 gcc 4.1.2 and gfortran 4.1.2
+ (jam) g95 (GCC 4.0.3 (g95 0.94!)
+
+ Linux 2.6.18-308.16.1.el5 MPICH mpich2-1.4.1p1 compiled with
+ #1 SMP x86_64 GNU/Linux gcc 4.1.2 and gfortran 4.1.2
+ (koala) g95 (GCC 4.0.3 (g95 0.94!)
+
+ FreeBSD 8.2-STABLE i386 gcc 4.2.1 [FreeBSD] 20070719
+ (loyalty) gcc 4.6.1 20110422
+ g++ 4.6.1 20110422
+ gfortran 4.6.1 20110422
+
+ FreeBSD 8.2-STABLE amd64 gcc 4.2.1 [FreeBSD] 20070719
+ (freedom) gcc 4.6.1 20110422
+ g++ 4.6.1 20110422
+ gfortran 4.6.1 20110422
+
+ Debian6.0.7 2.6.32-5-amd64 #1 SMP x86_64 GNU/Linux
+ gcc (Debian 4.4.5-8) 4.4.5
+ GNU Fortran (Debian 4.4.5-8) 4.4.5
+ (cmake and autotools)
+
+ Fedora18 3.7.9-205.fc18.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux
+ gcc (GCC) 4.7.2 20121109 (Red Hat 4.7.2-8)
+ GNU Fortran (GCC) 4.7.2 20120507 (Red Hat 4.7.2-8)
+ (cmake and autotools)
+
+ SUSE 12.3 3.7.10-1.1-desktop #1 SMP PREEMPT x86_64 x86_64 x86_64 GNU/Linux
+ gcc (SUSE Linux) 4.7.2
+ GNU Fortran (SUSE Linux) 4.7.2
+ (cmake and autotools)
+
+ Ubuntu 12.10 3.5.0-25-generic #39-Ubuntu SMP x86_64 GNU/Linux
+ gcc (Ubuntu/Linaro 4.7.2-2ubuntu1) 4.7.2
+ GNU Fortran (Ubuntu/Linaro 4.7.2-2ubuntu1) 4.7.2
+ (cmake and autotools)
+
+ Cray Linux Environment (CLE) PrgEnv-pgi/4.0.46
+ hopper.nersc.gov pgcc 12.5-0 64-bit target on x86-64 Linux -tp shanghai
+ pgf90 12.5-0 64-bit target on x86-64 Linux -tp shanghai
+ pgCC 12.5-0 64-bit target on x86-64 Linux -tp shanghai
+
+
+Known Problems
+==============
+
+* When building with the Cray compilers on Cray machines, HDF5
+  configure mistakenly thinks the compiler is an Intel compiler and
+  sets the -std=c99 flag, which breaks configure on Cray. To build HDF5
+  properly on a Cray machine, please consult the instructions in
+  INSTALL_parallel for building on Hopper.
+  (MSC - 2013/04/26 - HDFFV-8429)
+
+* The h5dump and h5diff utilities occasionally produce different output
+ between Linux and Windows systems. This is caused by lower-level library
+ routines that fail to write fill values to the user's buffer when reading
+ unallocated chunks from datasets that have a fill value set to
+ H5D_FILL_VALUE_DEFAULT. Due to platform differences the return of
+ spurious data values has only been encountered on Windows 32-bit systems.
+ (Issue HDFFV-8247; JP - 2013/03/27)
+
+* The 5.9 C++ compiler on Sun failed to compile a C++ test ttypes.cpp. It
+ complains with this message:
+ "/home/hdf5/src/H5Vprivate.h", line 130: Error: __func__ is not defined.
+
+ The reason is that __func__ is a predefined identifier in C99 standard. The
+ HDF5 C library uses it in H5private.h. The test ttypes.cpp includes
+ H5private.h (H5Tpkg.h<-H5Fprivate.h<-H5Vprivate.h<-H5private.h). Sun's 5.9
+ C++ compiler doesn't support __func__, thus fails to compile the C++ test.
+ But Sun's 5.11 C++ compiler does. To check whether your Sun C++ compiler
+ knows this identifier, try to compile the following simple C++ program:
+ #include<stdio.h>
+
+ int main(void)
+ {
+ printf("%s\n", __func__);
+ return 0;
+ }
+ (SLU - 2012/11/5)
+
+* The C++ and FORTRAN bindings are not currently working on FreeBSD with the
+ native release 8.2 compilers (4.2.1), but are working with gcc 4.6 from the
+ ports (and probably gcc releases after that).
+ (QAK - 2012/10/19)
+
+* The following h5dump test case fails in BG/P machines (and potentially other
+ machines that use a command script to launch executables):
+
+ h5dump --no-compact-subset -d "AHFINDERDIRECT::ah_centroid_t[0] it=0 tl=0"
+ tno-subset.h5
+
+ This is due to the embedded spaces in the dataset name being interpreted
+ by the command script launcher as meta-characters, thus passing three
+ arguments to h5dump's -d flag. The command passes if run by hand, just
+ not via the test script.
+ (AKC - 2012/05/03)
+
+* On hopper, the build failed when RUNSERIAL and RUNPARALLEL are set
+ to aprun -np X, because the H5lib_settings.c file was not generated
+ properly. Not setting those environment variables works, because
+ configure was able to automatically detect that it's a Cray system
+ and used the proper launch commands when necessary.
+ (MSC - 2012/04/18)
+
+* The data conversion test dt_arith.c fails in "long double" to integer
+ conversion on Ubuntu 11.10 (3.0.0.13 kernel) with GCC 4.6.1 if the library
+  is built with optimization -O3 or -O2. The older GCC (4.5) or newer kernel
+ (3.2.2 on Fedora) doesn't have the problem. Users should lower the
+ optimization level (-O1 or -O0) by defining CFLAGS in the command line of
+ "configure" like:
+
+ CFLAGS=-O1 ./configure
+
+ This will overwrite the library's default optimization level.
+ (SLU - 2012/02/07 - HDFFV-7829)
+ This issue is no longer present on Ubuntu 12.10 (3.5.0 kernel) with
+ gcc 4.7.2.
+
+* The STDIO VFD does not work on some architectures, possibly due to 32/64
+ bit or large file issues. The basic STDIO VFD test is known to fail on
+ 64-bit SunOS 5.10 on SPARC when built with -m64 and 32-bit OS X/Darwin
+ 10.7.0. The STDIO VFD test has been disabled while we investigate and
+ a fix should appear in a future release.
+ (DER - 2011/10/14 - HDFFV-8235)
+
+* h5diff can report inconsistent results when comparing datasets of enum type
+ that contain invalid values. This is due to how enum types are handled in
+ the library and will be addressed in a future release.
+ (DER - 2011/10/14 - HDFFV-7527)
+
+* The links test can fail under the stdio VFD due to some issues with external
+ links. This will be investigated and fixed in a future release.
+ (DER - 2011/10/14 - HDFFV-7768)
+
+* After the shared library support was fixed for some bugs, it was discovered
+ that "make prefix=XXX install" no longer works for shared libraries. It
+ still works correctly for static libraries. Therefore, if you want to
+ install the HDF5 shared libraries in a location such as /usr/local/hdf5,
+ you need to specify the location via the --prefix option during configure
+  time. E.g., ./configure --prefix=/usr/local/hdf5 ...
+ (AKC - 2011/05/07 - HDFFV-7583)
+
+* The parallel test, t_shapesame, in testpar/, may run for a long time and may
+ be terminated by the alarm signal. If that happens, one can increase the
+ alarm seconds (default is 1200 seconds = 20 minutes) by setting the
+ environment variable, $HDF5_ALARM_SECONDS, to a larger value such as 3600
+ (60 minutes). Note that the t_shapesame test may fail in some systems
+ (see the "While working on the 1.8.6 release..." problem below). If
+ it does, it will waste more time if $HDF5_ALARM_SECONDS is set
+ to a larger value.
+ (AKC - 2011/05/07)
+
+* The C++ and FORTRAN bindings are not currently working on FreeBSD.
+ (QAK - 2011/04/26)
+
+* Shared Fortran libraries are not quite working on AIX. While they are
+ generated when --enable-shared is specified, the fortran and hl/fortran
+ tests fail. We are looking into the issue. HL and C++ shared libraries
+ should now be working as intended, however.
+ (MAM - 2011/04/20)
+
+* The --with-mpe configure option does not work with Mpich2.
+ (AKC - 2011/03/10)
+
+* While working on the 1.8.6 release of HDF5, a bug was discovered that can
+ occur when reading from a dataset in parallel shortly after it has been
+ written to collectively. The issue was exposed by a new test in the parallel
+ HDF5 test suite, but had existed before that. We believe the problem lies with
+ certain MPI implementations and/or file systems.
+
+ We have provided a pure MPI test program, as well as a standalone HDF5
+ program, that can be used to determine if this is an issue on your system.
+ They should be run across multiple nodes with a varying number of processes.
+ These programs can be found at:
+ http://www.hdfgroup.org/ftp/HDF5/examples/known_problems/
+ (NAF - 2011/01/19)
+
+* The library's test dt_arith.c showed a compiler's rounding problem on
+ Cygwin when converting from unsigned long long to long double. The
+ library's own conversion works fine. We defined a macro for Cygwin to
+ skip this test until we can solve the problem.
+ (SLU - 2010/05/05 - HDFFV-1264)
+
+* The VFL drivers are not backward compatible. In H5FDpublic.h, the
+  structure H5FD_class_t changed in 1.8: a new parameter was added to the
+  get_eoa and set_eoa callback functions, a new callback function
+  get_type_map was added, and the public function H5FDrealloc was taken
+  out in 1.8. The problem only happens when users define their own driver
+  for 1.6 and try to plug it into the 1.8 library. Because there's only one
+  user complaining about it, we (Elena, Quincey, and I) decided to leave it
+  as it is (see bug report #1279). Quincey will make a plan for 1.10.
+  (SLU - 2010/02/02)
+
+* The --enable-static-exec configure flag will only statically link libraries
+ if the static version of that library is present. If only the shared version
+ of a library exists (i.e., most system libraries on Solaris, AIX, and Mac,
+ for example, only have shared versions), the flag should still result in a
+ successful compilation, but note that the installed executables will not be
+ fully static. Thus, the only guarantee on these systems is that the
+ executable is statically linked with just the HDF5 library.
+ (MAM - 2009/11/04)
+
+* Parallel tests failed with 16 processes with data inconsistency at testphdf5
+ / dataset_readAll. Parallel tests also failed with 32 and 64 processes with
+ collective abort of all ranks at t_posix_compliant / allwrite_allread_blocks
+ with MPI IO.
+ (CMC - 2009/04/28)
+
+* On an Intel 64 Linux cluster (RH 4, Linux 2.6.9) with Intel 10.0 compilers,
+ use -mp -O1 compilation flags to build the libraries. A higher level of
+ optimization causes failures in several HDF5 library tests.
+
+* A dataset created or rewritten with a v1.6.3 library or after cannot be read
+ with the v1.6.2 library or before when the Fletcher32 EDC filter is enabled.
+ There was a bug in the calculation of the Fletcher32 checksum in the
+ library before v1.6.3; the checksum value was not consistent between big-
+ endian and little-endian systems. This bug was fixed in Release 1.6.3.
+ However, after fixing the bug, the checksum value was no longer the same as
+ before on little-endian systems. Library releases after 1.6.4 can still read
+ datasets created or rewritten with an HDF5 library of v1.6.2 or before.
+ (SLU - 2005/06/30)
+
+
+%%%%1.8.10-patch1%%%%
+
+
+HDF5 version 1.8.10-patch1 released on 2013-01-22
+================================================================================
+
+INTRODUCTION
+============
+
+This document describes the differences between HDF5-1.8.10 and
+HDF5-1.8.10-patch1, as well as between HDF5-1.8.9 and HDF5-1.8.10, and
+contains information on the platforms tested and known problems in
+HDF5-1.8.10-patch1.
+For more details, see the files HISTORY-1_0-1_8_0_rc3.txt
+and HISTORY-1_8.txt in the release_docs/ directory of the HDF5 source.
+
+Links to the HDF5 1.8.10 source code, documentation, and additional materials
+can be found on the HDF5 web page at:
+
+ http://www.hdfgroup.org/products/hdf5/
+
+The HDF5 1.8.10 release can be obtained from:
+
+ http://www.hdfgroup.org/HDF5/release/obtain5.html
+
+User documentation for 1.8.10 can be accessed directly at this location:
+
+ http://www.hdfgroup.org/HDF5/doc/
+
+New features in the HDF5-1.8.x release series, including brief general
+descriptions of some new and modified APIs, are described in the "What's New
+in 1.8.0?" document:
+
+ http://www.hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html
+
+All new and modified APIs are listed in detail in the "HDF5 Software Changes
+from Release to Release" document, in the section "Release 1.8.10 (current
+release) versus Release 1.8.9":
+
+ http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html
+
+If you have any questions or comments, please send them to the HDF Help Desk:
+
+ help@hdfgroup.org
+
+
+CONTENTS
+========
+
+- New Features
+- Support for New Platforms, Languages, and Compilers
+- Bug Fixes since HDF5-1.8.10
+- Bug Fixes since HDF5-1.8.9
+- Supported Platforms
+- Supported Configuration Features Summary
+- More Tested Platforms
+- Known Problems
+
+
+New Features
+============
+
+ Configuration
+ -------------
+ - A new tool, cmakehdf5, a build command script similar to buildhdf5,
+ has been added and is available in the bin directory.
+ (AKC - 2013/01/21)
+
+ Library
+ -------
+ - Updated to latest autotools and changed all hard *.sh scripts to
+ configure managed *.sh.in files. Removed overloading of autotools
+ TESTS variable by examples and tests. Renamed configure.in to
+ configure.ac. (ADB - 2012/08/23 - HDFFV-8129)
+ - The data sieve buffer size was set for all the datasets in the file. It
+ could waste memory if any dataset size is smaller than the sieve buffer
+ size. Now the library uses the smaller of the dataset size and the sieve
+ buffer size set in the file access property list. See Issue 7934.
+ (SLU - 2012/4/11)
+
+ Parallel Library
+ ----------------
+ - Added the H5Pget_mpio_no_collective_cause() function, which retrieves
+ the reasons why collective I/O was broken during a read/write I/O access;
+ see the sketch at the end of this section.
+ (JKM - 2012/08/30 HDFFV-8143)
+
+ - Added H5Pget_mpio_actual_io_mode_f (MSB - 2012/09/27)
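+
+ The following is a minimal sketch (added here for illustration, not part
+ of the original notes) of how an application might call
+ H5Pget_mpio_no_collective_cause() after a collective write. It assumes a
+ parallel HDF5 build; the file name, dataset name, and data layout are
+ illustrative only. If the returned masks are nonzero, they can be tested
+ against the H5D_MPIO_* cause flags defined alongside this routine.
+
+ #include <stdio.h>
+ #include <stdint.h>
+ #include "mpi.h"
+ #include "hdf5.h"
+
+ int main(int argc, char *argv[])
+ {
+     int      mpi_rank, mpi_size, value;
+     hsize_t  dims[1], count[1] = {1}, offset[1];
+     hid_t    fapl, file, filespace, memspace, dset, dxpl;
+     uint32_t local_cause = 0, global_cause = 0;
+
+     MPI_Init(&argc, &argv);
+     MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+     MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+     /* Create a file with the MPI-IO file driver. */
+     fapl = H5Pcreate(H5P_FILE_ACCESS);
+     H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
+     file = H5Fcreate("no_coll_cause.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+
+     /* One integer per MPI rank. */
+     dims[0]   = (hsize_t)mpi_size;
+     filespace = H5Screate_simple(1, dims, NULL);
+     dset      = H5Dcreate2(file, "dset", H5T_NATIVE_INT, filespace,
+                            H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+
+     /* Each rank writes its own element, requesting collective I/O. */
+     offset[0] = (hsize_t)mpi_rank;
+     H5Sselect_hyperslab(filespace, H5S_SELECT_SET, offset, NULL, count, NULL);
+     memspace = H5Screate_simple(1, count, NULL);
+     dxpl     = H5Pcreate(H5P_DATASET_XFER);
+     H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
+     value = mpi_rank;
+     H5Dwrite(dset, H5T_NATIVE_INT, memspace, filespace, dxpl, &value);
+
+     /* Ask why (or whether) collective I/O was broken; a value of zero is
+      * expected to mean collective I/O was performed as requested. */
+     H5Pget_mpio_no_collective_cause(dxpl, &local_cause, &global_cause);
+     if (mpi_rank == 0)
+         printf("local cause: 0x%x, global cause: 0x%x\n",
+                (unsigned)local_cause, (unsigned)global_cause);
+
+     H5Pclose(dxpl);
+     H5Dclose(dset);
+     H5Sclose(memspace);
+     H5Sclose(filespace);
+     H5Fclose(file);
+     H5Pclose(fapl);
+     MPI_Finalize();
+     return 0;
+ }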
+
+ Tools
+ -----
+ - h5import: Changed to allow the use of h5dump output as input files to
+ h5import. h5dump must include the "-p" option to print the properties;
+ configuration file is the captured output of h5dump. The restrictions are
+ that only one dataset with a simple datatype (integer, floating-point,
+ or string) can be processed. Integer and floating-point imports from
+ h5dump must use the "binary" option for the data file. The string version
+ uses the h5dump "-y --width=1" options to disable the indexing printouts
+ and print single columns, and must NOT use the "binary" option.
+ (ADB - 2012/07/19 HDFFV-721)
+
+ High-Level APIs
+ ---------------
+ - None
+
+ Fortran API
+ -----------
+ - Fixed a typo in return value of the nh5dread_f_c function (was 1
+ instead of 0 on success); fixed the return value to make it consistent
+ with other Fortran functions; cleaned debug statements from the code.
+ (EIP - 2012/06/23)
+
+ C++ API
+ -------
+ - None
+
+
+Support for New Platforms, Languages, and Compilers
+===================================================
+ - None
+
+Bug Fixes since HDF5-1.8.10
+===========================
+ Library
+ -------
+ - The library now behaves correctly when performing large I/O operations on
+ Mac OS X. Previously, single I/O operations > 2 GB would fail since the
+ Darwin read/write calls cannot handle the number of bytes that their
+ parameter types imply.
+ Fixes HDFFV-7975 and HDFFV-8240 (DER - 07 JAN 2013)
+ - Fixed a bug in the core VFD that caused failures when opening files > 2 GB.
+ Fixes HDFFV-8124 and HDFFV-8158 (DER - 07 JAN 2013)
+
+ Tools
+ -----
+ - The following h5stat test case failed on BG/P machines (and potentially
+ other machines that display extra output if an MPI task returns with a
+ non-zero code):
+ Testing h5stat notexist.h5
+ The test script was fixed to ignore the extra output.
+ HDFFV-8233 (AKC - 2012/12/17)
+ - h5diff: Fixed slowness when comparing HDF5 files with many attributes.
+ Much slower performance was identified in later release versions
+ (1.8.7 through 1.8.10) compared to 1.8.6. The issue was introduced
+ by an attribute-related bug fix made for the 1.8.7 release.
+ HDFFV-8145 (JKM 2012/12/13)
+
+ Testing
+ -------
+ - None
+
+Bug Fixes since HDF5-1.8.9
+==========================
+
+ Configuration
+ -------------
+ - Fixed configure --enable-production to not use -O optimization for Lion
+ and Mountain Lion systems when gcc (i686-apple-darwin11-llvm-gcc-4.2
+ (GCC) 4.2.1) is used. Somehow the -O optimization will cause some of
+ the hard conversion code in test/dt_arith.c to fail. HDFFV-8017.
+ (AKC - 2012/10/10)
+ - Fixed AIX Fortran compiler flags to use appropriate settings for
+ debugging, profiling, and optimization situations. HDFFV-8069.
+ (AKC 2012/09/27)
+
+ Library
+ -------
+ - Fixed a memory leak exposed when inserting/removing a property
+ from a property list several times. HDFFV-8022. (MSC 2012/05/18)
+ - The file_image test will fail in the "initial file image and callbacks in
+ the core VFD" sub-test if the source directory is read-only as the test
+ fails to create its test files in the build directory. This has been
+ fixed. HDFFV-8009 (AKC - 2012/07/06)
+
+
+ Parallel Library
+ ----------------
+ - The MPI-POSIX VFD was updated to include the POSIX and Windows
+ correctness features that had already been added to the other VFDs.
+ HDFFV-8058/7845. (DER 2012/09/17)
+
+ Performance
+ -------------
+ - Removed program perform/benchpar from the enable-build-all list. The
+ program will be retired or moved to another location. HDFFV-8156
+ (AKC 2012/10/01)
+ - Retired program perform/mpi-perf. Its purpose has been incorporated
+ into h5perf. (AKC 2012/09/21)
+
+ Tools
+ -----
+ - h5repack: The "h5repack -f NONE file1.h5 out.h5" command failed if the
+ source file contains a chunked dataset and a chunk dimension is bigger
+ than the dataset dimension. Another issue is that the command changed the
+ maximum dimensions if a chunk dimension is smaller than the dataset
+ dimension. These issues occurred when the dataset size is smaller than
+ 64k (the compact size limit). Both problems have been fixed.
+ HDFFV-8012 (JKM 2012/09/24)
+ - h5diff: Fixed the counter in verbose mode (-v, -r) so that it will no
+ longer add together the differences between datasets and the differences
+ between attributes of those datasets. This change makes the output of
+ verbose mode consistent for datasets, groups, and committed datatypes.
+ HDFFV-5919 (JKM 2012/09/10)
+ - h5diff: Fixed an incorrect result when comparing attribute data
+ values whose datatypes have the same class but different sizes.
+ HDFFV-7942 (JKM 2012/08/15)
+ - h5dump: Replaced single element fwrite with block writes.
+ HDFFV-1208 (ADB 2012/08/13)
+ - h5diff: Fixed test failure for "make check" due to failure of
+ copying test files when performed in HDF5 source tree. Also applied
+ to other tools. HDFFV-8107 (JKM 2012/08/01)
+ - ph5diff: Fixed intermittent hang issue on a certain operation in
+ parallel mode. It was detected by daily test for comparing
+ non-comparable objects, but it could have occurred in other
+ operations depending on machine condition. HDFFV-8003 (JKM 2012/08/01)
+ - h5diff: Fixed the function COPY_TESTFILES_TO_TESTDIR() of testh5diff.sh
+ to better report when there is an error in the file copying.
+ HDFFV-8105 (AKC 2012/07/22)
+ - h5dump: Fixed the sort-by-name display to maintain correct parent/child
+ relationships in both ascending and descending order.
+ HDFFV-8095 (ADB 2012/07/12)
+ - h5dump: Fixed the display by creation order when using option -n
+ (print contents).
+ HDFFV-5942 (ADB 2012/07/09)
+ - h5dump: Changed to allow H5T_CSET_UTF8 to be displayed in h5dump output.
+ Used technique similar to what was done in h5ls (matches library
+ options).
+ HDFFV-7999 (ADB 2012/05/23)
+ - h5diff: Fixed the tool so that it will not check and display the status
+ of dangling links without setting the --follow-symlinks option. This
+ also improved performance when comparing lots of external links without
+ the --follow-symlinks option.
+ HDFFV-7998 (JKM 2012/04/26)
+
+ F90 API
+ -------
+
+ - Fixed a typo in return value of the nh5dread_f_c function (was 1
+ instead of 0 on success); fixed the return value to make it consistent
+ with other Fortran functions; cleaned debug statements from the code.
+ (EIP - 2012/06/23)
+
+ - Fixed a problem writing/reading control characters to a dataset; writing
+ a string containing alerts, backspace, carriage_return, form_feed,
+ horizontal_tab, vertical_tab, or new_line is now tested and working.
+ (MSB - 2012/09/01)
+
+ - Corrected the integer type of H5S_UNLIMITED_F to HSIZE_T (MSB - 2012/09/01)
+
+ - Corrected the number of continuation lines in the src files
+ to be less than 32 lines for F95 compliance. (MSB - 2012/10/01)
+
+ C++ API
+ -------
+ - None
+
+ High-Level APIs:
+ ----------------
+
+ - Fixed problem with H5TBdelete_record destroying all data following the
+ deletion of a row. (MSB- 2012/7/26)
+
+ - Fixed H5LTget_attribute_string not closing an object identifier when an
+ error occurs. (MSB- 2012/7/21)
+
+ - Corrected the return type of H5TBAget_fill from herr_t to htri_t to
+ reflect that a return value of 1 indicates that a fill value is
+ present, 0 indicates a fill value is not present, and <0 indicates an
+ error.
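+
+ As an illustration of the corrected return type (this sketch is not part
+ of the original notes), a caller can now distinguish the three outcomes of
+ H5TBAget_fill directly. It assumes the high-level (hdf5_hl) table API is
+ linked in; the file, table, and field names below are made up.
+
+ #include <stdio.h>
+ #include "hdf5.h"
+ #include "hdf5_hl.h"
+
+ typedef struct {
+     int value;
+ } record_t;
+
+ int main(void)
+ {
+     const char *field_names[1]  = {"value"};
+     size_t      field_offset[1] = {HOFFSET(record_t, value)};
+     hid_t       field_types[1]  = {H5T_NATIVE_INT};
+     record_t    fill = {-1}, data[2] = {{1}, {2}}, fill_out;
+     hid_t       file, dset;
+     htri_t      has_fill;
+
+     file = H5Fcreate("tbaget_fill.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+
+     /* Create a one-field table with a fill value of -1. */
+     H5TBmake_table("Example table", file, "table", 1, 2, sizeof(record_t),
+                    field_names, field_offset, field_types, 10, &fill, 0, data);
+
+     dset     = H5Dopen2(file, "table", H5P_DEFAULT);
+     has_fill = H5TBAget_fill(file, "table", dset, (unsigned char *)&fill_out);
+
+     if (has_fill > 0)
+         printf("fill value present: %d\n", fill_out.value);
+     else if (has_fill == 0)
+         printf("no fill value defined\n");
+     else
+         printf("H5TBAget_fill failed\n");
+
+     H5Dclose(dset);
+     H5Fclose(file);
+     return 0;
+ }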
+
+ Fortran High-Level APIs:
+ ------------------------
+ - None
+
+Supported Platforms
+===================
+ AIX 5.3 xlc 10.1.0.5
+ (NASA G-ADA) xlC 10.1.0.5
+ xlf90 12.1.0.6
+
+ Linux 2.6.18-308.13.1.el5PAE GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP i686 i686 i386 compilers for 32-bit applications;
+ (jam) Version 4.1.2 20080704 (Red Hat 4.1.2-52)
+ Version 4.6.3
+ PGI C, Fortran, C++ Compilers for 32-bit
+ applications;
+ Version 11.9-0
+ Intel(R) C, C++, Fortran Compiler for 32-bit
+ applications;
+ Version 12.1
+ MPICH mpich2-1.4.1p1 compiled with
+ gcc 4.1.2 and gfortran 4.1.2
+
+ Linux 2.6.18-308.16.1.el5 GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP x86_64 GNU/Linux compilers for 32-bit applications;
+ (koala) Version 4.1.2 20080704 (Red Hat 4.1.2-52)
+ Version 4.6.3
+ PGI C, Fortran, C++ for 64-bit target on
+ x86-64;
+ Version 11.9-0
+ Version 12.5-0
+ Intel(R) C, C++, Fortran Compilers for
+ applications running on Intel(R) 64;
+ Version 12.1 (Build 20110811)
+ Version 12.1 (Build 20120212)
+ MPICH mpich2-1.4.1p1 compiled with
+ gcc 4.1.2 and gfortran 4.1.2
+
+ Linux 2.6.32-220.7.1.el6.ppc64 gcc (GCC) 4.4.6 20110731 (Red Hat 4.4.6-3)
+ #1 SMP ppc64 GNU/Linux g++ (GCC) 4.4.6 20110731
+ (ostrich) GNU Fortran (GCC) 4.4.6 20110731 (Red Hat 4.4.6-3)
+
+ Linux 2.6.32-220.23.1.1chaos Intel C, C++, Fortran Compilers
+ ch5.x86_64 GNU/Linux Version 12.1.5.339
+ (LLNL Aztec)
+
+ IBM Blue Gene/P XL C for Blue Gene/P, bgxlc V9.0
+ (LLNL uDawn) XL C++ for Blue Gene/P, bgxlC V9.0
+ XL Fortran for Blue Gene/P, bgxlf90 V11.1
+
+ SunOS 5.10 32- and 64-bit Sun C 5.9 Sun OS_sparc Patch 124867-16
+ (linew) Sun Fortran 95 8.3 Sun OS_sparc Patch 127000-13
+ Sun C++ 5.9 Sun OS_sparc Patch 124863-26
+ Sun C 5.11 SunOS_sparc
+ Sun Fortran 95 8.5 SunOS_sparc
+ Sun C++ 5.11 SunOS_sparc
+
+ Windows XP Visual Studio 2008 w/ Intel Fortran 10.1 (project files)
+
+ Windows XP x64 Visual Studio 2008 w/ Intel Fortran 10.1 (project files)
+
+ Windows 7 Visual Studio 2008 w/ Intel Fortran 11.1 (cmake)
+ Visual Studio 2010 w/ Intel Fortran 12 (cmake)
+ Cygwin(CYGWIN_NT-6.1 1.7.15(0.260/5/3) gcc(4.5.3) compiler and gfortran)
+ (cmake and autotools)
+
+ Windows 7 x64 Visual Studio 2008 w/ Intel Fortran 11.1 (cmake)
+ Visual Studio 2010 w/ Intel Fortran 12 (cmake)
+ Cygwin(CYGWIN_NT-6.1 1.7.15(0.260/5/3) gcc(4.5.3) compiler and gfortran)
+ (cmake and autotools)
+
+ Mac OS X Snow Leopard 10.6.8 gcc i686-apple-darwin11-llvm-gcc-4.2 (GCC) 4.2.1 from Xcode 3.2.6
+ Darwin Kernel Version 10.8.0 g++ i686-apple-darwin11-llvm-g++-4.2 (GCC) 4.2.1 from Xcode 3.2.6
+ (fred) gfortran GNU Fortran (GCC) 4.6.2
+ Intel C (icc), Fortran (ifort), C++ (icpc)
+ 12.1.0.038 Build 20110811
+
+ Mac OS X Snow Leopard 10.6.8 gcc i686-apple-darwin11-llvm-gcc-4.2 (GCC) 4.2.1 from Xcode 3.2.6
+ Darwin Kernel Version 10.8.0 g++ i686-apple-darwin11-llvm-g++-4.2 (GCC) 4.2.1 from Xcode 3.2.6
+ Intel 32-bit gfortran GNU Fortran (GCC) 4.6.1
+ (tejeda) Intel C (icc), Fortran (ifort), C++ (icpc)
+ 12.1.0.038 Build 20110811
+
+ Mac OS X Lion 10.7.3 gcc i686-apple-darwin11-llvm-gcc-4.2 (GCC) 4.2.1 from Xcode 4.2.1
+ 32- and 64-bit g++ i686-apple-darwin11-llvm-g++-4.2 (GCC) 4.2.1 from Xcode 4.2.1
+ (duck) gfortran GNU Fortran (GCC) 4.6.2
+
+ Mac OS X Mountain Lion 10.8.1 cc Apple clang version 4.0 from Xcode 4.5.1
+ (owl) c++ Apple clang version 4.0 from Xcode 4.5.1
+ gcc i686-apple-darwin11-llvm-gcc-4.2 (GCC) 4.2.1 from Xcode 4.5.1
+ g++ i686-apple-darwin11-llvm-g++-4.2 (GCC) 4.2.1 from Xcode 4.5.1
+ gfortran GNU Fortran (GCC) 4.6.2
+
+
+Tested Configuration Features Summary
+=====================================
+
+ In the tables below
+ y = tested
+ n = not tested in this release
+ C = Cluster
+ W = Workstation
+ x = not working in this release
+ dna = does not apply
+ ( ) = footnote appears below second table
+ <blank> = testing incomplete on this feature or platform
+
+Platform C F90/ F90 C++ zlib SZIP
+ parallel F2003 parallel
+Solaris2.10 32-bit n y/y n y y y
+Solaris2.10 64-bit n y/n n y y y
+Windows 7 y y/n n y y y
+Windows 7 x64 y y/n n y y y
+Mac OS X Snow Leopard 10.6.8 32-bit n y/y n y y n
+Mac OS X Snow Leopard 10.6.8 64-bit n y/y n y y y
+Mac OS X Lion 10.7.3 32-bit n y/y n y y n
+Mac OS X Lion 10.7.3 64-bit n y/y n y y y
+Mac OS X Mountain Lion 10.8.1 64-bit n y/n n y y n
+AIX 5.3 32- and 64-bit y y/n y y y y
+CentOS 5.5 Linux 2.6.18-308 i686 GNU y y/y y y y y
+CentOS 5.5 Linux 2.6.18-308 i686 Intel n y/y n y y y
+CentOS 5.5 Linux 2.6.18-308 i686 PGI n y/y n y y y
+CentOS 5.5 Linux 2.6.18 x86_64 GNU y y/y y y y y
+CentOS 5.5 Linux 2.6.18 x86_64 Intel n y/y n y y y
+CentOS 5.5 Linux 2.6.18 x86_64 PGI n y/y n y y y
+Linux 2.6.32-220.7.1.el6.ppc64 n y/n n y y y
+
+
+Platform Shared Shared Shared Thread-
+ C libs F90 libs C++ libs safe
+Solaris2.10 32-bit y y y y
+Solaris2.10 64-bit n n n n
+Windows 7 y y y y
+Windows 7 x64 y y y y
+Mac OS X Snow Leopard 10.6.8 32-bit y n y n
+Mac OS X Snow Leopard 10.6.8 64-bit y n y n
+Mac OS X Lion 10.7.3 32-bit y n y y
+Mac OS X Lion 10.7.3 64-bit y n y y
+Mac OS X Mountain Lion 10.8.1 64-bit y n y y
+AIX 5.3 32- and 64-bit n n n y
+CentOS 5.5 Linux 2.6.18-308 i686 GNU y y y y
+CentOS 5.5 Linux 2.6.18-308 i686 Intel y y y n
+CentOS 5.5 Linux 2.6.18-308 i686 PGI y y y n
+CentOS 5.5 Linux 2.6.18 x86_64 GNU y y y y
+CentOS 5.5 Linux 2.6.18 x86_64 Intel y y y n
+CentOS 5.5 Linux 2.6.18 x86_64 PGI y y y n
+Linux 2.6.32-220.7.1.el6.ppc64 y y y n
+
+Compiler versions for each platform are listed in the preceding
+"Supported Platforms" table.
+
+
+More Tested Platforms
+=====================
+The following platforms are not supported but have been tested for this release.
+
+ FreeBSD 8.2-STABLE i386 gcc 4.2.1 [FreeBSD] 20070719
+ (loyalty) gcc 4.6.1 20110422
+ g++ 4.6.1 20110422
+ gfortran 4.6.1 20110422
+
+ FreeBSD 8.2-STABLE amd64 gcc 4.2.1 [FreeBSD] 20070719
+ (freedom) gcc 4.6.1 20110422
+ g++ 4.6.1 20110422
+ gfortran 4.6.1 20110422
+
+ Debian6.0.3 2.6.32-5-686 #1 SMP i686 GNU/Linux
+ gcc (Debian 4.4.5-8) 4.4.5
+ GNU Fortran (Debian 4.4.5-8) 4.4.5
+ (cmake and autotools)
+
+ Debian6.0.3 2.6.32-5-amd64 #1 SMP x86_64 GNU/Linux
+ gcc (Debian 4.4.5-8) 4.4.5
+ GNU Fortran (Debian 4.4.5-8) 4.4.5
+ (cmake and autotools)
+
+ Fedora17 3.5.2-1.fc17.i6866 #1 SMP i686 i686 i386 GNU/Linux
+ gcc (GCC) 4.7.0 20120507 (Red Hat 4.7.0-5)
+ GNU Fortran (GCC) 4.7.0 20120507 (Red Hat 4.7.0-5)
+ (cmake and autotools)
+
+ Fedora17 3.5.2-1.fc17.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux
+ gcc (GCC) 4.7.0 20120507 (Red Hat 4.7.0-5)
+ GNU Fortran (GCC) 4.7.0 20120507 (Red Hat 4.7.0-5)
+ (cmake and autotools)
+
+ SUSE 12.2 3.4.6-2.10-desktop #1 SMP PREEMPT i686 i686 i386 GNU/Linux
+ gcc (SUSE Linux) 4.7.1
+ GNU Fortran (SUSE Linux) 4.7.1
+ (cmake and autotools)
+
+ SUSE 12.2 3.4.6-2.10-desktop #1 SMP PREEMPT x86_64 x86_64 x86_64 GNU/Linux
+ gcc (SUSE Linux) 4.7.1
+ GNU Fortran (SUSE Linux) 4.7.1
+ (cmake and autotools)
+
+ Ubuntu 12.04 3.2.0-29-generic #46-Ubuntu SMP i686 GNU/Linux
+ gcc (Ubuntu/Linaro 4.6.3-1ubuntu5) 4.6.3
+ GNU Fortran (Ubuntu/Linaro 4.6.3-1ubuntu5) 4.6.3
+ (cmake and autotools)
+
+ Ubuntu 12.04 3.2.0-29-generic #46-Ubuntu SMP x86_64 GNU/Linux
+ gcc (Ubuntu/Linaro 4.6.3-1ubuntu5) 4.6.3
+ GNU Fortran (Ubuntu/Linaro 4.6.3-1ubuntu5) 4.6.3
+ (cmake and autotools)
+ (Use optimization level -O1)
+
+ Cray Linux Environment (CLE) PrgEnv-pgi/4.0.46
+ hopper.nersc.gov pgcc 12.5-0 64-bit target on x86-64 Linux -tp shanghai
+ pgf90 12.5-0 64-bit target on x86-64 Linux -tp shanghai
+ pgCC 12.5-0 64-bit target on x86-64 Linux -tp shanghai
+
+
+Known Problems
+==============
+* The C++ and FORTRAN bindings are not currently working on FreeBSD with the
+ native release 8.2 compilers (4.2.1), but are working with gcc 4.6 from the
+ ports (and probably gcc releases after that).
+ (QAK - 2012/10/19)
+
+* The following h5dump test case fails on BG/P machines (and potentially other
+ machines that use a command script to launch executables):
+
+ h5dump --no-compact-subset -d "AHFINDERDIRECT::ah_centroid_t[0] it=0 tl=0"
+ tno-subset.h5
+
+ This is due to the embedded spaces in the dataset name being interpreted
+ by the command script launcher as meta-characters, thus passing three
+ arguments to h5dump's -d flag. The command passes if run by hand, just
+ not via the test script.
+ (AKC - 2012/05/03)
+
+* On hopper, the build fails when RUNSERIAL and RUNPARALLEL are set
+ to "aprun -np X" because the H5lib_settings.c file is not generated
+ properly. Leaving those environment variables unset works because
+ configure automatically detects that it is a Cray system and uses
+ the proper launch commands when necessary.
+ (MSC - 2012/04/18)
+
+* The data conversion test dt_arith.c fails in "long double" to integer
+ conversion on Ubuntu 11.10 (3.0.0.13 kernel) with GCC 4.6.1 if the library
+ is built with optimization -O3 or -O2. The older GCC (4.5) or newer kernel
+ (3.2.2 on Fedora) doesn't have the problem. Users should lower the
+ optimization level (-O1 or -O0) by defining CFLAGS in the command line of
+ "configure" like:
+
+ CFLAGS=-O1 ./configure
+
+ This will override the library's default optimization level.
+ (SLU - 2012/02/07 - HDFFV-7829)
+
+* The STDIO VFD does not work on some architectures, possibly due to 32/64
+ bit or large file issues. The basic STDIO VFD test is known to fail on
+ 64-bit SunOS 5.10 on SPARC when built with -m64 and 32-bit OS X/Darwin
+ 10.7.0. The STDIO VFD test has been disabled while we investigate and
+ a fix should appear in a future release.
+ (DER - 2011/10/14 - HDFFV-8235)
+
+* h5diff can report inconsistent results when comparing datasets of enum type
+ that contain invalid values. This is due to how enum types are handled in
+ the library and will be addressed in a future release.
+ (DER - 2011/10/14 - HDFFV-7527)
+
+* The links test can fail under the stdio VFD due to some issues with external
+ links. This will be investigated and fixed in a future release.
+ (DER - 2011/10/14 - HDFFV-7768)
+
+* After the shared library support was fixed for some bugs, it was discovered
+ that "make prefix=XXX install" no longer works for shared libraries. It
+ still works correctly for static libraries. Therefore, if you want to
+ install the HDF5 shared libraries in a location such as /usr/local/hdf5,
+ you need to specify the location via the --prefix option during configure
+ time. E.g., ./configure --prefix=/usr/local/hdf5 ...
+ (AKC - 2011/05/07 - HDFFV-7583)
+
+* The parallel test, t_shapesame, in testpar/, may run for a long time and may
+ be terminated by the alarm signal. If that happens, one can increase the
+ alarm seconds (default is 1200 seconds = 20 minutes) by setting the
+ environment variable, $HDF5_ALARM_SECONDS, to a larger value such as 3600
+ (60 minutes). Note that the t_shapesame test may fail on some systems
+ (see the "While working on the 1.8.6 release..." problem below). If
+ it does, it will waste more time if $HDF5_ALARM_SECONDS is set
+ to a larger value.
+ (AKC - 2011/05/07)
+
+* The C++ and FORTRAN bindings are not currently working on FreeBSD.
+ (QAK - 2011/04/26)
+
+* Shared Fortran libraries are not quite working on AIX. While they are
+ generated when --enable-shared is specified, the fortran and hl/fortran
+ tests fail. We are looking into the issue. HL and C++ shared libraries
+ should now be working as intended, however.
+ (MAM - 2011/04/20)
+
+* The --with-mpe configure option does not work with Mpich2.
+ (AKC - 2011/03/10)
+
+* While working on the 1.8.6 release of HDF5, a bug was discovered that can
+ occur when reading from a dataset in parallel shortly after it has been
+ written to collectively. The issue was exposed by a new test in the parallel
+ HDF5 test suite, but had existed before that. We believe the problem lies with
+ certain MPI implementations and/or file systems.
+
+ We have provided a pure MPI test program, as well as a standalone HDF5
+ program, that can be used to determine if this is an issue on your system.
+ They should be run across multiple nodes with a varying number of processes.
+ These programs can be found at:
+ http://www.hdfgroup.org/ftp/HDF5/examples/known_problems/
+ (NAF - 2011/01/19)
+
+* The library's test dt_arith.c showed a compiler's rounding problem on
+ Cygwin when converting from unsigned long long to long double. The
+ library's own conversion works fine. We defined a macro for Cygwin to
+ skip this test until we can solve the problem.
+ (SLU - 2010/05/05 - HDFFV-1264)
+
+* The VFL driver interface is not backward compatible. In H5FDpublic.h, the
+ structure H5FD_class_t changed in 1.8: a new parameter was added to the
+ get_eoa and set_eoa callback functions, a new callback function,
+ get_type_map, was added, and the public function H5FDrealloc was removed.
+ The problem only happens when users define their own driver for 1.6 and
+ try to plug it into the 1.8 library. Because there's only one user
+ complaining about it, we (Elena, Quincey, and I) decided to leave it as
+ it is (see bug report #1279). Quincey will make a plan for 1.10.
+ (SLU - 2010/02/02)
+
+* The --enable-static-exec configure flag will only statically link libraries
+ if the static version of that library is present. If only the shared version
+ of a library exists (i.e., most system libraries on Solaris, AIX, and Mac,
+ for example, only have shared versions), the flag should still result in a
+ successful compilation, but note that the installed executables will not be
+ fully static. Thus, the only guarantee on these systems is that the
+ executable is statically linked with just the HDF5 library.
+ (MAM - 2009/11/04)
+
+* Parallel tests failed with 16 processes with data inconsistency at testphdf5
+ / dataset_readAll. Parallel tests also failed with 32 and 64 processes with
+ collective abort of all ranks at t_posix_compliant / allwrite_allread_blocks
+ with MPI IO.
+ (CMC - 2009/04/28)
+
+* On an Intel 64 Linux cluster (RH 4, Linux 2.6.9) with Intel 10.0 compilers,
+ use -mp -O1 compilation flags to build the libraries. A higher level of
+ optimization causes failures in several HDF5 library tests.
+
+* A dataset created or rewritten with a v1.6.3 library or after cannot be read
+ with the v1.6.2 library or before when the Fletcher32 EDC filter is enabled.
+ There was a bug in the calculation of the Fletcher32 checksum in the
+ library before v1.6.3; the checksum value was not consistent between big-
+ endian and little-endian systems. This bug was fixed in Release 1.6.3.
+ However, after fixing the bug, the checksum value was no longer the same as
+ before on little-endian systems. Library releases after 1.6.4 can still read
+ datasets created or rewritten with an HDF5 library of v1.6.2 or before.
+ (SLU - 2005/06/30)
+
+
+%%%%1.8.10%%%%
+
+
+HDF5 version 1.8.10 released on 2012-10-26
+================================================================================
+
+INTRODUCTION
+============
+
+This document describes the differences between HDF5-1.8.9 and
+HDF5 1.8.10, and contains information on the platforms tested and
+known problems in HDF5-1.8.10.
+For more details, see the files HISTORY-1_0-1_8_0_rc3.txt
+and HISTORY-1_8.txt in the release_docs/ directory of the HDF5 source.
+
+Links to the HDF5 1.8.10 source code, documentation, and additional materials
+can be found on the HDF5 web page at:
+
+ http://www.hdfgroup.org/products/hdf5/
+
+The HDF5 1.8.10 release can be obtained from:
+
+ http://www.hdfgroup.org/HDF5/release/obtain5.html
+
+User documentation for 1.8.10 can be accessed directly at this location:
+
+ http://www.hdfgroup.org/HDF5/doc/
+
+New features in the HDF5-1.8.x release series, including brief general
+descriptions of some new and modified APIs, are described in the "What's New
+in 1.8.0?" document:
+
+ http://www.hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html
+
+All new and modified APIs are listed in detail in the "HDF5 Software Changes
+from Release to Release" document, in the section "Release 1.8.10 (current
+release) versus Release 1.8.9":
+
+ http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html
+
+If you have any questions or comments, please send them to the HDF Help Desk:
+
+ help@hdfgroup.org
+
+
+CONTENTS
+========
+
+- New Features
+- Support for New Platforms, Languages, and Compilers
+- Bug Fixes since HDF5-1.8.9
+- Supported Platforms
+- Supported Configuration Features Summary
+- More Tested Platforms
+- Known Problems
+
+
+New Features
+============
+
+ Configuration
+ -------------
+ - None
+
+ Library
+ -------
+ - Updated to latest autotools and changed all hard *.sh scripts to
+ configure managed *.sh.in files. Removed overloading of autotools
+ TESTS variable by examples and tests. Renamed configure.in to
+ configure.ac. (ADB - 2012/08/23 - HDFFV-8129)
+ - The data sieve buffer size was set for all the datasets in the file. It
+ could waste memory if any dataset size is smaller than the sieve buffer
+ size. Now the library uses the smaller of the dataset size and the sieve
+ buffer size set in the file access property list. See Issue 7934.
+ (SLU - 2012/4/11)
+
+ Parallel Library
+ ----------------
+ - Added the H5Pget_mpio_no_collective_cause() function, which retrieves
+ the reasons why collective I/O was broken during a read/write I/O access.
+ (JKM - 2012/08/30 HDFFV-8143)
+
+ - Added H5Pget_mpio_actual_io_mode_f (MSB - 2012/09/27)
+
+ Tools
+ -----
+ - h5import: Changed to allow the use of h5dump output as input files to
+ h5import. h5dump must include the "-p" option to print the properties;
+ configuration file is the captured output of h5dump. The restrictions are
+ that only one dataset with a simple datatype (integer, floating-point,
+ or string) can be processed. Integer and floating-point imports from
+ h5dump must use the "binary" option for the data file. The string version
+ uses the h5dump "-y --width=1" options to disable the indexing printouts
+ and print single columns, and must NOT use the "binary" option.
+ (ADB - 2012/07/19 HDFFV-721)
+
+ High-Level APIs
+ ---------------
+ - None
+
+ Fortran API
+ -----------
+ - Fixed a typo in return value of the nh5dread_f_c function (was 1
+ instead of 0 on success); fixed the return value to make it consistent
+ with other Fortran functions; cleaned debug statements from the code.
+ (EIP - 2012/06/23)
+
+ C++ API
+ -------
+ - None
+
+
+Support for New Platforms, Languages, and Compilers
+===================================================
+ - None
+
+Bug Fixes since HDF5-1.8.9
+==========================
+
+ Configuration
+ -------------
+ - Fixed configure --enable-production to not use -O optimization for Lion
+ and Mountain Lion systems when gcc (i686-apple-darwin11-llvm-gcc-4.2
+ (GCC) 4.2.1) is used. Somehow the -O optimization will cause some of
+ the hard conversion code in test/dt_arith.c to fail. HDFFV-8017.
+ (AKC - 2012/10/10)
+ - Fixed AIX Fortran compiler flags to use appropriate settings for
+ debugging, profiling, and optimization situations. HDFFV-8069.
+ (AKC 2012/09/27)
+
+ Library
+ -------
+ - Fixed a memory leak exposed when inserting/removing a property
+ from a property list several times. HDFFV-8022. (MSC 2012/05/18)
+ - The file_image test will fail in the "initial file image and callbacks in
+ the core VFD" sub-test if the source directory is read-only as the test
+ fails to create its test files in the build directory. This has been
+ fixed. HDFFV-8009 (AKC - 2012/07/06)
+
+
+ Parallel Library
+ ----------------
+ - The MPI-POSIX VFD was updated to include the POSIX and Windows
+ correctness features that had already been added to the other VFDs.
+ HDFFV-8058/7845. (DER 2012/09/17)
+
+ Performance
+ -------------
+ - Removed program perform/benchpar from the enable-build-all list. The
+ program will be retired or moved to another location. HDFFV-8156
+ (AKC 2012/10/01)
+ - Retired program perform/mpi-perf. Its purpose has been incorporated
+ into h5perf. (AKC 2012/09/21)
+
+ Tools
+ -----
+ - h5repack: The "h5repack -f NONE file1.h5 out.h5" command failed if the
+ source file contains a chunked dataset and a chunk dimension is bigger
+ than the dataset dimension. Another issue is that the command changed the
+ maximum dimensions if a chunk dimension is smaller than the dataset
+ dimension. These issues occurred when the dataset size is smaller than
+ 64k (the compact size limit). Both problems have been fixed.
+ HDFFV-8012 (JKM 2012/09/24)
+ - h5diff: Fixed the counter in verbose mode (-v, -r) so that it will no
+ longer add together the differences between datasets and the differences
+ between attributes of those datasets. This change makes the output of
+ verbose mode consistent for datasets, groups, and committed datatypes.
+ HDFFV-5919 (JKM 2012/09/10)
+ - h5diff: Fixed an incorrect result when comparing attribute data
+ values whose datatypes have the same class but different sizes.
+ HDFFV-7942 (JKM 2012/08/15)
+ - h5dump: Replaced single element fwrite with block writes.
+ HDFFV-1208 (ADB 2012/08/13)
+ - h5diff: Fixed test failure for "make check" due to failure of
+ copying test files when performed in HDF5 source tree. Also applied
+ to other tools. HDFFV-8107 (JKM 2012/08/01)
+ - ph5diff: Fixed intermittent hang issue on a certain operation in
+ parallel mode. It was detected by daily test for comparing
+ non-comparable objects, but it could have occurred in other
+ operations depending on machine condition. HDFFV-8003 (JKM 2012/08/01)
+ - h5diff: Fixed the function COPY_TESTFILES_TO_TESTDIR() of testh5diff.sh
+ to better report when there is an error in the file copying.
+ HDFFV-8105 (AKC 2012/07/22)
+ - h5dump: Fixed the sort-by-name display to maintain correct parent/child
+ relationships in both ascending and descending order.
+ HDFFV-8095 (ADB 2012/07/12)
+ - h5dump: Fixed the display by creation order when using option -n
+ (print contents).
+ HDFFV-5942 (ADB 2012/07/09)
+ - h5dump: Changed to allow H5T_CSET_UTF8 to be displayed in h5dump output.
+ Used technique similar to what was done in h5ls (matches library
+ options).
+ HDFFV-7999 (ADB 2012/05/23)
+ - h5diff: Fixed the tool so that it will not check and display the status
+ of dangling links without setting the --follow-symlinks option. This
+ also improved performance when comparing lots of external links without
+ the --follow-symlinks option.
+ HDFFV-7998 (JKM 2012/04/26)
+
+ F90 API
+ -------
+
+ - Fixed a typo in return value of the nh5dread_f_c function (was 1
+ instead of 0 on success); fixed the return value to make it consistent
+ with other Fortran functions; cleaned debug statements from the code.
+ (EIP - 2012/06/23)
+
+ - Fixed a problem writing/reading control characters to a dataset; writing
+ a string containing alerts, backspace, carriage_return, form_feed,
+ horizontal_tab, vertical_tab, or new_line is now tested and working.
+ (MSB - 2012/09/01)
+
+ - Corrected the integer type of H5S_UNLIMITED_F to HSIZE_T (MSB - 2012/09/01)
+
+ - Corrected the number of continuation lines in the src files
+ to be less than 32 lines for F95 compliance. (MSB - 2012/10/01)
+
+ C++ API
+ -------
+ - None
+
+ High-Level APIs:
+ ----------------
+
+ - Fixed problem with H5TBdelete_record destroying all data following the
+ deletion of a row. (MSB- 2012/7/26)
+
+ - Fixed H5LTget_attribute_string not closing an object identifier when an
+ error occurs. (MSB- 2012/7/21)
+
+ - Corrected the return type of H5TBAget_fill from herr_t to htri_t to
+ reflect that a return value of 1 indicates that a fill value is
+ present, 0 indicates a fill value is not present, and <0 indicates an
+ error.
+
+ Fortran High-Level APIs:
+ ------------------------
+ - None
+
+Supported Platforms
+===================
+ AIX 5.3 xlc 10.1.0.5
+ (NASA G-ADA) xlC 10.1.0.5
+ xlf90 12.1.0.6
+
+ Linux 2.6.18-308.13.1.el5PAE GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP i686 i686 i386 compilers for 32-bit applications;
+ (jam) Version 4.1.2 20080704 (Red Hat 4.1.2-52)
+ Version 4.6.3
+ PGI C, Fortran, C++ Compilers for 32-bit
+ applications;
+ Version 11.9-0
+ Intel(R) C, C++, Fortran Compiler for 32-bit
+ applications;
+ Version 12.1
+ MPICH mpich2-1.4.1p1 compiled with
+ gcc 4.1.2 and gfortran 4.1.2
+
+ Linux 2.6.18-308.16.1.el5 GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP x86_64 GNU/Linux compilers for 32-bit applications;
+ (koala) Version 4.1.2 20080704 (Red Hat 4.1.2-52)
+ Version 4.6.3
+ PGI C, Fortran, C++ for 64-bit target on
+ x86-64;
+ Version 11.9-0
+ Version 12.5-0
+ Intel(R) C, C++, Fortran Compilers for
+ applications running on Intel(R) 64;
+ Version 12.1 (Build 20110811)
+ Version 12.1 (Build 20120212)
+ MPICH mpich2-1.4.1p1 compiled with
+ gcc 4.1.2 and gfortran 4.1.2
+
+ Linux 2.6.32-220.7.1.el6.ppc64 gcc (GCC) 4.4.6 20110731 (Red Hat 4.4.6-3)
+ #1 SMP ppc64 GNU/Linux g++ (GCC) 4.4.6 20110731
+ (ostrich) GNU Fortran (GCC) 4.4.6 20110731 (Red Hat 4.4.6-3)
+
+ Linux 2.6.32-220.23.1.1chaos Intel C, C++, Fortran Compilers
+ ch5.x86_64 GNU/Linux Version 12.1.5.339
+ (LLNL Aztec)
+
+ IBM Blue Gene/P XL C for Blue Gene/P, bgxlc V9.0
+ (LLNL uDawn) XL C++ for Blue Gene/P, bgxlC V9.0
+ XL Fortran for Blue Gene/P, bgxlf90 V11.1
+
+ SunOS 5.10 32- and 64-bit Sun C 5.9 Sun OS_sparc Patch 124867-16
+ (linew) Sun Fortran 95 8.3 Sun OS_sparc Patch 127000-13
+ Sun C++ 5.9 Sun OS_sparc Patch 124863-26
+ Sun C 5.11 SunOS_sparc
+ Sun Fortran 95 8.5 SunOS_sparc
+ Sun C++ 5.11 SunOS_sparc
+
+ Windows XP Visual Studio 2008 w/ Intel Fortran 10.1 (project files)
+
+ Windows XP x64 Visual Studio 2008 w/ Intel Fortran 10.1 (project files)
+
+ Windows 7 Visual Studio 2008 w/ Intel Fortran 11.1 (cmake)
+ Visual Studio 2010 w/ Intel Fortran 12 (cmake)
+ Cygwin(CYGWIN_NT-6.1 1.7.15(0.260/5/3) gcc(4.5.3) compiler and gfortran)
+ (cmake and autotools)
+
+ Windows 7 x64 Visual Studio 2008 w/ Intel Fortran 11.1 (cmake)
+ Visual Studio 2010 w/ Intel Fortran 12 (cmake)
+ Cygwin(CYGWIN_NT-6.1 1.7.15(0.260/5/3) gcc(4.5.3) compiler and gfortran)
+ (cmake and autotools)
+
+ Mac OS X Snow Leopard 10.6.8 gcc i686-apple-darwin11-llvm-gcc-4.2 (GCC) 4.2.1 from Xcode 3.2.6
+ Darwin Kernel Version 10.8.0 g++ i686-apple-darwin11-llvm-g++-4.2 (GCC) 4.2.1 from Xcode 3.2.6
+ (fred) gfortran GNU Fortran (GCC) 4.6.2
+ Intel C (icc), Fortran (ifort), C++ (icpc)
+ 12.1.0.038 Build 20110811
+
+ Mac OS X Snow Leopard 10.6.8 gcc i686-apple-darwin11-llvm-gcc-4.2 (GCC) 4.2.1 from Xcode 3.2.6
+ Darwin Kernel Version 10.8.0 g++ i686-apple-darwin11-llvm-g++-4.2 (GCC) 4.2.1 from Xcode 3.2.6
+ Intel 32-bit gfortran GNU Fortran (GCC) 4.6.1
+ (tejeda) Intel C (icc), Fortran (ifort), C++ (icpc)
+ 12.1.0.038 Build 20110811
+
+ Mac OS X Lion 10.7.3 gcc i686-apple-darwin11-llvm-gcc-4.2 (GCC) 4.2.1 from Xcode 4.2.1
+ 32- and 64-bit g++ i686-apple-darwin11-llvm-g++-4.2 (GCC) 4.2.1 from Xcode 4.2.1
+ (duck) gfortran GNU Fortran (GCC) 4.6.2
+
+ Mac OS X Mountain Lion 10.8.1 cc Apple clang version 4.0 from Xcode 4.5.1
+ (owl) c++ Apple clang version 4.0 from Xcode 4.5.1
+ gcc i686-apple-darwin11-llvm-gcc-4.2 (GCC) 4.2.1 from Xcode 4.5.1
+ g++ i686-apple-darwin11-llvm-g++-4.2 (GCC) 4.2.1 from Xcode 4.5.1
+ gfortran GNU Fortran (GCC) 4.6.2
+
+
+Tested Configuration Features Summary
+=====================================
+
+ In the tables below
+ y = tested
+ n = not tested in this release
+ C = Cluster
+ W = Workstation
+ x = not working in this release
+ dna = does not apply
+ ( ) = footnote appears below second table
+ <blank> = testing incomplete on this feature or platform
+
+Platform C F90/ F90 C++ zlib SZIP
+ parallel F2003 parallel
+Solaris2.10 32-bit n y/y n y y y
+Solaris2.10 64-bit n y/n n y y y
+Windows 7 y y/n n y y y
+Windows 7 x64 y y/n n y y y
+Mac OS X Snow Leopard 10.6.8 32-bit n y/y n y y n
+Mac OS X Snow Leopard 10.6.8 64-bit n y/y n y y y
+Mac OS X Lion 10.7.3 32-bit n y/y n y y n
+Mac OS X Lion 10.7.3 64-bit n y/y n y y y
+Mac OS X Mountain Lion 10.8.1 64-bit n y/n n y y n
+AIX 5.3 32- and 64-bit y y/n y y y y
+CentOS 5.5 Linux 2.6.18-308 i686 GNU y y/y y y y y
+CentOS 5.5 Linux 2.6.18-308 i686 Intel n y/y n y y y
+CentOS 5.5 Linux 2.6.18-308 i686 PGI n y/y n y y y
+CentOS 5.5 Linux 2.6.18 x86_64 GNU y y/y y y y y
+CentOS 5.5 Linux 2.6.18 x86_64 Intel n y/y n y y y
+CentOS 5.5 Linux 2.6.18 x86_64 PGI n y/y n y y y
+Linux 2.6.32-220.7.1.el6.ppc64 n y/n n y y y
+
+
+Platform Shared Shared Shared Thread-
+ C libs F90 libs C++ libs safe
+Solaris2.10 32-bit y y y y
+Solaris2.10 64-bit n n n n
+Windows 7 y y y y
+Windows 7 x64 y y y y
+Mac OS X Snow Leopard 10.6.8 32-bit y n y n
+Mac OS X Snow Leopard 10.6.8 64-bit y n y n
+Mac OS X Lion 10.7.3 32-bit y n y y
+Mac OS X Lion 10.7.3 64-bit y n y y
+Mac OS X Mountain Lion 10.8.1 64-bit y n y y
+AIX 5.3 32- and 64-bit n n n y
+CentOS 5.5 Linux 2.6.18-308 i686 GNU y y y y
+CentOS 5.5 Linux 2.6.18-308 i686 Intel y y y n
+CentOS 5.5 Linux 2.6.18-308 i686 PGI y y y n
+CentOS 5.5 Linux 2.6.18 x86_64 GNU y y y y
+CentOS 5.5 Linux 2.6.18 x86_64 Intel y y y n
+CentOS 5.5 Linux 2.6.18 x86_64 PGI y y y n
+Linux 2.6.32-220.7.1.el6.ppc64 y y y n
+
+Compiler versions for each platform are listed in the preceding
+"Supported Platforms" table.
+
+
+More Tested Platforms
+=====================
+The following platforms are not supported but have been tested for this release.
+
+ FreeBSD 8.2-STABLE i386 gcc 4.2.1 [FreeBSD] 20070719
+ (loyalty) gcc 4.6.1 20110422
+ g++ 4.6.1 20110422
+ gfortran 4.6.1 20110422
+
+ FreeBSD 8.2-STABLE amd64 gcc 4.2.1 [FreeBSD] 20070719
+ (freedom) gcc 4.6.1 20110422
+ g++ 4.6.1 20110422
+ gfortran 4.6.1 20110422
+
+ Debian6.0.3 2.6.32-5-686 #1 SMP i686 GNU/Linux
+ gcc (Debian 4.4.5-8) 4.4.5
+ GNU Fortran (Debian 4.4.5-8) 4.4.5
+ (cmake and autotools)
+
+ Debian6.0.3 2.6.32-5-amd64 #1 SMP x86_64 GNU/Linux
+ gcc (Debian 4.4.5-8) 4.4.5
+ GNU Fortran (Debian 4.4.5-8) 4.4.5
+ (cmake and autotools)
+
+ Fedora17 3.5.2-1.fc17.i6866 #1 SMP i686 i686 i386 GNU/Linux
+ gcc (GCC) 4.7.0 20120507 (Red Hat 4.7.0-5)
+ GNU Fortran (GCC) 4.7.0 20120507 (Red Hat 4.7.0-5)
+ (cmake and autotools)
+
+ Fedora17 3.5.2-1.fc17.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux
+ gcc (GCC) 4.7.0 20120507 (Red Hat 4.7.0-5)
+ GNU Fortran (GCC) 4.7.0 20120507 (Red Hat 4.7.0-5)
+ (cmake and autotools)
+
+ SUSE 12.2 3.4.6-2.10-desktop #1 SMP PREEMPT i686 i686 i386 GNU/Linux
+ gcc (SUSE Linux) 4.7.1
+ GNU Fortran (SUSE Linux) 4.7.1
+ (cmake and autotools)
+
+ SUSE 12.2 3.4.6-2.10-desktop #1 SMP PREEMPT x86_64 x86_64 x86_64 GNU/Linux
+ gcc (SUSE Linux) 4.7.1
+ GNU Fortran (SUSE Linux) 4.7.1
+ (cmake and autotools)
+
+ Ubuntu 12.04 3.2.0-29-generic #46-Ubuntu SMP i686 GNU/Linux
+ gcc (Ubuntu/Linaro 4.6.3-1ubuntu5) 4.6.3
+ GNU Fortran (Ubuntu/Linaro 4.6.3-1ubuntu5) 4.6.3
+ (cmake and autotools)
+
+ Ubuntu 12.04 3.2.0-29-generic #46-Ubuntu SMP x86_64 GNU/Linux
+ gcc (Ubuntu/Linaro 4.6.3-1ubuntu5) 4.6.3
+ GNU Fortran (Ubuntu/Linaro 4.6.3-1ubuntu5) 4.6.3
+ (cmake and autotools)
+ (Use optimization level -O1)
+
+ Cray Linux Environment (CLE) PrgEnv-pgi/4.0.46
+ hopper.nersc.gov pgcc 12.5-0 64-bit target on x86-64 Linux -tp shanghai
+ pgf90 12.5-0 64-bit target on x86-64 Linux -tp shanghai
+ pgCC 12.5-0 64-bit target on x86-64 Linux -tp shanghai
+
+
+Known Problems
+==============
+* The following h5stat test case fails on BG/P machines (and potentially other
+ machines that display extra output if an MPI task returns with a non-zero
+ code):
+ Testing h5stat notexist.h5
+
+ The test actually runs and passes as expected. It is the extra output from
+ the MPI process that causes the test script to fail. This will be fixed
+ in the next release. (AKC - 2012/10/25 - HDFFV-8233)
+
+* The C++ and FORTRAN bindings are not currently working on FreeBSD with the
+ native release 8.2 compilers (4.2.1), but are working with gcc 4.6 from the
+ ports (and probably gcc releases after that).
+ (QAK - 2012/10/19)
+
+* The following h5dump test case fails on BG/P machines (and potentially other
+ machines that use a command script to launch executables):
+
+ h5dump --no-compact-subset -d "AHFINDERDIRECT::ah_centroid_t[0] it=0 tl=0"
+ tno-subset.h5
+
+ This is due to the embedded spaces in the dataset name being interpreted
+ by the command script launcher as meta-characters, thus passing three
+ arguments to h5dump's -d flag. The command passes if run by hand, just
+ not via the test script.
+ (AKC - 2012/05/03)
+
+* On hopper, the build fails when RUNSERIAL and RUNPARALLEL are set
+ to "aprun -np X" because the H5lib_settings.c file is not generated
+ properly. Leaving those environment variables unset works because
+ configure automatically detects that it is a Cray system and uses
+ the proper launch commands when necessary.
+ (MSC - 2012/04/18)
+
+* The data conversion test dt_arith.c fails in "long double" to integer
+ conversion on Ubuntu 11.10 (3.0.0.13 kernel) with GCC 4.6.1 if the library
+ is built with optimization -O3 or -O2. The older GCC (4.5) or newer kernel
+ (3.2.2 on Fedora) doesn't have the problem. Users should lower the
+ optimization level (-O1 or -O0) by defining CFLAGS in the command line of
+ "configure" like:
+
+ CFLAGS=-O1 ./configure
+
+ This will override the library's default optimization level.
+ (SLU - 2012/02/07 - HDFFV-7829)
+
+* The STDIO VFD does not work on some architectures, possibly due to 32/64
+ bit or large file issues. The basic STDIO VFD test is known to fail on
+ 64-bit SunOS 5.10 on SPARC when built with -m64 and 32-bit OS X/Darwin
+ 10.7.0. The STDIO VFD test has been disabled while we investigate and
+ a fix should appear in a future release.
+ (DER - 2011/10/14 - HDFFV-8235)
+
+* h5diff can report inconsistent results when comparing datasets of enum type
+ that contain invalid values. This is due to how enum types are handled in
+ the library and will be addressed in a future release.
+ (DER - 2011/10/14 - HDFFV-7527)
+
+* The links test can fail under the stdio VFD due to some issues with external
+ links. This will be investigated and fixed in a future release.
+ (DER - 2011/10/14 - HDFFV-7768)
+
+* After the shared library support was fixed for some bugs, it was discovered
+ that "make prefix=XXX install" no longer works for shared libraries. It
+ still works correctly for static libraries. Therefore, if you want to
+ install the HDF5 shared libraries in a location such as /usr/local/hdf5,
+ you need to specify the location via the --prefix option during configure
+ time. E.g., ./configure --prefix=/usr/local/hdf5 ...
+ (AKC - 2011/05/07 - HDFFV-7583)
+
+* The parallel test, t_shapesame, in testpar/, may run for a long time and may
+ be terminated by the alarm signal. If that happens, one can increase the
+ alarm seconds (default is 1200 seconds = 20 minutes) by setting the
+ environment variable, $HDF5_ALARM_SECONDS, to a larger value such as 3600
+ (60 minutes). Note that the t_shapesame test may fail on some systems
+ (see the "While working on the 1.8.6 release..." problem below). If
+ it does, it will waste more time if $HDF5_ALARM_SECONDS is set
+ to a larger value.
+ (AKC - 2011/05/07)
+
+* The C++ and FORTRAN bindings are not currently working on FreeBSD.
+ (QAK - 2011/04/26)
+
+* Shared Fortran libraries are not quite working on AIX. While they are
+ generated when --enable-shared is specified, the fortran and hl/fortran
+ tests fail. We are looking into the issue. HL and C++ shared libraries
+ should now be working as intended, however.
+ (MAM - 2011/04/20)
+
+* The --with-mpe configure option does not work with Mpich2.
+ (AKC - 2011/03/10)
+
+* While working on the 1.8.6 release of HDF5, a bug was discovered that can
+ occur when reading from a dataset in parallel shortly after it has been
+ written to collectively. The issue was exposed by a new test in the parallel
+ HDF5 test suite, but had existed before that. We believe the problem lies with
+ certain MPI implementations and/or file systems.
+
+ We have provided a pure MPI test program, as well as a standalone HDF5
+ program, that can be used to determine if this is an issue on your system.
+ They should be run across multiple nodes with a varying number of processes.
+ These programs can be found at:
+ http://www.hdfgroup.org/ftp/HDF5/examples/known_problems/
+ (NAF - 2011/01/19)
+
+* The library's test dt_arith.c showed a compiler's rounding problem on
+ Cygwin when converting from unsigned long long to long double. The
+ library's own conversion works fine. We defined a macro for Cygwin to
+ skip this test until we can solve the problem.
+ (SLU - 2010/05/05 - HDFFV-1264)
+
+* The VFL driver interface is not backward compatible. In H5FDpublic.h, the
+ structure H5FD_class_t changed in 1.8: a new parameter was added to the
+ get_eoa and set_eoa callback functions, a new callback function,
+ get_type_map, was added, and the public function H5FDrealloc was removed.
+ The problem only happens when users define their own driver for 1.6 and
+ try to plug it into the 1.8 library. Because there's only one user
+ complaining about it, we (Elena, Quincey, and I) decided to leave it as
+ it is (see bug report #1279). Quincey will make a plan for 1.10.
+ (SLU - 2010/02/02)
+
+* The --enable-static-exec configure flag will only statically link libraries
+ if the static version of that library is present. If only the shared version
+ of a library exists (i.e., most system libraries on Solaris, AIX, and Mac,
+ for example, only have shared versions), the flag should still result in a
+ successful compilation, but note that the installed executables will not be
+ fully static. Thus, the only guarantee on these systems is that the
+ executable is statically linked with just the HDF5 library.
+ (MAM - 2009/11/04)
+
+* Parallel tests failed with 16 processes with data inconsistency at testphdf5
+ / dataset_readAll. Parallel tests also failed with 32 and 64 processes with
+ collective abort of all ranks at t_posix_compliant / allwrite_allread_blocks
+ with MPI IO.
+ (CMC - 2009/04/28)
+
+* On an Intel 64 Linux cluster (RH 4, Linux 2.6.9) with Intel 10.0 compilers,
+ use -mp -O1 compilation flags to build the libraries. A higher level of
+ optimization causes failures in several HDF5 library tests.
+
+* A dataset created or rewritten with a v1.6.3 library or after cannot be read
+ with the v1.6.2 library or before when the Fletcher32 EDC filter is enabled.
+ There was a bug in the calculation of the Fletcher32 checksum in the
+ library before v1.6.3; the checksum value was not consistent between big-
+ endian and little-endian systems. This bug was fixed in Release 1.6.3.
+ However, after fixing the bug, the checksum value was no longer the same as
+ before on little-endian systems. Library releases after 1.6.4 can still read
+ datasets created or rewritten with an HDF5 library of v1.6.2 or before.
+ (SLU - 2005/06/30)
+
+
+%%%%1.8.9%%%%
+
+
+HDF5 version 1.8.9 released on 2012-05-09
+================================================================================
+
+INTRODUCTION
+============
+
+This document describes the differences between HDF5-1.8.8 and
+HDF5 1.8.9. It also contains information on the platforms tested and
+known problems in HDF5-1.8.9.
+
+For more details, see the files HISTORY-1_0-1_8_0_rc3.txt
+and HISTORY-1_8.txt in the release_docs/ directory of the HDF5 source.
+
+Links to the HDF5 1.8.9 source code, documentation, and additional materials
+can be found on the HDF5 web page at:
+
+ http://www.hdfgroup.org/products/hdf5/
+
+The HDF5 1.8.9 release can be obtained from:
+
+ http://www.hdfgroup.org/HDF5/release/obtain5.html
+
+User documentation for 1.8.9 can be accessed directly at this location:
+
+ http://www.hdfgroup.org/HDF5/doc/
+
+New features in the HDF5-1.8.x release series, including brief general
+descriptions of some new and modified APIs, are described in the "What's New
+in 1.8.0?" document:
+
+ http://www.hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html
+
+All new and modified APIs are listed in detail in the "HDF5 Software Changes
+from Release to Release" document, in the section "Release 1.8.9 (current
+release) versus Release 1.8.8":
+
+ http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html
+
+If you have any questions or comments, please send them to the HDF Help Desk:
+
+ help@hdfgroup.org
+
+
+CONTENTS
+========
+
+- New Features
+- Support for New Platforms, Languages, and Compilers
+- Bug Fixes since HDF5-1.8.8
+- Platforms Tested
+- Supported Configuration Features Summary
+- Known Problems
+
+
+New Features
+============
+
+ Configuration
+ -------------
+ - None
+
+ Library
+ -------
+ - Added a new feature to merge committed datatypes when copying objects,
+ using the new H5O_COPY_MERGE_COMMITTED_DTYPE_FLAG, whose behavior can be
+ modified by the new API routines: H5Padd_merge_committed_dtype_path(),
+ H5Pfree_merge_committed_dtype_paths(), H5Pset_mcdt_search_cb(), and
+ H5Pget_mcdt_search_cb(). (QAK - 2012/03/30)
+ - Added a new feature that allows working with files in memory in the
+ same way files are worked with on disk. New API routines include
+ H5Pset_file_image, H5Pget_file_image, H5Pset_file_image_callbacks,
+ H5Pget_file_image_callbacks, H5Fget_file_image, and
+ H5LTopen_file_image. (QAK - 2012/04/17)
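+
+ A brief sketch of the file image workflow described above (added for
+ illustration, not part of the original notes). It assumes the high-level
+ (hdf5_hl) library for H5LTopen_file_image; the file and dataset names are
+ made up. With flags set to 0 the library works on its own copy of the
+ image buffer, so the caller keeps ownership of the malloc'ed buffer.
+
+ #include <stdio.h>
+ #include <stdlib.h>
+ #include "hdf5.h"
+ #include "hdf5_hl.h"
+
+ int main(void)
+ {
+     int     data[4] = {1, 2, 3, 4}, readback[4];
+     hsize_t dims[1] = {4};
+     hid_t   file, space, dset, img_file, img_dset;
+     ssize_t img_size;
+     void   *img_buf;
+
+     /* Create an ordinary file on disk with one small dataset. */
+     file  = H5Fcreate("image_src.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+     space = H5Screate_simple(1, dims, NULL);
+     dset  = H5Dcreate2(file, "dset", H5T_NATIVE_INT, space,
+                        H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+     H5Dwrite(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);
+     H5Dclose(dset);
+     H5Sclose(space);
+     H5Fflush(file, H5F_SCOPE_GLOBAL);
+
+     /* Retrieve an in-memory image of the open file; a NULL buffer
+      * asks only for the required size. */
+     img_size = H5Fget_file_image(file, NULL, 0);
+     img_buf  = malloc((size_t)img_size);
+     H5Fget_file_image(file, img_buf, (size_t)img_size);
+     H5Fclose(file);
+
+     /* Reopen the image entirely in memory; flags = 0 makes the library
+      * work on its own copy of the buffer (read-only access). */
+     img_file = H5LTopen_file_image(img_buf, (size_t)img_size, 0);
+     img_dset = H5Dopen2(img_file, "dset", H5P_DEFAULT);
+     H5Dread(img_dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, readback);
+     printf("first element read from image: %d\n", readback[0]);
+
+     H5Dclose(img_dset);
+     H5Fclose(img_file);
+     free(img_buf);
+     return 0;
+ }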
+
+ Parallel Library
+ ----------------
+ - Corrected memory allocation error in MPI datatype construction code.
+ (QAK - 2012/04/23)
+ - Added two new routines to set/get the atomicity parameter in the
+ MPI library so that atomic operations can be performed. Some file
+ systems (for example, PVFS2) do not support atomic updates, so these
+ routines are not supported there. (MSC - 2012/03/27 - HDFFV-7961)
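+
+ The entry above does not name the routines; to the best of our knowledge
+ they are H5Fset_mpi_atomicity and H5Fget_mpi_atomicity. The sketch below
+ is illustrative only: it assumes a parallel HDF5 build, the MPI-IO file
+ driver, and a file system that supports atomic updates; the file name is
+ made up.
+
+ #include <stdio.h>
+ #include "mpi.h"
+ #include "hdf5.h"
+
+ int main(int argc, char *argv[])
+ {
+     int     mpi_rank;
+     hid_t   fapl, file;
+     hbool_t atomic = 0;
+
+     MPI_Init(&argc, &argv);
+     MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+     fapl = H5Pcreate(H5P_FILE_ACCESS);
+     H5Pset_fapl_mpio(fapl, MPI_COMM_WORLD, MPI_INFO_NULL);
+     file = H5Fcreate("atomic.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+
+     /* Ask MPI-IO to perform atomic updates for this file, then read the
+      * setting back; unsupported file systems will fail the set call. */
+     if (H5Fset_mpi_atomicity(file, 1) < 0 && mpi_rank == 0)
+         printf("atomic updates not supported on this file system\n");
+     H5Fget_mpi_atomicity(file, &atomic);
+     if (mpi_rank == 0)
+         printf("atomicity is %s\n", atomic ? "on" : "off");
+
+     H5Fclose(file);
+     H5Pclose(fapl);
+     MPI_Finalize();
+     return 0;
+ }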
+
+ Tools
+ -----
+ - h5repack: Added ability to set the metadata block size of the output
+ file, with the '-M'/'--metadata_block_size' command line parameter.
+ (QAK - 2012/03/30)
+ - h5stat: Added ability to display a summary of the file space usage for a
+ file, with the '-S'/'--summary' command line parameter. (QAK - 2012/03/28)
+ - h5dump: Added the capability for the "-a" option to show attributes whose
+ names contain "/" by using an escape character. For example, for a dataset
+ "/dset" containing the attribute "speed(m/h)", use
+ h5dump -a "/dset/speed(m\/h)"
+ to show the content of the attribute. (PC - 2012/03/12 - HDFFV-7523)
+ - h5dump: Added ability to apply command options across multiple files using a
+ wildcard in the filename. Unix example; "h5dump -H -d Dataset1 tarr*.h5".
+ Cross platform example; "h5dump -H -d Dataset1 tarray1.h5 tarray2.h5 tarray3.h5".
+ (ADB - 2012/03/12 - HDFFV-7876).
+ - h5dump: Added new option --no-compact-subset. This option will not
+ interpret the '[' character as starting the compact form of
+ subsetting. This is useful when the "h5dump error: unable to
+ open dataset "datset_name"" message is output because a dataset
+ name contains a '[' character. (ADB - 2012/03/05 - HDFFV-7689).
+ - h5repack: Improved performance for big chunked datasets (size > 128MB)
+ when used with the layout (-l) or compression (-f) options.
+ Before this change, repacking datasets with chunks with a large first
+ dimension would take extremely long. For example, repacking a dataset
+ with chunk dimensions of 1024x5x1 might take many hours to process
+ while changing a dataset with chunk dimensions set to 1x5x1024
+ might take under an hour. After this change, processing the dataset
+ with chunk dimensions of 1024x5x1 takes about 15 minutes, and processing
+ a dataset with chunk dimensions of 1x5x1024 takes about 14 minutes.
+ (JKM - 2012/03/01 - HDFFV-7862)
+
+ High-Level APIs
+ ---------------
+ - New API: H5LTpath_valid (Fortran: h5ltpath_valid_f) checks
+ if a path is correct, determines if a link resolves to a valid
+ object, and checks that the link does not dangle. (MSB - 2012/03/15)
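+
+ A small sketch of the new routine (added for illustration, not part of the
+ original notes): it checks one path that resolves to a real object and one
+ dangling soft link; the file, group, and link names are made up.
+
+ #include <stdio.h>
+ #include "hdf5.h"
+ #include "hdf5_hl.h"
+
+ int main(void)
+ {
+     hid_t file, group;
+
+     file  = H5Fcreate("path_valid.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+     group = H5Gcreate2(file, "/g1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+     H5Gclose(group);
+
+     /* A soft link whose target does not exist (a dangling link). */
+     H5Lcreate_soft("/does_not_exist", file, "/g1/broken",
+                    H5P_DEFAULT, H5P_DEFAULT);
+
+     /* Path resolves to a real object. */
+     printf("/g1        -> %d\n", (int)H5LTpath_valid(file, "/g1", 1));
+     /* The link itself exists (final object not checked)... */
+     printf("/g1/broken -> %d (links only)\n",
+            (int)H5LTpath_valid(file, "/g1/broken", 0));
+     /* ...but it does not resolve to a valid object. */
+     printf("/g1/broken -> %d (object checked)\n",
+            (int)H5LTpath_valid(file, "/g1/broken", 1));
+
+     H5Fclose(file);
+     return 0;
+ }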
+
+ Fortran API
+ -----------
+
+ - Added for the C API the Fortran wrapper:
+ h5ocopy_f (MSB - 2012/03/22)
+
+ C++ API
+ -------
+ - None
+
+
+Support for New Platforms, Languages, and Compilers
+===================================================
+ - None
+
+Bug Fixes since HDF5-1.8.8
+==========================
+
+ Configuration
+ -------------
+ - Fixed Makefile issue in which "-Wl," was not properly specified
+ prior to -rpath when building parallel Fortran libraries with
+ an Intel compiler. (MAM - 2012/03/26)
+ - Makefiles generated by other packages using h5cc as the compiler
+ no longer error when 'make' is invoked more than once in order
+ to 'rebuild' after changes to source. (MAM - 2012/03/26)
+ - Added code to display the version information of XL Fortran and C++
+ in the summary of configure. (AKC - 2012/02/28 - HDFFV-7793)
+ - Updated all CMakeLists.txt files to indicate the minimum CMake version is
+ the current standard of 2.8.6 (ADB - 2011/12/05 - HDFFV-7854)
+
+ Library
+ -------
+ - Windows and STDIO correctness changes have been propagated from the SEC2
+ and old Windows drivers to the STDIO VFD. (DER - 2012/03/30 - HDFFV-7917)
+ - Fixed an error that would occur when copying an object with attribute
+ creation order tracked and indexed. (NAF - 2012/03/28 - HDFFV-7762)
+ - Fixed a bug in H5Ocopy(): When copying an opened object, call the
+ object's flush class action to ensure that cached data is flushed so
+ that H5Ocopy will get the correct data. (VC - 2012/03/27 - HDFFV-7853)
+ - The istore test will now skip the sparse 50x50x50 test when the VFD does
+ not support sparse files on that platform. The most important platforms
+ on which this will be skipped are Windows (NTFS sparse files are not
+ supported) and Mac OS X (HFS sparse files are not supported). This
+ fixes CTest timeout issues on Windows. (DER - 2012/03/27 - HDFFV-7769)
+ - Windows and POSIX correctness changes have been propagated from the SEC2
+ VFD to the Core VFD. This mainly affects file operations on the
+ driver's backing store and fixes a problem on Windows where large files
+ could not be read. (DER - 2012/03/27 - HDFFV-7916 - HDFFV-7603)
+ - When an application tried to write or read many small data chunks and
+ ran out of memory, the library would fail with a segmentation fault.
+ The fix is to return an error stack with the proper information.
+ (SLU - 2012/03/23 - HDFFV-7785)
+ - H5Pset_data_transform had a segmentation fault in some cases like x*-100.
+ It works correctly now and handles other cases like 100-x or 2/x.
+ (SLU - 2012/03/15 - HDFFV-7922)
+ - Fixed rare corruption bugs that could occur when using the new object
+ header format. (NAF - 2012/03/15 - HDFFV-7879)
+ - Fixed an error that occurred when creating a contiguous dataset with a
+ zero-sized dataspace and space allocation time set to 'early'.
+ (QAK - 2012/03/12)
+ - Changed Windows thread creation to use _beginthread() instead of
+ CreateThread(). Threads created by the latter can be killed in
+ low-memory situations. (DER - 2012/02/10 - HDFFV-7780)
+    - Creating a dataset in a read-only file caused a segmentation fault when
+      the file was closed. This is fixed; the attempt to create a dataset now
+      fails with an error indicating the file is read-only.
+      (SLU - 2012/01/25 - HDFFV-7756)
+ - Fixed a segmentation fault that could occur when shrinking a dataset
+ with chunks larger than 1 MB. (NAF - 2011/11/30 - HDFFV-7833)
+ - Fixed a bug that could cause H5Oget_info to return the wrong address
+ after copying a committed (named) datatype. (NAF - 2011/11/14)
+    - The library allowed the conversion of strings between ASCII and UTF8.
+      We have corrected it to report an error in this situation.
+      (SLU - 2011/11/8 - HDFFV-7582)
+ - Fixed a segmentation fault when the library tried to shrink the size
+ of a compound datatype through H5Tset_size immediately after the
+ datatype was created. (SLU - 2011/11/4 - HDFFV-7618)
+
+ Parallel Library
+ ----------------
+ - None
+
+ Tools
+ -----
+ - h5unjam: Fixed a segmentation fault that occurred when h5unjam was used
+ with the -V (show version) option. (JKM - 2012/04/19 - HDFFV-8001)
+ - h5repack: Fixed a failure that occurred when repacking the chunk size
+ of a specified chunked dataset with unlimited max dims.
+ (JKM - 2012/04/11 - HDFFV-7993)
+ - h5diff: Fixed a failure when comparing groups. Before the fix, if an
+ object in a group was compared with an object in another group where
+ both had the same name but the object type was different, then h5diff
+ would fail. After the fix, h5diff detects such cases as non-comparable
+ and displays appropriate error messages.
+ (JKM - 2012/03/28 - HDFFV-7644)
+    - h5diff: If unique objects exist in only one file and h5diff is set to
+      exclude them with the --exclude-path option, h5diff might have missed
+      excluding some of the objects. This was fixed so that such objects are
+      correctly excluded. (JKM - 2012/03/20 - HDFFV-7837)
+    - h5diff: When two dangling symbolic links are compared with the
+      --follow-symlinks option, the result should be the same whether two
+      files or two objects are being compared. This worked when comparing
+      two files but not when comparing two objects; h5diff now handles both
+      cases. (JKM - 2012/03/09 - HDFFV-7835)
+ - h5dump: Added the tools library error stack to properly catch error
+ information generated within the library. (ADB - 2012/03/12 - HDFFV-7958)
+    - h5dump: Changed the code path where opening a dangling link used to
+      fail; dangling links no longer produce error messages.
+      (ADB - 2012/03/12 - HDFFV-7839)
+ - h5dump: Refactored code to remove duplicated functions. Split XML
+ functions from DDL functions. Corrected indentation and formatting
+ errors. Also fixed subsetting counting overflow (HDFFV-5874). Verified
+ all tools call tools_init() in main. The USER_BLOCK data now correctly
+ displays within the SUPER_BLOCK info. NOTE: WHITESPACE IN THE OUTPUT
+ HAS CHANGED. (ADB - 2012/02/17 - HDFFV-7560)
+    - h5diff: Fixed to prevent displaying an error stack message when
+      comparing two dangling symbolic links with the --follow-symlinks
+      option. (JKM - 2012/01/13 - HDFFV-7836)
+ - h5repack: Fixed a memory leak that occurred with the handling of
+ variable length strings in attributes.
+ (JKM - 2012/01/10 - HDFFV-7840)
+ - h5ls: Fixed a segmentation fault that occurred when accessing region
+ reference data in an attribute. (JKM - 2012/01/06 - HDFFV-7838)
+
+ F90 API
+ -------
+ - None
+
+ C++ API
+ ------
+ - None
+
+ High-Level APIs:
+ ------
+ - None
+
+ Fortran High-Level APIs:
+ ------
+    - h5ltget_attribute_string_f: The routine used to return the C NULL
+      character in the returned character buffer. The returned character
+      buffer no longer contains the C NULL character; the buffer is
+      blank-padded if needed. (MSB - 2012/03/23)
+
+
+Platforms Tested
+================
+The following platforms and compilers have been tested for this release.
+
+ AIX 5.3 xlc 10.1.0.5
+ (NASA G-ADA) xlC 10.1.0.5
+ xlf90 12.1.0.6
+
+ FreeBSD 8.2-STABLE i386 gcc 4.2.1 [FreeBSD] 20070719
+ (loyalty) g++ 4.2.1 [FreeBSD] 20070719
+ gcc 4.6.1 20110422
+ g++ 4.6.1 20110422
+ gfortran 4.6.1 20110422
+
+ FreeBSD 8.2-STABLE amd64 gcc 4.2.1 [FreeBSD] 20070719
+ (freedom) g++ 4.2.1 [FreeBSD] 20070719
+ gcc 4.6.1 20110422
+ g++ 4.6.1 20110422
+ gfortran 4.6.1 20110422
+
+ Linux 2.6.18-194.3.1.el5PAE GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP i686 i686 i386 compilers for 32-bit applications;
+ (jam) Version 4.1.2 20080704 (Red Hat 4.1.2-52)
+ Version 4.5.2
+ PGI C, Fortran, C++ Compilers for 32-bit
+ applications;
+ Version 11.8-0
+ Version 11.9-0
+ Intel(R) C, C++, Fortran Compiler for 32-bit
+ applications;
+ Version 12.0
+ Version 12.1
+ MPICH mpich2-1.3.1 compiled with
+ gcc 4.1.2 and gfortran 4.1.2
+
+ Linux 2.6.18-308.1.1.el5 GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP x86_64 GNU/Linux compilers for 32-bit applications;
+ (koala) Version 4.1.2 20080704 (Red Hat 4.1.2-52)
+ Version 4.5.2
+ PGI C, Fortran, C++ for 64-bit target on
+ x86-64;
+ Version 11.9-0 (64-bit)
+ Version 11.8-0 (32-bit)
+ Intel(R) C, C++, Fortran Compilers for
+ applications running on Intel(R) 64;
+ Version 12.0
+ Version 12.1
+ MPICH mpich2-1.3.1 compiled with
+ gcc 4.1.2 and gfortran 4.1.2
+
+ Linux 2.6.32-220.7.1.el6.ppc64 gcc (GCC) 4.4.6 20110731 (Red Hat 4.4.6-3)
+ #1 SMP ppc64 GNU/Linux g++ (GCC) 4.4.6 20110731
+ (ostrich) GNU Fortran (GCC) 4.4.6 20110731 (Red Hat 4.4.6-3)
+
+ Linux 2.6.18-108chaos Intel C, C++, Fortran Compilers Version 11.1
+ #1 SMP x86_64 GNU/Linux
+ (LLNL Aztec)
+
+ IBM Blue Gene/P XL C for Blue Gene/P, bgxlc V9.0
+ (LLNL uDawn) XL C++ for Blue Gene/P, bgxlC V9.0
+ XL Fortran for Blue Gene/P, bgxlf0 V11.1
+
+ SunOS 5.10 32- and 64-bit Sun C 5.9 Sun OS_sparc Patch 124867-16
+ (linew) Sun Fortran 95 8.3 Sun OS_sparc Patch 127000-13
+ Sun C++ 5.9 Sun OS_sparc Patch 124863-26
+ Sun C 5.11 SunOS_sparc
+ Sun Fortran 95 8.5 SunOS_sparc
+ Sun C++ 5.11 SunOS_sparc
+
+ SGI Altix UV Intel(R) C, Fortran Compilers
+ SGI ProPack 7 Linux Version 11.1 20100806
+ 2.6.32.24-0.2.1.2230.2.PTF- SGI MPT 2.02
+ default #1 SMP
+ (NCSA ember)
+
+ Dell NVIDIA Cluster Intel(R) C, Fortran Compilers
+ Red Hat Enterprise Linux 6 Version 12.0.4 20110427
+ 2.6.32-131.4.1.el6.x86_64 mvapich2 1.7rc1-intel-12.0.4
+ (NCSA forge)
+
+ Windows XP Visual Studio 2008 w/ Intel Fortran 10.1 (project files)
+ Visual Studio 2008 w/ Intel Fortran 11.1 (cmake)
+ Visual Studio 2010 w/ Intel Fortran 12 (cmake)
+
+ Windows XP x64 Visual Studio 2008 w/ Intel Fortran 10.1 (project files)
+ Visual Studio 2008 w/ Intel Fortran 11.1 (cmake)
+ Visual Studio 2010 w/ Intel Fortran 12 (cmake)
+
+ Windows 7 Visual Studio 2008 w/ Intel Fortran 11.1 (cmake)
+ Visual Studio 2010 w/ Intel Fortran 12 (cmake)
+ Cygwin(1.7.9 native gcc(4.5.3) compiler and gfortran)
+
+ Windows 7 x64 Visual Studio 2008 w/ Intel Fortran 11.1 (cmake)
+ Visual Studio 2010 w/ Intel Fortran 12 (cmake)
+ Cygwin(1.7.9 native gcc(4.5.3) compiler and gfortran)
+
+ Mac OS X Snow Leopard 10.6.8 i686-apple-darwin10-gcc-4.2.1 (GCC) 4.2.1 (gcc)
+ Darwin Kernel Version 10.8.0 i686-apple-darwin10-g++-4.2.1 (GCC) 4.2.1 (g++)
+ Intel 64-bit (Apple Inc. build 5666) (dot 3)
+    (fred)                        GNU Fortran (GCC) 4.6.1 (gfortran)
+ Intel C (icc), Fortran (ifort), C++ (icpc)
+ 12.1.0.038 Build 20110811
+
+ Mac OS X Snow Leopard 10.6.8 i686-apple-darwin10-gcc-4.2.1 (GCC) 4.2.1 (gcc)
+ Darwin Kernel Version 10.8.0 i686-apple-darwin10-g++-4.2.1 (GCC) 4.2.1 (g++)
+ Intel 32-bit (Apple Inc. build 5666) (dot 3)
+ (tejeda) GNU Fortran (GCC) 4.6.1 (gfortran)
+ Intel C (icc), Fortran (ifort), C++ (icpc)
+ 12.1.0.038 Build 20110811
+
+ Mac OS X Lion 10.7.3 GCC 4.2.1 gcc
+ 32- and 64-bit GNU Fortran (GCC) 4.6.1 gfortran
+ (duck) GCC 4.2.1. g++
+
+ Debian6.0.3 2.6.32-5-686 #1 SMP i686 GNU/Linux
+ gcc (Debian 4.4.5-8) 4.4.5
+ GNU Fortran (Debian 4.4.5-8) 4.4.5
+
+ Debian6.0.3 2.6.32-5-amd64 #1 SMP x86_64 GNU/Linux
+ gcc (Debian 4.4.5-8) 4.4.5
+ GNU Fortran (Debian 4.4.5-8) 4.4.5
+
+ Fedora16 3.2.9-2.fc16.i6866 #1 SMP i686 i686 i386 GNU/Linux
+ gcc (GCC) 4.6.2 20111027 (Red Hat 4.6.2-1)
+ GNU Fortran (GCC) 4.6.2 20111027 (Red Hat 4.6.2-1)
+
+ Fedora16 3.2.9-2.fc16.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux
+ gcc (GCC) 4.6.2 20111027 (Red Hat 4.6.2-1)
+ GNU Fortran (GCC) 4.6.2 20111027 (Red Hat 4.6.2-1)
+
+ SUSE 12.1 3.1.9-1.4-desktop #1 SMP PREEMPT i686 i686 i386 GNU/Linux
+ gcc (SUSE Linux) 4.6.2
+ GNU Fortran (SUSE Linux) 4.6.2
+
+ SUSE 12.1 3.1.9-1.4-desktop #1 SMP PREEMPT x86_64 x86_64 x86_64 GNU/Linux
+ gcc (SUSE Linux) 4.6.2
+ GNU Fortran (SUSE Linux) 4.6.2
+
+ Ubuntu 11.10 3.0.0-16-generic #29-Ubuntu SMP i686 GNU/Linux
+ gcc (Ubuntu/Linaro 4.6.1-9ubuntu3) 4.6.1
+ GNU Fortran (Ubuntu/Linaro 4.6.4-9ubuntu3) 4.6.1
+
+ Ubuntu 11.10 3.0.0-16-generic #29-Ubuntu SMP x86_64 GNU/Linux
+ gcc (Ubuntu/Linaro 4.6.1-9ubuntu3) 4.6.1
+ GNU Fortran (Ubuntu/Linaro 4.6.1-9ubuntu3) 4.6.1
+
+ Cray Linux Environment (CLE) PrgEnv-pgi 2.2.74
+ hopper.nersc.gov pgcc 11.9-0 64-bit target on x86-64 Linux -tp k8e
+ pgf90 11.9-0 64-bit target on x86-64 Linux -tp k8e
+ pgCC 11.9-0 64-bit target on x86-64 Linux -tp k8e
+
+
+Tested Configuration Features Summary
+=====================================
+
+ In the tables below
+ y = tested
+ n = not tested in this release
+ C = Cluster
+ W = Workstation
+ x = not working in this release
+ dna = does not apply
+ ( ) = footnote appears below second table
+ <blank> = testing incomplete on this feature or platform
+
+Platform C F90 F90 C++ zlib SZIP
+ parallel parallel
+Solaris2.10 32-bit n y n y y y
+Solaris2.10 64-bit n y n y y y
+Windows XP n y(4) n y y y
+Windows XP x64 n y(4) n y y y
+Windows Vista n y(4) n y y y
+Windows Vista x64 n y(4) n y y y
+Mac OS X Snow Leopard 10.6.8 32-bit n y n y y n
+Mac OS X Snow Leopard 10.6.8 64-bit n y n y y y
+Mac OS X Lion 10.7.3 32-bit n y n y y n
+Mac OS X Lion 10.7.3 64-bit n y n y y y
+AIX 5.3 32- and 64-bit y y y y y y
+FreeBSD 8.2-STABLE 32&64 bit n x n x y y
+CentOS 5.5 Linux 2.6.18-194 i686 GNU (1)W y y(2) y y y y
+CentOS 5.5 Linux 2.6.18-194 i686 Intel W n y n y y y
+CentOS 5.5 Linux 2.6.18-194 i686 PGI W n y n y y y
+CentOS 5.5 Linux 2.6.18 x86_64 GNU (1) W y y(3) y y y y
+CentOS 5.5 Linux 2.6.18 x86_64 Intel W n y n y y y
+CentOS 5.5 Linux 2.6.18 x86_64 PGI W n y n y y y
+Linux 2.6.32-220.7.1.el6.ppc64 n y n y y y
+SGI ProPack 7 Linux 2.6.32.24 y y y y y y
+Red Hat Enterprise Linux 6 y y y y y y
+CLE hopper.nersc.gov y y(3) y y y n
+
+
+Platform Shared Shared Shared Thread-
+ C libs F90 libs C++ libs safe
+Solaris2.10 32-bit y y y y
+Solaris2.10 64-bit n n n n
+Windows XP y y(4) y n
+Windows XP x64 y y(4) y n
+Windows Vista y y(4) y y
+Windows Vista x64 y y(4) y y
+Mac OS X Snow Leopard 10.6.8 32-bit y n y n
+Mac OS X Snow Leopard 10.6.8 64-bit y n y n
+Mac OS X Lion 10.7.3 32-bit y n y y
+Mac OS X Lion 10.7.3 64-bit y n y y
+AIX 5.3 32- and 64-bit n n n y
+FreeBSD 8.2-STABLE 32&64 bit y x x y
+CentOS 5.5 Linux 2.6.18-194 i686 GNU (1)W y y(2) y y
+CentOS 5.5 Linux 2.6.18-194 i686 Intel W y y y n
+CentOS 5.5 Linux 2.6.18-194 i686 PGI W y y y n
+CentOS 5.5 Linux 2.6.18 x86_64 GNU (1) W y y y y
+CentOS 5.5 Linux 2.6.18 x86_64 Intel W y y y n
+CentOS 5.5 Linux 2.6.18 x86_64 PGI W y y y n
+Linux 2.6.32-220.7.1.el6.ppc64 y y y n
+SGI ProPack 7 Linux 2.6.32.24 y y y n
+Red Hat Enterprise Linux 6 y y y n
+CLE hopper.nersc.gov n n n n
+
+ (1) Fortran compiled with gfortran.
+ (2) With PGI and Absoft compilers.
+ (3) With PGI compiler for Fortran.
+ (4) Using Visual Studio 2008 w/ Intel Fortran 10.1 (Cygwin shared libraries are not supported)
+ (5) C and C++ shared libraries will not be built when Fortran is enabled.
+ Compiler versions for each platform are listed in the preceding
+ "Platforms Tested" table.
+
+
+Known Problems
+==============
+* The h5repacktst test fails on AIX 32-bit because the test uses more
+ memory than the default amount. The failure message typically looks like:
+
+ "time: 0551-010 The process was stopped abnormally. Try again."
+
+ This is an issue with the test only and does not represent a problem with
+  the library. To allow the test to pass, request more memory when testing
+  via an appropriate command such as:
+
+      $ env LDR_CNTRL=MAXDATA=0x20000000@DSA make check
+
+ (AKC - 2012/05/09 - HDFFV-8016)
+
+* The file_image test will fail in the "initial file image and callbacks in
+ the core VFD" sub-test if the source directory is read-only as the test
+ fails to create its test files in the build directory. This will be
+ resolved in a future release.
+ (AKC - 2012/05/05 - HDFFV-8009)
+
+* The dt_arith test reports several errors involving "long double" on
+ Mac OS X 10.7 Lion when any level of optimization is enabled. The test does
+ not fail in debug mode. This will be addressed in a future release.
+ (SLU - 2012/05/08)
+
+* The following h5dump test case fails in BG/P machines (and potentially other
+ machines that use a command script to launch executables):
+
+ h5dump --no-compact-subset -d "AHFINDERDIRECT::ah_centroid_t[0] it=0 tl=0"
+ tno-subset.h5
+
+ This is due to the embedded spaces in the dataset name being interpreted
+ by the command script launcher as meta-characters, thus passing three
+ arguments to h5dump's -d flag. The command passes if run by hand, just
+ not via the test script.
+ (AKC - 2012/05/03)
+
+* The ph5diff (parallel h5diff) tool can intermittently hang in parallel mode
+ when comparing two HDF5 files that contain objects with the same names but
+ with different object types.
+ (JKM - 2012/04/27)
+
+* On hopper, the build fails when RUNSERIAL and RUNPARALLEL are set
+  to "aprun -np X" because the H5lib_settings.c file is not generated
+  properly. Leaving those environment variables unset works, because
+  configure automatically detects that it is a Cray system and uses
+  the proper launch commands when necessary.
+  (MSC - 2012/04/18)
+
+* The data conversion test dt_arith.c fails in "long double" to integer
+  conversion on Ubuntu 11.10 (3.0.0.13 kernel) with GCC 4.6.1 if the library
+  is built with optimization -O3 or -O2. The older GCC (4.5) or a newer
+  kernel (3.2.2 on Fedora) doesn't have the problem. Users should lower the
+  optimization level (-O1 or -O0) by defining CFLAGS on the command line of
+  "configure" like:
+
+      CFLAGS=-O1 ./configure
+
+  This will override the library's default optimization level.
+ (SLU - 2012/02/07 - HDFFV-7829)
+
+* The STDIO VFD does not work on some architectures, possibly due to 32/64
+ bit or large file issues. The basic STDIO VFD test is known to fail on
+ 64-bit SunOS 5.10 on SPARC when built with -m64 and 32-bit OS X/Darwin
+ 10.7.0. The STDIO VFD test has been disabled while we investigate and
+ a fix should appear in a future release.
+ (DER - 2011/10/14)
+
+* h5diff can report inconsistent results when comparing datasets of enum type
+ that contain invalid values. This is due to how enum types are handled in
+ the library and will be addressed in a future release.
+ (DER - 2011/10/14 - HDFFV-7527)
+
+* The links test can fail under the stdio VFD due to some issues with external
+ links. This will be investigated and fixed in a future release.
+ (DER - 2011/10/14 - HDFFV-7768)
+
+* After the shared library support was fixed for some bugs, it was discovered
+ that "make prefix=XXX install" no longer works for shared libraries. It
+ still works correctly for static libraries. Therefore, if you want to
+ install the HDF5 shared libraries in a location such as /usr/local/hdf5,
+ you need to specify the location via the --prefix option during configure
+  time. E.g., ./configure --prefix=/usr/local/hdf5 ...
+ (AKC - 2011/05/07 - HDFFV-7583)
+
+* The parallel test, t_shapesame, in testpar/, may run for a long time and may
+ be terminated by the alarm signal. If that happens, one can increase the
+ alarm seconds (default is 1200 seconds = 20 minutes) by setting the
+ environment variable, $HDF5_ALARM_SECONDS, to a larger value such as 3600
+ (60 minutes). Note that the t_shapesame test may fail in some systems
+ (see the "While working on the 1.8.6 release..." problem below). If
+ it does, it will waste more time if $HDF5_ALARM_SECONDS is set
+ to a larger value.
+ (AKC - 2011/05/07)
+
+* The C++ and FORTRAN bindings are not currently working on FreeBSD.
+ (QAK - 2011/04/26)
+
+* Shared Fortran libraries are not quite working on AIX. While they are
+ generated when --enable-shared is specified, the fortran and hl/fortran
+ tests fail. We are looking into the issue. HL and C++ shared libraries
+ should now be working as intended, however.
+ (MAM - 2011/04/20)
+
+* The --with-mpe configure option does not work with Mpich2.
+ (AKC - 2011/03/10)
+
+* While working on the 1.8.6 release of HDF5, a bug was discovered that can
+ occur when reading from a dataset in parallel shortly after it has been
+ written to collectively. The issue was exposed by a new test in the parallel
+ HDF5 test suite, but had existed before that. We believe the problem lies with
+ certain MPI implementations and/or file systems.
+
+ We have provided a pure MPI test program, as well as a standalone HDF5
+ program, that can be used to determine if this is an issue on your system.
+ They should be run across multiple nodes with a varying number of processes.
+ These programs can be found at:
+ http://www.hdfgroup.org/ftp/HDF5/examples/known_problems/
+ (NAF - 2011/01/19)
+
+* The library's test dt_arith.c showed a compiler's rounding problem on
+ Cygwin when converting from unsigned long long to long double. The
+ library's own conversion works fine. We defined a macro for Cygwin to
+ skip this test until we can solve the problem.
+ (SLU - 2010/05/05 - HDFFV-1264)
+
+* The VFL drivers are not backward compatible between 1.6 and 1.8. In
+  H5FDpublic.h, the structure H5FD_class_t changed in 1.8: a new parameter
+  was added to the get_eoa and set_eoa callback functions, and a new
+  callback function, get_type_map, was added. The public function
+  H5FDrealloc was removed in 1.8. The problem only occurs when users define
+  their own driver for 1.6 and try to plug it into the 1.8 library. Because
+  only one user has complained about it, we (Elena, Quincey, and I) decided
+  to leave it as is (see bug report #1279). Quincey will make a plan for 1.10.
+ (SLU - 2010/02/02)
+
+* MinGW has a missing libstdc++.dll.a library file and will not successfully link
+ C++ applications/tests. Do not use the enable-cxx configure option. Read all of
+ the INSTALL_MINGW.txt file for all restrictions.
+ (ADB - 2009/11/11)
+
+* The --enable-static-exec configure flag will only statically link libraries
+ if the static version of that library is present. If only the shared version
+ of a library exists (i.e., most system libraries on Solaris, AIX, and Mac,
+ for example, only have shared versions), the flag should still result in a
+ successful compilation, but note that the installed executables will not be
+ fully static. Thus, the only guarantee on these systems is that the
+ executable is statically linked with just the HDF5 library.
+ (MAM - 2009/11/04)
+
+* The PathScale MPI implementation, accessing a Panasas file system, would
+ cause H5Fcreate() with H5F_ACC_EXCL to fail even when the file does not
+ exist. This is due to the MPI_File_open() call failing if the mode has
+ the MPI_MODE_EXCL bit set.
+ (AKC - 2009/08/11 - HDFFV-988)
+
+* Parallel tests failed with 16 processes with data inconsistency at testphdf5
+ / dataset_readAll. Parallel tests also failed with 32 and 64 processes with
+ collective abort of all ranks at t_posix_compliant / allwrite_allread_blocks
+ with MPI IO.
+ (CMC - 2009/04/28)
+
+* For Red Storm, a Cray XT3 system, the tools/h5ls/testh5ls.sh and
+  tools/h5copy/testh5copy.sh scripts will fail some of their sub-tests.
+  These sub-tests are expected to fail and should exit with a non-zero code,
+  but the yod command does not propagate the exit code of the executables.
+  Yod always returns 0 if it can launch the executable. The test suite shell
+  expects a non-zero code for this particular test; therefore, it concludes
+  the test has failed when it receives 0 from yod. Skip all the "failing"
+  tests for now by changing them as follows.
+
+ ======== Original tools/h5ls/testh5ls.sh =========
+ TOOLTEST tgroup-1.ls 1 -w80 -r -g tgroup.h5
+ ======== Change to ===============================
+ echo SKIP TOOLTEST tgroup-1.ls 1 -w80 -r -g tgroup.h5
+ ==================================================
+
+ ======== Original tools/h5copy/testh5copy.sh =========
+ TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d grp_rename
+ TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d /grp_rename/grp_dsets
+ TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_dsets -d /E/F/grp_dsets
+ TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_nested -d /G/H/grp_nested
+ H5LSTEST $FILEOUT
+ ======== Change to ===============================
+ echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d grp_rename
+ echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d /grp_rename/grp_dsets
+ echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_dsets -d /E/F/grp_dsets
+ echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_nested -d /G/H/grp_nested
+ echo SKIP H5LSTEST $FILEOUT
+ ==================================================
+ (AKC - 2008/11/10)
+
+* For Red Storm, a Cray XT3 system, the yod command sometimes gives the
+ message, "yod allocation delayed for node recovery". This interferes with
+ test suites that do not expect to see this message. See the section of "Red
+ Storm" in file INSTALL_parallel for a way to deal with this problem.
+ (AKC - 2008/05/28)
+
+* On an Intel 64 Linux cluster (RH 4, Linux 2.6.9) with Intel 10.0 compilers,
+ use -mp -O1 compilation flags to build the libraries. A higher level of
+ optimization causes failures in several HDF5 library tests.
+
+* On mpich 1.2.5 and 1.2.6, if more than two processes contribute no IO and
+  the application asks to do collective IO, we have found that when using 4
+  processors, a simple collective write will sometimes hang. This can be
+  verified with the t_mpi test under testpar.
+
+* A dataset created or rewritten with a v1.6.3 library or after cannot be read
+ with the v1.6.2 library or before when the Fletcher32 EDC filter is enabled.
+ There was a bug in the calculation of the Fletcher32 checksum in the
+ library before v1.6.3; the checksum value was not consistent between big-
+ endian and little-endian systems. This bug was fixed in Release 1.6.3.
+ However, after fixing the bug, the checksum value was no longer the same as
+ before on little-endian system. Library releases after 1.6.4 can still read
+ datasets created or rewritten with an HDF5 library of v1.6.2 or before.
+ (SLU - 2005/06/30)
+
+* On IBM AIX systems, parallel HDF5 mode will fail some tests with error
+ messages like "INFO: 0031-XXX ...". This is from the command `poe'.
+ Set the environment variable MP_INFOLEVEL to 0 to minimize the messages
+ and run the tests again.
+
+ The tests may fail with messages like "The socket name is already in use",
+ but HDF5 does not use sockets. This failure is due to problems with the
+ poe command trying to set up the debug socket. To resolve this problem,
+ check to see whether there are many old /tmp/s.pedb.* files staying around.
+ These are sockets used by the poe command and left behind due to failed
+ commands. First, ask your system administrator to clean them out.
+ Lastly, request IBM to provide a means to run poe without the debug socket.
+ (AKC - 2004/12/08)
+
+
+%%%%1.8.8%%%%
+
+
+HDF5 version 1.8.8 released on 2011-11-15
+================================================================================
+
+INTRODUCTION
+============
+
+This document describes the differences between HDF5-1.8.7 and
+HDF5 1.8.8, and contains information on the platforms tested and
+known problems in HDF5-1.8.8.
+For more details, see the files HISTORY-1_0-1_8_0_rc3.txt
+and HISTORY-1_8.txt in the release_docs/ directory of the HDF5 source.
+
+Links to the HDF5 1.8.8 source code, documentation, and additional materials
+can be found on the HDF5 web page at:
+
+ http://www.hdfgroup.org/products/hdf5/
+
+The HDF5 1.8.8 release can be obtained from:
+
+ http://www.hdfgroup.org/HDF5/release/obtain5.html
+
+User documentation for 1.8.8 can be accessed directly at this location:
+
+ http://www.hdfgroup.org/HDF5/doc/
+
+New features in the HDF5-1.8.x release series, including brief general
+descriptions of some new and modified APIs, are described in the "What's New
+in 1.8.0?" document:
+
+ http://www.hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html
+
+All new and modified APIs are listed in detail in the "HDF5 Software Changes
+from Release to Release" document, in the section "Release 1.8.8 (current
+release) versus Release 1.8.7":
+
+ http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html
+
+If you have any questions or comments, please send them to the HDF Help Desk:
+
+ help@hdfgroup.org
+
+
+CONTENTS
+========
+
+- New Features
+- Support for New Platforms, Languages, and Compilers
+- Bug Fixes since HDF5-1.8.7
+- Platforms Tested
+- Supported Configuration Features Summary
+- Known Problems
+
+
+New Features
+============
+
+ Configuration
+ -------------
+    - Added the --enable-fortran2003 flag to enable Fortran 2003 support
+      in the HDF5 Fortran library. The flag should be used along with the
+      --enable-fortran flag and takes effect only when the Fortran compiler
+      is Fortran 2003 compliant; see the example after this list.
+      (EIP - 2011/11/14)
+ - Added checks for clock_gettime and mach/mach_time.h to both configure and
+ CMake. This will support the move from gettimeofday to clock_gettime's
+ monotonic timer in the profiling code in a future release.
+ (DER - 2011/10/12)
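+
+      A hypothetical configure invocation illustrating the Fortran 2003
+      flag described in the first item above; the compiler and install
+      prefix are examples only:
+
+          FC=gfortran ./configure --prefix=/usr/local/hdf5 \
+                      --enable-fortran --enable-fortran2003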
+
+ Library
+ -------
+ - The Windows VFD code has been removed with the exception of the functions
+ which set it (H5Pset_fapl_windows, for example). Setting the Windows
+ VFD now really sets the SEC2 VFD. The WINDOWS_MAX_BUF and
+ WINDOWS_USE_STDIO configuration options and #defines have also been
+ removed. NOTE: Since the Windows VFD was a clone of the SEC2 VFD, this
+ change should be transparent to users.
+ (DER - 2011/10/12 - HDFFV-7740, HDFFV-7744)
+ - H5Tcreate now supports the string type (fixed-length and variable-
+ length). (SLU - 2011/05/20)
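+
+      A minimal C sketch of the new capability; previously string datatypes
+      were typically created by copying H5T_C_S1 and calling H5Tset_size:
+
+          #include "hdf5.h"
+
+          void make_string_types(void)
+          {
+              /* 16-byte fixed-length and variable-length string datatypes */
+              hid_t fixed_str = H5Tcreate(H5T_STRING, 16);
+              hid_t vlen_str  = H5Tcreate(H5T_STRING, H5T_VARIABLE);
+
+              H5Tclose(vlen_str);
+              H5Tclose(fixed_str);
+          }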
+
+ Parallel Library
+ ----------------
+ - Added new H5Pget_mpio_actual_chunk_opt_mode and
+ H5Pget_mpio_actual_io_mode API routines for querying whether/how
+ a collective I/O operation completed. (QAK - 2011/10/12)
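+
+      A minimal C sketch of how these queries might be used; a parallel
+      (MPI) build of the library is assumed, and dxpl_id is the data
+      transfer property list that was passed to H5Dwrite or H5Dread:
+
+          #include <stdio.h>
+          #include "hdf5.h"
+
+          void report_actual_io(hid_t dxpl_id)
+          {
+              H5D_mpio_actual_io_mode_t        io_mode;
+              H5D_mpio_actual_chunk_opt_mode_t chunk_mode;
+
+              H5Pget_mpio_actual_io_mode(dxpl_id, &io_mode);
+              H5Pget_mpio_actual_chunk_opt_mode(dxpl_id, &chunk_mode);
+
+              /* Report whether the request actually completed collectively */
+              if (io_mode == H5D_MPIO_NO_COLLECTIVE)
+                  printf("I/O fell back to independent mode\n");
+              if (chunk_mode == H5D_MPIO_MULTI_CHUNK)
+                  printf("Multi-chunk optimization was used\n");
+          }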
+
+ Tools
+ -----
+ - None
+
+ High-Level APIs
+ ---------------
+ - Added the following Fortran wrappers for the Dimension Scale APIs:
+ h5dsset_scale_f
+ h5dsattach_scale_f
+ h5dsdetach_scale_f
+ h5dsis_attached_f
+ h5dsis_scale_f
+ h5dsset_label_f
+ h5dsget_label_f
+ h5dsget_scale_name_f
+ h5dsget_num_scales_f
+ (EIP for SB - 2011/10/13 - HDFFV-3797)
+
+ Fortran API
+ -----------
+ - The HDF5 Fortran library was enhanced to support the Fortran 2003 standard.
+ The following features are available when the HDF5 library is configured
+ using the --enable-fortran and --enable-fortran2003 configure flags AND
+ if the Fortran compiler is Fortran 2003 compliant:
+
+ - Subroutines overloaded with the C_PTR derived type:
+ h5pget_f
+ h5pget_fill_value_f
+ h5pinsert_f
+ h5pregister_f
+ h5pset_f
+ h5pset_fill_value_f
+ h5rcreate_f
+          h5rdereference_f
+ h5rget_name_f
+ h5rget_obj_type_f
+ - Subroutines overloaded with the C_PTR derived type
+ and simplified signatures:
+ h5aread_f
+ h5awrite_f
+ h5dread_f
+ h5dwrite_f
+ - New subroutines
+ h5dvlen_reclaim_f
+ h5literate_by_name_f
+ h5literate_f
+ h5ovisit_f
+ h5tconvert_f
+ h5pset_nbit_f
+ h5pset_scaleoffset_f
+ - Subroutines with additional optional parameters:
+ h5pcreate_class_f
+ (EIP - 2011/10/14)
+
+ C++ API
+ -------
+ - None
+
+
+Support for New Platforms, Languages, and Compilers
+===================================================
+ - None
+
+Bug Fixes since HDF5-1.8.7
+==========================
+
+ Configuration
+ -------------
+ - Changed the size of H5_SIZEOF_OFF_T to 4 bytes (was 8) in the VMS
+ h5pubconf.h based on the output of a test program. (DER - 2011/10/12)
+ - The Windows and VMS versions of H5pubconf.h were brought into sync with
+ the linux/posix version. (DER - 2011/10/12)
+ - Fixed a bug in the bin/trace Perl script where API functions
+ that take a variable number of arguments were not processed for
+ trace statement fixup. (DER - 2011/08/25)
+ - The --enable-h5dump-packed-bits configure option has been removed.
+ The h5dump code that this option conditionally enabled is now always
+ compiled into h5dump. Please refer to the h5dump reference manual for
+ usage of the packed bits feature. (MAM - 2011/06/23 - HDFFV-7592)
+ - Configure now uses the same flags and symbols in its tests that are
+ used to build the library. (DER - 2011/05/24)
+
+ Library
+ -------
+    - Corrected the error when copying attributes between files which use
+      different versions of the file format. (QAK - 2011/10/20 - HDFFV-7718)
+ - Corrected the error when loading local heaps from the file, which could
+ cause the size of the local heap's data block to increase dramatically.
+ (QAK - 2011/10/14 - HDFFV-7767)
+ - An application does not need to do H5O_move_msgs_forward() when writing
+ attributes. Tests were checked into the performance suite.
+ (VC - 2011/10/13 - HDFFV-7640)
+ - Fixed a bug that occurred when using H5Ocopy on a committed datatype
+ containing an attribute using that committed datatype.
+ (NAF - 2011/10/13 - HDFFV-5854)
+ - Added generic VFD I/O types to the SEC2 and log VFDs to ensure correct
+ I/O sizes (and remove compiler warnings) between Windows and true POSIX
+ systems. (DER - 2011/10/12)
+ - Corrected some Windows behavior in the SEC2 and log VFDs. This mainly
+ involved datatype correctness fixes, Windows API call error checks,
+ and adding the volume serial number to the VFD cmp functions.
+ (DER - 2011/10/12)
+ - Converted post-checks for the appropriate POSIX I/O sizes to pre-checks
+ in order to avoid platform-specific or undefined behavior.
+ (DER - 2011/10/12)
+ - #ifdef _WIN32 instances have been changed to #ifdef H5_HAVE_WIN32_API.
+ H5_HAVE_VISUAL_STUDIO checks have been added where necessary. This is in
+ CMake only as configure never sets _WIN32. (ADB - 2011/09/12)
+    - The CLANG compiler with the options -fcatch-undefined-behavior and
+      -ftrapv discovered 3 problems in the tests and tools library:
+ 1. In dsets.c, left shifting an unsigned int for 32 bits or more
+ caused undefined behavior.
+ 2. In dt_arith.c, the INIT_INTEGER macro definition has an overflow
+ when the value is a negative minimal and is being subtracted from one.
+ 3. In tools/lib/h5tools_str.c, right shifting an int value for 32 bits
+ or more caused undefined behavior.
+ All the problems have been corrected. (SLU - 2011/09/02 - HDFFV-7674)
+ - H5Epush2() now has the correct trace functionality (this is related to the
+ bin/trace Perl script bug noted in the configure section).
+ (DER - 2011/08/25)
+ - Corrected mismatched function name typo of h5pget_dxpl_mpio_c and
+ h5pfill_value_defined_c. (AKC - 2011/08/22 - HDFFV-7641)
+ - Corrected an internal error in the library where objects that use committed
+ (named) datatypes and were accessed from two different file IDs could confuse
+ the two and cause erroneous failures. (QAK - 2011/07/18 - HDFFV-7638)
+ - In v1.6 of the library, there was an EOA for the whole MULTI file saved in the
+ super block. We took it out in v1.8 of the library because it's meaningless
+ for the MULTI file. v1.8 of the library saves the EOA for the metadata file
+ instead, but this caused a backward compatibility problem.
+      A v1.8 library couldn't open a file created with the v1.6 library. We
+      fixed the problem by checking the EOA value to detect files
+      created with the v1.6 library. (SLU - 2011/06/22)
+ - When a dataset had filters and reading data failed, the error message
+ didn't say which filter wasn't registered. It's fixed now. (SLU - 2011/06/03)
+
+ Parallel Library
+ ----------------
+ - The Special Collective IO (IO when some processes do not contribute to the
+ IO) and the Complex Derived Datatype MPI functionalities are no longer
+ conditionally enabled in the library by configure. They are always
+ enabled in order to take advantage of performance boosts from these
+ behaviors. Older MPI implementations that do not allow for these
+      functionalities can no longer be used by HDF5.
+ (MAM - 2011/07/08 - HDFFV-7639).
+
+ Tools
+ -----
+    - h5diff: fixed a segfault over a non-comparable attribute with different
+      dimensions or rank, along with the '-c' option to display details.
+      (JKM - 2011/10/24 - HDFFV-7770)
+ - Fixed h5diff to display all the comparable objects and attributes
+ regardless of detecting non-comparables. (JKM - 2011/09/16 - HDFFV-7693)
+    - Fixed h5repack to update the values of references (object and region)
+      in attributes for 1) references, 2) arrays of references,
+      3) variable-length references, and 4) compound references.
+      (PC - 2011/09/14 - HDFFV-5932)
+ - h5diff: fixed a segfault over a dataset with container types
+ array and variable-length (vlen) along with multiple nested compound types.
+ Example: compound->array->compound, compound->vlen->compound.
+ (JKM - 2011/09/01 - HDFFV-7712)
+ - h5repack: added macro to handle a failure in H5Dread/write when memory
+ allocation failed inside the library. (PC - 2011/08/19)
+    - Fixed h5jam to not allow specifying an HDF5 formatted file as an input
+      file for the -u (user block file) option. The original HDF5 file
+      would not be accessible if this behavior was allowed.
+      (JKM - 2011/08/19 - HDFFV-5941)
+ - Revised the command help pages of h5jam and h5unjam. The descriptions
+ were not up to date and some were missing.
+ (JKM - 2011/08/15 - HDFFV-7515)
+ - Fixed h5dump to correct the schema location:
+ <hdf5:HDF5-File
+ xmlns:hdf5="http://hdfgroup.org/HDF5/XML/schema/HDF5-File"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://hdfgroup.org/HDF5/XML/schema/HDF5-File
+ http://www.hdfgroup.org/HDF5/XML/schema/HDF5-File.xsd">
+ (ADB - 2011/08/10)
+ - h5repack: h5repack failed to copy a dataset if the layout is changed
+ from chunked with unlimited dimensions to contiguous.
+ (PC - 2011/07/15 - HDFFV-7649)
+    - Fixed h5diff: the "--delta" option considered two NaNs of the same type
+      to be different, which is wrong based on the h5diff description in the
+      Reference Manual. (PC - 2011/07/15 - HDFFV-7656)
+    - Fixed h5diff to display an instructive error message and exit when
+      mutually exclusive options (-d, -p, and --use-system-epsilon) are
+      used together. (JKM - 2011/07/07 - HDFFV-7600)
+    - Fixed h5dump so that it displays the first line of each element in the
+      correct position for multi-dimensional array types. Before this fix,
+      the first line of each element in an array was displayed after the
+      last line of the previous element without moving to the next line
+      (+indentation). (JKM - 2011/06/15 - HDFFV-5878)
+ - Fixed h5dump so that it will display the correct value for
+ H5T_STD_I8LE datasets on the Blue-gene system (ppc64, linux, Big-Endian,
+ clustering). (AKC & JKM - 2011/05/12 - HDFFV-7594)
+ - Fixed h5diff to compare a file to itself correctly. Previously h5diff
+ reported either the files were different or not compatible in certain
+ cases even when comparing a file to itself. This fix also improves
+ performance when comparing the same target objects through verifying
+ the object and file addresses before comparing the details
+ in the objects. Examples of details are datasets and attributes.
+ (XCAO & JKM - 2011/05/06 - HDFFV-5928)
+
+ F90 API
+ -------
+    - Modified the h5open_f and h5close_f subroutines so that they do not
+      call H5open and H5close, respectively. While the H5open call just adds
+      overhead, an H5close call from a Fortran application shuts down the
+      HDF5 library, making it inaccessible to the application.
+      (EIP & SB - 2011/10/13 - HDFFV-915)
+    - Fixed h5tget_tag_f where the length of the C string was used to
+      repack the C string into the Fortran string. This led to memory
+      corruption in the calling program. (SB - 2011/07/26)
+ - Added defined constants:
+ H5T_ORDER_MIXED_F (HDFFV-2767)
+ H5Z_SO_FLOAT_DSCALE_F
+ H5Z_SO_FLOAT_ESCALE_F
+ H5Z_SO_INT_F
+ H5Z_SO_INT_MINBITS_DEFAULT_F
+ H5O_TYPE_UNKNOWN_F
+ H5O_TYPE_GROUP_F
+ H5O_TYPE_DATASET_F
+ H5O_TYPE_NAMED_DATATYPE_F
+ H5O_TYPE_NTYPES_F
+
+ C++ API
+ ------
+ - None
+
+ High-Level APIs:
+ ------
+ - Fixed the H5LTdtype_to_text function. It had some memory problems when
+ dealing with some complicated data types. (SLU - 2011/10/19 - HDFFV-7701)
+ - Fixed H5DSset_label seg faulting when retrieving the length of a
+ dimension label that was not set. (SB - 2011/08/07 - HDFFV-7673)
+ - Fixed a dimension scale bug where if you create a dimscale, attach two
+ datasets to it, and then unattach them, you get an error if they are
+ unattached in order, but no error if you unattach them in reverse order.
+ (SB - 2011/06/07 - HDFFV-7605)
+
+ Fortran High-Level APIs:
+ ------
+ - None
+
+
+Platforms Tested
+================
+The following platforms and compilers have been tested for this release.
+
+ AIX 5.3 xlc 10.1.0.5
+ (NASA G-ADA) xlC 10.1.0.5
+ xlf90 12.1.0.6
+
+ FreeBSD 8.2-STABLE i386 gcc 4.2.1 [FreeBSD] 20070719
+ (loyalty) g++ 4.2.1 [FreeBSD] 20070719
+ gcc 4.6.1 20110422
+ g++ 4.6.1 20110422
+ gfortran 4.6.1 20110422
+
+ FreeBSD 8.2-STABLE amd64 gcc 4.2.1 [FreeBSD] 20070719
+ (freedom) g++ 4.2.1 [FreeBSD] 20070719
+ gcc 4.6.1 20110422
+ g++ 4.6.1 20110422
+ gfortran 4.6.1 20110422
+
+ IBM Blue Gene/P bgxlc 9.0.0.9
+ (LLNL uDawn) bgxlf90 11.1.0.7
+ bgxlC 9.0.0.9
+
+ Linux 2.6.16.60-0.54.5-smp Intel(R) C, C++, Fortran Compilers
+ x86_64 Version 11.1 20090630
+ (INL Icestorm)
+
+ Linux 2.6.18-194.el5 x86_64 Intel(R) C, C++, Fortran Compilers
+ (INL Fission) Version 12.0.2 20110112
+
+ Linux 2.6.18-108chaos x86_64 Intel(R) C, C++, Fortran Compilers
+ (LLNL Aztec) Version 11.1 20090630
+
+ Linux 2.6.18-194.3.1.el5PAE gcc (GCC) 4.1.2 and 4.4.2
+ #1 SMP i686 i686 i386 GNU Fortran (GCC) 4.1.2 20080704
+ (jam) (Red Hat 4.1.2-48) and 4.4.2
+ PGI C, Fortran, C++ 10.4-0 32-bit
+ PGI C, Fortran, C++ 10.6-0 32-bit
+ Intel(R) C Compiler for 32-bit
+ applications, Version 11.1
+ Intel(R) C++ Compiler for 32-bit
+ applications, Version 11.1
+ Intel(R) Fortran Compiler for 32-bit
+ applications, Version 11.1
+ MPICH mpich2-1.3.1 compiled with
+ gcc 4.1.2 and gfortran 4.1.2
+
+ Linux 2.6.18-238.12.1.el5 gcc 4.1.2 and 4.4.2
+ #1 SMP x86_64 GNU/Linux GNU Fortran (GCC) 4.1.2 20080704
+ (koala) (Red Hat 4.1.2-46) and 4.4.2
+ tested for both 32- and 64-bit binaries
+ Intel(R) C, C++, Fortran Compilers for
+ applications running on Intel(R) 64,
+ Version 11.1.
+ PGI C, Fortran, C++ Version 9.0-4
+ for 64-bit target on x86-64
+ MPICH mpich2-1.3.1 compiled with
+ gcc 4.1.2 and gfortran 4.1.2
+
+ SGI Altix UV Intel(R) C, Fortran Compilers
+ SGI ProPack 7 Linux Version 11.1 20100806
+ 2.6.32.24-0.2.1.2230.2.PTF- SGI MPT 2.02
+ default #1 SMP
+ (NCSA ember)
+
+ Dell NVIDIA Cluster Intel(R) C, Fortran Compilers
+ Red Hat Enterprise Linux 6 Version 12.0.4 20110427
+ 2.6.32-131.4.1.el6.x86_64 mvapich2 1.7rc1-intel-12.0.4
+ (NCSA forge)
+
+ SunOS 5.10 32- and 64-bit Sun C 5.11 SunOS_sparc 2010/08/13
+ Sun Fortran 95 8.5 SunOS_sparc 2010/08/13
+ Sun C++ 5.11 SunOS_sparc 2010/08/13
+
+ Windows XP Visual Studio 2008 w/ Intel Fortran 10.1 (project files)
+ Visual Studio 2008 w/ Intel Fortran 11.1 (cmake)
+ Visual Studio 2010 (cmake)
+ Cygwin(1.7.9 native gcc(4.5.3) compiler and gfortran)
+
+ Windows XP x64 Visual Studio 2008 w/ Intel Fortran 10.1 (project files)
+ Visual Studio 2008 w/ Intel Fortran 11.1 (cmake)
+ Visual Studio 2010 (cmake)
+ Cygwin(1.7.9 native gcc(4.5.3) compiler and gfortran)
+
+ Windows Vista Visual Studio 2008 w/ Intel Fortran 11.1 (cmake)
+
+ Windows Vista x64 Visual Studio 2008 w/ Intel Fortran 11.1 (cmake)
+
+ Windows 7 Visual Studio 2008 w/ Intel Fortran 11.1 (cmake)
+
+ Windows 7 x64 Visual Studio 2008 w/ Intel Fortran 11.1 (cmake)
+
+ Mac OS X 10.8.0 (Intel 64-bit) i686-apple-darwin10-gcc-4.2.1 (GCC) 4.2.1 (Apple Inc. build 5666) (dot 3)
+ Darwin Kernel Version 10.8.0 GNU Fortran (GCC) 4.6.1
+ Intel C, C++ and Fortran compilers 12.1.0
+
+ Mac OS X 10.8.0 (Intel 32-bit) i686-apple-darwin10-gcc-4.2.1 (GCC) 4.2.1 (Apple Inc. build 5666) (dot 3)
+ Darwin Kernel Version 10.8.0 GNU Fortran (GCC) version 4.6.1
+ Intel C, C++ and Fortran compilers 12.1.0
+
+ Fedora 12 2.6.32.16-150.fc12.ppc64 #1 SMP ppc64 GNU/Linux
+ gcc (GCC) 4.4.4 20100630 (Red Hat 4.4.4-10)
+ GNU Fortran (GCC) 4.4.4 20100630 (Red Hat 4.4.4-10)
+
+ Debian6.0.3 2.6.32-5-686 #1 SMP i686 GNU/Linux
+ gcc (Debian 4.4.5-8) 4.4.5
+ GNU Fortran (Debian 4.4.5-8) 4.4.5
+
+ Debian6.0.3 2.6.32-5-amd64 #1 SMP x86_64 GNU/Linux
+ gcc (Debian 4.4.5-8) 4.4.5
+ GNU Fortran (Debian 4.4.5-8) 4.4.5
+
+ Fedora15 2.6.40.6-0.fc15.i686.PAE #1 SMP i686 i686 i386 GNU/Linux
+ gcc (GCC) 4.6.1 20110908 (Red Hat 4.6.1-9)
+ GNU Fortran (GCC) 4.6.1 20110908 (Red Hat 4.6.1-9)
+
+ Fedora15 2.6.40.6-0.fc15.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux
+ gcc (GCC) 4.6.1 20110908 (Red Hat 4.6.1-9)
+ GNU Fortran (GCC) 4.6.1 20110908 (Red Hat 4.6.1-9)
+
+ SUSE 11.4 2.6.37.6-0.7-desktop #1 SMP PREEMPT i686 i686 i386 GNU/Linux
+ gcc (SUSE Linux) 4.5.1 20101208
+ GNU Fortran (SUSE Linux) 4.5.1 20101208
+
+ SUSE 11.4 2.6.37.6-0.7-desktop #1 SMP PREEMPT x86_64 x86_64 x86_64 GNU/Linux
+ gcc (SUSE Linux) 4.5.1 20101208
+ GNU Fortran (SUSE Linux) 4.5.1 20101208
+
+ Ubuntu 11.10 3.0.0-12-generic #20-Ubuntu SMP i686 GNU/Linux
+ gcc (Ubuntu/Linaro 4.6.1-9ubuntu3) 4.6.1
+ GNU Fortran (Ubuntu/Linaro 4.6.4-9ubuntu3) 4.6.1
+
+ Ubuntu 11.10 3.0.0-12-generic #20-Ubuntu SMP x86_64 GNU/Linux
+ gcc (Ubuntu/Linaro 4.6.1-9ubuntu3) 4.6.1
+ GNU Fortran (Ubuntu/Linaro 4.6.1-9ubuntu3) 4.6.1
+
+ OpenVMS Alpha 8.3 HP C V7.3-009
+ HP Fortran V8.2-104679-48H9K
+ HP C++ V7.3-009
+
+ Cray Linux Environment (CLE) PrgEnv-pgi 2.2.74
+ hopper.nersc.gov pgcc 11.7-0 64-bit target on x86-64 Linux -tp k8e
+ franklin.nersc.gov pgf90 11.7-0 64-bit target on x86-64 Linux -tp k8e
+ pgCC 11.7-0 64-bit target on x86-64 Linux -tp k8e
+
+Tested Configuration Features Summary
+=====================================
+
+ In the tables below
+ y = tested
+ n = not tested in this release
+ C = Cluster
+ W = Workstation
+ x = not working in this release
+ dna = does not apply
+ ( ) = footnote appears below second table
+ <blank> = testing incomplete on this feature or platform
+
+Platform C F90 F90 C++ zlib SZIP
+ parallel parallel
+Solaris2.10 32-bit n y n y y y
+Solaris2.10 64-bit n y n y y y
+Windows XP n y(4) n y y y
+Windows XP x64 n y(4) n y y y
+Windows Vista n y(4) n y y y
+Windows Vista x64 n y(4) n y y y
+OpenVMS Alpha n y n y y n
+Mac OS X 10.8 Intel 32-bit n y n y y y
+Mac OS X 10.8 Intel 64-bit n y n y y y
+AIX 5.3 32- and 64-bit n y n y y y
+FreeBSD 8.2-STABLE 32&64 bit n x n x y y
+CentOS 5.5 Linux 2.6.18-194 i686 GNU (1)W y y(2) y y y y
+CentOS 5.5 Linux 2.6.18-194 i686 Intel W n y n y y n
+CentOS 5.5 Linux 2.6.18-194 i686 PGI W n y n y y n
+CentOS 5.5 Linux 2.6.16 x86_64 GNU (1) W y y(3) y y y y
+CentOS 5.5 Linux 2.6.16 x86_64 Intel W n y n y y n
+CentOS 5.5 Linux 2.6.16 x86_64 PGI W n y n y y y
+Fedora 12 Linux 2.6.32.16-150.fc12.ppc64 n y n y y y
+SGI ProPack 7 Linux 2.6.32.24 y y y y y y
+Red Hat Enterprise Linux 6 y y y y y y
+CLE hopper.nersc.gov y y(3) y y y n
+CLE franklin.nersc.gov y y(3) y y y n
+
+
+Platform Shared Shared Shared Thread-
+ C libs F90 libs C++ libs safe
+Solaris2.10 32-bit y y y y
+Solaris2.10 64-bit y y y y
+Windows XP y y(4) y n
+Windows XP x64 y y(4) y n
+Windows Vista y y(4) y y
+Windows Vista x64 y y(4) y y
+OpenVMS Alpha n n n n
+Mac OS X 10.8 Intel 32-bit y(5) n y n
+Mac OS X 10.8 Intel 64-bit y(5) n y n
+AIX 5.3 32- and 64-bit n n n y
+FreeBSD 8.2-STABLE 32&64 bit y x x y
+CentOS 5.5 Linux 2.6.18-128 i686 GNU (1)W y y(2) y y
+CentOS 5.5 Linux 2.6.18-128 i686 Intel W y y y n
+CentOS 5.5 Linux 2.6.18-128 i686 PGI W y y y n
+CentOS 5.5 Linux 2.6.16 x86_64 GNU (1) W y y y y
+CentOS 5.5 Linux 2.6.16 x86_64 Intel W y y y n
+CentOS 5.5 Linux 2.6.16 x86_64 PGI W y y y n
+Fedora 12 Linux 2.6.32.16-150.fc12.ppc64 y y y y
+SGI ProPack 7 Linux 2.6.32.24 y y y n
+Red Hat Enterprise Linux 6 y y y n
+CLE hopper.nersc.gov n n n n
+CLE franklin.nersc.gov n n n n
+
+ (1) Fortran compiled with gfortran.
+ (2) With PGI and Absoft compilers.
+ (3) With PGI compiler for Fortran.
+ (4) Using Visual Studio 2008 w/ Intel Fortran 10.1 (Cygwin shared libraries are not supported)
+ (5) C and C++ shared libraries will not be built when Fortran is enabled.
+ Compiler versions for each platform are listed in the preceding
+ "Platforms Tested" table.
+
+
+Known Problems
+==============
+
+* The STDIO VFD does not work on some architectures, possibly due to 32/64
+ bit or large file issues. The basic STDIO VFD test is known to fail on
+ 64-bit SunOS 5.10 on SPARC when built with -m64 and 32-bit OS X/Darwin
+ 10.7.0. The STDIO VFD test has been disabled while we investigate and
+ a fix should appear in a future release, possibly 1.8.9.
+ (DER - 2011/10/14)
+
+* h5diff can report inconsistent results when comparing datasets of enum type
+ that contain invalid values. This is due to how enum types are handled in
+ the library and will be addressed in the next release.
+ (DER - 2011/10/14 - HDFFV-7527)
+
+* The links test can fail under the stdio VFD due to some issues with external
+ links. This will be investigated and fixed in a future release.
+ (DER - 2011/10/14 - HDFFV-7768)
+
+* After the shared library support was fixed for some bugs, it was discovered
+ that "make prefix=XXX install" no longer works for shared libraries. It
+ still works correctly for static libraries. Therefore, if you want to
+ install the HDF5 shared libraries in a location such as /usr/local/hdf5,
+ you need to specify the location via the --prefix option during configure
+  time. E.g., ./configure --prefix=/usr/local/hdf5 ...
+ (AKC - 2011/05/07 - HDFFV-7583)
+
+* The parallel test, t_shapesame, in testpar/, may run for a long time and may
+ be terminated by the alarm signal. If that happens, one can increase the
+ alarm seconds (default is 1200 seconds = 20 minutes) by setting the
+ environment variable, $HDF5_ALARM_SECONDS, to a larger value such as 3600
+ (60 minutes). Note that the t_shapesame test may fail in some systems
+ (see the "While working on the 1.8.6 release..." problem below). If
+ it does, it will waste more time if $HDF5_ALARM_SECONDS is set
+ to a larger value. (AKC - 2011/05/07)
+
+* The C++ and FORTRAN bindings are not currently working on FreeBSD.
+ (QAK - 2011/04/26)
+
+* Shared Fortran libraries are not quite working on AIX. While they are
+ generated when --enable-shared is specified, the fortran and hl/fortran
+ tests fail. We are looking into the issue. HL and C++ shared libraries
+ should now be working as intended, however. (MAM - 2011/04/20)
+
+* The --with-mpe configure option does not work with Mpich2. (AKC - 2011/03/10)
+
+* While working on the 1.8.6 release of HDF5, a bug was discovered that can
+ occur when reading from a dataset in parallel shortly after it has been
+ written to collectively. The issue was exposed by a new test in the parallel
+ HDF5 test suite, but had existed before that. We believe the problem lies with
+ certain MPI implementations and/or file systems.
+
+ We have provided a pure MPI test program, as well as a standalone HDF5
+ program, that can be used to determine if this is an issue on your system.
+ They should be run across multiple nodes with a varying number of processes.
+ These programs can be found at:
+ http://www.hdfgroup.org/ftp/HDF5/examples/known_problems/
+ (NAF - 2011/01/19)
+
+* The library's test dt_arith.c showed a compiler's rounding problem on
+ Cygwin when converting from unsigned long long to long double. The
+ library's own conversion works fine. We defined a macro for Cygwin to
+ skip this test until we can solve the problem.
+ (SLU - 2010/05/05 - HDFFV-1264)
+
+* The VFL drivers are not backward compatible between 1.6 and 1.8. In
+  H5FDpublic.h, the structure H5FD_class_t changed in 1.8: a new parameter
+  was added to the get_eoa and set_eoa callback functions, and a new
+  callback function, get_type_map, was added. The public function
+  H5FDrealloc was removed in 1.8. The problem only occurs when users define
+  their own driver for 1.6 and try to plug it into the 1.8 library. Because
+  only one user has complained about it, we (Elena, Quincey, and I) decided
+  to leave it as is (see bug report #1279). Quincey will make a plan for 1.10.
+ (SLU - 2010/02/02)
+
+* MinGW has a missing libstdc++.dll.a library file and will not successfully link
+ C++ applications/tests. Do not use the enable-cxx configure option. Read all of
+ the INSTALL_MINGW.txt file for all restrictions. (ADB - 2009/11/11)
+
+* The --enable-static-exec configure flag will only statically link libraries
+ if the static version of that library is present. If only the shared version
+ of a library exists (i.e., most system libraries on Solaris, AIX, and Mac,
+ for example, only have shared versions), the flag should still result in a
+ successful compilation, but note that the installed executables will not be
+ fully static. Thus, the only guarantee on these systems is that the
+ executable is statically linked with just the HDF5 library.
+ (MAM - 2009/11/04)
+
+* The PathScale MPI implementation, accessing a Panasas file system, would
+ cause H5Fcreate() with H5F_ACC_EXCL to fail even when the file does not
+ exist. This is due to the MPI_File_open() call failing if the mode has
+ the MPI_MODE_EXCL bit set. (AKC - 2009/08/11 - HDFFV-988)
+
+* Parallel tests failed with 16 processes with data inconsistency at testphdf5
+ / dataset_readAll. Parallel tests also failed with 32 and 64 processes with
+ collective abort of all ranks at t_posix_compliant / allwrite_allread_blocks
+ with MPI IO. (CMC - 2009/04/28)
+
+* For Red Storm, a Cray XT3 system, the tools/h5ls/testh5ls.sh and
+  tools/h5copy/testh5copy.sh scripts will fail some of their sub-tests.
+  These sub-tests are expected to fail and should exit with a non-zero code,
+  but the yod command does not propagate the exit code of the executables.
+  Yod always returns 0 if it can launch the executable. The test suite shell
+  expects a non-zero code for this particular test; therefore, it concludes
+  the test has failed when it receives 0 from yod. Skip all the "failing"
+  tests for now by changing them as follows.
+
+ ======== Original tools/h5ls/testh5ls.sh =========
+ TOOLTEST tgroup-1.ls 1 -w80 -r -g tgroup.h5
+ ======== Change to ===============================
+ echo SKIP TOOLTEST tgroup-1.ls 1 -w80 -r -g tgroup.h5
+ ==================================================
+
+ ======== Original tools/h5copy/testh5copy.sh =========
+ TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d grp_rename
+ TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d /grp_rename/grp_dsets
+ TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_dsets -d /E/F/grp_dsets
+ TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_nested -d /G/H/grp_nested
+ H5LSTEST $FILEOUT
+ ======== Change to ===============================
+ echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d grp_rename
+ echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d /grp_rename/grp_dsets
+ echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_dsets -d /E/F/grp_dsets
+ echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_nested -d /G/H/grp_nested
+ echo SKIP H5LSTEST $FILEOUT
+ ==================================================
+ (AKC - 2008/11/10)
+
+* For Red Storm, a Cray XT3 system, the yod command sometimes gives the
+ message, "yod allocation delayed for node recovery". This interferes with
+ test suites that do not expect to see this message. See the section of "Red
+ Storm" in file INSTALL_parallel for a way to deal with this problem.
+ (AKC - 2008/05/28)
+
+* On an Intel 64 Linux cluster (RH 4, Linux 2.6.9) with Intel 10.0 compilers,
+ use -mp -O1 compilation flags to build the libraries. A higher level of
+ optimization causes failures in several HDF5 library tests.
+
+* On mpich 1.2.5 and 1.2.6, if more than two processes contribute no IO and
+  the application asks to do collective IO, we have found that when using 4
+  processors, a simple collective write will sometimes hang. This can be
+  verified with the t_mpi test under testpar.
+
+* A dataset created or rewritten with a v1.6.3 library or after cannot be read
+ with the v1.6.2 library or before when the Fletcher32 EDC filter is enabled.
+ There was a bug in the calculation of the Fletcher32 checksum in the
+ library before v1.6.3; the checksum value was not consistent between big-
+ endian and little-endian systems. This bug was fixed in Release 1.6.3.
+ However, after fixing the bug, the checksum value was no longer the same as
+ before on little-endian system. Library releases after 1.6.4 can still read
+ datasets created or rewritten with an HDF5 library of v1.6.2 or before.
+ (SLU - 2005/06/30)
+
+* On IBM AIX systems, parallel HDF5 mode will fail some tests with error
+ messages like "INFO: 0031-XXX ...". This is from the command `poe'.
+ Set the environment variable MP_INFOLEVEL to 0 to minimize the messages
+ and run the tests again.
+
+ The tests may fail with messages like "The socket name is already in use",
+ but HDF5 does not use sockets. This failure is due to problems with the
+ poe command trying to set up the debug socket. To resolve this problem,
+ check to see whether there are many old /tmp/s.pedb.* files staying around.
+ These are sockets used by the poe command and left behind due to failed
+ commands. First, ask your system administrator to clean them out.
+ Lastly, request IBM to provide a means to run poe without the debug socket.
+ (AKC - 2004/12/08)
+
+
+%%%%1.8.7%%%%
+
+
+HDF5 version 1.8.7 released on Tue May 10 09:24:44 CDT 2011
+================================================================================
+
+INTRODUCTION
+============
+
+This document describes the differences between HDF5-1.8.6 and
+HDF5 1.8.7, and contains information on the platforms tested and
+known problems in HDF5-1.8.7.
+For more details, see the files HISTORY-1_0-1_8_0_rc3.txt
+and HISTORY-1_8.txt in the release_docs/ directory of the HDF5 source.
+
+Links to the HDF5 1.8.7 source code, documentation, and additional materials
+can be found on the HDF5 web page at:
+
+ http://www.hdfgroup.org/products/hdf5/
+
+The HDF5 1.8.7 release can be obtained from:
+
+ http://www.hdfgroup.org/HDF5/release/obtain5.html
+
+User documentation for 1.8.7 can be accessed directly at this location:
+
+ http://www.hdfgroup.org/HDF5/doc/
+
+New features in the HDF5-1.8.x release series, including brief general
+descriptions of some new and modified APIs, are described in the "What's New
+in 1.8.0?" document:
+
+ http://www.hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html
+
+All new and modified APIs are listed in detail in the "HDF5 Software Changes
+from Release to Release" document, in the section "Release 1.8.7 (current
+release) versus Release 1.8.6":
+
+ http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html
+
+If you have any questions or comments, please send them to the HDF Help Desk:
+
+ help@hdfgroup.org
+
+
+CONTENTS
+========
+
+- New Features
+- Support for New Platforms, Languages, and Compilers
+- Bug Fixes since HDF5-1.8.6
+- Platforms Tested
+- Supported Configuration Features Summary
+- Known Problems
+
+
+New Features
+============
+
+ Configuration
+ -------------
+ - Configure now generates Makefiles that build in "silent make mode"
+ by default, in which compile and link lines are significantly
+ simplified for clarity. To override this and view the actual compile and
+ link lines during building, the --disable-silent-rules flag can be used
+ at configure time, or the 'make' command can be followed by V=1 to
+ indicate a "verbose" make. (MAM - 2011/4/14)
+ - Added mpicc and mpif90 as the default C and Fortran compilers for Linux
+ systems when --enable-parallel is specified but no $CC or $FC is defined.
+ (AKC - 2011/2/7)
+ - Added a new configure option, "--enable-unsupported", which can
+ be used to stop configure from preventing the use of unsupported
+ configure option combinations, such as c++ in parallel or Fortran
+ with threadsafe. Use at your own risk, as it may result in a
+ library that won't compile or run as expected!
+ (MAM - 2010/11/17 - Bug 2061)
+
+ Library
+ -------
+ - The library now allows the dimension size of a dataspace to be zero. In
+ the past, the library would allow this only if the maximum dimension
+ size was unlimited. Now there is no such restriction, but no data
+ can be written to this kind of dataset. (SLU - 2011/4/20)
+ - Added two new macros, H5_VERSION_GE and H5_VERSION_LE, that let users
+ compare a given version number with the version of the library being
+ used. (SLU - 2011/4/20)
+ - Added the ability to cache files opened through external links. Added the
+ new public functions H5Pset_elink_file_cache_size(),
+ H5Pget_elink_file_cache_size(), and H5Fclear_elink_file_cache(); see the
+ sketch at the end of this section. (NAF - 2011/02/17)
+ - Finished implementing all options for 'log' VFD. (QAK - 2011/1/25)
+ - Removed all old code for Metrowerks compilers, bracketed by
+ __MWERKS__. The Metrowerks compiler is long gone. (AKC - 2010/11/17)
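+
+ The following minimal sketch (illustrative only, not part of the library or
+ its test suite; the file name "parent.h5" and the cache size of 8 are
+ arbitrary) shows how the new external link file cache calls might be used,
+ guarded by the new version macro:
+
+     #include "hdf5.h"
+
+     int main(void)
+     {
+     #if H5_VERSION_GE(1, 8, 7)                 /* compiled against 1.8.7 or newer */
+         hid_t    fapl = H5Pcreate(H5P_FILE_ACCESS);
+         unsigned efc_size = 0;
+
+         H5Pset_elink_file_cache_size(fapl, 8); /* keep up to 8 external link target files open */
+         H5Pget_elink_file_cache_size(fapl, &efc_size);
+
+         hid_t file = H5Fopen("parent.h5", H5F_ACC_RDONLY, fapl);
+         /* ... traverse external links; their target files stay cached ... */
+         H5Fclear_elink_file_cache(file);       /* release the cached target files */
+         H5Fclose(file);
+         H5Pclose(fapl);
+     #endif
+         return 0;
+     }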
+
+ Parallel Library
+ ----------------
+ - None
+
+ Tools
+ -----
+ - h5diff: Added new "verbose with levels" option, '-vN, --verbose=N'.
+ The old '-v, --verbose' option is deprecated but remains available;
+ it is exactly equivalent to '-v0, --verbose=0'.
+ The new levels 1 ('-v1' or '--verbose=1') and 2 ('-v2' or
+ '--verbose=2') can be specified to view more information regarding
+ attribute differences. Bug #2121 (JKM 2011/3/23)
+ - h5dump: Added new option --enable-error-stack. This option will
+ display error stack information in the output stream. This is
+ useful when the "h5dump: Unable to print data" message is output.
+ (ADB - 2011/03/03)
+
+ High-Level APIs
+ ---------------
+ - Fortran LT make dataset routines (H5LTmake_dataset_f,
+ h5ltmake_dataset_int_f, h5ltmake_dataset_float_f, h5ltmake_dataset_double_f)
+ and LT read dataset routines (h5ltread_dataset_f, h5ltread_dataset_int_f,
+ h5ltread_dataset_float_f, h5ltread_dataset_double_f) can now handle
+ datasets of rank 4 to 7. HDFFV-1217 (MSB - 2011/4/24)
+
+ F90 API
+ -------
+ - None
+
+ C++ API
+ -------
+ - None
+
+
+Support for New Platforms, Languages, and Compilers
+===================================================
+ - Intel V11.1 now uses -O3 optimization in production mode (EIP - 2010/10/08)
+
+
+
+Bug Fixes since HDF5-1.8.6
+==========================
+
+ Configuration
+ -------------
+ - Shared C++ and HL libraries on AIX should now be working correctly.
+ Note that Fortran shared libraries are still not working on AIX.
+ (See the Known Problems section, below). (MAM - 2011/4/20)
+ - Removed config/ibm-aix6.x. All IBM-AIX settings are in one file,
+ ibm-aix. (AKC - 2011/4/14)
+ - Shared C libraries are no longer disabled on Mac when Fortran
+ is enabled. Shared Fortran libraries are still not supported on Mac,
+ so configure will disable them by default, but this can be overridden
+ with the new --enable-unsupported configure option. The configure
+ summary has been updated to reflect the fact that the shared status of
+ the C++/Fortran wrapper libraries may not align with that of the C library.
+ (MAM - 2011/04/11 - HDFFV-4353).
+
+ Library
+ -------
+ - Changed the assertion failure when decoding a compound datatype with no
+ fields into a normal error. Also prohibited using this sort
+ of datatype for creating an attribute (as is already the case for
+ datasets and committed (named) datatypes). (QAK - 2011/04/15, Jira
+ issue #HDFFV-2766)
+ - Tell the VFL flush call that the file will be closing, allowing
+ the VFDs to avoid sync'ing the file (particularly valuable in parallel).
+ (QAK - 2011/03/09)
+ - The datatype handle created with H5Tencode/H5Tdecode used to have a
+ reference count of 0 (zero); it now has a reference count of 1 (one), so
+ it should be closed when no longer needed (see the sketch at the end of
+ this section). (SLU - 2011/2/18)
+ - Fixed the definition of H5_HAVE_GETTIMEOFDAY on Windows so that
+ HDgettimeofday() is defined and works properly. Bug HDFFV-5931
+ (DER - 2011/04/14)
+ - Added basic tests for the Windows, STDIO, and log VFDs.
+ (DER - 2011/04/11)
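+
+ As an illustration of the reference count change above (a minimal sketch,
+ not part of the library's test suite), the identifier returned by
+ H5Tdecode now holds one reference and should be released with H5Tclose:
+
+     #include <stdlib.h>
+     #include "hdf5.h"
+
+     int main(void)
+     {
+         size_t         nalloc = 0;
+         unsigned char *buf;
+         hid_t          dtype;
+
+         H5Tencode(H5T_NATIVE_INT, NULL, &nalloc);  /* query the encoded size */
+         buf = (unsigned char *)malloc(nalloc);
+         H5Tencode(H5T_NATIVE_INT, buf, &nalloc);   /* serialize the datatype */
+
+         dtype = H5Tdecode(buf);                    /* reference count is now 1 ... */
+         /* ... use the decoded datatype ... */
+         H5Tclose(dtype);                           /* ... so close it when done */
+
+         free(buf);
+         return 0;
+     }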
+
+ Parallel Library
+ ----------------
+ - None
+
+ Tools
+ -----
+ - Updated h5dump test case script to prevent entire test failure when
+ source directory is read-only. Bug #HDFFV-4342 (JKM 2011/4/12)
+ - Fixed h5dump displaying incorrect values for H5T_STD_I8BE type data in an
+ attribute on big-endian machines. H5T_STD_I8BE is a signed 8-bit type,
+ so h5dump is supposed to display -2 instead of 254. It worked correctly
+ on little-endian systems, but not on big-endian systems. Bug #HDFFV-4358
+ (JKM 04/08/2011)
+ - Updated some HDF5 tools to standardize the option name as
+ '--enable-error-stack' for printing HDF5 error stack messages. h5ls and
+ h5dump have been updated. For h5ls, this replaces the "-e/--errors" option,
+ which is deprecated. For h5dump, this is a new option. Bug #2182
+ (JKM 2011/3/30)
+ - Fixed the h5diff --use-system-epsilon option. The formula used in the
+ calculation was changed from ( |a - b| / b ) to ( |a - b| ).
+ This was done to improve performance. Bug #2184 (JKM 2011/3/24)
+ - Fixed output for H5T_REFERENCE in h5dump. According to the BNF document
+ the output of an H5T_REFERENCE should be followed by the type:
+ <reference> ::= H5T_REFERENCE { <ref_type> }
+ <ref_type> ::= H5T_STD_REF_OBJECT | H5T_STD_REF_DSETREG
+ Previously this was only displayed if the -R option was used.
+ Bug #1725 (ADB 2011/3/28)
+ - Fixed two h5diff issues. 1) h5diff compared attributes correctly only
+ when two objects had the same number of attributes and the attribute
+ names were identical. 2) h5diff did not display useful information about
+ attribute differences. Bug #2121 (JKM 2011/3/17)
+ - Fixed a memory leak in h5diff that occurred when accessing symbolic links
+ with the --follow-symlink option. Bug #2214 (JKM 2011/3/18)
+ - Fixed a memory leak in h5diff that occurred when accessing variable length
+ string data. Bug #2216 (JKM 2011/3/18)
+ - Fixed and improved the help page for h5ls -a, --address option.
+ Bug #1904 (JKM 2011/3/11)
+ - Fixed h5copy to enable copying an object into the same HDF5 file.
+ Previously h5copy displayed an error message when the target file
+ was the same as the source file. (XCAO 2011/3/8)
+ - Fixed an h5dump problem that caused the tool to skip some data elements
+ in large datasets with a large array datatype on Windows. This issue
+ arose only on Windows due to the different return behavior of the
+ _vsnprintf() function. Bug #2161 (JKM 2011/3/3)
+ - Fixed h5dump which was skipping some array indices in large datasets
+ with a relatively large array datatype. The interval of skipped indices
+ varied according to the size of the array. Bug #2092 (JKM 2011/2/15)
+ - Fixed h5diff which was segfaulting when comparing compound datasets
+ with a combination of fixed-length string datatypes and variable-length
+ string datatypes in certain orders. Bug #2089 (JKM 2010/12/28)
+ - Improved h5diff performance. 1) Now uses HDmemcmp() before comparing two
+ elements. 2) Replaced expensive H5Tequal() calls. 3) Retrieves datatype
+ information at the dataset level, not for each element, for compound
+ datasets. HDFFV-7516 (JKM 2011/4/18)
+ - Fixed h5ls to display nested compound types with curly brackets
+ when -S (--simple) option is used with -l (--label), so it shows
+ which members (in curly brackets) belong to which nested compound type,
+ making the output clearer. Bug #1979 (JKM 2010/11/09)
+ - Fixed h5diff to handle variable-length strings in a compound dataset
+ and variable-length string arrays in a compound dataset correctly.
+ Garbage values were previously displayed when h5diff compared multiple
+ variable-length strings in a compound type dataset.
+ Bug #1989 (JKM 2010/10/28)
+ - Fixed h5copy to fail gracefully when copying an object to a
+ non-existent group without the -p option. Bug #2040 (JKM 2010/10/18)
+
+ F90 API
+ ------
+ - None
+
+ C++ API
+ ------
+ - None
+
+ High-Level APIs:
+ ------
+ - None
+
+ Fortran High-Level APIs:
+ ------
+ - h5tbmake_table_f: Fixed an error in passing an array of characters with
+ field names of differing lengths.
+ - h5tget_field_info_f: Fixed an error in packing the C strings into a Fortran
+ array of strings. Added an optional argument called 'maxlen_out' which returns
+ the maximum character length of the field name elements.
+ Bug HDFFV-1255 (MSB - 4/17/2011)
+
+
+
+
+Platforms Tested
+================
+The following platforms and compilers have been tested for this release.
+
+ AIX 6.1 xlc 11.1.0.3
+ (NCSA BP) xlC 11.1.0.3
+ xlf90 13.1.0.3
+ mpcc_r 11.1.0.3
+ mpxlf90_r 13.1.0.3
+
+ FreeBSD 8.2-STABLE i386 gcc 4.2.1 [FreeBSD] 20070719
+ (loyalty) g++ 4.2.1 [FreeBSD] 20070719
+ gcc 4.6.1 20110422
+ g++ 4.6.1 20110422
+ gfortran 4.6.1 20110422
+
+ FreeBSD 8.2-STABLE amd64 gcc 4.2.1 [FreeBSD] 20070719
+ (freedom) g++ 4.2.1 [FreeBSD] 20070719
+ gcc 4.6.1 20110422
+ g++ 4.6.1 20110422
+ gfortran 4.6.1 20110422
+
+ Linux 2.6.18-194.3.1.el5PAE gcc (GCC) 4.1.2 and 4.4.2
+ #1 SMP i686 i686 i386 G95 (GCC 4.0.3 (g95 0.93!) Apr 21 2010)
+ (jam) GNU Fortran (GCC) 4.1.2 20080704
+ (Red Hat 4.1.2-48) and 4.4.2
+ PGI C, Fortran, C++ 10.4-0 32-bit
+ PGI C, Fortran, C++ 10.6-0 32-bit
+ Intel(R) C Compiler for 32-bit
+ applications, Version 11.1
+ Intel(R) C++ Compiler for 32-bit
+ applications, Version 11.1
+ Intel(R) Fortran Compiler for 32-bit
+ applications, Version 11.1
+ Absoft 32-bit Fortran 95 10.0.7
+ MPICH mpich2-1.3.1 compiled with
+ gcc 4.1.2 and gfortran 4.1.2
+
+ Linux 2.6.18-194.17.1.el5 gcc 4.1.2 and 4.4.2
+ #1 SMP x86_64 GNU/Linux G95 (GCC 4.0.3 (g95 0.93!) Apr 21 2010)
+ (amani) tested for both 32- and 64-bit binaries
+ GNU Fortran (GCC) 4.1.2 20080704
+ (Red Hat 4.1.2-46) and 4.4.2
+ Intel(R) C, C++, Fortran Compilers for
+ applications running on Intel(R) 64,
+ Version 11.1.
+ PGI C, Fortran, C++ Version 9.0-4
+ for 64-bit target on x86-64
+ MPICH mpich2-1.3.1 compiled with
+ gcc 4.1.2 and gfortran 4.1.2
+
+ SGI ProPack 7 Linux Intel(R) C++ Version 11.1 20100806
+ 2.6.32.24-0.2.1.2230.2.PTF- Intel(R) Fortran Version 11.1 20100806
+ default #1 SMP SGI MPT 2.01
+ SGI Altix UV
+ (NCSA ember)
+
+ SunOS 5.10 32- and 64-bit Sun C 5.9 Sun OS_sparc Patch 124867-16
+ (linew) Sun Fortran 95 8.3 Sun OS_sparc Patch 127000-13
+ Sun C++ 5.9 Sun OS_sparc Patch 124863-26
+ Sun C 5.10 SunOS_sparc Patch 141861-07
+ Sun Fortran 95 8.4 SunOS_sparc Patch 128231-06
+ Sun C++ 5.10 SunOS_sparc 128228-11
+
+ Intel Xeon Linux 2.6.18- gcc 4.2.4
+ 92.1.10.el5_lustre.1.6.6smp- Intel(R) C++ Version 10.1.017
+ perfctr #8 SMP Intel(R) Fortran Compiler Version 10.1.017
+ (NCSA abe) Open MPI 1.3.2
+ MVAPICH2-1.5.1_pgi-10.8
+
+ Windows XP Visual Studio 2008 w/ Intel Fortran 10.1 (project files)
+ Visual Studio 2008 w/ Intel Fortran 11.1 (cmake)
+ Visual Studio 2010 (cmake)
+ Cygwin(1.7.7 native gcc(4.3.4) compiler and gfortran)
+
+ Windows XP x64 Visual Studio 2008 w/ Intel Fortran 10.1 (project files)
+ Visual Studio 2008 w/ Intel Fortran 11.1 (cmake)
+ Visual Studio 2010 (cmake)
+ Cygwin(1.7.7 native gcc(4.3.4) compiler and gfortran)
+
+ Windows Vista Visual Studio 2008 w/ Intel Fortran 11.1 (cmake)
+
+ Windows Vista x64 Visual Studio 2008 w/ Intel Fortran 11.1 (cmake)
+
+ Windows 7 Visual Studio 2008 w/ Intel Fortran 11.1 (cmake)
+
+ Windows 7 x64 Visual Studio 2008 w/ Intel Fortran 11.1 (cmake)
+
+ Mac OS X 10.7.0 (Intel 64-bit) i686-apple-darwin10-gcc-4.2.1 (GCC) 4.2.1
+ Darwin Kernel Version 10.7.0 GNU Fortran (GCC) 4.6.0 20101106 (experimental)
+ Intel C, C++ and Fortran compilers 12.0.1.122 20101110
+
+ Mac OS X 10.7.0 (Intel 32-bit) i686-apple-darwin10-gcc-4.2.1 (GCC) 4.2.1 (Apple Inc. build 5666) (dot 3)
+ Darwin Kernel Version 10.7.0 GNU Fortran (GCC) version 4.4.0 20090123 (experimental)
+ [trunk revision 143587]
+
+ Fedora 12 2.6.32.16-150.fc12.ppc64 #1 SMP ppc64 GNU/Linux
+ gcc (GCC) 4.4.4 20100630 (Red Hat 4.4.4-10)
+ GNU Fortran (GCC) 4.4.4 20100630 (Red Hat 4.4.4-10)
+
+ Debian6.01 2.6.32-5-686 #1 SMP i686 GNU/Linux
+ gcc (Debian 4.4.5-8) 4.4.5
+ GNU Fortran (Debian 4.4.5-8) 4.4.5
+
+ Debian6.01 2.6.32-5-amd64 #1 SMP x86_64 GNU/Linux
+ gcc (Debian 4.4.5-8) 4.4.5
+ GNU Fortran (Debian 4.4.5-8) 4.4.5
+
+ Fedora14 2.6.35.12-88.fc14.i686.PAE #1 SMP i686 i686 i386 GNU/Linux
+ gcc (GCC) 4.5.1 20100924 (Red Hat 4.5.1-4)
+ GNU Fortran (GCC) 4.5.1 20100924 (Red Hat 4.5.1-4)
+
+ Fedora14 2.6.35.12-88.fc14.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux
+ gcc (GCC) 4.5.1 20100924 (Red Hat 4.5.1-4)
+ GNU Fortran (GCC) 4.5.1 20100924 (Red Hat 4.5.1-4)
+
+ SUSE 11.4 2.6.37.1-1.2-desktop #1 SMP PREEMPT i686 i686 i386 GNU/Linux
+ gcc (SUSE Linux) 4.5.1 20101208
+ GNU Fortran (SUSE Linux) 4.5.1 20101208
+
+ SUSE 11.4 2.6.37.1-1.2-desktop #1 SMP PREEMPT x86_64 x86_64 x86_64 GNU/Linux
+ gcc (SUSE Linux) 4.5.1 20101208
+ GNU Fortran (SUSE Linux) 4.5.1 20101208
+
+ Ubuntu 10.10 2.6.35-28-generic #50-Ubuntu SMP i686 GNU/Linux
+ gcc (Ubuntu/Linaro 4.4.4-14ubuntu5) 4.4.5
+ GNU Fortran (Ubuntu/Linaro 4.4.4-14ubuntu5) 4.4.5
+
+ Ubuntu 10.10 2.6.35-28-generic #50-Ubuntu SMP x86_64 GNU/Linux
+ gcc (Ubuntu/Linaro 4.4.4-14ubuntu5) 4.4.5
+ GNU Fortran (Ubuntu/Linaro 4.4.4-14ubuntu5) 4.4.5
+
+ OpenVMS Alpha 8.3 HP C V7.3-009
+ HP Fortran V8.2-104679-48H9K
+ HP C++ V7.3-009
+
+Tested Configuration Features Summary
+========================================
+
+ In the tables below
+ y = tested
+ n = not tested in this release
+ C = Cluster
+ W = Workstation
+ x = not working in this release
+ dna = does not apply
+ ( ) = footnote appears below second table
+ <blank> = testing incomplete on this feature or platform
+
+Platform C F90 F90 C++ zlib SZIP
+ parallel parallel
+Solaris2.10 32-bit n y n y y y
+Solaris2.10 64-bit n y n y y y
+Windows XP n y(4) n y y y
+Windows XP x64 n y(4) n y y y
+Windows Vista n y(4) n y y y
+Windows Vista x64 n y(4) n y y y
+OpenVMS Alpha n y n y y n
+Mac OS X 10.7 Intel 32-bit n y n y y y
+Mac OS X 10.7 Intel 64-bit n y n y y y
+AIX 6.1 32- and 64-bit y y y y y y
+FreeBSD 8.2-STABLE 32&64 bit n x n x y y
+CentOS 5.5 Linux 2.6.18-194 i686 GNU (1)W y y(2) y y y y
+CentOS 5.5 Linux 2.6.18-194 i686 Intel W n y n y y n
+CentOS 5.5 Linux 2.6.18-194 i686 PGI W n y n y y n
+CentOS 5.5 Linux 2.6.16 x86_64 GNU (1) W y y(3) y y y y
+CentOS 5.5 Linux 2.6.16 x86_64 Intel W n y n y y n
+CentOS 5.5 Linux 2.6.16 x86_64 PGI W n y n y y y
+RedHat EL4 2.6.18 Xeon Lustre C y y y y y n
+Fedora 12 Linux 2.6.32.16-150.fc12.ppc64 n y n y y y
+SGI Linux 2.6.32.19 y y y y y y
+
+
+Platform Shared Shared Shared Thread-
+ C libs F90 libs C++ libs safe
+Solaris2.10 32-bit y y y y
+Solaris2.10 64-bit y y y y
+Windows XP y y(4) y n
+Windows XP x64 y y(4) y n
+Windows Vista y y(4) y y
+Windows Vista x64 y y(4) y y
+OpenVMS Alpha n n n n
+Mac OS X 10.7 Intel 32-bit y(5) n y n
+Mac OS X 10.7 Intel 64-bit y(5) n y n
+AIX 6.1 32- and 64-bit n n n y
+FreeBSD 8.2-STABLE 32&64 bit y x x y
+CentOS 5.5 Linux 2.6.18-128 i686 GNU (1)W y y(2) y y
+CentOS 5.5 Linux 2.6.18-128 i686 Intel W y y y n
+CentOS 5.5 Linux 2.6.18-128 i686 PGI W y y y n
+CentOS 5.5 Linux 2.6.16 x86_64 GNU (1) W y y y y
+CentOS 5.5 Linux 2.6.16 x86_64 Intel W y y y n
+CentOS 5.5 Linux 2.6.16 x86_64 PGI W y y y n
+RedHat EL4 2.6.18 Xeon Lustre C y y y n
+Fedora 12 Linux 2.6.32.16-150.fc12.ppc64 y y y y
+SGI Linux 2.6.32.19 y y y y
+
+ (1) Fortran compiled with gfortran.
+ (2) With PGI and Absoft compilers.
+ (3) With PGI compiler for Fortran.
+ (4) Using Visual Studio 2008 w/ Intel Fortran 10.1 (Cygwin shared libraries are not supported)
+ (5) C and C++ shared libraries will not be built when Fortran is enabled.
+ Compiler versions for each platform are listed in the preceding
+ "Platforms Tested" table.
+
+
+Known Problems
+==============
+* After the shared library support was fixed for some bugs, it was discovered
+ that "make prefix=XXX install" no longer works for shared libraries. It
+ still works correctly for static libraries. Therefore, if you want to
+ install the HDF5 shared libraries in a location such as /usr/local/hdf5,
+ you need to specify the location via the --prefix option during configure
+ time. E.g., ./configure --prefix=/usr/local/hdf5 ...
+ (AKC - 2011/05/07 HDFFV-7583)
+
+* The parallel test, t_shapesame, in testpar/, may run for a long time and may
+ be terminated by the alarm signal. If that happens, one can increase the
+ alarm seconds (default is 1200 seconds = 20 minutes) by setting the
+ environment variable, $HDF5_ALARM_SECONDS, to a larger value such as 3600
+ (60 minutes). Note that the t_shapesame test may fail on some systems
+ (see the "While working on the 1.8.6 release..." problem below). If
+ it does, it will waste more time if $HDF5_ALARM_SECONDS is set
+ to a larger value. (AKC - 2011/05/07)
+
+* The C++ and FORTRAN bindings are not currently working on FreeBSD.
+ (QAK - 2011/04/26)
+
+* Shared Fortran libraries are not quite working on AIX. While they are
+ generated when --enable-shared is specified, the fortran and hl/fortran
+ tests fail. We are looking into the issue. HL and C++ shared libraries
+ should now be working as intended, however. (MAM - 2011/04/20)
+
+* The --with-mpe configure option does not work with Mpich2. AKC - 2011/03/10
+
+* If parallel gmake (e.g., gmake -j 4) is used, the "gmake clean" command
+ sometimes fails in the perform directory due to the attempt to remove the
+ executable of h5perf or h5perf_serial by two "parallel" commands. This error
+ has no effect on the functionality of the HDF5 library or the installation. It
+ is fixed in the next release. AKC - 2011/01/25
+
+* While working on the 1.8.6 release of HDF5, a bug was discovered that can
+ occur when reading from a dataset in parallel shortly after it has been
+ written to collectively. The issue was exposed by a new test in the parallel
+ HDF5 test suite, but had existed before that. We believe the problem lies with
+ certain MPI implementations and/or file systems.
+
+ We have provided a pure MPI test program, as well as a standalone HDF5
+ program, that can be used to determine if this is an issue on your system.
+ They should be run across multiple nodes with a varying number of processes.
+ These programs can be found at:
+ http://www.hdfgroup.org/ftp/HDF5/examples/known_problems/
+
+* The library's test dt_arith.c showed a compiler's rounding problem on
+ Cygwin when converting from unsigned long long to long double. The
+ library's own conversion works fine. We defined a macro for Cygwin to
+ skip this test until we can solve the problem. Please see bug #1813.
+ SLU - 2010/5/5
+
+* The VFL drivers are not backward compatible. In H5FDpublic.h, the
+ structure H5FD_class_t changed in 1.8. A new parameter was added to the
+ get_eoa and set_eoa callback functions, and a new callback function,
+ get_type_map, was added. The public function H5FDrealloc was taken
+ out in 1.8. The problem only happens when users define their own driver
+ for 1.6 and try to plug it into the 1.8 library. Because only one user has
+ complained about it, we (Elena, Quincey, and I) decided to leave it as
+ it is (see bug report #1279). Quincey will make a plan for 1.10.
+ SLU - 2010/2/2
+
+* MinGW has a missing libstdc++.dll.a library file and will not successfully link
+ C++ applications/tests. Do not use the enable-cxx configure option. Read all of
+ the INSTALL_MINGW.txt file for all restrictions. ADB - 2009/11/11
+
+* The PathScale MPI implementation, accessing a Panasas file system, would
+ cause H5Fcreate() with H5F_ACC_EXCL to fail even when the file does not
+ exist. This is due to the MPI_File_open() call failing if the mode has
+ the MPI_MODE_EXCL bit set. (See bug 1468 for details.) AKC - 2009/8/11
+
+* Parallel tests failed with 16 processes with data inconsistency at testphdf5
+ / dataset_readAll. Parallel tests also failed with 32 and 64 processes with
+ collective abort of all ranks at t_posix_compliant / allwrite_allread_blocks
+ with MPI IO. CMC - 2009/04/28
+
+* For Red Storm, a Cray XT3 system, the tools/h5ls/testh5ls.sh and
+ tools/h5copy/testh5copy.sh will fail some of their sub-tests. These sub-tests
+ are expected to fail and should exit with a non-zero code but the yod
+ command does not propagate the exit code of the executables. Yod always
+ returns 0 if it can launch the executable. The test suite shell expects
+ a non-zero for this particular test, therefore it concludes the test has
+ failed when it receives 0 from yod. Skip all the "failing" tests for now
+ by changing them as follows.
+
+ ======== Original tools/h5ls/testh5ls.sh =========
+ TOOLTEST tgroup-1.ls 1 -w80 -r -g tgroup.h5
+ ======== Change to ===============================
+ echo SKIP TOOLTEST tgroup-1.ls 1 -w80 -r -g tgroup.h5
+ ==================================================
+
+ ======== Original tools/h5copy/testh5copy.sh =========
+ TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d grp_rename
+ TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d /grp_rename/grp_dsets
+ TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_dsets -d /E/F/grp_dsets
+ TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_nested -d /G/H/grp_nested
+ H5LSTEST $FILEOUT
+ ======== Change to ===============================
+ echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d grp_rename
+ echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d /grp_rename/grp_dsets
+ echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_dsets -d /E/F/grp_dsets
+ echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_nested -d /G/H/grp_nested
+ echo SKIP H5LSTEST $FILEOUT
+ ==================================================
+ AKC - 2008/11/10
+
+* For Red Storm, a Cray XT3 system, the yod command sometimes gives the
+ message, "yod allocation delayed for node recovery". This interferes with
+ test suites that do not expect to see this message. See the section of "Red
+ Storm" in file INSTALL_parallel for a way to deal with this problem.
+ AKC - 2008/05/28
+
+* On an Intel 64 Linux cluster (RH 4, Linux 2.6.9) with Intel 10.0 compilers,
+ use -mp -O1 compilation flags to build the libraries. A higher level of
+ optimization causes failures in several HDF5 library tests.
+
+* On mpich 1.2.5 and 1.2.6, if more than two processes contribute no IO and
+ the application asks to do collective IO, we have found that when using 4
+ processors, a simple collective write will sometimes hang. This can be
+ verified with the t_mpi test under testpar.
+
+* A dataset created or rewritten with a v1.6.3 or later library cannot be read
+ with the v1.6.2 or earlier library when the Fletcher32 EDC filter is enabled.
+ There was a bug in the calculation of the Fletcher32 checksum in the
+ library before v1.6.3; the checksum value was not consistent between big-
+ endian and little-endian systems. This bug was fixed in Release 1.6.3.
+ However, after fixing the bug, the checksum value was no longer the same as
+ before on little-endian systems. Library releases after 1.6.4 can still read
+ datasets created or rewritten with an HDF5 library of v1.6.2 or earlier.
+ SLU - 2005/6/30
+
+* On IBM AIX systems, parallel HDF5 mode will fail some tests with error
+ messages like "INFO: 0031-XXX ...". This is from the command `poe'.
+ Set the environment variable MP_INFOLEVEL to 0 to minimize the messages
+ and run the tests again.
+
+ The tests may fail with messages like "The socket name is already in use",
+ but HDF5 does not use sockets. This failure is due to problems with the
+ poe command trying to set up the debug socket. To resolve this problem,
+ check to see whether there are many old /tmp/s.pedb.* files left around.
+ These are sockets used by the poe command and left behind by failed
+ commands. Ask your system administrator to clean them out, and request
+ that IBM provide a means to run poe without the debug socket.
+
+* The --enable-static-exec configure flag will only statically link libraries
+ if the static version of that library is present. If only the shared version
+ of a library exists (i.e., most system libraries on Solaris, AIX, and Mac,
+ for example, only have shared versions), the flag should still result in a
+ successful compilation, but note that the installed executables will not be
+ fully static. Thus, the only guarantee on these systems is that the
+ executable is statically linked with just the HDF5 library.
+
+* There is also a configure error on Altix machines that incorrectly reports
+ when a version of Szip without an encoder is being used.
+
+* On cobalt, an SGI Altix SMP ia64 system, Intel compiler version 10.1 (which
+ is the default on that system) does not work properly and results in
+ failures during make check (in a static build) and make installcheck (during
+ a shared build). This appears to be a compiler optimization problem.
+ Reducing optimization by setting CFLAGS to -O1 or below resolves the issue.
+ Alternatively, using a newer version of the compiler (11.0) also works as
+ intended. MAM - 2010/06/01
+
+* h5diff will not report enum value differences when one or both of the values
+ is not a valid enumeration value. The source of this bug has been identified
+ and it will be fixed in 1.8.8. DER - 2011/04/27
+
+
+%%%%1.8.6%%%%
+
+
+HDF5 version 1.8.6 released on Mon Feb 14 10:26:30 CST 2011
+================================================================================
+
+INTRODUCTION
+============
+
+This document describes the differences between HDF5-1.8.5 and
+HDF5 1.8.6, and contains information on the platforms tested and
+known problems in HDF5-1.8.6.
+For more details, see the files HISTORY-1_0-1_8_0_rc3.txt
+and HISTORY-1_8.txt in the release_docs/ directory of the HDF5 source.
+
+Links to the HDF5 1.8.6 source code, documentation, and additional materials
+can be found on the HDF5 web page at:
+
+ http://www.hdfgroup.org/products/hdf5/
+
+The HDF5 1.8.6 release can be obtained from:
+
+ http://www.hdfgroup.org/HDF5/release/obtain5.html
+
+User documentation for 1.8.6 can be accessed directly at this location:
+
+ http://www.hdfgroup.org/HDF5/doc/
+
+New features in the HDF5-1.8.x release series, including brief general
+descriptions of some new and modified APIs, are described in the "What's New
+in 1.8.0?" document:
+
+ http://www.hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html
+
+All new and modified APIs are listed in detail in the "HDF5 Software Changes
+from Release to Release" document, in the section "Release 1.8.6 (current
+release) versus Release 1.8.5":
+
+ http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html
+
+If you have any questions or comments, please send them to the HDF Help Desk:
+
+ help@hdfgroup.org
+
+
+CONTENTS
+========
+
+- New Features
+- Support for New Platforms, Languages, and Compilers
+- Bug Fixes since HDF5-1.8.5
+- Platforms Tested
+- Supported Configuration Features Summary
+- Known Problems
+
+
+New Features
+============
+
+ Configuration
+ -------------
+ - CMake: Improved CPack packaging, added parallel commands, improved
+ configuration options (for closer parity with configure), added more
+ tests, and improved support for use in external CMake projects.
+ (ADB - 2010/10/07)
+ - The default configuration setting for official releases is
+ --enable-production. For unofficial releases, the default configuration
+ setting has been --disable-production. (AKC - 2010/05/28)
+
+ Library
+ -------
+ - Added support for thread safety on Windows using the Windows threads
+ library. Use the HDF5_ENABLE_THREADSAFE option in CMake on a Windows
+ platform to enable this functionality. This is supported on Windows
+ Vista and newer Windows operating systems. (MAM - 2010/09/10)
+ - H5Tset_order and H5Tget_order now support all datatypes. A new byte
+ order, H5T_ORDER_MIXED, has been added specifically for compound
+ datatypes and their derived types; see the sketch at the end of this
+ section. (SLU - 2010/8/23)
+ - Improved performance of metadata I/O by changing the default algorithm
+ to perform I/O from all processes (instead of just process 0) when using
+ parallel I/O drivers. (QAK - 2010/07/19)
+ - Improved performance of I/O on datasets with the same shape, but
+ different rank. (QAK - 2010/07/19)
+ - Improved performance of the chunk cache by avoiding unnecessary b-tree
+ lookups of chunks already in cache. (NAF - 2010/06/15)
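+
+ A minimal sketch of the extended byte order support described above (the
+ member names and layout are arbitrary examples, not from the library):
+
+     #include "hdf5.h"
+
+     int main(void)
+     {
+         /* A compound type with an int member and a double member. */
+         hid_t cmp = H5Tcreate(H5T_COMPOUND, sizeof(int) + sizeof(double));
+         H5Tinsert(cmp, "i", 0, H5T_NATIVE_INT);
+         H5Tinsert(cmp, "d", sizeof(int), H5T_NATIVE_DOUBLE);
+
+         /* Setting the order now applies to every member of the compound. */
+         H5Tset_order(cmp, H5T_ORDER_BE);
+
+         /* H5Tget_order reports H5T_ORDER_MIXED when members differ in order. */
+         H5T_order_t order = H5Tget_order(cmp);
+
+         H5Tclose(cmp);
+         return (order == H5T_ORDER_BE) ? 0 : 1;
+     }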
+
+ Parallel Library
+ ----------------
+ - None
+
+ Tools
+ -----
+ - h5diff: Added a new flag: --exclude-path. The specified path to an
+ object will be excluded when comparing two files or two groups. If a
+ group is specified to be excluded, all member objects of that group
+ will be excluded. (JKM - 2010/09/16).
+ - h5ls: Added a new flag: --no-dangling-links. See --help output for
+ details. (JKM - 2010/06/15)
+ - h5ls: Added a new flag --follow-symlinks. See --help output for
+ details. (JKM - 2010/05/25)
+
+ High-Level APIs
+ ---------------
+ - None
+
+ F90 API
+ -------
+ - None
+
+ C++ API
+ -------
+ - None
+
+
+Support for New Platforms, Languages, and Compilers
+===================================================
+ - Sun C and C++ 5.10 and Sun Fortran 95 8.4.
+ - Mac OS X 10.6.4 with gcc 4.2.1 and gfortran 4.6
+
+
+Bug Fixes since HDF5-1.8.5
+==========================
+
+ Configuration
+ -------------
+ - The default number of MPI processes for testing purposes has been
+ changed from 3 to 6. (AKC - 2010/11/11)
+ - Some tests in tools/h5repack may fail on AIX systems when -q32 mode is
+ used. The error is caused by not requesting enough memory by default.
+ Added "env LDR_CNTRL=MAXDATA=0x20000000@DSA" to $RUNSERIAL and
+ $RUNPARALLEL in the AIX config file so that executables are tested with
+ more memory. (AKC - 2010/11/11)
+ - Removed recognition of the parallel compilers of LAM(hcc) and
+ ChMPIon(cmpicc) since we have no access to these two MPI implementations
+ and cannot verify their correctness. (AKC - 2010/07/14 - Bug 1921)
+ - PHDF5 was changed to use "mpiexec" instead of mpirun as the default
+ MPI application startup command, as defined in section 4.1 of the MPI-2
+ standard. (AKC - 2010/06/11 - Bug 1921)
+
+ Library
+ -------
+ - Fixed a bug that caused big-endian machines to generate corrupt files
+ when using the scale-offset filter with floating-point data or fill
+ values. Note that such datasets will no longer be readable
+ by any machine after this patch. (NAF - 2010/02/02 - Bug 2131)
+ - Retrieving a link's name by index in the case where the link is external
+ and the file that the link refers to doesn't exist will now fail
+ gracefully rather than cause a segmentation fault. (MAM - 2010/11/17)
+ - Modified metadata accumulator to better track accumulated dirty metadata
+ in an effort to reduce unnecessary I/O in certain situations and to
+ fix some other corner cases which were prone to error. (MAM - 2010/10/15)
+ - Added a new set of unit tests that are run during 'make check' to verify
+ the behavior of the metadata accumulator. (MAM - 2010/10/15)
+ - Modified library to always cache symbol table information. Libraries
+ from version 1.6.3 and earlier have a bug which causes them to require
+ this information for some operations. (NAF - 2010/09/21 - Bug 1864)
+ - Fixed a bug where the library could generate an assertion/core dump when
+ a file that had been created with H5Pset_libver_bounds(fapl,
+ H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) but didn't have a superblock
+ extension was later reopened. (QAK - 2010/09/16 - Bug 1968)
+ - Fixed a bug that could occur when getting information for a new-style
+ group that was previously opened through a file handle that was later
+ closed. (NAF - 2010/09/15)
+ - Added define check in H5public.h if stdint.h is supported by the C++
+ compiler. This define is only available on Windows with VS2010 and using
+ CMake to build the library. (ADB - 2010/09/13 - Bug 1938)
+ - When a mandatory filter failed to write data chunks, the dataset
+ could not be closed (bug 1260). The fix releases all resources and closes
+ the dataset but returns a failure. (SLU - 2010/09/08)
+ - H5Eset_current_stack now also closes the error stack set as the
+ default. This is to avoid a potential problem.
+ (SLU - 2010/09/07 - Bug 1799)
+ - Corrected a situation where a 1-D chunked dataset could get created by an
+ application without calling H5Pset_chunk(). H5Pset_chunk is now
+ required for creating all chunked datasets; see the sketch at the end of
+ this section. (QAK - 2010/09/02)
+ - Fixed many memory issues that valgrind exposed. (QAK - 2010/08/24)
+ - Fixed the bug in the filter's public CAN_APPLY function: the return
+ value should be htri_t, not herr_t. (SLU - 2010/08/05 - Bug 1239)
+ - Fixed the STDIO VFD to use fseeko64 instead of fseek64 for 64-bit I/O
+ support. (AKC - 2010/7/30)
+ - Fixed a bug in the direct I/O driver that could render files with certain
+ kinds of unaligned data unreadable or corrupt them. (NAF - 2010/07/28)
+ - valgrind reported an error of copying data to itself when a new attribute
+ is written. Fixed by taking out the memcpy step in the attribute code.
+ (SLU - 2010/07/28 - Bug 1956)
+ - Corrected various issues in the MPI datatype creation code which could
+ cause resource leaks or incorrect behavior (and may improve the
+ performance as well). (QAK - 2010/07/19)
+ - Fixed a bug that could cause file corruption when using non-default sizes
+ of addresses and/or lengths. This bug could also cause uncorrupted files
+ with this property to be unreadable. This bug was introduced in 1.8.5.
+ (NAF - 2010/07/16 - Bug 1951)
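+
+ For reference, a minimal sketch of creating a chunked dataset with an
+ explicit H5Pset_chunk() call, as is now required (the file and dataset
+ names and the sizes below are arbitrary examples):
+
+     #include "hdf5.h"
+
+     int main(void)
+     {
+         hsize_t dims[1]       = {1000};
+         hsize_t chunk_dims[1] = {100};
+
+         hid_t file  = H5Fcreate("data.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+         hid_t space = H5Screate_simple(1, dims, NULL);
+
+         /* The chunk size must be set on the dataset creation property list. */
+         hid_t dcpl = H5Pcreate(H5P_DATASET_CREATE);
+         H5Pset_chunk(dcpl, 1, chunk_dims);
+
+         hid_t dset = H5Dcreate2(file, "/chunked", H5T_NATIVE_INT, space,
+                                 H5P_DEFAULT, dcpl, H5P_DEFAULT);
+
+         H5Dclose(dset);
+         H5Pclose(dcpl);
+         H5Sclose(space);
+         H5Fclose(file);
+         return 0;
+     }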
+
+ Parallel Library
+ ----------------
+ - None
+
+ Tools
+ -----
+ - Fixed h5diff to compare member objects and groups recursively when
+ two files or groups are compared. (JKM - 2010/9/16 - Bug 1975)
+ - Fixed h5repack to be able to convert a dataset to COMPACT layout.
+ (JKM - 2010/09/15 - Bug 1896)
+ - Changed h5ls to not interpret special characters in object or attribute
+ names for output. (JKM - 2010/06/28 - Bug 1784)
+ - Revised the order of arguments for h5cc, h5fc, h5c++, h5pcc and h5pfc.
+ CPPFLAGS, CFLAGS, LDFLAGS, and LIBS have been duplicated with an H5BLD_
+ prefix to put the flags and paths from the hdf5 build in the correct
+ places and allow the script user to add entries in CPPFLAGS, CFLAGS,
+ LDFLAGS, and LIBS that will take precedence over those from the hdf5
+ build. The user can make these entries persistent by editing
+ CFLAGSBASE, CPPFLAGSBASE, LDFLAGSBASE, and LIBSBASE near the top of
+ the script, or temporarily by setting HDF5_CFLAGS, HDF5_CPPFLAGS,
+ HDF5_LDFLAGS, or HDF5_LIBS in the environment. The new order of
+ arguments in these scripts is $CLINKER $H5BLD_CPPFLAGS $CPPFLAGS
+ $H5BLD_CFLAGS $CFLAGS $LDFLAGS $clibpath $link_objs $LIBS $link_args
+ $shared_link. (LRK - 2010/10/25 - Bug 1973)
+
+ F90 API
+ ------
+ - None
+
+ C++ API
+ ------
+ - None
+
+ High-Level APIs:
+ ------
+ - None
+
+ Fortran High-Level APIs:
+ ------
+ - None
+
+
+Platforms Tested
+================
+The following platforms and compilers have been tested for this release.
+
+ AIX 6.1 xlc 11.1.0.3
+ (NCSA BP) xlC 11.1.0.3
+ xlf 13.1.0.3
+ mpcc_r 11.1.0.3
+ mpxlf_r 13.1.0.3
+
+ FreeBSD 6.3-STABLE i386 gcc 3.4.6 [FreeBSD] 20060305
+ (duty) g++ 3.4.6 [FreeBSD] 20060305
+ gcc 4.4.5 20100803
+ g++ 4.4.5 20100803
+ gfortran 4.4.5 20100803
+
+ FreeBSD 6.3-STABLE amd64 gcc 3.4.6 [FreeBSD] 20060305
+ (liberty) g++ 3.4.6 [FreeBSD] 20060305
+ gcc 4.4.5 20100803
+ g++ 4.4.5 20100803
+ gfortran 4.4.5 20100803
+
+ Linux 2.6.18-194.3.1.el5PAE gcc (GCC) 4.1.2 and 4.4.2
+ #1 SMP i686 i686 i386 G95 (GCC 4.0.3 (g95 0.93!) Apr 21 2010)
+ (jam) GNU Fortran (GCC) 4.1.2 20080704
+ (Red Hat 4.1.2-48) and 4.4.2
+ PGI C, Fortran, C++ 10.4-0 32-bit
+ PGI C, Fortran, C++ 10.6-0 32-bit
+ Intel(R) C Compiler for 32-bit
+ applications, Version 11.1
+ Intel(R) C++ Compiler for 32-bit
+ applications, Version 11.1
+ Intel(R) Fortran Compiler for 32-bit
+ applications, Version 11.1
+ Absoft 32-bit Fortran 95 10.0.7
+ MPICH mpich2-1.3.1 compiled with
+ gcc 4.1.2 and gfortran 4.1.2
+
+ Linux 2.6.18-194.17.1.el5 gcc 4.1.2 and 4.4.2
+ #1 SMP x86_64 GNU/Linux G95 (GCC 4.0.3 (g95 0.93!) Apr 21 2010)
+ (amani) tested for both 32- and 64-bit binaries
+ GNU Fortran (GCC) 4.1.2 20080704
+ (Red Hat 4.1.2-46) and 4.4.2
+ Intel(R) C, C++, Fortran Compilers for
+ applications running on Intel(R) 64,
+ Version 11.1.
+ PGI C, Fortran, C++ Version 9.0-4
+ for 64-bit target on x86-64
+ MPICH mpich2-1.3.1 compiled with
+ gcc 4.1.2 and gfortran 4.1.2
+
+ SGI ProPack 7 Linux Intel(R) C++ Version 11.1 20100806
+ 2.6.32.19-0.3.1.1982.0.PTF- Intel(R) Fortran Version 11.1 20100806
+ default #1 SMP SGI MPT 2.01
+ SGI Altix UV
+ (NCSA ember)
+
+ SunOS 5.10 32- and 64-bit Sun C 5.9 Sun OS_sparc Patch 124867-16
+ (linew) Sun Fortran 95 8.3 Sun OS_sparc Patch 127000-13
+ Sun C++ 5.9 Sun OS_sparc Patch 124863-62
+ Sun C 5.10 SunOS_sparc Patch 141861-07
+ Sun Fortran 95 8.4 SunOS_sparc Patch 128231-06
+ Sun C++ 5.10 SunOS_sparc 128228-11
+
+ Intel Xeon Linux 2.6.18- gcc 4.2.4
+ 92.1.10.el5_lustre.1.6.6smp- Intel(R) C++ Version 10.1.017
+ perfctr #8 SMP Intel(R) Fortran Compiler Version 10.1.017
+ (NCSA abe) Open MPI 1.3.2
+ MVAPICH2-1.5.1_pgi-10.8
+
+ Windows XP Visual Studio 2008 w/ Intel Fortran 10.1 (project files)
+ Visual Studio 2008 w/ Intel Fortran 11.1 (cmake)
+ Visual Studio 2010 (cmake)
+ Cygwin(1.7.7 native gcc(4.3.4) compiler and gfortran)
+
+ Windows XP x64 Visual Studio 2008 w/ Intel Fortran 10.1 (project files)
+ Visual Studio 2008 w/ Intel Fortran 11.1 (cmake)
+ Visual Studio 2010 (cmake)
+ Cygwin(1.7.7 native gcc(4.3.4) compiler and gfortran)
+
+ Windows Vista Visual Studio 2008 w/ Intel Fortran 10.1 (project files)
+ Visual Studio 2008 w/ Intel Fortran 11.1 (cmake)
+
+ Windows Vista x64 Visual Studio 2008 w/ Intel Fortran 10.1 (project files)
+ Visual Studio 2008 w/ Intel Fortran 11.1 (cmake)
+
+ Windows 7 Visual Studio 2008 w/ Intel Fortran 11.1 (cmake)
+
+ Windows 7 x64 Visual Studio 2008 w/ Intel Fortran 11.1 (cmake)
+
+ Mac OS X 10.6.3 (Intel 64-bit) i686-apple-darwin10-gcc-4.2.1 (GCC) 4.2.1
+ Darwin Kernel Version 10.3.1 GNU Fortran (GCC) 4.5.0 20090910
+ Intel C, C++ and Fortran compilers 11.1 20100806
+
+ Mac OS X 10.6.4 (Intel 32-bit) i686-apple-darwin10-gcc-4.2.1 (GCC) 4.2.1
+ Darwin Kernel Version 10.4.0 GNU Fortran (GCC) 4.6.0 20101106
+ Intel C, C++ and Fortran compilers 12.0.0 20101110
+
+ Mac OS X 10.6.4 (Intel 64-bit) i686-apple-darwin10-gcc-4.2.1 (GCC) 4.2.1 (Apple Inc. build 5659)
+ Darwin Kernel Version 10.6.0 GNU Fortran (GCC) 4.5.0 20090910
+ Intel C, C++ and Fortran compilers 11.1 20100806
+
+ Fedora 12 2.6.32.16-150.fc12.ppc64 #1 SMP ppc64 GNU/Linux
+ gcc (GCC) 4.4.4 20100630 (Red Hat 4.4.4-10)
+ GNU Fortran (GCC) 4.4.4 20100630 (Red Hat 4.4.4-10)
+
+ Debian5.06 2.6.26-2-686 #1 SMP i686 GNU/Linux
+ gcc (Debian 4.3.2-1.1) 4.3.2
+ GNU Fortran (Debian 4.3.2-1.1) 4.3.2
+
+ Debian5.06 2.6.26-2-amd64 #1 SMP x86_64 GNU/Linux
+ gcc (Debian 4.3.2-1.1) 4.3.2
+ GNU Fortran (Debian 4.3.2-1.1) 4.3.2
+
+ Fedora14 2.6.35.6-48.fc14.i686.PAE #1 SMP i686 i686 i386 GNU/Linux
+ gcc (GCC) 4.5.1 20100924 (Red Hat 4.5.1-4)
+ GNU Fortran (GCC) 4.5.1 20100924 (Red Hat 4.5.1-4)
+
+ Fedora14 2.6.35.6-48.fc14.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux
+ gcc (GCC) 4.5.1 20100924 (Red Hat 4.5.1-4)
+ GNU Fortran (GCC) 4.5.1 20100924 (Red Hat 4.5.1-4)
+
+ SUSE 11.3 2.6.34.7-0.7-desktop #1 SMP PREEMPT i686 i686 i386 GNU/Linux
+ gcc (SUSE Linux) 4.5.0 20100604 [gcc-4_5-branch revision 160292]
+ GNU Fortran (SUSE Linux) 4.5.0 20100604 [gcc-4_5-branch revision 160292]
+
+ SUSE 11.3 2.6.34.7-0.7-desktop #1 SMP PREEMPT x86_64 x86_64 x86_64 GNU/Linux
+ gcc (SUSE Linux) 4.5.0 20100604 [gcc-4_5-branch revision 160292]
+ GNU Fortran (SUSE Linux) 4.5.0 20100604 [gcc-4_5-branch revision 160292]
+
+ Ubuntu 10.10 2.6.35-25-generic #44-Ubuntu SMP i686 GNU/Linux
+ gcc (Ubuntu/Linaro 4.4.4-14ubuntu5) 4.4.5
+ GNU Fortran (Ubuntu/Linaro 4.4.4-14ubuntu5) 4.4.5
+
+ Ubuntu 10.10 2.6.35-25-generic #44-Ubuntu SMP x86_64 GNU/Linux
+ gcc (Ubuntu/Linaro 4.4.4-14ubuntu5) 4.4.5
+ GNU Fortran (Ubuntu/Linaro 4.4.4-14ubuntu5) 4.4.5
+
+ OpenVMS Alpha 8.3 HP C V7.3-009
+ HP Fortran V8.2-104679-48H9K
+ HP C++ V7.3-009
+
+Tested Configuration Features Summary
+========================================
+
+ In the tables below
+ y = tested
+ n = not tested in this release
+ C = Cluster
+ W = Workstation
+ x = not working in this release
+ dna = does not apply
+ ( ) = footnote appears below second table
+ <blank> = testing incomplete on this feature or platform
+
+Platform C F90 F90 C++ zlib SZIP
+ parallel parallel
+Solaris2.10 32-bit n y n y y y
+Solaris2.10 64-bit n y n y y y
+Windows XP n y(4) n y y y
+Windows XP x64 n y(4) n y y y
+Windows Vista n y(4) n y y y
+Windows Vista x64 n y(4) n y y y
+OpenVMS Alpha n y n y y n
+Mac OS X 10.6 Intel n y n y y y
+AIX 6.1 32- and 64-bit y y y y y y
+FreeBSD 6.3-STABLE 32&64 bit n y n y y y
+CentOS 5.5 Linux 2.6.18-194 i686 GNU (1)W y y(2) y y y y
+CentOS 5.5 Linux 2.6.18-194 i686 Intel W n y n y y n
+CentOS 5.5 Linux 2.6.18-194 i686 PGI W n y n y y n
+CentOS 5.5 Linux 2.6.16 x86_64 GNU (1) W y y(3) y y y y
+CentOS 5.5 Linux 2.6.16 x86_64 Intel W n y n y y n
+CentOS 5.5 Linux 2.6.16 x86_64 PGI W n y n y y y
+RedHat EL4 2.6.18 Xeon Lustre C y y y y y n
+Fedora 12 Linux 2.6.32.16-150.fc12.ppc64 n y n y y y
+SGI Linux 2.6.32.19 y y y y y y
+
+
+Platform Shared Shared Shared Thread-
+ C libs F90 libs C++ libs safe
+Solaris2.10 32-bit y y y y
+Solaris2.10 64-bit y y y y
+Windows XP y y(4) y n
+Windows XP x64 y y(4) y n
+Windows Vista y y(4) y y
+Windows Vista x64 y y(4) y y
+OpenVMS Alpha n n n n
+Mac OS X 10.6 y(5) n y n
+AIX 6.1 32- and 64-bit n n n y
+FreeBSD 6.3-STABLE 32&64 bit y n y y
+CentOS 5.5 Linux 2.6.18-128 i686 GNU (1)W y y(2) y y
+CentOS 5.5 Linux 2.6.18-128 i686 Intel W y y y n
+CentOS 5.5 Linux 2.6.18-128 i686 PGI W y y y n
+CentOS 5.5 Linux 2.6.16 x86_64 GNU (1) W y y y y
+CentOS 5.5 Linux 2.6.16 x86_64 Intel W y y y n
+CentOS 5.5 Linux 2.6.16 x86_64 PGI W y y y n
+RedHat EL4 2.6.18 Xeon Lustre C y y y n
+Fedora 12 Linux 2.6.32.16-150.fc12.ppc64 y y y y
+SGI Linux 2.6.32.19 y y y y
+
+ (1) Fortran compiled with gfortran.
+ (2) With PGI and Absoft compilers.
+ (3) With PGI compiler for Fortran.
+ (4) Using Visual Studio 2008 w/ Intel Fortran 10.1 (Cygwin shared libraries are not supported)
+ (5) C and C++ shared libraries will not be built when Fortran is enabled.
+ Compiler versions for each platform are listed in the preceding
+ "Platforms Tested" table.
+
+
+Known Problems
+==============
+* examples/run-all-ex.sh does not work on Cygwin. (NAF - 2011/02/11)
+
+* The parallel test t_shapesame in testpar is rather unstable: it continues to
+ have occasional errors on AIX and fails quite often on NCSA Abe. It is built
+ but is not run automatically by the "make check" command. One would have to
+ run it by hand to see whether it works on a particular machine. AKC - 2011/01/28
+
+* Although OpenVMS Alpha is supported, there are several problems with the C
+ test suite - getname.c, lheap.c, mtime.c, and stab.c. The test
+ suite for h5diff also fails. These failures are from the tests, not the
+ library. We have fixed these failures, but it is too late to put the fixes
+ into this release. If you install the 1.8.6 library, it should still work
+ despite these test failures. If you want a working copy without any
+ test failures, you can request it from us. SLU - 2011/01/26
+
+* If parallel gmake (e.g., gmake -j 4) is used, the "gmake clean" command
+ sometimes fails in the perform directory due to the attempt to remove the
+ executable of h5perf or h5perf_serial by two "parallel" commands. This error
+ has no effect on the functionality of the HDF5 library or the installation. It
+ is fixed in the next release. AKC - 2011/01/25
+
+* While working on the 1.8.6 release of HDF5, a bug was discovered that can
+ occur when reading from a dataset in parallel shortly after it has been
+ written to collectively. The issue was exposed by a new test in the parallel
+ HDF5 test suite, but had existed before that. We believe the problem lies with
+ certain MPI implementations and/or filesystems.
+
+ We have provided a pure MPI test program, as well as a standalone HDF5
+ program, that can be used to determine if this is an issue on your system.
+ They should be run across multiple nodes with a varying number of processes.
+ These programs can be found at:
+ http://www.hdfgroup.org/ftp/HDF5/examples/known_problems/
+
+* The h5diff tool can display garbage values when variable-length strings in
+ a compound type dataset are compared. This also occurs with variable-length
+ string arrays in a compound type dataset. See bug #1989. This will be fixed
+ in the next release. JKM - 2010/11/05
+
+* The AIX --enable-shared setting does not quite work. It can produce a shared
+ library, but there cannot be more than one shared library that is
+ interlinked. This means that the high-level APIs will not work, which is not
+ very useful. We hope to have a solution in the next release.
+ (AKC - 2010/10/15)
+
+* H5Eset_auto can cause a seg fault for a library API call if the application
+ is compiled with -DH5_USE_16_API (see bug 1707). It will be fixed in the
+ next release. SLU - 2010/10/5
+
+* The library's test dt_arith.c showed a compiler's rounding problem on
+ Cygwin when converting an unsigned long long to a long double. The
+ library's own conversion works fine. We defined a macro for Cygwin to
+ skip this test until we can solve the problem. Please see bug #1813.
+ SLU - 2010/5/5
+
+* The VFL drivers are not backward compatible. In H5FDpublic.h, the
+ structure H5FD_class_t changed in 1.8. A new parameter was added to the
+ get_eoa and set_eoa callback functions, and a new callback function
+ get_type_map was added. The public function H5FDrealloc was taken out in
+ 1.8. The problem only happens when users define their own driver for 1.6
+ and try to plug in a 1.8 library. This will be fixed in 1.10. SLU - 2010/2/2
+
+* MinGW has a missing libstdc++.dll.a library file and will not successfully link
+ C++ applications/tests. Do not use the enable-cxx configure option. Read all of
+ the INSTALL_MINGW.txt file for all restrictions. ADB - 2009/11/11
+
+* The PathScale MPI implementation, accessing a Panasas file system, would
+ cause H5Fcreate() with H5F_ACC_EXCL to fail even when the file does not
+ exist. This is due to the MPI_File_open() call failing if the amode has
+ the MPI_MODE_EXCL bit set. (See bug 1468 for details.) AKC - 2009/8/11
+
+* Parallel tests failed with 16 processes with data inconsistency at testphdf5
+ / dataset_readAll. Parallel tests also failed with 32 and 64 processes with
+ collective abort of all ranks at t_posix_compliant / allwrite_allread_blocks
+ with MPI IO. CMC - 2009/04/28
+
+* For Red Storm, a Cray XT3 system, the tools/h5ls/testh5ls.sh and
+ tools/h5copy/testh5copy.sh will fail some of their sub-tests. These
+ sub-tests are expected to fail and should exit with a non-zero code but
+ the yod command does not propagate the exit code of the executables. Yod
+ always returns 0 if it can launch the executable. The test suite shell
+ expects a non-zero for this particular test. Therefore, it concludes the
+ test has failed when it receives 0 from yod. To skip all the "failing"
+ tests for now, change them as shown below.
+
+ ======== Original tools/h5ls/testh5ls.sh =========
+ TOOLTEST tgroup-1.ls 1 -w80 -r -g tgroup.h5
+ ======== Change to ===============================
+ echo SKIP TOOLTEST tgroup-1.ls 1 -w80 -r -g tgroup.h5
+ ==================================================
+
+ ======== Original tools/h5copy/testh5copy.sh =========
+ TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d grp_rename
+ TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d /grp_rename/grp_dsets
+ TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_dsets -d /E/F/grp_dsets
+ TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_nested -d /G/H/grp_nested
+ H5LSTEST $FILEOUT
+ ======== Change to ===============================
+ echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d grp_rename
+ echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d /grp_rename/grp_dsets
+ echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_dsets -d /E/F/grp_dsets
+ echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_nested -d /G/H/grp_nested
+ echo SKIP H5LSTEST $FILEOUT
+ ==================================================
+ AKC - 2008/11/10
+
+* For Red Storm, a Cray XT3 system, the yod command sometimes gives the
+ message "yod allocation delayed for node recovery." This interferes
+ with test suites that do not expect to see this message. See the "Red Storm"
+ section in file INSTALL_parallel for a way to deal with this problem.
+ AKC - 2008/05/28
+
+* On an Intel 64 Linux cluster (RH 4, Linux 2.6.9) with Intel 10.0 compilers,
+ use the -mp -O1 compilation flags to build the libraries. A higher level
+ of optimization causes failures in several HDF5 library tests.
+
+* On mpich 1.2.5 and 1.2.6 on a system using four processors, if more than
+ two processes contribute no I/O and the application asks to do collective
+ I/O, we have found that a simple collective write will sometimes hang. This
+ can be verified with the t_mpi test under testpar.
+
+* A dataset created or rewritten with a v1.6.3 or later library cannot be
+ read with the v1.6.2 or earlier library when the Fletcher32 EDC filter
+ is enabled. There was a bug in the calculation of the Fletcher32 checksum
+ in the library before v1.6.3; the checksum value was not consistent
+ between big-endian and little-endian systems. This bug was fixed in
+ Release 1.6.3. However, after fixing the bug, the checksum value was no
+ longer the same as before on little-endian systems. Library releases after
+ 1.6.4 can still read datasets created or rewritten with an HDF5 library of
+ v1.6.2 or earlier. SLU - 2005/6/30
+
+* On IBM AIX systems, parallel HDF5 mode will fail some tests with error
+ messages like "INFO: 0031-XXX ...". This is from the command `poe'. To
+ work around this, set the environment variable MP_INFOLEVEL to 0 to
+ minimize the messages and run the tests again. The tests may fail with
+ messages like "The socket name is already in use", but HDF5 does not use
+ sockets. This failure is due to problems with the poe command trying to
+ set up the debug socket. To resolve this problem, check to see whether
+ there are any old /tmp/s.pedb.* files around. These are sockets used by
+ the poe command and left behind if the command failed at some point. To
+ resolve this, ask your system administrator to remove the
+ old /tmp/s.pedb.* files, and then ask IBM to provide a means to run poe
+ without the debug socket.
+
+* The --enable-static-exec configure flag will only statically link
+ libraries if the static version of that library is present. If only the
+ shared version of a library exists (i.e., most system libraries on
+ Solaris, AIX, and Mac, for example, only have shared versions), the flag
+ should still result in a successful compilation, but note that the
+ installed executables will not be fully static. Thus, the only guarantee
+ on these systems is that the executable is statically linked with just
+ the HDF5 library.
+
+* On an SGI Altix SMP ia64 system, the Intel compiler version 10.1 (which
+ is the default on that system) does not work properly and results in
+ failures during the make check (in a static build) and the make
+ installcheck (in a shared build). This appears to be a compiler
+ optimization problem. Reducing the optimization by setting CFLAGS to
+ -O1 or below resolves the issue. Using a newer version of the compiler
+ (11.0) avoids the issue. MAM - 2010/06/01
+
+* On Solaris systems, when running the examples with the scripts installed in
+ .../share/hdf5_examples, two of the C tests, h5_extlink and h5_elink_unix2win,
+ may fail or generate HDF5 errors because the script commands in c/run-c-ex.sh
+ fail to create the test directories red, blue, and u2w. Moving the '!' in lines
+ 67, 70, and 73 of run-c-ex.sh will fix the problem. For example, the script
+ command "if ! test -d red; then" will work on Solaris if changed to
+ "if test ! -d red; then".
+
+
+%%%%1.8.5%%%%
+
+
+HDF5 version 1.8.5 released on Fri Jun 4 13:27:31 CDT 2010
+================================================================================
+
+INTRODUCTION
+============
+
+This document describes the differences between HDF5-1.8.4 and HDF5 1.8.5, and
+contains information on the platforms tested and known problems in HDF5-1.8.5.
+For more details, see the files HISTORY-1_0-1_8_0_rc3.txt and HISTORY-1_8.txt
+in the release_docs/ directory of the HDF5 source.
+
+Links to the HDF5 1.8.5 source code, documentation, and additional materials
+can be found on the HDF5 web page at:
+
+ http://www.hdfgroup.org/products/hdf5/
+
+The HDF5 1.8.5 release can be obtained from:
+
+ http://www.hdfgroup.org/HDF5/release/obtain5.html
+
+User documentation for 1.8.5 can be accessed directly at this location:
+
+ http://www.hdfgroup.org/HDF5/doc/
+
+New features in the HDF5-1.8.x release series, including brief general
+descriptions of some new and modified APIs, are described in the "What's New
+in 1.8.0?" document:
+
+ http://www.hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html
+
+All new and modified APIs are listed in detail in the "HDF5 Software Changes
+from Release to Release" document, in the section "Release 1.8.5 (current
+release) versus Release 1.8.4":
+
+ http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html
+
+If you have any questions or comments, please send them to the HDF Help Desk:
+
+ help@hdfgroup.org
+
+
+CONTENTS
+========
+
+- New Features
+- Support for New Platforms, Languages, and Compilers
+- Bug Fixes since HDF5-1.8.4
+- Platforms Tested
+- Supported Configuration Features Summary
+- Known Problems
+
+
+New Features
+============
+
+ Configuration
+ -------------
+ - CMake Early Access: This release adds support for building HDF5 using
+ the CMake system. Initial work has targeted Windows, but other platforms
+ can be used. See the CMake.TXT file for more information. Version
+ 2.8.1 of CMake is required.
+ - Configure now adds appropriate defines for supporting large (64-bit)
+ files by default on all systems where this is supported, instead of only
+ on Linux. This large file support is controllable with the --enable-largefile
+ configure option. The Linux-specific --enable-linux-lfs option has been
+ deprecated in favor of this new option. Please note that specifying
+ --disable-large does NOT attempt to "turn off" largefile support if it
+ is natively supported by the compiler, but rather just prevents
+ configure from actively trying to add any additional compiler flags.
+ (MAM - 2010/05/05 - Bug # 1772/1434)
+ - Fixed a signal handling mask error in H5detect that might result in
+ SIGBUS or SIGSEGV failures on some platforms such as Linux on Sparc.
+ (AKC - 2010/4/28 - Bug # 1764)
+ - Fixed various "strict aliasing" problems, allowing higher levels
+ of compiler optimization (in particular, allowing '-O3' to work
+ with recent versions of GCC). (QAK - 2010/04/26)
+ - Upgraded versions of autotools used to generate configuration suite.
+ We now use Automake 1.11.1, Autoconf 2.65, and Libtool 2.2.6b.
+ (MAM - 2010/04/15)
+ - Added the xlc-* and mpcc_r-* BASENAME patterns to be recognized as IBM
+ compilers so that the IBM compiler options can be added properly. This
+ allows non-system-default compiler command names (e.g., xlc-m.n.k.l) to be
+ recognized. (AKC - 2009/11/26)
+
+ Library
+ -------
+ - Performance is substantially improved when extending a dataset with early
+ allocation; see the sketch at the end of this section.
+ (NAF - 2010/03/24 - Bug # 1637)
+ - Added support for filtering densely stored groups. Many of the API
+ functions related to filters have been extended to support dense groups
+ as well as datasets. Pipeline messages can now be stored in a group's
+ object header. (NAF/QAK - 2009/11/3)
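+
+ A minimal sketch of the early-allocation extension case described above
+ (the file and dataset names and the sizes below are arbitrary examples):
+
+     #include "hdf5.h"
+
+     int main(void)
+     {
+         hsize_t dims[1]     = {100};
+         hsize_t maxdims[1]  = {1000000};
+         hsize_t chunk[1]    = {100};
+         hsize_t new_size[1] = {100000};
+
+         hid_t file  = H5Fcreate("extend.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+         hid_t space = H5Screate_simple(1, dims, maxdims);
+
+         hid_t dcpl = H5Pcreate(H5P_DATASET_CREATE);
+         H5Pset_chunk(dcpl, 1, chunk);
+         H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY);  /* allocate space up front */
+
+         hid_t dset = H5Dcreate2(file, "/extendible", H5T_NATIVE_INT, space,
+                                 H5P_DEFAULT, dcpl, H5P_DEFAULT);
+
+         H5Dset_extent(dset, new_size);                  /* the operation that is now faster */
+
+         H5Dclose(dset);
+         H5Pclose(dcpl);
+         H5Sclose(space);
+         H5Fclose(file);
+         return 0;
+     }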
+
+ Parallel Library
+ ----------------
+ - None
+
+ Tools
+ -----
+ - h5dump: Added the new packed bits feature which prints packed bits stored
+ in an integer dataset. (AKC/ADB - 2010/5/7)
+ - h5diff: Fixed incorrect behavior (hang) in parallel mode when specifying
+ invalid options (ex: -v and -q). (JKM - 2010/02/17)
+ - h5diff: Added new flag --no-dangling-links (see --help for details).
+ (JKM - 2010/02/10)
+ - h5diff: Added new flag --follow-symlinks (see --help for details).
+ (JKM - 2010/01/25)
+ - h5diff: Added a fix to correct the display of garbage values when
+ displaying big-endian data on a little-endian machine. (JKM - 2009/11/20)
+
+ High-Level APIs
+ ---------------
+ - None
+
+ F90 API
+ -------
+ - None
+
+ C++ API
+ -------
+ - New member functions
+ + Overloaded CommonFG::getObjnameByIdx to take char* for name.
+ + Overloaded CommonFG::getObjTypeByIdx to return type name as a char*.
+ (BMR - 2010/05/10)
+ + Added DataSet::getInMemDataSize() to simplify getting the dataset's
+ data size in memory. (BMR - 2009/07/26)
+
+
+Support for New Platforms, Languages, and Compilers
+===================================================
+ - AIX 6.1 has been added. (AKC - 2010/1/4)
+
+
+Bug Fixes since HDF5-1.8.4
+==========================
+
+ Configuration
+ -------------
+ - Fixed various "strict aliasing" problems, allowing higher levels
+ of compiler optimization (in particular, allowing '-O3' to work
+ with recent versions of GCC). (QAK - 2010/04/26)
+
+ Library
+ -------
+ - Fixed a file corruption bug that could happen when shrinking a compressed
+ dataset. (NAF - 2010/05/20)
+ - Fixed some memory leaks in VL datatype conversion when strings are
+ used as fill values. (MAM - 2010/05/12 - Bug # 1826)
+ - Fixed an H5Rcreate failure when passing in a -1 for the dataspace
+ identifier. (ADB - 2010/4/28)
+ - Fixed a bug when copying objects with NULL references with the
+ H5O_COPY_EXPAND_REFERENCE_FLAG flag set. (NAF - 2010/04/08 - Bug # 1815)
+ - Added a mechanism to the H5I interface to save returned object identifier
+ structures for immediate re-use if needed. This addresses a potential
+ performance issue by delaying the case when the next identifier to be
+ registered has grown so large that it wraps around and needs to be
+ checked to see whether it is available for distribution.
+ (MAM - 2010/03/15 - Bug # 1730)
+ - Files can now be concurrently opened more than once using the core file
+ driver, as long as the backing store is used. (NAF - 2010/03/09)
+ - Added support for H5O_COPY_EXPAND_EXT_LINK_FLAG to H5Ocopy. External
+ links will now be expanded if this flag is set; see the sketch following
+ this list. (NAF - 2010/03/05 - Bug # 1733)
+ - Fixed a bug where the library, when traversing an external link, would
+ reopen the source file if nothing else worked. (NAF - 2010/03/05)
+ - Fixed a bug where fractal heap identifiers for attributes and shared
+ object header messages could be incorrectly encoded in the file for
+ files created on big-endian platforms.
+ Please see http://www.hdfgroup.org/HDF5/release/known_problems if you
+ suspect you have a file with this problem.
+ (QAK - 2010/02/23 - Bug # 1755)
+ - Fixed an intermittent bug in the b-tree code which could be triggered
+ by expanding and shrinking chunked datasets in certain ways.
+ (NAF - 2010/02/16)
+ - H5Tdetect_class reported a VL string as a string type, but when the VL
+ string was inside a compound type it reported it as a VL type. This has
+ been fixed to be consistent; a VL string is now always reported as a
+ string type. (SLU - 2009/12/10 - Bug # 1584)
+ - Allow "child" files from external links to be correctly located when
+ relative to a "parent" file that is opened through a symbolic link.
+ (QAK - 2009/12/01)
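+
+ The sketch below (file and object names are placeholders; error checking
+ omitted) shows how the H5O_COPY_EXPAND_EXT_LINK_FLAG mentioned above can
+ be passed to H5Ocopy through an object-copy property list:
+
+     #include "hdf5.h"
+
+     int main(void)
+     {
+         hid_t src, dst, ocpypl;
+
+         src = H5Fopen("source.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
+         dst = H5Fcreate("copy.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+
+         /* Expand external links into the copy instead of copying
+          * them as links. */
+         ocpypl = H5Pcreate(H5P_OBJECT_COPY);
+         H5Pset_copy_object(ocpypl, H5O_COPY_EXPAND_EXT_LINK_FLAG);
+
+         H5Ocopy(src, "/group_with_external_links",
+                 dst, "/expanded_copy", ocpypl, H5P_DEFAULT);
+
+         H5Pclose(ocpypl);
+         H5Fclose(dst);
+         H5Fclose(src);
+         return 0;
+     }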
+
+ Parallel Library
+ ----------------
+ - Parallel mode on AIX failed some of the testcheck_version.sh tests
+ because exit(134) was treated the same as if process 0 had received an
+ abort signal. Fixed. (AKC - 2009/11/3)
+
+ Tools
+ -----
+ - Fixed h5ls to return exit code 1 (error) when a non-existent file is
+ specified. (JKM - 2010/04/27 - Bug # 1793)
+ - Fixed h5copy failure when copying a dangling link that is specified
+ directly. (JKM - 2010/04/22 - Bug # 1817)
+ - Fixed an h5repack failure that lost attributes from a dataset of
+ reference type. (JKM - 2010/3/25 - Bug # 1726)
+ - Fixed h5repack error that set NULL for object reference values for
+ datasets, groups, or named datatypes. (JKM - 2010/03/19 - Bug # 1814)
+
+ F90 API
+ ------
+ - None
+
+ C++ API
+ ------
+ - The constructor PropList::PropList(id) was fixed to act properly
+ according to the nature of 'id'. When 'id' is a property class
+ identifier, a new property list will be created. When 'id' is a
+ property list identifier, a copy of the property list will be made.
+ (BMR - 2010/5/9)
+ - The parameters 'size' and 'bufsize' in CommonFG::getLinkval and
+ CommonFG::getComment, respectively, now have default values for the
+ user's convenience. (BMR - 2009/10/23)
+ - NULL pointer accessing was fixed. (BMR - 2009/10/05 - Bug # 1061)
+ - Read/write methods of the DataSet and Attribute classes were fixed
+ to handle strings correctly. (BMR - 2009/07/26)
+
+ High-Level APIs:
+ ------
+ - Fixed a bug in H5DSattach_scale, H5DSis_attached, and H5DSdetach_scale
+ caused by using the H5Tget_native_type function to determine the native
+ type for reading the REFERENCE_LIST attribute. This bug was exposed
+ on Mac PPC. (EIP - 2010/05/22 - Bug # 1851)
+ - Fixed a bug in the H5DSdetach_scale function when 0 bytes were
+ allocated after the last reference to a dimension scale was removed
+ from the list of references in a VL element of the DIMENSION_LIST
+ attribute. Modified the function to comply with the specification:
+ the DIMENSION_LIST attribute is now deleted when no dimension scales
+ are left attached. (EIP - 2010/05/14 - Bug # 1822)
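+
+ A minimal C sketch of the attach/detach sequence affected by the fixes
+ above (names are placeholders; error checking omitted; link with the
+ high-level library, e.g. -lhdf5_hl):
+
+     #include "hdf5.h"
+     #include "hdf5_hl.h"
+
+     int main(void)
+     {
+         hid_t   file, space, dset, scale;
+         hsize_t dims[1] = {10};
+
+         file  = H5Fcreate("scales.h5", H5F_ACC_TRUNC, H5P_DEFAULT,
+                           H5P_DEFAULT);
+         space = H5Screate_simple(1, dims, NULL);
+         dset  = H5Dcreate2(file, "data", H5T_NATIVE_INT, space,
+                            H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+         scale = H5Dcreate2(file, "x_axis", H5T_NATIVE_FLOAT, space,
+                            H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+
+         H5DSset_scale(scale, "X");        /* mark "x_axis" as a scale  */
+         H5DSattach_scale(dset, scale, 0); /* attach to dimension 0     */
+         H5DSdetach_scale(dset, scale, 0); /* with the fix, the empty
+                                              DIMENSION_LIST attribute
+                                              is deleted as well        */
+
+         H5Dclose(scale);
+         H5Dclose(dset);
+         H5Sclose(space);
+         H5Fclose(file);
+         return 0;
+     }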
+
+ Fortran High-Level APIs:
+ ------
+ - None
+
+
+Platforms Tested
+================
+The following platforms and compilers have been tested for this release.
+
+ AIX 5.3 xlc 7.0.0.9, 8.0.0.20, 9.0.0.4
+ (LLNL Up) xlC 7.0.0.9, 8.0.0.20, 9.0.0.4
+ xlf 9.1.0.9, 10.1.0.9, 11.1.0.7
+ mpcc_r 7.0.0.9
+ mpxlf_r 09.01.0000.0008
+
+ AIX 6.1 xlc 10.1.0.6
+ (NCSA BP) xlC 10.1.0.6
+ xlf 12.1.0.7
+
+ Cray XT3 (2.1.56) cc (pgcc) 10.0-0
+ (SNL red storm) ftn (pgf90) 10.0-0
+ CC (pgCC) 10.0-0
+
+ FreeBSD 6.3-STABLE i386 gcc 3.4.6 [FreeBSD] 20060305
+ (duty) g++ 3.4.6 [FreeBSD] 20060305
+ gcc 4.3.4 20090419
+ g++ 4.3.4 20090419
+ gfortran 4.3.4 20090419
+
+ FreeBSD 6.3-STABLE amd64 gcc 3.4.6 [FreeBSD] 20060305
+ (liberty) g++ 3.4.6 [FreeBSD] 20060305
+ gcc 4.4.1 20090421
+ g++ 4.4.1 20090421
+ gfortran 4.4.1 20090421
+
+ Linux 2.6.18-128.1.6.el5xen gcc (GCC) 4.1.2 20080704 and 4.4.2
+ #1 SMP i686 i686 i386 GNU Fortran (GCC) 4.1.2 20080704 and 4.4.2
+ (jam) g++ (GCC) 4.1.2 20080704 and 4.4.2
+ G95 (GCC 4.0.3 (g95 0.93!) Apr 21 2010)
+ Absoft 32-bit Fortran 95 10.0.7
+ PGI C, Fortran, C++ 10.4-0 32-bit
+ Intel(R) C, C++, Fortran Compilers for 32-bit
+ applications, Version 11.1 Build 20090827
+ MPICH mpich2-1.0.8 compiled with
+ gcc 4.1.2 and GNU Fortran (GCC) 4.1.2
+
+ Linux 2.6.18-164.el5 #1 SMP gcc 4.1.2 20080704 and gcc 4.4.2
+ x86_64 GNU/Linux GNU Fortran (GCC) 4.1.2 20080704 and 4.4.2
+ (amani) g++ (GCC) 4.1.2 20080704 and 4.4.2
+ G95 (GCC 4.0.3 (g95 0.93!) Apr 21 2010)
+ Intel(R) C, C++, Fortran Compilers for
+ applications running on Intel(R) 64,
+ Version 11.1 Build 20090827.
+ PGI C, Fortran, C++ Version 10.4-0
+ for 32 & 64-bit target on x86-64
+ MPICH mpich2-1.0.8 compiled with
+ gcc 4.1.2 and GNU Fortran (GCC) 4.1.2
+
+ Linux 2.6.16.54-0.2.5 #1 Intel(R) C++ Version 11.0.074
+ SGI Altix SMP ia64 Intel(R) Fortran Itanium(R) Version 11.0.074
+ (cobalt) SGI MPI 1.38
+
+ SunOS 5.10 32- and 64-bit Sun C 5.9 SunOS_sparc Patch 124867-14
+ (linew) Sun Fortran 95 8.3 SunOS_sparc
+ Patch 127000-13
+ Sun C++ 5.9 SunOS_sparc Patch 124863-23
+
+ Intel Xeon Linux 2.6.18- Intel(R) C++ Version 10.0.026
+ 92.1.10.el5_lustre.1.6.6smp- Intel(R) Fortran Compiler Version 10.0.026
+ perfctr #7 SMP Open MPI 1.2.2
+ (abe) MVAPICH2-0.9.8p28p2patched-intel-ofed-1.2
+ compiled with icc v10.0.026 and ifort 10.0.026
+
+ Linux 2.6.18-76chaos #1 SMP Intel(R) C, C++, Fortran Compilers for
+ SMP x86_64 GNU/Linux applications running on Intel(R) 64,
+ (SNL Glory) Versions 11.1.
+
+ Windows XP Visual Studio 2008 w/ Intel Fortran 10.1
+ Cygwin(1.7.5 native gcc(4.3.4) compiler and
+ gfortran)
+
+ Windows XP x64 Visual Studio 2008 w/ Intel Fortran 10.1
+
+ Windows Vista Visual Studio 2008 w/ Intel Fortran 10.1
+
+ Windows Vista x64 Visual Studio 2008 w/ Intel Fortran 10.1
+
+ MAC OS 10.6.3 (Intel) i686-apple-darwin10-gcc-4.2.1 (GCC) 4.2.1
+ (pahra) GNU Fortran (GCC) 4.5.0 20090910
+ i686-apple-darwin10-g++-4.2.1 (GCC) 4.2.1
+ Intel C, C++ and Fortran compilers 11.1
+
+ MAC OS 10.5.8 (Intel) i686-apple-darwin9-gcc-4.0.1 (GCC) 4.0.1
+ (tejeda)
+
+ MAC OS 10.5 (PPC) powerpc-apple-darwin9-gcc-4.0.1 (GCC) 4.0.1
+ (juniper-w)
+
+ OpenVMS Alpha V8.3 HP C V7.3-009
+ HP C++ V7.3-009
+ HP Fortran V8.0-1-104669-48GBT
+
+Supported Configuration Features Summary
+========================================
+
+ In the tables below
+ y = tested and supported
+ n = not supported or not tested in this release
+ C = Cluster
+ W = Workstation
+ x = not working in this release
+ dna = does not apply
+ ( ) = footnote appears below second table
+ <blank> = testing incomplete on this feature or platform
+
+Platform C F90 F90 C++ zlib SZIP
+ parallel parallel
+Solaris2.10 32-bit n y n y y y
+Solaris2.10 64-bit n y n y y y
+Windows XP n y(4) n(4) y y y
+Windows XP x64 n y(4) n(4) y y y
+Windows Vista n y(4) n(4) y y y
+Windows Vista x64 n y(4) n(4) y y y
+Mac OS X 10.5 PPC n n n n y n
+Mac OS X 10.5 Intel n y n y y y
+Mac OS X 10.6 Intel n y n y y y
+AIX 5.3 32- and 64-bit n y n y y n
+AIX 6.1 32- and 64-bit n y n y y n
+FreeBSD 6.3-STABLE 32&64 bit n y n y y y
+RedHat EL4 2.6.9-42 i686 GNU (1) W y y y y y y
+RedHat EL5 2.6.18-128 i686 GNU (1)W y y(2) y y y y
+RedHat EL5 2.6.18-128 i686 Intel W n y n y y n
+RedHat EL5 2.6.18-128 i686 PGI W n y n y y n
+SuSe Linux 2.6.16 x86_64 GNU (1) W y y(3) y y y y
+SuSe Linux 2.6.16 x86_64 Intel W n y n y y n
+SuSe Linux 2.6.16 x86_64 PGI W n y n y y y
+SuSe Linux 2.6.16 SGI Altix ia64 C y y y y y y
+RedHat EL4 2.6.18 Xeon Lustre C y y y y y n
+Cray XT3 2.1.56 y y y y y n
+OpenVMS Alpha V8.3 n y n y y n
+
+Platform Shared Shared Shared Thread-
+ C libs F90 libs C++ libs safe
+Solaris2.10 32-bit y y y y
+Solaris2.10 64-bit y y y y
+Windows XP y y(4) y n
+Windows XP x64 y y(4) y n
+Windows Vista y y(4) y n
+Windows Vista x64 y y(4) y n
+Mac OS X 10.5 PPC y n n n
+Mac OS X 10.5 (Intel) y(5) n y n
+Mac OS X 10.6 (Intel) y(5) n y n
+AIX 5.3 32- and 64-bit n n n n
+AIX 6.1 32- and 64-bit n n n n
+FreeBSD 6.3-STABLE 32&64 bit y n y y
+RedHat EL4 2.6.9-42 i686 GNU (1) W y y y y
+RedHat EL5 2.6.18-128 i686 GNU (1)W y y(2) y y
+RedHat EL5 2.6.18-128 i686 Intel W y y y n
+RedHat EL5 2.6.18-128 i686 PGI W y y y n
+SuSe Linux 2.6.16 x86_64 GNU (1) W y y y y
+SuSe Linux 2.6.16 x86_64 Intel W y y y n
+SuSe Linux 2.6.16 x86_64 PGI W y y y n
+SuSe Linux 2.6.16 SGI Altix ia64 C y n
+RedHat EL4 2.6.18 Xeon Lustre C y y y n
+Cray XT3 2.1.56 n n n n
+OpenVMS Alpha V8.3 n n n n
+
+ (1) Fortran compiled with g95.
+ (2) With PGI and Absoft compilers.
+ (3) With PGI compiler for Fortran.
+ (4) Using Visual Studio 2008. (Cygwin shared libraries are not
+ supported.)
+ (5) Shared C and C++ are disabled when Fortran is configured in.
+ Compiler versions for each platform are listed in the preceding
+ "Platforms Tested" table.
+
+
+Known Problems
+==============
+* The library's test dt_arith.c exposed a compiler's rounding problem on
+ Cygwin when converting from unsigned long long to long double. The
+ library's own conversion works correctly. A macro is defined for Cygwin
+ to skip this test until we can solve the problem. (Please see bug #1813.)
+ SLU - 2010/5/5
+
+* The VFL driver interface is not backward compatible between 1.6 and 1.8.
+ In H5FDpublic.h, the structure H5FD_class_t changed in 1.8: a new parameter
+ was added to the get_eoa and set_eoa callback functions, a new callback
+ function get_type_map was added, and the public function H5FDrealloc was
+ removed in 1.8. The problem only occurs when users define their own driver
+ for 1.6 and try to plug it into a 1.8 library. This affects a very small
+ number of users. (See bug report #1279.) SLU - 2010/2/2
+
+* MinGW is missing the libstdc++.dll.a library file and will not successfully
+ link C++ applications/tests. Do not use the --enable-cxx configure option.
+ Read all of the INSTALL_MINGW.txt file for all restrictions.
+ ADB - 2009/11/11
+
+* Some tests in tools/h5repack may fail on AIX systems when -q32 mode is used.
+ The error is due to insufficient memory being requested. Request more
+ runtime memory by setting the following environment variable:
+ LDR_CNTRL=MAXDATA=0x20000000@DSA
+ AKC - 2009/10/31
+
+* The PathScale MPI implementation, accessing a Panasas file system, would
+ cause H5Fcreate() with H5F_ACC_EXCL to fail even when the file does not
+ exist. This is due to the MPI_File_open() call failing if the amode has
+ the MPI_MODE_EXCL bit set. (See bug 1468 for details.) AKC - 2009/8/11
+
+* Parallel tests failed with 16 processes with data inconsistency at testphdf5
+ / dataset_readAll. Parallel tests also failed with 32 and 64 processes with
+ collective abort of all ranks at t_posix_compliant / allwrite_allread_blocks
+ with MPI IO. CMC - 2009/04/28
+
+* For gcc v4.3 and v4.4, in production mode, if -O3 is used, H5Tinit.c
+ fails to compile; in fact, a bad H5Tinit.c is produced. If -O (same
+ as -O1) is used, H5Tinit.c compiles but test/dt_arith fails.
+ When -O0 (no optimization) is used, H5Tinit.c compiles and all
+ tests pass. Therefore, -O0 is imposed for v4.3 and v4.4 of gcc.
+ AKC - 2009/04/20
+
+* For Red Storm, a Cray XT3 system, the tools/h5ls/testh5ls.sh and
+ tools/h5copy/testh5copy.sh scripts will fail some of their sub-tests. These
+ sub-tests are expected to fail and should exit with a non-zero code, but the
+ yod command does not propagate the exit code of the executables. Yod always
+ returns 0 if it can launch the executable. The test suite shell expects a
+ non-zero code for this particular test and therefore concludes the test has
+ failed when it receives 0 from yod. Skip all the "failing" tests for now
+ by changing them as follows.
+
+ ======== Original tools/h5ls/testh5ls.sh =========
+ TOOLTEST tgroup-1.ls 1 -w80 -r -g tgroup.h5
+ ======== Change to ===============================
+ echo SKIP TOOLTEST tgroup-1.ls 1 -w80 -r -g tgroup.h5
+ ==================================================
+
+ ======== Original tools/h5copy/testh5copy.sh =========
+ TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d grp_rename
+ TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d /grp_rename/grp_dsets
+ TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_dsets -d /E/F/grp_dsets
+ TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_nested -d /G/H/grp_nested
+ H5LSTEST $FILEOUT
+ ======== Change to ===============================
+ echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d grp_rename
+ echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d /grp_rename/grp_dsets
+ echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_dsets -d /E/F/grp_dsets
+ echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_nested -d /G/H/grp_nested
+ echo SKIP H5LSTEST $FILEOUT
+ ==================================================
+ AKC - 2008/11/10
+
+* For Red Storm, a Cray XT3 system, the yod command sometimes gives the
+ message "yod allocation delayed for node recovery". This interferes with
+ test suites that do not expect to see this message. See the "Red Storm"
+ section in the file INSTALL_parallel for a way to deal with this problem.
+ AKC - 2008/05/28
+
+* On Intel 64 Linux cluster (RH 4, Linux 2.6.9) with Intel 10.0 compilers,
+ use -mp -O1 compilation flags to build the libraries. A higher level of
+ optimization causes failures in several HDF5 library tests.
+
+* On mpich 1.2.5 and 1.2.6, if more than two processes contribute no IO and
+ the application asks to do collective IO, we have found that when using 4
+ processors, a simple collective write will sometimes hang. This can be
+ verified with the t_mpi test under testpar.
+
+* A dataset created or rewritten with a v1.6.3 library or after cannot be read
+ with the v1.6.2 library or before when the Fletcher32 EDC filter is enabled.
+ There was a bug in the calculation of the Fletcher32 checksum in the
+ library before v1.6.3; the checksum value was not consistent between big-
+ endian and little-endian systems. This bug was fixed in Release 1.6.3.
+ However, after fixing the bug, the checksum value was no longer the same as
+ before on little-endian systems. Library releases after 1.6.4 can still read
+ datasets created or rewritten with an HDF5 library of v1.6.2 or before.
+ SLU - 2005/6/30
+
+* On IBM AIX systems, parallel HDF5 mode will fail some tests with error
+ messages like "INFO: 0031-XXX ...". This is from the command `poe'.
+ Set the environment variable MP_INFOLEVEL to 0 to minimize the messages
+ and run the tests again.
+
+ The tests may fail with messages like "The socket name is already in use,"
+ but HDF5 does not use sockets. This failure is due to problems with the
+ poe command trying to set up the debug socket. To resolve this problem,
+ check to see whether there are many old /tmp/s.pedb.* files staying around.
+ These are sockets used by the poe command and left behind due to failed
+ commands. First, ask your system administrator to clean them out.
+ Lastly, request IBM to provide a means to run poe without the debug socket.
+
+* The --enable-static-exec configure flag will only statically link libraries
+ if the static version of that library is present. If only the shared version
+ of a library exists (i.e., most system libraries on Solaris, AIX, and Mac,
+ for example, only have shared versions), the flag should still result in a
+ successful compilation, but note that the installed executables will not be
+ fully static. Thus, the only guarantee on these systems is that the
+ executable is statically linked with just the HDF5 library.
+
+* There is also a configure error on Altix machines that incorrectly reports
+ when a version of Szip without an encoder is being used.
+
+* On FreeBSD systems when shared libraries are disabled, make install fails
+ in install-examples with the error '"Makefile", line 635: Need an operator'.
+ When this error occurs, removing or commenting out the line "export
+ LD_LIBRARY_PATH=$(LL_PATH)" (line 635 in examples/Makefile) will allow make
+ install to finish installing examples. The problem will be fixed in the
+ next release. LRK - 2010/05/26
+
+* On cobalt, an SGI Altix SMP ia64 system, Intel compiler version 10.1 (which
+ is the default on that system) does not work properly and results in
+ failures during make check (in a static build) and make installcheck (during
+ a shared build). This appears to be a compiler optimization problem.
+ Reducing optimization by setting CFLAGS to -O1 or below resolves the issue.
+ Alternatively, using a newer version of the compiler (11.0) also works as
+ intended. MAM - 2010/06/01
+
+
+%%%%1.8.4%%%%
+
+
+HDF5 version 1.8.4 released on Tue Nov 10 15:33:14 CST 2009
+================================================================================
+
+INTRODUCTION
+============
+
+This document describes the differences between HDF5-1.8.3 and
+HDF5 1.8.4, and contains information on the platforms tested and
+known problems in HDF5-1.8.4.
+For more details, see the files HISTORY-1_0-1_8_0_rc3.txt
+and HISTORY-1_8.txt in the release_docs/ directory of the HDF5 source.
+
+Links to the HDF5 1.8.4 source code, documentation, and additional materials
+can be found on the HDF5 web page at:
+
+ http://www.hdfgroup.org/products/hdf5/
+
+The HDF5 1.8.4 release can be obtained from:
+
+ http://www.hdfgroup.org/HDF5/release/obtain5.html
+
+User documentation for 1.8.4 can be accessed directly at this location:
+
+ http://www.hdfgroup.org/HDF5/doc/
+
+New features in the HDF5-1.8.x release series, including brief general
+descriptions of some new and modified APIs, are described in the "What's New
+in 1.8.0?" document:
+
+ http://www.hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html
+
+All new and modified APIs are listed in detail in the "HDF5 Software Changes
+from Release to Release" document, in the section "Release 1.8.4 (current
+release) versus Release 1.8.3":
+
+ http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html
+
+If you have any questions or comments, please send them to the HDF Help Desk:
+
+ help@hdfgroup.org
+
+
+CONTENTS
+========
+
+- New Features
+- Support for New Platforms, Languages, and Compilers
+- Bug Fixes since HDF5-1.8.3
+- Platforms Tested
+- Supported Configuration Features Summary
+- Known Problems
+
+
+New Features
+============
+
+ Configuration
+ -------------
+ - The configuration suite now uses Automake 1.11 and Autoconf 2.64.
+ MAM 2009/08/31.
+ - Changed the default GNU Fortran compiler from g95 to gfortran, since
+ gfortran is now more likely to be installed with gcc. -AKC 2009/07/19-
+
+ Library
+ -------
+ - The embedded library information is displayed by H5check_version() if a
+ version mismatch is detected. Also changed H5check_version() to
+ suppress the warning message entirely if $HDF5_DISABLE_VERSION_CHECK is 2
+ or higher. (The old behavior treated 3 or higher the same as 1, that is,
+ it printed a warning and allowed the program to continue.) See the sketch
+ following this list. (AKC - 2009/9/28)
+ - If a user does not want the extra library information embedded in the
+ executables, it can be turned off with the --disable-embedded-libinfo
+ configure option. (AKC - 2009/9/15)
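+
+ A small C sketch of the version-checking behavior described above (error
+ checking omitted):
+
+     #include <stdio.h>
+     #include "hdf5.h"
+
+     int main(void)
+     {
+         unsigned maj, min, rel;
+
+         /* Version of the library the program actually runs against
+          * (may differ from the headers it was compiled with). */
+         H5get_libversion(&maj, &min, &rel);
+         printf("Runtime HDF5 library: %u.%u.%u\n", maj, min, rel);
+
+         /* On a mismatch this prints the embedded library information
+          * and aborts; setting HDF5_DISABLE_VERSION_CHECK to 2 or
+          * higher in the environment suppresses the warning entirely. */
+         H5check_version(H5_VERS_MAJOR, H5_VERS_MINOR, H5_VERS_RELEASE);
+
+         return 0;
+     }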
+
+ Parallel Library
+ ----------------
+ - None
+
+ Tools
+ -----
+ - h5diff: h5diff treated two INFINITY values as different. Fixed by
+ checking (value==expect) before calling ABS(...) in h5diff_array.c. This
+ makes (INF==INF) true; INF is treated as a number instead of as a NaN.
+ (PC -- 2009/07/28)
+ - h5diff: Added the option "--use-system-epsilon" to print a difference
+ if (|a-b| > EPSILON). The default was changed to use strict equality.
+ (PC -- 2009/09/12)
+
+ High-Level APIs
+ ---------------
+ - None
+
+ F90 API
+ -------
+ - Added H5Oopen_by_addr_f MSB - 9/14/09
+
+ C++ API
+ -------
+ - None
+
+
+Support for New Platforms, Languages, and Compilers
+===================================================
+ - PathScale compilers are now recognized and can build the HDF5 library
+ properly. AKC - 2009/7/28
+
+
+Bug Fixes since HDF5-1.8.3
+==========================
+
+ Configuration
+ -------------
+ - Removed the following config files, as we no longer support them:
+ config/dec-osf*, config/hpux11.00, config/irix5.x,
+ config/powerpc-ibm-aix4.x, config/rs6000-ibm-aix5.x, config/unicos*
+ MAM - 2009/10/08
+ - Modified configure and make process to properly preserve user's CFLAGS
+ (and company) environment variables. Build will now properly use
+ automake's AM_CFLAGS for any compiler flags set by the configure
+ process. Configure will no longer modify CFLAGS directly, nor will
+ setting CFLAGS during make completely replace what configure has set up.
+ MAM - 2009/10/08
+ - Support for TFLOPS, config/intel-osf1, has been removed since the TFLOPS
+ machine was retired long ago. AKC - 2009/10/06.
+ - Added $(EXEEXT) extension to H5detect when it's executed in the
+ src/Makefile to generate H5Tinit.c so it works correctly on platforms
+ that require the full extension when running executables.
+ MAM - 2009/10/01 - BZ #1613
+ - Configure will now set FC and CXX to "no" when fortran and c++
+ are not being compiled, respectively, so configure will not run
+ some of the compiler tests for these languages when they are not
+ being used. MAM - 2009/10/01
+ - The --enable-static-exec flag will now properly place the -static flag
+ on the link line of all installed executables. This will force the
+ executable to link with static libraries over shared libraries, provided
+ the static libraries are available. MAM - 2009/08/31 - BZ #1583
+ - The PathScale compiler (v3.2) was mistaken for gcc v4.2.0, but it fails to
+ recognize some gcc options. Fixed. (See bug 1301.) AKC - 2009/7/28
+
+ Library
+ -------
+ - Fixed a bug where writing and deleting many global heap objects (i.e.
+ variable length data) would render the file unreadable. Previously
+ created files exhibiting this problem should now be readable.
+ NAF - 2009/10/27 - 1483
+ - Fixed an error in the library's internal caching mechanisms which could
+ cause an assertion failure (and attendant core dump) when encountering an
+ unusually formatted file. (QAK - 2009/10/13)
+ - Fixed incorrect return value for H5Pget_preserve. AKC - 2009/10/08 - 1628
+ - Fixed an assertion failure that occurred when H5Ocopy was called on a
+ dataset using a vlen inside a compound. NAF - 2009/10/02 - 1597
+ - Fixed incorrect return value for H5Pget_filter_by_id1/2 in H5Ppublic.h.
+ NAF - 2009/09/25 - 1620
+ - Fixed a bug where properties weren't being compared with the registered
+ compare callback. NAF - 2009/09/25 - 1555
+ - Corrected problem where library would re-write the superblock in a file
+ opened for R/W access, even when no changes were made to the file.
+ (QAK - 2009/08/20, Bz#1473)
+ - Fixed a bug where H5Pget_filter_by_id would succeed when called for a
+ filter that wasn't present. NAF - 2009/06/25 - 1250
+ - Fixed an issue with committed compound datatypes containing a vlen. Also
+ fixed memory leaks involving committed datatypes. NAF - 2009/06/10 - 1593
+
+ Parallel Library
+ ----------------
+ - None
+
+ Tools
+ -----
+ - h5dump/h5ls display buffer resize fixed in tools library.
+ ADB - 2009/7/21 - 1520
+ - perf_serial test added to Windows projects and check batch file.
+ ADB - 2009/06/11 -1504
+
+
+ F90 API
+ ------
+ - Fixed bug in h5lget_info_by_idx_f by adding missing arguments,
+ consequently changing the API. New API is:
+
+ SUBROUTINE h5lget_info_by_idx_f(loc_id, group_name, index_field, order, n, &
+ link_type, f_corder_valid, corder, cset, address, val_size, hdferr, lapl_id)
+
+ MSB - 2009/9/17 - 1652
+
+ - Corrected the values for the H5L_flags FORTRAN constants:
+ H5L_LINK_ERROR_F, H5L_LINK_HARD_F, H5L_LINK_SOFT_F, H5L_LINK_EXTERNAL_F
+ MSB - 2009-09-17 - 1653
+
+ - Added FORTRAN equivalent of C constant H5T_ORDER_NONE: H5T_ORDER_NONE_F
+ MSB - 2009-9-24 - 1471
+
+ C++ API
+ ------
+ - None
+
+ High-Level APIs:
+ ------
+ - Fixed a bug where the H5TB API would forget the order of fields when added
+ out of offset order. NAF - 2009/10/27 - 1582
+ - H5DSis_attached failed to account for different platform types. Added a
+ get native type call. ADB - 2009/9/29 - 1562
+
+ Fortran High-Level APIs:
+ ------
+ - Lite: the h5ltread_dataset_string_f and h5ltget_attribute_string_f functions
+ had memory problems with the g95 Fortran compiler. (PVN - 5/13/2009) 1522
+
+
+
+Platforms Tested
+================
+The following platforms and compilers have been tested for this release.
+
+ AIX 5.3 xlc 7.0.0.8
+ (LLNL Up) xlf 09.01.0000.0008
+ xlC 7.0.0.8
+ mpcc_r 7.0.0.8
+ mpxlf_r 09.01.0000.0008
+
+ Cray XT3 (2.0.41) cc (pgcc) 7.1-4
+ (SNL red storm) ftn (pgf90) 7.1-4
+ CC (pgCC) 7.1-4
+
+ FreeBSD 6.3-STABLE i386 gcc 3.4.6 [FreeBSD] 20060305
+ (duty) g++ 3.4.6 [FreeBSD] 20060305
+ gcc 4.3.5 20091004
+ g++ 4.3.5 20091004
+ gfortran 4.3.5 20091004
+
+ FreeBSD 6.3-STABLE amd64 gcc 3.4.6 [FreeBSD] 20060305
+ (liberty) g++ 3.4.6 [FreeBSD] 20060305
+ gcc 4.4.2 20091006
+ g++ 4.4.2 20091006
+ gfortran 4.4.2 20091006
+
+ Linux 2.6.18-164.el5 gcc (GCC) 4.1.2 20080704
+ #1 SMP i686 i686 i386 G95 (GCC 4.0.3 (g95 0.92!) Jun 24 2009)
+ (jam) GNU Fortran (GCC) 4.1.2 20080704
+ (Red Hat 4.1.2-46)
+ PGI C, Fortran, C++ 8.0-5 32-bit
+ PGI C, Fortran, C++ 8.0-1 32-bit
+ Intel(R) C Compiler for 32-bit
+ applications, Versions 11.0, 11.1
+ Intel(R) C++ Compiler for 32-bit
+ applications, Version 11.0, 11.1
+ Intel(R) Fortran Compiler for 32-bit
+ applications, Version 11.0, 11.1
+ Absoft 32-bit Fortran 95 10.0.7
+ MPICH mpich2-1.0.8 compiled with
+ gcc (GCC) 4.1.2 and G95
+ (GCC 4.0.3 (g95 0.92!)
+
+ Linux 2.6.18-164.el5 #1 SMP gcc 4.1.2 20080704
+ x86_64 GNU/Linux G95 (GCC 4.0.3 (g95 0.92!) Jun 24 2009)
+ (amani) tested for both 32- and 64-bit binaries
+ Intel(R) C, C++, Fortran Compilers for
+ applications running on Intel(R) 64,
+ Versions 11.1.
+ PGI C, Fortran, C++ Version 9.0-4
+ for 64-bit target on x86-64
+ gcc 4.1.2 and G95 (GCC 4.0.3 (g95 0.92!)
+ MPICH mpich2-1.0.8 compiled with
+ gcc 4.1.2 and G95 (GCC 4.0.3 (g95 0.92!)
+ GNU Fortran (GCC) 4.1.2 20080704
+ (Red Hat 4.1.2-46)
+
+
+ Linux 2.6.16.60-0.42.5 #1 Intel(R) C++ Version 10.1.017
+ SGI Altix SMP ia64 Intel(R) Fortran Itanium(R) Version 10.1.017
+ (cobalt) SGI MPI 1.38
+
+ SunOS 5.10 32- and 64-bit Sun C 5.9 SunOS_sparc Patch 124867-11 2009/04/30
+ (linew) Sun Fortran 95 8.3 SunOS_sparc
+ Patch 127000-11 2009/10/06
+ Sun C++ 5.9 SunOS_sparc
+ Patch 124863-16 2009/09/15
+
+ Intel Xeon Linux 2.6.18- Intel(R) C++ Version 10.0.026
+ 92.1.10.el5_lustre.1.6.6smp- Intel(R) Fortran Compiler Version 10.0.026
+ perfctr #6 SMP Open MPI 1.2.2
+ (abe) MVAPICH2-0.9.8p28p2patched-intel-ofed-1.2
+ compiled with icc v10.0.026 and ifort 10.0.026
+
+ IA-64 Linux 2.4.21-309.tg1 gcc (GCC) 3.2.2
+ #1 SMP ia64 Intel(R) C++ Version 8.1.037
+ (NCSA tg-login) Intel(R) Fortran Compiler Version 8.1.033
+ mpich-gm-1.2.7p1..16-intel-8.1.037-r1
+
+ Linux 2.6.9-55.0.9.EL_lustre Intel(R) C, C++, Fortran Compilers for
+ .1.4.11.1smp #1 SMP applications running on Intel(R) 64,
+ SMP x86_64 GNU/Linux Versions 10.1.
+ (SNL Thunderbird)
+
+ Linux 2.6.18-76chaos #1 SMP Intel(R) C, C++, Fortran Compilers for
+ SMP x86_64 GNU/Linux applications running on Intel(R) 64,
+ (SNL Glory) Versions 10.1.
+
+ Windows XP Visual Studio 2005 w/ Intel Fortran 9.1
+ Cygwin(native gcc compiler and g95)
+
+ Windows XP x64 Visual Studio 2005 w/ Intel Fortran 9.1
+
+ Windows Vista Visual Studio 2005 w/ Intel Fortran 9.1
+
+ Windows Vista x64 Visual Studio 2005 w/ Intel Fortran 9.1
+
+ MAC OS 10.5.6 (Intel) i686-apple-darwin9-gcc-4.0.1 (GCC) 4.0.1
+ GNU Fortran (GCC) 4.3.0 20070810
+ G95 (GCC 4.0.3 (g95 0.91!) Apr 24 2008)
+ Intel C, C++ and Fortran compilers 10.1
+
+
+Supported Configuration Features Summary
+========================================
+
+ In the tables below
+ y = tested and supported
+ n = not supported or not tested in this release
+ C = Cluster
+ W = Workstation
+ x = not working in this release
+ dna = does not apply
+ ( ) = footnote appears below second table
+ <blank> = testing incomplete on this feature or platform
+
+Platform C F90 F90 C++ zlib SZIP
+ parallel parallel
+Solaris2.10 32-bit n y n y y y
+Solaris2.10 64-bit n y n y y y
+Windows XP n y(4) n(4) y y y
+Windows XP x64 n y(4) n(4) y y y
+Windows Vista n n n y y y
+Mac OS X 10.5 Intel n y n y y y
+AIX 5.3 32- and 64-bit n y n y y n
+FreeBSD 6.3-STABLE 32&64 bit n y n y y y
+RedHat EL5 2.6.18-164 i686 GNU (1)W y y(2) y y y y
+RedHat EL5 2.6.18-164 i686 Intel W n y n y y n
+RedHat EL5 2.6.18-164 i686 PGI W n y n y y n
+RedHat EL5 2.6.18-164 x86_64 GNU(1)W y y(3) y y y y
+RedHat EL5 2.6.18-164 x86_64 IntelW n y n y y n
+RedHat EL5 2.6.18-164 x86_64 PGI W n y n y y y
+SuSe Linux 2.6.16 SGI Altix ia64 C y y y y y y
+RedHat EL4 2.6.18 Xeon Lustre C y y y y y n
+SuSe Linux 2.4.21 ia64 Intel C y y y y y n
+Cray XT3 2.0.62 y y y y y n
+
+
+Platform Shared Shared Shared Thread-
+ C libs F90 libs C++ libs safe
+Solaris2.10 32-bit y y y y
+Solaris2.10 64-bit y y y y
+Windows XP y y(4) y y
+Windows XP x64 y y(4) y y
+Windows Vista y n n y
+Mac OS X 10.5 y n y n
+AIX 5.3 32- and 64-bit n n n n
+FreeBSD 6.3-STABLE 32&64 bit y y y y
+RedHat EL5 2.6.18-164 i686 GNU (1)W y y(2) y y
+RedHat EL5 2.6.18-164 i686 Intel W y y y n
+RedHat EL5 2.6.18-164 i686 PGI W y y y n
+RedHat EL5 2.6.18-164 x86_64 GNU(1)W y y y y
+RedHat EL5 2.6.18-164 x86_64 IntelW y y y n
+RedHat EL5 2.6.18-164 x86_64 PGI W y y y n
+SuSe Linux 2.6.16 SGI Altix ia64 C y n
+RedHat EL4 2.6.18 Xeon Lustre C y y y n
+SuSe Linux 2.4.21 ia64 Intel C y y y n
+Cray XT3 2.0.62 n n n n
+
+ (1) Fortran compiled with g95.
+ (2) With PGI and Absoft compilers.
+ (3) With PGI compiler for Fortran.
+ (4) Using Visual Studio 2005 or Cygwin
+ Compiler versions for each platform are listed in the preceding
+ "Platforms Tested" table.
+
+
+Known Problems
+==============
+* Parallel mode on AIX will fail some of the testcheck_version.sh tests because
+ exit(134) is treated the same as if process 0 had received an abort signal.
+ This is fixed and will be available in the next release. AKC - 2009/11/3
+
+* Some tests in tools/h5repack may fail on AIX systems when -q32 mode is used.
+ The error is due to insufficient memory being requested. Request more
+ runtime memory by setting the following environment variable:
+ LDR_CNTRL=MAXDATA=0x20000000@DSA
+ AKC - 2009/10/31
+
+* The PathScale MPI implementation, accessing a Panasas file system, would
+ cause H5Fcreate() with H5F_ACC_EXCL to fail even when the file does not
+ exist. This is due to the MPI_File_open() call failing if the amode has
+ the MPI_MODE_EXCL bit set. (See bug 1468 for details.) AKC - 2009/8/11
+
+* Parallel tests failed with 16 processes with data inconsistency at testphdf5
+ / dataset_readAll. Parallel tests also failed with 32 and 64 processes with
+ collective abort of all ranks at t_posix_compliant / allwrite_allread_blocks
+ with MPI IO. CMC - 2009/04/28
+
+* There is a known issue in which HDF5 will change the timestamp on a file
+ simply by opening it with read/write permissions, even if the file is not
+ modified in any way. This is due to the way in which HDF5 manages the file
+ superblock. A fix is currently underway and should be included in the 1.8.4
+ release of HDF5. MAM - 2009/04/28
+
+* For gcc v4.3 and v4.4, in production mode, if -O3 is used, H5Tinit.c
+ fails to compile; in fact, a bad H5Tinit.c is produced. If -O (same
+ as -O1) is used, H5Tinit.c compiles but test/dt_arith fails.
+ When -O0 (no optimization) is used, H5Tinit.c compiles and all
+ tests pass. Therefore, -O0 is imposed for v4.3 and v4.4 of gcc.
+ AKC - 2009/04/20
+
+* For Red Storm, a Cray XT3 system, the tools/h5ls/testh5ls.sh and
+ tools/h5copy/testh5copy.sh scripts will fail some of their sub-tests. These
+ sub-tests are expected to fail and should exit with a non-zero code, but the
+ yod command does not propagate the exit code of the executables. Yod always
+ returns 0 if it can launch the executable. The test suite shell expects a
+ non-zero code for this particular test and therefore concludes the test has
+ failed when it receives 0 from yod. Skip all the "failing" tests for now
+ by changing them as follows.
+
+ ======== Original tools/h5ls/testh5ls.sh =========
+ TOOLTEST tgroup-1.ls 1 -w80 -r -g tgroup.h5
+ ======== Change to ===============================
+ echo SKIP TOOLTEST tgroup-1.ls 1 -w80 -r -g tgroup.h5
+ ==================================================
+
+ ======== Original tools/h5copy/testh5copy.sh =========
+ TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d grp_rename
+ TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d /grp_rename/grp_dsets
+ TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_dsets -d /E/F/grp_dsets
+ TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_nested -d /G/H/grp_nested
+ H5LSTEST $FILEOUT
+ ======== Change to ===============================
+ echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d grp_rename
+ echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d /grp_rename/grp_dsets
+ echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_dsets -d /E/F/grp_dsets
+ echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_nested -d /G/H/grp_nested
+ echo SKIP H5LSTEST $FILEOUT
+ ==================================================
+ AKC - 2008/11/10
+
+* For Red Storm, a Cray XT3 system, the yod command sometimes gives the
+ message "yod allocation delayed for node recovery". This interferes with
+ test suites that do not expect to see this message. See the "Red Storm"
+ section in the file INSTALL_parallel for a way to deal with this problem.
+ AKC - 2008/05/28
+
+* On Intel 64 Linux cluster (RH 4, Linux 2.6.9) with Intel 10.0 compilers,
+ use -mp -O1 compilation flags to build the libraries. A higher level of
+ optimization causes failures in several HDF5 library tests.
+
+* On mpich 1.2.5 and 1.2.6, if more than two processes contribute no IO and
+ the application asks to do collective IO, we have found that when using 4
+ processors, a simple collective write will sometimes hang. This can be
+ verified with the t_mpi test under testpar.
+
+* A dataset created or rewritten with a v1.6.3 library or after cannot be read
+ with the v1.6.2 library or before when the Fletcher32 EDC filter is enabled.
+ There was a bug in the calculation of the Fletcher32 checksum in the
+ library before v1.6.3; the checksum value was not consistent between big-
+ endian and little-endian systems. This bug was fixed in Release 1.6.3.
+ However, after fixing the bug, the checksum value was no longer the same as
+ before on little-endian systems. Library releases after 1.6.4 can still read
+ datasets created or rewritten with an HDF5 library of v1.6.2 or before.
+ SLU - 2005/6/30
+
+* On IBM AIX systems, parallel HDF5 mode will fail some tests with error
+ messages like "INFO: 0031-XXX ...". This is from the command `poe'.
+ Set the environment variable MP_INFOLEVEL to 0 to minimize the messages
+ and run the tests again.
+
+ The tests may fail with messages like "The socket name is already in use",
+ but HDF5 does not use sockets. This failure is due to problems with the
+ poe command trying to set up the debug socket. To resolve this problem,
+ check to see whether there are many old /tmp/s.pedb.* files staying around.
+ These are sockets used by the poe command and left behind due to failed
+ commands. First, ask your system administrator to clean them out.
+ Lastly, request IBM to provide a means to run poe without the debug socket.
+
+* The --enable-static-exec configure flag will only statically link libraries
+ if the static version of that library is present. If only the shared version
+ of a library exists (i.e., most system libraries on Solaris, AIX, and Mac,
+ for example, only have shared versions), the flag should still result in a
+ successful compilation, but note that the installed executables will not be
+ fully static. Thus, the only guarantee on these systems is that the
+ executable is statically linked with just the HDF5 library.
+
+* There is also a configure error on Altix machines that incorrectly reports
+ when a version of Szip without an encoder is being used.
+
+%%%%1.8.3%%%%
+
+
+HDF5 version 1.8.3 released on Mon May 4 09:21:00 CDT 2009
+================================================================================
+
+INTRODUCTION
+============
+
+This document describes the differences between HDF5-1.8.2 and
+HDF5 1.8.3, and contains information on the platforms tested and
+known problems in HDF5-1.8.3.
+For more details, see the files HISTORY-1_0-1_8_0_rc3.txt
+and HISTORY-1_8.txt in the release_docs/ directory of the HDF5 source.
+
+Links to the HDF5 1.8.3 source code, documentation, and additional materials
+can be found on the HDF5 web page at:
+
+ http://www.hdfgroup.org/products/hdf5/
+
+The HDF5 1.8.3 release can be obtained from:
+
+ http://www.hdfgroup.org/HDF5/release/obtain5.html
+
+User documentation for 1.8.3 can be accessed directly at this location:
+
+ http://www.hdfgroup.org/HDF5/doc/
+
+New features in the HDF5-1.8.x release series, including brief general
+descriptions of some new and modified APIs, are described in the "What's New
+in 1.8.0?" document:
+
+ http://www.hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html
+
+All new and modified APIs are listed in detail in the "HDF5 Software Changes
+from Release to Release" document, in the section "Release 1.8.3 (current
+release) versus Release 1.8.2":
+
+ http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html
+
+If you have any questions or comments, please send them to the HDF Help Desk:
+
+ help@hdfgroup.org
+
+
+CONTENTS
+========
+
+- New Features
+- Support for New Platforms, Languages, and Compilers
+- Bug Fixes since HDF5-1.8.2
+- Platforms Tested
+- Supported Configuration Features Summary
+- Known Problems
+
+
+New Features
+============
+
+ Configuration
+ -------------
+ - Added libtool version numbers to generated c++, fortran, and
+ hl libraries. MAM 2009/04/19.
+ - Regenerated Makefile.ins using Automake 1.10.2. MAM 2009/04/19.
+ - Added a Make target of check-all-install to test the correctness of
+ installing via the prefix= or $DESTDIR options. AKC - 2009/04/14
+
+ Library
+ -------
+ - Embed the content of libhdf5.settings into the hdf5 executables
+ so that an "orphaned" executable can display (via the Unix
+ strings command, for example) the library settings used to build
+ it. This is a prototype implementation; improvements will be
+ added in the next release. AKC - 2009/04/20
+ - Separated "factory" free list class from block free lists. These free
+ lists are dynamically created and manage blocks of a fixed size.
+ H5set_free_list_limits() will use the same settings specified for block
+ free lists for factory free lists. NAF - 2009/04/08
+ - Added support for dense attributes to H5Ocopy. XCao/NAF - 2009/01/29
+ - Added H5Pset_elink_cb and H5Pget_elink_cb functions to support a
+ user-defined callback function for external link traversal.
+ NAF - 2009/01/08
+ - Added H5Pset_elink_acc_flags and H5Pget_elink_acc_flags functions to
+ allow the user to specify the file access flags used to open the target
+ file of an external link. NAF - 2009/01/08
+ - Added H5Pset_chunk_cache() and H5Pget_chunk_cache() functions to allow
+ individual rdcc configuration for each dataset. Added the
+ H5Dget_access_plist() function to retrieve a dataset access property
+ list from a dataset; see the sketch following this list.
+ NAF - 2008/11/12
+ - Added H5Iis_valid() function to check if an id is valid without
+ producing an error message. NAF - 2008/11/5
+ - Added code to maintain a min_clean_fraction in the metadata cache when
+ in serial mode. MAM - 2009/01/9
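+
+ A minimal C sketch of the per-dataset chunk cache and H5Dget_access_plist()
+ items above (file and dataset names are placeholders; error checking
+ omitted):
+
+     #include "hdf5.h"
+
+     int main(void)
+     {
+         hid_t  file, dapl, dset, dapl_out;
+         size_t nslots, nbytes;
+         double w0;
+
+         file = H5Fopen("example.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
+
+         /* Per-dataset chunk cache: 12421 slots, 16 MB, default w0. */
+         dapl = H5Pcreate(H5P_DATASET_ACCESS);
+         H5Pset_chunk_cache(dapl, 12421, 16 * 1024 * 1024,
+                            H5D_CHUNK_CACHE_W0_DEFAULT);
+
+         dset = H5Dopen2(file, "chunked_dataset", dapl);
+
+         if (H5Iis_valid(dset) > 0) {    /* no error message if invalid */
+             /* Retrieve the access property list the dataset uses. */
+             dapl_out = H5Dget_access_plist(dset);
+             H5Pget_chunk_cache(dapl_out, &nslots, &nbytes, &w0);
+             H5Pclose(dapl_out);
+             H5Dclose(dset);
+         }
+
+         H5Pclose(dapl);
+         H5Fclose(file);
+         return 0;
+     }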
+
+ Parallel Library
+ ----------------
+ - Modified parallel tests to run with an arbitrary number of processes. The
+ modified tests are testphdf5 (parallel dataset access), t_chunk_alloc
+ (chunk allocation), and t_posix_compliant (posix compliance). The rest of
+ the parallel tests already use the number of processes available in the
+ communicator. (CMC - 2009/04/28)
+
+ Tools
+ -----
+ - h5diff: Added a new flag, -c, --compare, to list objects that are not
+ comparable. PVN - 2009/4/2 - 1368
+ - h5diff: Added a new flag, -N, --nan, to avoid NaN detection. PVN - 2009/4/2
+ - h5dump now correctly specifies XML DTD/schema URLs. ADB - 2009/4/3 - 1519
+ - h5repack now handles group creation order. PVN - 2009/4/2 - 1402
+ - h5repack: When the user doesn't specify a chunk size, h5repack now
+ defines a default chunk size equal to the size of the hyperslab used to
+ read the chunks. The size of the hyperslab is defined as the size of
+ each dimension or a predefined constant, whichever is smaller. This
+ ensures that the chunk read fits in the chunk cache. PVN - 2008/11/21
+
+ High-Level APIs
+ ---------------
+ - Table: In version 3.0 of Table, the writing of the "NROWS" attribute
+ (used to store number of records) was deprecated. PVN - 2008/11/24
+
+ F90 API
+ -------
+ - Added for the C APIs the Fortran wrappers:
+ h5dget_access_plist_f
+ h5iis_valid_f
+ h5pset_chunk_cache_f
+ h5pget_chunk_cache_f
+ MSB - 2009/04/17
+
+ C++ API
+ -------
+ - None
+
+
+Support for New Platforms, Languages, and Compilers
+===================================================
+
+
+Bug Fixes since HDF5-1.8.2
+==========================
+
+ Configuration
+ -------------
+ - The --includedir=DIR configuration option now works as intended, and
+ can be used to specify the location to install C header files. The
+ default location remains unchanged, residing at ${prefix}/include.
+ MAM - 2009/03/10 - BZ #1381
+ - Configure no longer removes the '-g' flag from CFLAGS when in production
+ mode if it has been explicitly set in the CFLAGS environment variable
+ prior to configuration. MAM - 2009/03/09 - BZ #1401
+
+ Library
+ -------
+ - Added versioning to H5Z_class_t struct to allow compatibility with 1.6
+ API. NAF - 2009/04/20 - 1533
+ - Fixed a problem with using data transforms with non-native types in the
+ file. NAF - 2009/04/20 - 1548
+ - Added direct.h include file to windows section of H5private.h
+ to fix _getcwd() warning. ADB - 2009/04/14 - 1536
+ - Fixed a bug that prevented external links from working after calling
+ H5close(). NAF - 2009/04/10 - 1539
+ - Modified library to write cached symbol table information to the
+ superblock, to allow library versions 1.3.0 to 1.6.3 to read files created
+ by this version. NAF - 2009/04/08 - 1423
+ - Changed skip lists to use a deterministic algorithm. The library should
+ now never call rand() or srand(). NAF - 2009/04/08 - 503
+ - Fixed a bug where H5Lcopy and H5Lmove wouldn't create intermediate groups
+ when that property was set; see the sketch following this list.
+ NAF - 2009/04/07 - 1526
+ - Fixed a bug that caused files with a user block to grow by the size of the
+ user block every time they were opened. NAF - 2009/03/26 - 1499
+ - Fixed a rare problem that could occur with files using the old (pre 1.4)
+ array datatype. NAF - 2009/03/23
+ - Modified library to be able to open files with corrupt root group symbol
+ table messages, and correct these errors if they are found. Such files
+ can only be successfully opened with write access. NAF - 2009/03/23 - 1189
+ - Removed the long_long #define and replaced all instances with
+ "long long". This caused problems with third party products. All
+ currently supported compilers support the type. ADB - 2009/03/05
+ - Fixed various bugs that could prevent the fill value from being written
+ in certain rare cases. NAF - 2009/02/26 - 1469
+ - Fixed a bug that prevented more than one dataset chunk from being cached
+ at a time. NAF - 2009/02/12 - 1015
+ - Fixed an assertion failure caused by opening an attribute multiple times
+ through multiple file handles. NAF - 2009/02/12 - 1420
+ - Fixed a problem that could prevent the user from adding attributes (or any
+ object header message) in some circumstances. NAF - 2009/02/12 - 1427
+ - Fixed a bug that could cause problems when an attribute was added to a
+ committed datatype using the committed datatype's datatype.
+ NAF - 2009/02/12
+ - Fixed a bug that could cause problems when copying an object with a shared
+ message in its own object header. NAF - 2009/01/29
+ - Changed H5Tset_order to properly reject H5T_ORDER_NONE for most datatypes.
+ NAF - 2009/01/27 - 1443
+ - Fixed a bug where H5Tpack wouldn't remove trailing space from an otherwise
+ packed compound type. NAF - 2009/01/14
+ - Fixed up some old v2 btree assertions that get run in debug mode that
+ were previously failing on compilation, and removed some of the
+ more heavily outdated and non-rewritable ones. MAM - 2008/12/15
+ - Fixed a bug that could cause problems when "automatically" unmounting
+ multiple files. NAF - 2008/11/17
+ - H5Dset_extent: when shrinking dimensions, some chunks were not deleted.
+ PVN - 2009/01/8
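+
+ A minimal C sketch of the intermediate-group behavior referenced above
+ (file and link names are placeholders; error checking omitted):
+
+     #include "hdf5.h"
+
+     int main(void)
+     {
+         hid_t file, lcpl;
+
+         file = H5Fopen("links.h5", H5F_ACC_RDWR, H5P_DEFAULT);
+
+         /* Link creation property list: create missing intermediate
+          * groups automatically. */
+         lcpl = H5Pcreate(H5P_LINK_CREATE);
+         H5Pset_create_intermediate_group(lcpl, 1);
+
+         /* /new and /new/deep need not exist beforehand. */
+         H5Lcopy(file, "/dset", file, "/new/deep/dset", lcpl, H5P_DEFAULT);
+
+         H5Pclose(lcpl);
+         H5Fclose(file);
+         return 0;
+     }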
+
+ Parallel Library
+ ----------------
+ - None
+
+ Tools
+ -----
+ - Fixed many problems that could occur when using h5repack with named
+ datatypes. NAF - 2009/4/20 - 1516/1466
+ - h5dump, h5diff, h5repack were not reading (by hyperslabs) datasets
+ that have a datatype datum size greater than H5TOOLS_BUFSIZE, a constant
+ defined as 1024Kb, such as array types with large dimensions.
+ PVN - 2009/4/1 - 1501
+ - h5import: Selecting a compression type caused a big-endian byte order
+ to be selected. PVN - 2009/3/11 - 1462
+ - zip_perf.c had a missing argument in one of the open() calls. Fixed.
+ AKC - 2008/12/9
+
+ F90 API
+ ------
+ - None
+
+ C++ API
+ ------
+ - None
+
+ High-Level APIs:
+ ------
+ - Dimension scales: The scale index return value in H5DSiterate_scales
+ was not always incremented. PVN - 2009/4/8 - 1538
+
+ Fortran High-Level APIs:
+ ------
+ - Lite: The h5ltget_dataset_info_f function (gets information about
+ a dataset) was not correctly returning the dimension array.
+ PVN - 2009/3/23
+
+
+Platforms Tested
+================
+The following platforms and compilers have been tested for this release.
+
+ AIX 5.3 xlc 7.0.0.8
+ (LLNL Up) xlf 09.01.0000.0008
+ xlC 7.0.0.8
+ mpcc_r 7.0.0.8
+ mpxlf_r 09.01.0000.0008
+
+ Cray XT3 (2.0.41) cc (pgcc) 7.1-4
+ (SNL red storm) ftn (pgf90) 7.1-4
+ CC (pgCC) 7.1-4
+
+ FreeBSD 6.3-STABLE i386 gcc 3.4.6 [FreeBSD] 20060305
+ (duty) g++ 3.4.6 [FreeBSD] 20060305
+ gcc 4.3.4 20090419
+ g++ 4.3.4 20090419
+ gfortran 4.3.4 20090419
+
+ FreeBSD 6.3-STABLE amd64 gcc 3.4.6 [FreeBSD] 20060305
+ (liberty) g++ 3.4.6 [FreeBSD] 20060305
+ gcc 4.4.1 20090421
+ g++ 4.4.1 20090421
+ gfortran 4.4.1 20090421
+
+ IRIX64 6.5 (64 & n32) MIPSpro cc 7.4.4m
+ F90 MIPSpro 7.4.4m
+ C++ MIPSpro cc 7.4.4m
+
+ Linux 2.6.18-128.1.6.el5xen gcc (GCC) 4.1.2
+ #1 SMP i686 i686 i386 G95 (GCC 4.0.3 (g95 0.92!) Feb 4 2009)
+ (jam) PGI C, Fortran, C++ 7.2-1 32-bit
+ PGI C, Fortran, C++ 8.0-1 32-bit
+ Intel(R) C Compiler for 32-bit
+ applications, Versions 10.1, 11.0
+ Intel(R) C++ Compiler for 32-bit
+ applications, Version 10.1, 11.0
+ Intel(R) Fortran Compiler for 32-bit
+ applications, Version 10.1, 11.0
+ Absoft 32-bit Fortran 95 10.0.7
+ MPICH mpich2-1.0.8 compiled with
+ gcc 4.1.2 and G95 (GCC 4.0.3 (g95 0.92!)
+
+ Linux 2.6.9-42.0.10.ELsmp #1 gcc (GCC) 3.4.6
+ SMP i686 i686 i386 G95 (GCC 4.0.3 (g95 0.92!) Feb 4 2009)
+ (kagiso) MPICH mpich2-1.0.8 compiled with
+ gcc 3.4.6 and G95 (GCC 4.0.3 (g95 0.92!)
+
+ Linux 2.6.16.60-0.37-smp #1 gcc 4.1.2
+ SMP x86_64 GNU/Linux G95 (GCC 4.0.3 (g95 0.92!) Feb 4 2009)
+ (smirom) Intel(R) C, C++, Fortran Compilers for
+ applications running on Intel(R) 64,
+ Versions 10.1, 11.0.
+ PGI C, Fortran, C++ Version 7.2-1, 8.0-1
+ for 64-bit target on x86-64
+ gcc 4.1.2 and G95 (GCC 4.0.3 (g95 0.92!)
+ MPICH mpich2-1.0.8 compiled with
+ gcc 4.1.2 and G95 (GCC 4.0.3 (g95 0.92!)
+ tested for both 32- and 64-bit binaries
+
+ Linux 2.6.16.54-0.2.5 #1 Intel(R) C++ Version 10.1.017
+ SGI Altix SMP ia64 Intel(R) Fortran Itanium(R) Version 10.1.017
+ (cobalt) SGI MPI 1.38
+
+ SunOS 5.10 32- and 64-bit Sun WorkShop 6 update 2 C 5.9 Patch 124867-09
+ (linew) Sun WorkShop 6 update 2 Fortran 95 8.3
+ Patch 127000-07
+ Sun WorkShop 6 update 2 C++ 5.8
+ Patch 124863-11
+
+ Intel Xeon Linux 2.6.18- gcc 3.4.6 20060404
+ 92.1.10.el5_lustre.1.6.6smp- Intel(R) C++ Version 10.0.026
+ perfctr #2 SMP Intel(R) Fortran Compiler Version 10.0.026
+ (abe) Open MPI 1.2.2
+ MVAPICH2-0.9.8p28p2patched-intel-ofed-1.2
+ compiled with icc v10.0.026 and ifort 10.0.026
+
+ IA-64 Linux 2.4.21-309.tg1 gcc (GCC) 3.2.2
+ #1 SMP ia64 Intel(R) C++ Version 8.1.037
+ (NCSA tg-login) Intel(R) Fortran Compiler Version 8.1.033
+ mpich-gm-1.2.7p1..16-intel-8.1.037-r1
+
+ Linux 2.6.9-55.0.9.EL_lustre Intel(R) C, C++, Fortran Compilers for
+ .1.4.11.1smp #1 SMP applications running on Intel(R) 64,
+ SMP x86_64 GNU/Linux Versions 9.1.
+ (SNL Spirit)
+
+ Linux 2.6.9-55.0.9.EL_lustre Intel(R) C, C++, Fortran Compilers for
+ .1.4.11.1smp #1 SMP applications running on Intel(R) 64,
+ SMP x86_64 GNU/Linux Versions 10.1.
+ (SNL Thunderbird)
+
+ Linux 2.6.18-63chaos #1 SMP Intel(R) C, C++, Fortran Compilers for
+ SMP x86_64 GNU/Linux applications running on Intel(R) 64,
+ (SNL Glory) Versions 10.1.
+
+ Linux 2.6.18-63chaos #1 SMP Intel(R) C, C++, Fortran Compilers for
+ SMP x86_64 GNU/Linux applications running on Intel(R) 64,
+ (LLNL Zeus) Versions 9.1.
+ gcc/gfortran/g++ (GCC) 4.1.2.
+
+ Windows XP Visual Studio .NET
+ Visual Studio 2005 w/ Intel Fortran 9.1
+ Cygwin(native gcc compiler and g95)
+
+ Windows XP x64 Visual Studio 2005 w/ Intel Fortran 9.1
+
+ Windows Vista Visual Studio 2005
+
+ MAC OS 10.5.6 (Intel) i686-apple-darwin9-gcc-4.0.1 (GCC) 4.0.1
+ GNU Fortran (GCC) 4.3.0 20070810
+ G95 (GCC 4.0.3 (g95 0.91!) Apr 24 2008)
+ Intel C, C++ and Fortran compilers 10.1
+
+
+Supported Configuration Features Summary
+========================================
+
+ In the tables below
+ y = tested and supported
+ n = not supported or not tested in this release
+ C = Cluster
+ W = Workstation
+ x = not working in this release
+ dna = does not apply
+ ( ) = footnote appears below second table
+ <blank> = testing incomplete on this feature or platform
+
+Platform C F90 F90 C++ zlib SZIP
+ parallel parallel
+Solaris2.10 32-bit n y n y y y
+Solaris2.10 64-bit n y n y y y
+IRIX64_6.5 32-bit n n n n y y
+IRIX64_6.5 64-bit n y y y y y
+Windows XP n y(4) n(4) y y y
+Windows XP x64 n y(4) n(4) y y y
+Windows Vista n n n y y y
+Mac OS X 10.5 Intel n y n y y y
+AIX 5.3 32- and 64-bit n y n y y n
+FreeBSD 6.3-STABLE 32&64 bit n y n y y y
+RedHat EL4 2.6.9-42 i686 GNU (1) W y y y y y y
+RedHat EL5 2.6.18-128 i686 GNU (1)W y y(2) y y y y
+RedHat EL5 2.6.18-128 i686 Intel W n y n y y n
+RedHat EL5 2.6.18-128 i686 PGI W n y n y y n
+SuSe Linux 2.6.16 x86_64 GNU (1) W y y(3) y y y y
+SuSe Linux 2.6.16 x86_64 Intel W n y n y y n
+SuSe Linux 2.6.16 x86_64 PGI W n y n y y y
+SuSe Linux 2.6.16 SGI Altix ia64 C y y y y y y
+RedHat EL4 2.6.18 Xeon Lustre C y y y y y n
+SuSe Linux 2.4.21 ia64 Intel C y y y y y n
+Cray XT3 2.0.41 y y y y y n
+
+
+Platform Shared Shared Shared Thread-
+ C libs F90 libs C++ libs safe
+Solaris2.10 32-bit y y y y
+Solaris2.10 64-bit y y y y
+IRIX64_6.5 32-bit y dna y y
+IRIX64_6.5 64-bit y y n y
+Windows XP y y(4) y y
+Windows XP x64 y y(4) y y
+Windows Vista y n n y
+Mac OS X 10.5 y n y n
+AIX 5.3 32- and 64-bit n n n n
+FreeBSD 6.3-STABLE 32&64 bit y n y y
+RedHat EL4 2.6.9-42 i686 GNU (1) W y y y y
+RedHat EL5 2.6.18-128 i686 GNU (1)W y y(2) y y
+RedHat EL5 2.6.18-128 i686 Intel W y y y n
+RedHat EL5 2.6.18-128 i686 PGI W y y y n
+SuSe Linux 2.6.16 x86_64 GNU (1) W y y y y
+SuSe Linux 2.6.16 x86_64 Intel W y y y n
+SuSe Linux 2.6.16 x86_64 PGI W y y y n
+SuSe Linux 2.6.16 SGI Altix ia64 C y n
+RedHat EL4 2.6.18 Xeon Lustre C y y y n
+SuSe Linux 2.4.21 ia64 Intel C y y y n
+Cray XT3 2.0.41 n n n n
+
+ (1) Fortran compiled with g95.
+ (2) With PGI and Absoft compilers.
+ (3) With PGI compiler for Fortran.
+ (4) Using Visual Studio 2005 or Cygwin
+ Compiler versions for each platform are listed in the preceding
+ "Platforms Tested" table.
+
+
+Known Problems
+==============
+* Parallel tests failed with 16 processes with data inconsistency at testphdf5
+ / dataset_readAll. Parallel tests also failed with 32 and 64 processes with
+ collective abort of all ranks at t_posix_compliant / allwrite_allread_blocks
+ with MPI IO. CMC - 2009/04/28
+
+* There is a known issue in which HDF5 will change the timestamp on a file
+ simply by opening it with read/write permissions, even if the file is not
+ modified in any way. This is due to the way in which HDF5 manages the file
+ superblock. A fix is currently underway and should be included in the 1.8.4
+ release of HDF5. MAM - 2009/04/28
+
+* For gcc v4.3 and v4.4, in production mode, if -O3 is used, H5Tinit.c
+ fails to compile; in fact, a bad H5Tinit.c is produced. If -O (same
+ as -O1) is used, H5Tinit.c compiles but test/dt_arith fails.
+ When -O0 (no optimization) is used, H5Tinit.c compiles and all
+ tests pass. Therefore, -O0 is imposed for v4.3 and v4.4 of gcc.
+ AKC - 2009/04/20
+
+* For Red Storm, a Cray XT3 system, the tools/h5ls/testh5ls.sh and
+ tools/h5copy/testh5copy.sh scripts will fail some of their sub-tests. These
+ sub-tests are expected to fail and should exit with a non-zero code, but the
+ yod command does not propagate the exit code of the executables. Yod always
+ returns 0 if it can launch the executable. The test suite shell expects a
+ non-zero code for this particular test and therefore concludes the test has
+ failed when it receives 0 from yod. Skip all the "failing" tests for now
+ by changing them as follows.
+
+ ======== Original tools/h5ls/testh5ls.sh =========
+ TOOLTEST tgroup-1.ls 1 -w80 -r -g tgroup.h5
+ ======== Change to ===============================
+ echo SKIP TOOLTEST tgroup-1.ls 1 -w80 -r -g tgroup.h5
+ ==================================================
+
+ ======== Original tools/h5copy/testh5copy.sh =========
+ TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d grp_rename
+ TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d /grp_rename/grp_dsets
+ TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_dsets -d /E/F/grp_dsets
+ TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_nested -d /G/H/grp_nested
+ H5LSTEST $FILEOUT
+ ======== Change to ===============================
+ echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d grp_rename
+ echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d /grp_rename/grp_dsets
+ echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_dsets -d /E/F/grp_dsets
+ echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_nested -d /G/H/grp_nested
+ echo SKIP H5LSTEST $FILEOUT
+ ==================================================
+ AKC - 2008/11/10
+
+* For Red Storm, a Cray XT3 system, the yod command sometimes gives the
+  message, "yod allocation delayed for node recovery". This interferes with
+  test suites that do not expect to see this message. See the "Red Storm"
+  section in the INSTALL_parallel file for a way to deal with this problem.
+ AKC - 2008/05/28
+
+* We have discovered two problems when running collective IO parallel HDF5
+ tests with chunking storage on the ChaMPIon MPI compiler on tungsten, a
+ Linux cluster at NCSA.
+
+ Under some complex selection cases:
+ 1) MPI_Get_element returns the wrong value.
+ 2) MPI_Type_struct also generates the wrong derived datatype and corrupt
+ data may be generated.
+ These issues arise only when turning on collective IO with chunking storage
+ with some complex selections. We have not found these problems on other
+ MPI-IO compilers. If you encounter these problems, you may use independent
+ IO instead.
+
+ To avoid this behavior, change the following line in your code
+ H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ to
+ H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_INDEPENDENT);
+ KY - 2007/08/24
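+
+  For reference, a minimal sketch of this workaround in context (illustrative
+  only, not part of the original report; it assumes a parallel HDF5 build, and
+  the dataset, datatype, dataspace, and buffer arguments are placeholders
+  supplied by the caller):
+
+      #include "hdf5.h"
+
+      /* Write a dataset with independent, rather than collective, MPI-IO. */
+      herr_t write_independent(hid_t dset, hid_t memtype, hid_t memspace,
+                               hid_t filespace, const void *buf)
+      {
+          hid_t  xfer_plist = H5Pcreate(H5P_DATASET_XFER);
+          herr_t status;
+
+          /* Workaround: request independent IO instead of collective IO. */
+          H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_INDEPENDENT);
+
+          status = H5Dwrite(dset, memtype, memspace, filespace, xfer_plist, buf);
+
+          H5Pclose(xfer_plist);
+          return status;
+      }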
+
+* On Intel 64 Linux cluster (RH 4, Linux 2.6.9) with Intel 10.0 compilers,
+ use -mp -O1 compilation flags to build the libraries. A higher level of
+ optimization causes failures in several HDF5 library tests.
+
+* For LLNL, uP: both serial and parallel tests pass.
+ Zeus: Serial tests pass but parallel tests fail with a known problem in MPI.
+ ubgl: Serial tests pass but parallel tests fail.
+
+* On mpich 1.2.5 and 1.2.6, if more than two processes contribute no IO and
+  the application asks to do collective IO, we have found that when using 4
+  processors, a simple collective write will sometimes hang. This can be
+  verified with the t_mpi test under testpar.
+
+* On IRIX6.5, when the C compiler version is greater than 7.4, complicated
+ MPI derived datatype code will work. However, the user should increase
+ the value of the MPI_TYPE_MAX environment variable to some appropriate value
+ to use collective irregular selection code. For example, the current
+ parallel HDF5 test needs to raise MPI_TYPE_MAX to 200,000 to pass the test.
+
+* A dataset created or rewritten with a v1.6.3 library or after cannot be read
+ with the v1.6.2 library or before when the Fletcher32 EDC filter is enabled.
+ There was a bug in the calculation of the Fletcher32 checksum in the
+ library before v1.6.3; the checksum value was not consistent between big-
+ endian and little-endian systems. This bug was fixed in Release 1.6.3.
+  However, after fixing the bug, the checksum value was no longer the same as
+  before on little-endian systems. Library releases after 1.6.4 can still read
+ datasets created or rewritten with an HDF5 library of v1.6.2 or before.
+ SLU - 2005/6/30
+
+* On IBM AIX systems, parallel HDF5 mode will fail some tests with error
+ messages like "INFO: 0031-XXX ...". This is from the command `poe'.
+ Set the environment variable MP_INFOLEVEL to 0 to minimize the messages
+ and run the tests again.
+
+ The tests may fail with messages like "The socket name is already in use",
+ but HDF5 does not use sockets. This failure is due to problems with the
+ poe command trying to set up the debug socket. To resolve this problem,
+ check to see whether there are many old /tmp/s.pedb.* files staying around.
+ These are sockets used by the poe command and left behind due to failed
+ commands. First, ask your system administrator to clean them out.
+ Lastly, request IBM to provide a means to run poe without the debug socket.
+
+* The --enable-static-exec configure flag fails to compile for Solaris
+ platforms. This is due to the fact that not all of the system libraries on
+ Solaris are available in a static format.
+
+ The --enable-static-exec configure flag also fails to correctly compile
+ on IBM SP2 platforms for serial mode. The parallel mode works fine with
+ this option.
+
+ It is suggested that you do not use this option on these platforms
+ during configuration.
+
+* There is also a configure error on Altix machines that incorrectly reports
+ when a version of Szip without an encoder is being used.
+
+* Information about building with PGI and Intel compilers is available in
+ the INSTALL file sections 4.7 and 4.8.
+
+
+%%%%1.8.2%%%%
+
+
+HDF5 version 1.8.2 released on Mon Nov 10 15:43:09 CST 2008
+================================================================================
+
+INTRODUCTION
+============
+
+This document describes the differences between HDF5-1.8.1 and HDF5 1.8.2,
+and contains information on the platforms tested and known problems in
+HDF5-1.8.2. For more details, see the files HISTORY-1_0-1_8_0_rc3.txt
+and HISTORY-1_8.txt in the release_docs/ directory of the HDF5 source.
+
+Links to the HDF5 1.8.2 source code, documentation, and additional materials
+can be found on the HDF5 web page at:
+
+ http://www.hdfgroup.org/products/hdf5/
+
+The HDF5 1.8.2 release can be obtained from:
+
+ http://www.hdfgroup.org/HDF5/release/obtain5.html
+
+User documentation for 1.8.2 can be accessed directly at this location:
+
+ http://www.hdfgroup.org/HDF5/doc/
+
+New features in the HDF5-1.8.x release series, including brief general
+descriptions of some new and modified APIs, are described in the "What's New
+in 1.8.0?" document:
+
+ http://www.hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html
+
+All new and modified APIs are listed in detail in the "HDF5 Software Changes
+from Release to Release" document, in the section "Release 1.8.2 (current
+release) versus Release 1.8.1":
+
+ http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html
+
+If you have any questions or comments, please send them to the HDF Help Desk:
+
+ help@hdfgroup.org
+
+
+CONTENTS
+========
+
+- New Features
+- Support for new platforms and languages
+- Bug Fixes since HDF5-1.8.1
+- Platforms Tested
+- Supported Configuration Features Summary
+- Known Problems
+
+
+New Features
+============
+
+ Configuration
+ -------------
+ - Upgraded libtool to version 2.2.6a. (MAM - 2008/10/15).
+
+ Library
+ -------
+    - Added two new public routines: H5Pget_elink_fapl() and
+      H5Pset_elink_fapl(); see bug #1247 and the sketch after this list.
+      (VC - 2008/10/13)
+ - Improved free space tracking in file to be faster. (QAK - 2008/10/06)
+ - Added 'mounted' field to H5G_info_t struct. (QAK - 2008/07/15)
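+
+      The following minimal sketch (illustrative only; the property list
+      handles and names such as loc and link_name are placeholders) shows how
+      H5Pset_elink_fapl() might be used to supply a file access property list
+      for traversing an external link:
+
+          #include "hdf5.h"
+
+          /* Open an object through an external link, using a caller-chosen
+           * file access property list for the external target file. */
+          hid_t open_through_elink(hid_t loc, const char *link_name)
+          {
+              hid_t fapl = H5Pcreate(H5P_FILE_ACCESS); /* FAPL for target file */
+              hid_t lapl = H5Pcreate(H5P_LINK_ACCESS); /* LAPL for traversal   */
+              hid_t obj;
+
+              H5Pset_elink_fapl(lapl, fapl);        /* new in this release */
+              obj = H5Oopen(loc, link_name, lapl);  /* traversal uses fapl */
+
+              H5Pclose(lapl);
+              H5Pclose(fapl);
+              return obj;
+          }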
+
+ Parallel Library
+ ----------------
+ - None
+
+ Tools
+ -----
+ - h5repack: added new options -u and -b to add a userblock to an HDF5
+ file during the repack. (PVN - 2008/08/26)
+ - h5repack: added options -t and -a to call H5Pset_alignment while
+ creating a repacked file. (PVN - 2008/08/29)
+ - h5ls: added capability to traverse through external links when the -r
+ (recursive) flag is given. (NAF - 2008/09/16)
+ - h5ls: added -E option to enable traversal of external links.
+ h5ls will not traverse external links without this flag being set.
+ (NAF - 2008/10/06)
+    - h5dump: when the -b flag is used without a keyword after it, binary
+        output defaults to NATIVE. The MEMORY keyword was deprecated
+        and replaced by the NATIVE keyword. (PVN - 2008/10/30)
+    - h5diff: returns 1 when the file graphs differ by any object.
+        The error return code was changed from -1 to 2. (PVN - 2008/10/30)
+    - h5import: TEXTFPE (scientific format) was deprecated. Use TEXTFP
+        instead. (PVN - 2008/10/30)
+
+
+
+ F90 API
+ ------
+ - Added optional parameter 'mounted' to H5Gget_info_f,
+ H5Gget_info_by_idx_f, H5Gget_info_by_name_f (MSB - 2008/09/24)
+ - Added H5Tget_native_type_f (MSB - 2008/09/30)
+
+
+ C++ API
+ ------
+    - These member functions were added as wrappers for H5Rdereference to
+      replace the incorrect IdComponent::dereference():
+ void H5Object::dereference(H5Object& obj, void* ref,
+ H5R_type_t ref_type=H5R_OBJECT)
+ void H5Object::dereference(H5File& h5file, void* ref,
+ H5R_type_t ref_type=H5R_OBJECT)
+ void H5Object::dereference(Attribute& obj, void* ref,
+ H5R_type_t ref_type=H5R_OBJECT)
+
+ In addition, these constructors were added to create the associated
+ objects by way of dereference:
+ DataSet(H5Object& obj, void* ref, H5R_type_t ref_type=H5R_OBJECT)
+ DataSet(H5File& file, void* ref, H5R_type_t ref_type=H5R_OBJECT)
+ DataSet(Attribute& attr, void* ref, H5R_type_t ref_type=H5R_OBJECT)
+ Group(H5Object& obj, void* ref, H5R_type_t ref_type=H5R_OBJECT)
+ Group(H5File& obj, void* ref, H5R_type_t ref_type=H5R_OBJECT)
+ Group(Attribute& attr, void* ref, H5R_type_t ref_type=H5R_OBJECT)
+ DataType(H5Object& obj, void* ref, H5R_type_t ref_type=H5R_OBJECT)
+ DataType(H5File& file, void* ref, H5R_type_t ref_type=H5R_OBJECT)
+ DataType(Attribute& attr, void* ref, H5R_type_t ref_type=H5R_OBJECT)
+ (BMR - 2008/10/29)
+
+
+Support for New Platforms, Languages, and Compilers
+===================================================
+ - Intel 10.1 is supported on Mac OS X 10.5.4.
+ Note:
+ When Fortran is enabled, configure automatically
+ disables the build of shared libraries (i.e., only
+ static C and C++ HDF5 libraries will be built
+ along with the static HDF5 Fortran library).
+ Intel 10.1 C and C++ compilers require
+ "-no-multibyte-chars" compilation flag due to the known
+ bug in the compilers.
+ (EIP - 2008/10/30)
+
+
+Bug Fixes since HDF5-1.8.1
+==========================
+
+ Configuration
+ -------------
+ - Fixed error with 'make check install' failing due to h5dump
+ needing other tools built first. (MAM - 2008/10/15).
+ - When using shared szip, it is no longer necessary to specify
+ the path to the shared szip libraries in LD_LIBRARY_PATH.
+ (MAM - 2008/10/15).
+    - The file libhdf5_fortran.settings is no longer installed since its
+      content is now included in libhdf5.settings. (AKC - 2008/10/21)
+ - "make DESTDIR=xxx install" failed to install some tools and files
+ (e.g., h5cc and fortran modules). Fixed. (AKC - 2008/10/8).
+
+ Library
+ -------
+    - H5Ovisit and H5Ovisit_by_name will now properly terminate when the
+      callback function returns a positive value on the starting object
+      (see the sketch after this list). (NAF - 2008/11/03)
+ - Fixed an error where a null message could be created that was larger
+ than could be written to the file. (NAF - 2008/10/23)
+    - Corrected error with family/split/multi VFD not updating driver info
+      when the "latest" version of the file format is used. (QAK - 2008/10/14)
+ - Corrected alignment+threshold errors to work correctly when metadata
+ aggregation is enabled. (QAK - 2008/10/06)
+ - Changed H5Fget_obj_count and H5Fget_obj_ids to ignore objects
+ registered by the library for internal library use.
+ (NAF - 2008/10/06)
+ - Fixed potential memory leak during compound conversion.
+ (NAF - 2008/10/06)
+ - Changed the return value of H5Fget_obj_count from INT to SSIZE_T.
+ Also changed the return value of H5Fget_obj_ids from HERR_T to
+ SSIZE_T and the type of the parameter MAX_OBJS from INT to SIZE_T.
+ (SLU - 2008/09/26)
+ - Fixed an issue that could cause data to be improperly overwritten
+ during compound type conversion. (NAF - 2008/09/19)
+ - Fixed pointer alignment violations that could occur during vlen
+ conversion. (NAF - 2008/09/16)
+ - Fixed problem where library could cause a segmentation fault when
+ an invalid location ID was given to H5Giterate(). (QAK - 2008/08/19)
+ - Fixed improper shutdown when objects have reference count > 1. The
+ library now tracks reference count due to the application separately
+ from that due to internal library routines. (NAF - 2008/08/19)
+ - Fixed assertion failure caused by incorrect array datatype version.
+ (NAF - 2008/08/08)
+ - Fixed an issue where mount point traversal would fail when using
+ multiple handles for the child. (NAF - 2008/08/07)
+ - Fixed an issue where mount points were inaccessible when using
+ multiple file handles for the parent. The mount table is now in
+ the shared file structure (the parent pointer is still in the
+ top structure). (NAF - 2008/08/07)
+ - Fixed assertion failure caused by incorrect array datatype version.
+ (NAF - 2008/08/04)
+ - Fixed issue where a group could have a file mounted on it twice.
+ (QAK - 2008/07/15)
+ - When an attribute was opened twice and data was written with
+ one of the handles, the file didn't have the data. It happened
+ because each handle had its own object structure, and the empty
+ one overwrote the data with fill value. This is fixed by making
+ some attribute information like the data be shared in the
+ attribute structure. (SLU - 2008/07/07)
+ - Fixed a Windows-specific issue in the ohdr test which was causing
+      users in some timezones to get false errors. This is a deficiency in
+      the Windows mktime() function, which has now been handled properly.
+ (SJW - 2008/06/19)
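+
+      A minimal sketch (illustrative only; the callback and function names
+      are placeholders) of a visit callback that returns a positive value to
+      stop the iteration, which now also terminates correctly when returned
+      on the starting object:
+
+          #include "hdf5.h"
+
+          /* Returning a positive value from the callback stops the visit. */
+          static herr_t stop_at_first(hid_t obj, const char *name,
+                                      const H5O_info_t *info, void *op_data)
+          {
+              (void)obj; (void)name; (void)info; (void)op_data;
+              return 1;                    /* positive => terminate visit */
+          }
+
+          static herr_t visit_once(hid_t loc)
+          {
+              return H5Ovisit(loc, H5_INDEX_NAME, H5_ITER_NATIVE,
+                              stop_at_first, NULL);
+          }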
+
+ Parallel Library
+ ----------------
+ - None
+
+ Tools
+ -----
+ - h5dump now checks for uniqueness of committed datatypes.
+ (NAF - 2008/10/15)
+ - Fixed unnecessary indentation of committed datatypes in h5dump.
+ (NAF - 2008/10/15)
+    - Fixed bugs in h5stat: a segmentation fault when printing groups, and
+      a warning message is now printed when traversal of objects is
+      unsuccessful. (see bug #1253) (VC - 2008/10/13)
+ - Fixed bug in h5ls that prevented relative group listings (like
+ "h5ls foo.h5/bar") from working correctly (QAK - 2008/06/03)
+ - h5dump: when doing binary output (-b), the stdout printing of
+ attributes was done incorrectly. Removed printing of attributes
+ when doing binary output. (PVN - 2008/06/05)
+
+
+ F90 API
+ ------
+ - h5sselect_elements_f: Added additional operators H5S_SELECT_APPEND
+ and H5S_SELECT_PREPEND (MSB - 2008/09/30)
+    - h5sget_select_elem_pointlist: Fixed the list of returned points by
+      correctly rearranging the point list to account for C conventions.
+      (MSB - 2008/09/30)
+    - h5sget_select_hyper_blocklist_f: Fixed error in transposed dimension
+      of arrays. (MSB - 2008/9/30)
+    - h5sget_select_bounds_f: Swapped array bounds to account for C and
+      Fortran reversed array notation. (MSB - 2008/9/30)
+    - Changed string initialization to a blank character instead of a
+      null type in tH5P.f90 to fix a compile error on AIX 5.3.0.
+      (MSB - 2008/7/29)
+    - Fixed missing commas in H5test_kind.f90 detected by the NAG compiler.
+      (MSB - 2008/7/29)
+    - Fixed passing an array to a scalar in tH5A_1_8.f90, detected by the
+      NAG compiler. (MSB - 2008/7/29)
+    - Added the ability for the test programs to use the status of
+      HDF5_NOCLEANUP to determine whether the *.h5 files should be removed
+      after the tests are completed. (MSB - 2008/10/1)
+    - In nh5tget_offset_c: if the offset was equal to 0, an error code of
+      -1 was returned; this was changed so that an error code of -1 is
+      returned only when the offset value is < 0. (MSB - 2008/9/12)
+    - Now uses the intrinsic Fortran function SIZEOF, if available, when
+      detecting the type of INTEGERs and REALs in H5test_kind.f90.
+      (MSB - 2008/9/3)
+    - Put the DOUBLE PRECISION interfaces in a separate module and
+      added a USE statement for the module. The interfaces are
+      included or excluded depending on whether FORTRAN_DEFAULT_REAL
+      is DBLE_F, which detects whether the default REAL is DOUBLE PRECISION.
+      This allows the library to be compiled with the -r8 Fortran flag
+      without the user needing to edit the source code.
+      (MSB - 2008/8/27)
+    - Enabled building the shared library for Fortran by adding the -fPIC
+      flag to the compile flags for versions of the Intel Fortran compiler
+      >= 9. (MSB - 2008/8/26)
+
+ C++ API
+ ------
+ - Fixed a design bug which allowed an Attribute object to create/modify
+ attributes (bugzilla #1068). The API class hierarchy was revised
+ to address the problem. Classes AbstractDS and Attribute are moved
+ out of H5Object. Class Attribute now multiply inherits from
+ IdComponent and AbstractDs and class DataSet from H5Object and
+ AbstractDs. In addition, the data member IdComponent::id was
+ moved into subclasses: Attribute, DataSet, DataSpace, DataType,
+ H5File, Group, and PropList. (BMR - 2008/05/20)
+ - IdComponent::dereference was incorrect and replaced as described
+ in "New Features" section.
+ (BMR - 2008/10/29)
+
+
+Platforms Tested
+================
+The following platforms and compilers have been tested for this release.
+
+ AIX 5.3 xlc 7.0.0.8
+ xlf 09.01.0000.0008
+ xlC 7.0.0.8
+ mpcc_r 7.0.0.8
+ mpxlf_r 09.01.0000.0008
+
+ Cray XT3 (2.0.41) cc (pgcc) 7.1-4
+ (red storm) ftn (pgf90) 7.1-4
+ CC (pgCC) 7.1-4
+
+ FreeBSD 6.3-STABLE i386 gcc 3.4.6 [FreeBSD] 20060305
+ (duty) g++ 3.4.6 [FreeBSD] 20060305
+ gcc 4.2.5 20080702
+ g++ 4.2.5 20080702
+ gfortran 4.2.5 20080702
+
+ FreeBSD 6.3-STABLE amd64 gcc 3.4.6 [FreeBSD] 20060305
+ (liberty) g++ 3.4.6 [FreeBSD] 20060305
+ gcc 4.2.5 20080702
+ g++ 4.2.5 20080702
+ gfortran 4.2.5 20080702
+
+ IRIX64 6.5 (64 & n32) MIPSpro cc 7.4.4m
+ F90 MIPSpro 7.4.4m
+ C++ MIPSpro cc 7.4.4m
+
+ Linux 2.6.9-42.0.10.ELsmp #1 gcc (GCC) 3.4.6
+ SMP i686 i386 G95 (GCC 4.0.3 (g95 0.92!) April 18 2007)
+ (kagiso) PGI C, Fortran, C++ 7.2-1 32-bit
+ Intel(R) C Compiler for 32-bit
+ applications, Version 10.1
+ Intel(R) C++ Compiler for 32-bit
+ applications, Version 10.1
+ Intel(R) Fortran Compiler for 32-bit
+ applications, Version 10.1
+ Absoft 32-bit Fortran 95 10.0.4
+ MPICH mpich-1.2.7 compiled with
+ gcc 3.4.6 and G95 (GCC 4.0.3 (g95 0.92!)
+ MPICH mpich2-1.0.6p1 compiled with
+ gcc 3.4.6 and G95 (GCC 4.0.3 (g95 0.92!)
+
+ Linux 2.6.16.46-0.14-smp #1 Intel(R) C++ for Intel(R) EM64T
+ SMP x86_64 GNU/Linux Ver. 10.1.013
+ (smirom) Intel(R) Fortran Intel(R) EM64T
+ Ver. 10.1.013
+ PGI C, Fortran, C++ Version 7.2-1
+ for 64-bit target on x86-64
+ MPICH mpich-1.2.7 compiled with
+ gcc 4.1.2 and G95 (GCC 4.0.3 (g95 0.92!)
+ MPICH mpich2-1.0.7 compiled with
+ gcc 4.1.2 and G95 (GCC 4.0.3 (g95 0.92!)
+ tested for both 32- and 64-bit binaries
+
+ Linux 2.6.16.54-0.2.5 #1 Intel(R) C++ Version 10.1.017
+ Altix SMP ia64 Intel(R) Fortran Itanium(R) Version 10.1.017
+ (cobalt) SGI MPI 1.16
+
+ SunOS 5.10 32- and 64-bit Sun WorkShop 6 update 2 C 5.8
+ (linew) Sun WorkShop 6 update 2 Fortran 95 8.2
+ Sun WorkShop 6 update 2 C++ 5.8
+ Patch 121019-06
+
+ Xeon Linux 2.6.9-42.0.10.EL_lustre-1.4.10.1smp
+ (abe) Intel(R) C++ Version 10.0.026
+ Intel(R) Fortran Compiler Version 10.0.026
+ Open MPI 1.2.2
+ MVAPICH2-0.9.8p28p2patched-intel-ofed-1.2
+ compiled with icc v10.0.026 and
+ ifort 10.0.026
+
+ IA-64 Linux 2.4.21-309.tg1 #1 SMP
+ ia64 gcc (GCC) 3.2.2
+ (NCSA tg-login) Intel(R) C++ Version 8.1.037
+ Intel(R) Fortran Compiler Version 8.1.033
+ mpich-gm-1.2.7p1..16-intel-8.1.037-r1
+
+ Intel 64 Linux 2.6.9-42.0.10.EL_lustre-1.4.10.1smp
+ (abe) gcc 3.4.6 20060404
+ Intel(R) C++ Version 10.0
+ Intel (R) Fortran Compiler Version 10.0
+ mvapich2-0.9.8p2patched-intel-ofed-1.2
+
+ Windows XP Visual Studio .NET
+ Visual Studio 2005 w/ Intel Fortran 9.1
+ Cygwin(native gcc compiler and g95)
+
+ Windows XP x64 Visual Studio 2005 w/ Intel Fortran 9.1
+
+ Windows Vista Visual Studio 2005
+
+ MAC OS 10.5.4 (Intel) i686-apple-darwin9-gcc-4.0.1 (GCC) 4.0.1
+ GNU Fortran (GCC) 4.3.0 20070810
+ G95 (GCC 4.0.3 (g95 0.91!) Apr 24 2008)
+ Intel C, C++ and Fortran compilers 10.1
+
+
+Supported Configuration Features Summary
+========================================
+
+ In the tables below
+ y = tested and supported
+ n = not supported or not tested in this release
+ x = not working in this release
+ dna = does not apply
+ ( ) = footnote appears below second table
+ <blank> = testing incomplete on this feature or platform
+
+Platform C F90 F90 C++ zlib SZIP
+ parallel parallel
+Solaris2.10 32-bit n y n y y y
+Solaris2.10 64-bit n y n y y y
+IRIX64_6.5 32-bit n n n n y y
+IRIX64_6.5 64-bit n y y y y y
+Windows XP n y(15) n(15) y y y
+Windows XP x64 n y(15) n(15) y y y
+Windows Vista n n n y y y
+Mac OS X 10.5 Intel n y n y y y
+AIX 5.3 32- and 64-bit n y n y y n
+FreeBSD 6.3-STABLE
+32&64 bit n y n y y y
+RedHat EL4 (3) W y(1) y(10) y(1) y y y
+RedHat EL4 Intel (3) W n y n y y n
+RedHat EL4 PGI (3) W n y n y y n
+SuSe x86_64 gcc(3,12) W y(2) y(11) y(2) y y y
+SuSe x86_64 Int(3,12) W n y(13) n y y n
+SuSe x86_64 PGI(3,12) W n y(8) n y y y
+Linux 2.6 SuSE ia64 C
+ Intel (3,7) y y y y y n
+Linux 2.6 SGI Altix
+ ia64 Intel (3) y y y y y y
+Linux 2.6 RHEL C
+ Lustre Intel (5) y(4) y y(4) y y n
+Cray XT3 2.0.41 y y y y y n
+
+
+Platform Shared Shared Shared Thread-
+ C libs F90 libs C++ libs safe
+Solaris2.10 32-bit y y y y
+Solaris2.10 64-bit y y y y
+IRIX64_6.5 32-bit y dna y y
+IRIX64_6.5 64-bit y y n y
+Windows XP y y(15) y y
+Windows XP x64 y y(15) y y
+Windows Vista y n n y
+Mac OS X 10.5 y n y n
+AIX 5.3 32- and 64-bit n n n n
+FreeBSD 6.2 32&64 bit y n y y
+RedHat EL4 (3) W y y(10) y y
+RedHat EL4 Intel (3) W y y y n
+RedHat EL4 PGI (3) W y y y n
+SuSe x86_64 GNU(3,12) W y y y y
+SuSe x86_64 Int(3,12) W y y y n
+SuSe x86_64 PGI(3,12) W y y y n
+Linux 2.4 SuSE C
+ ia64 C Intel (7) y y y n
+Linux 2.4 SGI Altix C
+ ia64 Intel y n
+Linux 2.6 RHEL C
+ Lustre Intel (5) y y y n
+Cray XT3 2.0.41 n n n n
+
+ Notes: (1) Using mpich2 1.0.6.
+ (2) Using mpich2 1.0.7.
+ (3) Linux 2.6 with GNU, Intel, and PGI compilers, as indicated.
+ W or C indicates workstation or cluster, respectively.
+ (4) Using mvapich2 0.9.8.
+ (5) Linux 2.6.9-42.0.10. Xeon cluster with ELsmp_perfctr_lustre
+ and Intel compilers
+ (6) Linux 2.4.21-32.0.1. Xeon cluster with ELsmp_perfctr_lustre
+ and Intel compilers
+ (7) Linux 2.4.21, SuSE_292.till. Ia64 cluster with Intel compilers
+ (8) pgf90
+ (9) With Compaq Visual Fortran 6.6c compiler.
+ (10) With PGI and Absoft compilers.
+ (11) PGI and Intel compilers for both C and Fortran
+ (12) AMD Opteron x86_64
+ (13) ifort
+ (14) Yes with C and Fortran, but not with C++
+ (15) Using Visual Studio 2005 or Cygwin
+ (16) Not tested for this release.
+ Compiler versions for each platform are listed in the preceding
+ "Platforms Tested" table.
+
+
+Known Problems
+==============
+* For Red Storm, a Cray XT3 system, the tools/h5ls/testh5ls.sh and
+  tools/h5copy/testh5copy.sh scripts will fail some of their sub-tests.
+  These sub-tests are expected to fail and should exit with a non-zero code,
+  but the yod command does not propagate the exit code of the executables;
+  yod always returns 0 if it can launch the executable. The test suite shell
+  expects a non-zero code for these particular tests, so it concludes a test
+  has failed when it receives 0 from yod. Skip all the "failing" tests for
+  now by changing them as follows.
+
+ ======== Original tools/h5ls/testh5ls.sh =========
+ TOOLTEST tgroup-1.ls 1 -w80 -r -g tgroup.h5
+ ======== Change to ===============================
+ echo SKIP TOOLTEST tgroup-1.ls 1 -w80 -r -g tgroup.h5
+ ==================================================
+
+ ======== Original tools/h5copy/testh5copy.sh =========
+ TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d grp_rename
+ TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d /grp_rename/grp_dsets
+ TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_dsets -d /E/F/grp_dsets
+ TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_nested -d /G/H/grp_nested
+ H5LSTEST $FILEOUT
+ ======== Change to ===============================
+ echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d grp_rename
+ echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -v -s grp_dsets -d /grp_rename/grp_dsets
+ echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_dsets -d /E/F/grp_dsets
+ echo SKIP TOOLTEST_FAIL -i $TESTFILE -o $FILEOUT -vp -s /grp_nested -d /G/H/grp_nested
+ echo SKIP H5LSTEST $FILEOUT
+ ==================================================
+ AKC - 2008/11/10
+
+* For Red Storm, a Cray XT3 system, the yod command sometimes gives the
+  message, "yod allocation delayed for node recovery". This interferes with
+  test suites that do not expect to see this message. See the "Red Storm"
+  section in the INSTALL_parallel file for a way to deal with this problem.
+ AKC - 2008/05/28
+
+* We have discovered two problems when running collective IO parallel HDF5
+ tests with chunking storage on the ChaMPIon MPI compiler on tungsten, a
+ Linux cluster at NCSA.
+
+ Under some complex selection cases:
+ 1) MPI_Get_element returns the wrong value.
+ 2) MPI_Type_struct also generates the wrong derived datatype and corrupt
+ data may be generated.
+ These issues arise only when turning on collective IO with chunking storage
+ with some complex selections. We have not found these problems on other
+ MPI-IO compilers. If you encounter these problems, you may use independent
+ IO instead.
+
+ To avoid this behavior, change the following line in your code
+ H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ to
+ H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_INDEPENDENT);
+
+ KY - 2007/08/24
+
+* On Intel 64 Linux cluster (RH 4, Linux 2.6.9) with Intel 10.0 compilers,
+ use -mp -O1 compilation flags to build the libraries. A higher level of
+ optimization causes failures in several HDF5 library tests.
+
+* For LLNL, uP: both serial and parallel tests pass.
+ Zeus: Serial tests pass but parallel tests fail with a known problem in MPI.
+ ubgl: Serial tests pass but parallel tests fail.
+
+* On mpich 1.2.5 and 1.2.6, if more than two processes contribute no IO and
+  the application asks to do collective IO, we have found that when using 4
+  processors, a simple collective write will sometimes hang. This can be
+  verified with the t_mpi test under testpar.
+
+* On IRIX6.5, when the C compiler version is greater than 7.4, complicated
+ MPI derived datatype code will work. However, the user should increase
+ the value of the MPI_TYPE_MAX environment variable to some appropriate value
+ to use collective irregular selection code. For example, the current
+ parallel HDF5 test needs to raise MPI_TYPE_MAX to 200,000 to pass the test.
+
+* A dataset created or rewritten with a v1.6.3 library or after cannot be read
+ with the v1.6.2 library or before when the Fletcher32 EDC filter is enabled.
+ There was a bug in the calculation of the Fletcher32 checksum in the
+ library before v1.6.3; the checksum value was not consistent between big-
+ endian and little-endian systems. This bug was fixed in Release 1.6.3.
+  However, after fixing the bug, the checksum value was no longer the same as
+  before on little-endian systems. Library releases after 1.6.4 can still read
+ datasets created or rewritten with an HDF5 library of v1.6.2 or before.
+ SLU - 2005/6/30
+
+* On IBM AIX systems, parallel HDF5 mode will fail some tests with error
+ messages like "INFO: 0031-XXX ...". This is from the command `poe'.
+ Set the environment variable MP_INFOLEVEL to 0 to minimize the messages
+ and run the tests again.
+
+ The tests may fail with messages like "The socket name is already in use",
+ but HDF5 does not use sockets. This failure is due to problems with the
+ poe command trying to set up the debug socket. To resolve this problem,
+ check to see whether there are many old /tmp/s.pedb.* files staying around.
+ These are sockets used by the poe command and left behind due to failed
+ commands. First, ask your system administrator to clean them out.
+ Lastly, request IBM to provide a means to run poe without the debug socket.
+
+* The --enable-static-exec configure flag fails to compile for Solaris
+ platforms. This is due to the fact that not all of the system libraries on
+ Solaris are available in a static format.
+
+ The --enable-static-exec configure flag also fails to correctly compile
+ on IBM SP2 platforms for serial mode. The parallel mode works fine with
+ this option.
+
+ It is suggested that you do not use this option on these platforms
+ during configuration.
+
+* There is also a configure error on Altix machines that incorrectly reports
+ when a version of Szip without an encoder is being used.
+
+* Information about building with PGI and Intel compilers is available in
+ the INSTALL file sections 4.7 and 4.8.
+
+
+
+
+%%%%1.8.1%%%%
+
+
+HDF5 version 1.8.1 released on Thu May 29 15:28:55 CDT 2008
+================================================================================
+
+INTRODUCTION
+============
+
+This document describes the differences between the HDF5-1.8.1 release
+and HDF5 1.8.0, and contains information on the platforms tested and known
+problems in HDF5-1.8.1. For more details, see the files
+HISTORY-1_0-1_8_0_rc3.txt and HISTORY-1_8.txt in the release_docs/ directory
+of the HDF5 source.
+
+Links to the HDF5 1.8.1 source code, documentation, and additional materials
+can be found on the HDF5 web page at:
+
+ http://www.hdfgroup.org/products/hdf5/
+
+The HDF5 1.8.1 release can be obtained from:
+
+ http://www.hdfgroup.org/HDF5/release/obtain5.html
+
+User documentation for 1.8.1 can be accessed directly at this location:
+
+ http://www.hdfgroup.org/HDF5/doc/
+
+New features in the HDF5-1.8.x release series, including brief general
+descriptions of some new and modified APIs, are described in the "What's New
+in 1.8.0?" document:
+
+ http://www.hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html
+
+All new and modified APIs are listed in detail in the "HDF5 Software Changes
+from Release to Release" document, in the section "Release 1.8.1 (current
+release) versus Release 1.8.0":
+
+ http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html
+
+If you have any questions or comments, please send them to the HDF Help Desk:
+
+ help@hdfgroup.org
+
+
+CONTENTS
+========
+
+- New Features
+- Support for new platforms and languages
+- Bug Fixes since HDF5-1.8.0
+- Platforms Tested
+- Supported Configuration Features Summary
+- Known Problems
+
+
+New Features
+============
+
+ Configuration
+ -------------
+ - The lib/libhdf5.settings file contains much more configure
+ information. (AKC - 2008/05/18)
+
+ - The new configure option "--disable-sharedlib-rpath" disables
+ embedding the '-Wl,-rpath' information into executables when
+ shared libraries are produced, and instead solely relies on the
+ information in LD_LIBRARY_PATH. (MAM - 2008/05/15)
+
+ - Configuration suite now uses Autoconf 2.61, Automake 1.10.1, and
+ Libtool 2.2.2 (MAM - 2008/05/01)
+
+ Source code distribution
+ ========================
+
+ Library
+ -------
+ - None
+
+ Parallel Library
+ ----------------
+ - None
+
+ Tools
+ -----
+ - h5repack: Reinstated the -i and -o command line flags to specify
+ input and output files. h5repack now understands both the old
+ syntax (with -i and -o) and the new syntax introduced in Release
+ 1.8.0. (PVN - 2008/05/23)
+ - h5dump: Added support for external links, displaying the object that
+ an external link points to. (PVN - 2008/05/12)
+ - h5dump: Added an option, -m, to allow user-defined formatting in the
+ output of floating point numbers. (PVN - 2008/05/06)
+ - h5dump, in output of the -p option: Added effective data compression
+ ratio to the dataset storage layout output when a compression filter
+ has been applied to a dataset. (PVN - 2008/05/01)
+
+ F90 API
+ ------
+ New H5A, H5G, H5L, H5O, and H5P APIs to enable 1.8 features were
+ added. See "Release 1.8.1 (current release) versus Release 1.8.0" in
+ the document "HDF5 Software Changes from Release to Release"
+ (http://hdfgroup.org/HDF5/doc/ADGuide/Changes.html) for the
+ complete list of the new APIs.
+
+ C++ API
+ ------
+ - None
+
+
+Support for New Platforms, Languages, and Compilers
+===================================================
+ - Both serial and parallel HDF5 are supported for the Red Storm machine
+ which is a Cray XT3 system.
+
+ - The Fortran library will work correctly if compiled with the -i8
+ flag. This has been tested with the g95, PGI and Intel Fortran
+ compilers.
+
+
+Bug Fixes since HDF5-1.8.0
+==========================
+
+ Configuration
+ -------------
+ - None
+
+ Source code distribution
+ ========================
+
+ Library
+ -------
+ - Chunking: Chunks greater than 4GB are disallowed.
+ (QAK - 2008/05/16)
+    - Fixed the problem with searching for a target file when following
+      an external link. The search pattern depends on whether the
+      target file's pathname is an absolute or a relative path.
+      Please see the H5Lcreate_external description in the "HDF5
+      Reference Manual" (http://hdfgroup.org/HDF5/doc/RM/RM_H5L.html)
+      and the sketch after this list. (VC - 2008/04/08)
+ - Fixed possible file corruption bug when encoding datatype
+ descriptions for compound datatypes whose size was between
+ 256 and 511 bytes and the file was opened with the "use the
+ latest format" property enabled (with H5Pset_libver_bounds).
+ (QAK - 2008/03/13)
+ - Fixed bug in H5Aget_num_attrs() routine to correctly handle an
+ invalid location identifier. (QAK - 2008/03/11)
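+
+      A minimal sketch (illustrative only; the file, link, and object names
+      are placeholders) of creating an external link whose target file is
+      given as a relative path, the case affected by the search fix above:
+
+          #include "hdf5.h"
+
+          /* Create an external link "elink" whose target is object "/dset"
+           * in the file "ext.h5", named relative to the linking file. */
+          static herr_t make_external_link(hid_t file)
+          {
+              return H5Lcreate_external("ext.h5",  /* relative target file */
+                                        "/dset",   /* object in that file  */
+                                        file, "elink",
+                                        H5P_DEFAULT, H5P_DEFAULT);
+          }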
+
+ Parallel Library
+ ----------------
+ - None
+
+ Tools
+ -----
+ - Fixed bug in h5diff that prevented datasets and attributes with
+ variable-length string elements from comparing correctly.
+ (QAK - 2008/02/28)
+ - Fixed bug in h5dump that caused binary output to be made only for
+ the first dataset, when several datasets were requested.
+ (PVN - 2008/04/07)
+
+ F90 API
+ ------
+ - The h5tset(get)_fields subroutines were missing the parameter to
+ specify a sign position; fixed. (EIP - 2008/05/23)
+    - Many APIs were fixed to work with the 8-byte integers in Fortran vs.
+      4-byte integers in C. This change is transparent to user applications.
+
+ C++ API
+ ------
+ - The class hierarchy was revised to address the problem reported
+ in bugzilla #1068, Attribute should not be derived from base
+      class H5Object. Class AbstractDs was moved out of H5Object.
+ Class Attribute now multiply inherits from IdComponent and
+ AbstractDs and class DataSet from H5Object and AbstractDs.
+ In addition, data member IdComponent::id was moved into subclasses:
+ Attribute, DataSet, DataSpace, DataType, H5File, Group, and PropList.
+ (BMR - 2008/05/20)
+ - IdComponent::dereference was incorrect; it was changed from:
+ void IdComponent::dereference(IdComponent& obj, void* ref)
+ to:
+ void H5Object::dereference(H5File& h5file, void* ref)
+ void H5Object::dereference(H5Object& obj, void* ref)
+ (BMR - 2008/05/20)
+ - Revised Attribute::write and Attribute::read wrappers to handle
+ memory allocation/deallocation properly. (bugzilla 1045)
+ (BMR - 2008/05/20)
+
+
+Platforms Tested
+================
+The following platforms and compilers have been tested for this release.
+
+ Cray XT3 (2.0.41) cc (pgcc) 7.1-4
+ (red storm) ftn (pgf90) 7.1-4
+ CC (pgCC) 7.1-4
+ mpicc 1.0.2
+ mpif90 1.0.2
+
+ FreeBSD 6.2-STABLE i386 gcc 3.4.6 [FreeBSD] 20060305
+ (duty) g++ 3.4.6 [FreeBSD] 20060305
+ gcc 4.2.1 20080123
+ g++ 4.2.1 20080123
+ gfortran 4.2.1 20070620
+
+ FreeBSD 6.2-STABLE amd64 gcc 3.4.6 [FreeBSD] 20060305
+ (liberty) g++ 3.4.6 [FreeBSD] 20060305
+ gcc 4.2.1 20080123
+ g++ 4.2.1 20080123
+ gfortran 4.2.1 20080123
+
+ IRIX64 6.5 (64 & n32) MIPSpro cc 7.4.4m
+ F90 MIPSpro 7.4.4m
+ C++ MIPSpro cc 7.4.4m
+
+ Linux 2.6.9 (RHEL4) Intel 10.0 compilers
+ (abe.ncsa.uiuc.edu)
+
+ Linux 2.4.21-47 gcc 3.2.3 20030502
+ (osage)
+
+ Linux 2.6.9-42.0.10 gcc,g++ 3.4.6 20060404, G95 (GCC 4.0.3)
+ (kagiso) PGI 7.1-6 (pgcc, pgf90, pgCC)
+ Intel 9.1 (icc, ifort, icpc)
+
+ Linux 2.6.16.27 x86_64 AMD gcc 4.1.0 (SuSE Linux), g++ 4.1.0,
+ (smirom) g95 (GCC 4.0.3)
+ PGI 7.1-6 (pgcc, pgf90, pgCC)
+ Intel 9.1 (icc, ifort, icpc)
+
+ Linux 2.6.5-7.252.1-rtgfx #1 Intel(R) C++ Version 9.0
+ SMP ia64 Intel(R) Fortran Itanium(R) Version 9.0
+ (cobalt) SGI MPI
+
+ SunOS 5.8 32,46 Sun WorkShop 6 update 2 C 5.3
+ (Solaris 2.8) Sun WorkShop 6 update 2 Fortran 95 6.2
+ Sun WorkShop 6 update 2 C++ 5.3
+
+ SunOS 5.10 cc: Sun C 5.8
+ (linew) f90: Sun Fortran 95 8.2
+ CC: Sun C++ 5.8
+
+ Xeon Linux 2.4.21-32.0.1.ELsmp-perfctr-lustre
+ (tungsten) gcc 3.2.2 20030222
+ Intel(R) C++ Version 9.0
+ Intel(R) Fortran Compiler Version 9.0
+
+ IA-64 Linux 2.4.21.SuSE_309.tg1 ia64
+ (NCSA tg-login) gcc 3.2.2
+ Intel(R) C++ Version 8.1
+ Intel(R) Fortran Compiler Version 8.1
+ mpich-gm-1.2.6..14b-intel-r2
+
+ Intel 64 Linux 2.6.9-42.0.10.EL_lustre-1.4.10.1smp
+ (abe) gcc 3.4.6 20060404
+ Intel(R) C++ Version 10.0
+ Intel (R) Fortran Compiler Version 10.0
+ mvapich2-0.9.8p2patched-intel-ofed-1.2
+
+ Windows XP Visual Studio .NET
+ Visual Studio 2005 w/ Intel Fortran 9.1
+ Cygwin(native gcc compiler and g95)
+ MinGW(native gcc compiler and g95)
+
+ Windows XP x64 Visual Studio 2005 w/ Intel Fortran 9.1
+
+ Windows Vista Visual Studio 2005
+
+ MAC OS 10.5.2 (Intel) i686-apple-darwin9-gcc-4.0.1 (GCC) 4.0.1
+ GNU Fortran (GCC) 4.3.0 20070810
+ G95 (GCC 4.0.3 (g95 0.91!) Apr 24 2008)
+
+
+Supported Configuration Features Summary
+========================================
+
+ In the tables below
+ y = tested and supported
+ n = not supported or not tested in this release
+ x = not working in this release
+ dna = does not apply
+ ( ) = footnote appears below second table
+ <blank> = testing incomplete on this feature or platform
+
+Platform C F90 F90 C++ zlib SZIP
+ parallel parallel
+SunOS5.10 64-bit n y n y y y
+SunOS5.10 32-bit n y n y y y
+IRIX64_6.5 64-bit n y y y y y
+IRIX64_6.5 32-bit n n n n y y
+Windows XP n y(15) n(15) y y y
+Windows XP x64 n y(15) n(15) y y y
+Windows Vista n n n y y y
+Mac OS X 10.5 Intel n y n y y y
+FreeBSD 4.11 n n n y y y
+RedHat EL3 W (3) y(1) y(10) y(1) y y y
+RedHat EL3 W Intel (3) n y n y y n
+RedHat EL3 W PGI (3) n y n y y n
+SuSe x86_64 gcc (3,12) y(2) y(11) y(2) y y y
+SuSe x86_64 Int (3,12) n y(13) n y y n
+SuSe x86_64 PGI (3,12) n y(8) n y y y
+Linux 2.4 Xeon C
+ Lustre Intel (3,6) n y n y y n
+Linux 2.6 SuSE ia64 C
+ Intel (3,7) y y y y y n
+Linux 2.6 SGI Altix
+ ia64 Intel (3) y y y y y y
+Linux 2.6 RHEL C
+ Lustre Intel (5) y(4) y y(4) y y n
+Cray XT3 2.0.41 y y y y y n
+
+
+Platform Shared Shared Shared Thread-
+ C libs F90 libs C++ libs safe
+Solaris2.10 64-bit y y y y
+Solaris2.10 32-bit y y y y
+IRIX64_6.5 64-bit y y n y
+IRIX64_6.5 32-bit y dna y y
+Windows XP y y(15) y y
+Windows XP x64 y y(15) y y
+Windows Vista y n n y
+Mac OS X 10.3 y n
+FreeBSD 4.11 y n y y
+RedHat EL3 W (3) y y(10) y y
+RedHat EL3 W Intel (3) y y y n
+RedHat EL3 W PGI (3) y y y n
+SuSe x86_64 W GNU (3,12) y y y y
+SuSe x86_64 W Int (3,12) y y y n
+SuSe x86_64 W PGI (3,12) y y y n
+Linux 2.4 Xeon C
+ Lustre Intel (6) y y y n
+Linux 2.4 SuSE
+ ia64 C Intel (7) y y y n
+Linux 2.4 SGI Altix
+ ia64 Intel y n
+Linux 2.6 RHEL C
+ Lustre Intel (5) y y y n
+Cray XT3 2.0.41 n n n n n
+
+ Notes: (1) Using mpich2 1.0.6.
+ (2) Using mpich2 1.0.7.
+ (3) Linux 2.6 with GNU, Intel, and PGI compilers, as indicated.
+ W or C indicates workstation or cluster, respectively.
+ (4) Using mvapich2 0.9.8.
+ (5) Linux 2.6.9-42.0.10. Xeon cluster with ELsmp_perfctr_lustre
+ and Intel compilers
+ (6) Linux 2.4.21-32.0.1. Xeon cluster with ELsmp_perfctr_lustre
+ and Intel compilers
+ (7) Linux 2.4.21, SuSE_292.till. Ia64 cluster with Intel compilers
+ (8) pgf90
+ (9) With Compaq Visual Fortran 6.6c compiler.
+ (10) With PGI and Absoft compilers.
+ (11) PGI and Intel compilers for both C and Fortran
+ (12) AMD Opteron x86_64
+ (13) ifort
+ (14) Yes with C and Fortran, but not with C++
+ (15) Using Visual Studio 2005 or Cygwin
+ (16) Not tested for this release.
+ Compiler versions for each platform are listed in the preceding
+ "Platforms Tested" table.
+
+
+Known Problems
+==============
+* For Red Storm, a Cray XT3 system, the yod command sometimes gives the
+ message, "yod allocation delayed for node recovery". This interferes with
+ test suites that do not expect seeing this message. See the section of "Red
+ Storm" in file INSTALL_parallel for a way to deal with this problem.
+ AKC - 2008/05/28
+
+* For Red Storm, a Cray XT3 system, the tools/h5ls/testh5ls.sh script will
+  fail on the test "Testing h5ls -w80 -r -g tgroup.h5". This test is
+  expected to fail and exit with a non-zero code, but the yod command does
+  not propagate the exit code of the executables; yod always returns 0 if it
+  can launch the executable. The test suite shell expects a non-zero code for
+  this particular test, so it concludes the test has failed when it
+  receives 0 from yod. To bypass this problem for now, change the following
+  lines in tools/h5ls/testh5ls.sh.
+ ======== Original =========
+ # The following combination of arguments is expected to return an error message
+ # and return value 1
+ TOOLTEST tgroup-1.ls 1 -w80 -r -g tgroup.h5
+ ======== Skip the test =========
+ echo SKIP TOOLTEST tgroup-1.ls 1 -w80 -r -g tgroup.h5
+ ======== end of bypass ========
+ AKC - 2008/05/28
+
+* We have discovered two problems when running collective IO parallel HDF5
+ tests with chunking storage on the ChaMPIon MPI compiler on tungsten, a
+ Linux cluster at NCSA.
+
+ Under some complex selection cases:
+ 1) MPI_Get_element returns the wrong value.
+ 2) MPI_Type_struct also generates the wrong derived datatype and corrupt
+ data may be generated.
+ These issues arise only when turning on collective IO with chunking storage
+ with some complex selections. We have not found these problems on other
+ MPI-IO compilers. If you encounter these problems, you may use independent
+ IO instead.
+
+ To avoid this behavior, change the following line in your code
+ H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+ to
+ H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_INDEPENDENT);
+
+ KY - 2007/08/24
+
+* For SNL, spirit/liberty/thunderbird: The serial tests pass but parallel
+ tests failed with MPI-IO file locking message. AKC - 2007/6/25
+
+* On Intel 64 Linux cluster (RH 4, Linux 2.6.9) with Intel 10.0 compilers,
+ use -mp -O1 compilation flags to build the libraries. A higher level of
+ optimization causes failures in several HDF5 library tests.
+
+* For LLNL, uP: both serial and parallel tests pass.
+ Zeus: Serial tests pass but parallel tests fail with a known problem in MPI.
+ ubgl: Serial tests pass but parallel tests fail.
+
+* Configuring with --enable-debug=all produces compiler errors on most
+  platforms. Users who want to run HDF5 in debug mode should use
+  --enable-debug rather than --enable-debug=all to enable debugging
+  information on most modules.
+
+* On Mac OS 10.4, test/dt_arith.c has some errors in conversion from long
+ double to (unsigned) long long and from (unsigned) long long to long double.
+
+* On Altix SGI with Intel 9.0, testmeta.c would not compile with -O3
+ optimization flag.
+
+* On VAX, the Scaleoffset filter is not supported. The Scaleoffset filter
+ supports only the IEEE standard for floating-point data; it cannot be applied
+ to HDF5 data generated on VAX.
+
+* On Cray X1, a lone colon on the command line of h5dump --xml (as in
+  the testh5dumpxml.sh script) is misinterpreted by the operating system
+ and causes an error.
+
+* On mpich 1.2.5 and 1.2.6, if more than two processes contribute no IO and
+  the application asks to do collective IO, we have found that when using 4
+  processors, a simple collective write will sometimes hang. This can be
+  verified with the t_mpi test under testpar.
+
+* On IRIX6.5, when the C compiler version is greater than 7.4, complicated
+ MPI derived datatype code will work. However, the user should increase
+ the value of the MPI_TYPE_MAX environment variable to some appropriate value
+ to use collective irregular selection code. For example, the current
+ parallel HDF5 test needs to raise MPI_TYPE_MAX to 200,000 to pass the test.
+
+* A dataset created or rewritten with a v1.6.3 library or after cannot be read
+ with the v1.6.2 library or before when the Fletcher32 EDC filter is enabled.
+ There was a bug in the calculation of the Fletcher32 checksum in the
+ library before v1.6.3; the checksum value was not consistent between big-
+ endian and little-endian systems. This bug was fixed in Release 1.6.3.
+  However, after fixing the bug, the checksum value was no longer the same as
+  before on little-endian systems. Library releases after 1.6.4 can still read
+ datasets created or rewritten with an HDF5 library of v1.6.2 or before.
+ SLU - 2005/6/30
+
+* For version 6 (6.02 and 6.04) of the Portland Group compiler on the AMD
+  Opteron processor, there is a bug in the compiler for optimization (-O2).
+ The library failed in several tests, all related to the MULTI driver.
+ The problem has been reported to the vendor.
+
+* On IBM AIX systems, parallel HDF5 mode will fail some tests with error
+ messages like "INFO: 0031-XXX ...". This is from the command `poe'.
+ Set the environment variable MP_INFOLEVEL to 0 to minimize the messages
+ and run the tests again.
+
+ The tests may fail with messages like "The socket name is already in use",
+ but HDF5 does not use sockets. This failure is due to problems with the
+ poe command trying to set up the debug socket. To resolve this problem,
+ check to see whether there are many old /tmp/s.pedb.* files staying around.
+ These are sockets used by the poe command and left behind due to failed
+ commands. First, ask your system administrator to clean them out.
+ Lastly, request IBM to provide a means to run poe without the debug socket.
+
+* The --enable-static-exec configure flag fails to compile for Solaris
+ platforms. This is due to the fact that not all of the system libraries on
+ Solaris are available in a static format.
+
+ The --enable-static-exec configure flag also fails to correctly compile
+ on IBM SP2 platforms for serial mode. The parallel mode works fine with
+ this option.
+
+ It is suggested that you do not use this option on these platforms
+ during configuration.
+
+* With the gcc 2.95.2 compiler, HDF5 uses the `-ansi' flag during
+ compilation. The ANSI version of the compiler complains about not being
+ able to handle the `long long' datatype with the warning:
+
+ warning: ANSI C does not support `long long'
+
+ This warning is innocuous and can be safely ignored.
+
+* The ./dsets tests fail on the TFLOPS machine if the test program,
+ dsets.c, is compiled with the -O option. The HDF5 library still works
+ correctly with the -O option. The test program works fine if it is
+ compiled with -O1 or -O0. Only -O (same as -O2) causes the test
+ program to fail.
+
+* Not all platforms behave correctly with Szip's shared libraries. Szip is
+ disabled in these cases, and a message is relayed at configure time. Static
+ libraries should be working on all systems that support Szip and should be
+ used when shared libraries are unavailable.
+
+ There is also a configure error on Altix machines that incorrectly reports
+ when a version of Szip without an encoder is being used.
+
+* On some platforms that use Intel and Absoft compilers to build the HDF5
+ Fortran library, compilation may fail for fortranlib_test.f90, fflush1.f90
+ and fflush2.f90 complaining about the exit subroutine. Comment out the line
+ IF (total_error .ne. 0) CALL exit (total_error).
+
+* Information about building with PGI and Intel compilers is available in
+ the INSTALL file sections 4.7 and 4.8.
+
+* On at least one system, SDSC DataStar, the scheduler (in this case
+ LoadLeveler) sends job status updates to standard error when you run
+ any executable that was compiled with the parallel compilers.
+
+ This causes problems when running "make check" on parallel builds, as
+ many of the tool tests function by saving the output from test runs,
+ and comparing it to an exemplar.
+
+ The best solution is to reconfigure the target system so it no longer
+ inserts the extra text. However, this may not be practical.
+
+ In such cases, one solution is to "setenv HDF5_Make_Ignore yes" prior to
+ the configure and build. This will cause "make check" to continue after
+ detecting errors in the tool tests. However, in the case of SDSC DataStar,
+ it also leaves you with some 150 "failed" tests to examine by hand.
+
+ A second solution is to write a script to run serial tests and filter
+ out the text added by the scheduler. A sample script used on SDSC
+ DataStar is given below, but you will probably have to customize it
+ for your installation.
+
+ Observe that the basic idea is to insert the script as the first item
+  on the command line which executes the test. The script then
+ executes the test and filters out the offending text before passing
+ it on.
+
+ #!/bin/csh
+
+ set STDOUT_FILE=~/bin/serial_filter.stdout
+ set STDERR_FILE=~/bin/serial_filter.stderr
+
+ rm -f $STDOUT_FILE $STDERR_FILE
+
+ ($* > $STDOUT_FILE) >& $STDERR_FILE
+
+ set RETURN_VALUE=$status
+
+ cat $STDOUT_FILE
+
+ tail +3 $STDERR_FILE
+
+ exit $RETURN_VALUE
+
+  You get the HDF5 make files and test scripts to execute your filter script
+ by setting the environment variable "RUNSERIAL" to the full path of the
+ script prior to running configure for parallel builds. Remember to
+ "unsetenv RUNSERIAL" before running configure for a serial build.
+
+ Note that the RUNSERIAL environment variable exists so that we can
+ prefix serial runs as necessary on the target system. On DataStar,
+ no prefix is necessary. However on an MPICH system, the prefix might
+ have to be set to something like "/usr/local/mpi/bin/mpirun -np 1" to
+ get the serial tests to run at all.
+
+ In such cases, you will have to include the regular prefix in your
+ filter script.
+
+* H5Ocopy() does not copy reg_ref attributes correctly when the
+  shared-message feature is turned on. The value of the reference in the
+  destination attribute is wrong. This H5Ocopy problem will affect the
+  h5copy tool.
+
+* In the C++ API, it appears that there are bugs in Attribute::write/read
+  and DataSet::write/read for fixed- and variable-length strings. The problems
+ are being worked on and a patch will be provided when the fixes are
+ available.
+
+
+%%%%1.8.0%%%%
+
+
+HDF5 version 1.8.0 released on Tue Feb 12 20:41:19 CST 2008
+================================================================================
+
+INTRODUCTION
+============
+
+This document describes the differences between the HDF5-1.6.x release series
+and HDF5 1.8.0, and contains information on the platforms tested and known
+problems in HDF5-1.8.0. For more details, see the HISTORY-1_0-1_8_0_rc3.txt
+file in the release_docs/ directory of the HDF5 source.
+
+Links to the HDF5 1.8.0 source code, documentation, and additional materials
+can be found on the HDF5 web page at:
+
+ http://www.hdfgroup.org/products/hdf5/
+
+The HDF5 1.8.0 release can be obtained from:
+
+ http://www.hdfgroup.org/HDF5/release/obtain5.html
+
+User documentation for 1.8.0 can be accessed directly at this location:
+
+ http://www.hdfgroup.org/HDF5/doc/
+
+New features in 1.8.0, including brief general descriptions of some new
+and modified APIs, are described in the "What's New in 1.8.0?" document:
+
+ http://www.hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html
+
+All new and modified APIs are listed in detail in the "HDF5 Software Changes
+from Release to Release" document, in the section "Release 1.8.0 (current
+release) versus Release 1.6.x":
+
+ http://www.hdfgroup.org/HDF5/doc/ADGuide/Changes.html
+
+If you have any questions or comments, please send them to the HDF Help Desk:
+
+ help@hdfgroup.org
+
+
+CONTENTS
+========
+
+- New Features
+- Removed Feature
+- Support for new platforms and languages
+- Bug Fixes since HDF5-1.6.0
+- Platforms Tested
+- Supported Configuration Features Summary
+- Known Problems
+
+
+New Features
+============
+
+ HDF5 Release 1.8.0 is a major release with many changes and new features.
+
+ New format and interface features discussed in the "What's New in
+ HDF5 1.8.0" document include the following:
+
+ Enhanced group object management
+ Enhanced attribute management and more efficient meta data handling
+ Expanded datatype features
+ Creation order tracking and indexing
+ Improved meta data caching and cache control
+ UTF-8 encoding
+ New I/O filters: n-bit and scale+offset compression
+ New link (H5L) and object (H5O) interfaces and features
+ External and user-defined links
+ New high-level APIs:
+ HDF5 Packet Table (H5PT) and HDF5 Dimension Scale (H5DS)
+ C++ and Fortran interfaces for older high-level APIs:
+ H5Lite (H5LT), H5Image (H5IM), and H5Table (H5TB)
+ New and improved tools
+ And more...
+
+ http://hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html
+
+
+ New APIs associated with these features, other interface changes
+ (e.g., ENUM and struct definitions), and new library configuration flags
+ are listed in the "Release 1.8.0 (current release) versus Release 1.6.x"
+ section of "HDF5 Software Changes from Release to Release."
+
+ http://hdfgroup.org/HDF5/doc/ADGuide/Changes.html
+
+Compatibility
+-------------
+ Many HDF5 users and user communities have existing applications that
+ they may wish to port to Release 1.8.0. Alternatively, some users may
+ wish to take advantage of Release 1.8.0's improved performance without
+ having to port such applications. To facilitate managing application
+ compatibility and porting applications from release to release, the HDF
+ Team has implemented the following features:
+ Individually-configurable macros that selectively map common
+ interface names to the old and new interfaces
+ Library configuration options to configure the macro mappings
+
+ Two related documents accompany this release:
+ "API Compatibility Macros in HDF5" discusses the specifics of the
+ new individually-configurable macros and library configuration
+ options.
+ http://hdfgroup.org/HDF5/doc/RM/APICompatMacros.html
+
+ "New Features in HDF5 Release 1.8.0 and Backward/Forward Format
+ Compatibility Issues" discusses each new feature with regard to
+ its impact on format compatibility.
+ http://hdfgroup.org/HDF5/doc/ADGuide/CompatFormat180.html
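+
+  As an illustration of the macro mappings described above (this example is
+  not drawn from the referenced documents): the group-creation name H5Gcreate
+  is one of the mapped names and resolves to either the 1.6-style H5Gcreate1
+  or the 1.8-style H5Gcreate2, depending on the configured mapping. An
+  application that wants the new behavior regardless of the mapping can call
+  the versioned routine directly:
+
+      #include "hdf5.h"
+
+      /* Create a group with the explicit 1.8 routine, independent of how
+       * the compatibility macro H5Gcreate happens to be mapped. */
+      static hid_t create_group_18(hid_t file, const char *name)
+      {
+          return H5Gcreate2(file, name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+      }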
+
+Referenced documents
+--------------------
+ http://hdfgroup.org/HDF5/doc/ADGuide/WhatsNew180.html
+ "What's New in HDF5 1.8.0"
+
+ http://hdfgroup.org/HDF5/doc/ADGuide/Changes.html
+ The "Release 1.8.0 (current release) versus Release 1.6.x "
+ section in "HDF5 Software Changes from Release to Release"
+
+ http://hdfgroup.org/HDF5/doc/RM/APICompatMacros.html
+ "API Compatibility Macros in HDF5"
+
+ http://hdfgroup.org/HDF5/doc/ADGuide/CompatFormat180.html
+ "New Features in HDF5 Release 1.8.0 and Backward/Forward Format
+ Compatibility Issues"
+
+
+Removed Feature
+===============
+The stream virtual file driver (H5FD_STREAM) has been removed in this
+release. This affects the functions H5Pset_fapl_stream and H5Pget_fapl_stream
+and the constant H5FD_STREAM.
+
+This virtual file driver will be available at
+http://hdf5-addons.origo.ethz.ch/. Note that at the time of this release,
+the transition is still in progress; the necessary integration tools may
+not be available when HDF5 Release 1.8.0 first comes out.
+
+
+Support for New Platforms, Languages, and Compilers
+===================================================
+ - Support for Open VMS 7.3 was added.
+
+
+Bug Fixes since HDF5-1.6.0
+==========================
+ This release contains numerous bug fixes. For details, see the
+ "Changes from 1.6.0 to 1.8.0-rc3" section of the HISTORY.txt file for
+ this release.
+
+
+Platforms Tested
+================
+The following platforms and compilers have been tested for this release.
+
+ AIX 5.2 (32/64 bit) xlc 8.0.0.11
+ xlC 8.0
+ xlf 10.01.0000.0
+ mpcc_r 6.0.0.8
+ mpxlf_r 8.1.1.7
+
+ FreeBSD 6.2-STABLE i386 gcc 3.4.6 [FreeBSD] 20060305
+ (duty) g++ 3.4.6 [FreeBSD] 20060305
+ gcc 4.2.1 20080123
+ g++ 4.2.1 20080123
+ gfortran 4.2.1 20070620
+
+ FreeBSD 6.2-STABLE amd64 gcc 3.4.6 [FreeBSD] 20060305
+ (liberty) g++ 3.4.6 [FreeBSD] 20060305
+ gcc 4.2.1 20080123
+ g++ 4.2.1 20080123
+ gfortran 4.2.1 20080123
+
+ IRIX64 6.5 (64 & n32) MIPSpro cc 7.4.4m
+ F90 MIPSpro 7.4.4m
+ C++ MIPSpro cc 7.4.4m
+
+ Linux 2.6.9 (RHEL4) Intel 10.0 compilers
+ (abe.ncsa.uiuc.edu)
+
+ Linux 2.4.21-47 gcc 3.2.3 20030502
+ (osage)
+
+ Linux 2.6.9-42.0.10 gcc 3.4.6 20060404
+ (kagiso) PGI 7.0-7 (pgcc, pgf90, pgCC)
+ Intel 9.1 (icc, ifort, icpc)
+
+ Linux 2.6.16.27 x86_64 AMD gcc 4.1.0 (SuSE Linux), g++ 4.1.0,
+ (smirom) g95 (GCC 4.0.3)
+ PGI 6.2-5 (pgcc, pgf90, pgCC)
+ Intel 9.1 (icc, iort, icpc)
+
+ Linux 2.6.5-7.252.1-rtgfx #1 Intel(R) C++ Version 9.0
+ SMP ia64 Intel(R) Fortran Itanium(R) Version 9.0
+ (cobalt) SGI MPI
+
+ SunOS 5.8 32,46 Sun WorkShop 6 update 2 C 5.3
+ (Solaris 2.8) Sun WorkShop 6 update 2 Fortran 95 6.2
+ Sun WorkShop 6 update 2 C++ 5.3
+
+ SunOS 5.10 cc: Sun C 5.8
+ (linew) f90: Sun Fortran 95 8.2
+ CC: Sun C++ 5.8
+
+ Xeon Linux 2.4.21-32.0.1.ELsmp-perfctr-lustre
+ (tungsten) gcc 3.2.2 20030222
+ Intel(R) C++ Version 9.0
+ Intel(R) Fortran Compiler Version 9.0
+
+ IA-64 Linux 2.4.21.SuSE_292.til1 ia64
+ (NCSA tg-login) gcc 3.2.2
+ Intel(R) C++ Version 8.1
+ Intel(R) Fortran Compiler Version 8.1
+ mpich-gm-1.2.5..10-intel-r2
+
+ Windows XP Visual Studio .NET
+ Visual Studio 2005 w/ Intel Fortran 9.1
+ Cygwin(native gcc compiler and g95)
+ MinGW(native gcc compiler and g95)
+
+ Windows XP x64 Visual Studio 2005 w/ Intel Fortran 9.1
+
+ Windows Vista Visual Studio 2005
+
+ MAC OS 10.4 (Intel) gcc i686-apple-darwin8-gcc-4.0.1 (GCC) 4.0.1
+ G95 (GCC 4.0.3 (g95 0.91!) Nov 21 2006)
+
+ Alpha Open VMS 7.3 Compaq C V6.5-001-48BCD
+ HP Fortran V7.6-3276
+ Compaq C++ V6.5-004
+
+
+Supported Configuration Features Summary
+========================================
+
+ In the tables below
+ y = tested and supported
+ n = not supported or not tested in this release
+ x = not working in this release
+ dna = does not apply
+ ( ) = footnote appears below second table
+ <blank> = testing incomplete on this feature or platform
+
+Platform C F90 F90 C++ zlib SZIP
+ parallel parallel
+SunOS5.8 64-bit n y n y y y
+SunOS5.8 32-bit n y n y y y
+SunOS5.10 64-bit y(1) y n y y y
+SunOS5.10 32-bit y(1) y n y y y
+IRIX64_6.5 64-bit n y y y y y
+IRIX64_6.5 32-bit n n n n y y
+AIX-5.2 32-bit y y y y y y
+AIX-5.2 64-bit y y y y y y
+Windows XP n y(15) n(15) y y y
+Windows XP x64 n y(15) n(15) y y y
+Windows Vista n n n y y y
+Mac OS X 10.4 PowerPC n n
+Mac OS X 10.4 Intel n y n y y y
+FreeBSD 4.11 n n n y y y
+RedHat EL3 W (3) y(1a) y(10) y(1a) y y y
+RedHat EL3 W Intel (3) n y n y y n
+RedHat EL3 W PGI (3) n y n y y n
+SuSe x86_64 gcc (3,12) y(1a) y(11) n y y y
+SuSe x86_64 Int (3,12) n y(13) n y y n
+SuSe x86_64 PGI (3,12) n y(8) n y y y
+Linux 2.4 Xeon C
+ Lustre Intel (3,6) n y n y y n
+Linux 2.6 SuSE ia64 C
+ Intel (3,7) y y y y y n
+Linux 2.6 SGI Altix
+ ia64 Intel (3) y y y y y y
+Alpha OpenVMS 7.3.2 n y n y n n
+
+
+
+Platform Shared Shared Shared static- Thread-
+ C libs F90 libs C++ libs exec safe
+Solaris2.8 64-bit y y y x y
+Solaris2.8 32-bit y y y x y
+Solaris2.10 64-bit y x y
+Solaris2.10 32-bit y x y
+IRIX64_6.5 64-bit y y n y y
+IRIX64_6.5 32-bit y dna y y y
+AIX-5.2 & 5.3 32-bit n n n y n
+AIX-5.2 & 5.3 64-bit n n n y n
+Windows XP y y(15) y y y
+Windows XP x64 y y(15) y y y
+Windows Vista y n n y y
+Mac OS X 10.3 y y n
+FreeBSD 4.11 y n y y y
+RedHat EL3 W (3) y y(10) y y y
+RedHat EL3 W Intel (3) y y y y n
+RedHat EL3 W PGI (3) y y y y n
+SuSe x86_64 W GNU (3,12) y y y y y
+SuSe x86_64 W Int (3,12) y y y y(14) n
+SuSe x86_64 W PGI (3,12) y y y y(14) n
+Linux 2.4 Xeon C
+ Lustre Intel (6) y y y y n
+Linux 2.4 SuSE
+ ia64 C Intel (7) y y y y n
+Linux 2.4 SGI Altix
+ ia64 Intel y y n
+Alpha OpenVMS 7.3.2 n n n y n
+
+ Notes: (1) Using mpich 1.2.6.
+ (1a) Using mpich2 1.0.6.
+ (2) Using mpt and mpich 1.2.6.
+ (3) Linux 2.6 with GNU, Intel, and PGI compilers, as indicated.
+ W or C indicates workstation or cluster, respectively.
+
+ (6) Linux 2.4.21-32.0.1. Xeon cluster with ELsmp_perfctr_lustre
+ and Intel compilers
+ (7) Linux 2.4.21, SuSE_292.till. Ia64 cluster with Intel compilers
+ (8) pgf90
+ (9) With Compaq Visual Fortran 6.6c compiler.
+ (10) With PGI and Absoft compilers.
+ (11) PGI and Intel compilers for both C and Fortran
+ (12) AMD Opteron x86_64
+ (13) ifort
+ (14) Yes with C and Fortran, but not with C++
+ (15) Using Visual Studio 2005 or Cygwin
+ (16) Not tested for this release.
+ Compiler versions for each platform are listed in the preceding
+ "Platforms Tested" table.
+
+
+Known Problems
+==============
+* We have discovered two problems when running collective IO parallel HDF5
+ tests with chunking storage on the ChaMPIon MPI compiler on tungsten, a
+ Linux cluster at NCSA.
+
+ Under some complex selection cases:
+ 1) MPI_Get_elements returns the wrong value.
+ 2) MPI_Type_struct generates the wrong derived datatype, and corrupt
+ data may be written.
+ These issues arise only when collective IO is enabled with chunked storage
+ and certain complex selections. We have not seen these problems with other
+ MPI implementations. If you encounter these problems, you may use
+ independent IO instead.
+
+ To avoid this behavior, change the following line in your code
+ H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_COLLECTIVE);
+
+ to
+ H5Pset_dxpl_mpio(xfer_plist, H5FD_MPIO_INDEPENDENT);
+
+ KY - 2007/08/24
+
+* For SNL, spirit/liberty/thunderbird: The serial tests pass but parallel
+ tests failed with MPI-IO file locking message. AKC - 2007/6/25
+
+* On Intel 64 Linux cluster (RH 4, Linux 2.6.9) with Intel 10.0 compilers,
+ use -mp -O1 compilation flags to build the libraries. A higher level of
+ optimization causes failures in several HDF5 library tests.
+
+* For SNL, Red Storm: Only parallel HDF5 is supported. The serial tests pass
+ when run against the parallel library; the parallel tests also pass, but
+ with lots of non-fatal error messages.
+
+* For LLNL, uP: both serial and parallel tests pass.
+ Zeus: Serial tests pass but parallel tests fail with a known problem in MPI.
+ ubgl: Serial tests pass but parallel tests fail.
+
+* On SUN 5.10 C++, testing fails in the "Testing Shared Datatypes with
+ Attributes" test.
+
+* Configuring with --enable-debug=all produces compiler errors on most
+ platforms. Users who want to run HDF5 in debug mode should use
+ --enable-debug rather than --enable-debug=all to enable debugging
+ information on most modules.
+
+* On Mac OS 10.4, test/dt_arith.c has some errors in conversion from long
+ double to (unsigned) long long and from (unsigned) long long to long double.
+
+* On Altix SGI with Intel 9.0, testmeta.c would not compile with -O3
+ optimization flag.
+
+* On VAX, the Scaleoffset filter is not supported. The filter cannot be
+ applied to HDF5 data generated on VAX. The Scaleoffset filter only supports
+ the IEEE standard for floating-point data.
+
+* On Cray X1, a lone colon on the command line of h5dump --xml (as in
+ the testh5dumpxml.sh script) is misinterpreted by the operating system
+ and causes an error.
+
+* On mpich 1.2.5 and 1.2.6, if more than two processes contribute no IO and
+ the application asks to do collective IO, we have found that when using 4
+ processors, a simple collective write will sometimes hang. This can be
+ verified with the t_mpi test under testpar.
+
+* On IRIX6.5, when the C compiler version is greater than 7.4, complicated
+ MPI derived datatype code will work. However, the user should increase
+ the value of the MPI_TYPE_MAX environment variable to some appropriate value
+ to use collective irregular selection code. For example, the current
+ parallel HDF5 test needs to raise MPI_TYPE_MAX to 200,000 to pass the test.
+
+* A dataset created or rewritten with a v1.6.3 library or after cannot be read
+ with the v1.6.2 library or before when the Fletcher32 EDC filter is enabled.
+ There was a bug in the code that calculates the Fletcher32 checksum in the
+ library before v1.6.3; the checksum value was not consistent between big-
+ endian and little-endian systems. This bug was fixed in Release 1.6.3.
+ However, after fixing the bug, the checksum value was no longer the same as
+ before on little-endian systems. Library releases after 1.6.4 can still read
+ datasets created or rewritten with an HDF5 library of v1.6.2 or before.
+ SLU - 2005/6/30
+
+* For version 6 (6.02 and 6.04) of the Portland Group compiler on the AMD
+ Opteron processor, there is a bug in the compiler for optimization (-O2).
+ The library failed in several tests, all related to the MULTI driver.
+ The problem has been reported to the vendor.
+
+* On IBM AIX systems, parallel HDF5 mode will fail some tests with error
+ messages like "INFO: 0031-XXX ...". This is from the command `poe'.
+ Set the environment variable MP_INFOLEVEL to 0 to minimize the messages
+ and run the tests again.
+
+ The tests may fail with messages like "The socket name is already in use",
+ but HDF5 does not use sockets. This failure is due to problems with the
+ poe command trying to set up the debug socket. To resolve this problem,
+ check to see whether there are many old /tmp/s.pedb.* files staying around.
+ These are sockets used by the poe command and left behind due to failed
+ commands. First, ask your system administrator to clean them out.
+ Lastly, request IBM to provide a means to run poe without the debug socket.
+
+* The --enable-static-exec configure flag fails to compile for Solaris
+ platforms. This is due to the fact that not all of the system libraries on
+ Solaris are available in a static format.
+
+ The --enable-static-exec configure flag also fails to correctly compile
+ on IBM SP2 platform for the serial mode. The parallel mode works fine with
+ this option.
+
+ It is suggested that you do not use this option on these platforms
+ during configuration.
+
+* With the gcc 2.95.2 compiler, HDF5 uses the `-ansi' flag during
+ compilation. The ANSI version of the compiler complains about not being
+ able to handle the `long long' datatype with the warning:
+
+ warning: ANSI C does not support `long long'
+
+ This warning is innocuous and can be safely ignored.
+
+* The ./dsets tests fail on the TFLOPS machine if the test program,
+ dsets.c, is compiled with the -O option. The HDF5 library still works
+ correctly with the -O option. The test program works fine if it is
+ compiled with -O1 or -O0. Only -O (same as -O2) causes the test
+ program to fail.
+
+* Not all platforms behave correctly with Szip's shared libraries. Szip is
+ disabled in these cases, and a message is relayed at configure time. Static
+ libraries should be working on all systems that support Szip and should be
+ used when shared libraries are unavailable.
+
+ There is also a configure error on Altix machines that incorrectly reports
+ when a version of Szip without an encoder is being used.
+
+* On some platforms that use Intel and Absoft compilers to build the HDF5
+ Fortran library, compilation may fail for fortranlib_test.f90, fflush1.f90
+ and fflush2.f90 complaining about the exit subroutine. Comment out the line
+ IF (total_error .ne. 0) CALL exit (total_error).
+
+* Information about building with PGI and Intel compilers is available in
+ the INSTALL file sections 4.7 and 4.8.
+
+* On at least one system, SDSC DataStar, the scheduler (in this case
+ LoadLeveler) sends job status updates to standard error when you run
+ any executable that was compiled with the parallel compilers.
+
+ This causes problems when running "make check" on parallel builds, as
+ many of the tool tests function by saving the output from test runs,
+ and comparing it to an exemplar.
+
+ The best solution is to reconfigure the target system so it no longer
+ inserts the extra text. However, this may not be practical.
+
+ In such cases, one solution is to "setenv HDF5_Make_Ignore yes" prior to
+ the configure and build. This will cause "make check" to continue after
+ detecting errors in the tool tests. However, in the case of SDSC DataStar,
+ it also leaves you with some 150 "failed" tests to examine by hand.
+
+ A second solution is to write a script to run serial tests and filter
+ out the text added by the scheduler. A sample script used on SDSC
+ DataStar is given below, but you will probably have to customize it
+ for your installation.
+
+ Observe that the basic idea is to insert the script as the first item
+ on the command line which executes the test. The script then
+ executes the test and filters out the offending text before passing
+ it on.
+
+ #!/bin/csh
+
+ set STDOUT_FILE=~/bin/serial_filter.stdout
+ set STDERR_FILE=~/bin/serial_filter.stderr
+
+ rm -f $STDOUT_FILE $STDERR_FILE
+
+ ($* > $STDOUT_FILE) >& $STDERR_FILE
+
+ set RETURN_VALUE=$status
+
+ cat $STDOUT_FILE
+
+ tail +3 $STDERR_FILE
+
+ exit $RETURN_VALUE
+
+ You get the HDF5 make files and test scripts to execute your filter script
+ by setting the environment variable "RUNSERIAL" to the full path of the
+ script prior to running configure for parallel builds. Remember to
+ "unsetenv RUNSERIAL" before running configure for a serial build.
+
+ Note that the RUNSERIAL environment variable exists so that we can
+ prefix serial runs as necessary on the target system. On DataStar,
+ no prefix is necessary. However on an MPICH system, the prefix might
+ have to be set to something like "/usr/local/mpi/bin/mpirun -np 1" to
+ get the serial tests to run at all.
+
+ In such cases, you will have to include the regular prefix in your
+ filter script.
+
+* H5Ocopy() does not copy reg_ref attributes correctly when shared messages
+ are turned on. The value of the reference in the destination attribute is
+ wrong. This H5Ocopy problem will affect the h5copy tool.
+
diff --git a/release_docs/HISTORY-1_8_0-1_10_0.txt b/release_docs/HISTORY-1_8_0-1_10_0.txt
new file mode 100644
index 0000000..a364274
--- /dev/null
+++ b/release_docs/HISTORY-1_8_0-1_10_0.txt
@@ -0,0 +1,1742 @@
+HDF5 HISTORY
+=============
+
+
+INTRODUCTION
+
+This document describes the development history between the HDF5-1.8.0 and
+HDF5-1.10.0 releases. For more information, see the SVN log.
+
+Information about supported and tested platforms is provided for historical
+reasons only and may not be accurate.
+
+
+For more information, see the HDF5 home page:
+
+ http://www.hdfgroup.org/HDF5/
+
+If you have any questions or comments, please send them to the HDF Help Desk:
+
+ help@hdfgroup.org
+
+
+CONTENTS
+
+- New Features
+- Support for new platforms and languages
+- Bug Fixes since HDF5-1.8.0
+- Supported Platforms
+- Tested Configuration Features Summary
+- More Tested Platforms
+- Known Problems
+
+
+New Features
+============
+
+ Configuration:
+ -------------
+ - Java JNI library API wrappers and supporting files added as HDF_JAVA language
+ option. Both configure and CMake disable this option by default.
+ HDFFV-9552 (ADB 2016/02/28)
+ - CMake minimum is now 3.1.0. (ADB 2015/11/14)
+ - cmakehdf5: configure options added to enable or disable the building of
+ different APIs and tests. See "cmakehdf5 --help" for details.
+ (AKC - 2014/12/09 HDFFV-8932)
+ - Autotools: Automake updated to 1.14.1 (ADB - 2014/04/08)
+ - CMake: Moved minimum CMake version to 2.8.11 which enables better library
+ include processing. (ADB - 2014/03/26)
+ - New configuration option added to change the default plugin path.
+ configure option is --with-default-plugin=location
+ cmake option is -DH5_DEFAULT_PLUGINDIR:PATH=location
+ HDFFV-8513. (ADB 2013/09/04)
+ - Rename FFLAGS to FCFLAGS in configure (ADB 2013/08/13)
+ - CMake minimum is now 2.8.10. (ADB 2013/01/14)
+ - A new tool, cmakehdf5, which is a build command script similar to
+ buildhdf5 is added and is available in the bin directory.
+ (AKC - 2012/12/12)
+ - Fixed AIX Fortran compiler flags to use appropriate settings for
+ debugging, profiling, and optimization situations. HDFFV-8069. (AKC
+ 2012/09/27)
+ - Updated to latest autotools and changed all hard *.sh scripts to
+ configure managed *.sh.in files. Removed overloading of autotools
+ TESTS variable by examples and tests. Renamed configure.in to
+ configure.ac. (ADB - 2012/08/23 - HDFFV-8129)
+ - Added code to display the version information of XL fortran and C++
+ in the summary of configure. (AKC - 2012/02/28 - HDFFV-7793)
+ - Configure now generates Makefiles that build in "silent make mode"
+ by default in which compile and link lines are significantly
+ simplified for clarity. To override this and view actual compile and
+ link lines during building, the --disable-silent-rules flag can be used
+ at configure time, or the 'make' command can be followed by V=1, to
+ indicate a "verbose" make. (MAM - 2011/4/14).
+ - Added mpicc and mpif90 as the default C and Fortran compilers for Linux
+ systems when --enable-parallel is specified but no $CC or $FC is defined.
+ (AKC - 2011/2/7)
+ - Added a new configure option, "--enable-unsupported", which can
+ be used to stop configure from preventing the use of unsupported
+ configure option combinations, such as c++ in parallel or parallel
+ HDF5 with threadsafe. Use at your own risk, as it may result in a
+ library that won't compile or run as expected!
+ (MAM - 2010/11/17 - Bug 2061)
+ - PHDF5 changed to use "mpiexec", instead of mpirun, as the default MPI
+ applications startup command as defined in the MPI-2 definition, section
+ 4.1. (AKC - 2010/6/11 - Bug 1921)
+ - Configure now adds appropriate defines for supporting large (64-bit)
+ files on all systems, where supported, by default, instead of only linux.
+ This largefile support is controllable with the --enable-largefile
+ configure option. This is replacing the linux-specific --enable-linux-lfs
+ option, which has been removed from configure.
+ (MAM - 2010/05/05 - 1772/1434)
+ - Upgraded versions of autotools used to generate configuration suite.
+ We now use Automake 1.11.1, Autoconf 2.65, and Libtool 2.2.6b.
+ MAM 2010/04/15.
+ - Added the xlc-* and mpcc_r-* BASENAME patterns to be recognized as IBM
+ compilers so that the ibm compiler options can be added properly. This
+ allows non-system-default compiler command names (e.g. xlc-m.n.k.l) to be
+ recognized. AKC 2009/11/26.
+ - Configuration suite now uses Automake 1.11 and Autoconf 2.64.
+ MAM 2009/08/11.
+ - Changed default Gnu fortran compiler from g95 to gfortran since
+ gfortran is more likely installed with gcc now. -AKC 2009/07/19-
+ - Added libtool version numbers to generated c++, fortran, and
+ hl libraries. MAM 2009/04/19.
+ - Regenerated Makefile.ins using Automake 1.10.2. MAM 2009/04/19.
+ - Added a Make target of check-all-install to test the correctness of
+ installing via the prefix= or $DESTDIR options. AKC - 2009/04/14
+ - Configuration suite now uses Libtool 2.2.6a. MAM 2008/10/24
+
+ - Configuration suite now uses Autoconf 2.61, Automake 1.10.1.
+ MAM 2008/05/05.
+
+ - The new configure option "--disable-sharedlib-rpath" disables
+ embedding the '-Wl,-rpath' information into executables when
+ shared libraries are produced, and instead solely relies on the
+ information in LD_LIBRARY_PATH. (MAM - 2008/05/15)
+
+ Library:
+ --------
+
+ - Virtual Dataset feature was added
+ (NAF - 2015-10-05, VDS-193)
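+
+ The new layout can be set up with H5Pset_virtual(); the following is a
+ minimal sketch in C, in which the file names ("src.h5", "vds.h5"), the
+ source dataset "/A", and the 100-element extent are placeholders only:
+
+     #include "hdf5.h"
+
+     /* Create "vds.h5" whose dataset "VDS" maps the whole of "/A" in "src.h5". */
+     static void make_vds_example(void)
+     {
+         hsize_t dims[1]   = {100};
+         hid_t   vspace    = H5Screate_simple(1, dims, NULL);  /* VDS extent    */
+         hid_t   src_space = H5Screate_simple(1, dims, NULL);  /* source extent */
+         hid_t   dcpl      = H5Pcreate(H5P_DATASET_CREATE);
+         hid_t   file, vds;
+
+         H5Pset_virtual(dcpl, vspace, "src.h5", "/A", src_space);
+
+         file = H5Fcreate("vds.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+         vds  = H5Dcreate2(file, "VDS", H5T_NATIVE_INT, vspace,
+                           H5P_DEFAULT, dcpl, H5P_DEFAULT);
+
+         H5Dclose(vds); H5Fclose(file); H5Pclose(dcpl);
+         H5Sclose(src_space); H5Sclose(vspace);
+     }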
+
+ - H5F_ACC_DEBUG labeled "deprecated"
+
+ The symbol was originally used to emit some extra debugging
+ information in the multi VFD. The underlying functionality
+ was removed due to disuse in HDF5 1.8.16 though the symbol
+ remained defined since it was visible in H5Fpublic.h.
+
+ In this release, the symbol has been labeled deprecated and will
+ not be defined when H5_NO_DEPRECATED_SYMBOLS is defined.
+
+ (DER - 2015-04-30, HDFFV-1074)
+
+ - The library can load filter libraries dynamically during runtime. Users
+ can set the search path through environment variable HDF5_PLUGIN_PATH
+ and call H5Pset_filter to enable a dynamic filter. (SLU - 2013/04/08)
+ - Added new API functions H5Dscatter and H5Dgather to scatter data to and
+ gather data from a selection within a memory buffer.
+ (NAF - 2013/02/05)
+ - The library now supports the data conversion from enumeration to numeric
+ (integer and floating-point number) datatypes. See Issue 8221.
+ (SLU - 2012/10/23)
+ - The data sieve buffer size used to apply to all the datasets in the file. It
+ could waste memory if any dataset size is smaller than the sieve buffer
+ size. Now the library picks the smaller of the dataset size
+ and the sieve buffer size from the file access property list. See Issue 7934.
+ (SLU - 2012/4/2)
+ - Added a new object access property list parameter to the function
+ H5Rdereference (Issue 2763); it is called H5Rdereference2 now. The former
+ H5Rdereference function has been deprecated to H5Rdereference1. (SLU -
+ 2011/7/18)
+ - H5Tcreate now supports string type (fixed-length and variable-length).
+ (SLU - 2011/05/20)
+ - Added ability to cache files opened through external links. Added new
+ public functions H5Pset_elink_file_cache_size(),
+ H5Pget_elink_file_cache_size(), and H5Fclear_elink_file_cache().
+ (NAF - 2011/02/17)
+ - Removed all old code for Metrowerks compilers (bracketed by
+ __MWERKS__). The Metrowerks compiler is long gone. (AKC - 2010/11/17)
+ - Added support for threadsafety on windows using the windows threads
+ library. Use the HDF5_ENABLE_THREADSAFE option in CMake while on a
+ windows platform to enable this functionality. This is supported on
+ Windows Vista and newer Windows operating systems. (MAM - 2010/09/10)
+ - When a mandatory filter failed to write data chunks, the dataset
+ couldn't close (bug 1260). The fix releases all resources and closes
+ the dataset but returns a failure. (SLU - 2010/9/8)
+ - H5Tset_order and H5Tget_order now support all data types. A new byte
+ order H5T_ORDER_MIXED has been added specifically for compound datatype
+ and its derived type. Please see bug #1934. (SLU - 2010/8/23)
+ - Improved performance of the chunk cache by avoiding unnecessary b-tree
+ lookups of chunks already in cache. (NAF - 2010/06/15)
+ - Greatly improved performance of extending a dataset with early
+ allocation. (NAF - 2010/03/24 - 1637)
+ - Added support for filtering densely stored groups. Many of the API
+ functions related to filters have been extended to support dense groups
+ as well as datasets. Pipeline messages can now be stored in a group's
+ object header. (NAF/QAK - 2009/10/8)
+ - The embedded library information is displayed by H5check_version() if a
+ version mismatch is detected. Also changed H5check_version() to
+ suppress the warning message totally if $HDF5_DISABLE_VERSION_CHECK is 2
+ or higher. (The old behavior treated 3 or higher the same as 1, that is,
+ it printed a warning and allowed the program to continue.) (AKC - 2009/9/28)
+ - If a user does not care for the extra library information inserted
+ in the executables, it may be turned off with --disable-embedded-libinfo
+ during configure. (AKC - 2009/9/15)
+ - Corrected problem where library would re-write the superblock in a file
+ opened for R/W access, even when no changes were made to the file.
+ (QAK - 2009/08/20, Bz#1473)
+ - Separated "factory" free list class from block free lists. These free
+ lists are dynamically created and manage blocks of a fixed size.
+ H5set_free_list_limits() will use the same settings specified for block
+ free lists for factory free lists. (NAF - 2009/04/08)
+ - Added support for dense attributes to H5Ocopy. (XCao/NAF - 2009/01/29)
+ - Added H5Pset_elink_cb and H5Pget_elink_cb functions to support a
+ user-defined callback function for external link traversal.
+ (NAF - 2009/01/08)
+ - Added H5Pset_elink_acc_flags and H5Pget_elink_acc_flags functions to
+ allow the user to specify the file access flags used to open the target
+ file of an external link. (NAF - 2009/01/08)
+ - Added H5Pset_chunk_cache() and H5Pget_chunk_cache() functions to allow
+ individual raw data chunk cache (rdcc) configuration for each dataset. Added
+ the H5Dget_access_plist() function to retrieve a dataset access property
+ list from a dataset; a short usage sketch appears after this list.
+ (NAF - 2008/11/12)
+ - Added H5Iis_valid() function to check if an id is valid without producing
+ an error message. (NAF - 2008/11/5)
+ - Added two new public routines: H5Pget_elink_fapl() and
+ H5Pset_elink_fapl(). (see bug #1247) (VC - 2008/10/13)
+ - Improved free space tracking in file to be faster. (QAK - 2008/10/06)
+ - Added 'mounted' field to H5G_info_t struct. (QAK - 2008/07/15)
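+
+ As an illustration of the per-dataset chunk cache entry above
+ (H5Pset_chunk_cache / H5Dget_access_plist), a minimal C sketch; the
+ dataset name "/data", the slot count, and the 32 MiB cache size are
+ example values only:
+
+     #include "hdf5.h"
+
+     /* Open "/data" with a private chunk cache, then read the effective
+      * settings back through the dataset access property list. */
+     static void chunk_cache_example(hid_t file_id)
+     {
+         size_t nslots, nbytes;
+         double w0;
+         hid_t  dapl = H5Pcreate(H5P_DATASET_ACCESS);
+         hid_t  dset, dapl_used;
+
+         H5Pset_chunk_cache(dapl, 12421, 32 * 1024 * 1024, 0.75);
+         dset = H5Dopen2(file_id, "/data", dapl);
+
+         dapl_used = H5Dget_access_plist(dset);
+         H5Pget_chunk_cache(dapl_used, &nslots, &nbytes, &w0);
+
+         H5Pclose(dapl_used); H5Pclose(dapl); H5Dclose(dset);
+     }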
+
+ Parallel Library:
+ -----------------
+ - Add H5Pget_mpio_no_collective_cause() function that retrieves the reasons
+ why the collective I/O was broken during read/write IO access; a short
+ usage sketch appears after this list.
+ (JKM - 2012/08/30 HDFFV-8143)
+ - Special Collective IO (IO when some processes do not contribute to the
+ IO) and Complex Derived Datatype MPI functionalities are no longer
+ conditionally enabled in the library by configure. They are always
+ enabled in order to take advantage of performance boosts from these
+ behaviors. Older MPI implementations that do not allow for these
+ functionalities can no longer be used by HDF5. (MAM - 2011/07/08).
+ - Modified parallel tests to run with arbitrary number of processes. The
+ modified tests are testphdf5 (parallel dataset access), t_chunk_alloc
+ (chunk allocation), and t_posix_compliant (posix compliance). The rest of
+ the parallel tests already use in the code the number of processes
+ available in the communicator. (CMC - 2009/04/28)
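+
+ A minimal C sketch of the H5Pget_mpio_no_collective_cause() entry above;
+ it assumes a parallel (MPI) build of the library and an already opened
+ dataset, dataspaces, and buffer:
+
+     #include <stdio.h>
+     #include "hdf5.h"
+
+     /* Request collective I/O and report why it was broken, if it was. */
+     static void report_collective_cause(hid_t dset, hid_t mspace,
+                                         hid_t fspace, const int *buf)
+     {
+         uint32_t local_cause = 0, global_cause = 0;
+         hid_t    dxpl = H5Pcreate(H5P_DATASET_XFER);
+
+         H5Pset_dxpl_mpio(dxpl, H5FD_MPIO_COLLECTIVE);
+         H5Dwrite(dset, H5T_NATIVE_INT, mspace, fspace, dxpl, buf);
+
+         H5Pget_mpio_no_collective_cause(dxpl, &local_cause, &global_cause);
+         if (global_cause != H5D_MPIO_COLLECTIVE)  /* 0 means fully collective */
+             printf("collective I/O broken, cause mask 0x%x\n",
+                    (unsigned)global_cause);
+
+         H5Pclose(dxpl);
+     }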
+
+ Fortran Library:
+ ----------------
+
+ - Added parallel routine H5Pget_mpio_actual_io_mode_f (MSB - 2012/09/27)
+
+ - Added for the C API the Fortran wrapper:
+ h5ocopy_f (MSB - 2012/03/22)
+
+
+ The HDF5 Fortran library was enhanced to support the Fortran 2003 standard.
+ The following features are available when the HDF5 library is configured
+ using --enable-fortran --enable-fortran2003 configure flags AND
+ the Fortran compiler is Fortran 2003 compliant:
+
+ - Subroutines overloaded with the C_PTR derived type:
+ h5pget_f
+ h5pget_fill_value_f
+ h5pinsert_f
+ h5pregister_f
+ h5pset_f
+ h5pset_fill_value_f
+ h5rcreate_f
+ h5rdereference_f
+ h5rget_name_f
+ h5rget_obj_type_f
+ - Subroutines overloaded with the C_PTR derived type
+ and simplified signatures:
+ h5aread_f
+ h5awrite_f
+ h5dread_f
+ h5dwrite_f
+ - New subroutines
+ h5dvlen_reclaim_f
+ h5literate_by_name_f
+ h5literate_f
+ h5ovisit_f
+ h5tconvert_f
+
+ - Subroutines with additional optional parameters:
+ h5pcreate_class_f
+ (EIP - 2011/10/14)
+
+ - Added for the C APIs the Fortran wrappers:
+ h5dget_access_plist_f
+ h5iis_valid_f
+ h5pset_chunk_cache_f
+ h5pget_chunk_cache_f
+ (MSB - 2009/04/17)
+
+
+
+ C++ Library:
+ ------------
+ - New member function added
+
+ The assignment operator ArrayType::operator= is added because ArrayType
+ has pointer data members.
+
+ (BMR, 2016/03/07, HDFFV-9562)
+
+ - New member functions
+ + Overloaded CommonFG::getObjnameByIdx to take char* for name
+ + Overloaded CommonFG::getObjTypeByIdx to return type name as a char*.
+ (BMR - 2010/05/02)
+ + DataSet::getInMemDataSize() to simplify getting the dataset's
+ data size in memory. (BMR - 2009/07/26)
+ - These member functions were added as wrapper for H5Rdereference to
+ replace the incorrect IdComponent::dereference().
+ void H5Object::dereference(H5File& h5file, void* ref)
+ void H5Object::dereference(H5Object& obj, void* ref)
+ In addition, these constructors were added to create the associated
+ objects by way of dereference:
+ Attribute(H5Object& obj, void* ref);
+ Attribute(H5File& file, void* ref);
+ DataSet(H5Object& obj, void* ref);
+ DataSet(H5File& file, void* ref);
+ DataType(H5Object& obj, void* ref);
+ DataType(H5File& file, void* ref);
+ Group(H5Object& obj, void* ref);
+ Group(H5File& obj, void* ref);
+ (BMR - 2008/08/10)
+
+
+
+ Tools:
+ ------
+ - h5repack: Added ability to use plugin filters. HDFFV-8345 (ADB - 2013/09/04).
+ - h5dump: Added option -N --any_path, which searches the file for paths that
+ match the search path. HDFFV-7989 (ADB - 2013/08/12).
+ - h5dump: Added optional arg 0 to -A, which excludes attributes from display.
+ HDFFV-8134 (ADB - 2013/08/01).
+ - h5dump: Fixed displaying compression ratio for unknown or user-defined
+ filters. HDFFV-8344 (XCAO 2013/03/19)
+ - h5dump: Changed UNKNOWN_FILTER to USER_DEFINED_FILTER for user defined filter.
+ HDFFV-8346 (XCAO 2013/03/19)
+ - h5dump: Added capability for "-a" option to show attributes containing "/"
+ by using an escape character. For example, for a dataset "/dset"
+ containing attribute "speed(m/h)", use h5dump -a "/dset/speed(m\/h)"
+ to show the content of the attribute. See details at HDFFV-7523
+ (PC -- 2012/03/12)
+ - h5dump: Added ability to apply command options across multiple files using a
+ wildcard in the filename. Example: "h5dump -H -d Dataset1 tarr*.h5".
+ HDFFV-7876 (ADB - 2012/03/12).
+ - h5repack: Improved performance for big chunked datasets (size > 128MB)
+ when used with layout (-l) or compression (-f) option.
+ Performance was much worse prior to the improvement,
+ especially for cases where the chunk dimensions look like
+ "1024x5x1" (compared to "1x5x1024"), i.e. when bigger numbers
+ are toward the front and smaller numbers toward the back of the chunk
+ dimensions. HDFFV-7862 (JKM - 2012/03/01)
+ - h5dump: Added new option --no-compact-subset. This option will not
+ interpret the '[' character as starting the compact form of
+ subsetting. This is useful when the "h5dump error: unable to
+ open dataset "dataset_name"" message is output because a dataset
+ name contains a '[' character. HDFFV-7689 (ADB - 2012/01/31)
+ - h5dump: Corrected schema location:
+ <hdf5:HDF5-File
+ xmlns:hdf5="http://hdfgroup.org/HDF5/XML/schema/HDF5-File"
+ xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
+ xsi:schemaLocation="http://hdfgroup.org/HDF5/XML/schema/HDF5-File
+ http://www.hdfgroup.org/HDF5/XML/schema/HDF5-File.xsd">
+ (ADB - 2011/08/10)
+ - h5diff: Added new level for -v (verbose) option. The new levels are
+ 1 and 2. So -v1 and -v2 can be specified to view more
+ information about attribute differences.
+ Bug#2121 (JKM 2011/3/23)
+ - h5dump: Added new option --enable-error-stack. This option will display
+ error stack information in the output stream. This is useful
+ when the "h5dump: Unable to print data" message is output.
+ (ADB - 2011/02/24)
+ - h5diff: Add a new flag --exclude-path. The specified path to an object will
+ be excluded when comparing the two files or two groups. If a group
+ is specified, all of its member objects will be excluded.
+ (JKM - 2010/09/16).
+ - h5ls: Add new flag --no-dangling-links. (refer to --help for details)
+ (JKM - 2010/06/15)
+ - h5ls: Add new flag --follow-symlinks. (refer to --help for details)
+ (JKM - 2010/05/25)
+ - h5diff: Add new flag --no-dangling-links. (refer to --help for details)
+ (JKM - 2010/02/10)
+ - h5diff: Add new flag --follow-symlinks. (refer to --help for details)
+ (JKM - 2010/01/25)
+ - h5diff: fix for displaying garbage values on LE machines for BE data.
+ (JKM - 2009/11/20)
+ - h5dump: subsetting now allows a default for count. Also, the trailing ; in the
+ short form can be omitted after the last specified value.
+ (ADB - 2009/09/04)
+ - h5dump/h5ls: now can display data in region references
+ using new -R, --region flag.
+ (ADB - 2009/09/04)
+ - h5diff: new flag, -c, --compare, list objects that are not comparable.
+ (PVN - 2009/4/10 - 1368)
+ - h5diff new flag, -N, --nan, avoids NaNs detection. (PVN - 2009/4/10)
+ - h5dump correctly specifies XML dtd / schema urls (ADB - 2009/4/3 - 1519)
+ - h5repack now handles group creation order. (PVN - 2009/4/2 - 1402)
+ - h5dump: added printing of the compression ratio of uncompressed to compressed
+ sizes for cases where compression filters are present. (PVN - 2008/05/01)
+ - h5dump: added an option to allow a user-defined printf formatting string
+ for floating point numbers. (PVN - 2008/05/06)
+ - h5dump: support for external links, display the object that the external link
+ points to. (PVN - 2008/05/12)
+ - h5repack: add a userblock to an HDF5 file during the repack. (PVN - 2008/08/26)
+ - h5repack: add 2 options that call H5Pset_alignment in the repacked file. (PVN - 2008/08/29)
+ - h5ls: added capability to traverse through external links when the -r
+ (recursive) flag is given. (NAF - 2008/09/16)
+ - h5ls: added -E option to enable traversal of external links. h5ls will
+ not traverse external links without this flag being set.
+ (NAF - 2008/10/06)
+ - h5diff: added support for long double (PVN - 2008/10/28)
+ - h5dump: binary output defaults to NATIVE with -b optionally accepting
+ the form of binary output (NATIVE, FILE, BE, LE). (PVN - 2008/10/30)
+ - h5diff: return 1 for file differences when both file graphs differ by any object.
+ Error return code was changed to 2 from -1. (PVN - 2008/10/30)
+ - h5import: TEXTFPE (scientific format) was deprecated. Use TEXTFP
+ instead (PVN - 2008/10/30)
+ - h5repack: When the user doesn't specify a chunk size, h5repack now defines a
+ default chunk size equal to the size of the hyperslab used to read the chunks.
+ The size of the hyperslab is defined as the size of each dimension or a
+ predefined constant, whichever is smaller. This assures that the chunk
+ read fits in the chunk cache. (PVN - 2008/11/21)
+ - h5diff: h5diff treated two INFINITY values as different. Fixed by checking (value==expect)
+ before calling ABS(...) in h5diff_array.c. This makes (INF==INF) true
+ (INF is treated as a number instead of NaN). (PC -- 2009/07/28)
+ - h5diff: add option "--use-system-epsilon" to print a difference if (|a-b| > EPSILON).
+ Changed the default to use strict equality. (PC -- 2009/09/12)
+
+
+ High-Level APIs:
+ ---------------
+
+ C Packet Table API
+ ------------------
+ - Replacement of a public function
+
+ The existing function H5PTcreate_fl limits applications to deflate
+ compression only. The public function H5PTcreate is added to replace
+ H5PTcreate_fl. H5PTcreate takes a property list ID to provide
+ flexibility on creation properties.
+
+ hid_t H5PTcreate(hid_t loc_id, const char *dset_name,
+ hid_t dtype_id, hsize_t chunk_size, hid_t plist_id);
+ (BMR, 2016/03/04, HDFFV-8623)
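+
+ For example, a packet table with a deflate creation property might be
+ set up as in the following C sketch; the file handle, the table name
+ "packets", the chunk size, and the compression level are placeholders:
+
+     #include "hdf5.h"
+     #include "hdf5_hl.h"
+
+     /* Create a compressed packet table of ints and append two records. */
+     static void packet_table_example(hid_t file_id)
+     {
+         int   values[2] = {1, 2};
+         hid_t plist = H5Pcreate(H5P_DATASET_CREATE);
+         hid_t table;
+
+         H5Pset_deflate(plist, 6);    /* creation property: gzip level 6 */
+         table = H5PTcreate(file_id, "packets", H5T_NATIVE_INT, 512, plist);
+
+         H5PTappend(table, 2, values);
+         H5PTclose(table);
+         H5Pclose(plist);
+     }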
+
+ - New public functions
+
+ Two accessor functions were added per HDFFV-8623/patch 003.
+ /* Returns the ID of the dataset associated with the packet table */
+ hid_t H5PTget_dataset(hid_t table_id);
+
+ /* Returns the ID of the datatype the packet table uses */
+ hid_t H5PTget_type(hid_t table_id);
+ (BMR, 2016/03/04, HDFFV-8623)
+
+ - Regarding #ifdef VLPT_REMOVED
+
+ The #ifdef VLPT_REMOVED blocks are removed from the PT library source
+ except the following cases:
+ + H5PTis_varlen() is made available again.
+ + H5PTfree_vlen_readbuff() now became H5PTfree_vlen_buff()
+ (BMR, 2016/03/04, HDFFV-442)
+
+ C++ Packet Table API
+ --------------------
+ - New constructor
+
+ An overloaded constructor is added to FL_PacketTable and takes a property
+ list ID to provide flexibility on creation properties.
+
+ FL_PacketTable(hid_t fileID, hid_t plist_id, const char* name, hid_t dtypeID, hsize_t chunkSize);
+ (BMR, 2016/03/08, HDFFV-8623)
+
+ - New public functions
+
+ Two accessor wrappers to class PacketTable, per HDFFV-8623/patch 004.
+ /* Returns the ID of the dataset associated with the packet table */
+ hid_t PacketTable::GetDataset()
+
+ /* Returns the ID of the datatype the packet table uses */
+ hid_t PacketTable::GetDatatype()
+ (BMR, 2016/03/04, HDFFV-8623)
+
+ - Member functions having "char*" as an argument
+
+ Overloaded functions were added to provide a "const char*" argument; the
+ existing versions will be deprecated.
+ (BMR, 2016/03/04)
+
+ - Regarding #ifdef VLPT_REMOVED
+
+ The #ifdef VLPT_REMOVED blocks are removed from the PT library source
+ except the following cases:
+ + VL_PacketTable::IsVariableLength() is moved to PacketTable
+ + VL_PacketTable::FreeReadBuff() now became PacketTable::FreeBuff()
+
+ (BMR, 2016/03/04, HDFFV-442)
+
+
+ Internal header file
+ --------------------
+ - A new API function, H5DOwrite_chunk, writes a data chunk directly
+ into a file, bypassing hyperslab selection, data conversion, and the
+ filter pipeline. The user must be careful with this function and
+ clearly understand the I/O process of the library; a minimal sketch
+ follows this list.
+ (SLU - 2013/2/11)
+ - New API: h5ltpath_valid (Fortran: h5ltpath_valid_f), which checks
+ whether a path is correct, determines whether a link resolves to a valid
+ object, and verifies that the link does not dangle. (MSB - 2012/3/15)
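+
+ A minimal C sketch of the H5DOwrite_chunk call described above; the
+ dataset, chunk buffer, and byte count are assumed to exist and to match
+ the dataset's chunk layout:
+
+     #include "hdf5.h"
+     #include "hdf5_hl.h"    /* H5DOwrite_chunk is declared here */
+
+     /* Write one chunk of raw bytes at logical offset {0, 0}, bypassing
+      * data conversion and the filter pipeline. */
+     static void write_chunk_example(hid_t dset, const void *chunk_buf,
+                                     size_t chunk_nbytes)
+     {
+         hsize_t  offset[2]   = {0, 0};
+         uint32_t filter_mask = 0;    /* 0 = record all filters as applied */
+
+         H5DOwrite_chunk(dset, H5P_DEFAULT, filter_mask, offset,
+                         chunk_nbytes, chunk_buf);
+     }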
+
+ - Added Fortran wrappers for Dimension Scale APIs. HDFFV-3797
+ h5dsset_scale_f
+ h5dsattach_scale_f
+ h5dsdetach_scale_f
+ h5dsis_attached_f
+ h5dsis_scale_f
+ h5dsset_label_f
+ h5dsget_label_f
+ h5dsget_scale_name_f
+ h5dsget_num_scales_f
+ (EIP for SB - 2011/10/13)
+
+ - Table: In version 3.0 of Table, "NROWS" (used to store number of records) was
+ deprecated (PVN - 2008/11/24)
+
+ Documentation
+ -------------
+
+Support for new platforms, languages and compilers
+===================================================
+ - Intel V11.1 now uses -O3 optimization in production mode (EIP - 2010/10/08)
+ - PathScale compilers are recognized and can build the HDF5 library
+ properly. AKC - 2009/7/28 -
+ - SunOS 5.11 (emu) 32-bit and 64-bit with Sun C/C++ 5.12 compiler and
+ Sun Fortran 95 8.6 compiler. (SLU - 2013/04/15)
+
+Bug Fixes since HDF5-1.8.0 release
+==================================
+
+ Library
+ -------
+ - Incorrect usage of list in CMake COMPILE_DEFINITIONS set_property
+
+ The CMake command, set_property with COMPILE_DEFINITIONS property
+ needs a quoted semi-colon separated list of values. CMake will
+ transform the list to a series of -D{value} for the compile.
+
+ (ADB - 2014/12/09, HDFFV-9041)
+
+ - H5Z.c: H5Zfilter_avail(H5Z_filter_t id)
+ Added else block if the call to the internal H5Z_filter_avail(id) does not
+ fail and returns FALSE. This block calls the H5PL_load(H5PL_TYPE_FILTER, (int)id)
+ function to attempt to dynamically load the filter plugin.
+ (ADB - 2014/03/03 HDFFV-8629)
+ - Added const qualifier to source buffer parameters in H5Dgather and
+ H5D_scatter_func_t (H5Dscatter callback). (NAF - 2013/7/02)
+ - Fixed an error involving failure to write fill values to the user's
+ buffer when reading unallocated chunks from datasets that have a
+ fill value set to H5D_FILL_VALUE_DEFAULT. A consequence of this
+ was the reporting of spurious data values in h5dump and h5diff
+ output.
+ (HDFFV-8247; JP - 2013/05/03)
+ - Fixed an error that could occur when calling H5Ocopy within an
+ H5Literate callback (and possibly other situations).
+ (NAF - 2012/7/25 - HDFFV-5853)
+ - Fixed an error that would occur when copying an object with attribute
+ creation order tracked and indexed. (NAF - 2012/3/28 - HDFFV-7762)
+ - Fixed a bug in H5Ocopy(): When copying an opened object, call the
+ object's flush class action to ensure that cached data is flushed
+ so that H5Ocopy will get the correct data.
+ (VC - 2012/3/27 - HDFFV-7853)
+ - When an application tried to write or read many small data chunks and
+ ran out of memory, the library had a seg fault. The fix is to
+ return the error stack with proper information. (SLU - 2012/3/23.
+ Issue 7785)
+ - H5Pset_data_transform had a seg fault in some cases, such as x*-100. It
+ works correctly now and handles other cases like 100-x or 2/x.
+ (SLU - 2012/3/15. Issue 7922)
+ - Fixed rare corruption bugs that could occur when using the new object
+ header format. (NAF - 2012/3/15 - HDFFV-7879)
+ - Creating a dataset in a read-only file caused a seg fault when the file
+ was closed. It's fixed. The attempt to create a dataset will fail
+ with the error stack indicating the file is read-only. (SLU -
+ 2012/1/25. Issue 7756)
+ - Fixed a seg fault that could occur when shrinking a dataset with chunks
+ larger than 1 MB. (NAF - 2011/11/30 - HDFFV-7833)
+ - Fixed a bug that could cause file corruption when copying named
+ datatypes to a file using shared messages. (NAF - 2011/11/14)
+ - Fixed a bug that could cause H5Oget_info to return the wrong address
+ after copying a named datatype. (NAF - 2011/11/14)
+ - The library allowed the conversion of strings between ASCII and UTF8
+ (Issue 7582). We have corrected it to report an error under this
+ situation. (SLU - 2011/11/8)
+ - The library had a seg fault when it tried to shrink the size of a compound type
+ through H5Tset_size immediately after the type was created (Issue
+ 7618). It's fixed now. (SLU - 2011/10/26)
+ - Fixed a bug that occurred when using H5Ocopy on a committed datatype
+ containing an attribute using that committed datatype.
+ (NAF - 2011/10/13 - Issue 5854)
+ - #ifdef _WIN32 instances changed to #ifdef H5_HAVE_WIN32_API and added
+ H5_HAVE_VISUAL_STUDIO checks where necessary. CMake only, as configure
+ never set _WIN32.
+ - CLANG compiler with the options -fcatch-undefined-behavior and -ftrapv
+ discovered 3 problems in tests and tools' library (Issue 7674):
+ 1. In dsets.c, left shifting an unsigned int for 32 bits or more
+ caused undefined behavior.
+ 2. In dt_arith.c, the INIT_INTEGER macro definition has an overflow
+ when the value is negative minimal and is being subtracted one.
+ 3. In tools/lib/h5tools_str.c, right shifting an int value for 32 bits
+ or more caused undefined behavior.
+ All the problems have been corrected. (SLU - 2011/9/2)
+ - In the v1.6 library, the EOA for the whole MULTI file was saved in the
+ superblock. We took it out in the v1.8 library because it's meaningless
+ for the MULTI file; the v1.8 library saves the EOA for the metadata file
+ instead. But this caused a backward compatibility problem:
+ the v1.8 library couldn't open files created with the v1.6 library. We
+ fixed the problem by checking the EOA value to detect files
+ created with the v1.6 library. (SLU - 2011/6/22)
+ - When a dataset had filters and reading data failed, the error message
+ didn't say which filter wasn't registered. It's fixed now.
+ (SLU - 2011/6/3)
+ - The datatype handle created with H5Tencode/H5Tdecode used to have a
+ reference count of 0 (zero). It has been fixed; it is 1 (one) now.
+ (SLU - 2011/2/18)
+ - Fixed a bug that caused big endian machines to generate corrupt files
+ when using the scale-offset filter with floating point data or
+ fill values. Note that such datasets will no longer be readable
+ by any machine after this patch. (NAF - 2010/02/02 - Bug 2131)
+ - Retrieving a link's name by index in the case where the link is
+ external and the file that the link refers to doesn't exist will
+ now fail gracefully rather than cause a segmentation fault.
+ (MAM - 2010/11/17)
+ - Modified library to always cache symbol table information. Libraries
+ version 1.6.3 have a bug which causes them to require this
+ information for some operations. (NAF - 2010/09/21 - 1864)
+ - Fixed a bug that could occur when getting information for a new-style
+ group that was previously opened through a file handle that was
+ later closed. (NAF - 2010/09/15)
+ - Added define check in H5public.h if stdint.h is supported by the C++
+ compiler. This define is only available on Windows with VS2010 and
+ using CMake to build the library. (ADB - 2010/09/13 - Bug 1938)
+ - H5Eset_current_stack now also closes the error stack to be set as the
+ default. This is to avoid a potential problem (Bug 1799).
+ (SLU - 2010/9/7)
+ - Fixed the bug in the filter's public CAN_APPLY function. The return
+ value should be htri_t not herr_t (Bug #1239). (SLU - 2010/8/5)
+ - Fixed a bug in the direct I/O driver that could render files with
+ certain kinds of unaligned data unreadable or corrupt them.
+ (NAF - 2010/07/28)
+ - valgrind reported an error of copying data to itself when a new attribute
+ is written (Bug #1956). I fixed it by taking out the memcpy step in
+ the attribute code. (SLU - 2010/07/28)
+ - Fixed a bug that could cause file corruption when using non-default
+ sizes of addresses and/or lengths. This bug could also cause
+ uncorrupted files with this property to be unreadable. This bug
+ was introduced in 1.8.5. (NAF - 2010/07/16 - 1951)
+ - Fixed a file corruption bug that could happen when shrinking a
+ compressed dataset. (NAF - 2010/05/20)
+ - Fixed some memory leaks in VL datatype conversion when strings are
+ used as fill values. (MAM - 2010/05/12 - BZ# 1826)
+ - Fixed a bug when copying objects with NULL references with the
+ H5O_COPY_EXPAND_REFERENCE_FLAG flag set. (NAF - 2010/04/08 - 1815)
+ - Files can now be concurrently opened more than once using the core file
+ driver, as long as the backing store is used. (NAF - 2010/03/09)
+ - Added support for H5O_COPY_EXPAND_EXT_LINK_FLAG to H5Ocopy. External
+ links will now be expanded if this flag is set.
+ (NAF - 2010/03/05 - 1733)
+ - Fixed a bug where the library, when traversing an external link, would
+ reopen the source file if nothing else worked. (NAF - 2010/03/05)
+ - Fixed an intermittent bug in the b-tree code which could be triggered
+ by expanding and shrinking chunked datasets in certain ways.
+ (NAF - 2010/02/16)
+ - H5Tdetect_class said a VL string is a string type, but when it's
+ in a compound type, it said it's a VL type (Bug #1584). This was fixed
+ to be consistent; it now always returns string type. (SLU - 2009/12/10)
+ - Fixed a bug where writing and deleting many global heap objects (i.e.
+ variable length data) would render the file unreadable. Previously
+ created files exhibiting this problem should now be readable.
+ (NAF - 2009/10/27 - 1483)
+ - Fixed incorrect return value for H5Pget_preserve. (AKC - 2009/10/08 - 1628)
+ - Fixed an assertion failure that occurred when H5Ocopy was called on a
+ dataset using a vlen inside a compound. (NAF - 2009/10/02 - 1597)
+ - Fixed incorrect return value for H5Pget_filter_by_id1/2 in H5Ppublic.h.
+ (NAF - 2009/09/25 - 1620)
+ - Fixed a bug where properties weren't being compared with the registered
+ compare callback. (NAF - 2009/09/25 - 1555)
+ - Fixed a bug where H5Pget_filter_by_id would succeed when called for a
+ filter that wasn't present. (NAF - 2009/06/25 - 1250)
+ - Fixed an issue with committed compound datatypes containing a vlen.
+ Also fixed memory leaks involving committed datatypes.
+ (NAF - 2009/06/10 - 1593)
+ - Added versioning to H5Z_class_t struct to allow compatibility with 1.6
+ API. (NAF - 2009/04/20 - 1533)
+ - Fixed a problem with using data transforms with non-native types in the
+ file. (NAF - 2009/04/20 - 1548)
+ - Added direct.h include file to windows section of H5private.h
+ to fix _getcwd() warning. (ADB - 2009/04/14 - 1536)
+ - Fixed a bug that prevented external links from working after calling
+ H5close(). (NAF - 2009/04/10 - 1539)
+ - Modified library to write cached symbol table information to the
+ superblock, to allow library versions 1.3.0 to 1.6.3 to read files
+ created by this version. (NAF - 2009/04/08 - 1423)
+ - Changed skip lists to use a deterministic algorithm. The library should
+ now never call rand() or srand(). (NAF - 2009/04/08 - 503)
+ - Fixed a bug where H5Lcopy and H5Lmove wouldn't create intermediate
+ groups when that property was set. (NAF - 2009/04/07 - 1526)
+ - Fixed a bug that caused files with a user block to grow by the size of
+ the user block every time they were opened.
+ (NAF - 2009/03/26 - 1499)
+ - Fixed a rare problem that could occur with files using the old (pre 1.4)
+ array datatype. (NAF - 2009/03/23)
+ - Modified library to be able to open files with corrupt root group symbol
+ table messages, and correct these errors if they are found. Such
+ files can only be successfully opened with write access.
+ (NAF - 2009/03/23 - 1189)
+ - Removed the long_long #define and replaced all instances with
+ "long long". This caused problems with third party products. All
+ currently supported compilers support the type. (ADB - 2009/03/05)
+ - Fixed various bugs that could prevent the fill value from being written
+ in certain rare cases. (NAF - 2009/02/26 - 1469)
+ - Fixed a bug that prevented more than one dataset chunk from being cached
+ at a time. (NAF - 2009/02/12 - 1015)
+ - Fixed an assertion failure caused by opening an attribute multiple times
+ through multiple file handles. (NAF - 2009/02/12 - 1420)
+ - Fixed a problem that could prevent the user from adding attributes (or
+ any object header message) in some circumstances.
+ (NAF - 2009/02/12 - 1427)
+ - Fixed a bug that could cause problems when an attribute was added to a
+ committed datatype using the committed datatype's datatype.
+ (NAF - 2009/02/12)
+ - Fixed a bug that could cause problems when copying an object with a
+ shared message in its own object header. (NAF - 2009/01/29)
+ - Changed H5Tset_order to properly reject H5T_ORDER_NONE for most
+ datatypes. (NAF - 2009/01/27 - 1443)
+ - Fixed a bug where H5Tpack wouldn't remove trailing space from an
+ otherwise packed compound type. (NAF - 2009/01/14)
+ - Fixed up some old v2 btree assertions that get run in debug mode that
+ were previously failing on compilation, and removed some of the
+ more heavily outdated and non-rewritable ones. (MAM - 2008/12/15)
+ - Fixed a bug that could cause problems when "automatically" unmounting
+ multiple files. (NAF - 2008/11/17)
+ - H5Ovisit and H5Ovisit_by_name will now properly terminate when the
+ callback function returns a positive value on the starting object.
+ (NAF - 2008/11/03)
+ - Fixed an error where a null message could be created that was larger
+ than could be written to the file. (NAF - 2008/10/23)
+ - Corrected error with family/split/multi VFD not updating driver info
+ when "latest" version of the file format used. (QAK - 2008/10/14)
+ - Corrected alignment+threshold errors to work correctly when metadata
+ aggregation is enabled. (QAK - 2008/10/06)
+ - Changed H5Fget_obj_count and H5Fget_obj_ids to ignore objects registered
+ by the library for internal library use. (NAF - 2008/10/06)
+ - Fixed potential memory leak during compound conversion.
+ (NAF - 2008/10/06)
+ - Changed the return value of H5Fget_obj_count from INT to SSIZE_T. Also
+ changed the return value of H5Fget_obj_ids from HERR_T to SSIZE_T and
+ the type of the parameter MAX_OBJS from INT to SIZE_T. (SLU - 2008/09/26)
+ - Fixed an issue that could cause data to be improperly overwritten
+ during compound type conversion. (NAF - 2008/09/19)
+ - Fixed pointer alignment violations that could occur during vlen
+ conversion. (NAF - 2008/09/16)
+ - Fixed problem where library could cause a segmentation fault when
+ an invalid location ID was given to H5Giterate(). (QAK - 2008/08/19)
+ - Fixed improper shutdown when objects have reference count > 1. The
+ library now tracks reference count due to the application separately
+ from that due to internal library routines. (NAF - 2008/08/19)
+ - Fixed assertion failure caused by incorrect array datatype version.
+ (NAF - 2008/08/08)
+ - Fixed an issue where mount point traversal would fail when using
+ multiple handles for the child. (NAF - 2008/08/07)
+ - Fixed an issue where mount points were inaccessible when using multiple
+ file handles for the parent. The mount table is now in the shared
+ file structure (the parent pointer is still in the top structure).
+ (NAF - 2008/08/07)
+ - When an attribute was opened twice and data was written with one of the handles,
+ the file didn't have the data. It happened because each handle had its own
+ object structure, and the empty one overwrote the data with the fill value. This is
+ fixed by making some attribute information, like the data, shared in the
+ attribute structure. SLU - 2008/07/22
+ - Fixed issue where a group could have a file mounted on it twice.
+ (QAK - 2008/07/15)
+ - Fixed a Windows-specific issue in the ohdr test which was causing users
+ in some timezones to get false errors. This is a deficiency in the Windows
+ mktime() function, and it has been handled properly. SJW - 2008/06/19
+ - Fixed the problem with the searching of target file for H5Lcreate_external().
+ The searching pattern will depend on whether the target file's
+ pathname is an absolute or a relative path. Please see the description
+ in the RM for H5Lcreate_external(). (VC - 2008/04/08)
+ - Fixed possible file corruption bug when encoding datatype
+ descriptions for compound datatypes whose size was between
+ 256 & 511 bytes and the file was opened with the "use the
+ latest format" property enabled (with H5Pset_libver_bounds).
+ (QAK - 2008/03/13)
+ - Fixed bug in H5Aget_num_attrs() routine to handle invalid location
+ ID correctly. (QAK - 2008/03/11)
+ - H5Dset_extent: when shrinking dimensions, some chunks were not deleted.
+ (PVN - 2009/01/8)
+ - Added code to maintain a min_clean_fraction in the metadata cache when
+ in serial mode. (MAM - 2009/01/9)
+
+
+
+ Configuration
+ -------------
+ - CMake: When CMake commands are executed individually on the command line
+ and the external filters are being built, the CMAKE_BUILD_TYPE define
+ must be set to the same value as the configuration
+ (-DCMAKE_BUILD_TYPE:STRING=Release if using -C Release). This is needed
+ by the szip and zlib filter build commands. (ADB - HDFFV-8695)
+ - CMake: Remove use of XLATE_UTILITY program. (ADB - 2014/03/28 HDFFV-8640)
+ - CMake: Added missing quotes in setting the CMAKE_EXE_LINKER_FLAGS for the
+ MPI option. (ADB - 2014/02/27 HDFFV-8674)
+ - Modified H5detect.c to scan floating point types for padding bits before
+ analyzing the type further. This should fix problems with gcc 4.8
+ (NAF - 2013/09/19 - HDFFV-8523/HDFFV-8500)
+ - Fixed Makefile issue in which "-Wl," was not properly specified
+ prior to -rpath when building parallel fortran libraries with
+ an Intel compiler. (MAM - 2012/03/26)
+ - Makefiles generated by other packages using h5cc as the compiler
+ no longer error when 'make' is invoked more than once in order
+ to 'rebuild' after changes to source. (MAM - 2012/03/26)
+ - Added --enable-fortran2003 flag to enable Fortran2003 support
+ in the HDF5 Fortran library. The flag should be used along with the
+ --enable-fortran flag and takes effect only when the Fortran compiler
+ is Fortran 2003 compliant. (EIP - 2011/11/14)
+
+ - On the Windows platform, the default VFD, which was the Windows VFD, is
+ restored back to the SEC2, aka POSIX, VFD. The Windows VFD is deprecated. HDFFV-7740
+ (AKC 2011/09/26)
+ - Removed config/ibm-aix6.x. All IBM-AIX settings are in one file,
+ ibm-aix. (AKC - 2011/4/14)
+ - Shared C libraries are no longer disabled on Mac when Fortran
+ is enabled. Shared Fortran libraries are still not supported on Mac,
+ so configure will disable them by default, but this is overridable
+ with the new --enable-unsupported configure option. The configure
+ summary has been updated to reflect the fact that the shared-ness of
+ the C++/Fortran wrapper libraries may not align with the C library.
+ (MAM - 04/11/2011 - HDFFV-4353).
+ - Removed recognition of the parallel compilers of LAM(hcc) and
+ ChMPIon(cmpicc) since we have no access to these two MPI implementations
+ and cannot verify their correctness. (AKC - 2010/7/14 - Bug 1921)
+ - Removed the following config files, as we no longer support them:
+ config/dec-osf*, config/hpux11.00, config/irix5.x,
+ config/powerpc-ibm-aix4.x config/rs6000-ibm-aix5.x config/unicos*
+ MAM - 2009/10/08
+ - Modified configure and make process to properly preserve user's CFLAGS
+ (and company) environment variables. Build will now properly use
+ automake's AM_CFLAGS for any compiler flags set by the configure
+ process. Configure will no longer modify CFLAGS directly, nor will
+ setting CFLAGS during make completely replace what configure has set up.
+ MAM - 2009/10/08
+ - Support for TFLOPS, config/intel-osf1, is removed since the TFLOPS
+ machine has long retired. AKC - 2009/10/06.
+ - Added $(EXEEXT) extension to H5detect when it's executed in the
+ src/Makefile to generate H5Tinit.c so it works correctly on platforms
+ that require the full extension when running executables.
+ MAM - 2009/10/01 - BZ #1613
+ - Configure will now set FC and CXX to "no" when fortran and c++
+ are not being compiled, respectively, so configure will not run
+ some of the compiler tests for these languages when they are not
+ being used. MAM - 2009/10/01
+ - The PathScale compiler (v3.2) was mistaken for gcc v4.2.0 but it fails to
+ recognize some gcc options. Fixed. (see bug 1301). AKC - 2009/7/28 -
+ - The --enable-static-exec flag will now properly place the -static flag
+ on the link line of all installed executables. This will force the
+ executable to link with static libraries over shared libraries, provided
+ the static libraries are available. MAM - 2009/08/31 - BZ #1583
+ - The --includedir=DIR configuration option now works as intended, and can
+ be used to specify the location to install C header files. The default
+ location remains unchanged, residing at ${prefix}/include.
+ MAM - 2009/03/10 - BZ #1381
+ - Configure no longer removes the '-g' flag from CFLAGS when in production
+ mode if it has been explicitly set in the CFLAGS environment variable
+ prior to configuration. MAM - 2009/03/09 - BZ #1401.
+ - Fixed error with 'make check install' failing due to h5dump
+ needing other tools built first. MAM - 2008/10/24.
+ - When using shared szip, it is no longer necessary to specify
+ the path to the shared szip libraries in LD_LIBRARY_PATH. MAM -
+ 2008/10/24.
+ - The file libhdf5_fortran.settings is not installed since its content
+ is included in libhdf5.settings now. AKC - 2008/10/21
+ - "make DESTDIR=xxx install" failed to install some tools and files
+ (e.g., h5cc and fortran modules). Fixed. AKC - 2008/10/8.
+ - Autotools: An export of LD_LIBRARY_PATH=<szip library location> was
+ removed from configure and make installcheck was revised to run
+ scripts installed in share/hdf5_examples to use the installed h5cc, etc.
+ to compile and run example source files also installed there. Make
+ installcheck will now fail when a shared szip or other external lib file
+ cannot be found in the same manner that executables compiled and linked
+ with h5cc will fail to run when those lib files cannot be found after
+ install. Make installcheck should pass after setting LD_LIBRARY_PATH to the
+ szip location.
+ (LRK - 2014/04/16)
+
+ Performance
+ -------------
+ - Removed program perform/benchpar from the enable-build-all list. The
+ program will be retired or moved to another location. HDFFV-8156
+ (AKC 2012/10/01)
+ - Retired program perform/mpi-perf. Its purpose had previously been
+ incorporated into h5perf. (AKC 2012/09/20)
+ - ifdefs added to tests around the unistd.h include and a function to
+ simulate getlogin() on Windows.
+ (ADB - 2011/08/15)
+ - perf_serial test added to Windows projects and check batch file.
+ (ADB - 2009/06/11)
+
+ Fortran
+ --------
+ - Fixed a typo in the return value of the nh5dread_f_c function (it was 1
+ instead of 0 on success); fixed the return value to make it consistent
+ with other Fortran functions; removed debug statements from the code.
+ (EIP - 2012/06/23)
+
+ - Fixed problem writing/reading control characters to a dataset; writing
+ a string containing: alerts, backspace, carriage_return, form_feed,
+ horizontal_tab, vertical_tab, new_line is now tested and working.
+ (MSB - 2012/09/01)
+
+ - Corrected the integer type of H5S_UNLIMITED_F to HSIZE_T (MSB - 2012/09/01)
+
+ - Corrected the number of continuation lines in the src files
+ to be fewer than 32 lines for F95 compliance. (MSB - 2012/10/01)
+
+ Tools
+ -----
+ - h5dump subsetting fixed for dims greater than two
+ When a dataset has more than two dimensions, subsetting would incorrectly
+ calculate the data that needed to be displayed.
+ Added block and stride calculations that account for dimensions greater
+ than two. NOTE: lines that have line breaks inserted because of display
+ length calculations may have index info that is incorrect until the next
+ dimension break. (ADB, 2016/03/04, HDFFV-9698)
+ - h5repack: h5repack would not attempt to remove UD filters. Added a
+ check to h5repack for UD filters that checks whether the filter can
+ be dynamically loaded. This will require a change in the library to
+ add the H5PL_load() call to H5Zfilter_avail(). (ADB - 2014/03/03 HDFFV-8629)
+ - h5repack: Fixed failure for converting a layout of small chunked dataset
+ (size < 1K) to contiguous layout. HDFFV-8214 (JKM 2013/03/18)
+ - h5diff: Fixed to return the correct exit code 1 when detecting a unique
+ extra attribute. Prior to this fix, h5diff returned exit code 0,
+ indicating the two files were identical. HDFFV-7643 (JKM 2013/02/15)
+ - h5diff: Improved speed when comparing HDF5 files with lots of
+ attributes. Much slower performance was identified with release
+ versions 1.8.7 through 1.8.10 compared to 1.8.6. (JKM 2012/10/19)
+ - h5repack: "h5repack -f NONE file1.h5 out.h5" command failed if
+ source file contains chunked dataset and a chunk dim is bigger than
+ the dataset dim. Another issue is that the command changed max dims
+ if chunk dim is smaller than the dataset dim.
+ These issue occurred when dataset size is smaller than 64k (compact
+ size limit) Fixed both.
+ HDFFV-8012 (JKM 2012/09/24)
+ - h5diff: Fixed not to accumulate attribute differences into the dataset
+ difference in verbose mode (-v, -r), which caused an incorrect
+ difference between dataset and group/datatype objects if attributes
+ exist with any differences. This also led to fixing the inconsistent
+ format indicating the difference between dataset and group/datatype
+ objects. HDFFV-5919 (JKM 2012/09/05)
+ - h5diff: Fixed the incorrect result when comparing attribute data
+ values and the datatypes have the same class but different sizes.
+ HDFFV-7942 (JKM 2012/08/15)
+ - ph5diff: Fixed an intermittent hang issue on a certain operation in
+ parallel mode. It was detected by the daily test for comparing
+ non-comparable objects, but it could have occurred in other
+ operations depending on machine conditions. HDFFV-8003 (JKM 2012/08/01)
+ - h5diff: Fixed test failure for "make check" due to failure to copy
+ test files when run in the HDF5 source tree. The fix was also applied
+ to other tools.
+ HDFFV-8107 (JKM 2012/08/01)
+ - h5diff: Fixed the function COPY_TESTFILES_TO_TESTDIR() of
+ testh5diff.sh to better report when there is an error in the file
+ copying. HDFFV-8105 (AKC - 2012/07/22)
+ - h5diff: Fixed not to check and display dangling link status without
+ the --follow-symlinks option. This also improved performance when
+ comparing lots of external links without the --follow-symlinks
+ option. HDFFV-7998 (JKM 2012/04/26)
+ - h5unjam: Fixed a segfault when the -V (show version) option was used.
+ HDFFV-8001 (JKM 2012/04/19)
+ - h5repack: Fixed a failure when changing the chunk size of a specified
+ chunked dataset with unlimited max dims. HDFFV-7993 (JKM 2012/04/11)
+ - h5diff: Fixed a failure when comparing same-named objects with different
+ object types while comparing groups. Prior to the fix, h5diff resulted
+ in an error. After the fix, h5diff detects such cases as non-comparable
+ and displays messages accordingly. HDFFV-7664 (JKM 2012/03/28)
+ - h5diff: If unique objects exist only in one file and the --exclude-path
+ option is used to exclude them, h5diff missed excluding some objects.
+ Fixed to exclude objects correctly in such cases.
+ HDFFV-7837 (JKM 2012/03/20)
+ - h5dump: Added tools library error stack to properly catch error
+ information generated within the library.
+ HDFFV-7958 (ADB 2012/03/12)
+ - h5dump: Dangling links no longer produce an error message; the process
+ was changed for when opening a link fails.
+ HDFFV-7839 (ADB 2012/03/12)
+ - h5diff: When two dangling symbolic links are compared with the
+ --follow-symlinks option, the result should be the same. It worked for
+ comparing two files, but didn't work for comparing two objects.
+ HDFFV-7835 (JKM 2012/03/09)
+ - h5dump: Refactored code to remove duplicated functions. Split XML
+ functions from DDL functions. Corrected indentation and formatting
+ errors. Also fixed subsetting counting overflow (HDFFV-5874). Verified
+ all tools call tools_init() in main.
+ HDFFV-7560 (ADB 2012/02/17)
+ - h5diff: fixed to prevent displaying an error stack message when
+ comparing two dangling symlinks with the --follow-symlinks option.
+ HDFFV-7836 (JKM 2012/01/13)
+ - h5repack: fixed a memory leak when handling variable-length strings in
+ attributes. HDFFV-7840 (JKM 2012/01/06)
+ - h5ls: fixed a segfault when accessing region reference data in an
+ attribute. HDFFV-7838 (JKM 2011/12/29)
+ - h5diff: fixed a segfault over non-comparable attributes with different
+ dimension or rank, along with the '-c' option to display details.
+ HDFFV-7770 (JKM 2011/10/24)
+ - Fixed h5diff to display all the comparable objects and attributes
+ regardless of non-comparable ones. HDFFV-7693 (JKM 2011/09/16)
+ - Fixed h5repack to update values of references (object and region) in
+ attributes for 1) references, 2) ARRAY of references,
+ 3) VLEN of references, and 4) COMPOUND of references.
+ (JIRA HDF5 5932) PC - 2011/09/14
+ - h5diff: fixed a segfault over datasets with container types
+ (array, vlen) with multiple nested compound types.
+ (ex: compound->array->compound, compound->vlen->compound)
+ HDFFV-7712 JKM (2011/09/01)
+ - h5repack: added macro to handle failure in H5Dread/write when memory
+ allocation failed inside the library. (PC -- 2011/08/19)
+ - Fixed h5jam not to allow specifying an HDF5 formatted file as the input
+ file for the -u (user block file) option, because the original HDF5 file
+ would not be accessible if this were allowed. HDFFV-5941 (JKM 2011/08/15)
+ - Revised command help pages of h5jam and h5unjam. The descriptions
+ were not up to date and some were missing.
+ HDFFV-7515 (JKM 2011/08/15)
+ - h5repack: h5repack failed to copy a dataset if the layout is changed
+ from chunked with unlimited dims to contiguous. HDFFV-7649
+ (PC -- 2011/07/15)
+ - h5diff: "--delta" option considers two NaN of the same type are
+ different, which is wrong based on h5diff description in Reference
+ Manual. HDFFV-7656 (PC -- 2011/07/15)
+ - Fixed h5diff to display instructive error message and exit with 1
+ when mutually exclusive options (-d, -p and --use-system-epsilon)
+ are used together. HDFFV-7600 (JKM 2011/07/07)
+ - Fixed h5dump to display the first line of each element in the correct
+ position for multi-dimensional array types.
+ Before this fix, the first line of each element in an array was
+ displayed after the last line of the previous element without
+ moving to the next line (+indentation).
+ Bug #HDFFV-5878 (JKM 2011/06/15)
+ - Fixed h5dump to display correct value for H5T_STD_I8LE dataset
+ on a system (ppc64, linux, Big-Endian, clustering).
+ Bug #HDFFV-7594 (ABERT & JKM 2011/05/12)
+ - Fixed h5diff to compare a file with itself correctly. Previously h5diff
+ reported either different or not comparable in certain cases even when
+ comparing a file with itself. This fix also improves performance when
+ comparing the same target objects by verifying the object and file
+ addresses before comparing the details in the objects (ex: datasets
+ or attributes). Bug #HDFFV-5928 (XCAO & JKM 2011/05/06)
+ - Updated the h5dump test case script to prevent entire test failure when
+ the source directory is read-only. Bug# HDFFV-4342 (JKM 2011/4/12)
+ - Fixed h5dump displaying incorrect values for H5T_STD_I8BE type data in
+ an attribute on Big-Endian machines. H5T_STD_I8BE is a signed 8-bit type,
+ so h5dump is supposed to display -2 instead of 254. It worked
+ correctly on Little-Endian systems, but not on Big-Endian systems.
+ Bug #HDFFV-4358 (JKM 2011/04/08)
+ - Updated to unify the option name to '--enable-error-stack' for printing
+ HDF5 error stack messages for HDF5 tools (h5ls and h5dump for now).
+ For h5ls, this replaces the "-e/--errors" option, which is deprecated.
+ Bug#2182 (JKM 2011/3/30)
+ - Fixed h5diff for the --use-system-epsilon option: the calculation changed
+ from ( |a - b| / b ) to ( |a - b| ). This was decided for better
+ performance. Bug#2184 (JKM 2011/3/24)
+ - Fixed output for H5T_REFERENCE in h5dump. According to the BNF document
+ the output of a H5T_REFERENCE should be followed by the type;
+ <reference> ::= H5T_REFERENCE { <ref_type> }
+ <ref_type> ::= H5T_STD_REF_OBJECT | H5T_STD_REF_DSETREG
+ Previously this was only displayed if the -R option was used.
+ Bug#1725 (ADB 2011/3/28)
+ - Fixed h5diff issues: #1, h5diff compared attributes correctly only
+ when two objects have the same number of attributes and the attribute
+ names are identical; #2, it didn't display useful information about
+ attribute differences. Bug#2121 (JKM 2011/3/17)
+ - Fixed a memory leak in h5diff when accessing symbolic links with the
+ --follow-symlinks option. Bug#2214 (JKM 2011/3/18)
+ - Fixed a memory leak in h5diff when accessing variable-length string
+ data. Bug#2216 (JKM 2011/3/18)
+ - Fixed and improved help page for -a option of h5ls.
+ Bug#1904 (JKM 2011/3/11)
+ - Fixed h5dump not to include attribute values in the output file when
+ h5dump "-y -o output_file" options were used. The problem was introduced
+ in HDF5 1.8.6 by showing data pointed by region references. (XCAO 2011/3/9)
+ - Fixed h5copy to be able to copy any object into the same HDF5 file.
+ Previously h5copy displayed an error message when the target file was
+ the same as the source file. (XCAO 2011/3/8)
+ - Fixed h5dump skipping some values for long array type datasets on
+ Windows. This issue only occurred on Windows due to the different
+ return behavior of the _vsnprintf() function. Bug#2161 (JKM 2011/3/3)
+ - Fixed h5dump skipping array indices at certain intervals when the
+ array type dataset is relatively big. The interval varies according
+ to the size of the array. Bug#2092 (JKM 2011/2/15).
+ - Fixed the h5diff segfault when comparing compound datasets
+ with a combination of fixed-length string types and vlen string types
+ in certain orders. bug#2089 (JKM 2010/12/28)
+ - Improved h5diff performance: 1) use HDmemcmp() before comparing each
+ element; 2) replace expensive H5Tequal() calls; 3) retrieve datatype
+ information at the dataset level, not at each element level, for
+ compound datasets.
+ - Fixed h5ls to display nested compound types with curly brackets
+ when the -S (--simple) option is used with -l (--label), so it shows
+ which members (in curly brackets) belong to which nested compound type,
+ making the output make sense. bug#1979 (JKM 2010/11/09)
+ - Fixed h5diff to handle variable-length strings in a compound dataset
+ correctly (also variable-length string arrays in a compound dataset).
+ Garbage values were displayed when h5diff compared multiple
+ variable-length strings in a compound type dataset.
+ Bug#1989 (JKM 2010/10/28)
+ - Fixed h5copy to fail gracefully when copying an object to a non-existent
+ group without the -p option. Bug#2040 (JKM 2010/10/18)
+ - Fixed to compare member objects and groups recursively when two
+ files or groups are specified to be compared. Bug#1975
+ (JKM 2010/9/16)
+ - Made h5repack able to convert a layout to COMPACT for small
+ datasets by default. bug#1896 (JKM 2010/09/15)
+ - Changed h5ls not to manipulate special characters in object names or
+ attribute names for smart display. bug#1784 (JKM 2010/06/28)
+ - Fixed h5ls to return exit code 1 (error) when a non-existent file is
+ specified. bug#1793. (JKM 2010/04/27)
+ - h5copy failed to copy dangling link when the link is specified
+ directly. bug#1817. (JKM 2010/04/22)
+ - h5repack lost attributes from a dataset of reference type. bug#1726.
+ (JKM 2010/3/25)
+ - h5repack sets NULL for object reference value for group or
+ named datatype. bug#1814. (JKM 2010/03/19)
+ - h5diff: fixed incorrect behavior (hang) in parallel mode when
+ invalid options are specified (ex: -v and -q) (JKM 2010/02/17)
+ - h5dump/h5ls display buffer resize fixed in tools library.
+ (ADB - 2009/07/21 - 1520)
+ - Fixed many problems that could occur when using h5repack with named
+ datatypes. (NAF - 2009/4/20 - 1516/1466)
+ - h5dump, h5diff, h5repack were not reading (by hyperslabs) datasets
+ that have a datatype datum size greater than H5TOOLS_BUFSIZE, a
+ constant defined as 1024Kb, such as array types with large
+ dimensions (PVN - 2009/4/1 - 1501)
+ - h5import: By selecting a compression type, a big endian byte order was being
+ selected (PVN - 2009/3/11 - 1462)
+ - zip_perf.c had missing argument on one of the open() calls. Fixed.
+ (AKC - 2008/12/9)
+ - h5dump now checks for uniqueness of committed datatypes.
+ (NAF - 2008/10/15)
+ - Fixed unnecessary indentation of committed datatypes in h5dump.
+ (NAF - 2008/10/15)
+ - Fixed bugs in h5stat: a segmentation fault when printing groups, and
+ it now prints a warning message when traversal of objects is
+ unsuccessful. (see bug #1253) (VC- 2008/10/13)
+ - Fixed bug in h5ls that prevented relative group listings (like
+ "h5ls foo.h5/bar") from working correctly (QAK - 2008/06/03)
+ - Fixed bug in h5diff that prevented datasets & attributes with
+ variable-length string elements from comparing correctly.
+ (QAK - 2008/02/28)
+ - Fixed an h5import bug on Windows with binary datasets. fread on Windows
+ needs a binary file to be opened with 'rb' instead of 'r'; otherwise it
+ terminates execution if an end-of-file character is found in the
+ input file. In addition, the generated binary file needs to be opened
+ with 'wb', otherwise an end-of-line character is read twice.
+ (PVN - 2008/02/19)
+ - Fixed bug in h5dump that caused binary output to be made only for the first
+ dataset, when several datasets were requested. (PVN - 2008/04/07)
+ - h5dump: when doing binary output (-b), the stdout printing of attributes
+ was done incorrectly. Removed printing of attributes when doing binary
+ output. PVN - 2008/06/05
+
+
+ High-Level APIs:
+ ------
+ - Packet Table is updated.
+
+ In the Packet Table C API, the following functions, which had been
+ ifdef'ed out with VLPT_REMOVED since 2006, changed as follows:
+ * H5PTcreate_vl is removed in this release
+ * H5PTfree_vlen_readbuff is renamed to H5PTfree_vlen_buff
+ * H5PTis_varlen is made available again
+ (A minimal C sketch of the renamed/reinstated calls follows at the end
+ of this High-Level APIs section.)
+
+ Various cleanup: replacing 0/-1 with SUCCEED/FAIL and H5I_BADID with
+ H5I_INVALID_HID. (BMR, 2016/03/04, HDFFV-442)
+
+ - Fixed a problem with H5DSget_scale_name including the NULL terminator in
+ the size calculation returned by the function. The API does not include
+ the NULL terminator in the size returned; see the second sketch at the
+ end of this High-Level APIs section. (MSB - 2013/2/10)
+
+ - Fixed problem with H5TBdelete_record destroying all data following the deletion
+ of a row. (MSB- 2012/7/26)
+
+ - Fixed H5LTget_attribute_string not closing an object identifier when an
+ error occurs. (MSB- 2012/7/21)
+
+ - Fixed the H5LTdtype_to_text function. It had some memory problems when
+ dealing with some complicated data types. HDFFV-7701 (SLU - 2011/10/19)
+
+ - Fixed a bug in H5DSattach_scale, H5DSis_attached and H5DSdetach_scale
+ caused by using H5Tget_native_type function to determine the native
+ type for reading REFERENCE_LIST attribute. The bug was exposed
+ on Mac PPC.
+ (EIP - 2010/05/22 -1851)
+ - Fixed a bug in the H5DSdetach_scale function when 0 bytes
+ were allocated after the last reference to a dim. scale
+ was removed from the list of references in a VL element of the
+ DIMENSION_LIST attribute; modified the function to comply
+ with the spec: the DIMENSION_LIST attribute is now deleted when no
+ dimension scales are left attached.
+ (EIP - 2010/05/14 -1822)
+ - Fixed a bug where the H5TB API would forget the order of fields when
+ added out of offset order. (NAF - 2009/10/27 - 1582)
+ - H5DSis_attached failed to account for different platform types. Added a
+ get native type call. (ADB - 2009/9/29 - 1562)
+ - Dimension scales: The scale index return value in H5DSiterate_scales was not always
+ incremented. (PVN - 2009/4/8 - 1538)
+
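+ The second sketch, also hypothetical C, illustrates the H5DSget_scale_name
+ size semantics noted above: the returned size does not include the NULL
+ terminator, so one extra byte is allocated for it. The dataset name
+ "scale_dset" is a placeholder.
+
+     #include <stdio.h>
+     #include <stdlib.h>
+     #include "hdf5.h"
+     #include "hdf5_hl.h"
+
+     static void print_scale_name(hid_t file_id)
+     {
+         hid_t   dsid = H5Dopen2(file_id, "scale_dset", H5P_DEFAULT);
+         /* A first call with a NULL buffer returns the name length (no NUL) */
+         ssize_t len  = H5DSget_scale_name(dsid, NULL, 0);
+
+         if (len > 0) {
+             char *name = (char *)malloc((size_t)len + 1); /* +1 for the NUL */
+             if (name != NULL &&
+                 H5DSget_scale_name(dsid, name, (size_t)len + 1) >= 0)
+                 printf("scale name: %s\n", name);
+             free(name);
+         }
+         H5Dclose(dsid);
+     }
+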
+ Fortran High-Level APIs:
+ ------
+
+ - Lite: The h5ltget_attribute_string_f function used to return the C NULL
+ character in the returned character buffer. The returned Fortran
+ character buffer no longer contains the C NULL character. (MSB - 2012/3/23)
+ - Lite: The h5ltget_dataset_info_f function (gets information about a dataset)
+ was not correctly returning the dimension array. (PVN - 2009/3/23)
+ - Lite: the h5ltread_dataset_string_f and h5ltget_attribute_string_f functions
+ had memory problems with the g95 fortran compiler. (PVN - 5/13/2009) 1522
+
+
+
+
+
+ Documentation
+ -------------
+
+
+ F90 APIs
+ --------
+ - Modified the h5open_f and h5close_f subroutines not to call H5open
+ and H5close, respectively. While the H5open call just adds overhead,
+ the H5close call made by a Fortran application shuts down the HDF5
+ library, making it inaccessible to the application.
+ HDFFV-915 (EIP & SB - 2011/10/13)
+
+
+ C++ APIs
+ --------
+ - The constructor PropList::PropList(id) was fixed to act properly
+ according to the nature of 'id'. When 'id' is a property class id,
+ a new property list will be created. When 'id' is a property list id,
+ a copy of the property list will be made. (BMR - 2010/5/9)
+ - The parameters 'size' and 'bufsize' in CommonFG::getLinkval and
+ CommonFG::getComment, respectively, now have default values for
+ user's convenience. (BMR - 2009/10/23)
+ - NULL pointer accessing was fixed, bugzilla 1061. (BMR - 2009/10/05)
+ - read/write methods of the DataSet and Attribute classes were fixed
+ to handle strings correctly. (BMR - 2009/07/26)
+ - Fixed bug that caused segfaults in Attribute::read. (BMR - 2008/04/20)
+ - Fixed bug in PropList::getClassName to use portable HDfree instead
+ of free. (BMR - 2008/04/20)
+ - Fixed a design bug which allowed an Attribute object to create/modify
+ attributes (bugzilla #1068). The API class hierarchy was revised
+ to address the problem. Classes AbstractDS and Attribute are moved
+ out of H5Object. Class Attribute now multiply inherits from
+ IdComponent and AbstractDs and class DataSet from H5Object and
+ AbstractDs. In addition, the data member IdComponent::id was
+ moved into subclasses: Attribute, DataSet, DataSpace, DataType,
+ H5File, Group, and PropList. (BMR - 2008/08/10)
+ - IdComponent::dereference was incorrect and replaced as described
+ in "New Features" section.
+ (BMR - 2008/08/10)
+
+ Testing
+ -------
+ - tools/h5diff/testh5diff.sh is run in every "make check", even after it
+ has passed in the previous run. It should not run again if there are no
+ code changes. Fixed. (AKC - 2013/07/19 HDFFV-8392)
+ - On some Mac systems, testlibinfo.sh failed with this error:
+ Check file ../src/.libs/libhdf5.7.dylib
+ strings: object: ../src/.libs/libhdf5.7.dylib malformed object \
+ (unknown load command 15)
+ The Mac strings command inspects library files, and older
+ versions of strings may not recognize newer library formats, resulting
+ in errors. Fixed by sending the library file as stdin to the strings
+ command to avoid this problem. (AKC - 2013/03/08 HDFFV-8305)
+
+ - Fixed a typo in the ERROR macro in test/testhdf5.h. Previously it
+ caused a segmentation fault when used. (AKC - 2013/02/12 HDFFV-8267)
+
+
+Supported Platforms
+===================
+ AIX 6.1 xlc 10.1.0.5
+ (NASA G-ADA) xlC 10.1.0.5
+ xlf90 12.1.0.6
+
+ Linux 2.6.18-308.13.1.el5PAE GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP i686 i686 i386 compilers for 32-bit applications;
+ (jam) Version 4.1.2 20080704 (Red Hat 4.1.2-54)
+ Version 4.8.2
+ PGI C, Fortran, C++ Compilers for 32-bit
+ applications;
+ Version 13.7-0
+ Intel(R) C, C++, Fortran Compiler for 32-bit
+ applications;
+ Version 14.0.2 (Build 20140120)
+
+ Linux 2.6.18-371.6.1.el5 GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP x86_64 GNU/Linux compilers for 64-bit applications;
+ (koala) Version 4.1.2 20080704 (Red Hat 4.1.2-54)
+ Version 4.8.2
+ Intel(R) C, C++, Fortran Compilers for
+ applications running on Intel(R) 64;
+ Version 14.0.2 (Build 20140120)
+
+ Linux 2.6.32-431.11.2.el6 GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP x86_64 GNU/Linux compilers:
+ (platypus) Version 4.4.7 20120313
+ Version 4.8.2
+ PGI C, Fortran, C++ for 64-bit target on
+ x86-64;
+ Version 13.7-0
+ Intel(R) C (icc), C++ (icpc), Fortran (icc)
+ compilers:
+ Version 14.0.2 (Build 20140120)
+
+ Linux 2.6.32-431.29.2.el6.ppc64 gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-4)
+ #1 SMP ppc64 GNU/Linux g++ (GCC) 4.4.7 20120313 (Red Hat 4.4.7-4)
+ (ostrich) GNU Fortran (GCC) 4.4.7 20120313 (Red Hat 4.4.7-4)
+ IBM XL C/C++ V13.1
+ IBM XL Fortran V15.1
+
+ Linux 2.6.32-220.23.1.1chaos Intel C, C++, Fortran Compilers
+ ch5.x86_64 GNU/Linux Version 12.1.5.339
+ (LLNL Aztec)
+
+ IBM Blue Gene/P XL C for Blue Gene/P, bgxlc V9.0
+ (LLNL uDawn) XL C++ for Blue Gene/P, bgxlC V9.0
+ XL Fortran for Blue Gene/P, bgxlf90 V11.1
+
+ SunOS 5.11 32- and 64-bit Sun C 5.12 SunOS_sparc
+ (emu) Sun Fortran 95 8.6 SunOS_sparc
+ Sun C++ 5.12 SunOS_sparc
+
+ Windows 7 Visual Studio 2008 (cmake)
+ Visual Studio 2010 w/ Intel Fortran 14 (cmake)
+ Visual Studio 2012 w/ Intel Fortran 15 (cmake)
+ Visual Studio 2013 w/ Intel Fortran 15 (cmake)
+ Visual Studio 2015 (cmake)
+ Cygwin(CYGWIN_NT-6.1 1.7.34(0.285/5/3) gcc(4.9.2) compiler and gfortran)
+ (cmake and autotools)
+
+ Windows 7 x64 Visual Studio 2008 (cmake)
+ Visual Studio 2010 w/ Intel Fortran 14 (cmake)
+ Visual Studio 2012 w/ Intel Fortran 15 (cmake)
+ Visual Studio 2013 w/ Intel Fortran 15 (cmake)
+ Visual Studio 2015 (cmake)
+
+ Windows 8.1 Visual Studio 2012 w/ Intel Fortran 15 (cmake)
+ Visual Studio 2013 w/ Intel Fortran 15 (cmake)
+
+ Windows 8.1 x64 Visual Studio 2012 w/ Intel Fortran 15 (cmake)
+ Visual Studio 2013 w/ Intel Fortran 15 (cmake)
+
+ Mac OS X Lion 10.7.3 gcc i686-apple-darwin11-llvm-gcc-4.2 (GCC) 4.2.1 from Xcode 4.2.1
+ 32- and 64-bit g++ i686-apple-darwin11-llvm-g++-4.2 (GCC) 4.2.1 from Xcode 4.2.1
+ (duck) gfortran GNU Fortran (GCC) 4.6.2
+
+ Mac OS X Mountain Lion 10.8.1 cc Apple clang version 4.0 from Xcode 4.5.1
+ (owl) c++ Apple clang version 4.0 from Xcode 4.5.1
+ gcc i686-apple-darwin11-llvm-gcc-4.2 (GCC) 4.2.1 from Xcode 4.5.1
+ g++ i686-apple-darwin11-llvm-g++-4.2 (GCC) 4.2.1 from Xcode 4.5.1
+ gfortran GNU Fortran (GCC) 4.6.2
+
+
+Tested Configuration Features Summary
+=====================================
+
+ In the tables below
+ y = tested
+ n = not tested in this release
+ C = Cluster
+ W = Workstation
+ x = not working in this release
+ dna = does not apply
+ ( ) = footnote appears below second table
+ <blank> = testing incomplete on this feature or platform
+
+Platform C F90/ F90 C++ zlib SZIP
+ parallel F2003 parallel
+Solaris2.11 32-bit n y/y n y y y
+Solaris2.11 64-bit n y/n n y y y
+Windows 7 y y/y n y y y
+Windows 7 x64 y y/y n y y y
+Windows 7 Cygwin n y/n n y y y
+Windows 7 x64 Cygwin n y/n n y y y
+Windows 8 y y/y n y y y
+Windows 8 x64 y y/y n y y y
+Mac OS X Lion 10.7.3 32-bit n y/y n y y n
+Mac OS X Lion 10.7.3 64-bit n y/y n y y y
+Mac OS X Mountain Lion 10.8.1 64-bit n y/y n y y y
+Mac OS X Mavericks 10.9.1 64-bit n y/y n y y ?
+AIX 6.1 32- and 64-bit n y/n n y y y
+CentOS 5.9 Linux 2.6.18-308 i686 GNU y y/y y y y y
+CentOS 5.9 Linux 2.6.18-308 i686 Intel n y/y n y y y
+CentOS 5.9 Linux 2.6.18-308 i686 PGI n y/y n y y y
+CentOS 5.9 Linux 2.6.18 x86_64 GNU n y/y n y y y
+CentOS 5.9 Linux 2.6.18 x86_64 Intel n y/y n y y y
+CentOS 6.4 Linux 2.6.32 x86_64 GNU y y/y y y y y
+CentOS 6.4 Linux 2.6.32 x86_64 Intel n y/y n y y y
+CentOS 6.4 Linux 2.6.32 x86_64 PGI n y/y n y y y
+Linux 2.6.32-431.11.2.el6.ppc64 n y/n n y y y
+
+
+Platform Shared Shared Shared Thread-
+ C libs F90 libs C++ libs safe
+Solaris2.11 32-bit y y y y
+Solaris2.11 64-bit y y y y
+Windows 7 y y y y
+Windows 7 x64 y y y y
+Windows 7 Cygwin n n n y
+Windows 7 x64 Cygwin n n n y
+Windows 8 y y y y
+Windows 8 x64 y y y y
+Mac OS X Lion 10.7.3 32-bit y n y y
+Mac OS X Lion 10.7.3 64-bit y n y y
+Mac OS X Mountain Lion 10.8.1 64-bit y n y y
+Mac OS X Mavericks 10.9.1 64-bit y n y y
+AIX 6.1 32- and 64-bit y n n y
+CentOS 5.9 Linux 2.6.18-308 i686 GNU y y y y
+CentOS 5.9 Linux 2.6.18-308 i686 Intel y y y n
+CentOS 5.9 Linux 2.6.18-308 i686 PGI y y y n
+CentOS 5.9 Linux 2.6.18 x86_64 GNU y y y y
+CentOS 5.9 Linux 2.6.18 x86_64 Intel y y y n
+CentOS 6.4 Linux 2.6.32 x86_64 GNU y y y n
+CentOS 6.4 Linux 2.6.32 x86_64 Intel y y y n
+CentOS 6.4 Linux 2.6.32 x86_64 PGI y y y n
+Linux 2.6.32-431.11.2.el6.ppc64 y y y n
+
+Compiler versions for each platform are listed in the preceding
+"Supported Platforms" table.
+
+
+More Tested Platforms
+=====================
+The following platforms are not supported but have been tested for this release.
+
+ Linux 2.6.18-308.13.1.el5PAE MPICH mpich 3.1.2 compiled with
+ #1 SMP i686 i686 i386 gcc 4.9.1 and gfortran 4.9.1
+ (jam) g95 (GCC 4.0.3 (g95 0.94!)
+
+ Linux 2.6.18-431.11.2.el6 MPICH mpich 3.1.2 compiled with
+ #1 SMP x86_64 GNU/Linux gcc 4.9.1 and gfortran 4.9.1
+ (platypus) g95 (GCC 4.0.3 (g95 0.94!)
+
+ FreeBSD 8.2-STABLE i386 gcc 4.2.1 [FreeBSD] 20070719
+ (loyalty) gcc 4.6.1 20110422
+ g++ 4.6.1 20110422
+ gfortran 4.6.1 20110422
+
+ FreeBSD 8.2-STABLE amd64 gcc 4.2.1 [FreeBSD] 20070719
+ (freedom) gcc 4.6.1 20110422
+ g++ 4.6.1 20110422
+ gfortran 4.6.1 20110422
+
+ Debian7.5.0 3.2.0-4-686 #1 SMP Debian 3.2.51-1 i686 GNU/Linux
+ gcc (Debian 4.7.2-5) 4.7.2
+ GNU Fortran (Debian 4.7.2-5) 4.7.2
+ (cmake and autotools)
+
+ Debian7.5.0 3.2.0-4-amd64 #1 SMP Debian 3.2.51-1 x86_64 GNU/Linux
+ gcc (Debian 4.7.2-5) 4.7.2
+ GNU Fortran (Debian 4.7.2-5) 4.7.2
+ (cmake and autotools)
+
+ Fedora20 3.15.3-200.fc20.i6866 #1 SMP i686 i686 i386 GNU/Linux
+ gcc (GCC) 4.8.3 20140624 (Red Hat 4.8.3-1)
+ GNU Fortran (GCC) 4.8.3 20140624 (Red Hat 4.8.3-1)
+ (cmake and autotools)
+
+ Fedora20 3.15.3-200.fc20.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux
+ gcc (GCC) 4.8.3 20140624 (Red Hat 4.8.3-1)
+ GNU Fortran (GCC) 4.8.3 20140624 (Red Hat 4.8.3-1)
+ (cmake and autotools)
+
+ SUSE 13.1 3.11.10-17-desktop #1 SMP PREEMPT i686 athlon i386 GNU/Linux
+ gcc (SUSE Linux) 4.8.1
+ GNU Fortran (SUSE Linux) 4.8.1
+ (cmake and autotools)
+
+ SUSE 13.1 3.11.10-17-desktop #1 SMP PREEMPT x86_64 x86_64 x86_64 GNU/Linux
+ gcc (SUSE Linux) 4.8.1
+ GNU Fortran (SUSE Linux) 4.8.1
+ (cmake and autotools)
+
+ Ubuntu 14.04 3.13.0-35-generic #62-Ubuntu SMP i686 GNU/Linux
+ gcc (Ubuntu/Linaro 4.9.1-0ubuntu1) 4.9.1
+ GNU Fortran (Ubuntu/Linaro 4.9.1-0ubuntu1) 4.9.1
+ (cmake and autotools)
+
+ Ubuntu 14.04 3.13.0-35-generic #62-Ubuntu SMP x86_64 GNU/Linux
+ gcc (Ubuntu/Linaro 4.9.1-0ubuntu1) 4.9.1
+ GNU Fortran (Ubuntu/Linaro 4.9.1-0ubuntu1) 4.9.1
+ (cmake and autotools)
+
+ Cray Linux Environment (CLE) PrgEnv-pgi/4.0.46
+ hopper.nersc.gov pgcc 12.5-0 64-bit target on x86-64 Linux -tp shanghai
+ pgf90 12.5-0 64-bit target on x86-64 Linux -tp shanghai
+ pgCC 12.5-0 64-bit target on x86-64 Linux -tp shanghai
+
+
+Known Problems
+==============
+* "make check" fails on CYGWIN when building shared lib files is enabled. The
+ default on Cygwin has been changed to disable shared. It can be enabled with
+ the --enable-shared configure option but is likely to fail "make check"
+ with GCC compilers. (LK -2015/04/16)
+
+* CLANG compiler with the options -fcatch-undefined-behavior and -ftrapv
+ catches some undefined behavior in the alignment algorithm of the macro DETECT_I
+ in H5detect.c (Issue 8147). Since the algorithm is trying to detect the alignment
+ of integers, ideally the flag -fcatch-undefined-behavior shouldn't be used for
+ H5detect.c. In the future, we can separate flags for H5detect.c from the rest of
+ the library. (SLU - 2013/10/16)
+
+* The 5.9 C++ compiler on Sun failed to compile a C++ test ttypes.cpp. It
+ complains with this message:
+ "/home/hdf5/src/H5Vprivate.h", line 130: Error: __func__ is not defined.
+
+ The reason is that __func__ is a predefined identifier in C99 standard. The
+ HDF5 C library uses it in H5private.h. The test ttypes.cpp includes
+ H5private.h (H5Tpkg.h<-H5Fprivate.h<-H5Vprivate.h<-H5private.h). Sun's 5.9
+ C++ compiler doesn't support __func__, thus fails to compile the C++ test.
+ But 5.11 C++ compiler does. To check whether your Sun C++ compiler knows this
+ identifier, try to compile the following simple C++ program:
+ #include<stdio.h>
+
+ int main(void)
+ {
+ printf("%s\n", __func__);
+ return 0;
+ }
+ (SLU - 2012/11/5)
+
+* The C++ and FORTRAN bindings are not currently working on FreeBSD with the
+ native release 8.2 compilers (4.2.1), but are working with gcc 4.6 from the
+ ports (and probably gcc releases after that).
+ (QAK - 2012/10/19)
+
+* The data conversion test dt_arith.c has failures (segmentation fault) from
+ "long double" to other datatypes during hard conversion when the library
+ is built with the default GCC 4.2.1 on Mac Lion system. It only happens
+ with optimization (-O3, -O2, and -O1). Some newer versions of GCC do not
+ have this problem. Users should disable optimization or try a newer
+ version of GCC. (Issue 8017. SLU - 2012/6/12)
+
+* The data conversion test dt_arith.c fails in "long double" to integer
+ conversion on Ubuntu 11.10 (3.0.0.13 kernel) with GCC 4.6.1 if the library
+ is built with optimization -O3 or -O2. The older GCC (4.5) or a newer
+ kernel (3.2.2 on Fedora) doesn't have the problem. Users should lower the
+ optimization level (-O1 or -O0) by defining CFLAGS on the command line of
+ "configure" like:
+
+ CFLAGS=-O1 ./configure
+
+ This will override the library's default optimization level. (Issue 7829.
+ SLU - 2012/2/7)
+
+* The --with-mpe configure option does not work with MPICH2. (AKC - 2011/03/10)
+
+* While working on the 1.8.6 release of HDF5, a bug was discovered that can
+ occur when reading from a dataset in parallel shortly after it has been
+ written to collectively. The issue was exposed by a new test in the parallel
+ HDF5 test suite, but had existed before that. We believe the problem lies with
+ certain MPI implementations and/or filesystems.
+
+ We have provided a pure MPI test program, as well as a standalone HDF5
+ program, that can be used to determine if this is an issue on your system.
+ They should be run across multiple nodes with a varying number of processes.
+ These programs can be found at:
+ http://www.hdfgroup.org/ftp/HDF5/examples/known_problems/
+
+* Parallel mode in AIX will fail some of the testcheck_version.sh tests where
+ it treats "exit(134)" the same as if process 0 had received an abort signal.
+ This is fixed and will be available in the next release. AKC - 2009/11/3
+
+* The PathScale MPI implementation, accessing a Panasas file system, would
+ cause H5Fcreate() with H5F_ACC_EXCL to fail even when the file does not
+ exist. This is due to the MPI_File_open() call failing if the amode has
+ the MPI_MODE_EXCL bit set. (See bug 1468 for details.) AKC - 2009/8/11
+
+* Parallel tests failed with 16 processes with data inconsistency at testphdf5
+ / dataset_readAll. Parallel tests also failed with 32 and 64 processes with
+ collective abort of all ranks at t_posix_compliant / allwrite_allread_blocks
+ with MPI IO. (CMC - 2009/04/28)
+
+* For SNL, spirit/liberty/thunderbird: The serial tests pass but parallel
+ tests failed with MPI-IO file locking message. AKC - 2007/6/25.
+* On an Intel 64 Linux cluster (RH 4, Linux 2.6.9) with Intel 10.0 compilers,
+ use the -mp -O1 compilation flags to build the libraries. Higher levels of
+ optimization cause failures in several HDF5 library tests.
+* For HPUX 11.23, many tools tests failed for the 64-bit version when linked
+ to the shared libraries (tested for 1.8.0-beta2).
+* For SNL, Red Storm: only parallel HDF5 is supported. The serial tests pass
+ and the parallel tests also pass with lots of non-fatal error messages.
+* On SUN 5.10, the C++ test fails in the "Testing Shared Datatypes with
+ Attributes" test.
+* configuring with --enable-debug=all produces compiler errors on most
+ platforms. Users who want to run HDF5 in debug mode should use
+ --enable-debug rather than --enable-debug=all to enable debugging
+ information on most modules.
+* On Mac OS 10.4, test/dt_arith.c has some errors in conversion from long
+ double to (unsigned) long long and from (unsigned)long long to long double.
+* On Altix SGI with Intel 9.0 testmeta.c would not compile with -O3
+ optimization flag.
+* On VAX, Scaleoffset filter isn't supported. The filter cannot be applied to
+ HDF5 data generated on VAX. Scaleoffset filter only supports IEEE standard
+ for floating-point data.
+* On Cray X1, a lone colon on the command line of h5dump --xml (as in
+ the testh5dumpxml.sh script) is misinterpreted by the operating system
+ and causes an error.
+* On mpich 1.2.5 and 1.2.6, if more than two processes contribute no I/O and
+ the application asks to do collective I/O, we found that when using 4
+ processors, a simple collective write will sometimes hang. This can be
+ verified with the t_mpi test under testpar.
+* The dataset created or rewritten with the v1.6.3 library or after can't
+ be read with the v1.6.2 library or before when the Fletcher32 EDC (filter)
+ is enabled. There was a bug in the code that calculates the Fletcher32
+ checksum in the library before v1.6.3. The checksum value wasn't consistent
+ between big-endian and little-endian systems. This bug was fixed in
+ Release 1.6.3. However, after fixing the bug, the checksum value is no
+ longer the same as before on little-endian systems. The library release
+ after 1.6.4 can still read the dataset created or rewritten with the library
+ of v1.6.2 or before. SLU - 2005/6/30
+* For version 6 (6.02 and 6.04) of the Portland Group compiler on the AMD
+ Opteron processor, there's a bug in the compiler's optimization (-O2). The
+ library failed in several tests, all related to the multi driver. The
+ problem has been reported to the vendor.
+* On IBM AIX systems, parallel HDF5 mode will fail some tests with error
+ messages like "INFO: 0031-XXX ...". This is from the command poe.
+ Set the environment variable MP_INFOLEVEL to 0 to minimize the messages
+ and run the tests again.
+ The tests may fail with messages like "The socket name is already
+ in use". HDF5 does not use sockets (except for stream-VFD). This is
+ due to problems of the poe command trying to set up the debug socket.
+ Check if there are many old /tmp/s.pedb.* staying around. These are
+ sockets used by the poe command and left behind due to failed commands.
+ Ask your system administrator to clean them out. Lastly, request IBM
+ to provide a means to run poe without the debug socket.
+
+* The C++ library's tests fail when compiling with the PGI C++ compiler. The
+ workaround until the problem is correctly handled is to use the
+ flag "--instantiate=local" prior to the configure and build steps, as:
+ setenv CXX "pgCC --instantiate=local" for pgCC 5.02 and higher
+
+
+* The stream-vfd test uses ip port 10007 for testing. If another
+ application is already using that port address, the test will hang
+ indefinitely and has to be terminated by the kill command. To try the
+ test again, change the port address in test/stream_test.c to one not
+ being used in the host.
+
+* The --enable-static-exec configure flag will only statically link libraries
+ if the static version of that library is present. If only the shared version
+ of a library exists (i.e., most system libraries on Solaris, AIX, and Mac,
+ for example, only have shared versions), the flag should still result in a
+ successful compilation, but note that the installed executables will not be
+ fully static. Thus, the only guarantee on these systems is that the
+ executable is statically linked with just the HDF5 library.
+
+* With the gcc 2.95.2 compiler, HDF5 uses the `-ansi' flag during
+ compilation. The ANSI version of the compiler complains about not being
+ able to handle the `long long' datatype with the warning:
+
+ warning: ANSI C does not support `long long'
+
+ This warning is innocuous and can be safely ignored.
+
+* Certain platforms give false negatives when testing h5ls:
+ - Cray J90 and Cray T90IEEE give errors during testing when displaying
+ some floating-point values. These are benign differences due to
+ the different precision in the values displayed and h5ls appears to
+ be dumping floating-point numbers correctly.
+
+* Not all platforms behave correctly with szip's shared libraries. Szip is
+ disabled in these cases, and a message is relayed at configure time. Static
+ libraries should be working on all systems that support szip, and should be
+ used when shared libraries are unavailable. There is also a configure error
+ on Altix machines that incorrectly reports when a version of szip without
+ an encoder is being used.
+
+* On some platforms that use Intel and Absoft compilers to build the HDF5
+ Fortran library, compilation may fail for fortranlib_test.f90, fflush1.f90
+ and fflush2.f90, complaining about the exit subroutine. Comment out the line
+ IF (total_error .ne. 0) CALL exit (total_error)
+
+* Information about building with PGI and Intel compilers is available in
+ the INSTALL file, sections 5.7 and 5.8.
+
+* On at least one system, (SDSC DataStar), the scheduler (in this case
+ LoadLeveler) sends job status updates to standard error when you run
+ any executable that was compiled with the parallel compilers.
+
+ This causes problems when running "make check" on parallel builds, as
+ many of the tool tests function by saving the output from test runs,
+ and comparing it to an exemplar.
+
+ The best solution is to reconfigure the target system so it no longer
+ inserts the extra text. However, this may not be practical.
+
+ In such cases, one solution is to "setenv HDF5_Make_Ignore yes" prior to
+ the configure and build. This will cause "make check" to continue after
+ detecting errors in the tool tests. However, in the case of SDSC DataStar,
+ it also leaves you with some 150 "failed" tests to examine by hand.
+
+ A second solution is to write a script to run serial tests and filter
+ out the text added by the scheduler. A sample script used on SDSC
+ DataStar is given below, but you will probably have to customize it
+ for your installation.
+
+ Observe that the basic idea is to insert the script as the first item
+ on the command line which executes the test. The script then
+ executes the test and filters out the offending text before passing
+ it on.
+
+ #!/bin/csh
+
+ set STDOUT_FILE=~/bin/serial_filter.stdout
+ set STDERR_FILE=~/bin/serial_filter.stderr
+
+ rm -f $STDOUT_FILE $STDERR_FILE
+
+ ($* > $STDOUT_FILE) >& $STDERR_FILE
+
+ set RETURN_VALUE=$status
+
+ cat $STDOUT_FILE
+
+ tail +3 $STDERR_FILE
+
+ exit $RETURN_VALUE
+
+ You get the HDF5 make files and test scripts to execute your filter script
+ by setting the environment variable "RUNSERIAL" to the full path of the
+ script prior to running configure for parallel builds. Remember to
+ "unsetenv RUNSERIAL" before running configure for a serial build.
+
+ Note that the RUNSERIAL environment variable exists so that we
+ can prefix serial runs as necessary on the target system. On DataStar,
+ no prefix is necessary. However on an MPICH system, the prefix might
+ have to be set to something like "/usr/local/mpi/bin/mpirun -np 1" to
+ get the serial tests to run at all.
+
+ In such cases, you will have to include the regular prefix in your
+ filter script.
+
+* H5Ocopy() does not copy reg_ref attributes correctly when shared-message
+ is turned on. The value of the reference in the destination attribute is
+ wrong. This H5Ocopy problem will affect the h5copy tool.
+
diff --git a/release_docs/HISTORY-1_9.txt b/release_docs/HISTORY-1_9.txt
deleted file mode 100644
index 8e7648a..0000000
--- a/release_docs/HISTORY-1_9.txt
+++ /dev/null
@@ -1,6 +0,0 @@
- HDF5 HISTORY
- ============
- This file contains history of the HDF5 1.9 branch
-
- CONTENTS
-
diff --git a/release_docs/INSTALL_CMake.txt b/release_docs/INSTALL_CMake.txt
index ee331a8..7cb0225 100644
--- a/release_docs/INSTALL_CMake.txt
+++ b/release_docs/INSTALL_CMake.txt
@@ -30,7 +30,7 @@ Obtaining HDF5 source code
CMake version
1. We suggest you obtain the latest CMake from the Kitware web site.
- The HDF5 1.10."X" product requires a minimum CMake version 3.1.0,
+ The HDF5 1.10."X" product requires a minimum CMake version 3.2.2,
where "X" is the current HDF5 release version.
Note:
@@ -51,7 +51,7 @@ The following files referenced below are available at the HDF web site:
http://www.hdfgroup.org/HDF5/release/cmakebuild.html
Single compressed file with all the files needed, including source:
- hdf5-1.10.0-CMake.zip or hdf5-1.10.0-CMake.tar.gz
+ hdf5-1.10.X-CMake.zip or hdf5-1.10.X-CMake.tar.gz
Individual files
-----------------------------------------------
@@ -249,7 +249,7 @@ IV. Further considerations
========================================================================
1. We suggest you obtain the latest CMake for windows from the Kitware
- web site. The HDF5 1.10."X" product requires a minimum CMake version 3.1.0.
+ web site. The HDF5 1.10."X" product requires a minimum CMake version 3.2.2.
2. If you plan to use Zlib or Szip:
A. Download the binary packages and install them in a central location.
@@ -263,10 +263,10 @@ IV. Further considerations
B. Use source packages from an SVN server by adding the following CMake
options:
- HDF5_ALLOW_EXTERNAL_SUPPORT:STRING="SVN"
- ZLIB_SVN_URL:STRING="http://some_location/zlib/trunk"
- SZIP_SVN_URL:STRING="http://some_location/szip/trunk"
- where "some_location" is the URL to the SVN repository. Also set
+ HDF5_ALLOW_EXTERNAL_SUPPORT:STRING="GIT"
+ ZLIB_GIT_URL:STRING="http://some_location/zlib"
+ SZIP_GIT_URL:STRING="http://some_location/szip"
+ where "some_location" is the URL to the GIT repository. Also set
CMAKE_BUILD_TYPE to the configuration type.
C. Use source packages from a compressed file by adding the following
@@ -376,10 +376,10 @@ These five steps are described in detail below.
* MinGW Makefiles
* NMake Makefiles
* Unix Makefiles
- * Visual Studio 12 2013
- * Visual Studio 12 2013 Win64
* Visual Studio 11 2012
* Visual Studio 11 2012 Win64
+ * Visual Studio 12 2013
+ * Visual Studio 12 2013 Win64
* Visual Studio 14 2015
* Visual Studio 14 2015 Win64
@@ -403,7 +403,7 @@ These five steps are described in detail below.
set (HDF5_ENABLE_SZIP_SUPPORT ON CACHE BOOL "Use SZip Filter" FORCE)
set (HDF5_ENABLE_SZIP_ENCODING ON CACHE BOOL "Use SZip Encoding" FORCE)
set (HDF5_ENABLE_HSIZET ON CACHE BOOL "Enable datasets larger than memory" FORCE)
- set (HDF5_ENABLE_UNSUPPORTED OFF CACHE BOOL "Enable unsupported combinations of configuration options" FORCE)
+ set (ALLOW_UNSUPPORTED OFF CACHE BOOL "Enable unsupported combinations of configuration options" FORCE)
set (HDF5_ENABLE_DEPRECATED_SYMBOLS ON CACHE BOOL "Enable deprecated public API symbols" FORCE)
set (HDF5_ENABLE_DIRECT_VFD OFF CACHE BOOL "Build the Direct I/O Virtual File Driver" FORCE)
set (HDF5_ENABLE_PARALLEL OFF CACHE BOOL "Enable parallel build (requires MPI)" FORCE)
@@ -413,14 +413,16 @@ These five steps are described in detail below.
set (HDF5_ENABLE_USING_MEMCHECKER OFF CACHE BOOL "Indicate that a memory checker is used" FORCE)
set (HDF5_MEMORY_ALLOC_SANITY_CHECK OFF CACHE BOOL "Indicate that internal memory allocation sanity checks are enabled" FORCE)
set (HDF5_DISABLE_COMPILER_WARNINGS OFF CACHE BOOL "Disable compiler warnings" FORCE)
+ set (HDF5_ENABLE_ALL_WARNINGS ON CACHE BOOL "Enable all warnings" FORCE)
set (HDF5_USE_FOLDERS ON CACHE BOOL "Enable folder grouping of projects in IDEs." FORCE)
set (HDF5_USE_16_API_DEFAULT OFF CACHE BOOL "Use the HDF5 1.6.x API by default" FORCE)
+ set (HDF5_USE_18_API_DEFAULT OFF CACHE BOOL "Use the HDF5 1.8.x API by default" FORCE)
set (HDF5_ENABLE_THREADSAFE OFF CACHE BOOL "(WINDOWS)Enable Threadsafety" FORCE)
set (HDF_TEST_EXPRESS "2" CACHE STRING "Control testing framework (0-3)" FORCE)
set (HDF5_PACKAGE_EXTLIBS OFF CACHE BOOL "(WINDOWS)CPACK - include external libraries" FORCE)
set (HDF5_NO_PACKAGES OFF CACHE BOOL "CPACK - Disable packaging" FORCE)
- set (HDF5_ALLOW_EXTERNAL_SUPPORT "NO" CACHE STRING "Allow External Library Building (NO SVN TGZ)" FORCE)
- set_property (CACHE HDF5_ALLOW_EXTERNAL_SUPPORT PROPERTY STRINGS NO SVN TGZ)
+ set (HDF5_ALLOW_EXTERNAL_SUPPORT "NO" CACHE STRING "Allow External Library Building (NO GIT SVN TGZ)" FORCE)
+ set_property (CACHE HDF5_ALLOW_EXTERNAL_SUPPORT PROPERTY STRINGS NO GIT SVN TGZ)
set (ZLIB_TGZ_NAME "ZLib.tar.gz" CACHE STRING "Use ZLib from compressed file" FORCE)
set (SZIP_TGZ_NAME "SZip.tar.gz" CACHE STRING "Use SZip from compressed file" FORCE)
set (ZLIB_PACKAGE_NAME "zlib" CACHE STRING "Name of ZLIB package" FORCE)
@@ -477,15 +479,15 @@ These five steps are described in detail below.
Release and build the solution.
3.2.1 The external libraries (zlib and szip) can be configured
- to allow building the libraries by downloading from an SVN repository.
+ to allow building the libraries by downloading from a GIT repository.
The option is 'HDF5_ALLOW_EXTERNAL_SUPPORT'; by adding the following
configuration option:
- -DHDF5_ALLOW_EXTERNAL_SUPPORT:STRING="SVN"
+ -DHDF5_ALLOW_EXTERNAL_SUPPORT:STRING="GIT"
- The options to control the SVN URL (config/cmake/cacheinit.cmake file) are:
- ZLIB_SVN_URL:STRING="http://${svn_url}/zlib/trunk"
- SZIP_SVN_URL:STRING="http://${svn_url}/szip/trunk"
- ${svn_url} should be changed to your location. Also define CMAKE_BUILD_TYPE
+ The options to control the GIT URL (config/cmake/cacheinit.cmake file) are:
+ ZLIB_GIT_URL:STRING="http://${git_url}/zlib"
+ SZIP_GIT_URL:STRING="http://${git_url}/szip"
+ ${git_url} should be changed to your location. Also define CMAKE_BUILD_TYPE
to be the configuration type.
3.2.2 Or the external libraries (zlib and szip) can be configured
@@ -525,8 +527,8 @@ These five steps are described in detail below.
NOTE: See note 8 of this document for NSIS information.
See note 9 of this document for WiX information.
- Also, if you are using a Visual Studio Express version or do not
- want to enable the packaging components, set HDF5_NO_PACKAGES
+ Also, if you are using a Visual Studio Express version or
+ want to disable the packaging components, set HDF5_NO_PACKAGES
to ON (on the command line add -DHDF5_NO_PACKAGES:BOOL=ON)
6. The files that support building HDF5 with CMake are all the files in the
@@ -563,7 +565,7 @@ The config/cmake/cacheinit.cmake file can override the following values.
---------------- General Build Options ---------------------
BUILD_SHARED_LIBS "Build Shared Libraries" ON
-BUILD_STATIC_EXECS "Build Static Executabless" OFF
+BUILD_STATIC_EXECS "Build Static Executables" OFF
BUILD_TESTING "Build HDF5 Unit Testing" ON
---------------- HDF5 Build Options ---------------------
@@ -589,7 +591,7 @@ HDF5_ENABLE_LARGE_FILE "Enable support for large (64-bit) files on Linux
HDF5_ENABLE_PARALLEL "Enable parallel build (requires MPI)" OFF
HDF5_ENABLE_TRACE "Enable API tracing capability" OFF
HDF5_ENABLE_USING_MEMCHECKER "Indicate that a memory checker is used" OFF
-HDF5_GENERATE_HEADERS "Rebuild Generated Files" OFF
+HDF5_GENERATE_HEADERS "Rebuild Generated Files" ON
HDF5_JAVA_PACK_JRE "Package a JRE installer directory" OFF
HDF5_MEMORY_ALLOC_SANITY_CHECK "Indicate that internal memory allocation sanity checks are enabled" OFF
HDF5_METADATA_TRACE_FILE "Enable metadata trace file collection" OFF
@@ -624,9 +626,9 @@ if (HDF5_ENABLE_SZIP_SUPPORT)
HDF5_ENABLE_SZIP_ENCODING "Use SZip Encoding" OFF
if (WINDOWS)
H5_DEFAULT_PLUGINDIR "%ALLUSERSPROFILE%/hdf5/lib/plugin"
-else (WINDOWS)
+else ()
H5_DEFAULT_PLUGINDIR "/usr/local/hdf5/lib/plugin"
-endif (WINDOWS)
+endif ()
@@ -647,9 +649,9 @@ build and test process.
VIII. Options for Platform Configuration Files
========================================================================
-Below is the HDF5config.cmake ctest script with extra comments.
+Below is the HDF5config.cmake and HDF5options.cmake ctest scripts.
Execute:
- ctest -S HDF5config.cmake,BUILD_GENERATOR=xxx -C Release -V -O hdf5.log
+ ctest -S HDF5config.cmake,BUILD_GENERATOR=xxx -C Release -VV -O hdf5.log
The same scripts can be used on Linux, Mac OSX or a Windows machine by
adding an option (${CTEST_SCRIPT_ARG}) to the platform configuration script.
@@ -657,10 +659,10 @@ adding an option (${CTEST_SCRIPT_ARG}) to the platform configuration script.
#############################################################################################
### ${CTEST_SCRIPT_ARG} is of the form OPTION=VALUE ###
### BUILD_GENERATOR required [Unix, VS2015, VS201564, VS2013, VS201364, VS2012, VS201264] ###
-### ctest -S HDF5config.cmake,BUILD_GENERATOR=VS201264 -C Release -V -O hdf5.log ###
+### ctest -S HDF5config.cmake,BUILD_GENERATOR=VS201264 -C Release -VV -O hdf5.log ###
#############################################################################################
-cmake_minimum_required(VERSION 3.1.0 FATAL_ERROR)
+cmake_minimum_required (VERSION 3.2.2 FATAL_ERROR)
############################################################################
# Usage:
# ctest -S HDF5config.cmake,OPTION=VALUE -C Release -VV -O test.log
@@ -677,14 +679,14 @@ cmake_minimum_required(VERSION 3.1.0 FATAL_ERROR)
# INSTALLDIR - root folder where hdf5 is installed
# CTEST_CONFIGURATION_TYPE - Release, Debug, etc
# CTEST_SOURCE_NAME - source folder
-# STATIC_LIBRARIES - Build/use static libraries
+# STATIC_ONLY - Build/use static libraries
# FORTRAN_LIBRARIES - Build/use fortran libraries
# JAVA_LIBRARIES - Build/use java libraries
# NO_MAC_FORTRAN - Yes to be SHARED on a Mac
##############################################################################
-set(CTEST_SOURCE_VERSION 1.10.0)
-set(CTEST_SOURCE_VERSEXT "-pre1")
+set (CTEST_SOURCE_VERSION 1.10.1)
+set (CTEST_SOURCE_VERSEXT "")
##############################################################################
# handle input parameters to script.
@@ -692,243 +694,290 @@ set(CTEST_SOURCE_VERSEXT "-pre1")
#INSTALLDIR - HDF5-1.10.0 root folder
#CTEST_CONFIGURATION_TYPE - Release, Debug, RelWithDebInfo
#CTEST_SOURCE_NAME - name of source folder; HDF5-1.10.0
-#STATIC_LIBRARIES - Default is YES
+#STATIC_ONLY - Default is YES
#FORTRAN_LIBRARIES - Default is NO
#JAVA_LIBRARIES - Default is NO
#NO_MAC_FORTRAN - set to TRUE to allow shared libs on a Mac
-if(DEFINED CTEST_SCRIPT_ARG)
+if (DEFINED CTEST_SCRIPT_ARG)
# transform ctest script arguments of the form
# script.ctest,var1=value1,var2=value2
# to variables with the respective names set to the respective values
- string(REPLACE "," ";" script_args "${CTEST_SCRIPT_ARG}")
- foreach(current_var ${script_args})
+ string (REPLACE "," ";" script_args "${CTEST_SCRIPT_ARG}")
+ foreach (current_var ${script_args})
if ("${current_var}" MATCHES "^([^=]+)=(.+)$")
- set("${CMAKE_MATCH_1}" "${CMAKE_MATCH_2}")
- endif()
- endforeach()
-endif()
+ set ("${CMAKE_MATCH_1}" "${CMAKE_MATCH_2}")
+ endif ()
+ endforeach ()
+endif ()
# build generator must be defined
-if(NOT DEFINED BUILD_GENERATOR)
- message(FATAL_ERROR "BUILD_GENERATOR must be defined - Unix, VS2015, VS201564, VS2013, VS201364, VS2012, or VS201264")
-else()
- if(${BUILD_GENERATOR} STREQUAL "Unix")
- set(CTEST_CMAKE_GENERATOR "Unix Makefiles")
- elseif(${BUILD_GENERATOR} STREQUAL "VS2015")
- set(CTEST_CMAKE_GENERATOR "Visual Studio 14 2015")
- elseif(${BUILD_GENERATOR} STREQUAL "VS201564")
- set(CTEST_CMAKE_GENERATOR "Visual Studio 14 2015 Win64")
- elseif(${BUILD_GENERATOR} STREQUAL "VS2013")
- set(CTEST_CMAKE_GENERATOR "Visual Studio 12 2013")
- elseif(${BUILD_GENERATOR} STREQUAL "VS201364")
- set(CTEST_CMAKE_GENERATOR "Visual Studio 12 2013 Win64")
- elseif(${BUILD_GENERATOR} STREQUAL "VS2012")
- set(CTEST_CMAKE_GENERATOR "Visual Studio 11 2012")
- elseif(${BUILD_GENERATOR} STREQUAL "VS201264")
- set(CTEST_CMAKE_GENERATOR "Visual Studio 11 2012 Win64")
- else()
- message(FATAL_ERROR "Invalid BUILD_GENERATOR must be - Unix, VS2015, VS201564, VS2013, VS201364, VS2012, or VS201264")
- endif()
-endif()
-
-if(NOT DEFINED INSTALLDIR)
- if(WIN32)
- set(INSTALLDIR "C:\\Program\ Files\\myhdf5")
- else()
- set(INSTALLDIR "/usr/local/myhdf5")
- endif()
-endif()
-if(NOT DEFINED CTEST_CONFIGURATION_TYPE)
- set(CTEST_CONFIGURATION_TYPE "Release")
-endif()
-if(NOT DEFINED CTEST_SOURCE_NAME)
- set(CTEST_SOURCE_NAME "hdf5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}")
-endif()
-if(NOT DEFINED STATIC_LIBRARIES)
- set(STATICLIBRARIES "YES")
-else()
- set(STATICLIBRARIES "NO")
-endif()
-if(NOT DEFINED FORTRAN_LIBRARIES)
- set(FORTRANLIBRARIES "NO")
-else()
- set(FORTRANLIBRARIES "YES")
-endif()
-if(NOT DEFINED JAVA_LIBRARIES)
- set(JAVALIBRARIES "NO")
-else()
- set(JAVALIBRARIES "YES")
-endif()
-
-set(CTEST_BINARY_NAME "build")
-set(CTEST_DASHBOARD_ROOT "${CTEST_SCRIPT_DIRECTORY}")
-if(WIN32)
- set(CTEST_SOURCE_DIRECTORY "${CTEST_DASHBOARD_ROOT}\\${CTEST_SOURCE_NAME}")
- set(CTEST_BINARY_DIRECTORY "${CTEST_DASHBOARD_ROOT}\\${CTEST_BINARY_NAME}")
-else()
- set(CTEST_SOURCE_DIRECTORY "${CTEST_DASHBOARD_ROOT}/${CTEST_SOURCE_NAME}")
- set(CTEST_BINARY_DIRECTORY "${CTEST_DASHBOARD_ROOT}/${CTEST_BINARY_NAME}")
-endif()
+if (NOT DEFINED BUILD_GENERATOR)
+ message (FATAL_ERROR "BUILD_GENERATOR must be defined - Unix, VS2015, VS201564, VS2013, VS201364, VS2012, or VS201264")
+else ()
+ if (${BUILD_GENERATOR} STREQUAL "Unix")
+ set (CTEST_CMAKE_GENERATOR "Unix Makefiles")
+ elseif (${BUILD_GENERATOR} STREQUAL "VS2015")
+ set (CTEST_CMAKE_GENERATOR "Visual Studio 14 2015")
+ elseif (${BUILD_GENERATOR} STREQUAL "VS201564")
+ set (CTEST_CMAKE_GENERATOR "Visual Studio 14 2015 Win64")
+ elseif (${BUILD_GENERATOR} STREQUAL "VS2013")
+ set (CTEST_CMAKE_GENERATOR "Visual Studio 12 2013")
+ elseif (${BUILD_GENERATOR} STREQUAL "VS201364")
+ set (CTEST_CMAKE_GENERATOR "Visual Studio 12 2013 Win64")
+ elseif (${BUILD_GENERATOR} STREQUAL "VS2012")
+ set (CTEST_CMAKE_GENERATOR "Visual Studio 11 2012")
+ elseif (${BUILD_GENERATOR} STREQUAL "VS201264")
+ set (CTEST_CMAKE_GENERATOR "Visual Studio 11 2012 Win64")
+ else ()
+ message (FATAL_ERROR "Invalid BUILD_GENERATOR must be - Unix, VS2015, VS201564, VS2013, VS201364, VS2012, or VS201264")
+ endif ()
+endif ()
+
+###################################################################
+### Following Line is one of [Release, RelWithDebInfo, Debug] #####
+set (CTEST_CONFIGURATION_TYPE "$ENV{CMAKE_CONFIG_TYPE}")
+###################################################################
+
+if (NOT DEFINED INSTALLDIR)
+ if (WIN32)
+ set (INSTALLDIR "C:/Program Files/HDF_Group/HDF5/${CTEST_SOURCE_VERSION}")
+ else ()
+ set (INSTALLDIR "${CTEST_SCRIPT_DIRECTORY}/HDF_Group/HDF5/${CTEST_SOURCE_VERSION}")
+ endif ()
+endif ()
+if (NOT DEFINED CTEST_CONFIGURATION_TYPE)
+ set (CTEST_CONFIGURATION_TYPE "Release")
+endif ()
+if (NOT DEFINED CTEST_SOURCE_NAME)
+ set (CTEST_SOURCE_NAME "hdf5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}")
+endif ()
+if (NOT DEFINED STATIC_ONLY)
+ set (STATICONLYLIBRARIES "YES")
+else ()
+ set (STATICONLYLIBRARIES "NO")
+endif ()
+if (NOT DEFINED FORTRAN_LIBRARIES)
+ set (FORTRANLIBRARIES "NO")
+else ()
+ set(FORTRANLIBRARIES "YES")
+endif ()
+if (NOT DEFINED JAVA_LIBRARIES)
+ set (JAVALIBRARIES "NO")
+else ()
+ set (JAVALIBRARIES "YES")
+endif ()
+
+set (CTEST_BINARY_NAME "build")
+set (CTEST_DASHBOARD_ROOT "${CTEST_SCRIPT_DIRECTORY}")
+if (WIN32)
+ set (CTEST_SOURCE_DIRECTORY "${CTEST_DASHBOARD_ROOT}\\${CTEST_SOURCE_NAME}")
+ set (CTEST_BINARY_DIRECTORY "${CTEST_DASHBOARD_ROOT}\\${CTEST_BINARY_NAME}")
+else ()
+ set (CTEST_SOURCE_DIRECTORY "${CTEST_DASHBOARD_ROOT}/${CTEST_SOURCE_NAME}")
+ set (CTEST_BINARY_DIRECTORY "${CTEST_DASHBOARD_ROOT}/${CTEST_BINARY_NAME}")
+endif ()
###################################################################
######### Following describes compiler ############
-if(WIN32)
- set(SITE_OS_NAME "Windows")
- set(SITE_OS_VERSION "WIN7")
- if(${BUILD_GENERATOR} STREQUAL "VS201564")
- set(SITE_OS_BITS "64")
- set(SITE_COMPILER_NAME "vs2015")
- set(SITE_COMPILER_VERSION "14")
- elseif(${BUILD_GENERATOR} STREQUAL "VS2015")
- set(SITE_OS_BITS "32")
- set(SITE_COMPILER_NAME "vs2015")
- set(SITE_COMPILER_VERSION "14")
- elseif(${BUILD_GENERATOR} STREQUAL "VS201364")
- set(SITE_OS_BITS "64")
- set(SITE_COMPILER_NAME "vs2013")
- set(SITE_COMPILER_VERSION "12")
- elseif(${BUILD_GENERATOR} STREQUAL "VS2013")
- set(SITE_OS_BITS "32")
- set(SITE_COMPILER_NAME "vs2013")
- set(SITE_COMPILER_VERSION "12")
- elseif(${BUILD_GENERATOR} STREQUAL "VS201264")
- set(SITE_OS_BITS "64")
- set(SITE_COMPILER_NAME "vs2012")
- set(SITE_COMPILER_VERSION "11")
- elseif(${BUILD_GENERATOR} STREQUAL "VS2012")
- set(SITE_OS_BITS "32")
- set(SITE_COMPILER_NAME "vs2012")
- set(SITE_COMPILER_VERSION "11")
- endif()
+if (WIN32)
+ set (SITE_OS_NAME "Windows")
+ set (SITE_OS_VERSION "WIN7")
+ if (${BUILD_GENERATOR} STREQUAL "VS201564")
+ set (SITE_OS_BITS "64")
+ set (SITE_COMPILER_NAME "vs2015")
+ set (SITE_COMPILER_VERSION "14")
+ elseif (${BUILD_GENERATOR} STREQUAL "VS2015")
+ set (SITE_OS_BITS "32")
+ set (SITE_COMPILER_NAME "vs2015")
+ set (SITE_COMPILER_VERSION "14")
+ elseif (${BUILD_GENERATOR} STREQUAL "VS201364")
+ set (SITE_OS_BITS "64")
+ set (SITE_COMPILER_NAME "vs2013")
+ set (SITE_COMPILER_VERSION "12")
+ elseif (${BUILD_GENERATOR} STREQUAL "VS2013")
+ set (SITE_OS_BITS "32")
+ set (SITE_COMPILER_NAME "vs2013")
+ set (SITE_COMPILER_VERSION "12")
+ elseif (${BUILD_GENERATOR} STREQUAL "VS201264")
+ set (SITE_OS_BITS "64")
+ set (SITE_COMPILER_NAME "vs2012")
+ set (SITE_COMPILER_VERSION "11")
+ elseif (${BUILD_GENERATOR} STREQUAL "VS2012")
+ set (SITE_OS_BITS "32")
+ set (SITE_COMPILER_NAME "vs2012")
+ set (SITE_COMPILER_VERSION "11")
+ endif ()
## Set the following to uniquely identify your computer ##
- set(CTEST_SITE "WIN7${BUILD_GENERATOR}.XXXX")
-else()
+ set (CTEST_SITE "WIN7${BUILD_GENERATOR}.XXXX")
+else ()
+ set (CTEST_CMAKE_GENERATOR "Unix Makefiles")
## Set the following to uniquely identify your computer ##
- if(APPLE)
- set(CTEST_SITE "MAC.XXXX")
- else()
- set(CTEST_SITE "LINUX.XXXX")
- endif()
-endif()
+ if (APPLE)
+ set (CTEST_SITE "MAC.XXXX")
+ else ()
+ set (CTEST_SITE "LINUX.XXXX")
+ endif ()
+ if (APPLE)
+ execute_process (COMMAND xcrun --find cc OUTPUT_VARIABLE XCODE_CC OUTPUT_STRIP_TRAILING_WHITESPACE)
+ execute_process (COMMAND xcrun --find c++ OUTPUT_VARIABLE XCODE_CXX OUTPUT_STRIP_TRAILING_WHITESPACE)
+ set (ENV{CC} "${XCODE_CC}")
+ set (ENV{CXX} "${XCODE_CXX}")
+ set (CTEST_USE_LAUNCHERS 1)
+ set (RR_WARNINGS_COMMON "-Wno-format-nonliteral -Wno-cast-align -Wno-unused -Wno-unused-variable -Wno-unused-function -Wno-self-assign -Wno-unused-parameter -Wno-sign-compare")
+ set (RR_WARNINGS_C "${RR_WARNINGS_COMMON} -Wno-deprecated-declarations -Wno-uninitialized")
+ set (RR_WARNINGS_CXX "${RR_WARNINGS_COMMON} -Woverloaded-virtual -Wshadow -Wwrite-strings -Wc++11-compat")
+ set (RR_FLAGS_COMMON "-g -O0 -fstack-protector-all -D_FORTIFY_SOURCE=2")
+ set (RR_FLAGS_C "${RR_FLAGS_COMMON}")
+ set (RR_FLAGS_CXX "${RR_FLAGS_COMMON}")
+ set (ENV{CFLAGS} "${RR_WARNINGS_C} ${RR_FLAGS_C}")
+ set (ENV{CXXFLAGS} "${RR_WARNINGS_CXX} ${RR_FLAGS_CXX}")
+ endif ()
+endif ()
###################################################################
###################################################################
######### Following is for submission to CDash ############
###################################################################
-set(MODEL "Experimental")
+set (MODEL "Experimental")
###################################################################
###################################################################
##### Following controls CDash submission #####
-#set(LOCAL_SUBMIT "TRUE")
+#set (LOCAL_SUBMIT "TRUE")
##### Following controls test process #####
-#set(LOCAL_SKIP_TEST "TRUE")
-#set(LOCAL_MEMCHECK_TEST "TRUE")
-#set(LOCAL_COVERAGE_TEST "TRUE")
+#set (LOCAL_SKIP_TEST "TRUE")
+#set (LOCAL_MEMCHECK_TEST "TRUE")
+#set (LOCAL_COVERAGE_TEST "TRUE")
##### Following controls cpack command #####
-#set(LOCAL_NO_PACKAGE "TRUE")
+#set (LOCAL_NO_PACKAGE "TRUE")
##### Following controls source update #####
-#set(LOCAL_UPDATE "TRUE")
-set(REPOSITORY_URL "http://svn.hdfgroup.uiuc.edu/hdf5/trunk")
+#set (LOCAL_UPDATE "TRUE")
+set (REPOSITORY_URL "https://git@bitbucket.hdfgroup.org/scm/hdffv/hdf5.git")
+set (REPOSITORY_BRANCH "develop")
+
#uncomment to use a compressed source file: *.tar on linux or mac *.zip on windows
#set(CTEST_USE_TAR_SOURCE "${CTEST_SOURCE_VERSION}")
###################################################################
###################################################################
-#### Change default configuration of options in config/cmake/cacheinit.cmake file ###
-#### format: set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DXXX:YY=ZZZZ")
-
-###################################################################
-if(${STATICLIBRARIES})
- set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DBUILD_SHARED_LIBS:BOOL=OFF")
+if (${STATICONLYLIBRARIES})
+ set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DBUILD_SHARED_LIBS:BOOL=OFF")
######### Following describes computer ############
## following is optional to describe build ##
- set(SITE_BUILDNAME_SUFFIX "STATIC")
-endif()
+ set (SITE_BUILDNAME_SUFFIX "STATIC")
+endif ()
###################################################################
+#### fortran ####
+if (${FORTRANLIBRARIES})
+ set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_FORTRAN:BOOL=ON")
+ ### enabling Fortran 2003 depends on HDF5_BUILD_FORTRAN
+ set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_F2003:BOOL=ON")
+else ()
+ set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_FORTRAN:BOOL=OFF")
+ ### enabling Fortran 2003 depends on HDF5_BUILD_FORTRAN
+ set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_F2003:BOOL=OFF")
+endif ()
+#### java ####
+if (${JAVALIBRARIES})
+ set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_JAVA:BOOL=ON")
+else ()
+ set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_JAVA:BOOL=OFF")
+endif ()
+
+### change install prefix
+set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DCMAKE_INSTALL_PREFIX:PATH=${INSTALLDIR}")
+set (ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DCTEST_CONFIGURATION_TYPE:STRING=$ENV{CMAKE_CONFIG_TYPE}")
+
+###################################################################
+
+if (WIN32)
+ set (BINFILEBASE "HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-win${SITE_OS_BITS}")
+ include (${CTEST_SCRIPT_DIRECTORY}\\HDF5options.cmake)
+ include (${CTEST_SCRIPT_DIRECTORY}\\CTestScript.cmake)
+ if (EXISTS "${CTEST_BINARY_DIRECTORY}\\${BINFILEBASE}.exe")
+ file (COPY "${CTEST_BINARY_DIRECTORY}\\${BINFILEBASE}.exe" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
+ endif ()
+ if (EXISTS "${CTEST_BINARY_DIRECTORY}\\${BINFILEBASE}.msi")
+ file (COPY "${CTEST_BINARY_DIRECTORY}\\${BINFILEBASE}.msi" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
+ endif ()
+ if (EXISTS "${CTEST_BINARY_DIRECTORY}\\${BINFILEBASE}.zip")
+ file (COPY "${CTEST_BINARY_DIRECTORY}\\${BINFILEBASE}.zip" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
+ endif ()
+else ()
+ set (BINFILEBASE "HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}")
+ include (${CTEST_SCRIPT_DIRECTORY}/HDF5options.cmake)
+ include (${CTEST_SCRIPT_DIRECTORY}/CTestScript.cmake)
+ if (APPLE)
+ if (EXISTS "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-Darwin.dmg")
+ file (COPY "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-Darwin.dmg" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
+ endif ()
+ if (EXISTS "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-Darwin.tar.gz")
+ file (COPY "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-Darwin.tar.gz" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
+ endif ()
+ if (EXISTS "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-Darwin.sh")
+ file (COPY "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-Darwin.sh" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
+ endif ()
+ else ()
+ if (CYGWIN)
+ if (EXISTS "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-CYGWIN.sh")
+ file (COPY "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-CYGWIN.sh" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
+ endif ()
+ if (EXISTS "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-CYGWIN.tar.gz")
+ file (COPY "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-CYGWIN.tar.gz" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
+ endif ()
+ else ()
+ if (EXISTS "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-Linux.sh")
+ file (COPY "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-Linux.sh" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
+ endif ()
+ if (EXISTS "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-Linux.tar.gz")
+ file (COPY "${CTEST_BINARY_DIRECTORY}/${BINFILEBASE}-Linux.tar.gz" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
+ endif ()
+ endif ()
+ endif ()
+endif ()
+
+HDF5options.cmake:
+#############################################################################################
+#### Change default configuration of options in config/cmake/cacheinit.cmake file ###
+#### format: set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DXXX:YY=ZZZZ") ###
+#############################################################################################
### uncomment/comment and change the following lines for other configuration options
+#############################################################################################
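+#### example of the format above (illustrative; HDF5_BUILD_EXAMPLES is a commonly
+#### available HDF5 CMake cache option, but verify the name against cacheinit.cmake):
+#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_EXAMPLES:BOOL=OFF")
+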
+#### alternate toolsets ####
+#set(CMAKE_GENERATOR_TOOLSET "Intel C++ Compiler 17.0")
+
+#############################################################################################
#### ext libraries ####
+
### ext libs from tgz
set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ALLOW_EXTERNAL_SUPPORT:STRING=TGZ -DTGZPATH:PATH=${CTEST_SCRIPT_DIRECTORY}")
-### ext libs from svn
-#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ALLOW_EXTERNAL_SUPPORT:STRING=SVN")
+### ext libs from git
+#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ALLOW_EXTERNAL_SUPPORT:STRING=GIT")
### ext libs on system
#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DZLIB_LIBRARY:FILEPATH=some_location/lib/zlib.lib -DZLIB_INCLUDE_DIR:PATH=some_location/include")
#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DSZIP_LIBRARY:FILEPATH=some_location/lib/szlib.lib -DSZIP_INCLUDE_DIR:PATH=some_location/include")
-### disable ext libs building
+
+### disable ext zlib building
#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_Z_LIB_SUPPORT:BOOL=OFF")
+### disable ext szip building
#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_SZIP_SUPPORT:BOOL=OFF")
#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_ENABLE_SZIP_ENCODING:BOOL=OFF")
-#### fortran ####
-if(${FORTRANLIBRARIES})
- set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_FORTRAN:BOOL=ON")
-else()
- set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_BUILD_FORTRAN:BOOL=OFF")
-endif()
-#### java ####
-if(${JAVALIBRARIES})
- set(BUILD_OPTIONS "${BUILD_OPTIONS} -DHDF_BUILD_JAVA:BOOL=ON")
-else()
- set(BUILD_OPTIONS "${BUILD_OPTIONS} -DHDF_BUILD_JAVA:BOOL=OFF")
-endif()
+#############################################################################################
### disable test program builds
+
#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DBUILD_TESTING:BOOL=OFF")
+#############################################################################################
### disable packaging
+
#set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_NO_PACKAGES:BOOL=ON")
-### Create install package with external libraries (szip, zlib, jpeg)
+### Create install package with external libraries (szip, zlib)
set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DHDF5_PACKAGE_EXTLIBS:BOOL=ON")
-### change install prefix
-set(ADD_BUILD_OPTIONS "${ADD_BUILD_OPTIONS} -DCMAKE_INSTALL_PREFIX:PATH=${INSTALLDIR}")
-
-###################################################################
-
-if(WIN32)
- include(${CTEST_SCRIPT_DIRECTORY}\\CTestScript.cmake)
- if(EXISTS "${CTEST_BINARY_DIRECTORY}\\HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-win${SITE_OS_BITS}.exe")
- file(COPY "${CTEST_BINARY_DIRECTORY}\\HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-win${SITE_OS_BITS}.exe" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
- endif()
- if(EXISTS "${CTEST_BINARY_DIRECTORY}\\HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-win${SITE_OS_BITS}.msi")
- file(COPY "${CTEST_BINARY_DIRECTORY}\\HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-win${SITE_OS_BITS}.msi" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
- endif()
- if(EXISTS "${CTEST_BINARY_DIRECTORY}\\HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-win${SITE_OS_BITS}.zip")
- file(COPY "${CTEST_BINARY_DIRECTORY}\\HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-win${SITE_OS_BITS}.zip" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
- endif()
-else()
- include(${CTEST_SCRIPT_DIRECTORY}/CTestScript.cmake)
- if(APPLE)
- if(EXISTS "${CTEST_BINARY_DIRECTORY}/HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-Darwin.dmg")
- file(COPY "${CTEST_BINARY_DIRECTORY}/HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-Darwin.dmg" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
- endif()
- if(EXISTS "${CTEST_BINARY_DIRECTORY}/HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-Darwin.tar.gz")
- file(COPY "${CTEST_BINARY_DIRECTORY}/HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-Darwin.tar.gz" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
- endif()
- if(EXISTS "${CTEST_BINARY_DIRECTORY}/HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-Darwin.sh")
- file(COPY "${CTEST_BINARY_DIRECTORY}/HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-Darwin.sh" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
- endif()
- else()
- if(CYGWIN)
- if(EXISTS "${CTEST_BINARY_DIRECTORY}/HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-CYGWIN.sh")
- file(COPY "${CTEST_BINARY_DIRECTORY}/HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-CYGWIN.sh" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
- endif()
- if(EXISTS "${CTEST_BINARY_DIRECTORY}/HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-CYGWIN.tar.gz")
- file(COPY "${CTEST_BINARY_DIRECTORY}/HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-CYGWIN.tar.gz" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
- endif()
- else()
- if(EXISTS "${CTEST_BINARY_DIRECTORY}/HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-Linux.sh")
- file(COPY "${CTEST_BINARY_DIRECTORY}/HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-Linux.sh" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
- endif()
- if(EXISTS "${CTEST_BINARY_DIRECTORY}/HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-Linux.tar.gz")
- file(COPY "${CTEST_BINARY_DIRECTORY}/HDF5-${CTEST_SOURCE_VERSION}${CTEST_SOURCE_VERSEXT}-Linux.tar.gz" DESTINATION ${CTEST_SCRIPT_DIRECTORY})
- endif()
- endif()
- endif()
-endif()
-
+#############################################################################################
========================================================================
For further assistance, send email to help@hdfgroup.org
diff --git a/release_docs/INSTALL_parallel b/release_docs/INSTALL_parallel
index e4c540c..5a8b603 100644
--- a/release_docs/INSTALL_parallel
+++ b/release_docs/INSTALL_parallel
@@ -1,6 +1,18 @@
Installation instructions for Parallel HDF5
-------------------------------------------
+0. Use Build Scripts
+--------------------
+The HDF Group is accumulating build scripts to handle building parallel HDF5
+on various platforms (Cray, IBM, SGI, etc.). These scripts are being
+maintained and updated continuously for current and future systems. The reader
+is strongly encouraged to consult the repository at
+
+https://github.com/HDFGroup/build_hdf5
+
+for building parallel HDF5 on these systems. All contributions, additions,
+and fixes to the repository are welcomed and encouraged.
+
1. Overview
-----------
diff --git a/release_docs/RELEASE.txt b/release_docs/RELEASE.txt
index cc77fcb..8fb56cc 100644
--- a/release_docs/RELEASE.txt
+++ b/release_docs/RELEASE.txt
@@ -1,24 +1,37 @@
-HDF5 version 1.9.236 currently under development
+HDF5 version 1.11.0 currently under development
================================================================================
INTRODUCTION
-This document describes the differences between HDF5-1.9.0 and
-HDF5 1.9.x snapshot, and contains information on the platforms
-tested and known problems in HDF5-1.9.x.
+This document describes the differences between HDF5-1.10.1 and HDF5 1.10.2, and
+contains information on the platforms tested and known problems in HDF5-1.10.1.
For more details check the HISTORY*.txt files in the HDF5 source.
-Links to HDF5 1.9.x source code can be found on The HDF Group's
-development FTP server at the following location:
- ftp://ftp.hdfgroup.uiuc.edu/pub/outgoing/hdf5/snapshots
+Links to HDF5 1.10.1 source code, documentation, and additional materials can
+be found on the HDF5 web page at:
+
+ https://support.hdfgroup.org/HDF5/
+
+The HDF5 1.10.1 release can be obtained from:
+
+ https://support.hdfgroup.org/HDF5/release/obtain5.html
+
User documentation for the snapshot can be accessed directly at this location:
- http://www.hdfgroup.uiuc.edu/HDF5/doc_dev_snapshot/H5_dev/
-For more information, see the HDF5 home page:
+ https://support.hdfgroup.org/HDF5/doc/
+
+New features in the HDF5-1.10.x release series, including brief general
+descriptions of some new and modified APIs, are described in the "New Features
+in HDF5 1.10" document:
+
+ https://support.hdfgroup.org/HDF5/docNewFeatures/index.html
- http://www.hdfgroup.org/HDF5/
+All new and modified APIs are listed in detail in the "HDF5 Software Changes
+from Release to Release" document, in the section "Release 1.10.1 (current
+release) versus Release 1.10.0":
+
+ https://support.hdfgroup.org/HDF5/doc/ADGuide/Changes.html
If you have any questions or comments, please send them to the HDF Help Desk:
@@ -29,7 +42,7 @@ CONTENTS
- New Features
- Support for new platforms and languages
-- Bug Fixes since HDF5-1.8.0
+- Bug Fixes since HDF5-1.10.1
- Supported Platforms
- Tested Configuration Features Summary
- More Tested Platforms
@@ -41,1338 +54,161 @@ New Features
Configuration:
-------------
- - CMake
- Added NAMESPACE hdf5:: to package configuration files
- - CMake: change CTEST_BUILD_CONFIGURATION to CTEST_CONFIGURATION_TYPE, which is
- recommended by CMake documentation.
- HDFFV-9971 (ADB 2016/8/22)
- - Java JNI library API wrappers and supporting files added as HDF_JAVA language
- option. Both configure and CMake disable this option by default.
- HDFFV-9552 (ADB 2016/02/28)
- - CMake minimum is now 3.1.0. (ADB 2015/11/14)
- - cmakehdf5: configure options added to enable or disable the building of
- different API's and testings. See "cmakehdf5 --help" for details.
- (AKC - 2014/12/09 HDFFV-8932)
- - Autotools: Automake updated to 1.14.1 (ADB - 2014/04/08)
- - CMake: Moved minimum CMake version to 2.8.11 which enables better library
- include processing. (ADB - 2014/03/26)
- - New configuration option added to change the default plugin path.
- configure option is --with-default-plugin=location
- cmake option is -DH5_DEFAULT_PLUGINDIR:PATH=location
- HDFFV-8513. (ADB 2013/09/04)
- - Rename FFLAGS to FCFLAGS in configure (ADB 2013/08/13)
- - CMake minimum is now 2.8.10. (ADB 2013/01/14)
- - A new tool, cmakehdf5, which is a build command script similar to
- buildhdf5 is added and is available in the bin directory.
- (AKC - 2012/12/12)
- - Fixed AIX Fortran compiler flags to use appropriate settings for
- debugging, profiling, optimization situations. HDFFV-8069. (AKC
- 2012/09/27)
- - Updated to latest autotools and changed all hard *.sh scripts to
- configure managed *.sh.in files. Removed overloading of autotools
- TESTS variable by examples and tests. Renamed configure.in to
- configure.ac. (ADB - 2012/08/23 - HDFFV-8129)
- - Added code to display the version information of XL fortran and C++
- in the summary of configure. (AKC - 2012/02/28 - HDFFV-7793)
- - Configure now generates Makefiles that build in "silent make mode"
- by default in which compile and link lines are significantly
- simplified for clarity. To override this and view actual compile and
- link lines during building, the --disable-silent-rules flag can be used
- at configure time, or the 'make' command can be followed by V=1, to
- indicate a "verbose" make. (MAM - 2011/4/14).
- - Added mpicc and mpif90 as the default C and Fortran compilers for Linux
- systems when --enable-parallel is specified but no $CC or $FC is defined.
- (AKC - 2011/2/7)
- - Added a new configure option, "--enable-unsupported", which can
- be used to stop configure from preventing the use of unsupported
- configure option combinations, such as c++ in parallel or parallel
- HDF5 with threadsafe. Use at your own risk, as it may result in a
- library that won't compile or run as expected!
- (MAM - 2010/11/17 - Bug 2061)
- - PHDF5 changed to use "mpiexec", instead of mpirun, as the default MPI
- applications startup command as defined in the MPI-2 definition, section
- 4.1. (AKC - 2010/6/11 - Bug 1921)
- - Configure now adds appropriate defines for supporting large (64-bit)
- files on all systems, where supported, by default, instead of only linux.
- This largefile support is controllable with the --enable-largefile
- configure option. This is replacing the linux-specific --enable-linux-lfs
- option, which has been removed from configure.
- (MAM - 2010/05/05 - 1772/1434)
- - Upgraded versions of autotools used to generate configuration suite.
- We now use Automake 1.11.1, Autoconf 2.65, and Libtool 2.2.6b.
- MAM 2010/04/15.
- - Added the xlc-* and mpcc_r-* BASENAME patterns to be recognized as IBM
- compilers so that the ibm compiler options can be added properly. This
- allows non-system-default compiler command names (e.g. xlc-m.n.k.l) be
- recognized. AKC 2009/11/26.
- - Configuration suite now uses Automake 1.11 and Autoconf 2.64.
- MAM 2009/08/11.
- - Changed default Gnu fortran compiler from g95 to gfortran since
- gfortran is more likely installed with gcc now. -AKC 2009/07/19-
- - Added libtool version numbers to generated c++, fortran, and
- hl libraries. MAM 2009/04/19.
- - Regenerated Makefile.ins using Automake 1.10.2. MAM 2009/04/19.
- - Added a Make target of check-all-install to test the correctness of
- installing via the prefix= or $DESTDIR options. AKC - 2009/04/14
- - Configuration suite now uses Libtool 2.2.6a. MAM 2008/10/24
-
- - Configuration suite now uses Autoconf 2.61, Automake 1.10.1.
- MAM 2008/05/05.
-
- - The new configure option "--disable-sharedlib-rpath" disables
- embedding the '-Wl,-rpath' information into executables when
- shared libraries are produced, and instead solely relies on the
- information in LD_LIBRARY_PATH. (MAM - 2008/05/15)
+ -
Library:
--------
- - Java API added new H5Iget_name that returns a string.
- The old function, now deprecated, would cause a memory leak and
- not return a proper String value.
- HDFFV-9972 (ADB 2016/8/22)
- - Add support to expand the plugin path value on Windows when the path
- includes an environment variable.
- HDFFV-9706 (ADB 2016/8/22)
-
- - H5F_ACC_DEBUG labeled "deprecated"
-
- The symbol was originally used to emit some extra debugging
- informationi in the multi VFD. The underlying functionality
- was removed due to disuse in HDF5 1.8.16 though the symbol
- remained defined since it was visible in H5Fpublic.h.
-
- In this release, the symbol has been labeled deprecated and will
- not be defined when H5_NO_DEPRECATED_SYMBOLS is defined.
-
- (DER - 2015-04-30, HDFFV-1074)
-
- - The library can load filter libraries dynamically during runtime. Users
- can set the search path through environment variable HDF5_PLUGIN_PATH
- and call H5Pset_filter to enable a dynamic filter. (SLU - 2013/04/08)
- - Added new API functions H5Dscatter and H5Dgather to scatter data to and
- and gather data from a selection within a memory buffer.
- (NAF - 2013/02/05)
- - The library now supports the data conversion from enumeration to numeric
- (integer and floating-point number) datatypes. See Issue 8221.
- (SLU - 2012/10/23)
- - The data sieve buffer size was for all the datasets in the file. It
- could waste memory if any dataset size is smaller than the sieve buffer
- size. Now the library picks the smaller one between the dataset size
- and the sieve buffer size from the file access property. See Issue 7934.
- (SLU - 2012/4/2)
- - I added a new parameter of object access property list to the function
- H5Rdereference (Issue 2763). It's called H5Rdereference2 now. The former
- H5Rdereference function has been deprecated to H5Rdereference1. (SLU -
- 2011/7/18)
- - H5Tcreate now supports string type (fixed-length and variable-length).
- (SLU - 2011/05/20)
- - Added ability to cache files opened through external links. Added new
- public functions H5Pset_elink_file_cache_size(),
- H5Pget_elink_file_cache_size(), and H5Fclear_elink_file_cache().
- (NAF - 2011/02/17)
- - Removed all old code for Metraowerks compilers, bracketed by
- __MWERKS__). Metraowerks compiler is long gone. (AKC - 2010/11/17)
- - Added support for threadsafety on windows using the windows threads
- library. Use the HDF5_ENABLE_THREADSAFE option in CMake while on a
- windows platform to enable this functionality. This is supported on
- Windows Vista and newer Windows operating systems. (MAM - 2010/09/10)
- - When a mandatory filter failed to write data chunks, the dataset
- couldn't close (bug 1260). The fix releases all resources and closes
- the dataset but returns a failure. (SLU - 2010/9/8)
- - H5Tset_order and H5Tget_order now support all data types. A new byte
- order H5T_ORDER_MIXED has been added specifically for compound datatype
- and its derived type. Please see bug #1934. (SLU - 2010/8/23)
- - Improved performance of the chunk cache by avoiding unnecessary b-tree
- lookups of chunks already in cache. (NAF - 2010/06/15)
- - Greatly improved performance of extending a dataset with early
- allocation. (NAF - 2010/03/24 - 1637)
- - Added support for filtering densely stored groups. Many of the API
- functions related to filters have been extended to support dense groups
- as well as datasets. Pipeline messages can now be stored in a group's
- object header. (NAF/QAK - 2009/10/8)
- - The embedded library information is displayed by H5check_version() if a
- version mismatch is detected. Also changed H5check_version() to
- suppress the warning message totally if $HDF5_DISABLE_VERSION_CHECK is 2
- or higher. (Old behavior treated 3 or higher the same as 1, that is
- print a warning and allows the program to continue. (AKC - 2009/9/28)
- - If a user does not care for the extra library information insert
- in the executables, he may turn it off by --disable-embedded-libinfo
- during configure. (AKC - 2009/9/15)
- - Corrected problem where library would re-write the superblock in a file
- opened for R/W access, even when no changes were made to the file.
- (QAK - 2009/08/20, Bz#1473)
- - Separated "factory" free list class from block free lists. These free
- lists are dynamically created and manage blocks of a fixed size.
- H5set_free_list_limits() will use the same settings specified for block
- free lists for factory free lists. (NAF - 2009/04/08)
- - Added support for dense attributes to H5Ocopy. (XCao/NAF - 2009/01/29)
- - Added H5Pset_elink_cb and H5Pget_elink_cb functions to support a
- user-defined callback function for external link traversal.
- (NAF - 2009/01/08)
- - Added H5Pset_elink_acc_flags and H5Pget_elink_acc_flags functions to
- allow the user to specify the file access flags used to open the target
- file of an external link. (NAF - 2009/01/08)
- - Added H5Pset_chunk_cache() and H5Pget_chunk_cache() functions to allow
- individual rdcc configuration for each dataset. Added
- H5Dget_access_plist() function to retrieve a dataset access property
- list from a dataset. (NAF - 2008/11/12)
- - Added H5Iis_valid() function to check if an id is valid without producing
- an error message. (NAF - 2008/11/5)
- - Added two new public routines: H5Pget_elink_fapl() and
- H5Pset_elink_fapl(). (see bug #1247) (VC - 2008/10/13)
- - Improved free space tracking in file to be faster. (QAK - 2008/10/06)
- - Added 'mounted' field to H5G_info_t struct. (QAK - 2008/07/15)
+ -
Parallel Library:
-----------------
- - Add H5Pget_mpio_no_collective_cause() function that retrive reasons
- why the collective I/O was broken during read/write IO access.
- (JKM - 2012/08/30 HDFFV-8143)
- - Special Collective IO (IO when some processes do not contribute to the
- IO) and Complex Derived Datatype MPI functionalities are no longer
- conditionally enabled in the library by configure. They are always
- enabled in order to take advantage of performance boosts from these
- behaviors. Older MPI implementations that do not allow for these
- functionalities can no longer by used by HDF5. (MAM - 2011/07/08).
- - Modified parallel tests to run with arbitrary number of processes. The
- modified tests are testphdf5 (parallel dataset access), t_chunk_alloc
- (chunk allocation), and t_posix_compliant (posix compliance). The rest of
- the parallel tests already use in the code the number of processes
- available in the communicator. (CMC - 2009/04/28)
+ -
Fortran Library:
----------------
-
- - Added parallel routine H5Pget_mpio_actual_io_mode_f (MSB - 2012/09/27)
-
- - Added for the C API the Fortran wrapper:
- h5ocopy_f (MSB - 2012/03/22)
-
-
- HDF5 Fortran library was enhanced to support Fortran 2003 standard.
- The following features are available when the HDF5 library is configured
- using --enable-fortran --enable-fortran2003 configure flags AND
- if fortran compiler is Fortran2003 compliant:
-
- - Subroutines overloaded with the C_PTR derived type:
- h5pget_f
- h5pget_fill_value_f
- h5pinsert_f
- h5pregister_f
- h5pset_f
- h5pset_fill_value_f
- h5rcreate_f
- h5rderefrence_f
- h5rget_name_f
- h5rget_obj_type_f
- - Subroutines overloaded with the C_PTR derived type
- and simplified signatures:
- h5aread_f
- h5awrite_f
- h5dread_f
- h5dwrite_f
- - New subroutines
- h5dvlen_reclaim_f
- h5literate_by_name_f
- h5literate_f
- h5ovisit_f
- h5tconvert_f
-
- - Subroutines with additional optional parameters:
- h5pcreate_class_f
- (EIP - 2011/10/14)
-
- - Added for the C APIs the Fortran wrappers:
- h5dget_access_plist_f
- h5iis_valid_f
- h5pset_chunk_cache_f
- h5pget_chunk_cache_f
- (MSB - 2009/04/17)
-
-
+ -
C++ Library:
------------
- - New member function added
-
- The assignment operator ArrayType::operator= is added because ArrayType
- has pointer data members.
-
- (BMR, 2016/03/07, HDFFV-9562)
-
- - New member functions
- + Overloaded CommonFG::getObjnameByIdx to take char* for name
- + Overloaded CommonFG::getObjTypeByIdx to return type name as a char*.
- (BMR - 2010/05/02)
- + DataSet::getInMemDataSize() to simplify getting the dataset's
- data size in memory. (BMR - 2009/07/26)
- - These member functions were added as wrapper for H5Rdereference to
- replace the incorrect IdComponent::dereference().
- void H5Object::dereference(H5File& h5file, void* ref)
- void H5Object::dereference(H5Object& obj, void* ref)
- In addition, these constructors were added to create the associated
- objects by way of dereference:
- Attribute(H5Object& obj, void* ref);
- Attribute(H5File& file, void* ref);
- DataSet(H5Object& obj, void* ref);
- DataSet(H5File& file, void* ref);
- DataType(H5Object& obj, void* ref);
- DataType(H5File& file, void* ref);
- Group(H5Object& obj, void* ref);
- Group(H5File& obj, void* ref);
- (BMR - 2008/08/10)
-
-
+ -
Tools:
------
- - h5repack: Added ability to use plugin filters. HDFFV-8345 (ADB - 2013/09/04).
- - h5dump: Added option -N --any_path, which searches the file for paths that
- match the search path. HDFFV-7989 (ADB - 2013/08/12).
- - h5dump: Added optional arg 0 to -A, which excludes attributes from display.
- HDFFV-8134 (ADB - 2013/08/01).
- - h5dump: Fixed displaying compression ratio for unknown or user-defined
- filters. HDFFV-8344 (XCAO 2013/03/19)
- - h5dump: Changed UNKNOWN_FILTER to USER_DEFINED_FILTER for user defined filter.
- HDFFV-8346 (XCAO 2013/03/19)
- - h5dump: Added capability for "-a" option to show attributes containing "/"
- by using an escape character. For example, for a dataset "/dset"
- containing attribute "speed(m/h)", use "h5dump -a "/dset/speed(\/h)"
- to show the content of the attribute. See details at HDFFV-7523
- (PC -- 2012/03/12)
- - h5dump: Added ability to apply command options across multiple files using a
- wildcard in the filename. Example; "h5dump -H -d Dataset1 tarr*.h5".
- HDFFV-7876 (ADB - 2012/03/12).
- - h5repack: Improved performance for big chunked datasets (size > 128MB)
- when used with layout (-l) or compression (-f) option.
- It would perform much better prior to the improvement,
- especially for cases that chunk dimentions looks like
- "1024x5x1" (compare to "1x5x1024"). When bigger numbers
- are toward front and smaller number is toward back in chunk
- dimentions. HDFFV-7862 (JKM - 2012/03/01)
- - h5dump: Added new option --no-compact-subset. This option will not
- interpret the '[' character as starting the compact form of
- subsetting. This is useful when the "h5dump error: unable to
- open dataset "datset_name"" message is output because a dataset
- name contains a '[' character. HDFFV-7689 (ADB - 2012/01/31)
- - h5dump: Corrected schema location:
- <hdf5:HDF5-File
- xmlns:hdf5="http://hdfgroup.org/HDF5/XML/schema/HDF5-File"
- xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
- xsi:schemaLocation="http://hdfgroup.org/HDF5/XML/schema/HDF5-File
- http://www.hdfgroup.org/HDF5/XML/schema/HDF5-File.xsd">
- (ADB - 2011/08/10)
- - h5diff: Added new level for -v (verbose) option. The new levels are
- 1 and 2. So -v1 and -v2 can be specified to view more
- information about attributes differences.
- Bug#2121 (JKM 2011/3/23)
- - h5dump: Added new option --enable-error-stack. This option will display
- error stack information in the output stream. This is useful
- when the "h5dump: Unable to print data" message is output.
- (ADB - 2011/02/24)
- - h5diff: Add a new flag --exclude-path. Specified path to an object will
- be excluded from comparing the two files or two groups. If group
- is specified all the member objects will be excluded.
- (JKM - 2010/09/16).
- - h5ls: Add new flag --no-dangling-links. (refer to --help for details)
- (JKM - 2010/06/15)
- - h5ls: Add new flag --follow-symlinks. (refer to --help for details)
- (JKM - 2010/05/25)
- - h5diff: Add new flag --no-dangling-links. (refer to --help for details)
- (JKM - 2010/02/10)
- - h5diff: Add new flag --follow-symlinks. (refer to --help for details)
- (JKM - 2010/01/25)
- - h5diff: fix for displaying garbage value on LE machine for BE data.
- (JKM - 2009/11/20)
- - h5dump: subsetting now allows default for count. Also trailing ; in short form
- can be omitted after last specified value.
- (ADB - 2009/09/04)
- - h5dump/h5ls: now can display data in region references
- using new -R, --region flag.
- (ADB - 2009/09/04)
- - h5diff: new flag, -c, --compare, list objects that are not comparable.
- (PVN - 2009/4/10 - 1368)
- - h5diff new flag, -N, --nan, avoids NaNs detection. (PVN - 2009/4/10)
- - h5dump correctly specifies XML dtd / schema urls (ADB - 2009/4/3 - 1519)
- - h5repack now handles group creation order. (PVN - 2009/4/2 - 1402)
- - h5dump: added a printing of the compression ratio of uncompressed and compressed
- sizes for cases where compression filters are present. (PVN - 2008/05/01)
- - h5dump: added an option to allow a user defined formatting string for printf
- regarding floating point numbers. (PVN - 2008/05/06)
- - h5dump: support for external links, display the object that the external link
- points to. (PVN - 2008/05/12)
- - h5repack: add a userblock to an HDF5 file during the repack. (PVN - 2008/08/26)
- - h5repack: add 2 options that call H5Pset_alignment in the repacked file. (PVN - 2008/08/29)
- - h5ls: added capability to traverse through external links when the -r
- (recursive) flag is given. (NAF - 2008/09/16)
- - h5ls: added -E option to enable traversal of external links. h5ls will
- not traverse external links without this flag being set.
- (NAF - 2008/10/06)
- - h5diff: added support for long double (PVN - 2008/10/28)
- - h5dump: binary output defaults to NATIVE with -b optionally accepting
- the form of binary output (NATIVE, FILE, BE, LE). (PVN - 2008/10/30)
- - h5diff: return 1 for file differences when both file graphs differ by any object.
- Error return code was changed to 2 from -1. (PVN - 2008/10/30)
- - h5import: TEXTFPE (scientific format) was deprecated. Use TEXTFP
- instead (PVN - 2008/10/30)
- - h5repack: When user doesn't specify a chunk size, h5repack now defines a default
- chunk size as the same size of the size of the hyperslab used to read the chunks.
- The size of the hyperslabs are defined as the size of each dimension or a
- predefined constant, whatever is smaller. This assures that the chunk
- read fits in the chunk cache. (PVN - 2008/11/21)
- - h5diff: h5diff treats two INFINITY values different. Fixed by checking (value==expect)
- before call ABS(...) at h5diff_array.c This will make that (INF==INF) is true
- (INF is treated as an number instead of NaN) (PC -- 2009/07/28)
- - h5diff: add option "--use-system-epsilon" to print difference if (|a-b| > EPSILON)
- Change default to use strict equality (PC -- 2009/09/12)
-
+ -
High-Level APIs:
---------------
+ -
- C Packet Table API
- ------------------
- - Replacement of a public function
-
- The existing function H5PTcreate_fl limits applications to deflate
- compression only. The public function H5PTcreate is added to replace
- H5PTcreate_fl. H5PTcreate takes a property list ID to provide
- flexibility on creation properties.
-
- hid_t H5PTcreate(hid_t loc_id, const char *dset_name,
- hid_t dtype_id, hsize_t chunk_size, hid_t plist_id);
- (BMR, 2016/03/04, HDFFV-8623)
-
- - New public functions
-
- Two accessor functions were added per HDFFV-8623/patch 003.
- /* Returns the ID of the dataset associated with the packet table */
- hid_t H5PTget_dataset(hid_t table_id);
-
- /* Returns the ID of the datatype the packet table uses */
- hid_t H5PTget_type(hid_t table_id);
- (BMR, 2016/03/04, HDFFV-8623)
-
- - Regarding #ifdef VLPT_REMOVED
-
- The #ifdef VLPT_REMOVED blocks are removed from the PT library source
- except the following cases:
- + H5PTis_varlen() is made available again.
- + H5PTfree_vlen_readbuff() now became H5PTfree_vlen_buff()
- (BMR, 2016/03/04, HDFFV-442)
-
- C++ Packet Table API
- --------------------
- - New constructor
+ C Packet Table API
+ ------------------
+ -
- An overloaded constructor is added to FL_PacketTable and takes a property
- list ID to provide flexibility on creation properties.
-
- FL_PacketTable(hid_t fileID, hid_t plist_id, const char* name, hid_t dtypeID, hsize_t chunkSize);
- (BMR, 2016/03/08, HDFFV-8623)
-
- - New public functions
-
- Two accessor wrappers to class PacketTable, per HDFFV-8623/patch 004.
- /* Returns the ID of the dataset associated with the packet table */
- hid_t PacketTable::GetDataset()
-
- /* Returns the ID of the datatype the packet table uses */
- hid_t PacketTable::GetDataset()
- (BMR, 2016/03/04, HDFFV-8623)
-
- - Member functions having "char*" as an argument
-
- Overloaded functions were added to provide "const char*" argument, the
- existing version will be deprecated.
- (BMR, 2016/03/04)
-
- - Regarding #ifdef VLPT_REMOVED
-
- The #ifdef VLPT_REMOVED blocks are removed from the PT library source
- except the following cases:
- + VL_PacketTable::IsVariableLength() is moved to PacketTable
- + VL_PacketTable::FreeReadBuff() now became PacketTable::FreeBuff()
-
- (BMR, 2016/03/04, HDFFV-442)
-
-
- Internal header file
- --------------------
- - A new API function H5DOwrite_chunk. It writes a data chunk directly
- into a file bypassing hyperslab selection, data conversion, and
- filter pipeline. The user must be careful with the function and
- clearly understand the I/O process of the library.
- (SLU - 2013/2/11)
- - New API: h5ltpath_valid (Fortran: h5ltpath_valid_f) which checks
- if a path is correct and determines if a link resolves to a valid
- object and checks that the link does not dangle. (MSB- 2012/3/15)
-
- - Added Fortran wrappers for Dimension Scale APIs. HDFFV-3797
- h5dsset_scale_f
- h5dsattach_scale_f
- h5dsdetach_scale_f
- h5dsis_attached_f
- h5dsis_scale_f
- h5dsset_label_f
- h5dsget_label_f
- h5dsget_scale_name_f
- h5dsget_num_scales_f
- (EIP for SB - 2011/10/13)
-
- - Table: In version 3.0 of Table, "NROWS" (used to store number of records) was
- deprecated (PVN - 2008/11/24)
+ Internal header file
+ --------------------
+ -
Documentation
-------------
+ -
Support for new platforms, languages and compilers.
=======================================
- - Intel V11.1 uses now -O3 optimization in production mode (EIP - 2010/10/08)
- - PathScale compilers are recognized and can build the HDF5 library
- properly. AKC - 2009/7/28 -
- - SunOS 5.11 (emu) 32-bit and 64-bit with Sun C/C++ 5.12 compiler and
- Sun Fortran 95 8.6 compiler. (SLU - 2013/04/15)
+ -
-Bug Fixes since HDF5-1.8.0 release
+Bug Fixes since HDF5-1.10.1 release
==================================
Library
-------
- - Fixed a memory bug that could occur when a message was improperly marked
- as sharable on disk.
-
- (NAF, 2016/07/01, HDFFV-9950)
-
- - Incorrect usage of list in CMake COMPILE_DEFINITIONS set_property
-
- The CMake command, set_property with COMPILE_DEFINITIONS property
- needs a quoted semi-colon separated list of values. CMake will
- transform the list to a series of -D{value} for the compile.
-
- (ADB - 2014/12/09, HDFV-9041)
-
- - H5Z.c: H5Zfilter_avail(H5Z_filter_t id)
- Added else block if the call to the internal H5Z_filter_avail(id) does not
- fail and returns FALSE. This block calls the H5PL_load(H5PL_TYPE_FILTER, (int)id)
- function to attempt to dynamically load the filter plugin.
- (ADB - 2014/03/03 HDFFV-8629)
- - Added const qualifier to source buffer parameters in H5Dgather and
- H5D_scatter_func_t (H5Dscatter callback). (NAF - 2013/7/02)
- - Fixed an error involving failure to write fill values to the user's
- buffer when reading unallocated chunks from datasets that have a
- fill value set to H5D_FILL_VALUE_DEFAULT. A consequence of this
- was the reporting of spurious data values in h5dump and h5diff
- output.
- (HDFFV-8247; JP - 2013/05/03)
- - Fixed an error that could occur when calling H5Ocopy within an
- H5Literate callback (and possibly other situations).
- (NAF - 2012/7/25 - HDFFV-5853)
- - Fixed an error that would occur when copying an object with attribute
- creation order tracked and indexed. (NAF - 2012/3/28 - HDFFV-7762)
- - Fixed a bug in H5Ocopy(): When copying an opened object, call the
- object's flush class action to ensure that cached data is flushed
- so that H5Ocopy will get the correct data.
- (VC - 2012/3/27 - HDFFV-7853)
- - When an application tries to write or read many small data chunks and
- runs out of memory, the library had a seg fault. The fix is to
- return the error stack with proper information. (SLU - 2012/3/23.
- Issue 7785)
- - H5Pset_data_transform had seg fault in some cases like x*-100. It
- works correctly now and handles other cases like 100-x or 2/x.
- (SLU - 2012/3/15. Issue 7922)
- - Fixed rare corruption bugs that could occur when using the new object
- header format. (NAF - 2012/3/15 - HDFFV-7879)
- - Creating a dataset in a read-only file caused seg fault when the file
- is closed. It's fixed. The attemp to create a dataset will fail
- with the error stack indicating the file is read-only. (SLU -
- 2012/1/25. Issue 7756)
- - Fixed a seg fault that could occur when shrinking a dataset with chunks
- larger than 1 MB. (NAF - 2011/11/30 - HDFFV-7833)
- - Fixed a bug that could cause file corruption when copying named
- datatypes to a file using shared messages. (NAF - 2011/11/14)
- - Fixed a bug that could cause H5Oget_info to return the wrong address
- after copying a named datatype. (NAF - 2011/11/14)
- - The library allowed the conversion of strings between ASCII and UTF8
- (Issue 7582). We have corrected it to report an error under this
- situation. (SLU - 2011/11/8)
- - The library had seg fault when it tried to shrink the size of compound type
- through H5Tset_size immediately after the type was created (Issue
- 7618). It's fixed now. (SLU - 2011/10/26)
- - Fixed a bug that occurred when using H5Ocopy on a committed datatype
- containing an attribute using that committed datatype.
- (NAF - 2011/10/13 - Issue 5854)
- - #ifdef _WIN32 instances changed to #ifdef H5_HAVE_WIN32_API and added
- H5_HAVE_VISUAL_STUDIO checks where necessary. CMake only as configure
- never set _WIN32.
- - CLANG compiler with the options -fcatch-undefined-behavior and -ftrapv
- discovered 3 problems in tests and tools' library (Issue 7674):
- 1. In dsets.c, left shifting an unsigned int for 32 bits or more
- caused undefined behavior.
- 2. In dt_arith.c, the INIT_INTEGER macro definition has an overflow
- when the value is negative minimal and is being subtracted one.
- 3. In tools/lib/h5tools_str.c, right shifting an int value for 32 bits
- or more caused undefined behavior.
- All the problems have been corrected. (SLU - 2011/9/2)
- - In v1.6 library, there was EOA for the whole MULTI file saved in the
- super block. We took it out in v1.8 library because it's meaningless
- for the MULTI file. v1.8 library saves the EOA for the metadata file,
- instead. But this caused some backward compatibility problem.
- v1.8 library couldn't open the file created with v1.6 library. We
- fixed the problem by checking the EOA value to detect the file
- created with v1.6 library. (SLU - 2011/6/22)
- - When a dataset had filters and reading data failed, the error message
- didn't say which filter isn't registered. It's fixed now.
- (SLU - 2011/6/3)
- - The datatype handler created with H5Tencode/decode used to have the
- reference count 0 (zero). I have fixed it. It is 1 (one) now.
- (SLU - 2011/2/18)
- - Fixed a bug that caused big endian machines to generate corrupt files
- when using the scale-offset filter with floating point data or
- fill values. Note that such datasets will no longer be readable
- by any machine after this patch. (NAF - 2010/02/02 - Bug 2131)
- - Retrieving a link's name by index in the case where the link is
- external and the file that the link refers to doesn't exist will
- now fail gracefully rather than cause a segmentation fault.
- (MAM - 2010/11/17)
- - Modified library to always cache symbol table information. Libraries
- version 1.6.3 have a bug which causes them to require this
- information for some operations. (NAF - 2010/09/21 - 1864)
- - Fixed a bug that could occur when getting information for a new-style
- group that was previously opened through a file handle that was
- later closed. (NAF - 2010/09/15)
- - Added define check in H5public.h if stdint.h is supported by the C++
- compiler. This define is only available on Windows with VS2010 and
- using CMake to build the library. (ADB - 2010/09/13 - Bug 1938)
- - H5Eset_current_stack now also closes the error stack to be set as the
- default. This is to avoid a potential problem (Bug 1799).
- (SLU - 2010/9/7)
- - Fixed the bug in the filter's public CAN_APPLY function. The return
- value should be htri_t not herr_t (Bug #1239). (SLU - 2010/8/5)
- - Fixed a bug in the direct I/O driver that could render files with
- certain kinds of unaligned data unreadable or corrupt them.
- (NAF - 2010/07/28)
- - valgrind reported an error of copying data to itself when a new attribute
- is written (Bug #1956). I fixed it by taking out the memcpy step in
- the attribute code. (SLU - 2010/07/28)
- - Fixed a bug that could cause file corruption when using non-default
- sizes of addresses and/or lengths. This bug could also cause
- uncorrupted files with this property to be unreadable. This bug
- was introduced in 1.8.5. (NAF - 2010/07/16 - 1951)
- - Fixed a file corruption bug that could happen when shrinking a
- compressed dataset. (NAF - 2010/05/20)
- - Fixed some memory leaks in VL datatype conversion when strings are
- used as fill values. (MAM - 2010/05/12 - BZ# 1826)
- - Fixed a bug when copying objects with NULL references with the
- H5O_COPY_EXPAND_REFERENCE_FLAG flag set. (NAF - 2010/04/08 - 1815)
- - Files can now be concurrently opened more than once using the core file
- driver, as long as the backing store is used. (NAF - 2010/03/09)
- - Added support for H5O_COPY_EXPAND_EXT_LINK_FLAG to H5Ocopy. External
- links will now be expanded if this flag is set.
- (NAF - 2010/03/05 - 1733)
- - Fixed a bug where the library, when traversing an external link, would
- reopen the source file if nothing else worked. (NAF - 2010/03/05)
- - Fixed an intermittent bug in the b-tree code which could be triggered
- by expanding and shrinking chunked datasets in certain ways.
- (NAF - 2010/02/16)
- - H5Tdetect_class said a VL string is a string type. But when it's
- in a compound type, it said it's a VL type (Bug #1584). I fixed it
- to be consistent. It always return string type. (SLU - 2009/12/10)
- - Fixed a bug where writing and deleting many global heap objects (i.e.
- variable length data) would render the file unreadable. Previously
- created files exhibiting this problem should now be readable.
- (NAF - 2009/10/27 - 1483)
- - Fixed incorrect return value for H5Pget_preserve. (AKC - 2009/10/08 - 1628)
- - Fixed an assertion failure that occurred when H5Ocopy was called on a
- dataset using a vlen inside a compound. (NAF - 2009/10/02 - 1597)
- - Fixed incorrect return value for H5Pget_filter_by_id1/2 in H5Ppublic.h.
- (NAF - 2009/09/25 - 1620)
- - Fixed a bug where properties weren't being compared with the registered
- compare callback. (NAF - 2009/09/25 - 1555)
- - Fixed a bug where H5Pget_fitler_by_id would succeed when called for a
- filter that wasn't present. (NAF - 2009/06/25 - 1250)
- - Fixed an issue with committed compound datatypes containing a vlen.
- Also fixed memory leaks involving committed datatypes.
- (NAF - 2009/06/10 - 1593)
- - Added versioning to H5Z_class_t struct to allow compatibility with 1.6
- API. (NAF - 2009/04/20 - 1533)
- - Fixed a problem with using data transforms with non-native types in the
- file. (NAF - 2009/04/20 - 1548)
- - Added direct.h include file to windows section of H5private.h
- to fix _getcwd() warning. (ADB - 2009/04/14 - 1536)
- - Fixed a bug that prevented external links from working after calling
- H5close(). (NAF - 2009/04/10 - 1539)
- - Modified library to write cached symbol table information to the
- superblock, to allow library versions 1.3.0 to 1.6.3 to read files
- created by this version. (NAF - 2009/04/08 - 1423)
- - Changed skip lists to use a deterministic algorithm. The library should
- now never call rand() or srand(). (NAF - 2009/04/08 - 503)
- - Fixed a bug where H5Lcopy and H5Lmove wouldn't create intermediate
- groups when that property was set. (NAF - 2009/04/07 - 1526)
- - Fixed a bug that caused files with a user block to grow by the size of
- the user block every time they were opened.
- (NAF - 2009/03/26 - 1499)
- - Fixed a rare problem that could occur with files using the old (pre 1.4)
- array datatype. (NAF - 2009/03/23)
- - Modified library to be able to open files with corrupt root group symbol
- table messages, and correct these errors if they are found. Such
- files can only be successfully opened with write access.
- (NAF - 2009/03/23 - 1189)
- - Removed the long_long #define and replaced all instances with
- "long long". This caused problems with third party products. All
- currently supported compliers support the type. (ADB - 2009/03/05)
- - Fixed various bugs that could prevent the fill value from being written
- in certain rare cases. (NAF - 2009/02/26 - 1469)
- - Fixed a bug that prevented more than one dataset chunk from being cached
- at a time. (NAF - 2009/02/12 - 1015)
- - Fixed an assertion failure caused by opening an attribute multiple times
- through multiple file handles. (NAF - 2009/02/12 - 1420)
- - Fixed a problem that could prevent the user from adding attributes (or
- any object header message) in some circumstances.
- (NAF - 2009/02/12 - 1427)
- - Fixed a bug that could cause problems when an attribute was added to a
- committed datatype using the committed datatype's datatype.
- (NAF - 2009/02/12)
- - Fixed a bug that could cause problems when copying an object with a
- shared message in its own object header. (NAF - 2009/01/29)
- - Changed H5Tset_order to properly reject H5T_ORDER_NONE for most
- datatypes. (NAF - 2009/01/27 - 1443)
- - Fixed a bug where H5Tpack wouldn't remove trailing space from an
- otherwise packed compound type. (NAF - 2009/01/14)
- - Fixed up some old v2 btree assertions that get run in debug mode that
- were previously failing on compilation, and removed some of the
- more heavily outdated and non-rewritable ones. (MAM - 2008/12/15)
- - Fixed a bug that could cause problems when "automatically" unmounting
- multiple files. (NAF - 2008/11/17)
- - H5Ovisit and H5Ovisit_by_name will now properly terminate when the
- callback function returns a positive value on the starting object.
- (NAF - 2008/11/03)
- - Fixed an error where a null message could be created that was larger
- than could be written to the file. (NAF - 2008/10/23)
- - Corrected error with family/split/multi VFD not updating driver info
- when "latest" version of the file format used. (QAK - 2008/10/14)
- - Corrected alignment+threshold errors to work correctly when metadata
- aggregation is enabled. (QAK - 2008/10/06)
- - Changed H5Fget_obj_count and H5Fget_obj_ids to ignore objects registered
- by the library for internal library use. (NAF - 2008/10/06)
- - Fixed potential memory leak during compound conversion.
- (NAF - 2008/10/06)
- - Changed the return value of H5Fget_obj_count from INT to SSIZE_T. Also
- changed the return value of H5Fget_obj_ids from HERR_T to SSIZE_T and
- the type of the parameter MAX_OBJS from INT to SIZE_T. (SLU - 2008/09/26)
- - Fixed an issue that could cause data to be improperly overwritten
- during compound type conversion. (NAF - 2008/09/19)
- - Fixed pointer alignment violations that could occur during vlen
- conversion. (NAF - 2008/09/16)
- - Fixed problem where library could cause a segmentation fault when
- an invalid location ID was given to H5Giterate(). (QAK - 2008/08/19)
- - Fixed improper shutdown when objects have reference count > 1. The
- library now tracks reference count due to the application separately
- from that due to internal library routines. (NAF - 2008/08/19)
- - Fixed assertion failure caused by incorrect array datatype version.
- (NAF - 2008/08/08)
- - Fixed an issue where mount point traversal would fail when using
- multiple handles for the child. (NAF - 2008/08/07)
- - Fixed an issue where mount points were inaccessible when using multiple
- file handles for the parent. The mount table is now in the shared
- file structure (the parent pointer is still in the top structure).
- (NAF - 2008/08/07)
- - when an attribute was opened twice and data was written with one of the handles,
- the file didn't have the data. It happened because each handle had its own
- object structure, and the empty one overwrote the data with fill value. This is
- fixed by making some attribute information like the data be shared in the
- attribute structure. SLU - 2008/07/22
- - Fixed issue where a group could have a file mounted on it twice.
- (QAK - 2008/07/15)
- - Fixed a Windows-specific issue in the ohdr test which was causing users
- in some timezones to get false errors. This a deficiency in the Windows
- mktime() function, and has been handled properly. SJW - 2008/06/19
- - Fixed the problem with the searching of target file for H5Lcreate_external().
- The searching pattern will depend on whether the target file's
- pathname is an absolute or a relative path. Please see the description
- in the RM for H5Lcreate_external(). (VC - 2008/04/08)
- - Fixed possible file corruption bug when encoding datatype
- descriptions for compound datatypes whose size was between
- 256 & 511 bytes and the file was opened with the "use the
- latest format" property enabled (with H5Pset_libver_bounds).
- (QAK - 2008/03/13)
- - Fixed bug in H5Aget_num_attrs() routine to handle invalid location
- ID correctly. (QAK - 2008/03/11)
- - H5Dset_extent: when shrinking dimensions, some chunks were not deleted.
- (PVN - 2009/01/8)
- - Added code to maintain a min_clean_fraction in the metadata cache when
- in serial mode. (MAM - 2009/01/9)
-
-
+ -
Configuration
-------------
- - CMake: When CMake commands are executed individually on the command line
- and the external filters are being built, the CMAKE_BUILD_TYPE define
- must be set to the same value as the configuration
- (-DCMAKE_BUILD_TYPE:STRING=Release if using -C Release). This is needed
- by the the szip and zlib filter build commands. (ADB - HDFFV-8695)
- - CMake: Remove use of XLATE_UTILITY program. (ADB - 2014/03/28 HDFFV-8640)
- - CMake: Added missing quotes in setting the CMAKE_EXE_LINKER_FLAGS for the
- MPI option. (ADB - 2014/02/27 HDFFV-8674)
- - Modified H5detect.c to scan floating point types for padding bits before
- analyzing the type further. This should fix problems with gcc 4.8
- (NAF - 2013/09/19 - HDFFV-8523/HDFFV-8500)
- - Fixed Makefile issue in which "-Wl," was not properly specified
- prior to -rpath when building parallel fortran libraries with
- an Intel compiler. (MAM - 2012/03/26)
- - Makefiles generated by other packages using h5cc as the compiler
- no longer error when 'make' is invoked more than once in order
- to 'rebuild' after changes to source. (MAM - 2012/03/26)
- - Added --enable-fortran2003 flag to enable Fortran2003 support
- in the HDF5 Fortran library. The flag should be used along with the
- --enable-fortran flag and takes effect only when the Fortran compiler
- is Fortran2003 compliant. (EIP - 2011/11/14)
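        For example (a hedged configure sketch combining the two flags named
        above; the install prefix is illustrative only):

            ./configure --prefix=/usr/local/hdf5 --enable-fortran --enable-fortran2003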
-
- - On the Windows platform, the default VFD, previously the Windows VFD, is
- restored to the SEC2 (aka POSIX) VFD. The Windows VFD is deprecated.
- HDFFV-7740 (AKC 2011/09/26)
- - Removed config/ibm-aix6.x. All IBM-AIX settings are in one file,
- ibm-aix. (AKC - 2011/4/14)
- - Shared C libraries are no longer disabled on Mac when Fortran
- is enabled. Shared Fortran libraries are still not supported on Mac,
- so configure will disable them by default, but this is overridable
- with the new --enable-unsupported configure option. The configure
- summary has been updated to reflect the fact that the shared-ness of
- the C++/Fortran wrapper libraries may not align with the C library.
- (MAM - 04/11/2011 - HDFFV-4353).
- - Removed recognition of the parallel compilers of LAM(hcc) and
- ChMPIon(cmpicc) since we have no access to these two MPI implementations
- and cannot verify their correctness. (AKC - 2010/7/14 - Bug 1921)
- - Removed the following config files, as we no longer support them:
- config/dec-osf*, config/hpux11.00, config/irix5.x,
- config/powerpc-ibm-aix4.x config/rs6000-ibm-aix5.x config/unicos*
- MAM - 2009/10/08
- - Modified configure and make process to properly preserve user's CFLAGS
- (and company) environment variables. Build will now properly use
- automake's AM_CFLAGS for any compiler flags set by the configure
- process. Configure will no longer modify CFLAGS directly, nor will
- setting CFLAGS during make completely replace what configure has set up.
- MAM - 2009/10/08
- - Support for TFLOPS, config/intel-osf1, is removed since the TFLOPS
- machine has long been retired. AKC - 2009/10/06.
- - Added $(EXEEXT) extension to H5detect when it's executed in the
- src/Makefile to generate H5Tinit.c so it works correctly on platforms
- that require the full extension when running executables.
- MAM - 2009/10/01 - BZ #1613
- - Configure will now set FC and CXX to "no" when fortran and c++
- are not being compiled, respectively, so configure will not run
- some of the compiler tests for these languages when they are not
- being used. MAM - 2009/10/01
- - The PathScale compiler (v3.2) was mistaken for gcc v4.2.0 but it failed to
- recognize some gcc options. Fixed (see bug 1301). AKC - 2009/7/28
- - The --enable-static-exec flag will now properly place the -static flag
- on the link line of all installed executables. This will force the
- executable to link with static libraries over shared libraries, provided
- the static libraries are available. MAM - 2009/08/31 - BZ #1583
- - The --includedir=DIR configuration option now works as intended, and can
- be used to specify the location to install C header files. The default
- location remains unchanged, residing at ${prefix}/include.
- MAM - 2009/03/10 - BZ #1381
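        For example (a hedged sketch of overriding the default header install
        location described above; the paths shown are illustrative only):

            ./configure --prefix=/usr/local/hdf5 --includedir=/usr/local/hdf5/include/hdf5-1.10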
- - Configure no longer removes the '-g' flag from CFLAGS when in production
- mode if it has been explicitly set in the CFLAGS environment variable
- prior to configuration. MAM - 2009/03/09 - BZ #1401.
- - Fixed error with 'make check install' failing due to h5dump
- needing other tools built first. MAM - 2008/10/24.
- - When using shared szip, it is no longer necessary to specify
- the path to the shared szip libraries in LD_LIBRARY_PATH. MAM -
- 2008/10/24.
- - The file libhdf5_fortran.settings is not installed since its content
- is included in libhdf5.settings now. AKC - 2008/10/21
- - "make DESTDIR=xxx install" failed to install some tools and files
- (e.g., h5cc and fortran modules). Fixed. AKC - 2008/10/8.
- - Autotools: An export of LD_LIBRARY_PATH=<szip library location> was
- removed from configure and make installcheck was revised to run
- scripts installed in share/hdf5_examples to use the installed h5cc, etc.
- to compile and run example source files also installed there. Make
- installcheck will now fail when a shared szip or other external lib file
- cannot be found in the same manner that executables compiled and linked
- with h5cc will fail to run when those lib files cannot be found after
- install. Make installcheck should pass after setting LD_LIBRARY_PATH to the
- szip location.
- (LRK - 2014/04/16)
+ -
Performance
-------------
- - Removed program perform/benchpar from the enable-build-all list. The
- program will be retired or moved to another location. HDFFV-8156
- (AKC 2012/10/01)
- - Retired program perform/mpi-perf. Its purpose has been incorporated
- into h5perf previously. (AKC 2012/09/20)
- - ifdefs added to tests around the unistd.h include and a function to
- simulate getlogin() on Windows.
- (ADB - 2011/08/15)
- - perf_serial test added to Windows projects and check batch file.
- (ADB - 2009/06/11)
+ -
+
Fortran
--------
- - Fixed a typo in the return value of the nh5dread_f_c function (was 1
- instead of 0 on success); fixed the return value to make it consistent
- with other Fortran functions; removed debug statements from the code.
- (EIP - 2012/06/23)
-
- - Fixed problem writing/reading control characters to a dataset; writing
- a string containing: alerts, backspace, carriage_return, form_feed,
- horizontal_tab, vertical_tab, new_line is now tested and working.
- (MSB - 2012/09/01)
-
- - Corrected the integer type of H5S_UNLIMITED_F to HSIZE_T (MSB - 2012/09/01)
-
- - Corrected the number of continuation lines in the src files
- to be less than 32 lines for F95 compliance. (MSB - 2012/10/01)
+ -
Tools
-----
- - h5dump subsetting fixed for dims greater than two
- When a dataset has more than two dimensions, subsetting would incorrectly
- calculate the data that needed to be displayed.
- Added block and stride calculations that account for dimensions greater
- than two. NOTE: lines that have line breaks inserted because of display
- length calculations may have index info that is incorrect until the next
- dimension break. (ADB, 2016/03/04, HDFFV-9698)
- - h5repack: h5repack would not attempt to remove UD filters. Added a
- check to h5repack for UD filters that checks if the filter can
- be dynamically loaded. This will require a change in the library to
- add the H5PL_load() to the H5Zfilter_avail(). (ADB - 2014/03/03 HDFFV-8629)
- - h5repack: Fixed failure for converting a layout of small chunked dataset
- (size < 1K) to contiguous layout. HDFFV-8214 (JKM 2013/03/18)
- - h5diff: Fixed to return the correct exit code 1 when a unique extra
- attribute is detected. Prior to this fix, h5diff returned exit code 0,
- indicating the two files are identical. HDFFV-7643 (JKM 2013/02/15)
- - h5diff: Improved speed when comparing HDF5 files with lots of
- attributes. Much slower performance was identified with release
- version from 1.8.7 to 1.8.10 compared to 1.8.6. (JKM 2012/10/19)
- - h5repack: "h5repack -f NONE file1.h5 out.h5" command failed if
- source file contains chunked dataset and a chunk dim is bigger than
- the dataset dim. Another issue is that the command changed max dims
- if chunk dim is smaller than the dataset dim.
- These issues occurred when the dataset size is smaller than 64k (the
- compact size limit). Fixed both.
- HDFFV-8012 (JKM 2012/09/24)
- - h5diff: Fixed to not accumulate attribute differences into the dataset
- difference in verbose mode (-v, -r), which caused an incorrect
- difference between dataset and group/datatype objects if attributes
- existed with any differences. This also led to fixing the inconsistent
- format indicating the difference between dataset and group/datatype
- objects. HDFFV-5919 (JKM 2012/09/05)
- - h5diff: Fixed the incorrect result when comparing attribute data
- values when the datatypes have the same class but different sizes.
- HDFFV-7942 (JKM 2012/08/15)
- - ph5diff: Fixed intermittent hang issue on a certain operation in
- parallel mode. It was detected by daily test for comparing
- non-comparable objects, but it could have occurred in other
- operations depending on machine conditions. HDFFV-8003 (JKM 2012/08/01)
- - h5diff: Fixed test failure for "make check" due to failure of
- copying test files when performed in HDF5 source tree. Also applied
- to other tools.
- HDFFV-8107 (JKM 2012/08/01)
- - h5diff: Fixed the Function COPY_TESTFILES_TO_TESTDIR() of
- testh5diff.sh to better report when there is an error in the file
- copying. HDFFV-8105 (AKC -2012/07/22)
- - h5diff: Fixed to not check and display dangling link status without the
- --follow-symlinks option. This also improved performance when
- comparing lots of external links without the --follow-symlinks
- option. HDFFV-7998 (JKM 2012/04/26)
- - h5unjam: Fixed segfault when the -V (show version) option was used.
- HDFFV-8001 (JKM 2012/04/19)
- - h5repack: Fixed a failure when changing the chunk size of a specified
- chunked dataset with unlimited max dims. HDFFV-7993 (JKM 2012/04/11)
- - h5diff: Fixed failure when comparing same-named objects with different
- object types while comparing groups. Prior to the fix, h5diff resulted
- in an error. After the fix, h5diff detects such a case as non-comparable
- and displays messages accordingly. HDFFV-7664 (JKM 2012/03/28)
- - h5diff: If unique objects exist only in one file and the --exclude-path
- option is used to exclude them, h5diff missed excluding some objects.
- Fixed to exclude objects correctly in such cases.
- HDFFV-7837 (JKM 2012/03/20)
- - h5dump: Added tools library error stack to properly catch error
- information generated within the library.
- HDFFV-7958 (ADB 2012/03/12)
- - h5dump: Dangling links no longer throw an error message; the process
- was changed for when opening a link fails.
- HDFFV-7839 (ADB 2012/03/12)
- - h5diff: When two symbolic dangling links are compared with
- --follow-symlinks option, the result should be the same. It worked for
- comparing two files, but didn't work for comparing two objects.
- HDFFV-7835 (JKM 2012/03/09)
- - h5dump: Refactored code to remove duplicated functions. Split XML
- functions from DDL functions. Corrected indentation and formatting
- errors. Also fixed subsetting counting overflow (HDFFV-5874). Verified
- all tools call tools_init() in main.
- HDFFV-7560 (ADB 2012/02/17)
- - h5diff: fixed to prevent displaying an error stack message when
- comparing two dangling symlinks with the --follow-symlinks option.
- HDFFV-7836 (JKM 2012/01/13)
- - h5repack: fixed memory leak for handling variable length string in
- attribute. HDFFV-7840 (JKM 2012/01/06)
- - h5ls: fixed segfault when accessing region reference data in an
- attribute. HDFFV-7838 (JKM 2011/12/29)
- - h5diff: fixed segfault over non-comparable attribute with different
- dimension or rank, along with the '-c' option to display details.
- HDFFV-7770 (JKM 2011/10/24)
- - Fixed h5diff to display all the comparable objects and attributes
- regardless of non-comparables. HDFFV-7693 (JKM 2011/09/16)
- - Fixed h5repack to update values of references (object and region) of
- attributes for 1) references, 2) ARRAY of references,
- 3) VLEN of references, and 4) COMPOUND of references.
- (JIRA HDF5 5932) PC -2011/09/14
- - h5diff: fixed segfault over dataset with container types
- (array, vlen) with multiple nested compound types.
- (ex: compound->array->compound, compound->vlen->compound)
- HDFFV-7712 JKM (2011/09/01)
- - h5repack: added macro to handle failure in H5Dread/write when memory
- allocation failed inside the library. (PC -- 2011/08/19)
- - Fixed h5jam not to allow specifying an HDF5 formatted file as input
- file for -u (user block file) option, because the original HDF5 file
- would not be accessible if that were allowed. HDFFV-5941 (JKM 2011/08/15)
- - Revised command help pages of h5jam and h5unjam. The descriptions
- were not up to date and some were missing.
- HDFFV-7515 (JKM 2011/08/15)
- - h5repack: h5repack failed to copy dataset if the layout is changed
- from chunked with unlimited dims to contiguous. HDFFV-7649
- (PC -- 2011/07/15)
- - h5diff: "--delta" option considers two NaN of the same type are
- different, which is wrong based on h5diff description in Reference
- Manual. HDFFV-7656 (PC -- 2011/07/15)
- - Fixed h5diff to display instructive error message and exit with 1
- when mutually exclusive options (-d, -p and --use-system-epsilon)
- are used together. HDFFV-7600 (JKM 2011/07/07)
- - Fixed h5dump to display the first line of each element in the correct
- position for multi-dimensional array types.
- Before this fix, the first line of each element in an array was
- displayed after the last line of the previous element without
- moving to the next line (+indentation).
- Bug #HDFFV-5878 (JKM 2011/06/15)
- - Fixed h5dump to display correct value for H5T_STD_I8LE dataset
- on a system (ppc64, linux, Big-Endian, clustering).
- Bug #HDFFV-7594 (ABERT & JKM 2011/05/12)
- - Fixed h5diff to compare a file with itself correctly. Previously h5diff
- reported either different or not comparable in certain cases even when
- comparing a file with itself. This fix also improves performance when
- comparing the same target objects by verifying the object and file
- addresses before comparing the details in the objects (ex: datasets
- or attributes). Bug #HDFFV-5928 (XCAO & JKM 2011/05/06)
- - Updated h5dump test case script to prevent entire test failure when the
- source directory is read-only. Bug# HDFFV-4342 (JKM 2011/4/12)
- - Fixed h5dump displaying incorrect values for H5T_STD_I8BE type data in
- an attribute on Big-Endian machines. H5T_STD_I8BE is a signed 8-bit type,
- so h5dump is supposed to display -2 instead of 254. It worked
- correctly on Little-Endian systems, but not on Big-Endian systems.
- Bug #HDFFV-4358 (JKM 2011/04/08)
- - Updated to unify option name to '--enable-error-stack' for printing
- HDF5 error stack messages for HDF5 tools. h5ls and h5dump for now.
- For h5ls, this replaces "-e/--errors" option, which is deprecated.
- Bug#2182 (JKM 2011/3/30)
- - Fix h5diff for --use-system-epsilon option: the calculation changed
- from ( |a - b| / b ) to ( |a - b| ). This was decided for better
- performance. Bug#2184 (JKM 2011/3/24)
- - Fixed output for H5T_REFERENCE in h5dump. According to the BNF document
- the output of a H5T_REFERENCE should be followed by the type;
- <reference> ::= H5T_REFERENCE { <ref_type> }
- <ref_type> ::= H5T_STD_REF_OBJECT | H5T_STD_REF_DSETREG
- Previously this was only displayed if the -R option was used.
- Bug#1725 (ADB 2011/3/28)
- - Fixed h5diff issues: #1: h5diff compared attributes correctly only
- when two objects have the same number of attributes and the attribute
- names are identical; #2: it didn't display useful information about
- attribute differences. Bug#2121 (JKM 2011/3/17)
- - Fixed memory leak for h5diff when accessing symbolic links with
- the --follow-symlinks option. Bug#2214 (JKM 2011/3/18)
- - Fixed memory leak for h5diff when accessing variable-length string
- data. Bug#2216 (JKM 2011/3/18)
- - Fixed and improved help page for -a option of h5ls.
- Bug#1904 (JKM 2011/3/11)
- - Fixed h5dump not to include attribute values in the output file when
- h5dump "-y -o output_file" options were used. The problem was introduced
- in HDF5 1.8.6 by showing data pointed by region references. (XCAO 2011/3/9)
- - Fixed h5copy to be able to copy any object into the same HDF5 file.
- Previously h5copy displayed an error message when the target file is the
- same as the source file. (XCAO 2011/3/8)
- - Fixed h5dump for skipping some values for long array type dataset on
- Windows. This issue only occurred on Windows due to the different
- return behavior from the _vsnprintf() function. Bug#2161 (JKM 2011/3/3)
- - Fixed h5dump skipping array indices at certain intervals when the
- array type dataset is relatively big. The interval varies
- according to the size of the array. Bug#2092 (JKM 2011/2/15).
- - Fixed h5diff for the segfault when comparing compound datasets
- with a combination of fixed-length string types and vlen string types
- in certain orders. bug#2089 (JKM 2010/12/28)
- - Improved h5diff performance: 1) use HDmemcmp() before comparing each
- element, 2) replace expensive H5Tequal() calls, 3) retrieve datatype
- information at the dataset level, not at each element level, for
- compound datasets.
- - Fixed h5ls to display nested compound types with curly brackets
- when the -S (--simple) option is used with -l (--label), so it shows
- which member (in curly brackets) belongs to which nested compound type,
- making the output easier to read. bug#1979 (JKM 2010/11/09)
- - Fixed h5diff to handle variable-length strings in a compound dataset
- correctly. (also variable-length string array in a compound dataset)
- Garbage values were displayed when h5diff compared multiple
- variable-length strings in a compound type dataset.
- Bug#1989 (JKM 2010/10/28)
- - Fixed h5copy to fail gracefully when copying an object to a non-existent
- group without the -p option. Bug#2040 (JKM 2010/10/18)
- - Fixed to compare member objects and groups recursively when two
- files or groups are specified to be compared. Bug#1975
- (JKM 2010/9/16)
- - Made h5repack able to convert a layout to COMPACT for small
- datasets by default. bug#1896 (JKM 2010/09/15)
- - Changed h5ls to not manipulate special characters in object names or
- attribute names for smart display. bug#1784 (JKM 2010/06/28)
- - Fixed h5ls to return exit code 1 (error) when non-existent file is
- specified. bug#1793. (JKM 2010/04/27)
- - h5copy failed to copy dangling link when the link is specified
- directly. bug#1817. (JKM 2010/04/22)
- - h5repack lost attributes from a dataset of reference type. bug#1726.
- (JKM 2010/3/25)
- - h5repack sets NULL for object reference value for group or
- named datatype. bug#1814. (JKM 2010/03/19)
- - h5diff: fixed incorrect behavior (hang) in parallel mode when
- invalid options are specified (ex: -v and -q). (JKM 2010/02/17)
- - h5dump/h5ls display buffer resize fixed in tools library.
- (ADB - 2009/07/21 - 1520)
- - Fixed many problems that could occur when using h5repack with named
- datatypes. (NAF - 2009/4/20 - 1516/1466)
- - h5dump, h5diff, h5repack were not reading (by hyperslabs) datasets
- that have a datatype datum size greater than H5TOOLS_BUFSIZE, a
- constant defined as 1024Kb, such as array types with large
- dimensions (PVN - 2009/4/1 - 1501)
- - h5import: When a compression type was selected, a big endian byte order
- was also being selected (PVN - 2009/3/11 - 1462)
- - zip_perf.c had missing argument on one of the open() calls. Fixed.
- (AKC - 2008/12/9)
- - h5dump now checks for uniqueness of committed datatypes.
- (NAF - 2008/10/15)
- - Fixed unnecessary indentation of committed datatypes in h5dump.
- (NAF - 2008/10/15)
- - Fixed bugs in h5stat: a segmentation fault when printing groups, and
- added a warning message when traversal of objects is unsuccessful.
- (see bug #1253) (VC- 2008/10/13)
- - Fixed bug in h5ls that prevented relative group listings (like
- "h5ls foo.h5/bar") from working correctly (QAK - 2008/06/03)
- - Fixed bug in h5diff that prevented datasets & attributes with
- variable-length string elements from comparing correctly.
- (QAK - 2008/02/28)
- - h5import bug on Windows with binary datasets. fread on Windows needs a
- binary file to be opened with 'rb' instead of 'r'; otherwise it
- terminates execution if an end-of-file character is found in the
- input file. In addition, the binary file generated needs to be opened
- with 'wb', otherwise an end-of-line character is read twice.
- (PVN - 2008/02/19)
- - Fixed bug in h5dump that caused binary output to be made only for the first
- dataset, when several datasets were requested. (PVN - 2008/04/07)
- - h5dump: when doing binary output (-b), the stdout printing of attributes
- was done incorrectly. Removed printing of attributes when doing binary
- output. PVN - 2008/06/05
+ - Improved h5diff comparison of strings and arrays.
+ (ADB, 2017/05/18, HDFFV-9055, HDFFV-10128)
High-Level APIs:
- ------
- - Packet Table is updated.
-
- In the Packet Table C API, there are changes with the following functions,
- which had been ifdef'ed out with VLPT_REMOVED since 2006
- * H5PTcreate_vl, is removed from this release
- * H5PTfree_vlen_readbuff, is renamed to H5PTfree_vlen_buff
- * H5PTis_varlen, is made available again
-
- Various cleanup: replacing 0/-1 with SUCCEED/FAIL and H5I_BADID with
- H5I_INVALID_HID. (BMR, 2016/03/04, HDFFV-442)
-
- - Fixed problem with H5DSget_scale_name including the NULL terminator in
- the size calculation returned by the function. The API does not
- include the NULL terminator in the size returned (MSB- 2013/2/10)
-
- - Fixed problem with H5TBdelete_record destroying all data following the deletion
- of a row. (MSB- 2012/7/26)
-
- - Fixed H5LTget_attribute_string not closing an object identifier when an
- error occurs. (MSB- 2012/7/21)
-
- - Fixed the H5LTdtype_to_text function. It had some memory problems when
- dealing with some complicated data types. HDFFV-7701 (SLU - 2011/10/19)
-
- - Fixed a bug in H5DSattach_scale, H5DSis_attached and H5DSdetach_scale
- caused by using H5Tget_native_type function to determine the native
- type for reading REFERENCE_LIST attribute. The bug was exposed
- on Mac PPC.
- (EIP - 2010/05/22 -1851)
- - Fixed a bug in the H5DSdetach_scale function when 0 bytes
- were allocated after the last reference to a dim. scale
- was removed from the list of references in a VL element of the
- DIMENSION_LIST attribute; modified the function to comply
- with the Spec: the DIMENSION_LIST attribute is now deleted when no
- dimension scales are left attached.
- (EIP - 2010/05/14 -1822)
- - Fixed a bug where the H5TB API would forget the order of fields when
- added out of offset order. (NAF - 2009/10/27 - 1582)
- - H5DSis_attached failed to account for different platform types. Added a
- get native type call. (ADB - 2009/9/29 - 1562)
- - Dimension scales: The scale index return value in H5DSiterate_scales was not always
- incremented. (PVN - 2009/4/8 - 1538)
+ ------
+ -
Fortran High-Level APIs:
- ------
-
- - Lite: The h5ltget_attribute_string_f function used to return the C NULL
- character in the returned character buffer. The returned Fortran character
- buffer now does not contain the C NULL character. (MSB - 2012/3/23)
- - Lite: The h5ltget_dataset_info_f function (gets information about a dataset)
- was not correctly returning the dimension array. (PVN - 2009/3/23)
- - Lite: the h5ltread_dataset_string_f and h5ltget_attribute_string_f functions
- had memory problems with the g95 fortran compiler. (PVN - 5/13/2009) 1522
-
-
-
-
+ ------
+ -
Documentation
-------------
-
+ -
F90 APIs
--------
- - Modified the h5open_f and h5close_f subroutines to not call H5open
- and H5close, respectively. While the H5open call just adds overhead,
- the H5close call made by a Fortran application shuts down the HDF5
- library, making it inaccessible to the application.
- HDFFV-915 (EIP & SB - 2011/10/13)
-
+ -
C++ APIs
--------
- - The constructor PropList::PropList(id) was fixed to act properly
- according to the nature of 'id'. When 'id' is a property class id,
- a new property list will be created. When 'id' is a property list id,
- a copy of the property list will be made. (BMR - 2010/5/9)
- - The parameters 'size' and 'bufsize' in CommonFG::getLinkval and
- CommonFG::getComment, respectively, now have default values for
- user's convenience. (BMR - 2009/10/23)
- - NULL pointer accessing was fixed, bugzilla 1061. (BMR - 2009/10/05)
- - read/write methods of DataSet and Attribute classes were fixed
- to handle string correctly. (BMR - 2009/07/26)
- - Fixed bug that caused segfaults in Attribute::read. (BMR - 2008/04/20)
- - Fixed bug in PropList::getClassName to use portable HDfree instead
- of free. (BMR - 2008/04/20)
- - Fixed a design bug which allowed an Attribute object to create/modify
- attributes (bugzilla #1068). The API class hierarchy was revised
- to address the problem. Classes AbstractDs and Attribute are moved
- out of H5Object. Class Attribute now multiply inherits from
- IdComponent and AbstractDs and class DataSet from H5Object and
- AbstractDs. In addition, the data member IdComponent::id was
- moved into subclasses: Attribute, DataSet, DataSpace, DataType,
- H5File, Group, and PropList. (BMR - 2008/08/10)
- - IdComponent::dereference was incorrect and replaced as described
- in "New Features" section.
- (BMR - 2008/08/10)
+ -
Testing
-------
- - tools/h5diff/testh5diff.sh is run in every "make check", even after it
- has passed in the previous run. It should not run again if there are no
- code changes. Fixed. (AKC - 2013/07/19 HDFFV-8392)
- - On some Mac systems, testlibinfo.sh failed with this error:
- Check file ../src/.libs/libhdf5.7.dylib
- strings: object: ../src/.libs/libhdf5.7.dylib malformed object \
- (unknown load command 15)
- The strings command of Mac inspects library files, and older
- versions of strings may not know the newer library format, resulting
- in errors. Fixed by sending the library file as stdin to the strings
- command to avoid this problem. (AKC - 2013/03/08 HDFFV-8305)
-
- - Fixed a typo in the ERROR macro in test/testhdf5.h. It caused a
- segmentation fault when used previously. (AKC - 2013/02/12 HDFFV-8267)
-
+ -
Supported Platforms
===================
- AIX 6.1 xlc 10.1.0.5
- (NASA G-ADA) xlC 10.1.0.5
- xlf90 12.1.0.6
-
- Linux 2.6.18-308.13.1.el5PAE GNU C (gcc), Fortran (gfortran), C++ (g++)
- #1 SMP i686 i686 i386 compilers for 32-bit applications;
- (jam) Version 4.1.2 20080704 (Red Hat 4.1.2-54)
- Version 4.8.2
- PGI C, Fortran, C++ Compilers for 32-bit
- applications;
- Version 13.7-0
- Intel(R) C, C++, Fortran Compiler for 32-bit
- applications;
- Version 14.0.2 (Build 20140120)
-
- Linux 2.6.18-371.6.1.el5 GNU C (gcc), Fortran (gfortran), C++ (g++)
- #1 SMP x86_64 GNU/Linux compilers for 64-bit applications;
- (koala) Version 4.1.2 20080704 (Red Hat 4.1.2-54)
- Version 4.8.2
- Intel(R) C, C++, Fortran Compilers for
- applications running on Intel(R) 64;
- Version 14.0.2 (Build 20140120)
-
- Linux 2.6.32-431.11.2.el6 GNU C (gcc), Fortran (gfortran), C++ (g++)
+
+ Linux 2.6.32-573.22.1.el6 GNU C (gcc), Fortran (gfortran), C++ (g++)
#1 SMP x86_64 GNU/Linux compilers:
- (platypus) Version 4.4.7 20120313
- Version 4.8.2
+ (mayll/platypus) Version 4.4.7 20120313
+ Version 4.8.4
PGI C, Fortran, C++ for 64-bit target on
x86-64;
- Version 13.7-0
+ Version 16.10-0
Intel(R) C (icc), C++ (icpc), Fortran (icc)
compilers:
- Version 14.0.2 (Build 20140120)
+ Version 15.0.3.187 (Build 20150407)
+ MPICH 3.1.4 compiled with GCC 4.9.3
- Linux 2.6.32-431.29.2.el6.ppc64 gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-4)
+ Linux 2.6.32-573.18.1.el6.ppc64 gcc (GCC) 4.4.7 20120313 (Red Hat 4.4.7-4)
#1 SMP ppc64 GNU/Linux g++ (GCC) 4.4.7 20120313 (Red Hat 4.4.7-4)
(ostrich) GNU Fortran (GCC) 4.4.7 20120313 (Red Hat 4.4.7-4)
IBM XL C/C++ V13.1
IBM XL Fortran V15.1
- Linux 2.6.32-220.23.1.1chaos Intel C, C++, Fortran Compilers
- ch5.x86_64 GNU/Linux Version 12.1.5.339
- (LLNL Aztec)
-
- IBM Blue Gene/P XL C for Blue Gene/P, bgxlc V9.0
- (LLNL uDawn) XL C++ for Blue Gene/P, bgxlC V9.0
- XL Fortran for Blue Gene/P, bgxlf90 V11.1
+ Linux 3.10.0-327.10.1.el7 GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP x86_64 GNU/Linux compilers:
+ (kituo/moohan) Version 4.8.5 20150623 (Red Hat 4.8.5-4)
+ Version 4.9.3, Version 5.2.0
+ Intel(R) C (icc), C++ (icpc), Fortran (icc)
+ compilers:
+ Version 15.0.3.187 Build 20150407
+ MPICH 3.1.4 compiled with GCC 4.9.3
SunOS 5.11 32- and 64-bit Sun C 5.12 SunOS_sparc
(emu) Sun Fortran 95 8.6 SunOS_sparc
Sun C++ 5.12 SunOS_sparc
- Windows 7 Visual Studio 2008 (cmake)
- Visual Studio 2010 w/ Intel Fortran 14 (cmake)
- Visual Studio 2012 w/ Intel Fortran 15 (cmake)
+ Windows 7 Visual Studio 2012 w/ Intel Fortran 15 (cmake)
Visual Studio 2013 w/ Intel Fortran 15 (cmake)
- Visual Studio 2015 (cmake)
- Cygwin(CYGWIN_NT-6.1 1.7.34(0.285/5/3) gcc(4.9.2) compiler and gfortran)
+ Visual Studio 2015 w/ Intel Fortran 16 (cmake)
+ Cygwin(CYGWIN_NT-6.1 2.2.1(0.289/5/3) gcc(4.9.3) compiler and gfortran)
(cmake and autotools)
- Windows 7 x64 Visual Studio 2008 (cmake)
- Visual Studio 2010 w/ Intel Fortran 14 (cmake)
- Visual Studio 2012 w/ Intel Fortran 15 (cmake)
+ Windows 7 x64 Visual Studio 2012 w/ Intel Fortran 15 (cmake)
Visual Studio 2013 w/ Intel Fortran 15 (cmake)
- Visual Studio 2015 (cmake)
+ Visual Studio 2015 w/ Intel Fortran 16 (cmake)
+ Visual Studio 2015 w/ Intel Parallel Studio 2017 (cmake)
- Windows 8.1 Visual Studio 2012 w/ Intel Fortran 15 (cmake)
- Visual Studio 2013 w/ Intel Fortran 15 (cmake)
+ Windows 10 Visual Studio 2015 w/ Intel Fortran 16 (cmake)
- Windows 8.1 x64 Visual Studio 2012 w/ Intel Fortran 15 (cmake)
- Visual Studio 2013 w/ Intel Fortran 15 (cmake)
+ Windows 10 x64 Visual Studio 2015 w/ Intel Fortran 16 (cmake)
+
+ Mac OS X Mt. Lion 10.8.5 Apple clang/clang++ version 5.1 from Xcode 5.1
+ 64-bit gfortran GNU Fortran (GCC) 4.8.2
+ (swallow/kite) Intel icc/icpc/ifort version 15.0.3
+
+ Mac OS X Mavericks 10.9.5 Apple clang/clang++ version 6.0 from Xcode 6.2
+ 64-bit gfortran GNU Fortran (GCC) 4.9.2
+ (wren/quail) Intel icc/icpc/ifort version 15.0.3
- Mac OS X Lion 10.7.3 gcc i686-apple-darwin11-llvm-gcc-4.2 (GCC) 4.2.1 from Xcode 4.2.1
- 32- and 64-bit g++ i686-apple-darwin11-llvm-g++-4.2 (GCC) 4.2.1 from Xcode 4.2.1
- (duck) gfortran GNU Fortran (GCC) 4.6.2
+ Mac OS X Yosemite 10.10.5 Apple clang/clang++ version 6.1 from Xcode 7.0
+ 64-bit gfortran GNU Fortran (GCC) 4.9.2
+ (osx1010dev/osx1010test) Intel icc/icpc/ifort version 15.0.3
- Mac OS X Mountain Lion 10.8.1 cc Apple clang version 4.0 from Xcode 4.5.1
- (owl) c++ Apple clang version 4.0 from Xcode 4.5.1
- gcc i686-apple-darwin11-llvm-gcc-4.2 (GCC) 4.2.1 from Xcode 4.5.1
- g++ i686-apple-darwin11-llvm-g++-4.2 (GCC) 4.2.1 from Xcode 4.5.1
- gfortran GNU Fortran (GCC) 4.6.2
+ Mac OS X El Capitan 10.11.6 Apple clang/clang++ version 7.3.0 from Xcode 7.3
+ 64-bit gfortran GNU Fortran (GCC) 5.2.0
+ (osx1010dev/osx1010test) Intel icc/icpc/ifort version 16.0.2
Tested Configuration Features Summary
@@ -1393,25 +229,21 @@ Platform C F90/ F90 C++ zlib SZIP
Solaris2.11 32-bit n y/y n y y y
Solaris2.11 64-bit n y/n n y y y
Windows 7 y y/y n y y y
-Windows 7 x64 y y/y n y y y
+Windows 7 x64 y y/y y y y y
Windows 7 Cygwin n y/n n y y y
Windows 7 x64 Cygwin n y/n n y y y
-Windows 8 y y/y n y y y
-Windows 8 x64 y y/y n y y y
-Mac OS X Lion 10.7.3 32-bit n y/y n y y n
-Mac OS X Lion 10.7.3 64-bit n y/y n y y y
-Mac OS X Mountain Lion 10.8.1 64-bit n y/y n y y y
-Mac OS X Mavericks 10.9.1 64-bit n y/y n y y ?
-AIX 6.1 32- and 64-bit n y/n n y y y
-CentOS 5.9 Linux 2.6.18-308 i686 GNU y y/y y y y y
-CentOS 5.9 Linux 2.6.18-308 i686 Intel n y/y n y y y
-CentOS 5.9 Linux 2.6.18-308 i686 PGI n y/y n y y y
-CentOS 5.9 Linux 2.6.18 x86_64 GNU n y/y n y y y
-CentOS 5.9 Linux 2.6.18 x86_64 Intel n y/y n y y y
-CentOS 6.4 Linux 2.6.32 x86_64 GNU y y/y y y y y
-CentOS 6.4 Linux 2.6.32 x86_64 Intel n y/y n y y y
-CentOS 6.4 Linux 2.6.32 x86_64 PGI n y/y n y y y
-Linux 2.6.32-431.11.2.el6.ppc64 n y/n n y y y
+Windows 10 y y/y n y y y
+Windows 10 x64 y y/y n y y y
+Mac OS X Mountain Lion 10.8.5 64-bit n y/y n y y y
+Mac OS X Mavericks 10.9.5 64-bit n y/y n y y ?
+Mac OS X Yosemite 10.10.5 64-bit n y/y n y y ?
+Mac OS X El Capitan 10.11.6 64-bit n y/y n y y ?
+CentOS 6.7 Linux 2.6.18 x86_64 GNU n y/y n y y y
+CentOS 6.7 Linux 2.6.18 x86_64 Intel n y/y n y y y
+CentOS 6.7 Linux 2.6.32 x86_64 PGI n y/y n y y y
+CentOS 7.2 Linux 2.6.32 x86_64 GNU y y/y y y y y
+CentOS 7.2 Linux 2.6.32 x86_64 Intel n y/y n y y y
+Linux 2.6.32-573.18.1.el6.ppc64 n y/n n y y y
Platform Shared Shared Shared Thread-
@@ -1422,22 +254,18 @@ Windows 7 y y y y
Windows 7 x64 y y y y
Windows 7 Cygwin n n n y
Windows 7 x64 Cygwin n n n y
-Windows 8 y y y y
-Windows 8 x64 y y y y
-Mac OS X Lion 10.7.3 32-bit y n y y
-Mac OS X Lion 10.7.3 64-bit y n y y
-Mac OS X Mountain Lion 10.8.1 64-bit y n y y
-Mac OS X Mavericks 10.9.1 64-bit y n y y
-AIX 6.1 32- and 64-bit y n n y
-CentOS 5.9 Linux 2.6.18-308 i686 GNU y y y y
-CentOS 5.9 Linux 2.6.18-308 i686 Intel y y y n
-CentOS 5.9 Linux 2.6.18-308 i686 PGI y y y n
-CentOS 5.9 Linux 2.6.18 x86_64 GNU y y y y
-CentOS 5.9 Linux 2.6.18 x86_64 Intel y y y n
-CentOS 6.4 Linux 2.6.32 x86_64 GNU y y y n
-CentOS 6.4 Linux 2.6.32 x86_64 Intel y y y n
-CentOS 6.4 Linux 2.6.32 x86_64 PGI y y y n
-Linux 2.6.32-431.11.2.el6.ppc64 y y y n
+Windows 10 y y y y
+Windows 10 x64 y y y y
+Mac OS X Mountain Lion 10.8.5 64-bit y n y y
+Mac OS X Mavericks 10.9.5 64-bit y n y y
+Mac OS X Yosemite 10.10.5 64-bit y n y y
+Mac OS X El Capitan 10.11.6 64-bit y n y y
+CentOS 6.7 Linux 2.6.18 x86_64 GNU y y y y
+CentOS 6.7 Linux 2.6.18 x86_64 Intel y y y n
+CentOS 6.7 Linux 2.6.32 x86_64 PGI y y y n
+CentOS 7.2 Linux 2.6.32 x86_64 GNU y y y n
+CentOS 7.2 Linux 2.6.32 x86_64 Intel y y y n
+Linux 2.6.32-573.18.1.el6.ppc64 y y y n
Compiler versions for each platform are listed in the preceding
"Supported Platforms" table.
@@ -1447,315 +275,58 @@ More Tested Platforms
=====================
The following platforms are not supported but have been tested for this release.
- Linux 2.6.18-308.13.1.el5PAE MPICH mpich 3.1.2 compiled with
- #1 SMP i686 i686 i386 gcc 4.9.1 and gfortran 4.9.1
- (jam) g95 (GCC 4.0.3 (g95 0.94!)
-
- Linux 2.6.18-431.11.2.el6 MPICH mpich 3.1.2 compiled with
- #1 SMP x86_64 GNU/Linux gcc 4.9.1 and gfortran 4.9.1
- (platypus) g95 (GCC 4.0.3 (g95 0.94!)
-
- FreeBSD 8.2-STABLE i386 gcc 4.2.1 [FreeBSD] 20070719
- (loyalty) gcc 4.6.1 20110422
- g++ 4.6.1 20110422
- gfortran 4.6.1 20110422
-
- FreeBSD 8.2-STABLE amd64 gcc 4.2.1 [FreeBSD] 20070719
- (freedom) gcc 4.6.1 20110422
- g++ 4.6.1 20110422
- gfortran 4.6.1 20110422
-
- Debian7.5.0 3.2.0-4-686 #1 SMP Debian 3.2.51-1 i686 GNU/Linux
- gcc (Debian 4.7.2-5) 4.7.2
- GNU Fortran (Debian 4.7.2-5) 4.7.2
- (cmake and autotools)
-
- Debian7.5.0 3.2.0-4-amd64 #1 SMP Debian 3.2.51-1 x86_64 GNU/Linux
- gcc (Debian 4.7.2-5) 4.7.2
- GNU Fortran (Debian 4.7.2-5) 4.7.2
- (cmake and autotools)
-
- Fedora20 3.15.3-200.fc20.i6866 #1 SMP i686 i686 i386 GNU/Linux
- gcc (GCC) 4.8.3 20140624 (Red Hat 4.8.3-1)
- GNU Fortran (GCC) 4.8.3 20140624 (Red Hat 4.8.3-1)
- (cmake and autotools)
-
- Fedora20 3.15.3-200.fc20.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux
- gcc (GCC) 4.8.3 20140624 (Red Hat 4.8.3-1)
- GNU Fortran (GCC) 4.8.3 20140624 (Red Hat 4.8.3-1)
- (cmake and autotools)
-
- SUSE 13.1 3.11.10-17-desktop #1 SMP PREEMPT i686 athlon i386 GNU/Linux
- gcc (SUSE Linux) 4.8.1
- GNU Fortran (SUSE Linux) 4.8.1
- (cmake and autotools)
-
- SUSE 13.1 3.11.10-17-desktop #1 SMP PREEMPT x86_64 x86_64 x86_64 GNU/Linux
- gcc (SUSE Linux) 4.8.1
- GNU Fortran (SUSE Linux) 4.8.1
+ Linux 2.6.32-573.22.1.el6 GNU C (gcc), Fortran (gfortran), C++ (g++)
+ #1 SMP x86_64 GNU/Linux compilers:
+ (mayll/platypus) Version 4.4.7 20120313
+ Version 4.8.4
+ PGI C, Fortran, C++ for 64-bit target on
+ x86-64;
+ Version 16.10-0
+ Intel(R) C (icc), C++ (icpc), Fortran (icc)
+ compilers:
+ Version 15.0.3.187 (Build 20150407)
+ MPICH 3.1.4 compiled with GCC 4.9.3
+
+ Linux 3.10.0-327.18.2.el7 GNU C (gcc) and C++ (g++) compilers
+ #1 SMP x86_64 GNU/Linux Version 4.8.5 20150623 (Red Hat 4.8.5-4)
+ (jelly) with NAG Fortran Compiler Release 6.1(Tozai)
+ Intel(R) C (icc) and C++ (icpc) compilers
+ Version 15.0.3.187 (Build 20150407)
+ with NAG Fortran Compiler Release 6.1(Tozai)
+
+ Linux 2.6.32-573.18.1.el6.ppc64 MPICH mpich 3.1.4 compiled with
+ #1 SMP ppc64 GNU/Linux IBM XL C/C++ for Linux, V13.1
+ (ostrich) and IBM XL Fortran for Linux, V15.1
+
+ Debian 8.4 3.16.0-4-amd64 #1 SMP Debian 3.16.36-1 x86_64 GNU/Linux
+ gcc, g++ (Debian 4.9.2-10) 4.9.2
+ GNU Fortran (Debian 4.9.2-10) 4.9.2
(cmake and autotools)
- Ubuntu 14.04 3.13.0-35-generic #62-Ubuntu SMP i686 GNU/Linux
- gcc (Ubuntu/Linaro 4.9.1-0ubuntu1) 4.9.1
- GNU Fortran (Ubuntu/Linaro 4.9.1-0ubuntu1) 4.9.1
+ Fedora 24 4.7.2-201.fc24.x86_64 #1 SMP x86_64 x86_64 x86_64 GNU/Linux
+ gcc, g++ (GCC) 6.1.1 20160621
+ (Red Hat 6.1.1-3)
+ GNU Fortran (GCC) 6.1.1 20160621
+ (Red Hat 6.1.1-3)
(cmake and autotools)
- Ubuntu 14.04 3.13.0-35-generic #62-Ubuntu SMP x86_64 GNU/Linux
- gcc (Ubuntu/Linaro 4.9.1-0ubuntu1) 4.9.1
- GNU Fortran (Ubuntu/Linaro 4.9.1-0ubuntu1) 4.9.1
+ Ubuntu 16.04.1 4.4.0-38-generic #57-Ubuntu SMP x86_64 GNU/Linux
+ gcc, g++ (Ubuntu 5.4.0-6ubuntu1~16.04.2)
+ 5.4.0 20160609
+ GNU Fortran (Ubuntu 5.4.0-6ubuntu1~16.04.2)
+ 5.4.0 20160609
(cmake and autotools)
- Cray Linux Environment (CLE) PrgEnv-pgi/4.0.46
- hopper.nersc.gov pgcc 12.5-0 64-bit target on x86-64 Linux -tp shanghai
- pgf90 12.5-0 64-bit target on x86-64 Linux -tp shanghai
- pgCC 12.5-0 64-bit target on x86-64 Linux -tp shanghai
-
Known Problems
==============
-* "make check" fails on CYGWIN when building shared lib files is enabled. The
- default on Cygwin has been changed to disable shared. It can be enabled with
- the --enable-shared configure option but is likely to fail "make check"
- with GCC compilers. (LK -2015/04/16)
-
-* CLANG compiler with the options -fcatch-undefined-behavior and -ftrapv
- catches some undefined behavior in the alignment algorithm of the macro DETECT_I
- in H5detect.c (Issue 8147). Since the algorithm is trying to detect the alignment
- of integers, ideally the flag -fcatch-undefined-behavior shouldn't be used for
- H5detect.c. In the future, we can separate flags for H5detect.c from the rest of
- the library. (SLU - 2013/10/16)
-
-* The 5.9 C++ compiler on Sun failed to compile a C++ test ttypes.cpp. It
- complains with this message:
- "/home/hdf5/src/H5Vprivate.h", line 130: Error: __func__ is not defined.
-
- The reason is that __func__ is a predefined identifier in C99 standard. The
- HDF5 C library uses it in H5private.h. The test ttypes.cpp includes
- H5private.h (H5Tpkg.h<-H5Fprivate.h<-H5Vprivate.h<-H5private.h). Sun's 5.9
- C++ compiler doesn't support __func__, thus fails to compile the C++ test.
- But 5.11 C++ compiler does. To check whether your Sun C++ compiler knows this
- identifier, try to compile the following simple C++ program:
- #include<stdio.h>
-
- int main(void)
- {
- printf("%s\n", __func__);
- return 0;
- }
- (SLU - 2012/11/5)
-
-* The C++ and FORTRAN bindings are not currently working on FreeBSD with the
- native release 8.2 compilers (4.2.1), but are working with gcc 4.6 from the
- ports (and probably gcc releases after that).
- (QAK - 2012/10/19)
-
-* The data conversion test dt_arith.c has failures (segmentation fault) from
- "long double" to other datatypes during hard conversion when the library
- is built with the default GCC 4.2.1 on Mac Lion system. It only happens
- with optimization (-O3, -O2, and -O1). Some newer versions of GCC do not
- have this problem. Users should disable optimization or try a newer version
- of GCC. (Issue 8017. SLU - 2012/6/12)
-
-* The data conversion test dt_arith.c fails in "long double" to integer
- conversion on Ubuntu 11.10 (3.0.0.13 kernel) with GCC 4.6.1 if the library
- is built with optimization -O3 or -O2. The older GCC (4.5) or newer kernel
- (3.2.2 on Fedora) doesn't have the problem. Users should lower the
- optimization level (-O1 or -O0) by defining CFLAGS in the command line of
- "configure" like:
-
- CFLAGS=-O1 ./configure
-
- It will overwrite the library's default optimization level. (Issue 7829.
- SLU - 2012/2/7)
-
-* The --with-mpe configure option does not work with Mpich2. (AKC - 2011/03/10)
-
-* While working on the 1.8.6 release of HDF5, a bug was discovered that can
- occur when reading from a dataset in parallel shortly after it has been
- written to collectively. The issue was exposed by a new test in the parallel
- HDF5 test suite, but had existed before that. We believe the problem lies with
- certain MPI implementations and/or filesystems.
-
- We have provided a pure MPI test program, as well as a standalone HDF5
- program, that can be used to determine if this is an issue on your system.
- They should be run across multiple nodes with a varying number of processes.
- These programs can be found at:
- http://www.hdfgroup.org/ftp/HDF5/examples/known_problems/
-
-* Parallel mode in AIX will fail some of the testcheck_version.sh tests where
- it treats exit(134) the same as if process 0 had received an abort signal.
- This is fixed and will be available in the next release. AKC - 2009/11/3
-
-* The PathScale MPI implementation, accessing a Panasas file system, would
- cause H5Fcreate() with H5F_ACC_EXCL to fail even when the file does not
- exist. This is due to the MPI_File_open() call failing if the amode has
- the MPI_MODE_EXCL bit set. (See bug 1468 for details.) AKC - 2009/8/11
-
-* Parallel tests failed with 16 processes with data inconsistency at testphdf5
- / dataset_readAll. Parallel tests also failed with 32 and 64 processes with
- collective abort of all ranks at t_posix_compliant / allwrite_allread_blocks
- with MPI IO. (CMC - 2009/04/28)
-
-* For SNL, spirit/liberty/thunderbird: The serial tests pass but parallel
- tests failed with MPI-IO file locking message. AKC - 2007/6/25.
-* On Intel 64 Linux cluster (RH 4, Linux 2.6.9) with Intel 10.0 compilers use
- -mp -O1 compilation flags to build the libraries. Higher level of optimization
- causes failures in several HDF5 library tests.
-* For HPUX 11.23 many tools tests failed for 64-bit version when linked to the
- shared libraries (tested for 1.8.0-beta2)
-* For SNL, Red Storm: only parallel HDF5 is supported. The serial tests pass
- and the parallel tests also pass with lots of non-fatal error messages.
-* On SUN 5.10, the C++ test fails in the "Testing Shared Datatypes with Attributes" test
-* configuring with --enable-debug=all produces compiler errors on most
- platforms. Users who want to run HDF5 in debug mode should use
- --enable-debug rather than --enable-debug=all to enable debugging
- information on most modules.
-* On Mac OS 10.4, test/dt_arith.c has some errors in conversion from long
- double to (unsigned) long long and from (unsigned)long long to long double.
-* On Altix SGI with Intel 9.0 testmeta.c would not compile with -O3
- optimization flag.
-* On VAX, Scaleoffset filter isn't supported. The filter cannot be applied to
- HDF5 data generated on VAX. Scaleoffset filter only supports IEEE standard
- for floating-point data.
-* On Cray X1, a lone colon on the command line of h5dump --xml (as in
- the testh5dumpxml.sh script) is misinterpreted by the operating system
- and causes an error.
-* On mpich 1.2.5 and 1.2.6, if more than two processes contribute no I/O
- and the application asks to do collective I/O, we found that when using
- 4 processors, a simple collective write will sometimes hang. This can be
- verified with the t_mpi test under testpar.
-* The dataset created or rewritten with the v1.6.3 library or after can't
- be read with the v1.6.2 library or before when Fletcher32 EDC(filter) is
- enabled. There was a bug in the calculating code of the Fletcher32
- checksum in the library before v1.6.3. The checksum value wasn't consistent
- between big-endian and little-endian systems. This bug was fixed in
- Release 1.6.3. However, after fixing the bug, the checksum value is no
- longer the same as before on little-endian system. The library release
- after 1.6.4 can still read the dataset created or rewritten with the library
- of v1.6.2 or before. SLU - 2005/6/30
-* For version 6 (6.02 and 6.04) of the Portland Group compiler on the AMD
- Opteron processor, there's a bug in the compiler's optimization (-O2). The
- library failed in several tests, all related to the multi driver. The
- problem has been reported to the vendor.
-* On IBM AIX systems, parallel HDF5 mode will fail some tests with error
- messages like "INFO: 0031-XXX ...". This is from the command poe.
- Set the environment variable MP_INFOLEVEL to 0 to minimize the messages
- and run the tests again.
- The tests may fail with messages like "The socket name is already
- in use". HDF5 does not use sockets (except for stream-VFD). This is
- due to problems of the poe command trying to set up the debug socket.
- Check if there are many old /tmp/s.pedb.* staying around. These are
- sockets used by the poe command and left behind due to failed commands.
- Ask your system administrator to clean them out. Lastly, request IBM
- to provide a means to run poe without the debug socket.
-
-* The C++ library's tests fail when compiled with the PGI C++ compiler. The
- workaround until the problem is correctly handled is to use the
- flag "--instantiate=local" prior to the configure and build steps, as:
- setenv CXX "pgCC --instantiate=local" for pgCC 5.02 and higher
-
-
-* The stream-vfd test uses ip port 10007 for testing. If another
- application is already using that port address, the test will hang
- indefinitely and has to be terminated by the kill command. To try the
- test again, change the port address in test/stream_test.c to one not
- being used in the host.
-
-* The --enable-static-exec configure flag will only statically link libraries
- if the static version of that library is present. If only the shared version
- of a library exists (i.e., most system libraries on Solaris, AIX, and Mac,
- for example, only have shared versions), the flag should still result in a
- successful compilation, but note that the installed executables will not be
- fully static. Thus, the only guarantee on these systems is that the
- executable is statically linked with just the HDF5 library.
-
-* With the gcc 2.95.2 compiler, HDF 5 uses the `-ansi' flag during
- compilation. The ANSI version of the compiler complains about not being
- able to handle the `long long' datatype with the warning:
-
- warning: ANSI C does not support `long long'
-
- This warning is innocuous and can be safely ignored.
-
-* Certain platforms give false negatives when testing h5ls:
- - Cray J90 and Cray T90IEEE give errors during testing when displaying
- some floating-point values. These are benign differences due to
- the different precision in the values displayed and h5ls appears to
- be dumping floating-point numbers correctly.
-
-* Not all platforms behave correctly with szip's shared libraries. Szip is
- disabled in these cases, and a message is relayed at configure time. Static
- libraries should be working on all systems that support szip, and should be
- used when shared libraries are unavailable. There is also a configure error
- on Altix machines that incorrectly reports when a version of szip without
- an encoder is being used.
-
-* On some platforms that use Intel and Absoft compilers to build HDF5 fortran library,
- compilation may fail for fortranlib_test.f90, fflush1.f90 and fflush2.f90
- complaining about exit subroutine. Comment out the line
- IF (total_error .ne. 0) CALL exit (total_error)
-
-* Information about building with PGI and Intel compilers is available in
- INSTALL file sections 5.7 and 5.8
-
-* On at least one system, (SDSC DataStar), the scheduler (in this case
- LoadLeveler) sends job status updates to standard error when you run
- any executable that was compiled with the parallel compilers.
-
- This causes problems when running "make check" on parallel builds, as
- many of the tool tests function by saving the output from test runs,
- and comparing it to an exemplar.
-
- The best solution is to reconfigure the target system so it no longer
- inserts the extra text. However, this may not be practical.
-
- In such cases, one solution is to "setenv HDF5_Make_Ignore yes" prior to
- the configure and build. This will cause "make check" to continue after
- detecting errors in the tool tests. However, in the case of SDSC DataStar,
- it also leaves you with some 150 "failed" tests to examine by hand.
-
- A second solution is to write a script to run serial tests and filter
- out the text added by the scheduler. A sample script used on SDSC
- DataStar is given below, but you will probably have to customize it
- for your installation.
-
- Observe that the basic idea is to insert the script as the first item
- on the command line which executes the test. The script then
- executes the test and filters out the offending text before passing
- it on.
-
- #!/bin/csh
-
- set STDOUT_FILE=~/bin/serial_filter.stdout
- set STDERR_FILE=~/bin/serial_filter.stderr
-
- rm -f $STDOUT_FILE $STDERR_FILE
-
- ($* > $STDOUT_FILE) >& $STDERR_FILE
-
- set RETURN_VALUE=$status
-
- cat $STDOUT_FILE
-
- tail +3 $STDERR_FILE
-
- exit $RETURN_VALUE
-
- You get the HDF make files and test scripts to execute your filter script
- by setting the environment variable "RUNSERIAL" to the full path of the
- script prior to running configure for parallel builds. Remember to
- "unsetenv RUNSERIAL" before running configure for a serial build.
-
- Note that the RUNSERIAL environment variable exists so that we can
- prefix serial runs as necessary on the target system. On DataStar,
- no prefix is necessary. However on an MPICH system, the prefix might
- have to be set to something like "/usr/local/mpi/bin/mpirun -np 1" to
- get the serial tests to run at all.
- In such cases, you will have to include the regular prefix in your
- filter script.
+ At present, metadata cache images may not be generated by parallel
+ applications. Parallel applications can read files with metadata cache
+ images, but since this is a collective operation, a deadlock is possible
+ if one or more processes do not participate.
-* H5Ocopy() does not copy reg_ref attributes correctly when shared-message
- is turned on. The value of the reference in the destination attribute is
- wrong. This H5Ocopy problem will affect the h5copy tool.
+ Known problems in previous releases can be found in the HISTORY*.txt files
+ in the HDF5 source. Please report any new problems found to
+ help@hdfgroup.org.
diff --git a/release_docs/USING_CMake_Examples.txt b/release_docs/USING_CMake_Examples.txt
index d3618e9..f188ab3 100644
--- a/release_docs/USING_CMake_Examples.txt
+++ b/release_docs/USING_CMake_Examples.txt
@@ -22,7 +22,7 @@ I. Preconditions
1. We suggest you obtain the latest CMake for windows from the Kitware
web site. The HDF5 1.10.x product requires a minimum CMake version
- of 3.1.0.
+ of 3.2.2.
2. You have installed the HDF5 library built with CMake, by executing
the HDF Install Utility (the *.msi file in the binary package for
@@ -52,7 +52,7 @@ Default installation process:
with the CTEST_CONFIGURATION_TYPE script option. Note that this must
be the same as the value used with the -C command line option.
The default build configuration is defined to build and use static libraries.
- Shared libraries can be used with the STATIC_LIBRARIES script option set to "NO".
+ Shared libraries can be used with the STATIC_ONLY script option set to "NO".
Other options can be changed by editing the HDF5_Examples.cmake file.
If the defaults are okay, execute from this directory:
diff --git a/release_docs/USING_HDF5_CMake.txt b/release_docs/USING_HDF5_CMake.txt
index 313af83..ecf972d 100644
--- a/release_docs/USING_HDF5_CMake.txt
+++ b/release_docs/USING_HDF5_CMake.txt
@@ -37,7 +37,7 @@ I. Preconditions
1. We suggest you obtain the latest CMake for windows from the Kitware
web site. The HDF5 1.10.x product requires a minimum CMake version
- of 3.1.0.
+ of 3.2.2.
2. You have installed the HDF5 library built with CMake, by executing
the HDF Install Utility (the *.msi file in the binary package for
@@ -180,7 +180,7 @@ Given the preconditions in section I, create a CMakeLists.txt file at the
source root. Include the following text in the file:
##########################################################
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
project (HDF5MyApp C CXX)
set (LIB_TYPE STATIC) # or SHARED
@@ -221,103 +221,105 @@ NOTE: this file is available at the HDF web site:
HDF5_Examples.cmake
-
+Also available at the HDF web site is a CMake application framework template.
+You can quickly add files to the framework and execute the script to compile
+your application with an installed HDF5 binary.
========================================================================
ctest
========================================================================
-cmake_minimum_required(VERSION 3.1.0 FATAL_ERROR)
+cmake_minimum_required(VERSION 3.2.2 FATAL_ERROR)
###############################################################################################################
# This script will build and run the examples from a folder
# Execute from a command line:
# ctest -S HDF5_Examples.cmake,OPTION=VALUE -C Release -V -O test.log
###############################################################################################################
-set(CTEST_CMAKE_GENERATOR "@CMAKE_GENERATOR@")
-set(CTEST_DASHBOARD_ROOT ${CTEST_SCRIPT_DIRECTORY})
+set (CTEST_CMAKE_GENERATOR "@CMAKE_GENERATOR@")
+set (CTEST_DASHBOARD_ROOT ${CTEST_SCRIPT_DIRECTORY})
# handle input parameters to script.
#INSTALLDIR - HDF5 root folder
#CTEST_CONFIGURATION_TYPE - Release, Debug, RelWithDebInfo
#CTEST_SOURCE_NAME - name of source folder; HDF4Examples
-#STATIC_LIBRARIES - Default is YES
+#STATIC_ONLY - Default is YES
#FORTRAN_LIBRARIES - Default is NO
#JAVA_LIBRARIES - Default is NO
##NO_MAC_FORTRAN - set to TRUE to allow shared libs on a Mac)
-if(DEFINED CTEST_SCRIPT_ARG)
+if (DEFINED CTEST_SCRIPT_ARG)
# transform ctest script arguments of the form
# script.ctest,var1=value1,var2=value2
# to variables with the respective names set to the respective values
- string(REPLACE "," ";" script_args "${CTEST_SCRIPT_ARG}")
- foreach(current_var ${script_args})
+ string (REPLACE "," ";" script_args "${CTEST_SCRIPT_ARG}")
+ foreach (current_var ${script_args})
if ("${current_var}" MATCHES "^([^=]+)=(.+)$")
set("${CMAKE_MATCH_1}" "${CMAKE_MATCH_2}")
- endif()
- endforeach()
-endif()
-if(NOT DEFINED INSTALLDIR)
- set(INSTALLDIR "@CMAKE_INSTALL_PREFIX@")
-endif()
-if(NOT DEFINED CTEST_CONFIGURATION_TYPE)
- set(CTEST_CONFIGURATION_TYPE "Release")
-endif()
-if(NOT DEFINED CTEST_SOURCE_NAME)
- set(CTEST_SOURCE_NAME "HDF5Examples")
-endif()
-if(NOT DEFINED STATIC_LIBRARIES)
- set(STATICLIBRARIES "YES")
-else(NOT DEFINED STATIC_LIBRARIES)
- set(STATICLIBRARIES "NO")
-endif()
-if(NOT DEFINED FORTRAN_LIBRARIES)
- set(FORTRANLIBRARIES "NO")
-else(NOT DEFINED FORTRAN_LIBRARIES)
- set(FORTRANLIBRARIES "YES")
-endif()
-if(NOT DEFINED JAVA_LIBRARIES)
- set(JAVALIBRARIES "NO")
-else(NOT DEFINED JAVA_LIBRARIES)
- set(JAVALIBRARIES "YES")
-endif()
+ endif ()
+ endforeach ()
+endif ()
+if (NOT DEFINED INSTALLDIR)
+ set (INSTALLDIR "@CMAKE_INSTALL_PREFIX@")
+endif ()
+if (NOT DEFINED CTEST_CONFIGURATION_TYPE)
+ set (CTEST_CONFIGURATION_TYPE "Release")
+endif ()
+if (NOT DEFINED CTEST_SOURCE_NAME)
+ set (CTEST_SOURCE_NAME "HDF5Examples")
+endif ()
+if (NOT DEFINED STATIC_ONLY)
+ set (STATICONLYLIBRARIES "YES")
+else ()
+ set (STATICONLYLIBRARIES "NO")
+endif ()
+if (NOT DEFINED FORTRAN_LIBRARIES)
+ set (FORTRANLIBRARIES "NO")
+else ()
+ set (FORTRANLIBRARIES "YES")
+endif ()
+if (NOT DEFINED JAVA_LIBRARIES)
+ set (JAVALIBRARIES "NO")
+else ()
+ set (JAVALIBRARIES "YES")
+endif ()
#TAR_SOURCE - name of tarfile
-#if(NOT DEFINED TAR_SOURCE)
-# set(CTEST_USE_TAR_SOURCE "HDF5Examples-1.10.1-Source")
-#endif()
+#if (NOT DEFINED TAR_SOURCE)
+# set (CTEST_USE_TAR_SOURCE "HDF5Examples-1.10.1-Source")
+#endif ()
###############################################################################################################
# Adjust the following SET Commands as needed
###############################################################################################################
-if(WIN32)
- if(${STATICLIBRARIES})
- set(BUILD_OPTIONS "${BUILD_OPTIONS} -DBUILD_SHARED_LIBS:BOOL=OFF")
- endif()
- set(ENV{HDF5_DIR} "${INSTALLDIR}/cmake")
- set(CTEST_BINARY_NAME ${CTEST_SOURCE_NAME}\\build)
- set(CTEST_SOURCE_DIRECTORY "${CTEST_DASHBOARD_ROOT}\\${CTEST_SOURCE_NAME}")
- set(CTEST_BINARY_DIRECTORY "${CTEST_DASHBOARD_ROOT}\\${CTEST_BINARY_NAME}")
-else(WIN32)
- if(${STATICLIBRARIES})
- set(BUILD_OPTIONS "${BUILD_OPTIONS} -DBUILD_SHARED_LIBS:BOOL=OFF -DCMAKE_ANSI_CFLAGS:STRING=-fPIC")
- endif()
- set(ENV{HDF5_DIR} "${INSTALLDIR}/share/cmake")
- set(ENV{LD_LIBRARY_PATH} "${INSTALLDIR}/lib")
- set(CTEST_BINARY_NAME ${CTEST_SOURCE_NAME}/build)
- set(CTEST_SOURCE_DIRECTORY "${CTEST_DASHBOARD_ROOT}/${CTEST_SOURCE_NAME}")
- set(CTEST_BINARY_DIRECTORY "${CTEST_DASHBOARD_ROOT}/${CTEST_BINARY_NAME}")
+if (WIN32)
+ if (${STATICONLYLIBRARIES})
+ set (BUILD_OPTIONS "${BUILD_OPTIONS} -DBUILD_SHARED_LIBS:BOOL=OFF")
+ endif ()
+ set (ENV{HDF5_DIR} "${INSTALLDIR}/cmake")
+ set (CTEST_BINARY_NAME ${CTEST_SOURCE_NAME}\\build)
+ set (CTEST_SOURCE_DIRECTORY "${CTEST_DASHBOARD_ROOT}\\${CTEST_SOURCE_NAME}")
+ set (CTEST_BINARY_DIRECTORY "${CTEST_DASHBOARD_ROOT}\\${CTEST_BINARY_NAME}")
+else (WIN32)
+ if (${STATICONLYLIBRARIES})
+ set (BUILD_OPTIONS "${BUILD_OPTIONS} -DBUILD_SHARED_LIBS:BOOL=OFF -DCMAKE_ANSI_CFLAGS:STRING=-fPIC")
+ endif ()
+ set (ENV{HDF5_DIR} "${INSTALLDIR}/share/cmake")
+ set (ENV{LD_LIBRARY_PATH} "${INSTALLDIR}/lib")
+ set (CTEST_BINARY_NAME ${CTEST_SOURCE_NAME}/build)
+ set (CTEST_SOURCE_DIRECTORY "${CTEST_DASHBOARD_ROOT}/${CTEST_SOURCE_NAME}")
+ set (CTEST_BINARY_DIRECTORY "${CTEST_DASHBOARD_ROOT}/${CTEST_BINARY_NAME}")
endif(WIN32)
-if(${FORTRANLIBRARIES})
- set(BUILD_OPTIONS "${BUILD_OPTIONS} -DHDF_BUILD_FORTRAN:BOOL=ON")
-else()
- set(BUILD_OPTIONS "${BUILD_OPTIONS} -DHDF_BUILD_FORTRAN:BOOL=OFF")
-endif()
-if(${JAVALIBRARIES})
- set(BUILD_OPTIONS "${BUILD_OPTIONS} -DHDF_BUILD_JAVA:BOOL=ON")
-else()
- set(BUILD_OPTIONS "${BUILD_OPTIONS} -DHDF_BUILD_JAVA:BOOL=OFF")
-endif()
-set(BUILD_OPTIONS "${BUILD_OPTIONS} -DHDF5_PACKAGE_NAME:STRING=@HDF5_PACKAGE@@HDF_PACKAGE_EXT@")
+if (${FORTRANLIBRARIES})
+ set (BUILD_OPTIONS "${BUILD_OPTIONS} -DHDF_BUILD_FORTRAN:BOOL=ON")
+else ()
+ set (BUILD_OPTIONS "${BUILD_OPTIONS} -DHDF_BUILD_FORTRAN:BOOL=OFF")
+endif ()
+if (${JAVALIBRARIES})
+ set (BUILD_OPTIONS "${BUILD_OPTIONS} -DHDF_BUILD_JAVA:BOOL=ON")
+else ()
+ set (BUILD_OPTIONS "${BUILD_OPTIONS} -DHDF_BUILD_JAVA:BOOL=OFF")
+endif ()
+set (BUILD_OPTIONS "${BUILD_OPTIONS} -DHDF5_PACKAGE_NAME:STRING=@HDF5_PACKAGE@@HDF_PACKAGE_EXT@")
###############################################################################################################
# For any comments please contact cdashhelp@hdfgroup.org
@@ -327,90 +329,90 @@ set(BUILD_OPTIONS "${BUILD_OPTIONS} -DHDF5_PACKAGE_NAME:STRING=@HDF5_PACKAGE@@HD
#-----------------------------------------------------------------------------
# MAC machines need special option
#-----------------------------------------------------------------------------
-if(APPLE)
+if (APPLE)
# Compiler choice
- execute_process(COMMAND xcrun --find cc OUTPUT_VARIABLE XCODE_CC OUTPUT_STRIP_TRAILING_WHITESPACE)
- execute_process(COMMAND xcrun --find c++ OUTPUT_VARIABLE XCODE_CXX OUTPUT_STRIP_TRAILING_WHITESPACE)
- set(ENV{CC} "${XCODE_CC}")
- set(ENV{CXX} "${XCODE_CXX}")
- if(NOT NO_MAC_FORTRAN)
+ execute_process (COMMAND xcrun --find cc OUTPUT_VARIABLE XCODE_CC OUTPUT_STRIP_TRAILING_WHITESPACE)
+ execute_process (COMMAND xcrun --find c++ OUTPUT_VARIABLE XCODE_CXX OUTPUT_STRIP_TRAILING_WHITESPACE)
+ set (ENV{CC} "${XCODE_CC}")
+ set (ENV{CXX} "${XCODE_CXX}")
+ if (NOT NO_MAC_FORTRAN)
# Shared fortran is not supported, build static
- set(BUILD_OPTIONS "${BUILD_OPTIONS} -DBUILD_SHARED_LIBS:BOOL=OFF -DCMAKE_ANSI_CFLAGS:STRING=-fPIC")
- else()
- set(BUILD_OPTIONS "${BUILD_OPTIONS} -DHDF_BUILD_FORTRAN:BOOL=OFF")
- endif()
- set(BUILD_OPTIONS "${BUILD_OPTIONS} -DCTEST_USE_LAUNCHERS:BOOL=ON -DCMAKE_BUILD_WITH_INSTALL_RPATH:BOOL=OFF")
-endif()
+ set (BUILD_OPTIONS "${BUILD_OPTIONS} -DBUILD_SHARED_LIBS:BOOL=OFF -DCMAKE_ANSI_CFLAGS:STRING=-fPIC")
+ else ()
+ set (BUILD_OPTIONS "${BUILD_OPTIONS} -DHDF_BUILD_FORTRAN:BOOL=OFF")
+ endif ()
+ set (BUILD_OPTIONS "${BUILD_OPTIONS} -DCTEST_USE_LAUNCHERS:BOOL=ON -DCMAKE_BUILD_WITH_INSTALL_RPATH:BOOL=OFF")
+endif ()
#-----------------------------------------------------------------------------
-set(CTEST_CMAKE_COMMAND "\"${CMAKE_COMMAND}\"")
+set (CTEST_CMAKE_COMMAND "\"${CMAKE_COMMAND}\"")
## --------------------------
-if(CTEST_USE_TAR_SOURCE)
+if (CTEST_USE_TAR_SOURCE)
## Uncompress source if tar or zip file provided
## --------------------------
- if(WIN32)
- message(STATUS "extracting... [${CMAKE_EXECUTABLE_NAME} -E tar -xvf ${CTEST_USE_TAR_SOURCE}.zip]")
- execute_process(COMMAND ${CMAKE_EXECUTABLE_NAME} -E tar -xvf ${CTEST_DASHBOARD_ROOT}\\${CTEST_USE_TAR_SOURCE}.zip RESULT_VARIABLE rv)
- else()
- message(STATUS "extracting... [${CMAKE_EXECUTABLE_NAME} -E tar -xvf ${CTEST_USE_TAR_SOURCE}.tar]")
- execute_process(COMMAND ${CMAKE_EXECUTABLE_NAME} -E tar -xvf ${CTEST_DASHBOARD_ROOT}/${CTEST_USE_TAR_SOURCE}.tar RESULT_VARIABLE rv)
- endif()
-
- if(NOT rv EQUAL 0)
- message(STATUS "extracting... [error-(${rv}) clean up]")
- file(REMOVE_RECURSE "${CTEST_SOURCE_DIRECTORY}")
- message(FATAL_ERROR "error: extract of ${CTEST_SOURCE_NAME} failed")
- endif()
-endif(CTEST_USE_TAR_SOURCE)
+ if (WIN32)
+ message (STATUS "extracting... [${CMAKE_EXECUTABLE_NAME} -E tar -xvf ${CTEST_USE_TAR_SOURCE}.zip]")
+ execute_process (COMMAND ${CMAKE_EXECUTABLE_NAME} -E tar -xvf ${CTEST_DASHBOARD_ROOT}\\${CTEST_USE_TAR_SOURCE}.zip RESULT_VARIABLE rv)
+ else ()
+ message (STATUS "extracting... [${CMAKE_EXECUTABLE_NAME} -E tar -xvf ${CTEST_USE_TAR_SOURCE}.tar]")
+ execute_process (COMMAND ${CMAKE_EXECUTABLE_NAME} -E tar -xvf ${CTEST_DASHBOARD_ROOT}/${CTEST_USE_TAR_SOURCE}.tar RESULT_VARIABLE rv)
+ endif ()
+
+ if (NOT rv EQUAL 0)
+ message (STATUS "extracting... [error-(${rv}) clean up]")
+ file (REMOVE_RECURSE "${CTEST_SOURCE_DIRECTORY}")
+ message (FATAL_ERROR "error: extract of ${CTEST_SOURCE_NAME} failed")
+ endif ()
+endif ()
#-----------------------------------------------------------------------------
## Clear the build directory
## --------------------------
-set(CTEST_START_WITH_EMPTY_BINARY_DIRECTORY TRUE)
+set (CTEST_START_WITH_EMPTY_BINARY_DIRECTORY TRUE)
if (EXISTS "${CTEST_BINARY_DIRECTORY}" AND IS_DIRECTORY "${CTEST_BINARY_DIRECTORY}")
- ctest_empty_binary_directory(${CTEST_BINARY_DIRECTORY})
+ ctest_empty_binary_directory (${CTEST_BINARY_DIRECTORY})
else ()
- file(MAKE_DIRECTORY "${CTEST_BINARY_DIRECTORY}")
+ file (MAKE_DIRECTORY "${CTEST_BINARY_DIRECTORY}")
endif ()
# Use multiple CPU cores to build
-include(ProcessorCount)
-ProcessorCount(N)
-if(NOT N EQUAL 0)
- if(NOT WIN32)
- set(CTEST_BUILD_FLAGS -j${N})
- endif()
- set(ctest_test_args ${ctest_test_args} PARALLEL_LEVEL ${N})
-endif()
+include (ProcessorCount)
+ProcessorCount (N)
+if (NOT N EQUAL 0)
+ if (NOT WIN32)
+ set (CTEST_BUILD_FLAGS -j${N})
+ endif ()
+ set (ctest_test_args ${ctest_test_args} PARALLEL_LEVEL ${N})
+endif ()
set (CTEST_CONFIGURE_COMMAND
"${CTEST_CMAKE_COMMAND} -C \"${CTEST_SOURCE_DIRECTORY}/config/cmake/cacheinit.cmake\" -DCMAKE_BUILD_TYPE:STRING=${CTEST_CONFIGURATION_TYPE} ${BUILD_OPTIONS} \"-G${CTEST_CMAKE_GENERATOR}\" \"${CTEST_SOURCE_DIRECTORY}\""
)
#-----------------------------------------------------------------------------
## -- set output to english
-set($ENV{LC_MESSAGES} "en_EN")
+set ($ENV{LC_MESSAGES} "en_EN")
#-----------------------------------------------------------------------------
-configure_file(${CTEST_SOURCE_DIRECTORY}/config/cmake/CTestCustom.cmake ${CTEST_BINARY_DIRECTORY}/CTestCustom.cmake)
+configure_file (${CTEST_SOURCE_DIRECTORY}/config/cmake/CTestCustom.cmake ${CTEST_BINARY_DIRECTORY}/CTestCustom.cmake)
ctest_read_custom_files ("${CTEST_BINARY_DIRECTORY}")
## NORMAL process
## --------------------------
ctest_start (Experimental)
ctest_configure (BUILD "${CTEST_BINARY_DIRECTORY}")
-if(LOCAL_SUBMIT)
+if (LOCAL_SUBMIT)
ctest_submit (PARTS Configure Notes)
-endif()
+endif ()
ctest_build (BUILD "${CTEST_BINARY_DIRECTORY}" APPEND)
-if(LOCAL_SUBMIT)
+if (LOCAL_SUBMIT)
ctest_submit (PARTS Build)
-endif()
+endif ()
ctest_test (BUILD "${CTEST_BINARY_DIRECTORY}" APPEND ${ctest_test_args} RETURN_VALUE res)
-if(LOCAL_SUBMIT)
+if (LOCAL_SUBMIT)
ctest_submit (PARTS Test)
-endif()
-if(res GREATER 0)
+endif ()
+if (res GREATER 0)
message (FATAL_ERROR "tests FAILED")
-endif()
+endif ()
#-----------------------------------------------------------------------------
##############################################################################################################
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 55de5ea..178c954 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_SRC C CXX)
#-----------------------------------------------------------------------------
@@ -90,8 +90,10 @@ set (H5C_SOURCES
${HDF5_SRC_DIR}/H5C.c
${HDF5_SRC_DIR}/H5Cdbg.c
${HDF5_SRC_DIR}/H5Cepoch.c
+ ${HDF5_SRC_DIR}/H5Cimage.c
${HDF5_SRC_DIR}/H5Clog.c
${HDF5_SRC_DIR}/H5Cmpio.c
+ ${HDF5_SRC_DIR}/H5Cprefetched.c
${HDF5_SRC_DIR}/H5Cquery.c
${HDF5_SRC_DIR}/H5Ctag.c
${HDF5_SRC_DIR}/H5Ctest.c
@@ -193,6 +195,7 @@ set (H5F_SOURCES
${HDF5_SRC_DIR}/H5Fmpi.c
${HDF5_SRC_DIR}/H5Fquery.c
${HDF5_SRC_DIR}/H5Fsfile.c
+ ${HDF5_SRC_DIR}/H5Fspace.c
${HDF5_SRC_DIR}/H5Fsuper.c
${HDF5_SRC_DIR}/H5Fsuper_cache.c
${HDF5_SRC_DIR}/H5Ftest.c
@@ -439,6 +442,7 @@ set (H5O_SOURCES
${HDF5_SRC_DIR}/H5Obogus.c
${HDF5_SRC_DIR}/H5Obtreek.c
${HDF5_SRC_DIR}/H5Ocache.c
+ ${HDF5_SRC_DIR}/H5Ocache_image.c
${HDF5_SRC_DIR}/H5Ochunk.c
${HDF5_SRC_DIR}/H5Ocont.c
${HDF5_SRC_DIR}/H5Ocopy.c
@@ -501,12 +505,22 @@ set (H5P_HDRS
)
IDE_GENERATED_PROPERTIES ("H5P" "${H5P_HDRS}" "${H5P_SOURCES}" )
+set (H5PB_SOURCES
+ ${HDF5_SRC_DIR}/H5PB.c
+)
+
+set (H5PB_HDRS
+ ${HDF5_SRC_DIR}/H5PBpkg.h
+)
+IDE_GENERATED_PROPERTIES ("H5PB" "${H5PB_HDRS}" "${H5PB_SOURCES}" )
+
set (H5PL_SOURCES
${HDF5_SRC_DIR}/H5PL.c
)
set (H5PL_HDRS
${HDF5_SRC_DIR}/H5PLextern.h
+ ${HDF5_SRC_DIR}/H5PLpkg.h
${HDF5_SRC_DIR}/H5PLpublic.h
)
IDE_GENERATED_PROPERTIES ("H5PL" "${H5PL_HDRS}" "${H5PL_SOURCES}" )
@@ -658,7 +672,7 @@ set (H5Z_SOURCES
if (H5_ZLIB_HEADER)
SET_PROPERTY(SOURCE ${HDF5_SRC_DIR}/H5Zdeflate.c PROPERTY
COMPILE_DEFINITIONS H5_ZLIB_HEADER="${H5_ZLIB_HEADER}")
-endif (H5_ZLIB_HEADER)
+endif ()
set (H5Z_HDRS
@@ -696,6 +710,7 @@ set (common_SRCS
${H5MP_SOURCES}
${H5O_SOURCES}
${H5P_SOURCES}
+ ${H5PB_SOURCES}
${H5PL_SOURCES}
${H5R_SOURCES}
${H5UC_SOURCES}
@@ -736,6 +751,7 @@ set (H5_PUBLIC_HEADERS
${H5MP_HDRS}
${H5O_HDRS}
${H5P_HDRS}
+ ${H5PB_HDRS}
${H5PL_HDRS}
${H5R_HDRS}
${H5S_HDRS}
@@ -774,6 +790,7 @@ set (H5_PRIVATE_HEADERS
${HDF5_SRC_DIR}/H5MPprivate.h
${HDF5_SRC_DIR}/H5Oprivate.h
${HDF5_SRC_DIR}/H5Pprivate.h
+ ${HDF5_SRC_DIR}/H5PBprivate.h
${HDF5_SRC_DIR}/H5PLprivate.h
${HDF5_SRC_DIR}/H5UCprivate.h
${HDF5_SRC_DIR}/H5Rprivate.h
@@ -799,7 +816,7 @@ set (H5_GENERATED_HEADERS
${HDF5_SRC_DIR}/H5overflow.h
)
-option (HDF5_GENERATE_HEADERS "Rebuild Generated Files" OFF)
+option (HDF5_GENERATE_HEADERS "Rebuild Generated Files" ON)
if (HDF5_GENERATE_HEADERS)
set_source_files_properties(${H5_GENERATED_HEADERS} PROPERTIES GENERATED TRUE)
find_package (Perl)
@@ -816,10 +833,10 @@ if (HDF5_GENERATE_HEADERS)
COMMAND ${PERL_EXECUTABLE} ${HDF5_SOURCE_DIR}/bin/make_overflow ${HDF5_SRC_DIR}/H5overflow.txt OUTPUT_VARIABLE SCRIPT_OUTPUT
)
message(STATUS ${SCRIPT_OUTPUT})
- else (PERL_FOUND)
+ else ()
message (STATUS "Cannot generate headers - perl not found")
- endif (PERL_FOUND)
-endif (HDF5_GENERATE_HEADERS)
+ endif ()
+endif ()
#-----------------------------------------------------------------------------
# Setup the H5Detect utility which generates H5Tinit with platform
@@ -829,7 +846,7 @@ add_executable (H5detect ${HDF5_SRC_DIR}/H5detect.c)
TARGET_C_PROPERTIES (H5detect STATIC " " " ")
if (MSVC OR MINGW)
target_link_libraries (H5detect "ws2_32.lib")
-endif (MSVC OR MINGW)
+endif ()
set (CMD $<TARGET_FILE:H5detect>)
add_custom_command (
@@ -843,7 +860,7 @@ add_executable (H5make_libsettings ${HDF5_SRC_DIR}/H5make_libsettings.c)
TARGET_C_PROPERTIES (H5make_libsettings STATIC " " " ")
if (MSVC OR MINGW)
target_link_libraries (H5make_libsettings "ws2_32.lib")
-endif (MSVC OR MINGW)
+endif ()
set (CMD $<TARGET_FILE:H5make_libsettings>)
add_custom_command (
@@ -864,10 +881,10 @@ TARGET_C_PROPERTIES (${HDF5_LIB_TARGET} STATIC " " " ")
target_link_libraries (${HDF5_LIB_TARGET} ${LINK_LIBS})
if (NOT WIN32)
target_link_libraries (${HDF5_LIB_TARGET} dl)
-endif (NOT WIN32)
+endif ()
if (H5_HAVE_PARALLEL AND MPI_C_FOUND)
target_link_libraries (${HDF5_LIB_TARGET} ${MPI_C_LIBRARIES})
-endif (H5_HAVE_PARALLEL AND MPI_C_FOUND)
+endif ()
set_global_variable (HDF5_LIBRARIES_TO_EXPORT ${HDF5_LIB_TARGET})
H5_SET_LIB_OPTIONS (${HDF5_LIB_TARGET} ${HDF5_LIB_NAME} STATIC)
set_target_properties (${HDF5_LIB_TARGET} PROPERTIES
@@ -881,7 +898,7 @@ if (HDF5_ENABLE_DEBUG_APIS)
COMPILE_DEFINITIONS
"H5Z_DEBUG;H5T_DEBUG;H5ST_DEBUG;H5S_DEBUG;H5O_DEBUG;H5I_DEBUG;H5HL_DEBUG;H5F_DEBUG;H5D_DEBUG;H5B2_DEBUG;H5AC_DEBUG"
)
-endif (HDF5_ENABLE_DEBUG_APIS)
+endif ()
set (install_targets ${HDF5_LIB_TARGET})
if (BUILD_SHARED_LIBS)
@@ -908,10 +925,10 @@ if (BUILD_SHARED_LIBS)
target_link_libraries (${HDF5_LIBSH_TARGET} ${LINK_SHARED_LIBS})
if (NOT WIN32)
target_link_libraries (${HDF5_LIBSH_TARGET} dl)
- endif (NOT WIN32)
+ endif ()
if (H5_HAVE_PARALLEL AND MPI_C_FOUND)
target_link_libraries (${HDF5_LIBSH_TARGET} ${MPI_C_LIBRARIES})
- endif (H5_HAVE_PARALLEL AND MPI_C_FOUND)
+ endif ()
set_global_variable (HDF5_LIBRARIES_TO_EXPORT "${HDF5_LIBRARIES_TO_EXPORT};${HDF5_LIBSH_TARGET}")
H5_SET_LIB_OPTIONS (${HDF5_LIBSH_TARGET} ${HDF5_LIB_NAME} SHARED ${HDF5_PACKAGE_SOVERSION})
set_target_properties (${HDF5_LIBSH_TARGET} PROPERTIES
@@ -926,16 +943,16 @@ if (BUILD_SHARED_LIBS)
"H5_HAVE_THREADSAFE"
)
target_link_libraries (${HDF5_LIBSH_TARGET} Threads::Threads)
- endif (HDF5_ENABLE_THREADSAFE)
+ endif ()
if (HDF5_ENABLE_DEBUG_APIS)
set_property (TARGET ${HDF5_LIBSH_TARGET}
APPEND PROPERTY COMPILE_DEFINITIONS
"H5Z_DEBUG;H5T_DEBUG;H5ST_DEBUG;H5S_DEBUG;H5O_DEBUG;H5I_DEBUG;H5HL_DEBUG;H5F_DEBUG;H5D_DEBUG;H5B2_DEBUG;H5AC_DEBUG"
)
- endif (HDF5_ENABLE_DEBUG_APIS)
+ endif ()
set (install_targets ${install_targets} ${HDF5_LIBSH_TARGET})
-endif (BUILD_SHARED_LIBS)
+endif ()
#-----------------------------------------------------------------------------
# Add file(s) to CMake Install
@@ -950,7 +967,7 @@ if (NOT HDF5_INSTALL_NO_DEVELOPMENT)
COMPONENT
headers
)
-endif (NOT HDF5_INSTALL_NO_DEVELOPMENT)
+endif ()
#-----------------------------------------------------------------------------
# Add Target(s) to CMake Install for import into other projects
@@ -958,7 +975,8 @@ endif (NOT HDF5_INSTALL_NO_DEVELOPMENT)
if (HDF5_EXPORTED_TARGETS)
if (BUILD_SHARED_LIBS)
INSTALL_TARGET_PDB (${HDF5_LIBSH_TARGET} ${HDF5_INSTALL_BIN_DIR} libraries)
- endif (BUILD_SHARED_LIBS)
+ endif ()
+ INSTALL_TARGET_PDB (${HDF5_LIB_TARGET} ${HDF5_INSTALL_BIN_DIR} libraries)
install (
TARGETS
@@ -971,4 +989,4 @@ if (HDF5_EXPORTED_TARGETS)
FRAMEWORK DESTINATION ${HDF5_INSTALL_FWRK_DIR} COMPONENT libraries
INCLUDES DESTINATION include
)
-endif (HDF5_EXPORTED_TARGETS)
+endif ()
diff --git a/src/COPYING b/src/COPYING
index 6903daf..6497ace 100644
--- a/src/COPYING
+++ b/src/COPYING
@@ -5,12 +5,9 @@
The files and subdirectories in this directory are part of HDF5.
The full HDF5 copyright notice, including terms governing use,
- modification, and redistribution, is contained in the files COPYING
- and Copyright.html. COPYING can be found at the root of the source
- code distribution tree; Copyright.html can be found at the root
- level of an installed copy of the electronic HDF5 document set and
- is linked from the top-level documents page. It can also be found
- at http://www.hdfgroup.org/HDF5/doc/Copyright.html. If you do not
- have access to either file, you may request a copy from
+ modification, and redistribution, is contained in the COPYING file
+ which can be found at the root of the source code distribution tree
+ or in https://support.hdfgroup.org/ftp/HDF5/releases. If you do
+ not have access to either file, you may request a copy from
help@hdfgroup.org.
diff --git a/src/H5.c b/src/H5.c
index 41fb3ba..1068fc6 100644
--- a/src/H5.c
+++ b/src/H5.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/****************/
@@ -944,7 +942,7 @@ H5allocate_memory(size_t size, hbool_t clear)
{
void *ret_value = NULL;
- FUNC_ENTER_API_NOINIT;
+ FUNC_ENTER_API_NOINIT
H5TRACE2("*x", "zb", size, clear);
if(clear)
@@ -985,7 +983,7 @@ H5resize_memory(void *mem, size_t size)
{
void *ret_value = NULL;
- FUNC_ENTER_API_NOINIT;
+ FUNC_ENTER_API_NOINIT
H5TRACE2("*x", "*xz", mem, size);
ret_value = H5MM_realloc(mem, size);
@@ -1009,7 +1007,7 @@ H5resize_memory(void *mem, size_t size)
herr_t
H5free_memory(void *mem)
{
- FUNC_ENTER_API_NOINIT;
+ FUNC_ENTER_API_NOINIT
H5TRACE1("e", "*x", mem);
/* At this time, it is impossible for this to fail. */
diff --git a/src/H5A.c b/src/H5A.c
index e5a7ae5..cf48232 100644
--- a/src/H5A.c
+++ b/src/H5A.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/****************/
diff --git a/src/H5AC.c b/src/H5AC.c
index f68c7a9..4223158 100644
--- a/src/H5AC.c
+++ b/src/H5AC.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -102,41 +100,48 @@ hid_t H5AC_rawdata_dxpl_id = (-1);
hbool_t H5_coll_api_sanity_check_g = false;
#endif /* H5_HAVE_PARALLEL */
+
/*******************/
/* Local Variables */
/*******************/
-static const char *H5AC_entry_type_names[H5AC_NTYPES] =
-{
- "B-tree nodes",
- "symbol table nodes",
- "local heap prefixes",
- "local heap data blocks",
- "global heaps",
- "object headers",
- "object header chunks",
- "v2 B-tree headers",
- "v2 B-tree internal nodes",
- "v2 B-tree leaf nodes",
- "fractal heap headers",
- "fractal heap direct blocks",
- "fractal heap indirect blocks",
- "free space headers",
- "free space sections",
- "shared OH message master table",
- "shared OH message index",
- "extensible array headers",
- "extensible array index blocks",
- "extensible array super blocks",
- "extensible array data blocks",
- "extensible array data block pages",
- "fixed array headers",
- "fixed array data block",
- "fixed array data block pages",
- "superblock",
- "driver info",
- "proxy entry",
- "test entry" /* for testing only -- not used for actual files */
+/* Metadata entry class list */
+
+/* Remember to add new type ID to the H5AC_type_t enum in H5ACprivate.h when
+ * adding a new class.
+ */
+
+static const H5AC_class_t *const H5AC_class_s[] = {
+ H5AC_BT, /* ( 0) B-tree nodes */
+ H5AC_SNODE, /* ( 1) symbol table nodes */
+ H5AC_LHEAP_PRFX, /* ( 2) local heap prefix */
+ H5AC_LHEAP_DBLK, /* ( 3) local heap data block */
+ H5AC_GHEAP, /* ( 4) global heap */
+ H5AC_OHDR, /* ( 5) object header */
+ H5AC_OHDR_CHK, /* ( 6) object header chunk */
+ H5AC_BT2_HDR, /* ( 7) v2 B-tree header */
+ H5AC_BT2_INT, /* ( 8) v2 B-tree internal node */
+ H5AC_BT2_LEAF, /* ( 9) v2 B-tree leaf node */
+ H5AC_FHEAP_HDR, /* (10) fractal heap header */
+ H5AC_FHEAP_DBLOCK, /* (11) fractal heap direct block */
+ H5AC_FHEAP_IBLOCK, /* (12) fractal heap indirect block */
+ H5AC_FSPACE_HDR, /* (13) free space header */
+ H5AC_FSPACE_SINFO, /* (14) free space sections */
+ H5AC_SOHM_TABLE, /* (15) shared object header message master table */
+ H5AC_SOHM_LIST, /* (16) shared message index stored as a list */
+ H5AC_EARRAY_HDR, /* (17) extensible array header */
+ H5AC_EARRAY_IBLOCK, /* (18) extensible array index block */
+ H5AC_EARRAY_SBLOCK, /* (19) extensible array super block */
+ H5AC_EARRAY_DBLOCK, /* (20) extensible array data block */
+ H5AC_EARRAY_DBLK_PAGE, /* (21) extensible array data block page */
+ H5AC_FARRAY_HDR, /* (22) fixed array header */
+ H5AC_FARRAY_DBLOCK, /* (23) fixed array data block */
+ H5AC_FARRAY_DBLK_PAGE, /* (24) fixed array data block page */
+ H5AC_SUPERBLOCK, /* (25) file superblock */
+ H5AC_DRVRINFO, /* (26) driver info block (supplements superblock) */
+ H5AC_EPOCH_MARKER, /* (27) epoch marker - always internal to cache */
+ H5AC_PROXY_ENTRY, /* (28) cache entry proxy */
+ H5AC_PREFETCHED_ENTRY /* (29) prefetched entry - always internal to cache */
};
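
The name table above is replaced by a table of client class pointers whose order must match the H5AC_type_t enum in H5ACprivate.h; the patch enforces this with the HDcompile_assert on NELMTS(H5AC_class_s) further down. A small standalone sketch of the same table-versus-enum pattern, using invented names rather than HDF5 internals:

    #include <stdio.h>

    /* Invented example; not HDF5 code. */
    typedef enum { KIND_A, KIND_B, KIND_C, KIND_NKINDS } kind_t;

    static const char *const kind_names[] = { "A", "B", "C" };

    /* Compile-time check that the table and the enum stay in sync, mirroring
     * HDcompile_assert(NELMTS(H5AC_class_s) == H5AC_NTYPES). */
    #define NELMTS(x) (sizeof(x) / sizeof((x)[0]))
    typedef char table_matches_enum[(NELMTS(kind_names) == KIND_NKINDS) ? 1 : -1];

    int main(void)
    {
        printf("%s\n", kind_names[KIND_B]);     /* prints "B" */
        return 0;
    }
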
@@ -346,6 +351,44 @@ H5AC_term_package(void)
/*-------------------------------------------------------------------------
+ *
+ * Function: H5AC_cache_image_pending()
+ *
+ * Purpose: Debugging function that tests to see if the load of a
+ * metadata cache image is pending (i.e. will be executed
+ * on the next protect or insert)
+ *
+ * Returns TRUE if a cache image load is pending, and FALSE
+ * if not. Throws an assertion failure on error.
+ *
+ * Return: TRUE if a cache image load is pending, and FALSE otherwise.
+ *
+ * Programmer: John Mainzer, 1/10/17
+ *
+ * Changes: None.
+ *
+ *-------------------------------------------------------------------------
+ */
+hbool_t
+H5AC_cache_image_pending(const H5F_t *f)
+{
+ H5C_t *cache_ptr;
+ hbool_t ret_value = FALSE; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+
+ ret_value = H5C_cache_image_pending(cache_ptr);
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC_cache_image_pending() */
+
+
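
A hedged sketch of how library-internal code might consult this predicate; H5F_t, hbool_t, and the function itself come from HDF5's private headers, while the surrounding logic is invented for illustration:

    /* Sketch only: defer cache-touching work while a cache image load is
     * still pending (i.e. will happen on the next protect or insert). */
    if(H5AC_cache_image_pending(f)) {
        /* image not yet loaded -- postpone the operation */
    } /* end if */
    else {
        /* safe to interact with the metadata cache normally */
    } /* end else */
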
+/*-------------------------------------------------------------------------
* Function: H5AC_create
*
* Purpose: Initialize the cache just after a file is opened. The
@@ -364,12 +407,13 @@ H5AC_term_package(void)
*-------------------------------------------------------------------------
*/
herr_t
-H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr)
+H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr, H5AC_cache_image_config_t * image_config_ptr)
{
#ifdef H5_HAVE_PARALLEL
char prefix[H5C__PREFIX_LEN] = "";
H5AC_aux_t * aux_ptr = NULL;
#endif /* H5_HAVE_PARALLEL */
+ struct H5C_cache_image_ctl_t int_ci_config = H5C__DEFAULT_CACHE_IMAGE_CTL;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -378,11 +422,16 @@ H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr)
HDassert(f);
HDassert(NULL == f->shared->cache);
HDassert(config_ptr != NULL) ;
- HDcompile_assert(NELMTS(H5AC_entry_type_names) == H5AC_NTYPES);
+ HDassert(image_config_ptr != NULL) ;
+ HDassert(image_config_ptr->version == H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION);
+ HDcompile_assert(NELMTS(H5AC_class_s) == H5AC_NTYPES);
HDcompile_assert(H5C__MAX_NUM_TYPE_IDS == H5AC_NTYPES);
+ /* Validate configurations */
if(H5AC_validate_config(config_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad cache configuration")
+ if(H5AC_validate_cache_image_config(image_config_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad cache image configuration")
#ifdef H5_HAVE_PARALLEL
if(H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI)) {
@@ -400,7 +449,7 @@ H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr)
HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't get mpi size")
if(NULL == (aux_ptr = H5FL_CALLOC(H5AC_aux_t)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "Can't allocate H5AC auxilary structure.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "Can't allocate H5AC auxilary structure")
aux_ptr->magic = H5AC__H5AC_AUX_T_MAGIC;
aux_ptr->mpi_comm = mpi_comm;
@@ -424,15 +473,16 @@ H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr)
aux_ptr->candidate_slist_ptr = NULL;
aux_ptr->write_done = NULL;
aux_ptr->sync_point_done = NULL;
+ aux_ptr->p0_image_len = 0;
sprintf(prefix, "%d:", mpi_rank);
if(mpi_rank == 0) {
if(NULL == (aux_ptr->d_slist_ptr = H5SL_create(H5SL_TYPE_HADDR, NULL)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "can't create dirtied entry list.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "can't create dirtied entry list")
if(NULL == (aux_ptr->c_slist_ptr = H5SL_create(H5SL_TYPE_HADDR, NULL)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "can't create cleaned entry list.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "can't create cleaned entry list")
} /* end if */
/* construct the candidate slist for all processes.
@@ -440,25 +490,25 @@ H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr)
* will use it in the case of a flush.
*/
if(NULL == (aux_ptr->candidate_slist_ptr = H5SL_create(H5SL_TYPE_HADDR, NULL)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "can't create candidate entry list.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "can't create candidate entry list")
if(aux_ptr != NULL)
if(aux_ptr->mpi_rank == 0)
f->shared->cache = H5C_create(H5AC__DEFAULT_MAX_CACHE_SIZE,
H5AC__DEFAULT_MIN_CLEAN_SIZE, (H5AC_NTYPES - 1),
- (const char **)H5AC_entry_type_names,
+ H5AC_class_s,
H5AC__check_if_write_permitted, TRUE, H5AC__log_flushed_entry,
(void *)aux_ptr);
else
f->shared->cache = H5C_create(H5AC__DEFAULT_MAX_CACHE_SIZE,
H5AC__DEFAULT_MIN_CLEAN_SIZE, (H5AC_NTYPES - 1),
- (const char **)H5AC_entry_type_names,
+ H5AC_class_s,
H5AC__check_if_write_permitted, TRUE, NULL,
(void *)aux_ptr);
else
f->shared->cache = H5C_create(H5AC__DEFAULT_MAX_CACHE_SIZE,
H5AC__DEFAULT_MIN_CLEAN_SIZE, (H5AC_NTYPES - 1),
- (const char **)H5AC_entry_type_names,
+ H5AC_class_s,
H5AC__check_if_write_permitted, TRUE, NULL, NULL);
} /* end if */
else {
@@ -469,7 +519,7 @@ H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr)
*/
f->shared->cache = H5C_create(H5AC__DEFAULT_MAX_CACHE_SIZE,
H5AC__DEFAULT_MIN_CLEAN_SIZE, (H5AC_NTYPES - 1),
- (const char **)H5AC_entry_type_names,
+ H5AC_class_s,
H5AC__check_if_write_permitted, TRUE, NULL, NULL);
#ifdef H5_HAVE_PARALLEL
} /* end else */
@@ -487,7 +537,7 @@ H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr)
/* Turn on metadata cache logging, if being used */
if(H5F_USE_MDC_LOGGING(f)) {
if(H5C_set_up_logging(f->shared->cache, H5F_MDC_LOG_LOCATION(f), H5F_START_MDC_LOG_ON_ACCESS(f)) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "mdc logging setup failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINIT, FAIL, "mdc logging setup failed")
/* Write the log header regardless of current logging status */
if(H5AC__write_create_cache_log_msg(f->shared->cache) < 0)
@@ -496,7 +546,20 @@ H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr)
/* Set the cache parameters */
if(H5AC_set_cache_auto_resize_config(f->shared->cache, config_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "auto resize configuration failed")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, "auto resize configuration failed")
+
+ /* Don't need to get the current H5C image config here since the
+ * cache has just been created, and thus f->shared->cache->image_ctl
+ * must still be set to its initial value (H5C__DEFAULT_CACHE_IMAGE_CTL).
+ * Note that this is not true as soon as control returns to the application
+ * program, as some test code modifies f->shared->cache->image_ctl.
+ */
+ int_ci_config.version = image_config_ptr->version;
+ int_ci_config.generate_image = image_config_ptr->generate_image;
+ int_ci_config.save_resize_status = image_config_ptr->save_resize_status;
+ int_ci_config.entry_ageout = image_config_ptr->entry_ageout;
+ if(H5C_set_cache_image_config(f, f->shared->cache, &int_ci_config) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, "auto resize configuration failed")
done:
#ifdef H5_HAVE_PARALLEL
@@ -546,6 +609,7 @@ H5AC_dest(H5F_t *f, hid_t dxpl_id)
/* Sanity check */
HDassert(f);
+ HDassert(f->shared);
HDassert(f->shared->cache);
#if H5AC_DUMP_STATS_ON_CLOSE
@@ -555,7 +619,7 @@ H5AC_dest(H5F_t *f, hid_t dxpl_id)
#if H5AC__TRACE_FILE_ENABLED
if(H5AC__close_trace_file(f->shared->cache) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC__close_trace_file() failed.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC__close_trace_file() failed")
#endif /* H5AC__TRACE_FILE_ENABLED */
if(H5F_USE_MDC_LOGGING(f)) {
@@ -569,16 +633,24 @@ H5AC_dest(H5F_t *f, hid_t dxpl_id)
#ifdef H5_HAVE_PARALLEL
/* destroying the cache, so clear all collective entries */
if(H5C_clear_coll_entries(f->shared->cache, FALSE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5C_clear_coll_entries() failed.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5C_clear_coll_entries() failed")
aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(f->shared->cache);
if(aux_ptr)
/* Sanity check */
HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
- /* Attempt to flush all entries from rank 0 & Bcast clean list to other ranks */
- if(H5AC__flush_entries(f, dxpl_id) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush.")
+ /* If the file was opened R/W, attempt to flush all entries
+ * from rank 0 & Bcast clean list to other ranks.
+ *
+ * Must not flush in the R/O case, as this will trigger the
+ * free space manager settle routines.
+ */
+ if ( ( H5F_ACC_RDWR & H5F_INTENT(f) ) &&
+ ( H5AC__flush_entries(f, dxpl_id) < 0 ) )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush")
+
#endif /* H5_HAVE_PARALLEL */
/* Destroy the cache */
@@ -588,12 +660,18 @@ H5AC_dest(H5F_t *f, hid_t dxpl_id)
#ifdef H5_HAVE_PARALLEL
if(aux_ptr != NULL) {
- if(aux_ptr->d_slist_ptr != NULL)
+ if(aux_ptr->d_slist_ptr != NULL) {
+ HDassert(H5SL_count(aux_ptr->d_slist_ptr) == 0);
H5SL_close(aux_ptr->d_slist_ptr);
- if(aux_ptr->c_slist_ptr != NULL)
+ } /* end if */
+ if(aux_ptr->c_slist_ptr != NULL) {
+ HDassert(H5SL_count(aux_ptr->c_slist_ptr) == 0);
H5SL_close(aux_ptr->c_slist_ptr);
- if(aux_ptr->candidate_slist_ptr != NULL)
+ } /* end if */
+ if(aux_ptr->candidate_slist_ptr != NULL) {
+ HDassert(H5SL_count(aux_ptr->candidate_slist_ptr) == 0);
H5SL_close(aux_ptr->candidate_slist_ptr);
+ } /* end if */
aux_ptr->magic = 0;
aux_ptr = H5FL_FREE(H5AC_aux_t, aux_ptr);
} /* end if */
@@ -704,7 +782,7 @@ H5AC_expunge_entry(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type,
#endif /* H5AC__TRACE_FILE_ENABLED */
if(H5C_expunge_entry(f, dxpl_id, type, addr, flags) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "H5C_expunge_entry() failed.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "H5C_expunge_entry() failed")
done:
#if H5AC__TRACE_FILE_ENABLED
@@ -774,17 +852,17 @@ H5AC_flush(H5F_t *f, hid_t dxpl_id)
#ifdef H5_HAVE_PARALLEL
/* flushing the cache, so clear all collective entries */
if(H5C_clear_coll_entries(f->shared->cache, FALSE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5C_clear_coll_entries() failed.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "H5C_clear_coll_entries() failed")
/* Attempt to flush all entries from rank 0 & Bcast clean list to other ranks */
if(H5AC__flush_entries(f, dxpl_id) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush")
#endif /* H5_HAVE_PARALLEL */
/* Flush the cache */
/* (Again, in parallel - writes out the superblock) */
if(H5C_flush_cache(f, dxpl_id, H5AC__NO_FLAGS_SET) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush cache.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush cache")
done:
#if H5AC__TRACE_FILE_ENABLED
@@ -802,6 +880,46 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5AC_force_cache_image_load()
+ *
+ * Purpose: On rare occasions, it is necessary to run
+ * H5MF_tidy_self_referential_fsm_hack() prior to the first
+ * metadata cache access. This is a problem because, if there is a
+ * cache image at the end of the file, that routine will
+ * discard it.
+ *
+ * We solve this issue by calling this function, which will
+ * load the cache image and then call
+ * H5MF_tidy_self_referential_fsm_hack() to discard it.
+ *
+ * Return: SUCCEED on success, and FAIL on failure.
+ *
+ * Programmer: John Mainzer
+ * 1/11/17
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC_force_cache_image_load(H5F_t *f, hid_t dxpl_id)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(f->shared->cache);
+
+ if(H5C_force_cache_image_load(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, FAIL, "Can't load cache image")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC_force_cache_image_load() */
+
+
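
A hedged sketch of the call pattern the purpose comment describes, as it might appear in library-internal code; f and dxpl_id are assumed to be in scope, and H5MF_tidy_self_referential_fsm_hack() is referenced only in a comment since its signature is not shown in this patch:

    /* Sketch only: load any pending cache image first, so that a following
     * call to H5MF_tidy_self_referential_fsm_hack() does not discard it. */
    if(H5AC_force_cache_image_load(f, dxpl_id) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, FAIL, "can't force cache image load")
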
+/*-------------------------------------------------------------------------
* Function: H5AC_get_entry_status
*
* Purpose: Given a file address, determine whether the metadata
@@ -832,16 +950,17 @@ H5AC_get_entry_status(const H5F_t *f, haddr_t addr, unsigned *status)
hbool_t is_corked;
hbool_t is_flush_dep_child; /* Entry @ addr is in the cache and is a flush dependency child */
hbool_t is_flush_dep_parent; /* Entry @ addr is in the cache and is a flush dependency parent */
+ hbool_t image_is_up_to_date; /* Entry @ addr is in the cache and has an up to date image */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
if((f == NULL) || (!H5F_addr_defined(addr)) || (status == NULL))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad param(s) on entry.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad param(s) on entry")
if(H5C_get_entry_status(f, addr, NULL, &in_cache, &is_dirty,
- &is_protected, &is_pinned, &is_corked, &is_flush_dep_parent, &is_flush_dep_child) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_entry_status() failed.")
+ &is_protected, &is_pinned, &is_corked, &is_flush_dep_parent, &is_flush_dep_child, &image_is_up_to_date) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_entry_status() failed")
if(in_cache) {
*status |= H5AC_ES__IN_CACHE;
@@ -857,6 +976,8 @@ H5AC_get_entry_status(const H5F_t *f, haddr_t addr, unsigned *status)
*status |= H5AC_ES__IS_FLUSH_DEP_PARENT;
if(is_flush_dep_child)
*status |= H5AC_ES__IS_FLUSH_DEP_CHILD;
+ if(image_is_up_to_date)
+ *status |= H5AC_ES__IMAGE_IS_UP_TO_DATE;
} /* end if */
else
*status = 0;
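
The new H5AC_ES__IMAGE_IS_UP_TO_DATE bit can be tested alongside the existing status flags; a hedged sketch, with f and addr assumed to be in scope:

    unsigned status = 0;    /* sketch only: entry status query */

    if(H5AC_get_entry_status(f, addr, &status) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't get entry status")

    if((status & H5AC_ES__IN_CACHE) && (status & H5AC_ES__IMAGE_IS_UP_TO_DATE)) {
        /* the entry's serialized image matches its in-core state */
    } /* end if */
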
@@ -953,7 +1074,7 @@ H5AC_insert_entry(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t add
/* Check if we should try to flush */
if(aux_ptr->dirty_bytes >= aux_ptr->dirty_bytes_threshold)
if(H5AC__run_sync_point(f, dxpl_id, H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't run sync point.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't run sync point")
} /* end if */
}
#endif /* H5_HAVE_PARALLEL */
@@ -973,6 +1094,41 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5AC_load_cache_image_on_next_protect
+ *
+ * Purpose: Load the cache image block at the specified location,
+ * decode it, and insert its contents into the metadata
+ * cache.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 7/6/15
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC_load_cache_image_on_next_protect(H5F_t * f, haddr_t addr, hsize_t len,
+ hbool_t rw)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(f->shared->cache);
+
+ if(H5C_load_cache_image_on_next_protect(f, addr, len, rw) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, FAIL, "call to H5C_load_cache_image_on_next_protect failed")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC_load_cache_image_on_next_protect() */
+
+
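
A hedged sketch of how the file-open path might register a cache image block recorded in the superblock extension; img_addr and img_len are invented names, and the rw flag is derived from the file intent in the same way as elsewhere in this patch:

    /* Sketch only: queue the image at (img_addr, img_len) so the next
     * protect call reads, decodes, and inserts its entries. */
    if(H5AC_load_cache_image_on_next_protect(f, img_addr, img_len,
            (H5F_ACC_RDWR & H5F_INTENT(f)) ? TRUE : FALSE) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, FAIL, "can't queue cache image load")
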
+/*-------------------------------------------------------------------------
* Function: H5AC_mark_entry_dirty
*
* Purpose: Mark a pinned or protected entry as dirty. The target
@@ -1129,6 +1285,138 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5AC_mark_entry_unserialized
+ *
+ * Purpose: Mark a pinned or protected entry as unserialized. The target
+ * entry MUST be either pinned, protected, or both.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * 12/22/16
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC_mark_entry_unserialized(void *thing)
+{
+#if H5AC__TRACE_FILE_ENABLED
+ char trace[128] = "";
+ FILE * trace_file_ptr = NULL;
+#endif /* H5AC__TRACE_FILE_ENABLED */
+ hbool_t log_enabled; /* TRUE if logging was set up */
+ hbool_t curr_logging; /* TRUE if currently logging */
+ H5AC_info_t *entry_ptr = NULL; /* Pointer to the cache entry */
+ H5C_t *cache_ptr = NULL; /* Pointer to the entry's associated metadata cache */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity check */
+ HDassert(thing);
+
+ /* Set up entry & cache pointers */
+ entry_ptr = (H5AC_info_t *)thing;
+ cache_ptr = entry_ptr->cache_ptr;
+
+#if H5AC__TRACE_FILE_ENABLED
+ /* For the mark entry unserialized call, only the addr
+ * is really necessary in the trace file. Write the result to catch
+ * occult errors.
+ */
+ if(NULL != (trace_file_ptr = H5C_get_trace_file_ptr_from_entry(thing)))
+ sprintf(trace, "%s 0x%lx", FUNC, (unsigned long)(entry_ptr->addr));
+#endif /* H5AC__TRACE_FILE_ENABLED */
+
+ /* Check if log messages are being emitted */
+ if(H5C_get_logging_status(cache_ptr, &log_enabled, &curr_logging) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "unable to get logging status")
+
+ if(H5C_mark_entry_unserialized(thing) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKUNSERIALIZED, FAIL, "can't mark entry unserialized")
+
+done:
+#if H5AC__TRACE_FILE_ENABLED
+ if(trace_file_ptr)
+ HDfprintf(trace_file_ptr, "%s %d\n", trace, (int)ret_value);
+#endif /* H5AC__TRACE_FILE_ENABLED */
+
+ /* If currently logging, generate a message */
+ if(curr_logging)
+ if(H5AC__write_mark_unserialized_entry_log_msg(cache_ptr, entry_ptr, ret_value) < 0)
+ HDONE_ERROR(H5E_CACHE, H5E_LOGFAIL, FAIL, "unable to emit log message")
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC_mark_entry_unserialized() */
+
+
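
A hedged sketch of the client-side call; "entry" stands for any pinned or protected cache entry pointer:

    /* Sketch only: the in-core entry was modified, so any cached on-disk
     * image of it is now out of date. */
    if(H5AC_mark_entry_unserialized(entry) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKUNSERIALIZED, FAIL, "can't mark entry unserialized")
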
+/*-------------------------------------------------------------------------
+ * Function: H5AC_mark_entry_serialized
+ *
+ * Purpose: Mark a pinned entry as serialized. The target
+ * entry MUST be pinned.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * 12/22/16
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC_mark_entry_serialized(void *thing)
+{
+#if H5AC__TRACE_FILE_ENABLED
+ char trace[128] = "";
+ FILE * trace_file_ptr = NULL;
+#endif /* H5AC__TRACE_FILE_ENABLED */
+ hbool_t log_enabled; /* TRUE if logging was set up */
+ hbool_t curr_logging; /* TRUE if currently logging */
+ H5AC_info_t *entry_ptr = NULL; /* Pointer to the cache entry */
+ H5C_t *cache_ptr = NULL; /* Pointer to the entry's associated metadata cache */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity check */
+ HDassert(thing);
+
+#if H5AC__TRACE_FILE_ENABLED
+ /* For the mark entry serialized call, only the addr
+ * is really necessary in the trace file. Write the result to catch
+ * occult errors.
+ */
+ if(NULL != (trace_file_ptr = H5C_get_trace_file_ptr_from_entry(thing)))
+ sprintf(trace, "%s 0x%lx", FUNC,
+ (unsigned long)(((H5C_cache_entry_t *)thing)->addr));
+#endif /* H5AC__TRACE_FILE_ENABLED */
+
+ entry_ptr = (H5AC_info_t *)thing;
+ cache_ptr = entry_ptr->cache_ptr;
+
+ /* Check if log messages are being emitted */
+ if(H5C_get_logging_status(cache_ptr, &log_enabled, &curr_logging) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "unable to get logging status")
+
+ if(H5C_mark_entry_serialized(thing) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKSERIALIZED, FAIL, "can't mark entry serialized")
+
+done:
+#if H5AC__TRACE_FILE_ENABLED
+ if(trace_file_ptr)
+ HDfprintf(trace_file_ptr, "%s %d\n", trace, (int)ret_value);
+#endif /* H5AC__TRACE_FILE_ENABLED */
+
+ /* If currently logging, generate a message */
+ if(curr_logging)
+ if(H5AC__write_mark_serialized_entry_log_msg(cache_ptr, entry_ptr, ret_value) < 0)
+ HDONE_ERROR(H5E_CACHE, H5E_LOGFAIL, FAIL, "unable to emit log message")
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC_mark_entry_serialized() */
+
+
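
And the converse, once the client has brought the on-disk image of a pinned entry back up to date (again a hedged sketch with an invented "entry" pointer):

    /* Sketch only: the pinned entry's image has been regenerated. */
    if(H5AC_mark_entry_serialized(entry) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKSERIALIZED, FAIL, "can't mark entry serialized")
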
+/*-------------------------------------------------------------------------
* Function: H5AC_move_entry
*
* Purpose: Use this function to notify the cache that an object's
@@ -1193,13 +1481,13 @@ H5_ATTR_UNUSED
#endif /* H5_HAVE_PARALLEL */
if(H5C_move_entry(f->shared->cache, type, old_addr, new_addr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, "H5C_move_entry() failed.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMOVE, FAIL, "H5C_move_entry() failed")
#ifdef H5_HAVE_PARALLEL
/* Check if we should try to flush */
if(NULL != aux_ptr && aux_ptr->dirty_bytes >= aux_ptr->dirty_bytes_threshold)
if(H5AC__run_sync_point(f, dxpl_id, H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't run sync point.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't run sync point")
#endif /* H5_HAVE_PARALLEL */
done:
@@ -1285,6 +1573,44 @@ done:
/*-------------------------------------------------------------------------
+ *
+ * Function: H5AC_prep_for_file_close
+ *
+ * Purpose: This function should be called just prior to the cache
+ * flushes at file close.
+ *
+ * The objective of the call is to allow the metadata cache
+ * to do any preparatory work prior to generation of a
+ * cache image.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 7/3/15
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC_prep_for_file_close(H5F_t *f, hid_t dxpl_id)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(f->shared->cache);
+
+ if(H5C_prep_for_file_close(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "cache prep for file close failed")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC_prep_for_file_close() */
+
+
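
A hedged sketch of where this call sits in a file-close sequence; the ordering shown (prepare, then flush) follows the purpose comment above rather than the actual H5F close code, which this patch does not show:

    /* Sketch only: let the cache do cache-image preparatory work before
     * the final flush on file close. */
    if(H5AC_prep_for_file_close(f, dxpl_id) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "cache prep for file close failed")

    if(H5AC_flush(f, dxpl_id) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't flush metadata cache")
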
+/*-------------------------------------------------------------------------
* Function: H5AC_create_flush_dependency()
*
* Purpose: Create a flush dependency between two entries in the metadata
@@ -1333,7 +1659,7 @@ H5AC_create_flush_dependency(void * parent_thing, void * child_thing)
/* Create the flush dependency */
if(H5C_create_flush_dependency(parent_thing, child_thing) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "H5C_create_flush_dependency() failed.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "H5C_create_flush_dependency() failed")
done:
#if H5AC__TRACE_FILE_ENABLED
@@ -1434,7 +1760,7 @@ H5AC_protect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr,
#endif /* H5AC_DO_TAGGING_SANITY_CHECKS */
if(NULL == (thing = H5C_protect(f, dxpl_id, type, addr, udata, flags)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C_protect() failed.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C_protect() failed")
#if H5AC__TRACE_FILE_ENABLED
if(trace_file_ptr != NULL)
@@ -1657,7 +1983,7 @@ H5AC_destroy_flush_dependency(void * parent_thing, void * child_thing)
/* Destroy the flush dependency */
if(H5C_destroy_flush_dependency(parent_thing, child_thing) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "H5C_destroy_flush_dependency() failed.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "H5C_destroy_flush_dependency() failed")
done:
#if H5AC__TRACE_FILE_ENABLED
@@ -1781,18 +2107,18 @@ H5AC_unprotect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type, haddr_t addr,
if(deleted && aux_ptr->mpi_rank == 0)
if(H5AC__log_deleted_entry((H5AC_info_t *)thing) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "H5AC__log_deleted_entry() failed.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "H5AC__log_deleted_entry() failed")
} /* end if */
#endif /* H5_HAVE_PARALLEL */
if(H5C_unprotect(f, dxpl_id, addr, thing, flags) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "H5C_unprotect() failed.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "H5C_unprotect() failed")
#ifdef H5_HAVE_PARALLEL
/* Check if we should try to flush */
if((aux_ptr != NULL) && (aux_ptr->dirty_bytes >= aux_ptr->dirty_bytes_threshold))
if(H5AC__run_sync_point(f, dxpl_id, H5AC_SYNC_POINT_OP__FLUSH_TO_MIN_CLEAN) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't run sync point.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't run sync point")
#endif /* H5_HAVE_PARALLEL */
done:
@@ -1835,22 +2161,22 @@ H5AC_get_cache_auto_resize_config(const H5AC_t *cache_ptr,
/* Check args */
if((cache_ptr == NULL) || (config_ptr == NULL) ||
(config_ptr->version != H5AC__CURR_CACHE_CONFIG_VERSION))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr or config_ptr on entry.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr or config_ptr on entry")
#ifdef H5_HAVE_PARALLEL
{
H5AC_aux_t *aux_ptr;
aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr);
if((aux_ptr != NULL) && (aux_ptr->magic != H5AC__H5AC_AUX_T_MAGIC))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad aux_ptr on entry.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad aux_ptr on entry")
}
#endif /* H5_HAVE_PARALLEL */
/* Retrieve the configuration */
if(H5C_get_cache_auto_resize_config((const H5C_t *)cache_ptr, &internal_config) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_cache_auto_resize_config() failed.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_cache_auto_resize_config() failed")
if(H5C_get_evictions_enabled((const H5C_t *)cache_ptr, &evictions_enabled) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_resize_enabled() failed.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_resize_enabled() failed")
/* Set the information to return */
if(internal_config.rpt_fcn == NULL)
@@ -1919,7 +2245,7 @@ done:
*/
herr_t
H5AC_get_cache_size(H5AC_t *cache_ptr, size_t *max_size_ptr, size_t *min_clean_size_ptr,
- size_t *cur_size_ptr, int32_t *cur_num_entries_ptr)
+ size_t *cur_size_ptr, uint32_t *cur_num_entries_ptr)
{
herr_t ret_value = SUCCEED; /* Return value */
@@ -1927,7 +2253,7 @@ H5AC_get_cache_size(H5AC_t *cache_ptr, size_t *max_size_ptr, size_t *min_clean_s
if(H5C_get_cache_size((H5C_t *)cache_ptr, max_size_ptr, min_clean_size_ptr,
cur_size_ptr, cur_num_entries_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_cache_size() failed.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_cache_size() failed")
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -1954,7 +2280,7 @@ H5AC_get_cache_hit_rate(H5AC_t *cache_ptr, double *hit_rate_ptr)
FUNC_ENTER_NOAPI(FAIL)
if(H5C_get_cache_hit_rate((H5C_t *)cache_ptr, hit_rate_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_cache_hit_rate() failed.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_cache_hit_rate() failed")
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -1981,7 +2307,7 @@ H5AC_reset_cache_hit_rate_stats(H5AC_t * cache_ptr)
FUNC_ENTER_NOAPI(FAIL)
if(H5C_reset_cache_hit_rate_stats((H5C_t *)cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats() failed.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats() failed")
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -2030,14 +2356,14 @@ H5AC_set_cache_auto_resize_config(H5AC_t *cache_ptr, H5AC_cache_config_t *config
#endif /* H5AC__TRACE_FILE_ENABLED */
if(cache_ptr == NULL)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "bad cache_ptr on entry.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "bad cache_ptr on entry")
#ifdef H5_HAVE_PARALLEL
{
H5AC_aux_t *aux_ptr;
aux_ptr = (H5AC_aux_t *)H5C_get_aux_ptr(cache_ptr);
if((aux_ptr != NULL) && (aux_ptr->magic != H5AC__H5AC_AUX_T_MAGIC))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "bad aux_ptr on entry.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "bad aux_ptr on entry")
}
#endif /* H5_HAVE_PARALLEL */
@@ -2049,29 +2375,29 @@ H5AC_set_cache_auto_resize_config(H5AC_t *cache_ptr, H5AC_cache_config_t *config
FILE * file_ptr;
if(NULL == (file_ptr = H5C_get_trace_file_ptr(cache_ptr)))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_trace_file_ptr() failed.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_trace_file_ptr() failed")
if((!(config_ptr->close_trace_file)) && (file_ptr != NULL))
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Trace file already open.")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Trace file already open")
} /* end if */
/* Close & reopen trace file, if requested */
if(config_ptr->close_trace_file)
if(H5AC__close_trace_file(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC__close_trace_file() failed.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC__close_trace_file() failed")
if(config_ptr->open_trace_file)
if(H5AC__open_trace_file(cache_ptr, config_ptr->trace_file_name) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "H5AC__open_trace_file() failed.")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "H5AC__open_trace_file() failed")
/* Convert external configuration to internal representation */
if(H5AC__ext_config_2_int_config(config_ptr, &internal_config) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC__ext_config_2_int_config() failed.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC__ext_config_2_int_config() failed")
/* Set configuration */
if(H5C_set_cache_auto_resize_config(cache_ptr, &internal_config) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_set_cache_auto_resize_config() failed.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_set_cache_auto_resize_config() failed")
if(H5C_set_evictions_enabled(cache_ptr, config_ptr->evictions_enabled) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_set_evictions_enabled() failed.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_set_evictions_enabled() failed")
#ifdef H5_HAVE_PARALLEL
{
@@ -2170,9 +2496,9 @@ H5AC_validate_config(H5AC_cache_config_t *config_ptr)
/* Check args */
if(config_ptr == NULL)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "NULL config_ptr on entry.")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "NULL config_ptr on entry")
if(config_ptr->version != H5AC__CURR_CACHE_CONFIG_VERSION)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Unknown config version.")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Unknown config version")
/* don't bother to test trace_file_name unless open_trace_file is TRUE */
if(config_ptr->open_trace_file) {
@@ -2184,40 +2510,90 @@ H5AC_validate_config(H5AC_cache_config_t *config_ptr)
*/
name_len = HDstrlen(config_ptr->trace_file_name);
if(name_len == 0)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "config_ptr->trace_file_name is empty.")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "config_ptr->trace_file_name is empty")
else if(name_len > H5AC__MAX_TRACE_FILE_NAME_LEN)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "config_ptr->trace_file_name too long.")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "config_ptr->trace_file_name too long")
} /* end if */
if((config_ptr->evictions_enabled == FALSE) &&
((config_ptr->incr_mode != H5C_incr__off) ||
(config_ptr->flash_incr_mode != H5C_flash_incr__off) ||
(config_ptr->decr_mode != H5C_decr__off)))
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Can't disable evictions while auto-resize is enabled.")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Can't disable evictions while auto-resize is enabled")
if(config_ptr->dirty_bytes_threshold < H5AC__MIN_DIRTY_BYTES_THRESHOLD)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "dirty_bytes_threshold too small.")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "dirty_bytes_threshold too small")
else if(config_ptr->dirty_bytes_threshold > H5AC__MAX_DIRTY_BYTES_THRESHOLD)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "dirty_bytes_threshold too big.")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "dirty_bytes_threshold too big")
if((config_ptr->metadata_write_strategy != H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY) &&
(config_ptr->metadata_write_strategy != H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED))
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "config_ptr->metadata_write_strategy out of range.")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "config_ptr->metadata_write_strategy out of range")
if(H5AC__ext_config_2_int_config(config_ptr, &internal_config) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC__ext_config_2_int_config() failed.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC__ext_config_2_int_config() failed")
if(H5C_validate_resize_config(&internal_config, H5C_RESIZE_CFG__VALIDATE_ALL) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "error(s) in new config.")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "error(s) in new config")
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5AC_validate_config() */
-/*************************************************************************/
-/**************************** Private Functions: *************************/
-/*************************************************************************/
+/*-------------------------------------------------------------------------
+ * Function: H5AC_validate_cache_image_config()
+ *
+ * Purpose: Run a sanity check on the contents of the supplied
+ * instance of H5AC_cache_image_config_t.
+ *
+ * Do nothing and return SUCCEED if no errors are detected,
+ * and flag an error and return FAIL otherwise.
+ *
+ * At present, this function operates by packing the data
+ * from the instance of H5AC_cache_image_config_t into an
+ * instance of H5C_cache_image_ctl_t, and then calling
+ * H5C_validate_cache_image_config(). If and when
+ * H5AC_cache_image_config_t and H5C_cache_image_ctl_t
+ * diverge, we may have to change this.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 6/25/15
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC_validate_cache_image_config(H5AC_cache_image_config_t *config_ptr)
+{
+ H5C_cache_image_ctl_t internal_config = H5C__DEFAULT_CACHE_IMAGE_CTL;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Check args */
+ if(config_ptr == NULL)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "NULL config_ptr on entry")
+
+ if(config_ptr->version != H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Unknown image config version")
+
+ /* don't need to get the current H5C image config here since the
+ * default values of fields not in the H5AC config will always be
+ * valid.
+ */
+ internal_config.version = config_ptr->version;
+ internal_config.generate_image = config_ptr->generate_image;
+ internal_config.save_resize_status = config_ptr->save_resize_status;
+ internal_config.entry_ageout = config_ptr->entry_ageout;
+
+ if(H5C_validate_cache_image_config(&internal_config) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "error(s) in new cache image config")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC_validate_cache_image_config() */
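
A minimal caller-side sketch of how the new validation routine might be driven. It uses only the fields and macros that appear elsewhere in this patch (version, generate_image, save_resize_status, entry_ageout, H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION, H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE), and it assumes it sits inside a routine with the usual FUNC_ENTER/done error-handling scaffolding; the surrounding error handling is illustrative, not lifted from the library.

    H5AC_cache_image_config_t img_cfg;

    img_cfg.version            = H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION;
    img_cfg.generate_image     = TRUE;    /* request an image on file close */
    img_cfg.save_resize_status = FALSE;
    img_cfg.entry_ageout       = H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE;

    /* Reject the configuration before it reaches the cache */
    if(H5AC_validate_cache_image_config(&img_cfg) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "invalid cache image config")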
/*-------------------------------------------------------------------------
@@ -2303,7 +2679,7 @@ H5AC__ext_config_2_int_config(H5AC_cache_config_t *ext_conf_ptr,
if((ext_conf_ptr == NULL) || (ext_conf_ptr->version != H5AC__CURR_CACHE_CONFIG_VERSION) ||
(int_conf_ptr == NULL))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad ext_conf_ptr or inf_conf_ptr on entry.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad ext_conf_ptr or inf_conf_ptr on entry")
int_conf_ptr->version = H5C__CURR_AUTO_SIZE_CTL_VER;
if(ext_conf_ptr->rpt_fcn_enabled)
@@ -2370,7 +2746,7 @@ H5AC_ignore_tags(const H5F_t *f)
/* Set up a new metadata tag */
if(H5C_ignore_tags(f->shared->cache) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, "H5C_ignore_tags() failed.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, "H5C_ignore_tags() failed")
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -2780,6 +3156,96 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5AC_unsettle_entry_ring()
+ *
+ * Purpose: Advise the metadata cache that the specified entry's metadata
+ * cache manager ring is no longer settled (if it was on entry).
+ *
+ * If the target metadata cache manager ring is already
+ * unsettled, do nothing, and return SUCCEED.
+ *
+ * If the target metadata cache manager ring is settled, and
+ * we are not in the process of a file shutdown, mark
+ * the ring as unsettled, and return SUCCEED.
+ *
+ * If the target metadata cache manager is settled, and we
+ * are in the process of a file shutdown, post an error
+ * message, and return FAIL.
+ *
+ * Note that this function simply passes the call on to
+ * the metadata cache proper, and returns the result.
+ *
+ * Return: Success: Non-negative
+ * Failure: Negative
+ *
+ * Programmer: Quincey Koziol
+ * September 17, 2016
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC_unsettle_entry_ring(void *_entry)
+{
+ H5AC_info_t *entry = (H5AC_info_t *)_entry; /* Entry whose ring is being unsettled */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(entry);
+
+ /* Unsettle the entry's ring */
+ if(H5C_unsettle_entry_ring(entry) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't unsettle entry ring")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC_unsettle_entry_ring() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC_unsettle_ring()
+ *
+ * Purpose: Advise the metadata cache that the specified free space
+ * manager ring is no longer settled (if it was on entry).
+ *
+ * If the target free space manager ring is already
+ * unsettled, do nothing, and return SUCCEED.
+ *
+ * If the target free space manager ring is settled, and
+ * we are not in the process of a file shutdown, mark
+ * the ring as unsettled, and return SUCCEED.
+ *
+ * If the target free space manager is settled, and we
+ * are in the process of a file shutdown, post an error
+ * message, and return FAIL.
+ *
+ * Note that this function simply passes the call on to
+ * the metadata cache proper, and returns the result.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 10/15/16
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC_unsettle_ring(H5F_t * f, H5C_ring_t ring)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ if(FAIL == (ret_value = H5C_unsettle_ring(f, ring)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_unsettle_ring() failed")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC_unsettle_ring() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5AC_remove_entry()
*
* Purpose: Remove an entry from the cache. Must be not protected, pinned,
@@ -2842,3 +3308,29 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5AC_remove_entry() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC_get_mdc_image_info
+ *
+ * Purpose: Wrapper function for H5C_get_mdc_image_info().
+ *
+ * Return: SUCCEED on success, and FAIL on failure.
+ *
+ * Programmer: Vailin Choi; March 2017
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC_get_mdc_image_info(H5AC_t *cache_ptr, haddr_t *image_addr, hsize_t *image_len)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ if(H5C_get_mdc_image_info((H5C_t *)cache_ptr, image_addr, image_len) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "can't retrieve cache image info")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC_get_mdc_image_info() */
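
A minimal sketch of querying the cache image location through this new wrapper. It assumes a file pointer f whose cache is reached via f->shared->cache, as in the other wrappers in this patch, and a routine with the usual FUNC_ENTER/done scaffolding; the error handling is illustrative.

    haddr_t image_addr = HADDR_UNDEF;
    hsize_t image_len  = 0;

    /* Ask the metadata cache where its image block lives and how long it is */
    if(H5AC_get_mdc_image_info(f->shared->cache, &image_addr, &image_len) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "can't retrieve cache image info")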
+
diff --git a/src/H5ACdbg.c b/src/H5ACdbg.c
index 6120242..c6d71a8 100644
--- a/src/H5ACdbg.c
+++ b/src/H5ACdbg.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -101,6 +99,7 @@ H5AC_stats(const H5F_t *f)
FUNC_LEAVE_NOAPI(SUCCEED)
} /* H5AC_stats() */
+#ifndef NDEBUG
/*-------------------------------------------------------------------------
* Function: H5AC_dump_cache
@@ -133,6 +132,7 @@ H5AC_dump_cache(const H5F_t *f)
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5AC_dump_cache() */
+#endif /* NDEBUG */
/*-------------------------------------------------------------------------
@@ -249,3 +249,244 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5AC__open_trace_file() */
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5AC_get_entry_ptr_from_addr()
+ *
+ * Purpose: Debugging function that attempts to look up an entry in the
+ * cache by its file address, and if found, returns a pointer
+ * to the entry in *entry_ptr_ptr. If the entry is not in the
+ * cache, *entry_ptr_ptr is set to NULL.
+ *
+ * WARNING: This call should be used only in debugging
+ * routines, and it should be avoided when
+ * possible.
+ *
+ * Further, if we ever multi-thread the cache,
+ * this routine will have to be either discarded
+ * or heavily re-worked.
+ *
+ * Finally, keep in mind that the entry whose
+ * pointer is obtained in this fashion may not
+ * be in a stable state.
+ *
+ * Note that this function is only defined if NDEBUG
+ * is not defined.
+ *
+ * As heavy use of this function is almost certainly a
+ * bad idea, the metadata cache tracks the number of
+ * successful calls to this function, and (if
+ * H5C_DO_SANITY_CHECKS is defined) displays any
+ * non-zero count on cache shutdown.
+ *
+ * This function is just a wrapper that calls the H5C
+ * version of the function.
+ *
+ * Return: FAIL if error is detected, SUCCEED otherwise.
+ *
+ * Programmer: John Mainzer, 5/30/14
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef NDEBUG
+herr_t
+H5AC_get_entry_ptr_from_addr(const H5F_t *f, haddr_t addr, void **entry_ptr_ptr)
+{
+ H5C_t *cache_ptr; /* Ptr to cache */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+
+ if(H5C_get_entry_ptr_from_addr(cache_ptr, addr, entry_ptr_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_get_entry_ptr_from_addr() failed")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC_get_entry_ptr_from_addr() */
+#endif /* NDEBUG */
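
A sketch of how a debug-only check might use this lookup; f and addr are assumed to be in scope, the block assumes the usual FUNC_ENTER/done error-handling scaffolding, and it is guarded the same way the wrapper itself is.

    #ifndef NDEBUG
    {
        void *entry_ptr = NULL;

        /* entry_ptr comes back NULL if the entry is not resident in the cache */
        if(H5AC_get_entry_ptr_from_addr(f, addr, &entry_ptr) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "address lookup failed")
        HDassert(entry_ptr != NULL);
    }
    #endif /* NDEBUG */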
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC_flush_dependency_exists()
+ *
+ * Purpose: Test to see if a flush dependency relationship exists
+ * between the supplied parent and child. Both parties
+ * are indicated by addresses so as to avoid the necessity
+ * of protect / unprotect calls prior to this call.
+ *
+ * If either the parent or the child is not in the metadata
+ * cache, the function sets *fd_exists_ptr to FALSE.
+ *
+ * If both are in the cache, the child's list of parents is
+ * searched for the proposed parent. If the proposed parent
+ * is found in the child's parent list, the function sets
+ * *fd_exists_ptr to TRUE. In all other non-error cases,
+ * the function sets *fd_exists_ptr to FALSE.
+ *
+ * Return: SUCCEED on success/FAIL on failure. Note that
+ * *fd_exists_ptr is undefined on failure.
+ *
+ * Programmer: John Mainzer
+ * 9/28/16
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef NDEBUG
+herr_t
+H5AC_flush_dependency_exists(H5F_t *f, haddr_t parent_addr, haddr_t child_addr,
+ hbool_t *fd_exists_ptr)
+{
+ H5C_t *cache_ptr; /* Ptr to cache */
+ herr_t ret_value = FAIL; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+
+ ret_value = H5C_flush_dependency_exists(cache_ptr, parent_addr, child_addr, fd_exists_ptr);
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC_flush_dependency_exists() */
+#endif /* NDEBUG */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5AC_verify_entry_type()
+ *
+ * Purpose: Debugging function that attempts to look up an entry in the
+ * cache by its file address, and if found, tests whether its
+ * type field contains the expected value.
+ *
+ * If the specified entry is in cache, *in_cache_ptr is set
+ * to TRUE, and *type_ok_ptr is set to TRUE or FALSE depending
+ * on whether the entry's type field matches the
+ * expected_type parameter.
+ *
+ * If the target entry is not in cache, *in_cache_ptr is
+ * set to FALSE, and *type_ok_ptr is undefined.
+ *
+ * Note that this function is only defined if NDEBUG
+ * is not defined.
+ *
+ * This function is just a wrapper that calls the H5C
+ * version of the function.
+ *
+ * Return: FAIL if error is detected, SUCCEED otherwise.
+ *
+ * Programmer: John Mainzer, 5/30/14
+ *
+ * Changes: None.
+ *
+ * JRM -- 9/17/16
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef NDEBUG
+herr_t
+H5AC_verify_entry_type(const H5F_t *f, haddr_t addr,
+ const H5AC_class_t *expected_type, hbool_t *in_cache_ptr,
+ hbool_t *type_ok_ptr)
+{
+ H5C_t * cache_ptr;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+
+ if(H5C_verify_entry_type(cache_ptr, addr, expected_type, in_cache_ptr, type_ok_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_verify_entry_type() failed")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC_verify_entry_type() */
+#endif /* NDEBUG */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC_get_serialization_in_progress
+ *
+ * Purpose: Return the current value of
+ * cache_ptr->serialization_in_progress.
+ *
+ * Return: Current value of cache_ptr->serialization_in_progress.
+ *
+ * Programmer: John Mainzer
+ * 8/24/15
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef NDEBUG
+hbool_t
+H5AC_get_serialization_in_progress(H5F_t *f)
+{
+ H5C_t * cache_ptr;
+ hbool_t ret_value = FALSE; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ /* Sanity check */
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+
+ /* Set return value */
+ ret_value = H5C_get_serialization_in_progress(cache_ptr);
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC_get_serialization_in_progress() */
+#endif /* NDEBUG */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5AC_cache_is_clean()
+ *
+ * Purpose: Debugging function that verifies that all rings in the
+ * metadata cache are clean from the outermost ring, inwards
+ * to the inner ring specified.
+ *
+ * Returns TRUE if all specified rings are clean, and FALSE
+ * if not. Throws an assertion failure on error.
+ *
+ * Return: TRUE if the indicated ring(s) are clean, and FALSE otherwise.
+ *
+ * Programmer: John Mainzer, 6/18/16
+ *
+ * Changes: None.
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef NDEBUG
+hbool_t
+H5AC_cache_is_clean(const H5F_t *f, H5AC_ring_t inner_ring)
+{
+ H5C_t *cache_ptr;
+ hbool_t ret_value = FALSE; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+
+ ret_value = H5C_cache_is_clean(cache_ptr, inner_ring);
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC_cache_is_clean() */
+#endif /* NDEBUG */
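
Typical debug-only use is an assertion that every ring from the outermost ring inwards to a given ring is clean. The sketch below assumes the H5AC_RING_MDFSM alias introduced later in this patch and a file pointer f in scope.

    #ifndef NDEBUG
    /* All rings down to and including the metadata free space manager
     * ring must be clean at this point.
     */
    HDassert(H5AC_cache_is_clean(f, H5AC_RING_MDFSM));
    #endif /* NDEBUG */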
+
diff --git a/src/H5AClog.c b/src/H5AClog.c
index 1cdaa00..51a2050 100644
--- a/src/H5AClog.c
+++ b/src/H5AClog.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -496,6 +494,101 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5AC__write_mark_unserialized_entry_log_msg
+ *
+ * Purpose: Write a log message for marking cache entries as unserialized.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: Quincey Koziol
+ * Thursday, December 22, 2016
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC__write_mark_unserialized_entry_log_msg(const H5AC_t *cache,
+ const H5AC_info_t *entry,
+ herr_t fxn_ret_value)
+{
+ char msg[MSG_SIZE];
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(cache);
+ HDassert(entry);
+
+ /* Create the log message string */
+ HDsnprintf(msg, MSG_SIZE,
+"\
+{\
+\"timestamp\":%lld,\
+\"action\":\"unserialized\",\
+\"address\":0x%lx,\
+\"returned\":%d\
+},\n\
+"
+ , (long long)HDtime(NULL), (unsigned long)entry->addr, (int)fxn_ret_value);
+
+ /* Write the log message to the file */
+ if(H5C_write_log_message(cache, msg) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to emit log message")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC__write_mark_unserialized_entry_log_msg() */
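
Given the format string above, a single emitted record (with a made-up timestamp and address) would look like the following line; the companion function below differs only in the "action" value.

    {"timestamp":1482390000,"action":"unserialized","address":0x1a2b,"returned":0},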
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5AC__write_mark_serialized_entry_log_msg
+ *
+ * Purpose: Write a log message for marking cache entries as serialized.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: Quincey Koziol
+ * Thursday, December 22, 2016
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5AC__write_mark_serialized_entry_log_msg(const H5AC_t *cache, const H5AC_info_t *entry,
+ herr_t fxn_ret_value)
+{
+ char msg[MSG_SIZE]; /* Log message buffer */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(cache);
+ HDassert(entry);
+
+ /* Create the log message string */
+ HDsnprintf(msg, MSG_SIZE,
+"\
+{\
+\"timestamp\":%lld,\
+\"action\":\"serialized\",\
+\"address\":0x%lx,\
+\"returned\":%d\
+},\n\
+"
+ , (long long)HDtime(NULL), (unsigned long)entry->addr, (int)fxn_ret_value);
+
+ /* Write the log message to the file */
+ if(H5C_write_log_message(cache, msg) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unable to emit log message")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5AC__write_mark_serialized_entry_log_msg() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5AC__write_move_entry_log_msg
*
* Purpose: Write a log message for moving a cache entry.
diff --git a/src/H5ACmodule.h b/src/H5ACmodule.h
index a8dba59..e218b31 100644
--- a/src/H5ACmodule.h
+++ b/src/H5ACmodule.h
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5ACmpio.c b/src/H5ACmpio.c
index 570783a..d03c17b 100644
--- a/src/H5ACmpio.c
+++ b/src/H5ACmpio.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -81,7 +79,7 @@ typedef struct H5AC_addr_list_ud_t
{
H5AC_aux_t * aux_ptr; /* 'Auxiliary' parallel cache info */
haddr_t * addr_buf_ptr; /* Array to store addresses */
- int i; /* Counter for position in array */
+ unsigned u; /* Counter for position in array */
} H5AC_addr_list_ud_t;
@@ -90,21 +88,21 @@ typedef struct H5AC_addr_list_ud_t
/********************/
static herr_t H5AC__broadcast_candidate_list(H5AC_t *cache_ptr,
- int *num_entries_ptr, haddr_t **haddr_buf_ptr_ptr);
+ unsigned *num_entries_ptr, haddr_t **haddr_buf_ptr_ptr);
static herr_t H5AC__broadcast_clean_list(H5AC_t *cache_ptr);
static herr_t H5AC__construct_candidate_list(H5AC_t *cache_ptr,
H5AC_aux_t *aux_ptr, int sync_point_op);
static herr_t H5AC__copy_candidate_list_to_buffer(const H5AC_t *cache_ptr,
- int *num_entries_ptr, haddr_t **haddr_buf_ptr_ptr);
+ unsigned *num_entries_ptr, haddr_t **haddr_buf_ptr_ptr);
static herr_t H5AC__propagate_and_apply_candidate_list(H5F_t *f, hid_t dxpl_id);
static herr_t H5AC__propagate_flushed_and_still_clean_entries_list(H5F_t *f,
hid_t dxpl_id);
-static herr_t H5AC__receive_haddr_list(MPI_Comm mpi_comm, int *num_entries_ptr,
+static herr_t H5AC__receive_haddr_list(MPI_Comm mpi_comm, unsigned *num_entries_ptr,
haddr_t **haddr_buf_ptr_ptr);
static herr_t H5AC__receive_candidate_list(const H5AC_t *cache_ptr,
- int *num_entries_ptr, haddr_t **haddr_buf_ptr_ptr);
+ unsigned *num_entries_ptr, haddr_t **haddr_buf_ptr_ptr);
static herr_t H5AC__receive_and_apply_clean_list(H5F_t *f, hid_t dxpl_id);
-static herr_t H5AC__tidy_cache_0_lists(H5AC_t *cache_ptr, int num_candidates,
+static herr_t H5AC__tidy_cache_0_lists(H5AC_t *cache_ptr, unsigned num_candidates,
haddr_t *candidates_list_ptr);
static herr_t H5AC__rsp__dist_md_write__flush(H5F_t *f, hid_t dxpl_id);
static herr_t H5AC__rsp__dist_md_write__flush_to_min_clean(H5F_t *f, hid_t dxpl_id);
@@ -151,7 +149,7 @@ H5FL_DEFINE_STATIC(H5AC_slist_entry_t);
*/
herr_t
H5AC__set_sync_point_done_callback(H5C_t * cache_ptr,
- void (* sync_point_done)(int num_writes, haddr_t * written_entries_tbl))
+ void (* sync_point_done)(unsigned num_writes, haddr_t * written_entries_tbl))
{
H5AC_aux_t * aux_ptr;
@@ -282,13 +280,13 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5AC__broadcast_candidate_list(H5AC_t *cache_ptr, int *num_entries_ptr,
+H5AC__broadcast_candidate_list(H5AC_t *cache_ptr, unsigned *num_entries_ptr,
haddr_t **haddr_buf_ptr_ptr)
{
H5AC_aux_t * aux_ptr = NULL;
haddr_t * haddr_buf_ptr = NULL;
int mpi_result;
- int num_entries;
+ unsigned num_entries;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -310,13 +308,13 @@ H5AC__broadcast_candidate_list(H5AC_t *cache_ptr, int *num_entries_ptr,
* receivers can set up buffers to receive them. If there aren't
* any, we are done.
*/
- num_entries = (int)H5SL_count(aux_ptr->candidate_slist_ptr);
- if(MPI_SUCCESS != (mpi_result = MPI_Bcast(&num_entries, 1, MPI_INT, 0, aux_ptr->mpi_comm)))
+ num_entries = (unsigned)H5SL_count(aux_ptr->candidate_slist_ptr);
+ if(MPI_SUCCESS != (mpi_result = MPI_Bcast(&num_entries, 1, MPI_UNSIGNED, 0, aux_ptr->mpi_comm)))
HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
if(num_entries > 0) {
size_t buf_size = 0;
- int chk_num_entries = 0;
+ unsigned chk_num_entries = 0;
/* convert the candidate list into the format we
* are used to receiving from process 0, and also load it
@@ -328,7 +326,7 @@ H5AC__broadcast_candidate_list(H5AC_t *cache_ptr, int *num_entries_ptr,
HDassert(haddr_buf_ptr != NULL);
/* Now broadcast the list of candidate entries */
- buf_size = sizeof(haddr_t) * (size_t)num_entries;
+ buf_size = sizeof(haddr_t) * num_entries;
if(MPI_SUCCESS != (mpi_result = MPI_Bcast((void *)haddr_buf_ptr, (int)buf_size, MPI_BYTE, 0, aux_ptr->mpi_comm)))
HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
} /* end if */
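
Since the entry count is now carried as an unsigned, the matching MPI datatype changes too. A distilled sketch of the broadcast pattern used above (rank 0 sends the count as MPI_UNSIGNED, then the address buffer as raw bytes), with variable names following the surrounding code and error handling omitted:

    unsigned num_entries;
    size_t   buf_size;

    num_entries = (unsigned)H5SL_count(aux_ptr->candidate_slist_ptr);
    MPI_Bcast(&num_entries, 1, MPI_UNSIGNED, 0, aux_ptr->mpi_comm);

    if(num_entries > 0) {
        buf_size = sizeof(haddr_t) * num_entries;
        MPI_Bcast((void *)haddr_buf_ptr, (int)buf_size, MPI_BYTE, 0, aux_ptr->mpi_comm);
    }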
@@ -378,8 +376,8 @@ H5AC__broadcast_clean_list_cb(void *_item, void H5_ATTR_UNUSED *_key,
/* Store the entry's address in the buffer */
addr = slist_entry_ptr->addr;
- udata->addr_buf_ptr[udata->i] = addr;
- udata->i++;
+ udata->addr_buf_ptr[udata->u] = addr;
+ udata->u++;
/* now release the entry */
slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
@@ -420,7 +418,7 @@ H5AC__broadcast_clean_list(H5AC_t * cache_ptr)
haddr_t * addr_buf_ptr = NULL;
H5AC_aux_t * aux_ptr;
int mpi_result;
- int num_entries = 0;
+ unsigned num_entries = 0;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -437,8 +435,8 @@ H5AC__broadcast_clean_list(H5AC_t * cache_ptr)
* receives can set up a buffer to receive them. If there aren't
* any, we are done.
*/
- num_entries = (int)H5SL_count(aux_ptr->c_slist_ptr);
- if(MPI_SUCCESS != (mpi_result = MPI_Bcast(&num_entries, 1, MPI_INT, 0, aux_ptr->mpi_comm)))
+ num_entries = (unsigned)H5SL_count(aux_ptr->c_slist_ptr);
+ if(MPI_SUCCESS != (mpi_result = MPI_Bcast(&num_entries, 1, MPI_UNSIGNED, 0, aux_ptr->mpi_comm)))
HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
if(num_entries > 0) {
@@ -446,14 +444,14 @@ H5AC__broadcast_clean_list(H5AC_t * cache_ptr)
size_t buf_size;
/* allocate a buffer to store the list of entry base addresses in */
- buf_size = sizeof(haddr_t) * (size_t)num_entries;
+ buf_size = sizeof(haddr_t) * num_entries;
if(NULL == (addr_buf_ptr = (haddr_t *)H5MM_malloc(buf_size)))
HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for addr buffer")
/* Set up user data for callback */
udata.aux_ptr = aux_ptr;
udata.addr_buf_ptr = addr_buf_ptr;
- udata.i = 0;
+ udata.u = 0;
/* Free all the clean list entries, building the address list in the callback */
/* (Callback also removes the matching entries from the dirtied list) */
@@ -568,8 +566,8 @@ H5AC__copy_candidate_list_to_buffer_cb(void *_item, void H5_ATTR_UNUSED *_key,
HDassert(udata);
/* Store the entry's address in the buffer */
- udata->addr_buf_ptr[udata->i] = slist_entry_ptr->addr;
- udata->i++;
+ udata->addr_buf_ptr[udata->u] = slist_entry_ptr->addr;
+ udata->u++;
/* now release the entry */
slist_entry_ptr = H5FL_FREE(H5AC_slist_entry_t, slist_entry_ptr);
@@ -610,14 +608,14 @@ H5AC__copy_candidate_list_to_buffer_cb(void *_item, void H5_ATTR_UNUSED *_key,
*-------------------------------------------------------------------------
*/
static herr_t
-H5AC__copy_candidate_list_to_buffer(const H5AC_t *cache_ptr, int *num_entries_ptr,
+H5AC__copy_candidate_list_to_buffer(const H5AC_t *cache_ptr, unsigned *num_entries_ptr,
haddr_t **haddr_buf_ptr_ptr)
{
H5AC_aux_t * aux_ptr = NULL;
H5AC_addr_list_ud_t udata;
haddr_t * haddr_buf_ptr = NULL;
size_t buf_size;
- int num_entries = 0;
+ unsigned num_entries = 0;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -635,19 +633,19 @@ H5AC__copy_candidate_list_to_buffer(const H5AC_t *cache_ptr, int *num_entries_pt
HDassert(haddr_buf_ptr_ptr != NULL);
HDassert(*haddr_buf_ptr_ptr == NULL);
- num_entries = (int)H5SL_count(aux_ptr->candidate_slist_ptr);
+ num_entries = (unsigned)H5SL_count(aux_ptr->candidate_slist_ptr);
/* allocate a buffer(s) to store the list of candidate entry
* base addresses in
*/
- buf_size = sizeof(haddr_t) * (size_t)num_entries;
+ buf_size = sizeof(haddr_t) * num_entries;
if(NULL == (haddr_buf_ptr = (haddr_t *)H5MM_malloc(buf_size)))
HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for haddr buffer")
/* Set up user data for callback */
udata.aux_ptr = aux_ptr;
udata.addr_buf_ptr = haddr_buf_ptr;
- udata.i = 0;
+ udata.u = 0;
/* Free all the candidate list entries, building the address list in the callback */
if(H5SL_free(aux_ptr->candidate_slist_ptr, H5AC__copy_candidate_list_to_buffer_cb, &udata) < 0)
@@ -1071,7 +1069,7 @@ H5AC__log_moved_entry(const H5F_t *f, haddr_t old_addr, haddr_t new_addr)
/* get entry status, size, etc here */
if(H5C_get_entry_status(f, old_addr, &entry_size, &entry_in_cache,
- &entry_dirty, NULL, NULL, NULL, NULL, NULL) < 0)
+ &entry_dirty, NULL, NULL, NULL, NULL, NULL, NULL) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get entry status.")
if(!entry_in_cache)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry not in cache.")
@@ -1234,7 +1232,7 @@ H5AC__propagate_and_apply_candidate_list(H5F_t *f, hid_t dxpl_id)
H5AC_aux_t * aux_ptr;
haddr_t * candidates_list_ptr = NULL;
int mpi_result;
- int num_candidates = 0;
+ unsigned num_candidates = 0;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -1448,12 +1446,12 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5AC__receive_haddr_list(MPI_Comm mpi_comm, int *num_entries_ptr,
+H5AC__receive_haddr_list(MPI_Comm mpi_comm, unsigned *num_entries_ptr,
haddr_t **haddr_buf_ptr_ptr)
{
haddr_t * haddr_buf_ptr = NULL;
int mpi_result;
- int num_entries;
+ unsigned num_entries;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -1468,14 +1466,14 @@ H5AC__receive_haddr_list(MPI_Comm mpi_comm, int *num_entries_ptr,
* can set up a buffer to receive them. If there aren't
* any, we are done.
*/
- if(MPI_SUCCESS != (mpi_result = MPI_Bcast(&num_entries, 1, MPI_INT, 0, mpi_comm)))
+ if(MPI_SUCCESS != (mpi_result = MPI_Bcast(&num_entries, 1, MPI_UNSIGNED, 0, mpi_comm)))
HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
if(num_entries > 0) {
size_t buf_size;
/* allocate buffers to store the list of entry base addresses in */
- buf_size = sizeof(haddr_t) * (size_t)num_entries;
+ buf_size = sizeof(haddr_t) * num_entries;
if(NULL == (haddr_buf_ptr = (haddr_t *)H5MM_malloc(buf_size)))
HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for haddr buffer")
@@ -1523,7 +1521,7 @@ H5AC__receive_and_apply_clean_list(H5F_t *f, hid_t dxpl_id)
H5AC_t * cache_ptr;
H5AC_aux_t * aux_ptr;
haddr_t * haddr_buf_ptr = NULL;
- int num_entries = 0;
+ unsigned num_entries = 0;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -1543,7 +1541,7 @@ H5AC__receive_and_apply_clean_list(H5F_t *f, hid_t dxpl_id)
if(num_entries > 0)
/* mark the indicated entries as clean */
- if(H5C_mark_entries_as_clean(f, dxpl_id, (int32_t)num_entries, haddr_buf_ptr) < 0)
+ if(H5C_mark_entries_as_clean(f, dxpl_id, num_entries, haddr_buf_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't mark entries clean.")
/* if it is defined, call the sync point done callback. Note
@@ -1582,7 +1580,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5AC__receive_candidate_list(const H5AC_t *cache_ptr, int *num_entries_ptr,
+H5AC__receive_candidate_list(const H5AC_t *cache_ptr, unsigned *num_entries_ptr,
haddr_t **haddr_buf_ptr_ptr)
{
H5AC_aux_t * aux_ptr;
@@ -1667,7 +1665,7 @@ H5AC__rsp__dist_md_write__flush(H5F_t *f, hid_t dxpl_id)
H5AC_aux_t * aux_ptr;
haddr_t * haddr_buf_ptr = NULL;
int mpi_result;
- int num_entries = 0;
+ unsigned num_entries = 0;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -2224,11 +2222,11 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5AC__tidy_cache_0_lists(H5AC_t *cache_ptr, int num_candidates,
+H5AC__tidy_cache_0_lists(H5AC_t *cache_ptr, unsigned num_candidates,
haddr_t *candidates_list_ptr)
{
H5AC_aux_t * aux_ptr;
- int i;
+ unsigned u;
FUNC_ENTER_STATIC_NOERR
@@ -2249,12 +2247,12 @@ H5AC__tidy_cache_0_lists(H5AC_t *cache_ptr, int num_candidates,
* cleaned list. However, for this metadata write strategy,
* we just want to remove all references to the candidate entries.
*/
- for(i = 0; i < num_candidates; i++) {
+ for(u = 0; u < num_candidates; u++) {
H5AC_slist_entry_t * d_slist_entry_ptr;
H5AC_slist_entry_t * c_slist_entry_ptr;
haddr_t addr;
- addr = candidates_list_ptr[i];
+ addr = candidates_list_ptr[u];
/* addr may be either on the dirtied list, or on the flushed
* and still clean list. Remove it.
diff --git a/src/H5ACpkg.h b/src/H5ACpkg.h
index 3900ff1..ea7f0bf 100644
--- a/src/H5ACpkg.h
+++ b/src/H5ACpkg.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -351,6 +349,12 @@ H5FL_EXTERN(H5AC_aux_t);
* this verification. The field is set to NULL when the
* callback is not needed.
*
+ * The following field supports the metadata cache image feature.
+ *
+ * p0_image_len: unsigned integer containing the length of the metadata cache
+ * image constructed by MPI process 0. This field should be 0
+ * if the value is unknown, or if cache image is not enabled.
+ *
****************************************************************************/
#ifdef H5_HAVE_PARALLEL
@@ -398,8 +402,11 @@ typedef struct H5AC_aux_t
void (* write_done)(void);
- void (* sync_point_done)(int num_writes,
+ void (* sync_point_done)(unsigned num_writes,
haddr_t * written_entries_tbl);
+
+ unsigned p0_image_len;
+
} H5AC_aux_t; /* struct H5AC_aux_t */
#endif /* H5_HAVE_PARALLEL */
@@ -421,7 +428,7 @@ H5_DLL herr_t H5AC__log_moved_entry(const H5F_t *f, haddr_t old_addr,
H5_DLL herr_t H5AC__flush_entries(H5F_t *f, hid_t dxpl_id);
H5_DLL herr_t H5AC__run_sync_point(H5F_t *f, hid_t dxpl_id, int sync_point_op);
H5_DLL herr_t H5AC__set_sync_point_done_callback(H5C_t *cache_ptr,
- void (*sync_point_done)(int num_writes, haddr_t *written_entries_tbl));
+ void (*sync_point_done)(unsigned num_writes, haddr_t *written_entries_tbl));
H5_DLL herr_t H5AC__set_write_done_callback(H5C_t * cache_ptr,
void (* write_done)(void));
#endif /* H5_HAVE_PARALLEL */
@@ -452,6 +459,10 @@ H5_DLL herr_t H5AC__write_mark_dirty_entry_log_msg(const H5AC_t *cache,
herr_t fxn_ret_value);
H5_DLL herr_t H5AC__write_mark_clean_entry_log_msg(const H5AC_t *cache,
const H5AC_info_t *entry, herr_t fxn_ret_value);
+H5_DLL herr_t H5AC__write_mark_unserialized_entry_log_msg(const H5AC_t *cache,
+ const H5AC_info_t *entry, herr_t fxn_ret_value);
+H5_DLL herr_t H5AC__write_mark_serialized_entry_log_msg(const H5AC_t *cache,
+ const H5AC_info_t *entry, herr_t fxn_ret_value);
H5_DLL herr_t H5AC__write_move_entry_log_msg(const H5AC_t *cache,
haddr_t old_addr,
haddr_t new_addr,
diff --git a/src/H5ACprivate.h b/src/H5ACprivate.h
index 48b7c6b..b9e2a60 100644
--- a/src/H5ACprivate.h
+++ b/src/H5ACprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -59,36 +57,37 @@
/* Types of metadata objects cached */
typedef enum {
- H5AC_BT_ID = 0, /* ( 0) B-tree nodes */
- H5AC_SNODE_ID, /* ( 1) symbol table nodes */
- H5AC_LHEAP_PRFX_ID, /* ( 2) local heap prefix */
- H5AC_LHEAP_DBLK_ID, /* ( 3) local heap data block */
- H5AC_GHEAP_ID, /* ( 4) global heap */
- H5AC_OHDR_ID, /* ( 5) object header */
- H5AC_OHDR_CHK_ID, /* ( 6) object header chunk */
- H5AC_BT2_HDR_ID, /* ( 7) v2 B-tree header */
- H5AC_BT2_INT_ID, /* ( 8) v2 B-tree internal node */
- H5AC_BT2_LEAF_ID, /* ( 9) v2 B-tree leaf node */
- H5AC_FHEAP_HDR_ID, /* (10) fractal heap header */
- H5AC_FHEAP_DBLOCK_ID, /* (11) fractal heap direct block */
- H5AC_FHEAP_IBLOCK_ID, /* (12) fractal heap indirect block */
- H5AC_FSPACE_HDR_ID, /* (13) free space header */
- H5AC_FSPACE_SINFO_ID, /* (14) free space sections */
- H5AC_SOHM_TABLE_ID, /* (15) shared object header message master table */
- H5AC_SOHM_LIST_ID, /* (16) shared message index stored as a list */
- H5AC_EARRAY_HDR_ID, /* (17) extensible array header */
- H5AC_EARRAY_IBLOCK_ID, /* (18) extensible array index block */
- H5AC_EARRAY_SBLOCK_ID, /* (19) extensible array super block */
- H5AC_EARRAY_DBLOCK_ID, /* (20) extensible array data block */
- H5AC_EARRAY_DBLK_PAGE_ID, /* (21) extensible array data block page */
- H5AC_FARRAY_HDR_ID, /* (22) fixed array header */
- H5AC_FARRAY_DBLOCK_ID, /* (23) fixed array data block */
- H5AC_FARRAY_DBLK_PAGE_ID, /* (24) fixed array data block page */
- H5AC_SUPERBLOCK_ID, /* (25) file superblock */
- H5AC_DRVRINFO_ID, /* (26) driver info block (supplements superblock)*/
- H5AC_PROXY_ENTRY_ID, /* (27) cache entry proxy */
- H5AC_TEST_ID, /* (28) test entry -- not used for actual files */
- H5AC_NTYPES /* Number of types, must be last */
+ H5AC_BT_ID = 0, /* ( 0) B-tree nodes */
+ H5AC_SNODE_ID, /* ( 1) symbol table nodes */
+ H5AC_LHEAP_PRFX_ID, /* ( 2) local heap prefix */
+ H5AC_LHEAP_DBLK_ID, /* ( 3) local heap data block */
+ H5AC_GHEAP_ID, /* ( 4) global heap */
+ H5AC_OHDR_ID, /* ( 5) object header */
+ H5AC_OHDR_CHK_ID, /* ( 6) object header chunk */
+ H5AC_BT2_HDR_ID, /* ( 7) v2 B-tree header */
+ H5AC_BT2_INT_ID, /* ( 8) v2 B-tree internal node */
+ H5AC_BT2_LEAF_ID, /* ( 9) v2 B-tree leaf node */
+ H5AC_FHEAP_HDR_ID, /* (10) fractal heap header */
+ H5AC_FHEAP_DBLOCK_ID, /* (11) fractal heap direct block */
+ H5AC_FHEAP_IBLOCK_ID, /* (12) fractal heap indirect block */
+ H5AC_FSPACE_HDR_ID, /* (13) free space header */
+ H5AC_FSPACE_SINFO_ID, /* (14) free space sections */
+ H5AC_SOHM_TABLE_ID, /* (15) shared object header message master table */
+ H5AC_SOHM_LIST_ID, /* (16) shared message index stored as a list */
+ H5AC_EARRAY_HDR_ID, /* (17) extensible array header */
+ H5AC_EARRAY_IBLOCK_ID, /* (18) extensible array index block */
+ H5AC_EARRAY_SBLOCK_ID, /* (19) extensible array super block */
+ H5AC_EARRAY_DBLOCK_ID, /* (20) extensible array data block */
+ H5AC_EARRAY_DBLK_PAGE_ID, /* (21) extensible array data block page */
+ H5AC_FARRAY_HDR_ID, /* (22) fixed array header */
+ H5AC_FARRAY_DBLOCK_ID, /* (23) fixed array data block */
+ H5AC_FARRAY_DBLK_PAGE_ID, /* (24) fixed array data block page */
+ H5AC_SUPERBLOCK_ID, /* (25) file superblock */
+ H5AC_DRVRINFO_ID, /* (26) driver info block (supplements superblock) */
+ H5AC_EPOCH_MARKER_ID, /* (27) epoch marker - always internal to cache */
+ H5AC_PROXY_ENTRY_ID, /* (28) cache entry proxy */
+ H5AC_PREFETCHED_ENTRY_ID, /* (29) prefetched entry - always internal to cache */
+ H5AC_NTYPES /* Number of types, must be last */
} H5AC_type_t;
/* H5AC_DUMP_STATS_ON_CLOSE should always be FALSE when
@@ -110,14 +109,22 @@ typedef enum {
* use the dump_stats parameter to takedown_cache(), or call
* H5C_stats() directly.
* JRM -- 4/12/15
+ *
+ * Added the H5AC_DUMP_IMAGE_STATS_ON_CLOSE #define, which works much
+ * the same way as H5AC_DUMP_STATS_ON_CLOSE. However, the set of stats
+ * displayed is much smaller, and directed purely at the cache image feature.
+ *
+ * JRM -- 11/1/15
*/
#if H5C_COLLECT_CACHE_STATS
#define H5AC_DUMP_STATS_ON_CLOSE 0
+#define H5AC_DUMP_IMAGE_STATS_ON_CLOSE 0
#else /* H5C_COLLECT_CACHE_STATS */
#define H5AC_DUMP_STATS_ON_CLOSE 0
+#define H5AC_DUMP_IMAGE_STATS_ON_CLOSE 0
#endif /* H5C_COLLECT_CACHE_STATS */
@@ -152,8 +159,9 @@ typedef enum {
/* Aliases for the "ring" type and values */
typedef H5C_ring_t H5AC_ring_t;
#define H5AC_RING_INV H5C_RING_UNDEFINED
-#define H5AC_RING_US H5C_RING_USER
-#define H5AC_RING_FSM H5C_RING_FSM
+#define H5AC_RING_USER H5C_RING_USER
+#define H5AC_RING_RDFSM H5C_RING_RDFSM
+#define H5AC_RING_MDFSM H5C_RING_MDFSM
#define H5AC_RING_SBE H5C_RING_SBE
#define H5AC_RING_SB H5C_RING_SB
#define H5AC_RING_NTYPES H5C_RING_NTYPES
@@ -168,13 +176,15 @@ typedef H5C_notify_action_t H5AC_notify_action_t;
#define H5AC_NOTIFY_ACTION_ENTRY_CLEANED H5C_NOTIFY_ACTION_ENTRY_CLEANED
#define H5AC_NOTIFY_ACTION_CHILD_DIRTIED H5C_NOTIFY_ACTION_CHILD_DIRTIED
#define H5AC_NOTIFY_ACTION_CHILD_CLEANED H5C_NOTIFY_ACTION_CHILD_CLEANED
+#define H5AC_NOTIFY_ACTION_CHILD_UNSERIALIZED H5C_NOTIFY_ACTION_CHILD_UNSERIALIZED
+#define H5AC_NOTIFY_ACTION_CHILD_SERIALIZED H5C_NOTIFY_ACTION_CHILD_SERIALIZED
#define H5AC__CLASS_NO_FLAGS_SET H5C__CLASS_NO_FLAGS_SET
#define H5AC__CLASS_SPECULATIVE_LOAD_FLAG H5C__CLASS_SPECULATIVE_LOAD_FLAG
/* The following flags should only appear in test code */
-#define H5AC__CLASS_SKIP_READS H5C__CLASS_SKIP_READS
-#define H5AC__CLASS_SKIP_WRITES H5C__CLASS_SKIP_WRITES
+#define H5AC__CLASS_SKIP_READS H5C__CLASS_SKIP_READS
+#define H5AC__CLASS_SKIP_WRITES H5C__CLASS_SKIP_WRITES
typedef H5C_get_initial_load_size_func_t H5AC_get_initial_load_size_func_t;
typedef H5C_get_final_load_size_func_t H5AC_get_final_load_size_func_t;
@@ -216,6 +226,8 @@ typedef struct H5AC_proxy_entry_t {
size_t nchildren; /* Number of children */
size_t ndirty_children; /* Number of dirty children */
/* (Note that this currently duplicates some cache functionality) */
+ size_t nunser_children; /* Number of unserialized children */
+ /* (Note that this currently duplicates some cache functionality) */
} H5AC_proxy_entry_t;
@@ -313,7 +325,13 @@ H5_DLLVAR hid_t H5AC_rawdata_dxpl_id;
}
#endif /* H5_HAVE_PARALLEL */
-
+#define H5AC__DEFAULT_CACHE_IMAGE_CONFIG \
+{ \
+ /* int32_t version = */ H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION, \
+ /* hbool_t generate_image = */ FALSE, \
+ /* hbool_t save_resize_status = */ FALSE, \
+ /* int32_t entry_ageout = */ H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE \
+}
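
The new initializer is meant to be used the same way as the existing default-config macros: start from the compile-time defaults, adjust the fields of interest, then hand the struct to validation or to the extended H5AC_create() shown further down. A minimal sketch, where cache_config is an assumed, already-populated H5AC_cache_config_t and the routine has the usual FUNC_ENTER/done scaffolding:

    H5AC_cache_image_config_t image_config = H5AC__DEFAULT_CACHE_IMAGE_CONFIG;

    image_config.generate_image = TRUE;   /* request a cache image */
    if(H5AC_validate_cache_image_config(&image_config) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "invalid cache image config")
    if(H5AC_create(f, &cache_config, &image_config) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "can't create metadata cache")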
/*
* Library prototypes.
*/
@@ -338,31 +356,65 @@ H5_DLLVAR hid_t H5AC_rawdata_dxpl_id;
#define H5AC__TAKE_OWNERSHIP_FLAG H5C__TAKE_OWNERSHIP_FLAG
#define H5AC__FLUSH_LAST_FLAG H5C__FLUSH_LAST_FLAG
#define H5AC__FLUSH_COLLECTIVELY_FLAG H5C__FLUSH_COLLECTIVELY_FLAG
-#define H5AC__EVICT_ALLOW_LAST_PINS_FLAG H5C__EVICT_ALLOW_LAST_PINS_FLAG
/* #defines of flags used to report entry status in the
* H5AC_get_entry_status() call.
*/
-#define H5AC_ES__IN_CACHE 0x0001
-#define H5AC_ES__IS_DIRTY 0x0002
-#define H5AC_ES__IS_PROTECTED 0x0004
-#define H5AC_ES__IS_PINNED 0x0008
-#define H5AC_ES__IS_FLUSH_DEP_PARENT 0x0010
-#define H5AC_ES__IS_FLUSH_DEP_CHILD 0x0020
-#define H5AC_ES__IS_CORKED 0x0040
+#define H5AC_ES__IN_CACHE 0x0001
+#define H5AC_ES__IS_DIRTY 0x0002
+#define H5AC_ES__IS_PROTECTED 0x0004
+#define H5AC_ES__IS_PINNED 0x0008
+#define H5AC_ES__IS_FLUSH_DEP_PARENT 0x0010
+#define H5AC_ES__IS_FLUSH_DEP_CHILD 0x0020
+#define H5AC_ES__IS_CORKED 0x0040
+#define H5AC_ES__IMAGE_IS_UP_TO_DATE 0x0080
+
+/* Metadata entry class declarations */
+H5_DLLVAR const H5AC_class_t H5AC_BT[1];
+H5_DLLVAR const H5AC_class_t H5AC_SNODE[1];
+H5_DLLVAR const H5AC_class_t H5AC_LHEAP_PRFX[1];
+H5_DLLVAR const H5AC_class_t H5AC_LHEAP_DBLK[1];
+H5_DLLVAR const H5AC_class_t H5AC_GHEAP[1];
+H5_DLLVAR const H5AC_class_t H5AC_OHDR[1];
+H5_DLLVAR const H5AC_class_t H5AC_OHDR_CHK[1];
+H5_DLLVAR const H5AC_class_t H5AC_BT2_HDR[1];
+H5_DLLVAR const H5AC_class_t H5AC_BT2_INT[1];
+H5_DLLVAR const H5AC_class_t H5AC_BT2_LEAF[1];
+H5_DLLVAR const H5AC_class_t H5AC_FHEAP_HDR[1];
+H5_DLLVAR const H5AC_class_t H5AC_FHEAP_DBLOCK[1];
+H5_DLLVAR const H5AC_class_t H5AC_FHEAP_IBLOCK[1];
+H5_DLLVAR const H5AC_class_t H5AC_FSPACE_HDR[1];
+H5_DLLVAR const H5AC_class_t H5AC_FSPACE_SINFO[1];
+H5_DLLVAR const H5AC_class_t H5AC_SOHM_TABLE[1];
+H5_DLLVAR const H5AC_class_t H5AC_SOHM_LIST[1];
+H5_DLLVAR const H5AC_class_t H5AC_EARRAY_HDR[1];
+H5_DLLVAR const H5AC_class_t H5AC_EARRAY_IBLOCK[1];
+H5_DLLVAR const H5AC_class_t H5AC_EARRAY_SBLOCK[1];
+H5_DLLVAR const H5AC_class_t H5AC_EARRAY_DBLOCK[1];
+H5_DLLVAR const H5AC_class_t H5AC_EARRAY_DBLK_PAGE[1];
+H5_DLLVAR const H5AC_class_t H5AC_FARRAY_HDR[1];
+H5_DLLVAR const H5AC_class_t H5AC_FARRAY_DBLOCK[1];
+H5_DLLVAR const H5AC_class_t H5AC_FARRAY_DBLK_PAGE[1];
+H5_DLLVAR const H5AC_class_t H5AC_SUPERBLOCK[1];
+H5_DLLVAR const H5AC_class_t H5AC_DRVRINFO[1];
+H5_DLLVAR const H5AC_class_t H5AC_EPOCH_MARKER[1];
+H5_DLLVAR const H5AC_class_t H5AC_PROXY_ENTRY[1];
+H5_DLLVAR const H5AC_class_t H5AC_PREFETCHED_ENTRY[1];
/* external function declarations: */
H5_DLL herr_t H5AC_init(void);
-H5_DLL herr_t H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr);
+H5_DLL herr_t H5AC_create(const H5F_t *f, H5AC_cache_config_t *config_ptr,
+ H5AC_cache_image_config_t * image_config_ptr);
H5_DLL herr_t H5AC_get_entry_status(const H5F_t *f, haddr_t addr,
unsigned *status_ptr);
H5_DLL herr_t H5AC_insert_entry(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type,
haddr_t addr, void *thing, unsigned int flags);
H5_DLL herr_t H5AC_pin_protected_entry(void *thing);
+H5_DLL herr_t H5AC_prep_for_file_close(H5F_t *f, hid_t dxpl_id);
H5_DLL herr_t H5AC_create_flush_dependency(void *parent_thing, void *child_thing);
H5_DLL void * H5AC_protect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type,
haddr_t addr, void *udata, unsigned flags);
@@ -374,6 +426,8 @@ H5_DLL herr_t H5AC_unprotect(H5F_t *f, hid_t dxpl_id, const H5AC_class_t *type,
H5_DLL herr_t H5AC_flush(H5F_t *f, hid_t dxpl_id);
H5_DLL herr_t H5AC_mark_entry_dirty(void *thing);
H5_DLL herr_t H5AC_mark_entry_clean(void *thing);
+H5_DLL herr_t H5AC_mark_entry_unserialized(void *thing);
+H5_DLL herr_t H5AC_mark_entry_serialized(void *thing);
H5_DLL herr_t H5AC_move_entry(H5F_t *f, const H5AC_class_t *type,
haddr_t old_addr, haddr_t new_addr, hid_t dxpl_id);
H5_DLL herr_t H5AC_dest(H5F_t *f, hid_t dxpl_id);
@@ -384,13 +438,22 @@ H5_DLL herr_t H5AC_remove_entry(void *entry);
H5_DLL herr_t H5AC_get_cache_auto_resize_config(const H5AC_t * cache_ptr,
H5AC_cache_config_t *config_ptr);
H5_DLL herr_t H5AC_get_cache_size(H5AC_t *cache_ptr, size_t *max_size_ptr,
- size_t *min_clean_size_ptr, size_t *cur_size_ptr, int32_t *cur_num_entries_ptr);
+ size_t *min_clean_size_ptr, size_t *cur_size_ptr, uint32_t *cur_num_entries_ptr);
H5_DLL herr_t H5AC_get_cache_hit_rate(H5AC_t *cache_ptr, double *hit_rate_ptr);
H5_DLL herr_t H5AC_reset_cache_hit_rate_stats(H5AC_t *cache_ptr);
H5_DLL herr_t H5AC_set_cache_auto_resize_config(H5AC_t *cache_ptr,
H5AC_cache_config_t *config_ptr);
H5_DLL herr_t H5AC_validate_config(H5AC_cache_config_t *config_ptr);
+/* Cache image routines */
+H5_DLL herr_t H5AC_load_cache_image_on_next_protect(H5F_t *f, haddr_t addr,
+ hsize_t len, hbool_t rw);
+H5_DLL herr_t H5AC_validate_cache_image_config(H5AC_cache_image_config_t *config_ptr);
+H5_DLL hbool_t H5AC_cache_image_pending(const H5F_t *f);
+H5_DLL herr_t H5AC_force_cache_image_load(H5F_t * f, hid_t dxpl_id);
+H5_DLL herr_t H5AC_get_mdc_image_info(H5AC_t *cache_ptr, haddr_t *image_addr,
+ hsize_t *image_len);
+
/* Tag & Ring routines */
H5_DLL herr_t H5AC_tag(hid_t dxpl_id, haddr_t metadata_tag, haddr_t *prev_tag);
H5_DLL herr_t H5AC_flush_tagged_metadata(H5F_t * f, haddr_t metadata_tag, hid_t dxpl_id);
@@ -402,6 +465,8 @@ H5_DLL herr_t H5AC_get_entry_ring(const H5F_t *f, haddr_t addr, H5AC_ring_t *rin
H5_DLL herr_t H5AC_set_ring(hid_t dxpl_id, H5AC_ring_t ring, H5P_genplist_t **dxpl,
H5AC_ring_t *orig_ring);
H5_DLL herr_t H5AC_reset_ring(H5P_genplist_t *dxpl, H5AC_ring_t orig_ring);
+H5_DLL herr_t H5AC_unsettle_entry_ring(void *entry);
+H5_DLL herr_t H5AC_unsettle_ring(H5F_t * f, H5AC_ring_t ring);
H5_DLL herr_t H5AC_expunge_tag_type_metadata(H5F_t *f, hid_t dxpl_id, haddr_t tag, int type_id, unsigned flags);
H5_DLL herr_t H5AC_get_tag(const void *thing, /*OUT*/ haddr_t *tag);
@@ -420,7 +485,18 @@ H5_DLL herr_t H5AC_add_candidate(H5AC_t * cache_ptr, haddr_t addr);
/* Debugging functions */
H5_DLL herr_t H5AC_stats(const H5F_t *f);
+#ifndef NDEBUG
H5_DLL herr_t H5AC_dump_cache(const H5F_t *f);
+H5_DLL herr_t H5AC_get_entry_ptr_from_addr(const H5F_t *f, haddr_t addr,
+ void **entry_ptr_ptr);
+H5_DLL herr_t H5AC_flush_dependency_exists(H5F_t *f, haddr_t parent_addr,
+ haddr_t child_addr, hbool_t *fd_exists_ptr);
+H5_DLL herr_t H5AC_verify_entry_type(const H5F_t *f, haddr_t addr,
+ const H5AC_class_t *expected_type, hbool_t *in_cache_ptr,
+ hbool_t *type_ok_ptr);
+H5_DLL hbool_t H5AC_get_serialization_in_progress(H5F_t *f);
+H5_DLL hbool_t H5AC_cache_is_clean(const H5F_t *f, H5AC_ring_t inner_ring);
+#endif /* NDEBUG */ /* end debugging functions */
#endif /* !_H5ACprivate_H */
diff --git a/src/H5ACproxy_entry.c b/src/H5ACproxy_entry.c
index 66aacb3..105a531 100644
--- a/src/H5ACproxy_entry.c
+++ b/src/H5ACproxy_entry.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -312,6 +310,10 @@ H5AC_proxy_entry_add_child(H5AC_proxy_entry_t *pentry, H5F_t *f, hid_t dxpl_id,
if(H5AC_mark_entry_clean(pentry) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTCLEAN, FAIL, "can't mark proxy entry clean")
+ /* Proxies start out serialized (insertions are automatically marked unserialized) */
+ if(H5AC_mark_entry_serialized(pentry) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "can't mark proxy entry serialized")
+
/* If there are currently parents, iterate over the list of parents, creating flush dependency on them */
if(pentry->parents)
if(H5SL_iterate(pentry->parents, H5AC__proxy_entry_add_child_cb, pentry) < 0)
@@ -438,6 +440,7 @@ H5AC_proxy_entry_dest(H5AC_proxy_entry_t *pentry)
HDassert(NULL == pentry->parents);
HDassert(0 == pentry->nchildren);
HDassert(0 == pentry->ndirty_children);
+ HDassert(0 == pentry->nunser_children);
/* Free the proxy entry object */
pentry = H5FL_FREE(H5AC_proxy_entry_t, pentry);
@@ -549,6 +552,7 @@ H5AC__proxy_entry_notify(H5AC_notify_action_t action, void *_thing)
case H5AC_NOTIFY_ACTION_BEFORE_EVICT:
/* Sanity checks */
HDassert(0 == pentry->ndirty_children);
+ HDassert(0 == pentry->nunser_children);
/* No action */
break;
@@ -590,6 +594,29 @@ H5AC__proxy_entry_notify(H5AC_notify_action_t action, void *_thing)
HGOTO_ERROR(H5E_CACHE, H5E_CANTCLEAN, FAIL, "can't mark proxy entry clean")
break;
+ case H5AC_NOTIFY_ACTION_CHILD_UNSERIALIZED:
+ /* Increment # of unserialized children */
+ pentry->nunser_children++;
+
+ /* Check for first unserialized child */
+ if(1 == pentry->nunser_children)
+ if(H5AC_mark_entry_unserialized(pentry) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNSERIALIZE, FAIL, "can't mark proxy entry unserialized")
+ break;
+
+ case H5AC_NOTIFY_ACTION_CHILD_SERIALIZED:
+ /* Sanity check */
+ HDassert(pentry->nunser_children > 0);
+
+ /* Decrement # of unserialized children */
+ pentry->nunser_children--;
+
+ /* Check for last unserialized child */
+ if(0 == pentry->nunser_children)
+ if(H5AC_mark_entry_serialized(pentry) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "can't mark proxy entry serialized")
+ break;
+
default:
#ifdef NDEBUG
HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "unknown notify action from metadata cache")
diff --git a/src/H5ACpublic.h b/src/H5ACpublic.h
index dd16764..654a877 100644
--- a/src/H5ACpublic.h
+++ b/src/H5ACpublic.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -70,65 +68,65 @@ extern "C" {
* version number, or an error will be flagged.
*
* rpt_fcn_enabled: Boolean field used to enable and disable the default
- * reporting function. This function is invoked every time the
- * automatic cache resize code is run, and reports on its activities.
+ * reporting function. This function is invoked every time the
+ * automatic cache resize code is run, and reports on its activities.
*
- * This is a debugging function, and should normally be turned off.
+ * This is a debugging function, and should normally be turned off.
*
* open_trace_file: Boolean field indicating whether the trace_file_name
- * field should be used to open a trace file for the cache.
+ * field should be used to open a trace file for the cache.
*
* *** DEPRECATED *** Use H5Fstart/stop logging functions instead
*
- * The trace file is a debuging feature that allow the capture of
- * top level metadata cache requests for purposes of debugging and/or
- * optimization. This field should normally be set to FALSE, as
- * trace file collection imposes considerable overhead.
+ * The trace file is a debugging feature that allows the capture of
+ * top level metadata cache requests for purposes of debugging and/or
+ * optimization. This field should normally be set to FALSE, as
+ * trace file collection imposes considerable overhead.
*
- * This field should only be set to TRUE when the trace_file_name
- * contains the full path of the desired trace file, and either
- * there is no open trace file on the cache, or the close_trace_file
- * field is also TRUE.
+ * This field should only be set to TRUE when the trace_file_name
+ * contains the full path of the desired trace file, and either
+ * there is no open trace file on the cache, or the close_trace_file
+ * field is also TRUE.
*
* close_trace_file: Boolean field indicating whether the current trace
- * file (if any) should be closed.
+ * file (if any) should be closed.
*
* *** DEPRECATED *** Use H5Fstart/stop logging functions instead
*
- * See the above comments on the open_trace_file field. This field
- * should be set to FALSE unless there is an open trace file on the
- * cache that you wish to close.
+ * See the above comments on the open_trace_file field. This field
+ * should be set to FALSE unless there is an open trace file on the
+ * cache that you wish to close.
*
* trace_file_name: Full path of the trace file to be opened if the
- * open_trace_file field is TRUE.
+ * open_trace_file field is TRUE.
*
* *** DEPRECATED *** Use H5Fstart/stop logging functions instead
*
- * In the parallel case, an ascii representation of the mpi rank of
- * the process will be appended to the file name to yield a unique
- * trace file name for each process.
+ * In the parallel case, an ASCII representation of the MPI rank of
+ * the process will be appended to the file name to yield a unique
+ * trace file name for each process.
*
- * The length of the path must not exceed H5AC__MAX_TRACE_FILE_NAME_LEN
- * characters.
+ * The length of the path must not exceed H5AC__MAX_TRACE_FILE_NAME_LEN
+ * characters.
*
* evictions_enabled: Boolean field used to either report the current
- * evictions enabled status of the cache, or to set the cache's
- * evictions enabled status.
- *
- * In general, the metadata cache should always be allowed to
- * evict entries. However, in some cases it is advantageous to
- * disable evictions briefly, and thereby postpone metadata
- * writes. However, this must be done with care, as the cache
- * can grow quickly. If you do this, re-enable evictions as
- * soon as possible and monitor cache size.
- *
- * At present, evictions can only be disabled if automatic
- * cache resizing is also disabled (that is, ( incr_mode ==
- * H5C_incr__off ) && ( decr_mode == H5C_decr__off )). There
- * is no logical reason why this should be so, but it simplifies
- * implementation and testing, and I can't think of any reason
- * why it would be desireable. If you can think of one, I'll
- * revisit the issue.
+ * evictions enabled status of the cache, or to set the cache's
+ * evictions enabled status.
+ *
+ * In general, the metadata cache should always be allowed to
+ * evict entries. However, in some cases it is advantageous to
+ * disable evictions briefly, and thereby postpone metadata
+ * writes. However, this must be done with care, as the cache
+ * can grow quickly. If you do this, re-enable evictions as
+ * soon as possible and monitor cache size.
+ *
+ * At present, evictions can only be disabled if automatic
+ * cache resizing is also disabled (that is, ( incr_mode ==
+ * H5C_incr__off ) && ( decr_mode == H5C_decr__off )). There
+ * is no logical reason why this should be so, but it simplifies
+ * implementation and testing, and I can't think of any reason
+ * why it would be desirable. If you can think of one, I'll
+ * revisit the issue.
*
* set_initial_size: Boolean flag indicating whether the size of the
* initial size of the cache is to be set to the value given in
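[A minimal sketch of the evictions_enabled constraint described above, as an application would exercise it through the file-access property list via H5Pget_mdc_config/H5Pset_mdc_config. Illustrative only, under the assumption that the standard property-list route applies; it is not part of this patch.]

    #include "hdf5.h"

    /* Illustrative only: disable evictions, which requires that automatic
     * cache resizing be switched off first (see the field notes above). */
    static void example_disable_evictions(hid_t fapl)
    {
        H5AC_cache_config_t cfg;

        cfg.version = H5AC__CURR_CACHE_CONFIG_VERSION;  /* must be set before the get */
        H5Pget_mdc_config(fapl, &cfg);

        cfg.incr_mode         = H5C_incr__off;          /* no automatic size increases */
        cfg.flash_incr_mode   = H5C_flash_incr__off;    /* conservatively, no flash increases either */
        cfg.decr_mode         = H5C_decr__off;          /* no automatic size decreases */
        cfg.evictions_enabled = 0;                      /* FALSE: postpone metadata writes */

        H5Pset_mdc_config(fapl, &cfg);
        /* ... re-enable evictions as soon as possible and monitor cache size ... */
    }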
@@ -368,80 +366,80 @@ extern "C" {
*
* PHDF5 uses several strategies to prevent such inconsistencies in metadata,
* all of which use the fact that the same stream of dirty metadata is seen
- * by all processes for purposes of synchronization. This is done by
+ * by all processes for purposes of synchronization. This is done by
* having each process count the number of bytes of dirty metadata generated,
- * and then running a "sync point" whenever this count exceeds a user
+ * and then running a "sync point" whenever this count exceeds a user
* specified threshold (see dirty_bytes_threshold below).
*
- * The current metadata write strategy is indicated by the
+ * The current metadata write strategy is indicated by the
* metadata_write_strategy field. The possible values of this field, along
* with the associated metadata write strategies are discussed below.
*
* dirty_bytes_threshold: Threshold of dirty byte creation used to
- * synchronize updates between caches. (See above for outline and
- * motivation.)
+ * synchronize updates between caches. (See above for outline and
+ * motivation.)
*
- * This value MUST be consistant across all processes accessing the
- * file. This field is ignored unless HDF5 has been compiled for
- * parallel.
+ * This value MUST be consistent across all processes accessing the
+ * file. This field is ignored unless HDF5 has been compiled for
+ * parallel.
*
* metadata_write_strategy: Integer field containing a code indicating the
- * desired metadata write strategy. The valid values of this field
- * are enumerated and discussed below:
+ * desired metadata write strategy. The valid values of this field
+ * are enumerated and discussed below:
*
*
- * H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
+ * H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
*
- * When metadata_write_strategy is set to this value, only process
- * zero is allowed to write dirty metadata to disk. All other
- * processes must retain dirty metadata until they are informed at
- * a sync point that the dirty metadata in question has been written
- * to disk.
+ * When metadata_write_strategy is set to this value, only process
+ * zero is allowed to write dirty metadata to disk. All other
+ * processes must retain dirty metadata until they are informed at
+ * a sync point that the dirty metadata in question has been written
+ * to disk.
*
- * When the sync point is reached (or when there is a user generated
- * flush), process zero flushes sufficient entries to bring it into
- * complience with its min clean size (or flushes all dirty entries in
- * the case of a user generated flush), broad casts the list of
- * entries just cleaned to all the other processes, and then exits
- * the sync point.
+ * When the sync point is reached (or when there is a user generated
+ * flush), process zero flushes sufficient entries to bring it into
+ * compliance with its min clean size (or flushes all dirty entries in
+ * the case of a user generated flush), broadcasts the list of
+ * entries just cleaned to all the other processes, and then exits
+ * the sync point.
*
- * Upon receipt of the broadcast, the other processes mark the indicated
- * entries as clean, and leave the sync point as well.
+ * Upon receipt of the broadcast, the other processes mark the indicated
+ * entries as clean, and leave the sync point as well.
*
*
- * H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
+ * H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
*
- * In the distributed metadata write strategy, process zero still makes
- * the decisions as to what entries should be flushed, but the actual
- * flushes are distributed across the processes in the computation to
- * the extent possible.
+ * In the distributed metadata write strategy, process zero still makes
+ * the decisions as to what entries should be flushed, but the actual
+ * flushes are distributed across the processes in the computation to
+ * the extent possible.
*
- * In this strategy, when a sync point is triggered (either by dirty
- * metadata creation or manual flush), all processes enter a barrier.
+ * In this strategy, when a sync point is triggered (either by dirty
+ * metadata creation or manual flush), all processes enter a barrier.
*
- * On the other side of the barrier, process 0 constructs an ordered
- * list of the entries to be flushed, and then broadcasts this list
- * to the caches in all the processes.
+ * On the other side of the barrier, process 0 constructs an ordered
+ * list of the entries to be flushed, and then broadcasts this list
+ * to the caches in all the processes.
*
- * All processes then scan the list of entries to be flushed, flushing
- * some, and marking the rest as clean. The algorithm for this purpose
- * ensures that each entry in the list is flushed exactly once, and
- * all are marked clean in each cache.
+ * All processes then scan the list of entries to be flushed, flushing
+ * some, and marking the rest as clean. The algorithm for this purpose
+ * ensures that each entry in the list is flushed exactly once, and
+ * all are marked clean in each cache.
*
- * Note that in the case of a flush of the cache, no message passing
- * is necessary, as all processes have the same list of dirty entries,
- * and all of these entries must be flushed. Thus in this case it is
- * sufficient for each process to sort its list of dirty entries after
- * leaving the initial barrier, and use this list as if it had been
- * received from process zero.
+ * Note that in the case of a flush of the cache, no message passing
+ * is necessary, as all processes have the same list of dirty entries,
+ * and all of these entries must be flushed. Thus in this case it is
+ * sufficient for each process to sort its list of dirty entries after
+ * leaving the initial barrier, and use this list as if it had been
+ * received from process zero.
+ *
+ * To avoid possible messages from the past/future, all caches must
+ * wait until all caches are done before leaving the sync point.
*
- * To avoid possible messages from the past/future, all caches must
- * wait until all caches are done before leaving the sync point.
- *
****************************************************************************/
-#define H5AC__CURR_CACHE_CONFIG_VERSION 1
-#define H5AC__MAX_TRACE_FILE_NAME_LEN 1024
+#define H5AC__CURR_CACHE_CONFIG_VERSION 1
+#define H5AC__MAX_TRACE_FILE_NAME_LEN 1024
#define H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY 0
#define H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED 1
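[A short sketch of how these two strategy macros and dirty_bytes_threshold are typically applied, reusing the get/modify/set pattern from the previous sketch. Illustrative only; the threshold value shown is simply the documented default, not something mandated by this patch.]

    #include "hdf5.h"

    /* Illustrative only: select the distributed write strategy and a
     * sync-point threshold on a file-access property list. */
    static void example_set_write_strategy(hid_t fapl)
    {
        H5AC_cache_config_t cfg;

        cfg.version = H5AC__CURR_CACHE_CONFIG_VERSION;
        H5Pget_mdc_config(fapl, &cfg);

        cfg.dirty_bytes_threshold   = 256 * 1024;   /* must match on every process */
        cfg.metadata_write_strategy = H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED;

        H5Pset_mdc_config(fapl, &cfg);
    }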
@@ -451,9 +449,9 @@ typedef struct H5AC_cache_config_t
/* general configuration fields: */
int version;
- hbool_t rpt_fcn_enabled;
+ hbool_t rpt_fcn_enabled;
- hbool_t open_trace_file;
+ hbool_t open_trace_file;
hbool_t close_trace_file;
char trace_file_name[H5AC__MAX_TRACE_FILE_NAME_LEN + 1];
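[Since the trace-file fields above are marked deprecated in favor of the H5F start/stop logging calls, a rough sketch of that replacement path follows. The names H5Pset_mdc_log_options, H5Fstart_mdc_logging, and H5Fstop_mdc_logging and their argument orders are assumed from the 1.10 public API rather than taken from this patch.]

    #include "hdf5.h"

    /* Illustrative only: metadata cache logging as the replacement for the
     * deprecated trace-file fields. */
    static void example_mdc_logging(void)
    {
        hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
        hid_t file;

        /* Enable logging on the access list; the last argument 0 means
         * "do not start logging automatically when the file is opened". */
        H5Pset_mdc_log_options(fapl, 1, "mdc_log.txt", 0);

        file = H5Fopen("example.h5", H5F_ACC_RDWR, fapl);

        H5Fstart_mdc_logging(file);     /* begin capturing cache operations */
        /* ... metadata-heavy work to be traced ... */
        H5Fstop_mdc_logging(file);

        H5Fclose(file);
        H5Pclose(fapl);
    }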
@@ -508,6 +506,67 @@ typedef struct H5AC_cache_config_t
} H5AC_cache_config_t;
+/****************************************************************************
+ *
+ * structure H5AC_cache_image_config_t
+ *
+ * H5AC_cache_image_config_t is a public structure intended for use in public
+ * APIs. At least in its initial incarnation, it is a copy of struct
+ * H5C_cache_image_ctl_t.
+ *
+ * The fields of the structure are discussed individually below:
+ *
+ * version: Integer field containing the version number of this version
+ * of the H5AC_cache_image_config_t structure. Any instance of
+ * H5AC_cache_image_config_t passed to the cache must have a known
+ * version number, or an error will be flagged.
+ *
+ * generate_image: Boolean flag indicating whether a cache image should
+ * be created on file close.
+ *
+ * save_resize_status: Boolean flag indicating whether the cache image
+ * should include the adaptive cache resize configuration and status.
+ * Note that this field is ignored at present.
+ *
+ * entry_ageout: Integer field indicating the maximum number of
+ * times a prefetched entry can appear in subsequent cache images.
+ * This field exists to allow the user to avoid the buildup of
+ * infrequently used entries in long sequences of cache images.
+ *
+ * The value of this field must lie in the range
+ * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE (-1) to
+ * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX (100).
+ *
+ * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE means that no limit
+ * is imposed on the number of times a prefetched entry can appear
+ * in subsequent cache images.
+ *
+ * A value of 0 prevents prefetched entries from being included
+ * in cache images.
+ *
+ * Positive integers restrict prefetched entries to the specified
+ * number of appearances.
+ *
+ * Note that the number of subsequent cache images that a prefetched
+ * entry has appeared in is tracked in an 8-bit field. Thus, while
+ * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX can be increased from its
+ * current value, any value in excess of 255 will be the functional
+ * equivalent of H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE.
+ *
+ ****************************************************************************/
+
+#define H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION 1
+
+#define H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE -1
+#define H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX 100
+
+typedef struct H5AC_cache_image_config_t {
+ int version;
+ hbool_t generate_image;
+ hbool_t save_resize_status;
+ int entry_ageout;
+} H5AC_cache_image_config_t;
+
#ifdef __cplusplus
}
#endif
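[A minimal sketch of requesting a cache image with the new H5AC_cache_image_config_t structure introduced above. The companion call H5Pset_mdc_image_config is assumed to exist alongside this structure; it does not appear in this patch.]

    #include "hdf5.h"

    /* Illustrative only: ask for a metadata cache image to be written when
     * the file is closed. */
    static void example_request_cache_image(hid_t fapl)
    {
        H5AC_cache_image_config_t img_cfg;

        img_cfg.version            = H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION;
        img_cfg.generate_image     = 1;  /* TRUE: write an image at close        */
        img_cfg.save_resize_status = 0;  /* ignored at present (see notes above) */
        img_cfg.entry_ageout       = H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE;

        /* Assumed API: attach the image configuration to the file-access list */
        H5Pset_mdc_image_config(fapl, &img_cfg);
    }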
diff --git a/src/H5Abtree2.c b/src/H5Abtree2.c
index a9c77d2..ed67e0f 100644
--- a/src/H5Abtree2.c
+++ b/src/H5Abtree2.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Adense.c b/src/H5Adense.c
index 3dc3a42..b1903a4 100644
--- a/src/H5Adense.c
+++ b/src/H5Adense.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Adeprec.c b/src/H5Adeprec.c
index a4713a7..9221254 100644
--- a/src/H5Adeprec.c
+++ b/src/H5Adeprec.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Aint.c b/src/H5Aint.c
index e31cd83..160c7fb 100644
--- a/src/H5Aint.c
+++ b/src/H5Aint.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Amodule.h b/src/H5Amodule.h
index e347fe1..8ed056b 100644
--- a/src/H5Amodule.h
+++ b/src/H5Amodule.h
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Apkg.h b/src/H5Apkg.h
index 5858147..6d5a83a 100644
--- a/src/H5Apkg.h
+++ b/src/H5Apkg.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Aprivate.h b/src/H5Aprivate.h
index 6b62692..b285920 100644
--- a/src/H5Aprivate.h
+++ b/src/H5Aprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Apublic.h b/src/H5Apublic.h
index 99ca90e..586940b 100644
--- a/src/H5Apublic.h
+++ b/src/H5Apublic.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Atest.c b/src/H5Atest.c
index 3dcca87..b923637 100644
--- a/src/H5Atest.c
+++ b/src/H5Atest.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5B.c b/src/H5B.c
index 2c49c81..e64a695 100644
--- a/src/H5B.c
+++ b/src/H5B.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5B2.c b/src/H5B2.c
index e9f6150..7e679cd 100644
--- a/src/H5B2.c
+++ b/src/H5B2.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5B2cache.c b/src/H5B2cache.c
index 6954e6c..2e1d37b 100644
--- a/src/H5B2cache.c
+++ b/src/H5B2cache.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -479,6 +477,8 @@ H5B2__cache_hdr_notify(H5AC_notify_action_t action, void *_thing)
case H5AC_NOTIFY_ACTION_ENTRY_CLEANED:
case H5AC_NOTIFY_ACTION_CHILD_DIRTIED:
case H5AC_NOTIFY_ACTION_CHILD_CLEANED:
+ case H5AC_NOTIFY_ACTION_CHILD_UNSERIALIZED:
+ case H5AC_NOTIFY_ACTION_CHILD_SERIALIZED:
/* do nothing */
break;
@@ -905,6 +905,8 @@ H5B2__cache_int_notify(H5AC_notify_action_t action, void *_thing)
case H5AC_NOTIFY_ACTION_ENTRY_CLEANED:
case H5AC_NOTIFY_ACTION_CHILD_DIRTIED:
case H5AC_NOTIFY_ACTION_CHILD_CLEANED:
+ case H5AC_NOTIFY_ACTION_CHILD_UNSERIALIZED:
+ case H5AC_NOTIFY_ACTION_CHILD_SERIALIZED:
/* do nothing */
break;
@@ -1291,6 +1293,8 @@ H5B2__cache_leaf_notify(H5AC_notify_action_t action, void *_thing)
case H5AC_NOTIFY_ACTION_ENTRY_CLEANED:
case H5AC_NOTIFY_ACTION_CHILD_DIRTIED:
case H5AC_NOTIFY_ACTION_CHILD_CLEANED:
+ case H5AC_NOTIFY_ACTION_CHILD_UNSERIALIZED:
+ case H5AC_NOTIFY_ACTION_CHILD_SERIALIZED:
/* do nothing */
break;
diff --git a/src/H5B2dbg.c b/src/H5B2dbg.c
index 19ca89a..3890ae0 100644
--- a/src/H5B2dbg.c
+++ b/src/H5B2dbg.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5B2hdr.c b/src/H5B2hdr.c
index 251a33d..ab017c6 100644
--- a/src/H5B2hdr.c
+++ b/src/H5B2hdr.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5B2int.c b/src/H5B2int.c
index 47100fd..c72bc98 100644
--- a/src/H5B2int.c
+++ b/src/H5B2int.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5B2internal.c b/src/H5B2internal.c
index e74ae59..1716c44 100644
--- a/src/H5B2internal.c
+++ b/src/H5B2internal.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5B2leaf.c b/src/H5B2leaf.c
index 4f8b8e6..d900761 100644
--- a/src/H5B2leaf.c
+++ b/src/H5B2leaf.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5B2module.h b/src/H5B2module.h
index 0fc30a5..35c982c 100644
--- a/src/H5B2module.h
+++ b/src/H5B2module.h
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5B2pkg.h b/src/H5B2pkg.h
index 7b1ec4d..e24d2eb 100644
--- a/src/H5B2pkg.h
+++ b/src/H5B2pkg.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -307,15 +305,6 @@ typedef struct H5B2_node_info_test_t {
/* Package Private Variables */
/*****************************/
-/* H5B2 header inherits cache-like properties from H5AC */
-H5_DLLVAR const H5AC_class_t H5AC_BT2_HDR[1];
-
-/* H5B2 internal node inherits cache-like properties from H5AC */
-H5_DLLVAR const H5AC_class_t H5AC_BT2_INT[1];
-
-/* H5B2 leaf node inherits cache-like properties from H5AC */
-H5_DLLVAR const H5AC_class_t H5AC_BT2_LEAF[1];
-
/* Declare a free list to manage the H5B2_internal_t struct */
H5FL_EXTERN(H5B2_internal_t);
diff --git a/src/H5B2private.h b/src/H5B2private.h
index 161e25e..e4bbffa 100644
--- a/src/H5B2private.h
+++ b/src/H5B2private.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5B2public.h b/src/H5B2public.h
index 43ec5d7..6e0b964 100644
--- a/src/H5B2public.h
+++ b/src/H5B2public.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5B2stat.c b/src/H5B2stat.c
index da721c6..df99ad5 100644
--- a/src/H5B2stat.c
+++ b/src/H5B2stat.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Quincey Koziol <koziol@hdfgroup.org>
diff --git a/src/H5B2test.c b/src/H5B2test.c
index aec2aba..c10e5a8 100644
--- a/src/H5B2test.c
+++ b/src/H5B2test.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Quincey Koziol <koziol@ncsa.uiuc.edu>
diff --git a/src/H5Bcache.c b/src/H5Bcache.c
index b2be829..a0a75c8 100644
--- a/src/H5Bcache.c
+++ b/src/H5Bcache.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Bdbg.c b/src/H5Bdbg.c
index b22264d..3881b44 100644
--- a/src/H5Bdbg.c
+++ b/src/H5Bdbg.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Bmodule.h b/src/H5Bmodule.h
index 6800b26..bc46752 100644
--- a/src/H5Bmodule.h
+++ b/src/H5Bmodule.h
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Bpkg.h b/src/H5Bpkg.h
index 41e0951..14dce4f 100644
--- a/src/H5Bpkg.h
+++ b/src/H5Bpkg.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -73,9 +71,6 @@ typedef struct H5B_cache_ud_t {
/* Package Private Variables */
/*****************************/
-/* H5B header inherits cache-like properties from H5AC */
-H5_DLLVAR const H5AC_class_t H5AC_BT[1];
-
/* Declare a free list to manage the haddr_t sequence information */
H5FL_SEQ_EXTERN(haddr_t);
diff --git a/src/H5Bprivate.h b/src/H5Bprivate.h
index 02fb82c..cb038ec 100644
--- a/src/H5Bprivate.h
+++ b/src/H5Bprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Bpublic.h b/src/H5Bpublic.h
index 0016996..1764f61 100644
--- a/src/H5Bpublic.h
+++ b/src/H5Bpublic.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5C.c b/src/H5C.c
index faf99ff..e6770ec 100644
--- a/src/H5C.c
+++ b/src/H5C.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -46,9 +44,9 @@
* - Change protect/unprotect to lock/unlock.
*
* - Flush entries in increasing address order in
- * H5C_make_space_in_cache().
+ * H5C__make_space_in_cache().
*
- * - Also in H5C_make_space_in_cache(), use high and low water marks
+ * - Also in H5C__make_space_in_cache(), use high and low water marks
* to reduce the number of I/O calls.
*
* - When flushing, attempt to combine contiguous entries to reduce
@@ -75,7 +73,7 @@
/****************/
#include "H5Cmodule.h" /* This source code file is part of the H5C module */
-#define H5F_FRIEND /*suppress error about including H5Fpkg */
+#define H5F_FRIEND /* suppress error about including H5Fpkg */
/***********/
@@ -136,15 +134,12 @@ static herr_t H5C__autoadjust__ageout__remove_all_markers(H5C_t * cache_ptr);
static herr_t H5C__autoadjust__ageout__remove_excess_markers(H5C_t * cache_ptr);
static herr_t H5C__flash_increase_cache_size(H5C_t * cache_ptr,
- size_t old_entry_size,
- size_t new_entry_size);
+ size_t old_entry_size, size_t new_entry_size);
-static herr_t H5C_flush_invalidate_cache(const H5F_t * f,
- hid_t dxpl_id,
- unsigned flags);
+static herr_t H5C_flush_invalidate_cache(H5F_t *f, hid_t dxpl_id, unsigned flags);
-static herr_t H5C_flush_invalidate_ring(const H5F_t * f, hid_t dxpl_id,
- H5C_ring_t ring, unsigned flags);
+static herr_t H5C_flush_invalidate_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring,
+ unsigned flags);
static herr_t H5C_flush_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring,
unsigned flags);
@@ -158,21 +153,17 @@ static void * H5C_load_entry(H5F_t * f,
haddr_t addr,
void * udata);
-static herr_t H5C_make_space_in_cache(H5F_t * f,
- hid_t dxpl_id,
- size_t space_needed,
- hbool_t write_permitted);
-
static herr_t H5C__mark_flush_dep_dirty(H5C_cache_entry_t * entry);
static herr_t H5C__mark_flush_dep_clean(H5C_cache_entry_t * entry);
+static herr_t H5C__serialize_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring);
+static herr_t H5C__serialize_single_entry(H5F_t *f, hid_t dxpl_id,
+ H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr);
+
static herr_t H5C__verify_len_eoa(H5F_t *f, const H5C_class_t * type,
haddr_t addr, size_t *len, hbool_t actual);
-static herr_t H5C__generate_image(const H5F_t *f, H5C_t * cache_ptr, H5C_cache_entry_t *entry_ptr,
- hid_t dxpl_id);
-
#if H5C_DO_SLIST_SANITY_CHECKS
static hbool_t H5C_entry_in_skip_list(H5C_t * cache_ptr,
H5C_cache_entry_t *target_ptr);
@@ -246,7 +237,7 @@ H5C_t *
H5C_create(size_t max_cache_size,
size_t min_clean_size,
int max_type_id,
- const char * (* type_name_table_ptr),
+ const H5C_class_t * const * class_table_ptr,
H5C_write_permitted_func_t check_write_permitted,
hbool_t write_permitted,
H5C_log_flush_func_t log_flush,
@@ -264,21 +255,21 @@ H5C_create(size_t max_cache_size,
HDassert( max_type_id >= 0 );
HDassert( max_type_id < H5C__MAX_NUM_TYPE_IDS );
- HDassert( type_name_table_ptr );
+ HDassert( class_table_ptr );
for ( i = 0; i <= max_type_id; i++ ) {
- HDassert( (type_name_table_ptr)[i] );
- HDassert( HDstrlen(( type_name_table_ptr)[i]) > 0 );
- }
+ HDassert( (class_table_ptr)[i] );
+ HDassert(HDstrlen((class_table_ptr)[i]->name) > 0);
+ } /* end for */
if(NULL == (cache_ptr = H5FL_CALLOC(H5C_t)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
if(NULL == (cache_ptr->slist_ptr = H5SL_create(H5SL_TYPE_HADDR, NULL)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, NULL, "can't create skip list.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, NULL, "can't create skip list")
if(NULL == (cache_ptr->tag_list = H5SL_create(H5SL_TYPE_HADDR, NULL)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, NULL, "can't create skip list for tagged entry addresses.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, NULL, "can't create skip list for tagged entry addresses")
/* If we get this far, we should succeed. Go ahead and initialize all
* the fields.
@@ -300,7 +291,7 @@ H5C_create(size_t max_cache_size,
cache_ptr->max_type_id = max_type_id;
- cache_ptr->type_name_table_ptr = type_name_table_ptr;
+ cache_ptr->class_table_ptr = class_table_ptr;
cache_ptr->max_cache_size = max_cache_size;
cache_ptr->min_clean_size = min_clean_size;
@@ -311,6 +302,7 @@ H5C_create(size_t max_cache_size,
cache_ptr->log_flush = log_flush;
cache_ptr->evictions_enabled = TRUE;
+ cache_ptr->close_warning_received = FALSE;
cache_ptr->index_len = 0;
cache_ptr->index_size = (size_t)0;
@@ -327,6 +319,14 @@ H5C_create(size_t max_cache_size,
cache_ptr->slist_ring_size[i] = (size_t)0;
} /* end for */
+ for(i = 0; i < H5C__HASH_TABLE_LEN; i++)
+ (cache_ptr->index)[i] = NULL;
+
+ cache_ptr->il_len = 0;
+ cache_ptr->il_size = (size_t)0;
+ cache_ptr->il_head = NULL;
+ cache_ptr->il_tail = NULL;
+
/* Tagging Field Initializations */
cache_ptr->ignore_tags = FALSE;
@@ -339,9 +339,6 @@ H5C_create(size_t max_cache_size,
cache_ptr->slist_size_increase = 0;
#endif /* H5C_DO_SANITY_CHECKS */
- for(i = 0; i < H5C__HASH_TABLE_LEN; i++)
- (cache_ptr->index)[i] = NULL;
-
cache_ptr->entries_removed_counter = 0;
cache_ptr->last_entry_removed_ptr = NULL;
cache_ptr->entry_watched_for_removal = NULL;
@@ -386,6 +383,8 @@ H5C_create(size_t max_cache_size,
cache_ptr->resize_enabled = FALSE;
cache_ptr->cache_full = FALSE;
cache_ptr->size_decreased = FALSE;
+ cache_ptr->resize_in_progress = FALSE;
+ cache_ptr->msic_in_progress = FALSE;
(cache_ptr->resize_ctl).version = H5C__CURR_AUTO_SIZE_CTL_VER;
(cache_ptr->resize_ctl).rpt_fcn = NULL;
@@ -431,20 +430,52 @@ H5C_create(size_t max_cache_size,
((cache_ptr->epoch_markers)[i]).magic =
H5C__H5C_CACHE_ENTRY_T_MAGIC;
((cache_ptr->epoch_markers)[i]).addr = (haddr_t)i;
- ((cache_ptr->epoch_markers)[i]).type = &H5C__epoch_marker_class;
+ ((cache_ptr->epoch_markers)[i]).type = H5AC_EPOCH_MARKER;
}
- if ( H5C_reset_cache_hit_rate_stats(cache_ptr) != SUCCEED ) {
+ /* Initialize cache image generation on file close related fields.
+ * Initial value of image_ctl must match H5C__DEFAULT_CACHE_IMAGE_CTL
+ * in H5Cprivate.h.
+ */
+ cache_ptr->image_ctl.version = H5C__CURR_CACHE_IMAGE_CTL_VER;
+ cache_ptr->image_ctl.generate_image = FALSE;
+ cache_ptr->image_ctl.save_resize_status = FALSE;
+ cache_ptr->image_ctl.entry_ageout = -1;
+ cache_ptr->image_ctl.flags = H5C_CI__ALL_FLAGS;
+
+ cache_ptr->serialization_in_progress = FALSE;
+ cache_ptr->load_image = FALSE;
+ cache_ptr->image_loaded = FALSE;
+ cache_ptr->delete_image = FALSE;
+ cache_ptr->image_addr = HADDR_UNDEF;
+ cache_ptr->image_len = 0;
+ cache_ptr->image_data_len = 0;
+
+ cache_ptr->entries_loaded_counter = 0;
+ cache_ptr->entries_inserted_counter = 0;
+ cache_ptr->entries_relocated_counter = 0;
+ cache_ptr->entry_fd_height_change_counter = 0;
+
+ cache_ptr->num_entries_in_image = 0;
+ cache_ptr->image_entries = NULL;
+ cache_ptr->image_buffer = NULL;
+
+ /* initialize free space manager related fields: */
+ cache_ptr->rdfsm_settled = FALSE;
+ cache_ptr->mdfsm_settled = FALSE;
+ if(H5C_reset_cache_hit_rate_stats(cache_ptr) < 0)
/* this should be impossible... */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, \
- "H5C_reset_cache_hit_rate_stats failed.")
- }
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "H5C_reset_cache_hit_rate_stats failed")
H5C_stats__reset(cache_ptr);
cache_ptr->prefix[0] = '\0'; /* empty string */
+#ifndef NDEBUG
+ cache_ptr->get_entry_ptr_from_addr_counter = 0;
+#endif /* NDEBUG */
+
/* Set return value */
ret_value = cache_ptr;
@@ -681,6 +712,101 @@ H5C_free_tag_list_cb(void *_item, void H5_ATTR_UNUSED *key, void H5_ATTR_UNUSED
/*-------------------------------------------------------------------------
+ *
+ * Function: H5C_prep_for_file_close
+ *
+ * Purpose: This function should be called just prior to the cache
+ * flushes at file close. There should be no protected
+ * entries in the cache at this point.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 7/3/15
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_prep_for_file_close(H5F_t *f, hid_t dxpl_id)
+{
+ H5C_t * cache_ptr;
+ hbool_t image_generated = FALSE; /* Whether a cache image was generated */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(f->shared->cache);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+
+ /* For now at least, it is possible to receive the
+ * close warning more than once -- the following
+ * if statement handles this.
+ */
+ if(cache_ptr->close_warning_received)
+ HGOTO_DONE(SUCCEED)
+ cache_ptr->close_warning_received = TRUE;
+
+ /* Make certain there aren't any protected entries */
+ HDassert(cache_ptr->pl_len == 0);
+
+ /* Prepare cache image */
+ if(H5C__prep_image_for_file_close(f, dxpl_id, &image_generated) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "can't create cache image")
+
+#ifdef H5_HAVE_PARALLEL
+ if ( ( H5F_INTENT(f) & H5F_ACC_RDWR ) &&
+ ( ! image_generated ) &&
+ ( cache_ptr->aux_ptr != NULL ) &&
+ ( f->shared->fs_persist ) ) {
+ /* If persistent free space managers are enabled, flushing the
+ * metadata cache may result in the deletion, insertion, and/or
+ * dirtying of entries.
+ *
+ * This is a problem in PHDF5, as it breaks two invariants of
+ * our management of the metadata cache across all processes:
+ *
+ * 1) Entries will not be dirtied, deleted, inserted, or moved
+ * during flush in the parallel case.
+ *
+ * 2) All processes contain the same set of dirty metadata
+ * entries on entry to a sync point.
+ *
+ * To solve this problem for the persistent free space managers,
+ * serialize the metadata cache on all processes prior to the
+ * first sync point on file shutdown. The shutdown warning is
+ * a convenient location for this call.
+ *
+ * This is sufficient since:
+ *
+ * 1) FSM settle routines are only invoked on file close. Since
+ * serialization makes the same settle calls as flush on file
+ * close, and since the close warning is issued after all
+ * non-FSM-related space allocations and just before the
+ * first sync point on close, this call will leave the caches
+ * in a consistent state across the processes if they were
+ * consistent before.
+ *
+ * 2) Since the FSM settle routines are only invoked once during
+ * file close, invoking them now will prevent their invocation
+ * during a flush, and thus avoid any resulting entry dirties,
+ * deletions, insertions, or moves during the flush.
+ */
+ if(H5C__serialize_cache(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "serialization of the cache failed")
+ } /* end if */
+#endif /* H5_HAVE_PARALLEL */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_prep_for_file_close() */
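
For orientation, the sketch below shows how a caller on the file-close path might drive the new close-warning sequence. It is only a sketch under the assumption that the caller holds the internal H5F_t handle and the private H5C entry points visible in this patch; the wrapper name my_close_metadata_cache() is hypothetical and not part of HDF5.

/* Hypothetical close-path wrapper (sketch only, not HDF5 code). */
static herr_t
my_close_metadata_cache(H5F_t *f, hid_t dxpl_id)
{
    /* Issue the close warning first: this records the warning,
     * prepares the cache image, and (in parallel builds with
     * persistent FSMs) may serialize the cache before the first
     * sync point on close.
     */
    if(H5C_prep_for_file_close(f, dxpl_id) < 0)
        return FAIL;

    /* With the close warning received, H5C_flush_cache() will invoke
     * the RDFSM/MDFSM settle routines as it flushes those rings.
     */
    if(H5C_flush_cache(f, dxpl_id, H5C__NO_FLAGS_SET) < 0)
        return FAIL;

    return SUCCEED;
}
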
+
+
+/*-------------------------------------------------------------------------
* Function: H5C_dest
*
* Purpose: Flush all data to disk and destroy the cache.
@@ -715,11 +841,22 @@ H5C_dest(H5F_t * f, hid_t dxpl_id)
/* Sanity check */
HDassert(cache_ptr);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->close_warning_received);
+
+#if H5AC_DUMP_IMAGE_STATS_ON_CLOSE
+ if(H5C_image_stats(cache_ptr, TRUE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't display cache image stats")
+#endif /* H5AC_DUMP_IMAGE_STATS_ON_CLOSE */
/* Flush and invalidate all cache entries */
if(H5C_flush_invalidate_cache(f, dxpl_id, H5C__NO_FLAGS_SET) < 0 )
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush cache")
+ /* Generate & write cache image if requested */
+ if(cache_ptr->image_ctl.generate_image)
+ if(H5C__generate_cache_image(f, dxpl_id, cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "Can't generate metadata cache image")
+
if(cache_ptr->slist_ptr != NULL) {
H5SL_close(cache_ptr->slist_ptr);
cache_ptr->slist_ptr = NULL;
@@ -731,6 +868,12 @@ H5C_dest(H5F_t * f, hid_t dxpl_id)
} /* end if */
#ifndef NDEBUG
+#if H5C_DO_SANITY_CHECKS
+ if(cache_ptr->get_entry_ptr_from_addr_counter > 0)
+ HDfprintf(stdout, "*** %ld calls to H5C_get_entry_ptr_from_addr(). ***\n",
+ cache_ptr->get_entry_ptr_from_addr_counter);
+#endif /* H5C_DO_SANITY_CHECKS */
+
cache_ptr->magic = 0;
#endif /* NDEBUG */
@@ -756,14 +899,12 @@ done:
herr_t
H5C_evict(H5F_t * f, hid_t dxpl_id)
{
- H5C_t *cache_ptr = f->shared->cache;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
/* Sanity check */
- HDassert(cache_ptr);
- HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(f);
/* Flush and invalidate all cache entries except the pinned entries */
if(H5C_flush_invalidate_cache(f, dxpl_id, H5C__EVICT_ALLOW_LAST_PINS_FLAG) < 0 )
@@ -809,7 +950,7 @@ H5C_expunge_entry(H5F_t *f, hid_t dxpl_id, const H5C_class_t *type,
#if H5C_DO_EXTREME_SANITY_CHECKS
if(H5C_validate_lru_list(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU extreme sanity check failed on entry.\n");
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
/* Look for entry in cache */
@@ -823,15 +964,9 @@ H5C_expunge_entry(H5F_t *f, hid_t dxpl_id, const H5C_class_t *type,
/* Check for entry being pinned or protected */
if(entry_ptr->is_protected)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "Target entry is protected.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "Target entry is protected")
if(entry_ptr->is_pinned)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "Target entry is pinned.")
-#ifdef H5_HAVE_PARALLEL
- if(entry_ptr->coll_access) {
- entry_ptr->coll_access = FALSE;
- H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL)
- } /* end if */
-#endif /* H5_HAVE_PARALLEL */
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "Target entry is pinned")
/* If we get this far, call H5C__flush_single_entry() with the
* H5C__FLUSH_INVALIDATE_FLAG and the H5C__FLUSH_CLEAR_ONLY_FLAG.
@@ -850,7 +985,7 @@ H5C_expunge_entry(H5F_t *f, hid_t dxpl_id, const H5C_class_t *type,
done:
#if H5C_DO_EXTREME_SANITY_CHECKS
if(H5C_validate_lru_list(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU extreme sanity check failed on exit.\n")
+ HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "LRU extreme sanity check failed on exit")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
FUNC_LEAVE_NOAPI(ret_value)
@@ -898,6 +1033,10 @@ done:
*
* JRM -- 8/30/15
*
+ * Modified function to call the free space manager
+ * settling functions.
+ * JRM -- 6/9/16
+ *
*-------------------------------------------------------------------------
*/
herr_t
@@ -905,12 +1044,12 @@ H5C_flush_cache(H5F_t *f, hid_t dxpl_id, unsigned flags)
{
#if H5C_DO_SANITY_CHECKS
int i;
- int32_t index_len = 0;
+ uint32_t index_len = 0;
size_t index_size = (size_t)0;
size_t clean_index_size = (size_t)0;
size_t dirty_index_size = (size_t)0;
size_t slist_size = (size_t)0;
- int32_t slist_len = 0;
+ uint32_t slist_len = 0;
#endif /* H5C_DO_SANITY_CHECKS */
H5C_ring_t ring;
H5C_t * cache_ptr;
@@ -957,7 +1096,7 @@ H5C_flush_cache(H5F_t *f, hid_t dxpl_id, unsigned flags)
if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
(H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
(H5C_validate_lru_list(cache_ptr) < 0))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry.\n");
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
ignore_protected = ( (flags & H5C__FLUSH_IGNORE_PROTECTED_FLAG) != 0 );
@@ -969,7 +1108,7 @@ H5C_flush_cache(H5F_t *f, hid_t dxpl_id, unsigned flags)
if(destroy) {
if(H5C_flush_invalidate_cache(f, dxpl_id, flags) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush invalidate failed.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush invalidate failed")
} /* end if */
else {
/* flush each ring, starting from the outermost ring and
@@ -977,8 +1116,41 @@ H5C_flush_cache(H5F_t *f, hid_t dxpl_id, unsigned flags)
*/
ring = H5C_RING_USER;
while(ring < H5C_RING_NTYPES) {
+
+ /* Only call the free space manager settle routines when the close
+ * warning has been received.
+ */
+ if(cache_ptr->close_warning_received) {
+ switch(ring) {
+ case H5C_RING_USER:
+ break;
+
+ case H5C_RING_RDFSM:
+ /* Settle raw data FSM */
+ if(!cache_ptr->rdfsm_settled)
+ if(H5MF_settle_raw_data_fsm(f, dxpl_id, &cache_ptr->rdfsm_settled) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "RD FSM settle failed")
+ break;
+
+ case H5C_RING_MDFSM:
+ /* Settle metadata FSM */
+ if(!cache_ptr->mdfsm_settled)
+ if(H5MF_settle_meta_data_fsm(f, dxpl_id, &cache_ptr->mdfsm_settled) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "MD FSM settle failed")
+ break;
+
+ case H5C_RING_SBE:
+ case H5C_RING_SB:
+ break;
+
+ default:
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown ring?!?!")
+ break;
+ } /* end switch */
+ } /* end if */
+
if(H5C_flush_ring(f, dxpl_id, ring, flags) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush ring failed.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush ring failed")
ring++;
} /* end while */
} /* end else */
@@ -1019,9 +1191,8 @@ H5C_flush_to_min_clean(H5F_t * f,
hid_t dxpl_id)
{
H5C_t * cache_ptr;
- herr_t result;
hbool_t write_permitted;
-#if 0 /* modified code -- commented out for now */
+#if 0 /* modified code -- commented out for now */ /* JRM */
int i;
int flushed_entries_count = 0;
size_t flushed_entries_size = 0;
@@ -1041,36 +1212,19 @@ H5C_flush_to_min_clean(H5F_t * f,
HDassert( cache_ptr );
HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
- if ( cache_ptr->check_write_permitted != NULL ) {
-
- result = (cache_ptr->check_write_permitted)(f, &write_permitted);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "Can't get write_permitted")
- }
- } else {
-
+ if(cache_ptr->check_write_permitted != NULL) {
+ if((cache_ptr->check_write_permitted)(f, &write_permitted) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't get write_permitted")
+ } /* end if */
+ else
write_permitted = cache_ptr->write_permitted;
- }
- if ( ! write_permitted ) {
+ if(!write_permitted)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "cache write is not permitted!?!")
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "cache write is not permitted!?!\n");
- }
#if 1 /* original code */
- result = H5C_make_space_in_cache(f,
- dxpl_id,
- (size_t)0,
- write_permitted);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "H5C_make_space_in_cache failed.")
- }
+ if(H5C__make_space_in_cache(f, dxpl_id, (size_t)0, write_permitted) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C__make_space_in_cache failed")
#else /* modified code -- commented out for now */
if ( cache_ptr->max_cache_size > cache_ptr->index_size ) {
@@ -1108,12 +1262,8 @@ H5C_flush_to_min_clean(H5F_t * f,
*/
flushed_entries_list = (haddr_t *)H5MM_malloc(sizeof(haddr_t) *
(size_t)(cache_ptr->slist_len));
-
- if ( flushed_entries_list == NULL ) {
-
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, \
- "memory allocation failed for flushed entries list")
- }
+ if(flushed_entries_list == NULL)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for flushed entries list")
/* Scan the dirty LRU list from tail forward and mark sufficient
* entries to free up the necessary space. Keep a list of the
@@ -1143,13 +1293,8 @@ H5C_flush_to_min_clean(H5F_t * f,
/* Flush the marked entries */
- result = H5C_flush_cache(f, primary_dxpl_id, secondary_dxpl_id,
- H5C__FLUSH_MARKED_ENTRIES_FLAG | H5C__FLUSH_IGNORE_PROTECTED_FLAG);
-
- if ( result < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_flush_cache failed.")
- }
+ if(H5C_flush_cache(f, primary_dxpl_id, secondary_dxpl_id, H5C__FLUSH_MARKED_ENTRIES_FLAG | H5C__FLUSH_IGNORE_PROTECTED_FLAG) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_flush_cache failed")
/* Now touch up the LRU list so as to place the flushed entries in
* the order they would be in if we had flushed them in the
@@ -1224,8 +1369,9 @@ H5C_insert_entry(H5F_t * f,
hbool_t set_flush_marker;
hbool_t write_permitted = TRUE;
size_t empty_space;
- H5C_cache_entry_t *entry_ptr;
+ H5C_cache_entry_t *entry_ptr = NULL;
H5C_cache_entry_t *test_entry_ptr;
+ hbool_t entry_tagged = FALSE;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -1238,6 +1384,7 @@ H5C_insert_entry(H5F_t * f,
HDassert( cache_ptr );
HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
HDassert( type );
+ HDassert( type->mem_type == cache_ptr->class_table_ptr[type->id]->mem_type );
HDassert( type->image_len );
HDassert( H5F_addr_defined(addr) );
HDassert( thing );
@@ -1245,14 +1392,10 @@ H5C_insert_entry(H5F_t * f,
#if H5C_DO_EXTREME_SANITY_CHECKS
/* no need to verify that entry is not already in the index as */
/* we already make that check below. */
-
- if ( ( H5C_validate_protected_entry_list(cache_ptr) < 0 ) ||
- ( H5C_validate_pinned_entry_list(cache_ptr) < 0 ) ||
- ( H5C_validate_lru_list(cache_ptr) < 0 ) ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "an extreme sanity check failed on entry.\n");
- }
+ if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_lru_list(cache_ptr) < 0))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
set_flush_marker = ( (flags & H5C__SET_FLUSH_MARKER_FLAG) != 0 );
@@ -1277,9 +1420,9 @@ H5C_insert_entry(H5F_t * f,
if(test_entry_ptr != NULL) {
if(test_entry_ptr == entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "entry already in cache.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "entry already in cache")
else
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "duplicate entry in cache.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "duplicate entry in cache")
} /* end if */
entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC;
@@ -1322,15 +1465,18 @@ H5C_insert_entry(H5F_t * f,
entry_ptr->ring = ring;
- /* Initialize flush dependency height fields */
- entry_ptr->flush_dep_parent = NULL;
- entry_ptr->flush_dep_nparents = 0;
- entry_ptr->flush_dep_parent_nalloc = 0;
- entry_ptr->flush_dep_nchildren = 0;
- entry_ptr->flush_dep_ndirty_children = 0;
+ /* Initialize flush dependency fields */
+ entry_ptr->flush_dep_parent = NULL;
+ entry_ptr->flush_dep_nparents = 0;
+ entry_ptr->flush_dep_parent_nalloc = 0;
+ entry_ptr->flush_dep_nchildren = 0;
+ entry_ptr->flush_dep_ndirty_children = 0;
+ entry_ptr->flush_dep_nunser_children = 0;
entry_ptr->ht_next = NULL;
entry_ptr->ht_prev = NULL;
+ entry_ptr->il_next = NULL;
+ entry_ptr->il_prev = NULL;
entry_ptr->next = NULL;
entry_ptr->prev = NULL;
@@ -1343,16 +1489,38 @@ H5C_insert_entry(H5F_t * f,
entry_ptr->coll_prev = NULL;
#endif /* H5_HAVE_PARALLEL */
+ /* initialize cache image related fields */
+ entry_ptr->include_in_image = FALSE;
+ entry_ptr->lru_rank = 0;
+ entry_ptr->image_dirty = FALSE;
+ entry_ptr->fd_parent_count = 0;
+ entry_ptr->fd_parent_addrs = NULL;
+ entry_ptr->fd_child_count = 0;
+ entry_ptr->fd_dirty_child_count = 0;
+ entry_ptr->image_fd_height = 0;
+ entry_ptr->prefetched = FALSE;
+ entry_ptr->prefetch_type_id = 0;
+ entry_ptr->age = 0;
+ entry_ptr->prefetched_dirty = FALSE;
+#ifndef NDEBUG /* debugging field */
+ entry_ptr->serialization_count = 0;
+#endif /* NDEBUG */
+
+ entry_ptr->tl_next = NULL;
+ entry_ptr->tl_prev = NULL;
+ entry_ptr->tag_info = NULL;
+
/* Apply tag to newly inserted entry */
if(H5C__tag_entry(cache_ptr, entry_ptr, dxpl_id) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "Cannot tag metadata entry")
+ entry_tagged = TRUE;
H5C__RESET_CACHE_ENTRY_STATS(entry_ptr)
if(cache_ptr->flash_size_increase_possible &&
(entry_ptr->size > cache_ptr->flash_size_increase_threshold))
if(H5C__flash_increase_cache_size(cache_ptr, 0, entry_ptr->size) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C__flash_increase_cache_size failed.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C__flash_increase_cache_size failed")
if(cache_ptr->index_size >= cache_ptr->max_cache_size)
empty_space = 0;
@@ -1382,7 +1550,7 @@ H5C_insert_entry(H5F_t * f,
/* Note that space_needed is just the amount of space that
* is needed to insert the new entry without exceeding the cache
- * size limit. The subsequent call to H5C_make_space_in_cache()
+ * size limit. The subsequent call to H5C__make_space_in_cache()
* may evict the entries required to free more or less space
* depending on conditions. It MAY be less if the cache is
* currently undersized, or more if the cache is oversized.
@@ -1405,9 +1573,9 @@ H5C_insert_entry(H5F_t * f,
* no point in worrying about the third.
*/
- if(H5C_make_space_in_cache(f, dxpl_id, space_needed, write_permitted) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C_make_space_in_cache failed.")
- }
+ if(H5C__make_space_in_cache(f, dxpl_id, space_needed, write_permitted) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINS, FAIL, "H5C__make_space_in_cache failed")
+ } /* end if */
H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL)
@@ -1418,10 +1586,10 @@ H5C_insert_entry(H5F_t * f,
H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, entry_ptr, FAIL)
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ( ( H5C_validate_protected_entry_list(cache_ptr) < 0 ) ||
- ( H5C_validate_pinned_entry_list(cache_ptr) < 0 ) ||
- ( H5C_validate_lru_list(cache_ptr) < 0 ) )
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed just before done.\n")
+ if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_lru_list(cache_ptr) < 0))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed just before done")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
/* If the entry's type has a 'notify' callback, send an 'after insertion'
@@ -1436,7 +1604,7 @@ H5C_insert_entry(H5F_t * f,
#ifdef H5_HAVE_PARALLEL
/* Get the dataset transfer property list */
if(NULL == (dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a property list");
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a property list")
if(H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI)) {
coll_access = (H5P_USER_TRUE == f->coll_md_read ? TRUE : FALSE);
@@ -1473,12 +1641,16 @@ H5C_insert_entry(H5F_t * f,
done:
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ( ( H5C_validate_protected_entry_list(cache_ptr) < 0 ) ||
- ( H5C_validate_pinned_entry_list(cache_ptr) < 0 ) ||
- ( H5C_validate_lru_list(cache_ptr) < 0 ) )
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit.\n")
+ if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_lru_list(cache_ptr) < 0))
+ HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+ if(ret_value < 0 && entry_tagged)
+ if(H5C__untag_entry(cache_ptr, entry_ptr) < 0)
+ HDONE_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove entry from tag list")
+
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_insert_entry() */
@@ -1532,12 +1704,25 @@ H5C_mark_entry_dirty(void *thing)
/* set the dirtied flag */
entry_ptr->dirtied = TRUE;
- } else if ( entry_ptr->is_pinned ) {
+ /* reset image_up_to_date */
+ if(entry_ptr->image_up_to_date) {
+ entry_ptr->image_up_to_date = FALSE;
+
+ if(entry_ptr->flush_dep_nparents > 0)
+ if(H5C__mark_flush_dep_unserialized(entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "Can't propagate serialization status to fd parents")
+ }/* end if */
+ } /* end if */
+ else if ( entry_ptr->is_pinned ) {
hbool_t was_clean; /* Whether the entry was previously clean */
+ hbool_t image_was_up_to_date;
/* Remember previous dirty status */
was_clean = !entry_ptr->is_dirty;
+ /* Check if image is up to date */
+ image_was_up_to_date = entry_ptr->image_up_to_date;
+
/* Mark the entry as dirty if it isn't already */
entry_ptr->is_dirty = TRUE;
entry_ptr->image_up_to_date = FALSE;
@@ -1565,6 +1750,10 @@ H5C_mark_entry_dirty(void *thing)
if(H5C__mark_flush_dep_dirty(entry_ptr) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep dirty flag")
} /* end if */
+ if(image_was_up_to_date)
+ if(entry_ptr->flush_dep_nparents > 0)
+ if(H5C__mark_flush_dep_unserialized(entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "Can't propagate serialization status to fd parents")
} /* end if */
else
HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Entry is neither pinned nor protected??")
@@ -1654,6 +1843,99 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5C_mark_entry_unserialized
+ *
+ * Purpose: Mark a pinned or protected entry as unserialized. The target
+ * entry MUST be either pinned or protected, and MAY be both.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * 12/23/16
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_mark_entry_unserialized(void *thing)
+{
+ H5C_cache_entry_t *entry = (H5C_cache_entry_t *)thing;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(entry);
+ HDassert(H5F_addr_defined(entry->addr));
+
+ if(entry->is_protected || entry->is_pinned) {
+ HDassert(!entry->is_read_only);
+
+ /* Reset image_up_to_date */
+ if(entry->image_up_to_date) {
+ entry->image_up_to_date = FALSE;
+
+ if(entry->flush_dep_nparents > 0)
+ if(H5C__mark_flush_dep_unserialized(entry) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSET, FAIL, "Can't propagate serialization status to fd parents")
+ }/* end if */
+ } /* end if */
+ else
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKUNSERIALIZED, FAIL, "Entry to unserialize is neither pinned nor protected??")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_mark_entry_unserialized() */
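
As a usage illustration only: a cache client that has invalidated the on-disk image of an entry it holds pinned or protected could report that with the new call. The my_client_t type and my_image_invalidated() wrapper below are hypothetical; the only assumption drawn from HDF5 itself is the convention that client structures begin with their cache entry header.

/* Hypothetical cache-client object (sketch only). */
typedef struct my_client_t {
    H5C_cache_entry_t cache_info;   /* cache entry header, must come first */
    int               payload;      /* example client state */
} my_client_t;

static herr_t
my_image_invalidated(my_client_t *obj)
{
    /* The entry must be pinned and/or protected here.  This drops the
     * image_up_to_date flag and propagates the unserialized status to
     * any flush dependency parents.
     */
    if(H5C_mark_entry_unserialized(obj) < 0)
        return FAIL;
    return SUCCEED;
}
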
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_mark_entry_serialized
+ *
+ * Purpose: Mark a pinned entry as serialized. The target entry MUST be
+ * pinned.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * 12/23/16
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_mark_entry_serialized(void *_thing)
+{
+ H5C_cache_entry_t *entry = (H5C_cache_entry_t *)_thing;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(entry);
+ HDassert(H5F_addr_defined(entry->addr));
+
+ /* Operate on pinned entry */
+ if(entry->is_protected)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKSERIALIZED, FAIL, "entry is protected")
+ else if(entry->is_pinned) {
+ /* Check for entry changing status and do notifications, etc. */
+ if(!entry->image_up_to_date) {
+ /* Set the image_up_to_date flag */
+ entry->image_up_to_date = TRUE;
+
+ /* Propagate the serialize up the flush dependency chain, if appropriate */
+ if(entry->flush_dep_nparents > 0)
+ if(H5C__mark_flush_dep_serialized(entry) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKSERIALIZED, FAIL, "Can't propagate flush dep serialize")
+ } /* end if */
+ } /* end if */
+ else
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKSERIALIZED, FAIL, "Entry is not pinned??")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_mark_entry_serialized() */
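
And the symmetric case, again purely illustrative: once a pinned (and not protected) entry's image has been regenerated, the client can record that fact. The my_image_refreshed() wrapper is hypothetical; only H5C_mark_entry_serialized() comes from this patch.

/* Hypothetical sketch: the entry is pinned, not protected, and its
 * on-disk image buffer has just been brought back up to date.
 */
static herr_t
my_image_refreshed(void *pinned_entry)
{
    /* Sets image_up_to_date and propagates the serialized status up
     * the flush dependency chain.
     */
    if(H5C_mark_entry_serialized(pinned_entry) < 0)
        return FAIL;
    return SUCCEED;
}
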
+
+
+/*-------------------------------------------------------------------------
*
* Function: H5C_move_entry
*
@@ -1690,7 +1972,7 @@ H5C_move_entry(H5C_t * cache_ptr,
if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
(H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
(H5C_validate_lru_list(cache_ptr) < 0))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry.\n")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
H5C__SEARCH_INDEX(cache_ptr, old_addr, entry_ptr, FAIL)
@@ -1734,11 +2016,10 @@ H5C_move_entry(H5C_t * cache_ptr,
* don't mark it as dirty either, lest we confuse the flush call back.
*/
if(!entry_ptr->destroy_in_progress) {
- H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr)
+ H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL)
if(entry_ptr->in_slist) {
HDassert(cache_ptr->slist_ptr);
-
H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE)
} /* end if */
} /* end if */
@@ -1755,7 +2036,12 @@ H5C_move_entry(H5C_t * cache_ptr,
entry_ptr->is_dirty = TRUE;
/* This shouldn't be needed, but it keeps the test code happy */
- entry_ptr->image_up_to_date = FALSE;
+ if(entry_ptr->image_up_to_date) {
+ entry_ptr->image_up_to_date = FALSE;
+ if(entry_ptr->flush_dep_nparents > 0)
+ if(H5C__mark_flush_dep_unserialized(entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "Can't propagate serialization status to fd parents")
+ } /* end if */
/* Modify cache data structures */
H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL)
@@ -1790,7 +2076,7 @@ done:
if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
(H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
(H5C_validate_lru_list(cache_ptr) < 0))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit.\n")
+ HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
FUNC_LEAVE_NOAPI(ret_value)
@@ -1831,17 +2117,14 @@ H5C_resize_entry(void *thing, size_t new_size)
/* Check for usage errors */
if(new_size <= 0)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "New size is non-positive.")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "New size is non-positive")
if(!(entry_ptr->is_pinned || entry_ptr->is_protected))
HGOTO_ERROR(H5E_CACHE, H5E_BADTYPE, FAIL, "Entry isn't pinned or protected??")
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ( ( H5C_validate_protected_entry_list(cache_ptr) < 0 ) ||
- ( H5C_validate_pinned_entry_list(cache_ptr) < 0 ) ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "an extreme sanity check failed on entry.\n");
- }
+ if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
/* update for change in entry size if necessary */
@@ -1853,7 +2136,14 @@ H5C_resize_entry(void *thing, size_t new_size)
/* mark the entry as dirty if it isn't already */
entry_ptr->is_dirty = TRUE;
- entry_ptr->image_up_to_date = FALSE;
+
+ /* Reset the image up-to-date status */
+ if(entry_ptr->image_up_to_date) {
+ entry_ptr->image_up_to_date = FALSE;
+ if(entry_ptr->flush_dep_nparents > 0)
+ if(H5C__mark_flush_dep_unserialized(entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "Can't propagate serialization status to fd parents")
+ } /* end if */
/* Release the current image */
if(entry_ptr->image_ptr)
@@ -1931,14 +2221,10 @@ H5C_resize_entry(void *thing, size_t new_size)
} /* end if */
done:
-
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ( ( H5C_validate_protected_entry_list(cache_ptr) < 0 ) ||
- ( H5C_validate_pinned_entry_list(cache_ptr) < 0 ) ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "an extreme sanity check failed on exit.\n");
- }
+ if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0))
+ HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
FUNC_LEAVE_NOAPI(ret_value)
@@ -2034,13 +2320,10 @@ H5C_pin_protected_entry(void *thing)
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ( ( H5C_validate_protected_entry_list(cache_ptr) < 0 ) ||
- ( H5C_validate_pinned_entry_list(cache_ptr) < 0 ) ||
- ( H5C_validate_lru_list(cache_ptr) < 0 ) ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "an extreme sanity check failed on entry.\n");
- }
+ if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_lru_list(cache_ptr) < 0))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -2053,15 +2336,11 @@ H5C_pin_protected_entry(void *thing)
HGOTO_ERROR(H5E_CACHE, H5E_CANTPIN, FAIL, "Can't pin entry by client")
done:
-
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ( ( H5C_validate_protected_entry_list(cache_ptr) < 0 ) ||
- ( H5C_validate_pinned_entry_list(cache_ptr) < 0 ) ||
- ( H5C_validate_lru_list(cache_ptr) < 0 ) ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "an extreme sanity check failed on exit.\n");
- }
+ if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_lru_list(cache_ptr) < 0))
+ HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
FUNC_LEAVE_NOAPI(ret_value)
@@ -2088,32 +2367,6 @@ done:
*
* Programmer: John Mainzer - 6/2/04
*
- * JRM -- 11/13/08
- * Modified function to call H5C_make_space_in_cache() when
- * the min_clean_size is violated, not just when there isn't
- * enough space for and entry that has just been loaded.
- *
- * The purpose of this modification is to avoid "metadata
- * blizzards" in the write only case. In such instances,
- * the cache was allowed to fill with dirty metadata. When
- * we finally needed to evict an entry to make space, we had
- * to flush out a whole cache full of metadata -- which has
- * interesting performance effects. We hope to avoid (or
- * perhaps more accurately hide) this effect by maintaining
- * the min_clean_size, which should force us to start flushing
- * entries long before we actually have to evict something
- * to make space.
- *
- * JRM -- 9/1/14
- * Replace the old rw parameter with the flags parameter.
- * This allows H5C_protect to accept flags other than
- * H5C__READ_ONLY_FLAG.
- *
- * Added support for the H5C__FLUSH_LAST_FLAG.
- * At present, this flag is only applied if the entry is
- * not in cache, and is loaded into the cache as a result of
- * this call.
- *
*-------------------------------------------------------------------------
*/
void *
@@ -2152,18 +2405,23 @@ H5C_protect(H5F_t * f,
HDassert( cache_ptr );
HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
HDassert( type );
+ HDassert( type->mem_type == cache_ptr->class_table_ptr[type->id]->mem_type );
HDassert( H5F_addr_defined(addr) );
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ( ( H5C_validate_protected_entry_list(cache_ptr) < 0 ) ||
- ( H5C_validate_pinned_entry_list(cache_ptr) < 0 ) ||
- ( H5C_validate_lru_list(cache_ptr) < 0 ) ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, \
- "an extreme sanity check failed on entry.\n");
- }
+ if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_lru_list(cache_ptr) < 0))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+ /* Load the cache image, if requested */
+ if(cache_ptr->load_image) {
+ cache_ptr->load_image = FALSE;
+ if(H5C__load_cache_image(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "Can't load cache image")
+ } /* end if */
+
read_only = ( (flags & H5C__READ_ONLY_FLAG) != 0 );
flush_last = ( (flags & H5C__FLUSH_LAST_FLAG) != 0 );
@@ -2193,9 +2451,24 @@ H5C_protect(H5F_t * f,
/* first check to see if the target is in cache */
H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, NULL)
- if ( entry_ptr != NULL ) {
+ if(entry_ptr != NULL) {
if(entry_ptr->ring != ring)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "ring type mismatch occured for cache entry\n");
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "ring type mismatch occurred for cache entry")
+
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+
+ if(entry_ptr->prefetched) {
+ /* This call removes the prefetched entry from the cache,
+ * and replaces it with an entry deserialized from the
+ * image of the prefetched entry.
+ */
+ if(H5C__deserialize_prefetched_entry(f, dxpl_id, cache_ptr, &entry_ptr, type, addr, udata) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't deserialize prefetched entry")
+
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(!entry_ptr->prefetched);
+ HDassert(entry_ptr->addr == addr);
+ } /* end if */
/* Check for trying to load the wrong type of entry from an address */
if(entry_ptr->type != type)
@@ -2205,7 +2478,7 @@ H5C_protect(H5F_t * f,
marked as collective, and is clean, it is possible that
other processes will not have it in its cache and will
expect a bcast of the entry from process 0. So process 0
- will bcast the entry to all other ranks. Ranks that do have
+ will bcast the entry to all other ranks. Ranks that _do_ have
the entry in their cache still have to participate in the
bcast. */
#ifdef H5_HAVE_PARALLEL
@@ -2263,7 +2536,7 @@ H5C_protect(H5F_t * f,
/* Get the tag from the DXPL */
if((H5P_get(dxpl, H5AC_TAG_NAME, &tag)) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "unable to query property value");
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "unable to query property value")
if(H5C_verify_tag(entry_ptr->type->id, tag) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, NULL, "tag verification failed")
@@ -2288,6 +2561,8 @@ H5C_protect(H5F_t * f,
HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't load entry")
entry_ptr = (H5C_cache_entry_t *)thing;
+ cache_ptr->entries_loaded_counter++;
+
entry_ptr->ring = ring;
#ifdef H5_HAVE_PARALLEL
if(H5F_HAS_FEATURE(f, H5FD_FEAT_HAS_MPI) && entry_ptr->coll_access)
@@ -2305,7 +2580,7 @@ H5C_protect(H5F_t * f,
( entry_ptr->size > cache_ptr->flash_size_increase_threshold ) ) {
if(H5C__flash_increase_cache_size(cache_ptr, 0, entry_ptr->size) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__flash_increase_cache_size failed.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__flash_increase_cache_size failed")
}
if(cache_ptr->index_size >= cache_ptr->max_cache_size)
@@ -2314,7 +2589,7 @@ H5C_protect(H5F_t * f,
empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
/* try to free up if necessary and if evictions are permitted. Note
- * that if evictions are enabled, we will call H5C_make_space_in_cache()
+ * that if evictions are enabled, we will call H5C__make_space_in_cache()
* regardless if the min_free_space requirement is not met.
*/
if ( ( cache_ptr->evictions_enabled ) &&
@@ -2336,26 +2611,20 @@ H5C_protect(H5F_t * f,
HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Can't get write_permitted 1")
else
have_write_permitted = TRUE;
- } else {
-
+ } /* end if */
+ else {
write_permitted = cache_ptr->write_permitted;
-
have_write_permitted = TRUE;
+ } /* end else */
- }
-
- HDassert( entry_ptr->size <= H5C_MAX_ENTRY_SIZE );
-
+ HDassert(entry_ptr->size <= H5C_MAX_ENTRY_SIZE);
space_needed = entry_ptr->size;
-
- if ( space_needed > cache_ptr->max_cache_size ) {
-
+ if(space_needed > cache_ptr->max_cache_size)
space_needed = cache_ptr->max_cache_size;
- }
/* Note that space_needed is just the amount of space that
* is needed to insert the new entry without exceeding the cache
- * size limit. The subsequent call to H5C_make_space_in_cache()
+ * size limit. The subsequent call to H5C__make_space_in_cache()
* may evict the entries required to free more or less space
* depending on conditions. It MAY be less if the cache is
* currently undersized, or more if the cache is oversized.
@@ -2382,9 +2651,9 @@ H5C_protect(H5F_t * f,
* see no point in worrying about the fourth.
*/
- if(H5C_make_space_in_cache(f, dxpl_id, space_needed, write_permitted) < 0 )
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C_make_space_in_cache failed 1.")
- }
+ if(H5C__make_space_in_cache(f, dxpl_id, space_needed, write_permitted) < 0 )
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__make_space_in_cache failed")
+ } /* end if */
/* Insert the entry in the hash table. It can't be dirty yet, so
* we don't even check to see if it should go in the skip list.
@@ -2422,38 +2691,31 @@ H5C_protect(H5F_t * f,
/* Record that the entry was loaded, to trigger a notify callback later */
/* (After the entry is fully added to the cache) */
was_loaded = TRUE;
- }
-
- HDassert( entry_ptr->addr == addr );
- HDassert( entry_ptr->type == type );
-
- if ( entry_ptr->is_protected ) {
-
- if ( ( read_only ) && ( entry_ptr->is_read_only ) ) {
+ } /* end else */
- HDassert( entry_ptr->ro_ref_count > 0 );
+ HDassert(entry_ptr->addr == addr);
+ HDassert(entry_ptr->type == type);
+ if(entry_ptr->is_protected) {
+ if(read_only && entry_ptr->is_read_only) {
+ HDassert(entry_ptr->ro_ref_count > 0);
(entry_ptr->ro_ref_count)++;
-
- } else {
-
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, \
- "Target already protected & not read only?!?.")
- }
- } else {
-
+ } /* end if */
+ else
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Target already protected & not read only?!?")
+ } /* end if */
+ else {
H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, entry_ptr, NULL)
entry_ptr->is_protected = TRUE;
if ( read_only ) {
-
entry_ptr->is_read_only = TRUE;
entry_ptr->ro_ref_count = 1;
- }
+ } /* end if */
entry_ptr->dirtied = FALSE;
- }
+ } /* end else */
H5C__UPDATE_CACHE_HIT_RATE_STATS(cache_ptr, hit)
@@ -2471,7 +2733,7 @@ H5C_protect(H5F_t * f,
if ( cache_ptr->check_write_permitted != NULL ) {
if((cache_ptr->check_write_permitted)(f, &write_permitted) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Can't get write_permitted 2")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Can't get write_permitted")
else
have_write_permitted = TRUE;
} else {
@@ -2483,16 +2745,14 @@ H5C_protect(H5F_t * f,
}
}
- if ( ( cache_ptr->resize_enabled ) &&
- ( cache_ptr->cache_accesses >=
- (cache_ptr->resize_ctl).epoch_length ) ) {
+ if(cache_ptr->resize_enabled &&
+ (cache_ptr->cache_accesses >= (cache_ptr->resize_ctl).epoch_length)) {
if(H5C__auto_adjust_cache_size(f, dxpl_id, write_permitted) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Cache auto-resize failed.")
- }
-
- if ( cache_ptr->size_decreased ) {
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "Cache auto-resize failed")
+ } /* end if */
+ if(cache_ptr->size_decreased) {
cache_ptr->size_decreased = FALSE;
/* check to see if the cache is now oversized due to the cache
@@ -2500,7 +2760,7 @@ H5C_protect(H5F_t * f,
* bring the cache size down to the current maximum cache size.
*
* Also, if the min_clean_size requirement is not met, we
- * should also call H5C_make_space_in_cache() to bring us
+ * should also call H5C__make_space_in_cache() to bring us
* into compliance.
*/
@@ -2517,10 +2777,10 @@ H5C_protect(H5F_t * f,
if(cache_ptr->index_size > cache_ptr->max_cache_size)
cache_ptr->cache_full = TRUE;
- if(H5C_make_space_in_cache(f, dxpl_id, (size_t)0, write_permitted) < 0 )
- HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C_make_space_in_cache failed 2.")
+ if(H5C__make_space_in_cache(f, dxpl_id, (size_t)0, write_permitted) < 0 )
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, NULL, "H5C__make_space_in_cache failed")
}
- }
+ } /* end if */
}
/* If we loaded the entry and the entry's type has a 'notify' callback, send
@@ -2557,10 +2817,10 @@ H5C_protect(H5F_t * f,
done:
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ( ( H5C_validate_protected_entry_list(cache_ptr) < 0 ) ||
- ( H5C_validate_pinned_entry_list(cache_ptr) < 0 ) ||
- ( H5C_validate_lru_list(cache_ptr) < 0 ) )
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "an extreme sanity check failed on exit.\n")
+ if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_lru_list(cache_ptr) < 0))
+ HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, NULL, "an extreme sanity check failed on exit")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
FUNC_LEAVE_NOAPI(ret_value)
@@ -2587,7 +2847,7 @@ H5C_reset_cache_hit_rate_stats(H5C_t * cache_ptr)
FUNC_ENTER_NOAPI(FAIL)
if((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC))
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "bad cache_ptr on entry.")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "bad cache_ptr on entry")
cache_ptr->cache_hits = 0;
cache_ptr->cache_accesses = 0;
@@ -2627,27 +2887,27 @@ H5C_set_cache_auto_resize_config(H5C_t *cache_ptr,
FUNC_ENTER_NOAPI(FAIL)
if((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC))
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "bad cache_ptr on entry.")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "bad cache_ptr on entry")
if(config_ptr == NULL)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "NULL config_ptr on entry.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "NULL config_ptr on entry")
if(config_ptr->version != H5C__CURR_AUTO_SIZE_CTL_VER)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "unknown config version.")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "unknown config version")
/* check general configuration section of the config: */
if(H5C_validate_resize_config(config_ptr, H5C_RESIZE_CFG__VALIDATE_GENERAL) < 0)
- HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "error in general configuration fields of new config.")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "error in general configuration fields of new config")
/* check size increase control fields of the config: */
if(H5C_validate_resize_config(config_ptr, H5C_RESIZE_CFG__VALIDATE_INCREMENT) < 0)
- HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "error in the size increase control fields of new config.")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "error in the size increase control fields of new config")
/* check size decrease control fields of the config: */
if(H5C_validate_resize_config(config_ptr, H5C_RESIZE_CFG__VALIDATE_DECREMENT) < 0)
- HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "error in the size decrease control fields of new config.")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "error in the size decrease control fields of new config")
/* check for conflicts between size increase and size decrease controls: */
if(H5C_validate_resize_config(config_ptr, H5C_RESIZE_CFG__VALIDATE_INTERACTIONS) < 0)
- HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "conflicting threshold fields in new config.")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "conflicting threshold fields in new config")
/* will set the increase possible fields to FALSE later if needed */
cache_ptr->size_increase_possible = TRUE;
@@ -2667,7 +2927,7 @@ H5C_set_cache_auto_resize_config(H5C_t *cache_ptr,
break;
default: /* should be unreachable */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown incr_mode?!?!?.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown incr_mode?!?!?")
} /* end switch */
/* logically, this is where configuration for flash cache size increases
@@ -2701,7 +2961,7 @@ H5C_set_cache_auto_resize_config(H5C_t *cache_ptr,
break;
default: /* should be unreachable */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown decr_mode?!?!?.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown decr_mode?!?!?")
} /* end switch */
if(config_ptr->max_size == config_ptr->min_size) {
@@ -2757,18 +3017,18 @@ H5C_set_cache_auto_resize_config(H5C_t *cache_ptr,
if(H5C_reset_cache_hit_rate_stats(cache_ptr) < 0)
/* this should be impossible... */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats failed.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats failed")
/* remove excess epoch markers if any */
if((config_ptr->decr_mode == H5C_decr__age_out_with_threshold) ||
(config_ptr->decr_mode == H5C_decr__age_out)) {
if(cache_ptr->epoch_markers_active > cache_ptr->resize_ctl.epochs_before_eviction)
if(H5C__autoadjust__ageout__remove_excess_markers(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't remove excess epoch markers.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't remove excess epoch markers")
} /* end if */
else if(cache_ptr->epoch_markers_active > 0) {
if(H5C__autoadjust__ageout__remove_all_markers(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error removing all epoch markers.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error removing all epoch markers")
}
/* configure flash size increase facility. We wait until the
@@ -2792,7 +3052,7 @@ H5C_set_cache_auto_resize_config(H5C_t *cache_ptr,
break;
default: /* should be unreachable */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?")
break;
} /* end switch */
} /* end if */
@@ -2823,7 +3083,7 @@ H5C_set_evictions_enabled(H5C_t *cache_ptr, hbool_t evictions_enabled)
FUNC_ENTER_NOAPI(FAIL)
if((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr on entry")
/* There is no fundamental reason why we should not permit
* evictions to be disabled while automatic resize is enabled.
@@ -2834,7 +3094,7 @@ H5C_set_evictions_enabled(H5C_t *cache_ptr, hbool_t evictions_enabled)
if((evictions_enabled != TRUE) &&
((cache_ptr->resize_ctl.incr_mode != H5C_incr__off) ||
(cache_ptr->resize_ctl.decr_mode != H5C_decr__off)))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't disable evictions when auto resize enabled.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't disable evictions when auto resize enabled")
cache_ptr->evictions_enabled = evictions_enabled;
@@ -2929,13 +3189,10 @@ H5C_unpin_entry(void *_entry_ptr)
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ( ( H5C_validate_protected_entry_list(cache_ptr) < 0 ) ||
- ( H5C_validate_pinned_entry_list(cache_ptr) < 0 ) ||
- ( H5C_validate_lru_list(cache_ptr) < 0 ) ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "an extreme sanity check failed on entry.\n");
- }
+ if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_lru_list(cache_ptr) < 0))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
@@ -2944,19 +3201,14 @@ H5C_unpin_entry(void *_entry_ptr)
HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry from client")
done:
-
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ( ( H5C_validate_protected_entry_list(cache_ptr) < 0 ) ||
- ( H5C_validate_pinned_entry_list(cache_ptr) < 0 ) ||
- ( H5C_validate_lru_list(cache_ptr) < 0 ) ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "an extreme sanity check failed on exit.\n");
- }
+ if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_lru_list(cache_ptr) < 0))
+ HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5C_unpin_entry() */
@@ -3043,13 +3295,10 @@ H5C_unprotect(H5F_t * f,
was_clean = ! ( entry_ptr->is_dirty );
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ( ( H5C_validate_protected_entry_list(cache_ptr) < 0 ) ||
- ( H5C_validate_pinned_entry_list(cache_ptr) < 0 ) ||
- ( H5C_validate_lru_list(cache_ptr) < 0 ) ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "an extreme sanity check failed on entry.\n");
- }
+ if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_lru_list(cache_ptr) < 0))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
/* if the entry has multiple read only protects, just decrement
@@ -3077,7 +3326,6 @@ H5C_unprotect(H5F_t * f,
if(H5C_unpin_entry_from_client(cache_ptr, entry_ptr, FALSE) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPIN, FAIL, "Can't unpin entry by client")
} /* end if */
-
} else {
if(entry_ptr->is_read_only) {
/* Sanity check */
@@ -3122,18 +3370,13 @@ H5C_unprotect(H5F_t * f,
/* Mark the entry as dirty if appropriate */
entry_ptr->is_dirty = (entry_ptr->is_dirty || dirtied);
- /* the image_up_to_date field was introduced to support
- * journaling. Until we re-introduce journaling, this
- * field should be equal to !entry_ptr->is_dirty.
- *
- * When journaling is re-enabled it should be set
- * to FALSE if dirtied is TRUE.
- */
-#if 1 /* JRM */
- entry_ptr->image_up_to_date = FALSE;
-#else /* JRM */
- entry_ptr->image_up_to_date = !entry_ptr->is_dirty;
-#endif /* JRM */
+ if(dirtied)
+ if(entry_ptr->image_up_to_date) {
+ entry_ptr->image_up_to_date = FALSE;
+ if(entry_ptr->flush_dep_nparents > 0)
+ if(H5C__mark_flush_dep_unserialized(entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "Can't propagate serialization status to fd parents")
+ } /* end if */
/* Check for newly dirtied entry */
if(was_clean && entry_ptr->is_dirty) {
@@ -3205,16 +3448,16 @@ H5C_unprotect(H5F_t * f,
* makes good use of existing code.
* JRM - 5/19/04
*/
- if ( deleted ) {
+ if(deleted) {
unsigned flush_flags = (H5C__FLUSH_CLEAR_ONLY_FLAG |
H5C__FLUSH_INVALIDATE_FLAG);
/* verify that the target entry is in the cache. */
H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL)
if(test_entry_ptr == NULL)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "entry not in hash table?!?.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "entry not in hash table?!?")
else if(test_entry_ptr != entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "hash table contains multiple entries for addr?!?.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "hash table contains multiple entries for addr?!?")
/* Set the 'free file space' flag for the flush, if needed */
if(free_file_space)
@@ -3227,46 +3470,181 @@ H5C_unprotect(H5F_t * f,
/* Delete the entry from the skip list on destroy */
flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG;
+ HDassert(((!was_clean) || dirtied) == entry_ptr->in_slist);
if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, flush_flags) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't flush entry")
-
- }
+ } /* end if */
#ifdef H5_HAVE_PARALLEL
- else if ( clear_entry ) {
+ else if(clear_entry) {
/* verify that the target entry is in the cache. */
H5C__SEARCH_INDEX(cache_ptr, addr, test_entry_ptr, FAIL)
if(test_entry_ptr == NULL)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "entry not in hash table?!?.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "entry not in hash table?!?")
else if(test_entry_ptr != entry_ptr)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "hash table contains multiple entries for addr?!?.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "hash table contains multiple entries for addr?!?")
if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTUNPROTECT, FAIL, "Can't clear entry")
- }
+ } /* end else if */
#endif /* H5_HAVE_PARALLEL */
}
H5C__UPDATE_STATS_FOR_UNPROTECT(cache_ptr)
done:
-
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ( ( H5C_validate_protected_entry_list(cache_ptr) < 0 ) ||
- ( H5C_validate_pinned_entry_list(cache_ptr) < 0 ) ||
- ( H5C_validate_lru_list(cache_ptr) < 0 ) ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "an extreme sanity check failed on exit.\n");
- }
+ if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_lru_list(cache_ptr) < 0))
+ HDONE_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5C_unprotect() */
/*-------------------------------------------------------------------------
+ *
+ * Function: H5C_unsettle_entry_ring
+ *
+ * Purpose: Advise the metadata cache that the specified entry's free space
+ * manager ring is no longer settled (if it was on entry).
+ *
+ * If the target free space manager ring is already
+ * unsettled, do nothing, and return SUCCEED.
+ *
+ * If the target free space manager ring is settled, and
+ * we are not in the process of a file shutdown, mark
+ * the ring as unsettled, and return SUCCEED.
+ *
+ * If the target free space manager is settled, and we
+ * are in the process of a file shutdown, post an error
+ * message, and return FAIL.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * January 3, 2017
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_unsettle_entry_ring(void *_entry)
+{
+ H5C_cache_entry_t *entry = (H5C_cache_entry_t *)_entry; /* Entry whose ring to unsettle */
+ H5C_t *cache; /* Cache for file */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(entry);
+ HDassert(entry->ring != H5C_RING_UNDEFINED);
+ HDassert((H5C_RING_USER == entry->ring) || (H5C_RING_RDFSM == entry->ring) || (H5C_RING_MDFSM == entry->ring));
+ cache = entry->cache_ptr;
+ HDassert(cache);
+ HDassert(cache->magic == H5C__H5C_T_MAGIC);
+
+ switch(entry->ring) {
+ case H5C_RING_USER:
+ /* Do nothing */
+ break;
+
+ case H5C_RING_RDFSM:
+ if(cache->rdfsm_settled) {
+ if(cache->flush_in_progress || cache->close_warning_received)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected rdfsm ring unsettle")
+ cache->rdfsm_settled = FALSE;
+ } /* end if */
+ break;
+
+ case H5C_RING_MDFSM:
+ if(cache->mdfsm_settled) {
+ if(cache->flush_in_progress || cache->close_warning_received)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected mdfsm ring unsettle")
+ cache->mdfsm_settled = FALSE;
+ } /* end if */
+ break;
+
+ default:
+ HDassert(FALSE); /* this should be un-reachable */
+ break;
+ } /* end switch */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_unsettle_entry_ring() */
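
A sketch of the intended call pattern, with a hypothetical caller: client code that has just changed file space bookkeeping tied to a cache entry can warn the cache that the entry's FSM ring is no longer settled. Only H5C_unsettle_entry_ring() is from this patch.

/* Hypothetical sketch: 'entry' is a pinned/protected cache entry
 * belonging to the raw data or metadata FSM ring whose free space
 * state has just changed.
 */
static herr_t
my_note_fsm_change(void *entry)
{
    /* No-op for H5C_RING_USER entries and for rings that are already
     * unsettled; fails only if the ring was settled and the file is
     * already flushing or shutting down.
     */
    if(H5C_unsettle_entry_ring(entry) < 0)
        return FAIL;
    return SUCCEED;
}
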
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_unsettle_ring()
+ *
+ * Purpose: Advise the metadata cache that the specified free space
+ * manager ring is no longer settled (if it was on entry).
+ *
+ * If the target free space manager ring is already
+ * unsettled, do nothing, and return SUCCEED.
+ *
+ * If the target free space manager ring is settled, and
+ * we are not in the process of a file shutdown, mark
+ * the ring as unsettled, and return SUCCEED.
+ *
+ * If the target free space manager is settled, and we
+ * are in the process of a file shutdown, post an error
+ * message, and return FAIL.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 10/15/16
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_unsettle_ring(H5F_t * f, H5C_ring_t ring)
+{
+ H5C_t * cache_ptr;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(f->shared->cache);
+ HDassert((H5C_RING_RDFSM == ring) || (H5C_RING_MDFSM == ring));
+ cache_ptr = f->shared->cache;
+ HDassert(H5C__H5C_T_MAGIC == cache_ptr->magic);
+
+ switch(ring) {
+ case H5C_RING_RDFSM:
+ if(cache_ptr->rdfsm_settled) {
+ if(cache_ptr->close_warning_received)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected rdfsm ring unsettle")
+ cache_ptr->rdfsm_settled = FALSE;
+ } /* end if */
+ break;
+
+ case H5C_RING_MDFSM:
+ if(cache_ptr->mdfsm_settled) {
+ if(cache_ptr->close_warning_received)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unexpected mdfsm ring unsettle")
+ cache_ptr->mdfsm_settled = FALSE;
+ } /* end if */
+ break;
+
+ default:
+ HDassert(FALSE); /* this should be un-reachable */
+ break;
+ } /* end switch */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_unsettle_ring() */
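
The file-level variant can be driven the same way; the sketch below assumes a hypothetical call site in space-allocation code that has just disturbed the raw data free space manager after it was settled.

/* Hypothetical sketch: raw data file space was allocated or freed
 * after the RDFSM ring was settled, so clear the settled flag before
 * the next close-time flush.
 */
static herr_t
my_note_raw_data_space_change(H5F_t *f)
{
    if(H5C_unsettle_ring(f, H5C_RING_RDFSM) < 0)
        return FAIL;
    return SUCCEED;
}
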
+
+
+/*-------------------------------------------------------------------------
* Function: H5C_validate_resize_config()
*
* Purpose: Run a sanity check on the specified sections of the
@@ -3290,18 +3668,13 @@ H5C_validate_resize_config(H5C_auto_size_ctl_t * config_ptr,
FUNC_ENTER_NOAPI(FAIL)
- if ( config_ptr == NULL ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "NULL config_ptr on entry.")
- }
-
- if ( config_ptr->version != H5C__CURR_AUTO_SIZE_CTL_VER ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown config version.")
- }
+ if(config_ptr == NULL)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "NULL config_ptr on entry")
+ if(config_ptr->version != H5C__CURR_AUTO_SIZE_CTL_VER)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown config version")
- if ( (tests & H5C_RESIZE_CFG__VALIDATE_GENERAL) != 0 ) {
+ if((tests & H5C_RESIZE_CFG__VALIDATE_GENERAL) != 0) {
if(config_ptr->max_size > H5C__MAX_MAX_CACHE_SIZE)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "max_size too big")
@@ -3312,43 +3685,29 @@ H5C_validate_resize_config(H5C_auto_size_ctl_t * config_ptr,
if(config_ptr->min_size > config_ptr->max_size)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "min_size > max_size")
- if ( ( config_ptr->set_initial_size ) &&
- ( ( config_ptr->initial_size < config_ptr->min_size ) ||
- ( config_ptr->initial_size > config_ptr->max_size ) ) ) {
+ if(config_ptr->set_initial_size &&
+ ((config_ptr->initial_size < config_ptr->min_size) ||
+ (config_ptr->initial_size > config_ptr->max_size)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "initial_size must be in the interval [min_size, max_size]")
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
- "initial_size must be in the interval [min_size, max_size]");
- }
-
- if ( ( config_ptr->min_clean_fraction < (double)0.0f ) ||
- ( config_ptr->min_clean_fraction > (double)1.0f ) ) {
-
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
- "min_clean_fraction must be in the interval [0.0, 1.0]");
- }
+ if((config_ptr->min_clean_fraction < (double)0.0f) ||
+ (config_ptr->min_clean_fraction > (double)1.0f))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "min_clean_fraction must be in the interval [0.0, 1.0]")
- if ( config_ptr->epoch_length < H5C__MIN_AR_EPOCH_LENGTH ) {
+ if(config_ptr->epoch_length < H5C__MIN_AR_EPOCH_LENGTH)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epoch_length too small")
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epoch_length too small");
- }
-
- if ( config_ptr->epoch_length > H5C__MAX_AR_EPOCH_LENGTH ) {
-
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epoch_length too big");
- }
+ if(config_ptr->epoch_length > H5C__MAX_AR_EPOCH_LENGTH)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epoch_length too big")
} /* H5C_RESIZE_CFG__VALIDATE_GENERAL */
- if ( (tests & H5C_RESIZE_CFG__VALIDATE_INCREMENT) != 0 ) {
-
- if ( ( config_ptr->incr_mode != H5C_incr__off ) &&
- ( config_ptr->incr_mode != H5C_incr__threshold ) ) {
-
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid incr_mode");
- }
-
- if ( config_ptr->incr_mode == H5C_incr__threshold ) {
+ if((tests & H5C_RESIZE_CFG__VALIDATE_INCREMENT) != 0) {
+ if((config_ptr->incr_mode != H5C_incr__off) &&
+ (config_ptr->incr_mode != H5C_incr__threshold))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid incr_mode")
+ if(config_ptr->incr_mode == H5C_incr__threshold) {
if((config_ptr->lower_hr_threshold < (double)0.0f) ||
(config_ptr->lower_hr_threshold > (double)1.0f))
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "lower_hr_threshold must be in the range [0.0, 1.0]")
@@ -3361,33 +3720,24 @@ H5C_validate_resize_config(H5C_auto_size_ctl_t * config_ptr,
*/
} /* H5C_incr__threshold */
- switch ( config_ptr->flash_incr_mode )
- {
+ switch(config_ptr->flash_incr_mode) {
case H5C_flash_incr__off:
/* nothing to do here */
break;
case H5C_flash_incr__add_space:
- if ( ( config_ptr->flash_multiple < (double)0.1f ) ||
- ( config_ptr->flash_multiple > (double)10.0f ) ) {
-
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
- "flash_multiple must be in the range [0.1, 10.0]");
- }
-
- if ( ( config_ptr->flash_threshold < (double)0.1f ) ||
- ( config_ptr->flash_threshold > (double)1.0f ) ) {
-
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
- "flash_threshold must be in the range [0.1, 1.0]");
- }
+ if((config_ptr->flash_multiple < (double)0.1f) ||
+ (config_ptr->flash_multiple > (double)10.0f))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "flash_multiple must be in the range [0.1, 10.0]")
+ if((config_ptr->flash_threshold < (double)0.1f) ||
+ (config_ptr->flash_threshold > (double)1.0f))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "flash_threshold must be in the range [0.1, 1.0]")
break;
default:
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
- "Invalid flash_incr_mode");
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid flash_incr_mode")
break;
- }
+ } /* end switch */
} /* H5C_RESIZE_CFG__VALIDATE_INCREMENT */
@@ -3399,39 +3749,27 @@ H5C_validate_resize_config(H5C_auto_size_ctl_t * config_ptr,
( config_ptr->decr_mode != H5C_decr__age_out_with_threshold )
) {
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid decr_mode");
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Invalid decr_mode")
}
if ( config_ptr->decr_mode == H5C_decr__threshold ) {
+ if(config_ptr->upper_hr_threshold > (double)1.0f)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "upper_hr_threshold must be <= 1.0")
- if ( config_ptr->upper_hr_threshold > (double)1.0f ) {
-
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
- "upper_hr_threshold must be <= 1.0");
- }
-
- if ( ( config_ptr->decrement > (double)1.0f ) ||
- ( config_ptr->decrement < (double)0.0f ) ) {
-
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
- "decrement must be in the interval [0.0, 1.0]");
- }
+ if((config_ptr->decrement > (double)1.0f) ||
+ (config_ptr->decrement < (double)0.0f))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "decrement must be in the interval [0.0, 1.0]")
/* no need to check max_decrement as it is a size_t
* and thus must be non-negative.
*/
} /* H5C_decr__threshold */
- if ( ( config_ptr->decr_mode == H5C_decr__age_out ) ||
- ( config_ptr->decr_mode == H5C_decr__age_out_with_threshold )
- ) {
-
- if ( config_ptr->epochs_before_eviction < 1 ) {
-
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
- "epochs_before_eviction must be positive");
- }
+ if((config_ptr->decr_mode == H5C_decr__age_out) ||
+ (config_ptr->decr_mode == H5C_decr__age_out_with_threshold)) {
+ if(config_ptr->epochs_before_eviction < 1)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epochs_before_eviction must be positive")
if(config_ptr->epochs_before_eviction > H5C__MAX_EPOCH_MARKERS)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "epochs_before_eviction too big")
@@ -3445,43 +3783,24 @@ H5C_validate_resize_config(H5C_auto_size_ctl_t * config_ptr,
*/
} /* H5C_decr__age_out || H5C_decr__age_out_with_threshold */
- if ( config_ptr->decr_mode == H5C_decr__age_out_with_threshold ) {
-
- if ( ( config_ptr->upper_hr_threshold > (double)1.0f ) ||
- ( config_ptr->upper_hr_threshold < (double)0.0f ) ) {
-
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
- "upper_hr_threshold must be in the interval [0.0, 1.0]");
- }
+ if(config_ptr->decr_mode == H5C_decr__age_out_with_threshold) {
+ if((config_ptr->upper_hr_threshold > (double)1.0f) ||
+ (config_ptr->upper_hr_threshold < (double)0.0f))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "upper_hr_threshold must be in the interval [0.0, 1.0]")
} /* H5C_decr__age_out_with_threshold */
-
} /* H5C_RESIZE_CFG__VALIDATE_DECREMENT */
if ( (tests & H5C_RESIZE_CFG__VALIDATE_INTERACTIONS) != 0 ) {
-
- if ( ( config_ptr->incr_mode == H5C_incr__threshold )
- &&
- ( ( config_ptr->decr_mode == H5C_decr__threshold )
- ||
- ( config_ptr->decr_mode == H5C_decr__age_out_with_threshold )
- )
- &&
- ( config_ptr->lower_hr_threshold
- >=
- config_ptr->upper_hr_threshold
- )
- ) {
-
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, \
- "conflicting threshold fields in config.")
- }
+ if((config_ptr->incr_mode == H5C_incr__threshold)
+ && ((config_ptr->decr_mode == H5C_decr__threshold) ||
+ (config_ptr->decr_mode == H5C_decr__age_out_with_threshold))
+ && (config_ptr->lower_hr_threshold >= config_ptr->upper_hr_threshold))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "conflicting threshold fields in config")
} /* H5C_RESIZE_CFG__VALIDATE_INTERACTIONS */
done:
-
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5C_validate_resize_config() */
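Note on the rewritten checks above: each multi-line if { HGOTO_ERROR } block is collapsed into a single-line condition followed by a one-line error macro, with the trailing periods dropped from the error strings. The following is a standalone sketch of the same validate-and-bail pattern, using a hypothetical resize_cfg_t struct and plain return codes instead of the HDF5 error macros:

    #include <stddef.h>

    /* Hypothetical, simplified stand-in for the auto-resize control block */
    typedef struct resize_cfg_t {
        size_t max_size;
        size_t min_size;
        double min_clean_fraction;
    } resize_cfg_t;

    /* Return 0 on success, -1 on the first failed check -- analogous to
     * the FAIL + HGOTO_ERROR pattern, minus the goto/error-stack machinery. */
    int validate_resize_cfg(const resize_cfg_t *cfg)
    {
        if (cfg == NULL)
            return -1;                              /* NULL cfg on entry */
        if (cfg->min_size > cfg->max_size)
            return -1;                              /* min_size > max_size */
        if (cfg->min_clean_fraction < 0.0 || cfg->min_clean_fraction > 1.0)
            return -1;                              /* fraction outside [0.0, 1.0] */
        return 0;
    }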
@@ -3577,6 +3896,7 @@ H5C_create_flush_dependency(void * parent_thing, void * child_thing)
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for flush dependency parent list")
child_entry->flush_dep_parent_nalloc *= 2;
} /* end else */
+ cache_ptr->entry_fd_height_change_counter++;
} /* end if */
/* Add the dependency to the child's parent array */
@@ -3599,6 +3919,20 @@ H5C_create_flush_dependency(void * parent_thing, void * child_thing)
HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify parent about child entry dirty flag set")
} /* end if */
+ /* adjust the parent's number of unserialized children. Note
+ * that it is possible for an entry to be clean and unserialized.
+ */
+ if(!child_entry->image_up_to_date) {
+ HDassert(parent_entry->flush_dep_nunser_children < parent_entry->flush_dep_nchildren);
+
+ parent_entry->flush_dep_nunser_children++;
+
+ /* If the parent has a 'notify' callback, send a 'child entry unserialized' notice */
+ if(parent_entry->type->notify &&
+ (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_UNSERIALIZED, parent_entry) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify parent about child entry serialized flag reset")
+ } /* end if */
+
/* Post-conditions, for successful operation */
HDassert(parent_entry->is_pinned);
HDassert(parent_entry->flush_dep_nchildren > 0);
@@ -3710,6 +4044,18 @@ H5C_destroy_flush_dependency(void *parent_thing, void * child_thing)
HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify parent about child entry dirty flag reset")
} /* end if */
+ /* adjust parent entry's number of unserialized children */
+ if(!child_entry->image_up_to_date) {
+ HDassert(parent_entry->flush_dep_nunser_children > 0);
+
+ parent_entry->flush_dep_nunser_children--;
+
+ /* If the parent has a 'notify' callback, send a 'child entry serialized' notice */
+ if(parent_entry->type->notify &&
+ (parent_entry->type->notify)(H5C_NOTIFY_ACTION_CHILD_SERIALIZED, parent_entry) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify parent about child entry serialized flag set")
+ } /* end if */
+
/* Shrink or free the parent array if appropriate */
if(child_entry->flush_dep_nparents == 0) {
child_entry->flush_dep_parent = (H5C_cache_entry_t **)H5FL_BLK_FREE(parent, child_entry->flush_dep_parent);
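The create/destroy hunks above keep flush_dep_nunser_children symmetric: the counter is bumped when an unserialized child gains a parent and dropped when that dependency is torn down, with matching notify callbacks in each direction. Below is a toy sketch of that invariant, using hypothetical structs rather than the real H5C_cache_entry_t:

    #include <assert.h>
    #include <stdbool.h>

    /* Hypothetical entry holding only the fields needed for the invariant */
    typedef struct toy_entry {
        bool     image_up_to_date;          /* serialized image is current */
        unsigned flush_dep_nchildren;
        unsigned flush_dep_nunser_children; /* children without a current image */
    } toy_entry;

    void create_dep(toy_entry *parent, const toy_entry *child)
    {
        parent->flush_dep_nchildren++;
        /* mirror the increment in the diff: clean-but-unserialized is possible */
        if (!child->image_up_to_date)
            parent->flush_dep_nunser_children++;
    }

    void destroy_dep(toy_entry *parent, const toy_entry *child)
    {
        assert(parent->flush_dep_nchildren > 0);
        parent->flush_dep_nchildren--;
        /* mirror the decrement: the two paths must stay symmetric */
        if (!child->image_up_to_date) {
            assert(parent->flush_dep_nunser_children > 0);
            parent->flush_dep_nunser_children--;
        }
    }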
@@ -3758,7 +4104,7 @@ H5C__auto_adjust_cache_size(H5F_t * f,
hbool_t write_permitted)
{
H5C_t * cache_ptr = f->shared->cache;
- herr_t result;
+ hbool_t reentrant_call = FALSE;
hbool_t inserted_epoch_marker = FALSE;
size_t new_max_cache_size = 0;
size_t old_max_cache_size = 0;
@@ -3778,29 +4124,33 @@ H5C__auto_adjust_cache_size(H5F_t * f,
HDassert( (double)0.0f <= (cache_ptr->resize_ctl).min_clean_fraction );
HDassert( (cache_ptr->resize_ctl).min_clean_fraction <= (double)100.0f );
- if ( !cache_ptr->resize_enabled ) {
+ /* check to see if cache_ptr->resize_in_progress is TRUE. If it is, this
+ * is a re-entrant call via a client callback called in the resize
+ * process. To avoid an infinite recursion, set reentrant_call to
+ * TRUE, and goto done.
+ */
+ if(cache_ptr->resize_in_progress) {
+ reentrant_call = TRUE;
+ HGOTO_DONE(SUCCEED)
+ } /* end if */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Auto cache resize disabled.")
- }
+ cache_ptr->resize_in_progress = TRUE;
- HDassert( ( (cache_ptr->resize_ctl).incr_mode != H5C_incr__off ) || \
- ( (cache_ptr->resize_ctl).decr_mode != H5C_decr__off ) );
+ if(!cache_ptr->resize_enabled)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Auto cache resize disabled")
- if ( H5C_get_cache_hit_rate(cache_ptr, &hit_rate) != SUCCEED ) {
+ HDassert(((cache_ptr->resize_ctl).incr_mode != H5C_incr__off) || \
+ ((cache_ptr->resize_ctl).decr_mode != H5C_decr__off));
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get hit rate.")
- }
+ if(H5C_get_cache_hit_rate(cache_ptr, &hit_rate) != SUCCEED)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get hit rate")
HDassert( ( (double)0.0f <= hit_rate ) && ( hit_rate <= (double)1.0f ) );
- switch ( (cache_ptr->resize_ctl).incr_mode )
- {
+ switch((cache_ptr->resize_ctl).incr_mode) {
case H5C_incr__off:
- if ( cache_ptr->size_increase_possible ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "size_increase_possible but H5C_incr__off?!?!?")
- }
+ if(cache_ptr->size_increase_possible)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "size_increase_possible but H5C_incr__off?!?!?")
break;
case H5C_incr__threshold:
@@ -3850,7 +4200,7 @@ H5C__auto_adjust_cache_size(H5F_t * f,
break;
default:
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unknown incr_mode.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unknown incr_mode")
}
/* If the decr_mode is either age out or age out with threshold, we
@@ -3877,17 +4227,10 @@ H5C__auto_adjust_cache_size(H5F_t * f,
)
) {
- result = H5C__autoadjust__ageout__insert_new_marker(cache_ptr);
-
- if ( result != SUCCEED ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "can't insert new epoch marker.")
-
- } else {
+ if(H5C__autoadjust__ageout__insert_new_marker(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't insert new epoch marker")
- inserted_epoch_marker = TRUE;
- }
+ inserted_epoch_marker = TRUE;
}
/* don't run the cache size decrease code unless the cache size
@@ -3947,32 +4290,18 @@ H5C__auto_adjust_cache_size(H5F_t * f,
case H5C_decr__age_out_with_threshold:
case H5C_decr__age_out:
- if ( ! inserted_epoch_marker ) {
-
- if ( ! cache_ptr->size_decrease_possible ) {
-
+ if(!inserted_epoch_marker) {
+ if(!cache_ptr->size_decrease_possible)
status = decrease_disabled;
-
- } else {
-
- result = H5C__autoadjust__ageout(f,
- dxpl_id,
- hit_rate,
- &status,
- &new_max_cache_size,
- write_permitted);
-
- if ( result != SUCCEED ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "ageout code failed.")
- }
- }
- }
+ else {
+ if(H5C__autoadjust__ageout(f, dxpl_id, hit_rate, &status, &new_max_cache_size, write_permitted) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ageout code failed")
+ } /* end else */
+ } /* end if */
break;
default:
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unknown incr_mode.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unknown incr_mode")
}
}
@@ -3988,13 +4317,8 @@ H5C__auto_adjust_cache_size(H5F_t * f,
) {
/* move last epoch marker to the head of the LRU list */
- result = H5C__autoadjust__ageout__cycle_epoch_marker(cache_ptr);
-
- if ( result != SUCCEED ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "error cycling epoch marker.")
- }
+ if(H5C__autoadjust__ageout__cycle_epoch_marker(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error cycling epoch marker")
}
if ( ( status == increase ) || ( status == decrease ) ) {
@@ -4035,9 +4359,7 @@ H5C__auto_adjust_cache_size(H5F_t * f,
switch ( (cache_ptr->resize_ctl).flash_incr_mode )
{
case H5C_flash_incr__off:
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "flash_size_increase_possible but H5C_flash_incr__off?!")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "flash_size_increase_possible but H5C_flash_incr__off?!")
break;
case H5C_flash_incr__add_space:
@@ -4048,15 +4370,13 @@ H5C__auto_adjust_cache_size(H5F_t * f,
break;
default: /* should be unreachable */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "Unknown flash_incr_mode?!?!?.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?")
break;
}
}
}
if ( (cache_ptr->resize_ctl).rpt_fcn != NULL ) {
-
(*((cache_ptr->resize_ctl).rpt_fcn))
(cache_ptr,
H5C__CURR_AUTO_RESIZE_RPT_FCN_VER,
@@ -4068,17 +4388,18 @@ H5C__auto_adjust_cache_size(H5F_t * f,
new_min_clean_size);
}
- if ( H5C_reset_cache_hit_rate_stats(cache_ptr) != SUCCEED ) {
-
+ if(H5C_reset_cache_hit_rate_stats(cache_ptr) < 0)
/* this should be impossible... */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "H5C_reset_cache_hit_rate_stats failed.")
- }
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats failed")
done:
+ /* Sanity checks */
+ HDassert(cache_ptr->resize_in_progress);
+ if(!reentrant_call)
+ cache_ptr->resize_in_progress = FALSE;
+ HDassert((!reentrant_call) || (cache_ptr->resize_in_progress));
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5C__auto_adjust_cache_size() */
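The guard added above prevents infinite recursion when a resize-report or flush callback re-enters H5C__auto_adjust_cache_size(): a re-entrant call returns immediately, and only the outermost call clears resize_in_progress in the done: block. A minimal sketch of that pattern, with hypothetical names and no HDF5 macros:

    #include <stdbool.h>

    static bool resize_in_progress = false; /* would live in the cache struct */

    int auto_adjust(void)
    {
        bool reentrant_call = false;
        int  ret_value = 0;

        /* A client callback invoked below may call back into this function;
         * detect that and bail out instead of recursing forever. */
        if (resize_in_progress) {
            reentrant_call = true;
            goto done;
        }
        resize_in_progress = true;

        /* ... resize work that may invoke client callbacks ... */

    done:
        /* only the outermost (non-re-entrant) call clears the flag */
        if (!reentrant_call)
            resize_in_progress = false;
        return ret_value;
    }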
@@ -4110,7 +4431,6 @@ H5C__autoadjust__ageout(H5F_t * f,
hbool_t write_permitted)
{
H5C_t * cache_ptr = f->shared->cache;
- herr_t result;
size_t test_size;
herr_t ret_value = SUCCEED; /* Return value */
@@ -4123,17 +4443,9 @@ H5C__autoadjust__ageout(H5F_t * f,
HDassert( ( new_max_cache_size_ptr ) && ( *new_max_cache_size_ptr == 0 ) );
/* remove excess epoch markers if any */
- if ( cache_ptr->epoch_markers_active >
- (cache_ptr->resize_ctl).epochs_before_eviction ) {
-
- result = H5C__autoadjust__ageout__remove_excess_markers(cache_ptr);
-
- if ( result != SUCCEED ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "can't remove excess epoch markers.")
- }
- }
+ if(cache_ptr->epoch_markers_active > (cache_ptr->resize_ctl).epochs_before_eviction)
+ if(H5C__autoadjust__ageout__remove_excess_markers(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't remove excess epoch markers")
if ( ( (cache_ptr->resize_ctl).decr_mode == H5C_decr__age_out )
||
@@ -4149,7 +4461,7 @@ H5C__autoadjust__ageout(H5F_t * f,
/* evict aged out cache entries if appropriate... */
if(H5C__autoadjust__ageout__evict_aged_out_entries(f, dxpl_id, write_permitted) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error flushing aged out entries.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error flushing aged out entries")
/* ... and then reduce cache size if appropriate */
if ( cache_ptr->index_size < cache_ptr->max_cache_size ) {
@@ -4231,11 +4543,8 @@ H5C__autoadjust__ageout__cycle_epoch_marker(H5C_t * cache_ptr)
HDassert( cache_ptr );
HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
- if ( cache_ptr->epoch_markers_active <= 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "No active epoch markers on entry?!?!?.")
- }
+ if(cache_ptr->epoch_markers_active <= 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "No active epoch markers on entry?!?!?")
/* remove the last marker from both the ring buffer and the LRU list */
@@ -4247,15 +4556,10 @@ H5C__autoadjust__ageout__cycle_epoch_marker(H5C_t * cache_ptr)
cache_ptr->epoch_marker_ringbuf_size -= 1;
- if ( cache_ptr->epoch_marker_ringbuf_size < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow.")
- }
-
- if ( (cache_ptr->epoch_marker_active)[i] != TRUE ) {
-
+ if(cache_ptr->epoch_marker_ringbuf_size < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow")
+ if((cache_ptr->epoch_marker_active)[i] != TRUE)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?")
- }
H5C__DLL_REMOVE((&((cache_ptr->epoch_markers)[i])), \
(cache_ptr)->LRU_head_ptr, \
@@ -4268,9 +4572,9 @@ H5C__autoadjust__ageout__cycle_epoch_marker(H5C_t * cache_ptr)
* the ring buffer.
*/
- HDassert( ((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i );
- HDassert( ((cache_ptr->epoch_markers)[i]).next == NULL );
- HDassert( ((cache_ptr->epoch_markers)[i]).prev == NULL );
+ HDassert(((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i);
+ HDassert(((cache_ptr->epoch_markers)[i]).next == NULL);
+ HDassert(((cache_ptr->epoch_markers)[i]).prev == NULL);
cache_ptr->epoch_marker_ringbuf_last =
(cache_ptr->epoch_marker_ringbuf_last + 1) %
@@ -4280,10 +4584,8 @@ H5C__autoadjust__ageout__cycle_epoch_marker(H5C_t * cache_ptr)
cache_ptr->epoch_marker_ringbuf_size += 1;
- if ( cache_ptr->epoch_marker_ringbuf_size > H5C__MAX_EPOCH_MARKERS ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer overflow.")
- }
+ if(cache_ptr->epoch_marker_ringbuf_size > H5C__MAX_EPOCH_MARKERS)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer overflow")
H5C__DLL_PREPEND((&((cache_ptr->epoch_markers)[i])), \
(cache_ptr)->LRU_head_ptr, \
@@ -4345,19 +4647,6 @@ done:
*
* Programmer: John Mainzer, 11/22/04
*
- * Changes: Modified function to detect deletions of entries
- * during a scan of the LRU, and where appropriate,
- * restart the scan to avoid proceeding with a next
- * entry that is no longer in the cache.
- *
- * Note the absence of checks after flushes of clean
- * entries. As a second entry can only be removed by
- * by a call to the pre_serialize or serialize callback
- * of the first, and as these callbacks will not be called
- * on clean entries, no checks are needed.
- *
- * JRM -- 4/6/15
- *
*-------------------------------------------------------------------------
*/
static herr_t
@@ -4402,10 +4691,10 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f,
entry_ptr = cache_ptr->LRU_tail_ptr;
while ( ( entry_ptr != NULL ) &&
- ( (entry_ptr->type)->id != H5C__EPOCH_MARKER_TYPE ) &&
+ ( (entry_ptr->type)->id != H5AC_EPOCH_MARKER_ID ) &&
( bytes_evicted < eviction_size_limit ) )
{
- hbool_t corked = FALSE;
+ hbool_t skipping_entry = FALSE;
HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert( ! (entry_ptr->is_protected) );
@@ -4419,9 +4708,11 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f,
prev_is_dirty = prev_ptr->is_dirty;
if(entry_ptr->is_dirty ) {
+ HDassert(!entry_ptr->prefetched_dirty);
+
/* dirty corked entry is skipped */
if(entry_ptr->tag_info && entry_ptr->tag_info->corked)
- corked = TRUE;
+ skipping_entry = TRUE;
else {
/* reset entries_removed_counter and
* last_entry_removed_ptr prior to the call to
@@ -4441,47 +4732,40 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f,
restart_scan = TRUE;
} /* end else */
} /* end if */
- else {
+ else if(!entry_ptr->prefetched_dirty) {
bytes_evicted += entry_ptr->size;
if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0 )
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
- }
+ } /* end else-if */
+ else {
+ HDassert(!entry_ptr->is_dirty);
+ HDassert(entry_ptr->prefetched_dirty);
- if ( prev_ptr != NULL ) {
+ skipping_entry = TRUE;
+ } /* end else */
- if(corked) /* dirty corked entry is skipped */
+ if(prev_ptr != NULL) {
+ if(skipping_entry)
entry_ptr = prev_ptr;
-
- else if ( ( restart_scan )
- ||
- ( prev_ptr->is_dirty != prev_is_dirty )
- ||
- ( prev_ptr->next != next_ptr )
- ||
- ( prev_ptr->is_protected )
- ||
- ( prev_ptr->is_pinned ) ) {
-
- /* something has happened to the LRU -- start over
+ else if(restart_scan || (prev_ptr->is_dirty != prev_is_dirty)
+ || (prev_ptr->next != next_ptr)
+ || (prev_ptr->is_protected)
+ || (prev_ptr->is_pinned)) {
+ /* Something has happened to the LRU -- start over
* from the tail.
*/
restart_scan = FALSE;
entry_ptr = cache_ptr->LRU_tail_ptr;
H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr)
-
- } else {
-
+ } /* end else-if */
+ else
entry_ptr = prev_ptr;
-
- }
- } else {
-
+ } /* end if */
+ else
entry_ptr = NULL;
-
- }
} /* end while */
/* for now at least, don't bother to maintain the minimum clean size,
@@ -4499,9 +4783,9 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f,
* entry).
*/
- } else /* ! write_permitted */ {
-
- /* since we are not allowed to write, all we can do is evict
+ } /* end if */
+ else /* ! write_permitted */ {
+ /* Since we are not allowed to write, all we can do is evict
* any clean entries that we may encounter before we either
* hit the eviction size limit, or encounter the epoch marker.
*
@@ -4514,23 +4798,19 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f,
* performance implications, but it shouldn't cause any net
* slowdown.
*/
-
- HDassert( H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS );
-
+ HDassert(H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS);
entry_ptr = cache_ptr->LRU_tail_ptr;
-
- while ( ( entry_ptr != NULL ) &&
- ( (entry_ptr->type)->id != H5C__EPOCH_MARKER_TYPE ) &&
- ( bytes_evicted < eviction_size_limit ) )
- {
- HDassert( ! (entry_ptr->is_protected) );
+ while(entry_ptr != NULL &&
+ ((entry_ptr->type)->id != H5AC_EPOCH_MARKER_ID) &&
+ (bytes_evicted < eviction_size_limit)) {
+ HDassert(!(entry_ptr->is_protected));
prev_ptr = entry_ptr->prev;
- if ( ! (entry_ptr->is_dirty) ) {
+ if(!(entry_ptr->is_dirty) && !(entry_ptr->prefetched_dirty))
if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush clean entry")
- }
+
/* just skip the entry if it is dirty, as we can't do
* anything with it now since we can't write.
*
@@ -4538,21 +4818,15 @@ H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * f,
* and thus we needn't test to see if the LRU has been changed
* out from under us.
*/
-
entry_ptr = prev_ptr;
-
} /* end while */
- }
-
- if ( cache_ptr->index_size < cache_ptr->max_cache_size ) {
+ } /* end else */
+ if(cache_ptr->index_size < cache_ptr->max_cache_size)
cache_ptr->cache_full = FALSE;
- }
done:
-
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5C__autoadjust__ageout__evict_aged_out_entries() */
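The eviction loop above remembers the predecessor before flushing an entry and restarts from the LRU tail whenever the flush callbacks appear to have modified the list. A compact sketch of that defensive scan over a doubly linked list, with hypothetical node/list types standing in for the cache structures:

    #include <stddef.h>
    #include <stdbool.h>

    typedef struct node {
        struct node *prev;
        struct node *next;
    } node;

    typedef struct list {
        node *tail;
        bool  mutated;   /* set by anything that inserts or removes nodes */
    } list;

    /* Walk from the tail toward the head; process() may edit the list
     * (e.g. evict the current node), so restart from the tail whenever
     * a mutation is detected rather than following a stale pointer. */
    void scan_from_tail(list *l, void (*process)(list *, node *))
    {
        node *cur = l->tail;

        while (cur != NULL) {
            node *prev = cur->prev;     /* remember the predecessor first */

            l->mutated = false;
            process(l, cur);

            if (l->mutated)
                cur = l->tail;          /* list changed -- start over */
            else
                cur = prev;
        }
    }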
@@ -4581,23 +4855,16 @@ H5C__autoadjust__ageout__insert_new_marker(H5C_t * cache_ptr)
HDassert( cache_ptr );
HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
- if ( cache_ptr->epoch_markers_active >=
- (cache_ptr->resize_ctl).epochs_before_eviction ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "Already have a full complement of markers.")
- }
+ if(cache_ptr->epoch_markers_active >= (cache_ptr->resize_ctl).epochs_before_eviction)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Already have a full complement of markers")
/* find an unused marker */
i = 0;
- while ( ( (cache_ptr->epoch_marker_active)[i] ) &&
- ( i < H5C__MAX_EPOCH_MARKERS ) )
- {
+ while((cache_ptr->epoch_marker_active)[i] && i < H5C__MAX_EPOCH_MARKERS)
i++;
- }
if(i >= H5C__MAX_EPOCH_MARKERS)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't find unused marker.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't find unused marker")
HDassert( ((cache_ptr->epoch_markers)[i]).addr == (haddr_t)i );
HDassert( ((cache_ptr->epoch_markers)[i]).next == NULL );
@@ -4615,7 +4882,7 @@ H5C__autoadjust__ageout__insert_new_marker(H5C_t * cache_ptr)
if ( cache_ptr->epoch_marker_ringbuf_size > H5C__MAX_EPOCH_MARKERS ) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer overflow.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer overflow")
}
H5C__DLL_PREPEND((&((cache_ptr->epoch_markers)[i])), \
@@ -4674,15 +4941,11 @@ H5C__autoadjust__ageout__remove_all_markers(H5C_t * cache_ptr)
cache_ptr->epoch_marker_ringbuf_size -= 1;
- if ( cache_ptr->epoch_marker_ringbuf_size < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow.")
- }
-
- if ( (cache_ptr->epoch_marker_active)[i] != TRUE ) {
+ if(cache_ptr->epoch_marker_ringbuf_size < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow")
+ if((cache_ptr->epoch_marker_active)[i] != TRUE)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?")
- }
/* remove the epoch marker from the LRU list */
H5C__DLL_REMOVE((&((cache_ptr->epoch_markers)[i])), \
@@ -4740,15 +5003,10 @@ H5C__autoadjust__ageout__remove_excess_markers(H5C_t * cache_ptr)
HDassert( cache_ptr );
HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
- if ( cache_ptr->epoch_markers_active <=
- (cache_ptr->resize_ctl).epochs_before_eviction ) {
+ if(cache_ptr->epoch_markers_active <= (cache_ptr->resize_ctl).epochs_before_eviction)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "no excess markers on entry")
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "no excess markers on entry.")
- }
-
- while ( cache_ptr->epoch_markers_active >
- (cache_ptr->resize_ctl).epochs_before_eviction )
- {
+ while(cache_ptr->epoch_markers_active > (cache_ptr->resize_ctl).epochs_before_eviction) {
/* get the index of the last epoch marker in the LRU list
* and remove it from the ring buffer.
*/
@@ -4762,15 +5020,10 @@ H5C__autoadjust__ageout__remove_excess_markers(H5C_t * cache_ptr)
cache_ptr->epoch_marker_ringbuf_size -= 1;
- if ( cache_ptr->epoch_marker_ringbuf_size < 0 ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow.")
- }
-
- if ( (cache_ptr->epoch_marker_active)[i] != TRUE ) {
-
+ if(cache_ptr->epoch_marker_ringbuf_size < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "ring buffer underflow")
+ if((cache_ptr->epoch_marker_active)[i] != TRUE)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "unused marker in LRU?!?")
- }
/* remove the epoch marker from the LRU list */
H5C__DLL_REMOVE((&((cache_ptr->epoch_markers)[i])), \
@@ -4839,11 +5092,8 @@ H5C__flash_increase_cache_size(H5C_t * cache_ptr,
HDassert( new_entry_size > cache_ptr->flash_size_increase_threshold );
HDassert( old_entry_size < new_entry_size );
- if ( old_entry_size >= new_entry_size ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "old_entry_size >= new_entry_size")
- }
+ if(old_entry_size >= new_entry_size)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "old_entry_size >= new_entry_size")
space_needed = new_entry_size - old_entry_size;
@@ -4856,8 +5106,7 @@ H5C__flash_increase_cache_size(H5C_t * cache_ptr,
switch ( (cache_ptr->resize_ctl).flash_incr_mode )
{
case H5C_flash_incr__off:
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "flash_size_increase_possible but H5C_flash_incr__off?!")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "flash_size_increase_possible but H5C_flash_incr__off?!")
break;
case H5C_flash_incr__add_space:
@@ -4877,8 +5126,7 @@ H5C__flash_increase_cache_size(H5C_t * cache_ptr,
break;
default: /* should be unreachable */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "Unknown flash_incr_mode?!?!?.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?")
break;
}
@@ -4907,8 +5155,7 @@ H5C__flash_increase_cache_size(H5C_t * cache_ptr,
switch ( (cache_ptr->resize_ctl).flash_incr_mode )
{
case H5C_flash_incr__off:
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "flash_size_increase_possible but H5C_flash_incr__off?!")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "flash_size_increase_possible but H5C_flash_incr__off?!")
break;
case H5C_flash_incr__add_space:
@@ -4919,8 +5166,7 @@ H5C__flash_increase_cache_size(H5C_t * cache_ptr,
break;
default: /* should be unreachable */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "Unknown flash_incr_mode?!?!?.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown flash_incr_mode?!?!?")
break;
}
@@ -4934,10 +5180,8 @@ H5C__flash_increase_cache_size(H5C_t * cache_ptr,
/* get the hit rate for the reporting function. Should still
* be good as we haven't reset the hit rate statistics.
*/
- if ( H5C_get_cache_hit_rate(cache_ptr, &hit_rate) != SUCCEED ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get hit rate.")
- }
+ if(H5C_get_cache_hit_rate(cache_ptr, &hit_rate) != SUCCEED)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Can't get hit rate")
(*((cache_ptr->resize_ctl).rpt_fcn))
(cache_ptr,
@@ -4950,12 +5194,9 @@ H5C__flash_increase_cache_size(H5C_t * cache_ptr,
new_min_clean_size);
}
- if ( H5C_reset_cache_hit_rate_stats(cache_ptr) != SUCCEED ) {
-
+ if(H5C_reset_cache_hit_rate_stats(cache_ptr) < 0)
/* this should be impossible... */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "H5C_reset_cache_hit_rate_stats failed.")
- }
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C_reset_cache_hit_rate_stats failed")
}
done:
@@ -5003,7 +5244,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5C_flush_invalidate_cache(const H5F_t * f, hid_t dxpl_id, unsigned flags)
+H5C_flush_invalidate_cache(H5F_t *f, hid_t dxpl_id, unsigned flags)
{
H5C_t * cache_ptr;
H5C_ring_t ring;
@@ -5021,8 +5262,8 @@ H5C_flush_invalidate_cache(const H5F_t * f, hid_t dxpl_id, unsigned flags)
#if H5C_DO_SANITY_CHECKS
{
int32_t i;
- int32_t index_len = 0;
- int32_t slist_len = 0;
+ uint32_t index_len = 0;
+ uint32_t slist_len = 0;
size_t index_size = (size_t)0;
size_t clean_index_size = (size_t)0;
size_t dirty_index_size = (size_t)0;
@@ -5057,7 +5298,7 @@ H5C_flush_invalidate_cache(const H5F_t * f, hid_t dxpl_id, unsigned flags)
/* remove ageout markers if present */
if(cache_ptr->epoch_markers_active > 0)
if(H5C__autoadjust__ageout__remove_all_markers(cache_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error removing all epoch markers.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "error removing all epoch markers")
/* flush invalidate each ring, starting from the outermost ring and
* working inward.
@@ -5065,7 +5306,7 @@ H5C_flush_invalidate_cache(const H5F_t * f, hid_t dxpl_id, unsigned flags)
ring = H5C_RING_USER;
while(ring < H5C_RING_NTYPES) {
if(H5C_flush_invalidate_ring(f, dxpl_id, ring, flags) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush invalidate ring failed.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush invalidate ring failed")
ring++;
} /* end while */
@@ -5148,25 +5389,25 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5C_flush_invalidate_ring(const H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
+H5C_flush_invalidate_ring(H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
unsigned flags)
{
- H5C_t * cache_ptr;
+ H5C_t *cache_ptr;
hbool_t restart_slist_scan;
- int32_t protected_entries = 0;
- int32_t i;
- int32_t cur_ring_pel_len;
- int32_t old_ring_pel_len;
- unsigned cooked_flags;
- unsigned evict_flags;
- H5SL_node_t * node_ptr = NULL;
- H5C_cache_entry_t * entry_ptr = NULL;
- H5C_cache_entry_t * next_entry_ptr = NULL;
+ uint32_t protected_entries = 0;
+ int32_t i;
+ int32_t cur_ring_pel_len;
+ int32_t old_ring_pel_len;
+ unsigned cooked_flags;
+ unsigned evict_flags;
+ H5SL_node_t *node_ptr = NULL;
+ H5C_cache_entry_t *entry_ptr = NULL;
+ H5C_cache_entry_t *next_entry_ptr = NULL;
#if H5C_DO_SANITY_CHECKS
- int64_t initial_slist_len = 0;
+ uint32_t initial_slist_len = 0;
size_t initial_slist_size = 0;
#endif /* H5C_DO_SANITY_CHECKS */
- herr_t ret_value = SUCCEED;
+ herr_t ret_value = SUCCEED;
FUNC_ENTER_NOAPI(FAIL)
@@ -5348,9 +5589,10 @@ H5C_flush_invalidate_ring(const H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
* everything we can before we flag an error.
*/
protected_entries++;
- } else if(entry_ptr->is_pinned) {
+ } /* end if */
+ else if(entry_ptr->is_pinned) {
if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__DURING_FLUSH_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty pinned entry flush failed.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty pinned entry flush failed")
if(cache_ptr->slist_changed) {
/* The slist has been modified by something
@@ -5364,10 +5606,10 @@ H5C_flush_invalidate_ring(const H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
cache_ptr->slist_changed = FALSE;
H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr);
} /* end if */
- } /* end if */
+ } /* end else-if */
else {
if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, (cooked_flags | H5C__DURING_FLUSH_FLAG | H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG)) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry flush destroy failed.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "dirty entry flush destroy failed")
if(cache_ptr->slist_changed) {
/* The slist has been modified by something
@@ -5396,8 +5638,8 @@ H5C_flush_invalidate_ring(const H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
*/
if(node_ptr == NULL) {
- HDassert(cache_ptr->slist_len == (initial_slist_len + cache_ptr->slist_len_increase));
- HDassert((int64_t)cache_ptr->slist_size == ((int64_t)initial_slist_size + cache_ptr->slist_size_increase));
+ HDassert(cache_ptr->slist_len == (uint32_t)((int32_t)initial_slist_len + cache_ptr->slist_len_increase));
+ HDassert(cache_ptr->slist_size == (size_t)((ssize_t)initial_slist_size + cache_ptr->slist_size_increase));
} /* end if */
#endif /* H5C_DO_SANITY_CHECKS */
@@ -5412,77 +5654,92 @@ H5C_flush_invalidate_ring(const H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
*
* Writes to disk are possible here.
*/
- for(i = 0; i < H5C__HASH_TABLE_LEN; i++) {
- next_entry_ptr = cache_ptr->index[i];
- while(next_entry_ptr != NULL) {
- entry_ptr = next_entry_ptr;
- HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(entry_ptr->ring >= ring);
+ /* reset the counters so that we can detect insertions, loads,
+ * and moves caused by the pre_serialize and serialize calls.
+ */
+ cache_ptr->entries_loaded_counter = 0;
+ cache_ptr->entries_inserted_counter = 0;
+ cache_ptr->entries_relocated_counter = 0;
- next_entry_ptr = entry_ptr->ht_next;
- HDassert((next_entry_ptr == NULL) ||
- (next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC));
+ next_entry_ptr = cache_ptr->il_head;
+ while(next_entry_ptr != NULL) {
+ entry_ptr = next_entry_ptr;
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->ring >= ring);
- if(((!entry_ptr->flush_me_last) ||
- ((entry_ptr->flush_me_last) &&
- (cache_ptr->num_last_entries >= cache_ptr->slist_len))) &&
- (entry_ptr->flush_dep_nchildren == 0) &&
- (entry_ptr->ring == ring)) {
+ next_entry_ptr = entry_ptr->il_next;
+ HDassert((next_entry_ptr == NULL) ||
+ (next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC));
- if(entry_ptr->is_protected) {
- /* we have major problems -- but lets flush and
- * destroy everything we can before we flag an
- * error.
- */
- protected_entries++;
- if(!entry_ptr->in_slist)
- HDassert(!(entry_ptr->is_dirty));
- } else if(!(entry_ptr->is_pinned)) {
-
- /* if *entry_ptr is dirty, it is possible
- * that one or more other entries may be
- * either removed from the cache, loaded
- * into the cache, or moved to a new location
- * in the file as a side effect of the flush.
- *
- * It's also possible that removing a clean
- * entry will remove the last child of a proxy
- * entry, allowing it to be removed also and
- * invalidating the next_entry_ptr.
- *
- * If either of these happen, and one of the target
- * or proxy entries happens to be the next entry in
- * the hash bucket, we could either find ourselves
- * either scanning a non-existant entry, scanning
- * through a different bucket, or skipping an entry.
- *
- * Neither of these are good, so restart the
- * the scan at the head of the hash bucket
- * after the flush if we detect that the next_entry_ptr
- * becomes invalid.
- *
- * This is not as inefficient at it might seem,
- * as hash buckets typically have at most two
- * or three entries.
- */
- cache_ptr->entry_watched_for_removal = next_entry_ptr;
- if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, (cooked_flags | H5C__DURING_FLUSH_FLAG | H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG)) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Entry flush destroy failed.")
-
- /* Check for the next entry getting removed */
- if(NULL != next_entry_ptr && NULL == cache_ptr->entry_watched_for_removal) {
- /* update stats for hash bucket scan restart here.
- * -- JRM
- */
- next_entry_ptr = cache_ptr->index[i];
- H5C__UPDATE_STATS_FOR_HASH_BUCKET_SCAN_RESTART(cache_ptr)
- } /* end if */
- else
- cache_ptr->entry_watched_for_removal = NULL;
+ if((!entry_ptr->flush_me_last || (entry_ptr->flush_me_last && cache_ptr->num_last_entries >= cache_ptr->slist_len))
+ && entry_ptr->flush_dep_nchildren == 0 && entry_ptr->ring == ring) {
+ if(entry_ptr->is_protected) {
+ /* we have major problems -- but lets flush and
+ * destroy everything we can before we flag an
+ * error.
+ */
+ protected_entries++;
+ if(!entry_ptr->in_slist)
+ HDassert(!(entry_ptr->is_dirty));
+ } /* end if */
+ else if(!(entry_ptr->is_pinned)) {
+ /* if *entry_ptr is dirty, it is possible
+ * that one or more other entries may be
+ * either removed from the cache, loaded
+ * into the cache, or moved to a new location
+ * in the file as a side effect of the flush.
+ *
+ * It's also possible that removing a clean
+ * entry will remove the last child of a proxy
+ * entry, allowing it to be removed also and
+ * invalidating the next_entry_ptr.
+ *
+ * If either of these happen, and one of the target
+ * or proxy entries happens to be the next entry in
+ * the hash bucket, we could find ourselves
+ * either scanning a non-existent entry, scanning
+ * through a different bucket, or skipping an entry.
+ *
+ * Neither of these is good, so restart the
+ * scan at the head of the hash bucket
+ * after the flush if we detect that the next_entry_ptr
+ * becomes invalid.
+ *
+ * This is not as inefficient as it might seem,
+ * as hash buckets typically have at most two
+ * or three entries.
+ */
+ cache_ptr->entry_watched_for_removal = next_entry_ptr;
+
+ if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, (cooked_flags | H5C__DURING_FLUSH_FLAG | H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG)) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Entry flush destroy failed")
+
+ /* Restart the index list scan if necessary. Must
+ * do this if the next entry is evicted, and also if
+ * one or more entries are inserted, loaded, or moved
+ * as these operations can result in part of the scan
+ * being skipped -- which can cause a spurious failure
+ * if this results in the size of the pinned entry list
+ * failing to decline during the pass.
+ */
+ if((NULL != next_entry_ptr && NULL == cache_ptr->entry_watched_for_removal)
+ || (cache_ptr->entries_loaded_counter > 0)
+ || (cache_ptr->entries_inserted_counter > 0)
+ || (cache_ptr->entries_relocated_counter > 0)) {
+
+ next_entry_ptr = cache_ptr->il_head;
+
+ cache_ptr->entries_loaded_counter = 0;
+ cache_ptr->entries_inserted_counter = 0;
+ cache_ptr->entries_relocated_counter = 0;
+
+ H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr)
} /* end if */
+ else
+ cache_ptr->entry_watched_for_removal = NULL;
} /* end if */
- } /* end while loop scanning hash table bin */
+ } /* end if */
} /* end for loop scanning hash table */
/* We can't do anything if entries are pinned. The
@@ -5493,32 +5750,33 @@ H5C_flush_invalidate_ring(const H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
* of pinned entries from pass to pass. If it stops
* shrinking before it hits zero, we scream and die.
*/
- old_ring_pel_len = cur_ring_pel_len;
+ old_ring_pel_len = cur_ring_pel_len;
entry_ptr = cache_ptr->pel_head_ptr;
cur_ring_pel_len = 0;
while(entry_ptr != NULL) {
HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(entry_ptr->ring >= ring);
- if(entry_ptr->ring == ring)
+ if(entry_ptr->ring == ring)
cur_ring_pel_len++;
entry_ptr = entry_ptr->next;
} /* end while */
- if((cur_ring_pel_len > 0) && (cur_ring_pel_len >= old_ring_pel_len)) {
+ /* Check if the number of pinned entries in the ring is positive, and
+ * it is not declining. Scream and die if so.
+ */
+ if(cur_ring_pel_len > 0 && cur_ring_pel_len >= old_ring_pel_len) {
/* Don't error if allowed to have pinned entries remaining */
- if(evict_flags)
+ if(evict_flags)
HGOTO_DONE(TRUE)
- /* The number of pinned entries in the ring is positive, and
- * it is not declining. Scream and die.
- */
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Pinned entry count not decreasing, cur_ring_pel_len = %d, old_ring_pel_len = %d, ring = %d", (int)cur_ring_pel_len, (int)old_ring_pel_len, (int)ring)
} /* end if */
HDassert(protected_entries == cache_ptr->pl_len);
- if((protected_entries > 0) && (protected_entries == cache_ptr->index_len))
+
+ if(protected_entries > 0 && protected_entries == cache_ptr->index_len)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Only protected entries left in cache, protected_entries = %d", (int)protected_entries)
} /* main while loop */
@@ -5536,9 +5794,9 @@ H5C_flush_invalidate_ring(const H5F_t * f, hid_t dxpl_id, H5C_ring_t ring,
HDassert(protected_entries <= cache_ptr->pl_len);
if(protected_entries > 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Cache has protected entries.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Cache has protected entries")
else if(cur_ring_pel_len > 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't unpin all pinned entries in ring.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't unpin all pinned entries in ring")
done:
FUNC_LEAVE_NOAPI(ret_value)
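H5C_flush_invalidate_ring() keeps making passes over a ring as long as the count of pinned entries keeps shrinking; if the count is positive and stops declining, it gives up with an error instead of spinning. A small sketch of that progress-or-fail loop, with hypothetical count_pinned()/make_pass() callbacks standing in for the pinned-entry-list walk and one flush/evict pass:

    /* Returns 0 once no pinned entries remain in the ring, -1 on failure. */
    int flush_until_unpinned(int (*count_pinned)(void), int (*make_pass)(void))
    {
        int cur_len = count_pinned();

        while (cur_len > 0) {
            int old_len = cur_len;

            if (make_pass() < 0)
                return -1;              /* the flush pass itself failed */

            cur_len = count_pinned();

            /* pinned count positive and not declining -- give up */
            if (cur_len > 0 && cur_len >= old_len)
                return -1;
        }
        return 0;
    }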
@@ -5578,12 +5836,12 @@ H5C_flush_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring, unsigned flags)
hbool_t ignore_protected;
hbool_t tried_to_flush_protected_entry = FALSE;
hbool_t restart_slist_scan;
- int32_t protected_entries = 0;
+ uint32_t protected_entries = 0;
H5SL_node_t * node_ptr = NULL;
H5C_cache_entry_t * entry_ptr = NULL;
H5C_cache_entry_t * next_entry_ptr = NULL;
#if H5C_DO_SANITY_CHECKS
- int64_t initial_slist_len = 0;
+ uint32_t initial_slist_len = 0;
size_t initial_slist_size = 0;
#endif /* H5C_DO_SANITY_CHECKS */
int i;
@@ -5602,7 +5860,7 @@ H5C_flush_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring, unsigned flags)
if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
(H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
(H5C_validate_lru_list(cache_ptr) < 0))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry.\n");
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
ignore_protected = ( (flags & H5C__FLUSH_IGNORE_PROTECTED_FLAG) != 0 );
@@ -5724,7 +5982,7 @@ H5C_flush_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring, unsigned flags)
if(!flush_marked_entries || entry_ptr->flush_marker)
HDassert(entry_ptr->ring >= ring);
- /* advance node pointer now, before we delete its target
+ /* Advance node pointer now, before we delete its target
* from the slist.
*/
node_ptr = H5SL_next(node_ptr);
@@ -5733,19 +5991,29 @@ H5C_flush_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring, unsigned flags)
if(NULL == next_entry_ptr)
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "next_entry_ptr == NULL ?!?!")
+ HDassert(next_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(next_entry_ptr->is_dirty);
+ HDassert(next_entry_ptr->in_slist);
+
+ if(!flush_marked_entries || next_entry_ptr->flush_marker)
+ HDassert(next_entry_ptr->ring >= ring);
+
HDassert(entry_ptr != next_entry_ptr);
} /* end if */
else
next_entry_ptr = NULL;
- if(((!flush_marked_entries) || (entry_ptr->flush_marker)) &&
- ((!entry_ptr->flush_me_last) ||
- (entry_ptr->flush_me_last &&
- ((cache_ptr->num_last_entries >= cache_ptr->slist_len) ||
- (flush_marked_entries && entry_ptr->flush_marker)))) &&
- ( ( entry_ptr->flush_dep_nchildren == 0 ) ||
- ( entry_ptr->flush_dep_ndirty_children == 0 ) ) &&
- (entry_ptr->ring == ring)) {
+ if((!flush_marked_entries || entry_ptr->flush_marker)
+ && (!entry_ptr->flush_me_last ||
+ (entry_ptr->flush_me_last
+ && (cache_ptr->num_last_entries >= cache_ptr->slist_len
+ || (flush_marked_entries && entry_ptr->flush_marker))))
+ && (entry_ptr->flush_dep_nchildren == 0
+ || entry_ptr->flush_dep_ndirty_children == 0)
+ && entry_ptr->ring == ring) {
+
+ HDassert(entry_ptr->flush_dep_nunser_children == 0);
+
if(entry_ptr->is_protected) {
/* we probably have major problems -- but lets
* flush everything we can before we decide
@@ -5756,7 +6024,7 @@ H5C_flush_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring, unsigned flags)
} /* end if */
else {
if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, (flags | H5C__DURING_FLUSH_FLAG)) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry")
if(cache_ptr->slist_changed) {
/* The slist has been modified by something
@@ -5778,8 +6046,8 @@ H5C_flush_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring, unsigned flags)
#if H5C_DO_SANITY_CHECKS
/* Verify that the slist size and length are as expected. */
- HDassert((initial_slist_len + cache_ptr->slist_len_increase) == cache_ptr->slist_len);
- HDassert((size_t)((int64_t)initial_slist_size + cache_ptr->slist_size_increase) == cache_ptr->slist_size);
+ HDassert((uint32_t)((int32_t)initial_slist_len + cache_ptr->slist_len_increase) == cache_ptr->slist_len);
+ HDassert((size_t)((ssize_t)initial_slist_size + cache_ptr->slist_size_increase) == cache_ptr->slist_size);
#endif /* H5C_DO_SANITY_CHECKS */
} /* while */
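The sanity checks in this function compare the unsigned skip-list length and size against their initial values plus signed deltas accumulated during the flush, which is why the casts above are spelled out. A tiny sketch of that bookkeeping, assuming a hypothetical skip-list wrapper rather than the real cache fields:

    #include <assert.h>
    #include <stdint.h>
    #include <stddef.h>

    typedef struct toy_slist {
        uint32_t len;            /* current number of entries */
        size_t   size;           /* current total size in bytes */
        int32_t  len_increase;   /* signed deltas accumulated during a flush */
        int64_t  size_increase;
    } toy_slist;

    void check_slist_bookkeeping(const toy_slist *s,
                                 uint32_t initial_len, size_t initial_size)
    {
        /* deltas may be negative, so make the widening and signedness explicit */
        assert(s->len  == (uint32_t)((int32_t)initial_len + s->len_increase));
        assert(s->size == (size_t)((int64_t)initial_size + s->size_increase));
    }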
@@ -5831,38 +6099,10 @@ done:
*
* Programmer: John Mainzer, 5/5/04
*
- * Changes: Refactored function to remove the type_ptr parameter.
- *
- * JRM -- 8/7/14
- *
- * Added code to check for slist changes in pre_serialize and
- * serialize calls, and set
- * cache_ptr->slist_change_in_pre_serialize and
- * cache_ptr->slist_change_in_serialize as appropriate.
- *
- * JRM -- 12/13/14
- *
- * Refactored function to delay all modifications of the
- * metadata cache data structures until after any calls
- * to the pre-serialize or serialize callbacks.
- *
- * Need to do this, as some pre-serialize or serialize
- * calls result in calls to the metadata cache and
- * modifications to its data structures. Thus, at the
- * time of any such call, the target entry flags and
- * the metadata cache must all be consistant.
- *
- * Also added the entry_size_change_ptr parameter, which
- * allows the function to report back any change in the size
- * of the entry during the flush. Such size changes may
- * occur during the pre-serialize callback.
- *
- * JRM -- 12/24/14
- *
*-------------------------------------------------------------------------
*/
herr_t
-H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_ptr,
+H5C__flush_single_entry(H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_ptr,
unsigned flags)
{
H5C_t * cache_ptr; /* Cache for file */
@@ -5875,7 +6115,10 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
hbool_t write_entry; /* internal flag */
hbool_t destroy_entry; /* internal flag */
hbool_t generate_image; /* internal flag */
+ hbool_t update_page_buffer; /* internal flag */
hbool_t was_dirty;
+ hbool_t suppress_image_entry_writes = FALSE;
+ hbool_t suppress_image_entry_frees = FALSE;
haddr_t entry_addr = HADDR_UNDEF;
herr_t ret_value = SUCCEED; /* Return value */
@@ -5886,7 +6129,9 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
HDassert(cache_ptr);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
HDassert(entry_ptr);
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(entry_ptr->ring != H5C_RING_UNDEFINED);
+ HDassert(entry_ptr->type);
/* setup external flags from the flags parameter */
destroy = ((flags & H5C__FLUSH_INVALIDATE_FLAG) != 0);
@@ -5896,6 +6141,7 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
del_from_slist_on_destroy = ((flags & H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) != 0);
during_flush = ((flags & H5C__DURING_FLUSH_FLAG) != 0);
generate_image = ((flags & H5C__GENERATE_IMAGE_FLAG) != 0);
+ update_page_buffer = ((flags & H5C__UPDATE_PAGE_BUFFER_FLAG) != 0);
/* Set the flag for destroying the entry, based on the 'take ownership'
* and 'destroy' flags
@@ -5905,10 +6151,6 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
else
destroy_entry = destroy;
-#ifdef H5_HAVE_PARALLEL
- HDassert(FALSE == entry_ptr->coll_access);
-#endif
-
/* we will write the entry to disk if it exists, is dirty, and if the
* clear only flag is not set.
*/
@@ -5917,44 +6159,64 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
else
write_entry = FALSE;
+ /* if we have received close warning, and we have been instructed to
+ * generate a metadata cache image, and we have actually constructed
+ * the entry images, set suppress_image_entry_frees to TRUE.
+ *
+ * Set suppress_image_entry_writes to TRUE if indicated by the
+ * image_ctl flags.
+ */
+ if(cache_ptr->close_warning_received && cache_ptr->image_ctl.generate_image
+ && cache_ptr->num_entries_in_image > 0 && cache_ptr->image_entries) {
+ /* Sanity checks */
+ HDassert(entry_ptr->image_up_to_date || !(entry_ptr->include_in_image));
+ HDassert(entry_ptr->image_ptr || !(entry_ptr->include_in_image));
+ HDassert((!clear_only) || !(entry_ptr->include_in_image));
+ HDassert((!take_ownership) || !(entry_ptr->include_in_image));
+ HDassert((!free_file_space) || !(entry_ptr->include_in_image));
+
+ suppress_image_entry_frees = TRUE;
+
+ if(cache_ptr->image_ctl.flags & H5C_CI__SUPRESS_ENTRY_WRITES)
+ suppress_image_entry_writes = TRUE;
+ } /* end if */
+
/* run initial sanity checks */
#if H5C_DO_SANITY_CHECKS
if(entry_ptr->in_slist) {
HDassert(entry_ptr->is_dirty);
if((entry_ptr->flush_marker) && (!entry_ptr->is_dirty))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry in slist failed sanity checks.")
- } else {
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry in slist failed sanity checks")
+ } /* end if */
+ else {
HDassert(!entry_ptr->is_dirty);
HDassert(!entry_ptr->flush_marker);
if((entry_ptr->is_dirty) || (entry_ptr->flush_marker))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry failed sanity checks.")
- }
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry failed sanity checks")
+ } /* end else */
#endif /* H5C_DO_SANITY_CHECKS */
if(entry_ptr->is_protected) {
- HDassert(!entry_ptr->is_protected);
+ HDassert(!entry_ptr->is_protected);
/* Attempt to flush a protected entry -- scream and die. */
- HGOTO_ERROR(H5E_CACHE, H5E_PROTECT, FAIL, "Attempt to flush a protected entry.")
+ HGOTO_ERROR(H5E_CACHE, H5E_PROTECT, FAIL, "Attempt to flush a protected entry")
} /* end if */
- /* set entry_ptr->flush_in_progress = TRUE and set
+ /* Set entry_ptr->flush_in_progress = TRUE and set
* entry_ptr->flush_marker = FALSE
*
- * in the parallel case, do some sanity checking in passing.
- */
- HDassert(entry_ptr->type);
-
- was_dirty = entry_ptr->is_dirty; /* needed later for logging */
-
- /* We will set flush_in_progress back to FALSE at the end if the
+ * We will set flush_in_progress back to FALSE at the end if the
* entry still exists at that point.
*/
entry_ptr->flush_in_progress = TRUE;
entry_ptr->flush_marker = FALSE;
+ /* Preserve current dirty state for later */
+ was_dirty = entry_ptr->is_dirty;
+
/* The entry is dirty, and we are doing a flush, a flush destroy or have
* been requested to generate an image. In those cases, serialize the
* entry.
@@ -5971,6 +6233,9 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
} /* end if */
if(!(entry_ptr->image_up_to_date)) {
+ /* Sanity check */
+ HDassert(!entry_ptr->prefetched);
+
/* Generate the entry's image */
if(H5C__generate_image(f, cache_ptr, entry_ptr, dxpl_id) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "can't generate entry's image")
@@ -5991,7 +6256,17 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Write when writes are always forbidden!?!?!")
#endif /* H5C_DO_SANITY_CHECKS */
- if(((entry_ptr->type->flags) & H5C__CLASS_SKIP_WRITES) == 0) {
+ /* Write the image to disk unless the write is suppressed.
+ *
+ * This happens if both suppress_image_entry_writes and
+ * entry_ptr->include_in_image are TRUE, or if the
+ * H5AC__CLASS_SKIP_WRITES flag is set in the entry's type. This
+ * flag should only be used in test code.
+ */
+ if((!suppress_image_entry_writes || !entry_ptr->include_in_image)
+ && (((entry_ptr->type->flags) & H5C__CLASS_SKIP_WRITES) == 0)) {
+ H5FD_mem_t mem_type = H5FD_MEM_DEFAULT;
+
#ifdef H5_HAVE_PARALLEL
if(cache_ptr->coll_write_list) {
if(H5SL_insert(cache_ptr->coll_write_list, entry_ptr, &entry_ptr->addr) < 0)
@@ -5999,8 +6274,16 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
} /* end if */
else
#endif /* H5_HAVE_PARALLEL */
- if(H5F_block_write(f, entry_ptr->type->mem_type, entry_ptr->addr, entry_ptr->size, dxpl_id, entry_ptr->image_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't write image to file.")
+
+ if(entry_ptr->prefetched) {
+ HDassert(entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID);
+ mem_type = cache_ptr->class_table_ptr[entry_ptr->prefetch_type_id]->mem_type;
+ } /* end if */
+ else
+ mem_type = entry_ptr->type->mem_type;
+
+ if(H5F_block_write(f, mem_type, entry_ptr->addr, entry_ptr->size, dxpl_id, entry_ptr->image_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't write image to file")
} /* end if */
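For a prefetched entry, entry_ptr->type is the generic prefetched-entry class, so the write path above recovers the real memory type from the stored prefetch_type_id via the cache's class table. A sketch of that dispatch with hypothetical types (not the actual H5C_class_t layout):

    /* Hypothetical class/entry shapes -- only the fields used here */
    typedef enum { MEM_DEFAULT, MEM_BTREE, MEM_OHDR } mem_type_t;

    typedef struct toy_class {
        mem_type_t mem_type;
    } toy_class;

    typedef struct toy_entry {
        int              prefetched;        /* non-zero for prefetched entries */
        unsigned         prefetch_type_id;  /* index of the entry's real class */
        const toy_class *type;              /* generic class for prefetched entries */
    } toy_entry;

    /* Pick the memory type to write with: prefetched entries carry the id
     * of their real class, everything else uses its own class directly. */
    mem_type_t pick_mem_type(const toy_entry *e,
                             const toy_class *const *class_table)
    {
        if (e->prefetched)
            return class_table[e->prefetch_type_id]->mem_type;
        return e->type->mem_type;
    }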
/* if the entry has a notify callback, notify it that we have
@@ -6063,19 +6346,28 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
*
* 2) Delete it from the skip list if requested.
*
- * 3) Update the replacement policy for eviction
+ * 3) Delete it from the collective read access list.
+ *
+ * 4) Update the replacement policy for eviction
*
- * 4) Remove it from the tag list for this object
+ * 5) Remove it from the tag list for this object
*
* Finally, if the destroy_entry flag is set, discard the
* entry.
*/
-
- H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr)
+ H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL)
if(entry_ptr->in_slist && del_from_slist_on_destroy)
H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, during_flush)
+#ifdef H5_HAVE_PARALLEL
+ /* Check for collective read access flag */
+ if(entry_ptr->coll_access) {
+ entry_ptr->coll_access = FALSE;
+ H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL)
+ } /* end if */
+#endif /* H5_HAVE_PARALLEL */
+
H5C__UPDATE_RP_FOR_EVICTION(cache_ptr, entry_ptr, FAIL)
/* Remove entry from tag list */
@@ -6122,11 +6414,12 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry dirty flag cleared")
/* Propagate the clean flag up the flush dependency chain if appropriate */
- HDassert(entry_ptr->flush_dep_ndirty_children == 0);
- if(entry_ptr->flush_dep_nparents > 0)
- if(H5C__mark_flush_dep_clean(entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep clean flag")
- } /* end if */
+ if(entry_ptr->flush_dep_ndirty_children != 0)
+ HDassert(entry_ptr->flush_dep_ndirty_children == 0);
+ if(entry_ptr->flush_dep_nparents > 0)
+ if(H5C__mark_flush_dep_clean(entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTMARKDIRTY, FAIL, "Can't propagate flush dep clean flag")
+ } /* end if */
} /* end else */
/* reset the flush_in progress flag */
@@ -6145,10 +6438,29 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
/* Sanity check */
HDassert(0 == entry_ptr->flush_dep_nparents);
- /* Start by freeing the buffer for the on disk image */
- if(entry_ptr->image_ptr != NULL)
+ /* if both suppress_image_entry_frees and entry_ptr->include_in_image
+ * are true, simply set entry_ptr->image_ptr to NULL, as we have
+ * another pointer to the buffer in an instance of H5C_image_entry_t
+ * in cache_ptr->image_entries.
+ *
+ * Otherwise, free the buffer if it exists.
+ */
+ if(suppress_image_entry_frees && entry_ptr->include_in_image)
+ entry_ptr->image_ptr = NULL;
+ else if(entry_ptr->image_ptr != NULL)
entry_ptr->image_ptr = H5MM_xfree(entry_ptr->image_ptr);
+ /* If the entry is not a prefetched entry, verify that the flush
+ * dependency parents addresses array has been transferred.
+ *
+ * If the entry is prefetched, the free_icr routine will dispose of
+ * the flush dependency parents addresses array if necessary.
+ */
+ if(!entry_ptr->prefetched) {
+ HDassert(0 == entry_ptr->fd_parent_count);
+ HDassert(NULL == entry_ptr->fd_parent_addrs);
+ } /* end if */
+
/* Check whether we should free the space in the file that
* the entry occupies
*/
@@ -6232,7 +6544,7 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
HDassert(entry_ptr->image_ptr == NULL);
if(entry_ptr->type->free_icr((void *)entry_ptr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "free_icr callback failed.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "free_icr callback failed")
} /* end if */
else {
HDassert(take_ownership);
@@ -6245,14 +6557,26 @@ H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id, H5C_cache_entry_t *entry_
} /* end else */
} /* if (destroy) */
+ /* Check if we have to update the page buffer with cleared entries
+ * so it doesn't go out of date
+ */
+ if(update_page_buffer) {
+ /* Sanity check */
+ HDassert(!destroy);
+ HDassert(entry_ptr->image_ptr);
+
+ if(f->shared->page_buf && f->shared->page_buf->page_size >= entry_ptr->size)
+ if(H5PB_update_entry(f->shared->page_buf, entry_ptr->addr, entry_ptr->size, entry_ptr->image_ptr) > 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Failed to update PB with metadata cache")
+ } /* end if */
+
if(cache_ptr->log_flush)
if((cache_ptr->log_flush)(cache_ptr, entry_addr, was_dirty, flags) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "log_flush callback failed.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "log_flush callback failed")
done:
HDassert( ( ret_value != SUCCEED ) || ( destroy_entry ) ||
( ! entry_ptr->flush_in_progress ) );
-
HDassert( ( ret_value != SUCCEED ) || ( destroy_entry ) ||
( take_ownership ) || ( ! entry_ptr->is_dirty ) );
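H5C__flush_single_entry() begins by decoding its flags bitmask into internal booleans (destroy, clear_only, take_ownership, and the new update_page_buffer), and derives destroy_entry from the take-ownership interaction. A minimal sketch of that decoding, using hypothetical flag values rather than the real H5C__* constants:

    #include <stdbool.h>

    /* Hypothetical flag bits -- not the real H5C__* values */
    #define FLUSH_INVALIDATE    0x01u
    #define FLUSH_CLEAR_ONLY    0x02u
    #define TAKE_OWNERSHIP      0x04u
    #define UPDATE_PAGE_BUFFER  0x08u

    typedef struct flush_ctl {
        bool destroy;
        bool clear_only;
        bool take_ownership;
        bool update_page_buffer;
        bool destroy_entry;      /* free the entry unless ownership is taken */
    } flush_ctl;

    flush_ctl decode_flush_flags(unsigned flags)
    {
        flush_ctl c;

        c.destroy            = (flags & FLUSH_INVALIDATE)   != 0;
        c.clear_only         = (flags & FLUSH_CLEAR_ONLY)   != 0;
        c.take_ownership     = (flags & TAKE_OWNERSHIP)     != 0;
        c.update_page_buffer = (flags & UPDATE_PAGE_BUFFER) != 0;

        /* taking ownership means the caller, not the cache, frees the entry */
        c.destroy_entry = c.destroy && !c.take_ownership;

        return c;
    }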
@@ -6310,14 +6634,14 @@ H5C__verify_len_eoa(H5F_t *f, const H5C_class_t *type, haddr_t addr,
/* Check if the amount of data to read will be past the EOA */
if(H5F_addr_gt((addr + *len), eoa)) {
if(actual)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "actual len exceeds EOA.")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "actual len exceeds EOA")
else
/* Trim down the length of the metadata */
*len = (size_t)(eoa - addr);
} /* end if */
if(*len <= 0)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "len not positive after adjustment for EOA.")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "len not positive after adjustment for EOA")
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -6397,7 +6721,7 @@ H5C_load_entry(H5F_t * f,
/* Allocate the buffer for reading the on-disk entry image */
if(NULL == (image = (uint8_t *)H5MM_malloc(len + H5C_IMAGE_EXTRA_SPACE)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "memory allocation failed for on disk image buffer.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "memory allocation failed for on disk image buffer")
#if H5C_DO_MEMORY_SANITY_CHECKS
HDmemcpy(image + len, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
@@ -6474,7 +6798,7 @@ H5C_load_entry(H5F_t * f,
if(actual_len != len) {
/* Verify that the length isn't past the EOA for the file */
if(H5C__verify_len_eoa(f, type, addr, &actual_len, TRUE) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "actual_len exceeds EOA.")
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "actual_len exceeds EOA")
/* Expand buffer to new size */
if(NULL == (new_image = H5MM_realloc(image, actual_len + H5C_IMAGE_EXTRA_SPACE)))
@@ -6575,52 +6899,76 @@ H5C_load_entry(H5F_t * f,
HDassert( ( dirty == FALSE ) || ( type->id == 5 || type->id == 6) );
- entry->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC;
- entry->cache_ptr = f->shared->cache;
- entry->addr = addr;
- entry->size = len;
+ entry->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC;
+ entry->cache_ptr = f->shared->cache;
+ entry->addr = addr;
+ entry->size = len;
HDassert(entry->size < H5C_MAX_ENTRY_SIZE);
- entry->image_ptr = image;
- entry->image_up_to_date = TRUE;
- entry->type = type;
- entry->is_dirty = dirty;
- entry->dirtied = FALSE;
- entry->is_protected = FALSE;
- entry->is_read_only = FALSE;
- entry->ro_ref_count = 0;
- entry->is_pinned = FALSE;
- entry->in_slist = FALSE;
- entry->flush_marker = FALSE;
+ entry->image_ptr = image;
+ entry->image_up_to_date = !dirty;
+ entry->type = type;
+ entry->is_dirty = dirty;
+ entry->dirtied = FALSE;
+ entry->is_protected = FALSE;
+ entry->is_read_only = FALSE;
+ entry->ro_ref_count = 0;
+ entry->is_pinned = FALSE;
+ entry->in_slist = FALSE;
+ entry->flush_marker = FALSE;
#ifdef H5_HAVE_PARALLEL
- entry->clear_on_unprotect = FALSE;
- entry->flush_immediately = FALSE;
- entry->coll_access = coll_access;
+ entry->clear_on_unprotect = FALSE;
+ entry->flush_immediately = FALSE;
+ entry->coll_access = coll_access;
#endif /* H5_HAVE_PARALLEL */
- entry->flush_in_progress = FALSE;
- entry->destroy_in_progress = FALSE;
+ entry->flush_in_progress = FALSE;
+ entry->destroy_in_progress = FALSE;
- entry->ring = H5C_RING_UNDEFINED;
+ entry->ring = H5C_RING_UNDEFINED;
- /* Initialize flush dependency height fields */
- entry->flush_dep_parent = NULL;
- entry->flush_dep_nparents = 0;
- entry->flush_dep_parent_nalloc = 0;
- entry->flush_dep_nchildren = 0;
- entry->flush_dep_ndirty_children = 0;
- entry->ht_next = NULL;
- entry->ht_prev = NULL;
+ /* Initialize flush dependency fields */
+ entry->flush_dep_parent = NULL;
+ entry->flush_dep_nparents = 0;
+ entry->flush_dep_parent_nalloc = 0;
+ entry->flush_dep_nchildren = 0;
+ entry->flush_dep_ndirty_children = 0;
+ entry->flush_dep_nunser_children = 0;
+ entry->ht_next = NULL;
+ entry->ht_prev = NULL;
+ entry->il_next = NULL;
+ entry->il_prev = NULL;
- entry->next = NULL;
- entry->prev = NULL;
+ entry->next = NULL;
+ entry->prev = NULL;
- entry->aux_next = NULL;
- entry->aux_prev = NULL;
+ entry->aux_next = NULL;
+ entry->aux_prev = NULL;
#ifdef H5_HAVE_PARALLEL
- entry->coll_next = NULL;
- entry->coll_prev = NULL;
+ entry->coll_next = NULL;
+ entry->coll_prev = NULL;
#endif /* H5_HAVE_PARALLEL */
+ /* initialize cache image related fields */
+ entry->include_in_image = FALSE;
+ entry->lru_rank = 0;
+ entry->image_dirty = FALSE;
+ entry->fd_parent_count = 0;
+ entry->fd_parent_addrs = NULL;
+ entry->fd_child_count = 0;
+ entry->fd_dirty_child_count = 0;
+ entry->image_fd_height = 0;
+ entry->prefetched = FALSE;
+ entry->prefetch_type_id = 0;
+ entry->age = 0;
+ entry->prefetched_dirty = FALSE;
+#ifndef NDEBUG /* debugging field */
+ entry->serialization_count = 0;
+#endif /* NDEBUG */
+
+ entry->tl_next = NULL;
+ entry->tl_prev = NULL;
+ entry->tag_info = NULL;
+
H5C__RESET_CACHE_ENTRY_STATS(entry);
ret_value = thing;
@@ -6641,7 +6989,7 @@ done:
/*-------------------------------------------------------------------------
*
- * Function: H5C_make_space_in_cache
+ * Function: H5C__make_space_in_cache
*
* Purpose: Attempt to evict cache entries until the index_size
* is at least needed_space below max_cache_size.
@@ -6659,100 +7007,67 @@ done:
* Thus the function simply does its best, returning success
* unless an error is encountered.
*
- * The primary_dxpl_id and secondary_dxpl_id parameters
- * specify the dxpl_ids used on the first write occasioned
- * by the call (primary_dxpl_id), and on all subsequent
- * writes (secondary_dxpl_id). This is useful in the metadata
- * cache, but may not be needed elsewhere. If so, just use the
- * same dxpl_id for both parameters.
- *
* Observe that this function cannot occasion a read.
*
* Return: Non-negative on success/Negative on failure.
*
* Programmer: John Mainzer, 5/14/04
*
- * Changes: Modified function to skip over entries with the
- * flush_in_progress flag set. If this is not done,
- * an infinite recursion is possible if the cache is
- * full, and the pre-serialize or serialize routine
- * attempts to load another entry.
- *
- * This error was exposed by a re-factor of the
- * H5C__flush_single_entry() routine. However, it was
- * a potential bug from the moment that entries were
- * allowed to load other entries on flush.
- *
- * In passing, note that the primary and secondary dxpls
- * mentioned in the comment above have been replaced by
- * a single dxpl at some point, and thus the discussion
- * above is somewhat obsolete. Date of this change is
- * unkown.
- *
- * JRM -- 12/26/14
- *
- * Modified function to detect deletions of entries
- * during a scan of the LRU, and where appropriate,
- * restart the scan to avoid proceeding with a next
- * entry that is no longer in the cache.
- *
- * Note the absence of checks after flushes of clean
- * entries. As a second entry can only be removed by
- * by a call to the pre_serialize or serialize callback
- * of the first, and as these callbacks will not be called
- * on clean entries, no checks are needed.
- *
- * JRM -- 4/6/15
- *
*-------------------------------------------------------------------------
*/
-static herr_t
-H5C_make_space_in_cache(H5F_t * f,
- hid_t dxpl_id,
- size_t space_needed,
- hbool_t write_permitted)
+herr_t
+H5C__make_space_in_cache(H5F_t *f, hid_t dxpl_id, size_t space_needed,
+ hbool_t write_permitted)
{
H5C_t * cache_ptr = f->shared->cache;
#if H5C_COLLECT_CACHE_STATS
int32_t clean_entries_skipped = 0;
+ int32_t dirty_pf_entries_skipped = 0;
int32_t total_entries_scanned = 0;
#endif /* H5C_COLLECT_CACHE_STATS */
- int32_t entries_examined = 0;
- int32_t initial_list_len;
+ uint32_t entries_examined = 0;
+ uint32_t initial_list_len;
size_t empty_space;
+ hbool_t reentrant_call = FALSE;
hbool_t prev_is_dirty = FALSE;
hbool_t didnt_flush_entry = FALSE;
hbool_t restart_scan;
H5C_cache_entry_t * entry_ptr;
H5C_cache_entry_t * prev_ptr;
H5C_cache_entry_t * next_ptr;
- int32_t num_corked_entries = 0;
+ uint32_t num_corked_entries = 0;
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_PACKAGE
- HDassert( f );
- HDassert( cache_ptr );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
- HDassert( cache_ptr->index_size ==
- (cache_ptr->clean_index_size + cache_ptr->dirty_index_size) );
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->index_size == (cache_ptr->clean_index_size + cache_ptr->dirty_index_size));
- if ( write_permitted ) {
+ /* Check to see if cache_ptr->msic_in_progress is TRUE. If it is, this
+ * is a re-entrant call via a client callback called in the make
+ * space in cache process. To avoid an infinite recursion, set
+ * reentrant_call to TRUE, and goto done.
+ */
+ if(cache_ptr->msic_in_progress) {
+ reentrant_call = TRUE;
+ HGOTO_DONE(SUCCEED);
+ } /* end if */
+
+ cache_ptr->msic_in_progress = TRUE;
+ if ( write_permitted ) {
restart_scan = FALSE;
initial_list_len = cache_ptr->LRU_list_len;
entry_ptr = cache_ptr->LRU_tail_ptr;
- if ( cache_ptr->index_size >= cache_ptr->max_cache_size ) {
-
+ if(cache_ptr->index_size >= cache_ptr->max_cache_size)
empty_space = 0;
-
- } else {
-
+ else
empty_space = cache_ptr->max_cache_size - cache_ptr->index_size;
- }
-
while ( ( ( (cache_ptr->index_size + space_needed)
>
cache_ptr->max_cache_size
@@ -6788,8 +7103,9 @@ H5C_make_space_in_cache(H5F_t * f,
++num_corked_entries;
didnt_flush_entry = TRUE;
- } else if ( ( (entry_ptr->type)->id != H5C__EPOCH_MARKER_TYPE ) &&
- ( ! entry_ptr->flush_in_progress ) ) {
+ } else if ( ( (entry_ptr->type)->id != H5AC_EPOCH_MARKER_ID ) &&
+ ( ! entry_ptr->flush_in_progress ) &&
+ ( ! entry_ptr->prefetched_dirty ) ) {
didnt_flush_entry = FALSE;
@@ -6815,13 +7131,6 @@ H5C_make_space_in_cache(H5F_t * f,
cache_ptr->entries_removed_counter = 0;
cache_ptr->last_entry_removed_ptr = NULL;
-#ifdef H5_HAVE_PARALLEL
- if(TRUE == entry_ptr->coll_access) {
- entry_ptr->coll_access = FALSE;
- H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL)
- } /* end if */
-#endif
-
if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__NO_FLAGS_SET) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
@@ -6855,10 +7164,16 @@ H5C_make_space_in_cache(H5F_t * f,
} else {
- /* Skip epoch markers and entries that are in the process
- * of being flushed.
+ /* Skip epoch markers, entries that are in the process
+ * of being flushed, and entries marked as prefetched_dirty
+ * (occurs in the R/O case only).
*/
didnt_flush_entry = TRUE;
+
+#if H5C_COLLECT_CACHE_STATS
+ if(entry_ptr->prefetched_dirty)
+ dirty_pf_entries_skipped++;
+#endif /* H5C_COLLECT_CACHE_STATS */
}
if ( prev_ptr != NULL ) {
@@ -6923,6 +7238,7 @@ H5C_make_space_in_cache(H5F_t * f,
cache_ptr->calls_to_msic++;
cache_ptr->total_entries_skipped_in_msic += clean_entries_skipped;
+ cache_ptr->total_dirty_pf_entries_skipped_in_msic += dirty_pf_entries_skipped;
cache_ptr->total_entries_scanned_in_msic += total_entries_scanned;
if ( clean_entries_skipped > cache_ptr->max_entries_skipped_in_msic ) {
@@ -6930,6 +7246,9 @@ H5C_make_space_in_cache(H5F_t * f,
cache_ptr->max_entries_skipped_in_msic = clean_entries_skipped;
}
+ if(dirty_pf_entries_skipped > cache_ptr->max_dirty_pf_entries_skipped_in_msic)
+ cache_ptr->max_dirty_pf_entries_skipped_in_msic = dirty_pf_entries_skipped;
+
if ( total_entries_scanned > cache_ptr->max_entries_scanned_in_msic ) {
cache_ptr->max_entries_scanned_in_msic = total_entries_scanned;
@@ -6977,14 +7296,20 @@ H5C_make_space_in_cache(H5F_t * f,
prev_ptr = entry_ptr->aux_prev;
+ if ( ( !(entry_ptr->prefetched_dirty) )
#ifdef H5_HAVE_PARALLEL
- if(!(entry_ptr->coll_access)) {
+ && ( ! (entry_ptr->coll_access) )
#endif /* H5_HAVE_PARALLEL */
- if(H5C__flush_single_entry(f, dxpl_id, entry_ptr, H5C__FLUSH_INVALIDATE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush entry")
-#ifdef H5_HAVE_PARALLEL
+ ) {
+
+ if ( H5C__flush_single_entry(f, dxpl_id, entry_ptr,
+ H5C__FLUSH_INVALIDATE_FLAG |
+ H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0 )
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "unable to flush entry")
+
} /* end if */
-#endif /* H5_HAVE_PARALLEL */
/* we are scanning the clean LRU, so the serialize function
* will not be called on any entry -- thus there is no
@@ -6998,10 +7323,14 @@ H5C_make_space_in_cache(H5F_t * f,
}
done:
+ /* Sanity checks */
+ HDassert(cache_ptr->msic_in_progress);
+ if(!reentrant_call)
+ cache_ptr->msic_in_progress = FALSE;
+ HDassert((!reentrant_call) || (cache_ptr->msic_in_progress));
FUNC_LEAVE_NOAPI(ret_value)
-
-} /* H5C_make_space_in_cache() */
+} /* H5C__make_space_in_cache() */
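The msic_in_progress guard added above is easy to misread because the set, the early return, and the reset in the done: block are separated by the whole LRU scan. A minimal standalone sketch of the same pattern, using a hypothetical sketch_cache type and a stub eviction helper rather than the real H5C_t machinery:

#include <stdbool.h>
#include <stddef.h>

typedef struct sketch_cache {
    bool msic_in_progress;     /* set while make-space is running */
} sketch_cache;

/* stub standing in for one pass of the LRU eviction scan */
static int evict_one_entry(sketch_cache *cache) { (void)cache; return 0; }

static int
sketch_make_space(sketch_cache *cache, size_t space_needed)
{
    bool reentrant_call = false;
    int  ret_value = 0;

    /* A client callback invoked during eviction may call back into this
     * routine; detect that and return at once to avoid infinite recursion.
     */
    if(cache->msic_in_progress) {
        reentrant_call = true;
        goto done;
    }
    cache->msic_in_progress = true;

    while(space_needed > 0) {
        if(evict_one_entry(cache) < 0) {
            ret_value = -1;
            goto done;
        }
        space_needed--;        /* placeholder progress metric */
    }

done:
    /* Only the outermost call clears the flag */
    if(!reentrant_call)
        cache->msic_in_progress = false;
    return ret_value;
}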
/*-------------------------------------------------------------------------
@@ -7445,7 +7774,6 @@ H5C_entry_in_skip_list(H5C_t * cache_ptr, H5C_cache_entry_t *target_ptr)
return(in_slist);
} /* H5C_entry_in_skip_list() */
-
#endif /* H5C_DO_SLIST_SANITY_CHECKS */
@@ -7677,9 +8005,102 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__mark_flush_dep_clean() */
-#ifndef NDEBUG
/*-------------------------------------------------------------------------
+ * Function: H5C__mark_flush_dep_serialized()
+ *
+ * Purpose: Decrement the flush_dep_nunser_children fields of all the
+ * target entry's flush dependency parents in response to
+ * the target entry becoming serialized.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 8/30/16
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C__mark_flush_dep_serialized(H5C_cache_entry_t * entry_ptr)
+{
+ unsigned u; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(entry_ptr);
+
+ /* Iterate over the parent entries, if any */
+ for(u = 0; u < entry_ptr->flush_dep_nparents; u++) {
+
+ HDassert(entry_ptr->flush_dep_parent);
+ HDassert(entry_ptr->flush_dep_parent[u]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->flush_dep_parent[u]->flush_dep_nunser_children > 0);
+
+ /* Decrement the parent's number of unserialized children */
+ entry_ptr->flush_dep_parent[u]->flush_dep_nunser_children--;
+
+ /* If the parent has a 'notify' callback, send a 'child entry serialized' notice */
+ if(entry_ptr->flush_dep_parent[u]->type->notify &&
+ (entry_ptr->flush_dep_parent[u]->type->notify)(H5C_NOTIFY_ACTION_CHILD_SERIALIZED, entry_ptr->flush_dep_parent[u]) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify parent about child entry serialized flag set")
+ } /* end for */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__mark_flush_dep_serialized() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__mark_flush_dep_unserialized()
+ *
+ * Purpose: Decrement the flush_dep_nunser_children fields of all the
+ * target entry's flush dependency parents in response to
+ * the target entry becoming unserialized.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 8/30/16
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C__mark_flush_dep_unserialized(H5C_cache_entry_t * entry_ptr)
+{
+ unsigned u; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(entry_ptr);
+
+ /* Iterate over the parent entries, if any */
+ for(u = 0; u < entry_ptr->flush_dep_nparents; u++) {
+ /* Sanity check */
+ HDassert(entry_ptr->flush_dep_parent);
+ HDassert(entry_ptr->flush_dep_parent[u]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->flush_dep_parent[u]->flush_dep_nunser_children <
+ entry_ptr->flush_dep_parent[u]->flush_dep_nchildren);
+
+ /* Increment the parent's number of unserialized children */
+ entry_ptr->flush_dep_parent[u]->flush_dep_nunser_children++;
+
+ /* If the parent has a 'notify' callback, send a 'child entry unserialized' notice */
+ if(entry_ptr->flush_dep_parent[u]->type->notify &&
+ (entry_ptr->flush_dep_parent[u]->type->notify)(H5C_NOTIFY_ACTION_CHILD_UNSERIALIZED, entry_ptr->flush_dep_parent[u]) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify parent about child entry serialized flag reset")
+ } /* end for */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__mark_flush_dep_unserialized() */
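Taken together with H5C__mark_flush_dep_serialized() above, this routine keeps each parent's flush_dep_nunser_children equal to the number of its flush dependency children whose on-disk images are out of date. A minimal single-parent sketch of that bookkeeping; the node type and the idempotence guards are illustrative assumptions, not the cache's real structures:

#include <assert.h>
#include <stdbool.h>

typedef struct fd_node {
    struct fd_node *parent;        /* one parent, for simplicity        */
    unsigned nunser_children;      /* like flush_dep_nunser_children    */
    bool image_up_to_date;
} fd_node;

/* Child's image goes stale (entry dirtied/resized): bump the parent's
 * count, mirroring H5C__mark_flush_dep_unserialized(). */
static void sketch_mark_unserialized(fd_node *child)
{
    if(child->image_up_to_date) {
        child->image_up_to_date = false;
        if(child->parent)
            child->parent->nunser_children++;
    }
}

/* Child's image regenerated: drop the parent's count, mirroring
 * H5C__mark_flush_dep_serialized(). */
static void sketch_mark_serialized(fd_node *child)
{
    if(!child->image_up_to_date) {
        child->image_up_to_date = true;
        if(child->parent) {
            assert(child->parent->nunser_children > 0);
            child->parent->nunser_children--;
        }
    }
}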
+
+
+#ifndef NDEBUG
+/*-------------------------------------------------------------------------
* Function: H5C__assert_flush_dep_nocycle()
*
* Purpose: Assert recursively that base_entry is not the same as
@@ -7719,6 +8140,515 @@ H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t * entry,
/*-------------------------------------------------------------------------
+ * Function: H5C__serialize_cache
+ *
+ * Purpose: Serialize (i.e. construct an on disk image) for all entries
+ * in the metadata cache including clean entries.
+ *
+ * Note that flush dependencies and "flush me last" flags
+ * must be observed in the serialization process.
+ *
+ * Note also that entries may be loaded, flushed, evicted,
+ * expunged, relocated, resized, or removed from the cache
+ * during this process, just as these actions may occur during
+ * a regular flush.
+ *
+ * However, we are given that the cache will contain no protected
+ * entries on entry to this routine (although entries may be
+ * briefly protected and then unprotected during the serialize
+ * process).
+ *
+ * The objective of this routine is to serialize all entries and
+ * to force all entries into their actual locations on disk.
+ *
+ * The initial need for this routine is to settle all entries
+ * in the cache prior to construction of the metadata cache
+ * image so that the size of the cache image can be calculated.
+ * However, I gather that other uses for the routine are
+ * under consideration.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * a request to flush all items and something was protected.
+ *
+ * Programmer: John Mainzer
+ * 7/22/15
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C__serialize_cache(H5F_t *f, hid_t dxpl_id)
+{
+#if H5C_DO_SANITY_CHECKS
+ int i;
+ uint32_t index_len = 0;
+ size_t index_size = (size_t)0;
+ size_t clean_index_size = (size_t)0;
+ size_t dirty_index_size = (size_t)0;
+ size_t slist_size = (size_t)0;
+ uint32_t slist_len = 0;
+#endif /* H5C_DO_SANITY_CHECKS */
+ H5C_ring_t ring;
+ H5C_t * cache_ptr;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_PACKAGE
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->slist_ptr);
+
+#if H5C_DO_SANITY_CHECKS
+ HDassert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0);
+ HDassert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0);
+ HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+
+ for(i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) {
+ index_len += cache_ptr->index_ring_len[i];
+ index_size += cache_ptr->index_ring_size[i];
+ clean_index_size += cache_ptr->clean_index_ring_size[i];
+ dirty_index_size += cache_ptr->dirty_index_ring_size[i];
+
+ slist_len += cache_ptr->slist_ring_len[i];
+ slist_size += cache_ptr->slist_ring_size[i];
+ } /* end for */
+
+ HDassert(cache_ptr->index_len == index_len);
+ HDassert(cache_ptr->index_size == index_size);
+ HDassert(cache_ptr->clean_index_size == clean_index_size);
+ HDassert(cache_ptr->dirty_index_size == dirty_index_size);
+ HDassert(cache_ptr->slist_len == slist_len);
+ HDassert(cache_ptr->slist_size == slist_size);
+#endif /* H5C_DO_SANITY_CHECKS */
+
+#if H5C_DO_EXTREME_SANITY_CHECKS
+ if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_lru_list(cache_ptr) < 0))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+
+#ifndef NDEBUG
+ /* if this is a debug build, set the serialization_count field of
+ * each entry in the cache to zero before we start the serialization.
+ * This allows us to detect the case in which any entry is serialized
+ * more than once (a performance issue), and more importantly, the
+ * case in which any flush dependency parent is serialized more than
+ * once (a correctness issue).
+ */
+ {
+ H5C_cache_entry_t * scan_ptr = NULL;
+
+ scan_ptr = cache_ptr->il_head;
+ while(scan_ptr != NULL) {
+ HDassert(scan_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ scan_ptr->serialization_count = 0;
+ scan_ptr = scan_ptr->il_next;
+ } /* end while */
+ } /* end block */
+#endif /* NDEBUG */
+
+ /* set cache_ptr->serialization_in_progress to TRUE, and back
+ * to FALSE at the end of the function. Must maintain this flag
+ * to support H5C_get_serialization_in_progress(), which is in
+ * turn required to support sanity checking in some cache
+ * clients.
+ */
+ HDassert(!cache_ptr->serialization_in_progress);
+ cache_ptr->serialization_in_progress = TRUE;
+
+ /* Serialize each ring, starting from the outermost ring and
+ * working inward.
+ */
+ ring = H5C_RING_USER;
+ while(ring < H5C_RING_NTYPES) {
+ HDassert(cache_ptr->close_warning_received);
+ switch(ring) {
+ case H5C_RING_USER:
+ break;
+
+ case H5C_RING_RDFSM:
+ /* Settle raw data FSM */
+ if(!cache_ptr->rdfsm_settled)
+ if(H5MF_settle_raw_data_fsm(f, dxpl_id, &cache_ptr->rdfsm_settled) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "RD FSM settle failed")
+ break;
+
+ case H5C_RING_MDFSM:
+ /* Settle metadata FSM */
+ if(!cache_ptr->mdfsm_settled)
+ if(H5MF_settle_meta_data_fsm(f, dxpl_id, &cache_ptr->mdfsm_settled) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "MD FSM settle failed")
+ break;
+
+ case H5C_RING_SBE:
+ case H5C_RING_SB:
+ break;
+
+ default:
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown ring?!?!")
+ break;
+ } /* end switch */
+
+ if(H5C__serialize_ring(f, dxpl_id, ring) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "serialize ring failed")
+
+ ring++;
+ } /* end while */
+
+#ifndef NDEBUG
+ /* Verify that no entry has been serialized more than once.
+ * FD parents with multiple serializations should have been caught
+ * elsewhere, so no specific check for them here.
+ */
+ {
+ H5C_cache_entry_t * scan_ptr = NULL;
+
+ scan_ptr = cache_ptr->il_head;
+ while(scan_ptr != NULL) {
+ HDassert(scan_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(scan_ptr->serialization_count <= 1);
+
+ scan_ptr = scan_ptr->il_next;
+ } /* end while */
+ } /* end block */
+#endif /* NDEBUG */
+
+done:
+ cache_ptr->serialization_in_progress = FALSE;
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__serialize_cache() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__serialize_ring
+ *
+ * Purpose: Serialize the entries contained in the specified cache and
+ * ring. All entries in rings outside the specified ring
+ * must have been serialized on entry.
+ *
+ * If the cache contains protected entries in the specified
+ * ring, the function will fail, as protected entries cannot
+ * be serialized. However all unprotected entries in the
+ * target ring should be serialized before the function
+ * returns failure.
+ *
+ * If flush dependencies appear in the target ring, the
+ * function makes repeated passes through the index list
+ * serializing entries in flush dependency order.
+ *
+ * All entries outside the H5C_RING_SBE are marked for
+ * inclusion in the cache image. Entries in H5C_RING_SBE
+ * and below are marked for exclusion from the image.
+ *
+ * Return: Non-negative on success/Negative on failure or if there was
+ * a request to flush all items and something was protected.
+ *
+ * Programmer: John Mainzer
+ * 9/11/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__serialize_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring)
+{
+ hbool_t done = FALSE;
+ H5C_t * cache_ptr;
+ H5C_cache_entry_t * entry_ptr;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(ring > H5C_RING_UNDEFINED);
+ HDassert(ring < H5C_RING_NTYPES);
+
+ HDassert(cache_ptr->serialization_in_progress);
+
+ /* The objective here is to serialize all entries in the cache ring
+ * in flush dependency order.
+ *
+ * The basic algorithm is to scan the cache index list looking for
+ * unserialized entries that are either not in a flush dependency
+ * relationship, or which have no unserialized children. Any such
+ * entry is serialized and its flush dependency parents (if any) are
+ * informed -- allowing them to decrement their unserialized child counts.
+ *
+ * However, this algorithm is complicated by the ability
+ * of client serialization callbacks to perform operations on
+ * the cache which can result in the insertion, deletion,
+ * relocation, resize, dirty, flush, eviction, or removal (via the
+ * take ownership flag) of entries. Changes in the flush dependency
+ * structure are also possible.
+ *
+ * On the other hand, the algorithm is simplified by the fact that
+ * we are serializing, not flushing. Thus, as long as all entries
+ * are serialized correctly, it doesn't matter if we have to go back
+ * and serialize an entry a second time.
+ *
+ * These possible actions result in the following modifications to
+ * the basic algorithm:
+ *
+ * 1) In the event of an entry expunge, eviction or removal, we must
+ * restart the scan as it is possible that the next entry in our
+ * scan is no longer in the cache. Were we to examine this entry,
+ * we would be accessing deallocated memory.
+ *
+ * 2) A resize, dirty, or insertion of an entry may result in the
+ * increment of a flush dependency parent's dirty and/or
+ * unserialized child count. In the context of serializing the
+ * cache, this is a non-issue, as even if we have already
+ * serialized the parent, it will be marked dirty and its image
+ * marked out of date if appropriate when the child is serialized.
+ *
+ * However, this is a major issue for a flush, as were this to happen
+ * in a flush, it would violate the invariant that the flush dependency
+ * feature is intended to enforce. As the metadata cache has no
+ * control over the behavior of cache clients, it has no way of
+ * preventing this behaviour. However, it should detect it if at all
+ * possible.
+ *
+ * Do this by maintaining a count of the number of times each entry is
+ * serialized during a cache serialization. If any flush dependency
+ * parent is serialized more than once, throw an assertion failure.
+ *
+ * 3) An entry relocation will typically change the location of the
+ * entry in the index list. This shouldn't cause problems as we
+ * will scan the index list until we make a complete pass without
+ * finding anything to serialize -- making relocations of either
+ * the current or next entries irrelevant.
+ *
+ * Note that since a relocation may result in our skipping part of
+ * the index list, we must always do at least one more pass through
+ * the index list after an entry relocation.
+ *
+ * 4) Changes in the flush dependency structure are possible on
+ * entry insertion, load, expunge, evict, or remove. Destruction
+ * of a flush dependency has no effect, as it can only relax the
+ * flush dependencies. Creation of a flush dependency can create
+ * an unserialized child of a flush dependency parent where all
+ * flush dependency children were previously serialized. Should
+ * this child dirty the flush dependency parent when it is serialized,
+ * the parent will be re-serialized.
+ *
+ * Per the discussion of 2) above, this is a non-issue for cache
+ * serialization, and a major problem for cache flush. Using the
+ * same detection mechanism, throw an assertion failure if this
+ * condition appears.
+ *
+ * Observe that either eviction or removal of entries as a result of
+ * a serialization is not a problem as long as the flush dependency
+ * tree does not change beyond the removal of a leaf.
+ */
+ while(!done) {
+ /* Reset the counters so that we can detect insertions, loads,
+ * moves, and flush dependency height changes caused by the pre_serialize
+ * and serialize callbacks.
+ */
+ cache_ptr->entries_loaded_counter = 0;
+ cache_ptr->entries_inserted_counter = 0;
+ cache_ptr->entries_relocated_counter = 0;
+
+ done = TRUE; /* set to FALSE if any activity in inner loop */
+ entry_ptr = cache_ptr->il_head;
+ while(entry_ptr != NULL) {
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+
+ /* Verify that either the entry is already serialized, or
+ * that it is assigned to either the target or an inner
+ * ring.
+ */
+ HDassert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date));
+
+ /* Skip flush me last entries or inner ring entries */
+ if(!entry_ptr->flush_me_last && entry_ptr->ring == ring) {
+
+ /* if we encounter an unserialized entry in the current
+ * ring that is not marked flush me last, we are not done.
+ */
+ if(!entry_ptr->image_up_to_date)
+ done = FALSE;
+
+ /* Serialize the entry if its image is not up to date
+ * and it has no unserialized flush dependency children.
+ */
+ if(!entry_ptr->image_up_to_date && entry_ptr->flush_dep_nunser_children == 0) {
+ HDassert(entry_ptr->serialization_count == 0);
+
+ /* Serialize the entry */
+ if(H5C__serialize_single_entry(f, dxpl_id, cache_ptr, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "entry serialization failed")
+
+ HDassert(entry_ptr->flush_dep_nunser_children == 0);
+ HDassert(entry_ptr->serialization_count == 0);
+
+#ifndef NDEBUG
+ /* Increment serialization counter (to detect multiple serializations) */
+ entry_ptr->serialization_count++;
+#endif /* NDEBUG */
+ } /* end if */
+ } /* end if */
+
+ /* Check for the cache being perturbed during the entry serialize */
+ if((cache_ptr->entries_loaded_counter > 0) ||
+ (cache_ptr->entries_inserted_counter > 0) ||
+ (cache_ptr->entries_relocated_counter > 0)) {
+
+#if H5C_COLLECT_CACHE_STATS
+ H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr);
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ /* Reset the counters */
+ cache_ptr->entries_loaded_counter = 0;
+ cache_ptr->entries_inserted_counter = 0;
+ cache_ptr->entries_relocated_counter = 0;
+
+ /* Restart scan */
+ entry_ptr = cache_ptr->il_head;
+ } /* end if */
+ else
+ /* Advance to next entry */
+ entry_ptr = entry_ptr->il_next;
+ } /* while ( entry_ptr != NULL ) */
+ } /* while ( ! done ) */
+
+
+ /* Reset the counters so that we can detect insertions, loads,
+ * moves, and flush dependency height changes caused by the pre_serialize
+ * and serialize callbacks.
+ */
+ cache_ptr->entries_loaded_counter = 0;
+ cache_ptr->entries_inserted_counter = 0;
+ cache_ptr->entries_relocated_counter = 0;
+
+ /* At this point, all entries not marked "flush me last" and in
+ * the current ring or outside it should be serialized and have up
+ * to date images. Scan the index list again to serialize the
+ * "flush me last" entries (if they are in the current ring) and to
+ * verify that all other entries have up to date images.
+ */
+ entry_ptr = cache_ptr->il_head;
+ while(entry_ptr != NULL) {
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->ring > H5C_RING_UNDEFINED);
+ HDassert(entry_ptr->ring < H5C_RING_NTYPES);
+ HDassert((entry_ptr->ring >= ring) || (entry_ptr->image_up_to_date));
+
+ if(entry_ptr->ring == ring) {
+ if(entry_ptr->flush_me_last) {
+ if(!entry_ptr->image_up_to_date) {
+ HDassert(entry_ptr->serialization_count == 0);
+ HDassert(entry_ptr->flush_dep_nunser_children == 0);
+
+ /* Serialize the entry */
+ if(H5C__serialize_single_entry(f, dxpl_id, cache_ptr, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "entry serialization failed")
+
+ /* Check for the cache changing */
+ if((cache_ptr->entries_loaded_counter > 0) ||
+ (cache_ptr->entries_inserted_counter > 0) ||
+ (cache_ptr->entries_relocated_counter > 0))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "flush_me_last entry serialization triggered restart")
+
+ HDassert(entry_ptr->flush_dep_nunser_children == 0);
+ HDassert(entry_ptr->serialization_count == 0);
+#ifndef NDEBUG
+ /* Increment serialization counter (to detect multiple serializations) */
+ entry_ptr->serialization_count++;
+#endif /* NDEBUG */
+ } /* end if */
+ } /* end if */
+ else {
+ HDassert(entry_ptr->image_up_to_date);
+ HDassert(entry_ptr->serialization_count <= 1);
+ HDassert(entry_ptr->flush_dep_nunser_children == 0);
+ } /* end else */
+ } /* if ( entry_ptr->ring == ring ) */
+
+ entry_ptr = entry_ptr->il_next;
+ } /* while ( entry_ptr != NULL ) */
+
+done:
+ HDassert(cache_ptr->serialization_in_progress);
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__serialize_ring() */
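The scan-restart discipline laid out in the comment block above reduces to: clear the loaded/inserted/relocated counters before touching an entry, and restart the index scan from the head whenever a serialize callback leaves any of them nonzero. A compact sketch over a hypothetical singly linked index list, with a stub serializer standing in for H5C__serialize_single_entry():

#include <stddef.h>

typedef struct sk_entry {
    struct sk_entry *next;         /* index list link                     */
    int image_up_to_date;
    unsigned nunser_children;      /* unserialized flush dependency kids  */
} sk_entry;

typedef struct sk_cache {
    sk_entry *head;                         /* head of the index list     */
    unsigned loaded, inserted, relocated;   /* perturbation counters      */
} sk_cache;

/* Stub serializer: marks the image current; the real callback may also
 * load, insert, or relocate entries, which is what forces the restarts. */
static int sk_serialize(sk_cache *cache, sk_entry *entry)
{
    (void)cache;
    entry->image_up_to_date = 1;
    return 0;
}

static int
sk_serialize_all(sk_cache *cache)
{
    int done = 0;

    while(!done) {
        sk_entry *entry = cache->head;

        done = 1;
        cache->loaded = cache->inserted = cache->relocated = 0;

        while(entry != NULL) {
            if(!entry->image_up_to_date) {
                done = 0;          /* at least one more pass is needed */
                if(entry->nunser_children == 0)
                    if(sk_serialize(cache, entry) < 0)
                        return -1;
            }

            /* If the callback perturbed the index, entry->next may point
             * at freed memory -- restart the scan from the list head. */
            if(cache->loaded || cache->inserted || cache->relocated) {
                cache->loaded = cache->inserted = cache->relocated = 0;
                entry = cache->head;
            }
            else
                entry = entry->next;
        }
    }

    return 0;
}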
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__serialize_single_entry
+ *
+ * Purpose: Serialize the cache entry pointed to by the entry_ptr
+ * parameter.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer, 7/24/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__serialize_single_entry(H5F_t *f, hid_t dxpl_id, H5C_t *cache_ptr,
+ H5C_cache_entry_t *entry_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(entry_ptr);
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(!entry_ptr->prefetched);
+ HDassert(!entry_ptr->image_up_to_date);
+ HDassert(entry_ptr->is_dirty);
+ HDassert(!entry_ptr->is_protected);
+ HDassert(!entry_ptr->flush_in_progress);
+ HDassert(entry_ptr->type);
+
+ /* Set entry_ptr->flush_in_progress to TRUE so that the target entry
+ * will not be evicted out from under us. Must set it back to FALSE
+ * when we are done.
+ */
+ entry_ptr->flush_in_progress = TRUE;
+
+ /* Allocate buffer for the entry image if required. */
+ if(NULL == entry_ptr->image_ptr) {
+ HDassert(entry_ptr->size > 0);
+ if(NULL == (entry_ptr->image_ptr = H5MM_malloc(entry_ptr->size + H5C_IMAGE_EXTRA_SPACE)) )
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for on disk image buffer")
+#if H5C_DO_MEMORY_SANITY_CHECKS
+ HDmemcpy(((uint8_t *)entry_ptr->image_ptr) + entry_ptr->size, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
+#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
+ } /* end if */
+
+ /* Generate image for entry */
+ if(H5C__generate_image(f, cache_ptr, entry_ptr, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTSERIALIZE, FAIL, "Can't generate image for cache entry")
+
+ /* Reset the flush_in_progress flag */
+ entry_ptr->flush_in_progress = FALSE;
+
+done:
+ HDassert((ret_value != SUCCEED) || (!entry_ptr->flush_in_progress));
+ HDassert((ret_value != SUCCEED) || (entry_ptr->image_up_to_date));
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__serialize_single_entry() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5C__generate_image
*
* Purpose: Serialize an entry and generate its image.
@@ -7739,8 +8669,8 @@ H5C__assert_flush_dep_nocycle(const H5C_cache_entry_t * entry,
*
*-------------------------------------------------------------------------
*/
-static herr_t
-H5C__generate_image(const H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr,
+herr_t
+H5C__generate_image(H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr,
hid_t dxpl_id)
{
haddr_t new_addr = HADDR_UNDEF;
@@ -7749,10 +8679,18 @@ H5C__generate_image(const H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_p
unsigned serialize_flags = H5C__SERIALIZE_NO_FLAGS_SET;
herr_t ret_value = SUCCEED;
- FUNC_ENTER_STATIC
+ FUNC_ENTER_PACKAGE
/* Sanity check */
+ HDassert(f);
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(entry_ptr);
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(!entry_ptr->image_up_to_date);
+ HDassert(entry_ptr->is_dirty);
+ HDassert(!entry_ptr->is_protected);
+ HDassert(entry_ptr->type);
/* make note of the entry's current address */
old_addr = entry_ptr->addr;
@@ -7766,8 +8704,7 @@ H5C__generate_image(const H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_p
/* Check for any flags set in the pre-serialize callback */
if(serialize_flags != H5C__SERIALIZE_NO_FLAGS_SET) {
/* Check for unexpected flags from serialize callback */
- if(serialize_flags & ~(H5C__SERIALIZE_RESIZED_FLAG |
- H5C__SERIALIZE_MOVED_FLAG))
+ if(serialize_flags & ~(H5C__SERIALIZE_RESIZED_FLAG | H5C__SERIALIZE_MOVED_FLAG))
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unknown serialize flag(s)")
#ifdef H5_HAVE_PARALLEL
@@ -7798,12 +8735,15 @@ H5C__generate_image(const H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_p
* tests will be necessary.
*/
if(cache_ptr->aux_ptr != NULL)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "resize/move in serialize occured in parallel case.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "resize/move in serialize occured in parallel case")
#endif
/* If required, resize the buffer and update the entry and the cache
* data structures */
if(serialize_flags & H5C__SERIALIZE_RESIZED_FLAG) {
+ /* Sanity check */
+ HDassert(new_len > 0);
+
/* Allocate a new image buffer */
if(NULL == (entry_ptr->image_ptr = H5MM_realloc(entry_ptr->image_ptr, new_len + H5C_IMAGE_EXTRA_SPACE)))
HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for on disk image buffer")
@@ -7814,9 +8754,8 @@ H5C__generate_image(const H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_p
/* Update statistics for resizing the entry */
H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_len);
- /* update the hash table for the size change */
- H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, \
- new_len, entry_ptr, !(entry_ptr->is_dirty));
+ /* Update the hash table for the size change */
+ H5C__UPDATE_INDEX_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_len, entry_ptr, !(entry_ptr->is_dirty));
/* The entry can't be protected since we are in the process of
* flushing it. Thus we must update the replacement policy data
@@ -7829,10 +8768,11 @@ H5C__generate_image(const H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_p
* for the flush or flush destroy yet, the entry should
* be in the slist. Thus update it for the size change.
*/
+ HDassert(entry_ptr->is_dirty);
HDassert(entry_ptr->in_slist);
H5C__UPDATE_SLIST_FOR_SIZE_CHANGE(cache_ptr, entry_ptr->size, new_len);
- /* finally, update the entry for its new size */
+ /* Finally, update the entry for its new size */
entry_ptr->size = new_len;
} /* end if */
@@ -7840,18 +8780,19 @@ H5C__generate_image(const H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_p
* for a move
*/
if(serialize_flags & H5C__SERIALIZE_MOVED_FLAG) {
- H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr);
+ /* Update stats and entries relocated counter */
+ H5C__UPDATE_STATS_FOR_MOVE(cache_ptr, entry_ptr)
/* We must update cache data structures for the change in address */
if(entry_ptr->addr == old_addr) {
/* Delete the entry from the hash table and the slist */
- H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr);
+ H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, FAIL);
H5C__REMOVE_ENTRY_FROM_SLIST(cache_ptr, entry_ptr, FALSE);
- /* update the entry for its new address */
+ /* Update the entry for its new address */
entry_ptr->addr = new_addr;
- /* and then reinsert in the index and slist */
+ /* And then reinsert in the index and slist */
H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, FAIL);
H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, entry_ptr, FAIL);
} /* end if */
@@ -7868,6 +8809,17 @@ H5C__generate_image(const H5F_t *f, H5C_t *cache_ptr, H5C_cache_entry_t *entry_p
#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
entry_ptr->image_up_to_date = TRUE;
+ /* Propagate the fact that the entry is serialized up the
+ * flush dependency chain if appropriate. Since the image must
+ * have been out of date for this function to have been called
+ * (see assertion on entry), no need to check that -- only check
+ * for flush dependency parents.
+ */
+ HDassert(entry_ptr->flush_dep_nunser_children == 0);
+ if(entry_ptr->flush_dep_nparents > 0)
+ if(H5C__mark_flush_dep_serialized(entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "Can't propagate serialization status to fd parents")
+
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C__generate_image */
@@ -7937,11 +8889,20 @@ H5C_remove_entry(void *_entry)
/* Update the cache internal data structures as appropriate for a destroy.
* Specifically:
* 1) Delete it from the index
- * 2) Update the replacement policy for eviction
- * 3) Remove it from the tag list for this object
+ * 2) Delete it from the collective read access list
+ * 3) Update the replacement policy for eviction
+ * 4) Remove it from the tag list for this object
*/
- H5C__DELETE_FROM_INDEX(cache, entry)
+ H5C__DELETE_FROM_INDEX(cache, entry, FAIL)
+
+#ifdef H5_HAVE_PARALLEL
+ /* Check for collective read access flag */
+ if(entry->coll_access) {
+ entry->coll_access = FALSE;
+ H5C__REMOVE_FROM_COLL_LIST(cache, entry, FAIL)
+ } /* end if */
+#endif /* H5_HAVE_PARALLEL */
H5C__UPDATE_RP_FOR_EVICTION(cache, entry, FAIL)
diff --git a/src/H5CS.c b/src/H5CS.c
index 477b5f9..0a3dcce 100644
--- a/src/H5CS.c
+++ b/src/H5CS.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5CSprivate.h b/src/H5CSprivate.h
index ab7f993..467dd9d 100644
--- a/src/H5CSprivate.h
+++ b/src/H5CSprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Cdbg.c b/src/H5Cdbg.c
index 16077c8..4a08d9b 100644
--- a/src/H5Cdbg.c
+++ b/src/H5Cdbg.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -30,13 +28,18 @@
#include "H5Cmodule.h" /* This source code file is part of the H5C module */
+#define H5AC_FRIEND
+
+
+
/***********/
/* Headers */
/***********/
-#include "H5private.h" /* Generic Functions */
-#include "H5Cpkg.h" /* Cache */
-#include "H5Eprivate.h" /* Error handling */
+#include "H5private.h" /* Generic Functions */
+#include "H5ACpkg.h" /* Metadata Cache */
+#include "H5Cpkg.h" /* Cache */
+#include "H5Eprivate.h" /* Error Handling */
/****************/
@@ -53,10 +56,6 @@
/* Local Prototypes */
/********************/
-#if 0 /* debugging routines */
-herr_t H5C_dump_cache_skip_list(H5C_t *cache_ptr, char *calling_fcn);
-#endif /* debugging routines */
-
/*********************/
/* Package Variables */
@@ -73,6 +72,7 @@ herr_t H5C_dump_cache_skip_list(H5C_t *cache_ptr, char *calling_fcn);
/*******************/
+#ifndef NDEBUG
/*-------------------------------------------------------------------------
* Function: H5C_dump_cache
@@ -104,7 +104,7 @@ H5C_dump_cache(H5C_t * cache_ptr, const char * cache_name)
/* First, create a skip list */
if(NULL == (slist_ptr = H5SL_create(H5SL_TYPE_HADDR, NULL)))
- HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "can't create skip list.")
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTCREATE, FAIL, "can't create skip list")
/* Next, scan the index, and insert all entries in the skip list.
* Do this, as we want to display cache entries in increasing address
@@ -178,6 +178,85 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_dump_cache() */
+#endif /* NDEBUG */
+
+#ifndef NDEBUG
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_dump_cache_LRU
+ *
+ * Purpose: Print a summary of the contents of the metadata cache
+ * LRU for debugging purposes.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 10/10/10
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_dump_cache_LRU(H5C_t *cache_ptr, const char *cache_name)
+{
+ H5C_cache_entry_t * entry_ptr;
+ int i = 0;
+
+ FUNC_ENTER_NOAPI_NOERR
+
+ /* Sanity check */
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_name != NULL );
+
+ HDfprintf(stdout, "\n\nDump of metadata cache LRU \"%s\"\n", cache_name);
+ HDfprintf(stdout, "LRU len = %d, LRU size = %d\n",
+ cache_ptr->LRU_list_len, (int)(cache_ptr->LRU_list_size));
+ HDfprintf(stdout, "index_size = %d, max_cache_size = %d, delta = %d\n\n",
+ (int)(cache_ptr->index_size), (int)(cache_ptr->max_cache_size),
+ (int)(cache_ptr->max_cache_size) - (int)(cache_ptr->index_size));
+
+ /* Print header */
+ HDfprintf(stdout, "Entry ");
+ HDfprintf(stdout, "| Address ");
+ HDfprintf(stdout, "| Tag ");
+ HDfprintf(stdout, "| Size ");
+ HDfprintf(stdout, "| Ring ");
+ HDfprintf(stdout, "| Type ");
+ HDfprintf(stdout, "| Dirty");
+ HDfprintf(stdout, "\n");
+
+ HDfprintf(stdout, "----------------------------------------------------------------------------------------------------------------\n");
+
+ entry_ptr = cache_ptr->LRU_head_ptr;
+ while(entry_ptr != NULL) {
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+
+ /* Print entry */
+ HDfprintf(stdout, "%s%5d ", cache_ptr->prefix, i);
+ HDfprintf(stdout, " 0x%16llx ", (long long)(entry_ptr->addr));
+
+ if(NULL == entry_ptr->tag_info)
+ HDfprintf(stdout, " %16s ", "N/A");
+ else
+ HDfprintf(stdout, " 0x%16llx ",
+ (long long)(entry_ptr->tag_info->tag));
+
+ HDfprintf(stdout, " %5lld ", (long long)(entry_ptr->size));
+ HDfprintf(stdout, " %d ", (int)(entry_ptr->ring));
+ HDfprintf(stdout, " %2d %-32s ", (int)(entry_ptr->type->id),
+ (entry_ptr->type->name));
+ HDfprintf(stdout, " %d", (int)(entry_ptr->is_dirty));
+ HDfprintf(stdout, "\n");
+
+ i++;
+ entry_ptr = entry_ptr->next;
+ } /* end while */
+
+ HDfprintf(stdout, "----------------------------------------------------------------------------------------------------------------\n");
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5C_dump_cache_LRU() */
+#endif /* NDEBUG */
/*-------------------------------------------------------------------------
@@ -194,7 +273,7 @@ done:
*
*-------------------------------------------------------------------------
*/
-#if 0 /* debugging routine */
+#ifndef NDEBUG
herr_t
H5C_dump_cache_skip_list(H5C_t * cache_ptr, char * calling_fcn)
{
@@ -203,14 +282,14 @@ H5C_dump_cache_skip_list(H5C_t * cache_ptr, char * calling_fcn)
H5C_cache_entry_t * entry_ptr = NULL;
H5SL_node_t * node_ptr = NULL;
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_NOAPI_NOERR
HDassert(cache_ptr != NULL);
HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
HDassert(calling_fcn != NULL);
HDfprintf(stdout, "\n\nDumping metadata cache skip list from %s.\n", calling_fcn);
- HDfprintf(stdout, " slist len = %d.\n", cache_ptr->slist_len);
+ HDfprintf(stdout, " slist len = %u.\n", cache_ptr->slist_len);
HDfprintf(stdout, " slist size = %lld.\n", (long long)(cache_ptr->slist_size));
if(cache_ptr->slist_len > 0) {
@@ -240,9 +319,9 @@ H5C_dump_cache_skip_list(H5C_t * cache_ptr, char * calling_fcn)
(int)(entry_ptr->is_dirty),
entry_ptr->type->name);
- HDfprintf(stdout, " node_ptr = 0x%llx, item = 0x%llx\n",
+ HDfprintf(stdout, " node_ptr = 0x%llx, item = %p\n",
(unsigned long long)node_ptr,
- (unsigned long long)H5SL_item(node_ptr));
+ H5SL_item(node_ptr));
/* increment node_ptr before we delete its target */
node_ptr = H5SL_next(node_ptr);
@@ -257,10 +336,110 @@ H5C_dump_cache_skip_list(H5C_t * cache_ptr, char * calling_fcn)
HDfprintf(stdout, "\n\n");
-done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_dump_cache_skip_list() */
-#endif /* debugging routine */
+#endif /* NDEBUG */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_dump_coll_write_list
+ *
+ * Purpose: Debugging routine that prints a summary of the contents of
+ * the collective write skip list used by the metadata cache
+ * in the parallel case to maintain a list of entries to write
+ * collectively at a sync point.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 4/1/17
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifdef H5_HAVE_PARALLEL
+#ifndef NDEBUG
+herr_t
+H5C_dump_coll_write_list(H5C_t * cache_ptr, char * calling_fcn)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+ int i;
+ int list_len;
+ H5AC_aux_t * aux_ptr = NULL;
+ H5C_cache_entry_t * entry_ptr = NULL;
+ H5SL_node_t * node_ptr = NULL;
+
+ FUNC_ENTER_NOAPI_NOERR
+
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->aux_ptr);
+
+ aux_ptr = (H5AC_aux_t *)cache_ptr->aux_ptr;
+
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
+
+ HDassert(calling_fcn != NULL);
+
+ list_len = (int)H5SL_count(cache_ptr->coll_write_list);
+
+ HDfprintf(stdout, "\n\nDumping MDC coll write list from %d:%s.\n",
+ aux_ptr->mpi_rank, calling_fcn);
+ HDfprintf(stdout, " slist len = %u.\n", cache_ptr->slist_len);
+
+ if ( list_len > 0 ) {
+
+ /* scan the collective write list generating the desired output */
+ HDfprintf(stdout,
+ "Num: Addr: Len: Prot/Pind: Dirty: Type:\n");
+
+ i = 0;
+
+ node_ptr = H5SL_first(cache_ptr->coll_write_list);
+
+ if ( node_ptr != NULL )
+
+ entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+
+ else
+
+ entry_ptr = NULL;
+
+ while ( entry_ptr != NULL ) {
+
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+
+ HDfprintf(stdout,
+ "%s%d 0x%016llx %4lld %d/%d %d %s\n",
+ cache_ptr->prefix, i,
+ (long long)(entry_ptr->addr),
+ (long long)(entry_ptr->size),
+ (int)(entry_ptr->is_protected),
+ (int)(entry_ptr->is_pinned),
+ (int)(entry_ptr->is_dirty),
+ entry_ptr->type->name);
+
+ node_ptr = H5SL_next(node_ptr);
+
+ if ( node_ptr != NULL )
+
+ entry_ptr = (H5C_cache_entry_t *)H5SL_item(node_ptr);
+
+ else
+
+ entry_ptr = NULL;
+
+ i++;
+
+ } /* end while */
+ } /* end if */
+
+ HDfprintf(stdout, "\n\n");
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C_dump_coll_write_list() */
+#endif /* NDEBUG */
+#endif /* H5_HAVE_PARALLEL */
/*-------------------------------------------------------------------------
@@ -285,7 +464,7 @@ H5C_set_prefix(H5C_t * cache_ptr, char * prefix)
if((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC) ||
(prefix == NULL) || (HDstrlen(prefix) >= H5C__PREFIX_LEN))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad param(s) on entry.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad param(s) on entry")
HDstrncpy(&(cache_ptr->prefix[0]), prefix, (size_t)(H5C__PREFIX_LEN));
@@ -342,19 +521,6 @@ done:
* Programmer: John Mainzer
* 6/2/04
*
- * JRM -- 11/13/08
- * Added code displaying the max_clean_index_size and
- * max_dirty_index_size.
- *
- * MAM -- 01/06/09
- * Added code displaying the calls_to_msic,
- * total_entries_skipped_in_msic, total_entries_scanned_in_msic,
- * and max_entries_skipped_in_msic fields.
- *
- * JRM -- 4/11/15
- * Added code displaying the new slist_scan_restarts,
- * LRU_scan_restarts, and hash_bucket_scan_restarts fields;
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -398,9 +564,11 @@ H5C_stats(H5C_t * cache_ptr,
size_t aggregate_max_size = 0;
int32_t aggregate_max_pins = 0;
double hit_rate;
+ double prefetch_use_rate;
double average_successful_search_depth = 0.0f;
double average_failed_search_depth = 0.0f;
double average_entries_skipped_per_calls_to_msic = 0.0f;
+ double average_dirty_pf_entries_skipped_per_call_to_msic = 0.0f;
double average_entries_scanned_per_calls_to_msic = 0.0f;
#endif /* H5C_COLLECT_CACHE_STATS */
herr_t ret_value = SUCCEED; /* Return value */
@@ -503,12 +671,12 @@ H5C_stats(H5C_t * cache_ptr,
average_failed_search_depth);
HDfprintf(stdout,
- "%s current (max) index size / length = %ld (%ld) / %ld (%ld)\n",
+ "%s current (max) index size / length = %ld (%ld) / %lu (%lu)\n",
cache_ptr->prefix,
(long)(cache_ptr->index_size),
(long)(cache_ptr->max_index_size),
- (long)(cache_ptr->index_len),
- (long)(cache_ptr->max_index_len));
+ (unsigned long)(cache_ptr->index_len),
+ (unsigned long)(cache_ptr->max_index_len));
HDfprintf(stdout,
"%s current (max) clean/dirty idx size = %ld (%ld) / %ld (%ld)\n",
@@ -519,46 +687,46 @@ H5C_stats(H5C_t * cache_ptr,
(long)(cache_ptr->max_dirty_index_size));
HDfprintf(stdout,
- "%s current (max) slist size / length = %ld (%ld) / %ld (%ld)\n",
+ "%s current (max) slist size / length = %ld (%ld) / %lu (%lu)\n",
cache_ptr->prefix,
(long)(cache_ptr->slist_size),
(long)(cache_ptr->max_slist_size),
- (long)(cache_ptr->slist_len),
- (long)(cache_ptr->max_slist_len));
+ (unsigned long)(cache_ptr->slist_len),
+ (unsigned long)(cache_ptr->max_slist_len));
HDfprintf(stdout,
- "%s current (max) PL size / length = %ld (%ld) / %ld (%ld)\n",
+ "%s current (max) PL size / length = %ld (%ld) / %lu (%lu)\n",
cache_ptr->prefix,
(long)(cache_ptr->pl_size),
(long)(cache_ptr->max_pl_size),
- (long)(cache_ptr->pl_len),
- (long)(cache_ptr->max_pl_len));
+ (unsigned long)(cache_ptr->pl_len),
+ (unsigned long)(cache_ptr->max_pl_len));
HDfprintf(stdout,
- "%s current (max) PEL size / length = %ld (%ld) / %ld (%ld)\n",
+ "%s current (max) PEL size / length = %ld (%ld) / %lu (%lu)\n",
cache_ptr->prefix,
(long)(cache_ptr->pel_size),
(long)(cache_ptr->max_pel_size),
- (long)(cache_ptr->pel_len),
- (long)(cache_ptr->max_pel_len));
+ (unsigned long)(cache_ptr->pel_len),
+ (unsigned long)(cache_ptr->max_pel_len));
HDfprintf(stdout,
- "%s current LRU list size / length = %ld / %ld\n",
+ "%s current LRU list size / length = %ld / %lu\n",
cache_ptr->prefix,
(long)(cache_ptr->LRU_list_size),
- (long)(cache_ptr->LRU_list_len));
+ (unsigned long)(cache_ptr->LRU_list_len));
HDfprintf(stdout,
- "%s current clean LRU size / length = %ld / %ld\n",
+ "%s current clean LRU size / length = %ld / %lu\n",
cache_ptr->prefix,
(long)(cache_ptr->cLRU_list_size),
- (long)(cache_ptr->cLRU_list_len));
+ (unsigned long)(cache_ptr->cLRU_list_len));
HDfprintf(stdout,
- "%s current dirty LRU size / length = %ld / %ld\n",
+ "%s current dirty LRU size / length = %ld / %lu\n",
cache_ptr->prefix,
(long)(cache_ptr->dLRU_list_size),
- (long)(cache_ptr->dLRU_list_len));
+ (unsigned long)(cache_ptr->dLRU_list_len));
HDfprintf(stdout,
"%s Total hits / misses / hit_rate = %ld / %ld / %f\n",
@@ -625,11 +793,10 @@ H5C_stats(H5C_t * cache_ptr,
cache_ptr->prefix,
(long long)(cache_ptr->calls_to_msic));
- if (cache_ptr->calls_to_msic > 0) {
+ if (cache_ptr->calls_to_msic > 0)
average_entries_skipped_per_calls_to_msic =
(((double)(cache_ptr->total_entries_skipped_in_msic)) /
((double)(cache_ptr->calls_to_msic)));
- }
HDfprintf(stdout, "%s MSIC: Average/max entries skipped = %lf / %ld\n",
cache_ptr->prefix,
@@ -637,6 +804,17 @@ H5C_stats(H5C_t * cache_ptr,
(long)(cache_ptr->max_entries_skipped_in_msic));
if(cache_ptr->calls_to_msic > 0)
+ average_dirty_pf_entries_skipped_per_call_to_msic =
+ (((double)(cache_ptr->total_dirty_pf_entries_skipped_in_msic)) /
+ ((double)(cache_ptr->calls_to_msic)));
+
+ HDfprintf(stdout,
+ "%s MSIC: Average/max dirty pf entries skipped = %lf / %ld\n",
+ cache_ptr->prefix,
+ average_dirty_pf_entries_skipped_per_call_to_msic,
+ (long)(cache_ptr->max_dirty_pf_entries_skipped_in_msic));
+
+ if(cache_ptr->calls_to_msic > 0)
average_entries_scanned_per_calls_to_msic =
(((double)(cache_ptr->total_entries_scanned_in_msic)) /
((double)(cache_ptr->calls_to_msic)));
@@ -656,11 +834,44 @@ H5C_stats(H5C_t * cache_ptr,
cache_ptr->entries_scanned_to_make_space));
HDfprintf(stdout,
- "%s slist/LRU/hash bkt scan restarts = %lld / %lld / %lld.\n",
+ "%s slist/LRU/index scan restarts = %lld / %lld / %lld.\n",
cache_ptr->prefix,
(long long)(cache_ptr->slist_scan_restarts),
(long long)(cache_ptr->LRU_scan_restarts),
- (long long)(cache_ptr->hash_bucket_scan_restarts));
+ (long long)(cache_ptr->index_scan_restarts));
+
+ HDfprintf(stdout,
+              "%s cache image creations/reads/loads/size = %d / %d / %d / %Hu\n",
+ cache_ptr->prefix,
+ cache_ptr->images_created,
+ cache_ptr->images_read,
+ cache_ptr->images_loaded,
+ cache_ptr->last_image_size);
+
+ HDfprintf(stdout,
+ "%s prefetches / dirty prefetches = %lld / %lld\n",
+ cache_ptr->prefix,
+ (long long)(cache_ptr->prefetches),
+ (long long)(cache_ptr->dirty_prefetches));
+
+ HDfprintf(stdout,
+ "%s prefetch hits/flushes/evictions = %lld / %lld / %lld\n",
+ cache_ptr->prefix,
+ (long long)(cache_ptr->prefetch_hits),
+ (long long)(cache_ptr->flushes[H5AC_PREFETCHED_ENTRY_ID]),
+ (long long)(cache_ptr->evictions[H5AC_PREFETCHED_ENTRY_ID]));
+
+ if(cache_ptr->prefetches > 0)
+ prefetch_use_rate =
+ (double)100.0f * ((double)(cache_ptr->prefetch_hits)) /
+ ((double)(cache_ptr->prefetches));
+ else
+ prefetch_use_rate = 0.0f;
+
+ HDfprintf(stdout,
+ "%s prefetched entry use rate = %lf\n",
+ cache_ptr->prefix,
+ prefetch_use_rate);
#if H5C_COLLECT_CACHE_ENTRY_STATS
@@ -687,7 +898,7 @@ H5C_stats(H5C_t * cache_ptr,
HDfprintf(stdout, "%s Stats on %s:\n",
cache_ptr->prefix,
- ((cache_ptr->type_name_table_ptr))[i]);
+ ((cache_ptr->class_table_ptr))[i]->name);
if((cache_ptr->hits[i] > 0) || (cache_ptr->misses[i] > 0))
hit_rate = (double)100.0f * ((double)(cache_ptr->hits[i])) /
@@ -804,20 +1015,6 @@ done:
*
* Programmer: John Mainzer, 4/28/04
*
- * JRM 11/13/08
- * Added initialization for the new max_clean_index_size and
- * max_dirty_index_size fields.
- *
- * MAM -- 01/06/09
- * Added code to initalize the calls_to_msic,
- * total_entries_skipped_in_msic, total_entries_scanned_in_msic,
- * and max_entries_skipped_in_msic fields.
- *
- * JRM 4/11/15
- * Added code to initialize the new slist_scan_restarts,
- * LRU_scan_restarts, hash_bucket_scan_restarts, and
- * take_ownerships fields.
- *
*-------------------------------------------------------------------------
*/
void
@@ -886,16 +1083,27 @@ H5C_stats__reset(H5C_t H5_ATTR_UNUSED * cache_ptr)
cache_ptr->max_pel_len = 0;
cache_ptr->max_pel_size = (size_t)0;
- cache_ptr->calls_to_msic = 0;
- cache_ptr->total_entries_skipped_in_msic = 0;
- cache_ptr->total_entries_scanned_in_msic = 0;
- cache_ptr->max_entries_skipped_in_msic = 0;
- cache_ptr->max_entries_scanned_in_msic = 0;
- cache_ptr->entries_scanned_to_make_space = 0;
+ cache_ptr->calls_to_msic = 0;
+ cache_ptr->total_entries_skipped_in_msic = 0;
+ cache_ptr->total_dirty_pf_entries_skipped_in_msic = 0;
+ cache_ptr->total_entries_scanned_in_msic = 0;
+ cache_ptr->max_entries_skipped_in_msic = 0;
+ cache_ptr->max_dirty_pf_entries_skipped_in_msic = 0;
+ cache_ptr->max_entries_scanned_in_msic = 0;
+ cache_ptr->entries_scanned_to_make_space = 0;
cache_ptr->slist_scan_restarts = 0;
cache_ptr->LRU_scan_restarts = 0;
- cache_ptr->hash_bucket_scan_restarts = 0;
+ cache_ptr->index_scan_restarts = 0;
+
+ cache_ptr->images_created = 0;
+ cache_ptr->images_read = 0;
+ cache_ptr->images_loaded = 0;
+ cache_ptr->last_image_size = (hsize_t)0;
+
+ cache_ptr->prefetches = 0;
+ cache_ptr->dirty_prefetches = 0;
+ cache_ptr->prefetch_hits = 0;
#if H5C_COLLECT_CACHE_ENTRY_STATS
for(i = 0; i <= cache_ptr->max_type_id; i++) {
@@ -910,6 +1118,7 @@ H5C_stats__reset(H5C_t H5_ATTR_UNUSED * cache_ptr)
#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */
#endif /* H5C_COLLECT_CACHE_STATS */
+ return;
} /* H5C_stats__reset() */
extern void
@@ -948,7 +1157,7 @@ H5C__dump_children_cb(H5C_cache_entry_t *entry_ptr, void *_ctx)
} /* end if */
return(H5_ITER_CONT);
-}
+} /* end H5C__dump_children_cb() */
static void
H5C__dump_children(H5C_t *cache_ptr, const H5C_cache_entry_t *entry_ptr,
@@ -964,7 +1173,7 @@ H5C__dump_children(H5C_t *cache_ptr, const H5C_cache_entry_t *entry_ptr,
ctx.prefix = prefix;
ctx.indent = indent;
H5C__iter_tagged_entries(cache_ptr, entry_ptr->tag_info->tag, FALSE, H5C__dump_children_cb, &ctx);
-}
+} /* end H5C__dump_children() */
void
H5C__dump_entry(H5C_t *cache_ptr, const H5C_cache_entry_t *entry_ptr,
@@ -978,4 +1187,411 @@ H5C__dump_entry(H5C_t *cache_ptr, const H5C_cache_entry_t *entry_ptr,
H5C__dump_parents(cache_ptr, entry_ptr, "Parent", indent);
if(entry_ptr->flush_dep_nchildren)
H5C__dump_children(cache_ptr, entry_ptr, FALSE, "Child", indent);
-}
+} /* end H5C__dump_entry() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_flush_dependency_exists()
+ *
+ * Purpose: Test to see if a flush dependency relationship exists
+ * between the supplied parent and child. Both parties
+ * are indicated by addresses so as to avoid the necessity
+ * of protect / unprotect calls prior to this call.
+ *
+ * If either the parent or the child is not in the metadata
+ * cache, the function sets *fd_exists_ptr to FALSE.
+ *
+ * If both are in the cache, the child's list of parents is
+ * searched for the proposed parent. If the proposed parent
+ * is found in the child's parent list, the function sets
+ * *fd_exists_ptr to TRUE. In all other non-error cases,
+ * the function sets *fd_exists_ptr to FALSE.
+ *
+ * Return: SUCCEED on success/FAIL on failure. Note that
+ * *fd_exists_ptr is undefined on failure.
+ *
+ * Programmer: John Mainzer
+ * 9/28/16
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef NDEBUG
+herr_t
+H5C_flush_dependency_exists(H5C_t *cache_ptr, haddr_t parent_addr, haddr_t child_addr,
+ hbool_t *fd_exists_ptr)
+{
+ hbool_t fd_exists = FALSE; /* whether flush dependency exists */
+ H5C_cache_entry_t * parent_ptr; /* Ptr to parent entry */
+ H5C_cache_entry_t * child_ptr; /* Ptr to child entry */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(H5F_addr_defined(parent_addr));
+ HDassert(H5F_addr_defined(child_addr));
+ HDassert(fd_exists_ptr);
+
+ H5C__SEARCH_INDEX(cache_ptr, parent_addr, parent_ptr, FAIL)
+ H5C__SEARCH_INDEX(cache_ptr, child_addr, child_ptr, FAIL)
+
+ if(parent_ptr && child_ptr) {
+ HDassert(parent_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(child_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+
+ if(child_ptr->flush_dep_nparents > 0) {
+ unsigned u; /* Local index variable */
+
+ HDassert(child_ptr->flush_dep_parent);
+ HDassert(child_ptr->flush_dep_parent_nalloc >= child_ptr->flush_dep_nparents);
+
+ for(u = 0; u < child_ptr->flush_dep_nparents; u++) {
+ if(child_ptr->flush_dep_parent[u] == parent_ptr) {
+ fd_exists = TRUE;
+ HDassert(parent_ptr->flush_dep_nchildren > 0);
+ break;
+ } /* end if */
+ } /* end for */
+ } /* end if */
+ } /* end if */
+
+ *fd_exists_ptr = fd_exists;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_flush_dependency_exists() */
+#endif /* NDEBUG */
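Because the routine above works from file addresses rather than entry pointers, a debug check can confirm a dependency without protecting either entry first. A minimal sketch, assuming the usual H5C calling context; the addresses and the enclosing function's done: label are placeholders:

#ifndef NDEBUG
    hbool_t fd_exists = FALSE;

    /* Ask the cache whether the entry at child_addr currently has a
     * flush dependency on the entry at parent_addr.
     */
    if(H5C_flush_dependency_exists(f->shared->cache, parent_addr, child_addr, &fd_exists) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "flush dependency query failed")

    /* *fd_exists is undefined on failure, so only test it on success */
    HDassert(fd_exists);
#endif /* NDEBUG */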
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C_validate_index_list
+ *
+ * Purpose: Debugging function that scans the index list for errors.
+ *
+ * If an error is detected, the function generates a
+ * diagnostic and returns FAIL. If no error is detected,
+ * the function returns SUCCEED.
+ *
+ * Return: FAIL if error is detected, SUCCEED otherwise.
+ *
+ * Programmer: John Mainzer, 9/16/16
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef NDEBUG
+herr_t
+H5C_validate_index_list(H5C_t *cache_ptr)
+{
+ H5C_cache_entry_t * entry_ptr = NULL;
+ uint32_t len = 0;
+ int32_t index_ring_len[H5C_RING_NTYPES];
+ size_t size = 0;
+ size_t clean_size = 0;
+ size_t dirty_size = 0;
+ size_t index_ring_size[H5C_RING_NTYPES];
+ size_t clean_index_ring_size[H5C_RING_NTYPES];
+ size_t dirty_index_ring_size[H5C_RING_NTYPES];
+ int i;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+ /* Sanity checks */
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+
+ for(i = 0; i < H5C_RING_NTYPES; i++) {
+ index_ring_len[i] = 0;
+ index_ring_size[i] = 0;
+ clean_index_ring_size[i] = 0;
+ dirty_index_ring_size[i] = 0;
+ } /* end for */
+
+ if(((cache_ptr->il_head == NULL) || (cache_ptr->il_tail == NULL))
+ && (cache_ptr->il_head != cache_ptr->il_tail))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Index list pointer validation failed")
+
+ if((cache_ptr->index_len == 1) && ((cache_ptr->il_head != cache_ptr->il_tail)
+ || (cache_ptr->il_head == NULL) || (cache_ptr->il_head->size != cache_ptr->index_size)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Index list pointer sanity checks failed")
+
+ if((cache_ptr->index_len >= 1)
+ && ((cache_ptr->il_head == NULL)
+ || (cache_ptr->il_head->il_prev != NULL)
+ || (cache_ptr->il_tail == NULL)
+ || (cache_ptr->il_tail->il_next != NULL)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Index list length sanity checks failed")
+
+ entry_ptr = cache_ptr->il_head;
+ while(entry_ptr != NULL) {
+ if((entry_ptr != cache_ptr->il_head)
+ && ((entry_ptr->il_prev == NULL) || (entry_ptr->il_prev->il_next != entry_ptr)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Index list pointers for entry are invalid")
+
+ if((entry_ptr != cache_ptr->il_tail)
+ && ((entry_ptr->il_next == NULL) || (entry_ptr->il_next->il_prev != entry_ptr)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Index list pointers for entry are invalid")
+
+ HDassert(entry_ptr->ring > 0);
+ HDassert(entry_ptr->ring < H5C_RING_NTYPES);
+
+ len++;
+ index_ring_len[entry_ptr->ring] += 1;
+
+ size += entry_ptr->size;
+ index_ring_size[entry_ptr->ring] += entry_ptr->size;
+
+ if(entry_ptr->is_dirty) {
+ dirty_size += entry_ptr->size;
+ dirty_index_ring_size[entry_ptr->ring] += entry_ptr->size;
+ } /* end if */
+ else {
+ clean_size += entry_ptr->size;
+ clean_index_ring_size[entry_ptr->ring] += entry_ptr->size;
+ } /* end else */
+
+ entry_ptr = entry_ptr->il_next;
+ } /* end while */
+
+ if((cache_ptr->index_len != len) || (cache_ptr->il_len != len)
+ || (cache_ptr->index_size != size) || (cache_ptr->il_size != size)
+ || (cache_ptr->clean_index_size != clean_size)
+ || (cache_ptr->dirty_index_size != dirty_size)
+ || (clean_size + dirty_size != size))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Index, clean and dirty sizes for cache are invalid")
+
+ size = 0;
+ clean_size = 0;
+ dirty_size = 0;
+ for(i = 0; i < H5C_RING_NTYPES; i++) {
+ size += clean_index_ring_size[i] + dirty_index_ring_size[i];
+ clean_size += clean_index_ring_size[i];
+ dirty_size += dirty_index_ring_size[i];
+ } /* end for */
+
+ if((cache_ptr->index_size != size)
+ || (cache_ptr->clean_index_size != clean_size)
+ || (cache_ptr->dirty_index_size != dirty_size))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Index, clean and dirty sizes for cache are invalid")
+
+done:
+ if(ret_value != SUCCEED)
+ HDassert(0);
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_validate_index_list() */
+#endif /* NDEBUG */
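A sketch of how a debug build might invoke the check above after bulk manipulation of the index, for example once cache contents have been reconstructed; the call site is illustrative, not one added by this patch:

#ifndef NDEBUG
    /* Verify index list length, size, and per-ring bookkeeping */
    if(H5C_validate_index_list(cache_ptr) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "index list validation failed")
#endif /* NDEBUG */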
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C_get_entry_ptr_from_addr()
+ *
+ * Purpose: Debugging function that attempts to look up an entry in the
+ * cache by its file address, and if found, returns a pointer
+ * to the entry in *entry_ptr_ptr. If the entry is not in the
+ * cache, *entry_ptr_ptr is set to NULL.
+ *
+ * WARNING: This call should be used only in debugging
+ * routines, and it should be avoided when
+ * possible.
+ *
+ * Further, if we ever multi-thread the cache,
+ * this routine will have to be either discarded
+ * or heavily re-worked.
+ *
+ * Finally, keep in mind that the entry whose
+ * pointer is obtained in this fashion may not
+ * be in a stable state.
+ *
+ * Note that this function is only defined if NDEBUG
+ * is not defined.
+ *
+ * As heavy use of this function is almost certainly a
+ * bad idea, the metadata cache tracks the number of
+ * successful calls to this function, and (if
+ * H5C_DO_SANITY_CHECKS is defined) displays any
+ * non-zero count on cache shutdown.
+ *
+ * Return: FAIL if error is detected, SUCCEED otherwise.
+ *
+ * Programmer: John Mainzer, 5/30/14
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef NDEBUG
+herr_t
+H5C_get_entry_ptr_from_addr(H5C_t *cache_ptr, haddr_t addr, void **entry_ptr_ptr)
+{
+ H5C_cache_entry_t * entry_ptr = NULL;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(H5F_addr_defined(addr));
+ HDassert(entry_ptr_ptr);
+
+ H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL)
+
+ if(entry_ptr == NULL)
+ /* the entry doesn't exist in the cache -- report this
+ * and quit.
+ */
+ *entry_ptr_ptr = NULL;
+ else {
+ *entry_ptr_ptr = entry_ptr;
+
+ /* increment call counter */
+ (cache_ptr->get_entry_ptr_from_addr_counter)++;
+ } /* end else */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_get_entry_ptr_from_addr() */
+#endif /* NDEBUG */
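Test code is the intended consumer of this lookup. A short sketch of the pattern; the address and the follow-up checks are placeholders:

#ifndef NDEBUG
    void *entry_ptr = NULL;

    if(H5C_get_entry_ptr_from_addr(cache_ptr, addr, &entry_ptr) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "address lookup failed")

    if(entry_ptr != NULL)
        /* Entry is resident -- remember it may not be in a stable state */
        HDassert(((H5C_cache_entry_t *)entry_ptr)->addr == addr);
#endif /* NDEBUG */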
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_get_serialization_in_progress
+ *
+ * Purpose: Return the current value of
+ * cache_ptr->serialization_in_progress.
+ *
+ * Return: Current value of cache_ptr->serialization_in_progress.
+ *
+ * Programmer: John Mainzer
+ * 8/24/15
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef NDEBUG
+hbool_t
+H5C_get_serialization_in_progress(const H5C_t *cache_ptr)
+{
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ /* Sanity check */
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+
+ FUNC_LEAVE_NOAPI(cache_ptr->serialization_in_progress)
+} /* H5C_get_serialization_in_progress() */
+#endif /* NDEBUG */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C_cache_is_clean()
+ *
+ * Purpose: Debugging function that verifies that all rings in the
+ * metadata cache are clean from the outermost ring, inwards
+ * to the inner ring specified.
+ *
+ * Returns TRUE if all specified rings are clean, and FALSE
+ * if not. Throws an assertion failure on error.
+ *
+ * Return: TRUE if the indicated ring(s) are clean, and FALSE otherwise.
+ *
+ * Programmer: John Mainzer, 6/18/16
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef NDEBUG
+hbool_t
+H5C_cache_is_clean(const H5C_t *cache_ptr, H5C_ring_t inner_ring)
+{
+ H5C_ring_t ring = H5C_RING_USER;
+ hbool_t ret_value = TRUE; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ /* Sanity checks */
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(inner_ring >= H5C_RING_USER);
+ HDassert(inner_ring <= H5C_RING_SB);
+
+ while(ring <= inner_ring) {
+ if(cache_ptr->dirty_index_ring_size[ring] > 0)
+ HGOTO_DONE(FALSE)
+
+ ring++;
+ } /* end while */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_cache_is_clean() */
+#endif /* NDEBUG */
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C_verify_entry_type()
+ *
+ * Purpose: Debugging function that attempts to look up an entry in the
+ * cache by its file address, and if found, tests to see if its
+ * type field contains the expected value.
+ *
+ * If the specified entry is in cache, *in_cache_ptr is set
+ * to TRUE, and *type_ok_ptr is set to TRUE or FALSE depending
+ * on whether the entry's type field matches the expected_type
+ * parameter.
+ *
+ * If the target entry is not in cache, *in_cache_ptr is
+ * set to FALSE, and *type_ok_ptr is undefined.
+ *
+ * Note that this function is only defined if NDEBUG
+ * is not defined.
+ *
+ * Return: FAIL if error is detected, SUCCEED otherwise.
+ *
+ * Programmer: John Mainzer, 5/30/14
+ *
+ *-------------------------------------------------------------------------
+ */
+#ifndef NDEBUG
+herr_t
+H5C_verify_entry_type(H5C_t *cache_ptr, haddr_t addr,
+ const H5C_class_t *expected_type, hbool_t *in_cache_ptr,
+ hbool_t *type_ok_ptr)
+{
+ H5C_cache_entry_t * entry_ptr = NULL;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(H5F_addr_defined(addr));
+ HDassert(expected_type);
+ HDassert(in_cache_ptr);
+ HDassert(type_ok_ptr);
+
+ H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL)
+
+ if(entry_ptr == NULL)
+ /* the entry doesn't exist in the cache -- report this
+ * and quit.
+ */
+ *in_cache_ptr = FALSE;
+ else {
+ *in_cache_ptr = TRUE;
+
+ if(entry_ptr->prefetched)
+ *type_ok_ptr = (expected_type->id == entry_ptr->prefetch_type_id);
+ else
+ *type_ok_ptr = (expected_type == entry_ptr->type);
+ } /* end else */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_verify_entry_type() */
+#endif /* NDEBUG */
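A companion sketch for the type check above; expected_type and addr are placeholders, and *type_ok_ptr is only meaningful when the entry was found in cache:

#ifndef NDEBUG
    hbool_t in_cache = FALSE;
    hbool_t type_ok = FALSE;

    if(H5C_verify_entry_type(cache_ptr, addr, expected_type, &in_cache, &type_ok) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't verify entry type")

    if(in_cache)
        HDassert(type_ok);
#endif /* NDEBUG */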
+
diff --git a/src/H5Cepoch.c b/src/H5Cepoch.c
index 3726aa1..f8507a9 100644
--- a/src/H5Cepoch.c
+++ b/src/H5Cepoch.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -25,18 +23,11 @@
*/
-/****************/
-/* Module Setup */
-/****************/
-
-#include "H5Cmodule.h" /* This source code file is part of the H5C module */
-
-
/***********/
/* Headers */
/***********/
#include "H5private.h" /* Generic Functions */
-#include "H5Cpkg.h" /* Cache */
+#include "H5ACprivate.h" /* Metadata cache */
/****************/
@@ -73,7 +64,7 @@ static void * H5C__epoch_marker_deserialize(const void * image_ptr,
size_t len, void * udata, hbool_t * dirty_ptr);
static herr_t H5C__epoch_marker_image_len(const void * thing,
size_t *image_len_ptr);
-static herr_t H5C__epoch_marker_pre_serialize(const H5F_t *f,
+static herr_t H5C__epoch_marker_pre_serialize(H5F_t *f,
hid_t dxpl_id, void * thing, haddr_t addr, size_t len,
haddr_t * new_addr_ptr, size_t * new_len_ptr, unsigned * flags_ptr);
static herr_t H5C__epoch_marker_serialize(const H5F_t *f,
@@ -99,12 +90,11 @@ static herr_t H5C__epoch_marker_fsf_size(const void H5_ATTR_UNUSED * thing,
/*******************/
-const H5C_class_t H5C__epoch_marker_class =
-{
- /* id = */ H5C__EPOCH_MARKER_TYPE,
+const H5AC_class_t H5AC_EPOCH_MARKER[1] = {{
+ /* id = */ H5AC_EPOCH_MARKER_ID,
/* name = */ "epoch marker",
/* mem_type = */ H5FD_MEM_DEFAULT, /* value doesn't matter */
- /* flags = */ H5C__CLASS_NO_FLAGS_SET,
+ /* flags = */ H5AC__CLASS_NO_FLAGS_SET,
/* get_initial_load_size = */ H5C__epoch_marker_get_initial_load_size,
/* get_final_load_size = */ H5C__epoch_marker_get_final_load_size,
/* verify_chksum = */ H5C__epoch_marker_verify_chksum,
@@ -115,7 +105,7 @@ const H5C_class_t H5C__epoch_marker_class =
/* notify = */ H5C__epoch_marker_notify,
/* free_icr = */ H5C__epoch_marker_free_icr,
/* fsf_size = */ H5C__epoch_marker_fsf_size,
-};
+}};
/***************************************************************************
@@ -190,7 +180,7 @@ H5C__epoch_marker_image_len(const void H5_ATTR_UNUSED *thing,
static herr_t
-H5C__epoch_marker_pre_serialize(const H5F_t H5_ATTR_UNUSED *f, hid_t H5_ATTR_UNUSED dxpl_id,
+H5C__epoch_marker_pre_serialize(H5F_t H5_ATTR_UNUSED *f, hid_t H5_ATTR_UNUSED dxpl_id,
void H5_ATTR_UNUSED *thing, haddr_t H5_ATTR_UNUSED addr, size_t H5_ATTR_UNUSED len,
haddr_t H5_ATTR_UNUSED *new_addr_ptr, size_t H5_ATTR_UNUSED *new_len_ptr,
unsigned H5_ATTR_UNUSED *flags_ptr)
diff --git a/src/H5Cimage.c b/src/H5Cimage.c
new file mode 100644
index 0000000..debd30c
--- /dev/null
+++ b/src/H5Cimage.c
@@ -0,0 +1,3569 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: H5Cimage.c
+ * July 20, 2015
+ * John Mainzer
+ *
+ * Purpose: Functions in this file are specific to the implementation
+ * of the metadata cache image feature.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/****************/
+/* Module Setup */
+/****************/
+
+#include "H5Cmodule.h" /* This source code file is part of the H5C module */
+#define H5F_FRIEND /*suppress error about including H5Fpkg */
+
+
+/***********/
+/* Headers */
+/***********/
+#include "H5private.h" /* Generic Functions */
+#ifdef H5_HAVE_PARALLEL
+#define H5AC_FRIEND /*suppress error about including H5ACpkg */
+#include "H5ACpkg.h" /* Metadata cache */
+#endif /* H5_HAVE_PARALLEL */
+#include "H5Cpkg.h" /* Cache */
+#include "H5Eprivate.h" /* Error handling */
+#include "H5Fpkg.h" /* Files */
+#include "H5FDprivate.h" /* File drivers */
+#include "H5FLprivate.h" /* Free Lists */
+#include "H5MFprivate.h" /* File memory management */
+#include "H5MMprivate.h" /* Memory management */
+
+
+/****************/
+/* Local Macros */
+/****************/
+#if H5C_DO_MEMORY_SANITY_CHECKS
+#define H5C_IMAGE_EXTRA_SPACE 8
+#define H5C_IMAGE_SANITY_VALUE "DeadBeef"
+#else /* H5C_DO_MEMORY_SANITY_CHECKS */
+#define H5C_IMAGE_EXTRA_SPACE 0
+#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
+
+/* Cache image buffer components, on disk */
+#define H5C__MDCI_BLOCK_SIGNATURE "MDCI"
+#define H5C__MDCI_BLOCK_SIGNATURE_LEN 4
+#define H5C__MDCI_BLOCK_VERSION_0 0
+
+/* Metadata cache image header flags -- max 8 bits */
+#define H5C__MDCI_HEADER_HAVE_RESIZE_STATUS 0x01
+
+/* Metadata cache image entry flags -- max 8 bits */
+#define H5C__MDCI_ENTRY_DIRTY_FLAG 0x01
+#define H5C__MDCI_ENTRY_IN_LRU_FLAG 0x02
+#define H5C__MDCI_ENTRY_IS_FD_PARENT_FLAG 0x04
+#define H5C__MDCI_ENTRY_IS_FD_CHILD_FLAG 0x08
+
+/* Limits on flush dependency values, stored in 16-bit values on disk */
+#define H5C__MDCI_MAX_FD_CHILDREN USHRT_MAX
+#define H5C__MDCI_MAX_FD_PARENTS USHRT_MAX
+
+/* Values for image entry magic field */
+#define H5C_IMAGE_ENTRY_T_MAGIC 0x005CAC08
+#define H5C_IMAGE_ENTRY_T_BAD_MAGIC 0xBeefDead
+
+/* Maximum ring allowed in image */
+#define H5C_MAX_RING_IN_IMAGE H5C_RING_MDFSM
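The block signature and version macros above imply the usual decode-time check. A hedged sketch of what a decoder would presumably do with them (buf is an assumed const uint8_t * positioned at the start of the image block; only the signature and version handling is shown):

    const uint8_t *p = buf;     /* assumed to point at the start of the image block */

    /* Verify the "MDCI" signature, then the block version */
    if(HDmemcmp(p, H5C__MDCI_BLOCK_SIGNATURE, (size_t)H5C__MDCI_BLOCK_SIGNATURE_LEN))
        HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "bad metadata cache image block signature")
    p += H5C__MDCI_BLOCK_SIGNATURE_LEN;

    if(*p++ != (uint8_t)H5C__MDCI_BLOCK_VERSION_0)
        HGOTO_ERROR(H5E_CACHE, H5E_VERSION, FAIL, "bad metadata cache image block version")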
+
+
+/******************/
+/* Local Typedefs */
+/******************/
+
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+/* Helper routines */
+static size_t H5C__cache_image_block_entry_header_size(const H5F_t *f);
+static size_t H5C__cache_image_block_header_size(const H5F_t *f);
+static herr_t H5C__decode_cache_image_header(const H5F_t *f,
+ H5C_t *cache_ptr, const uint8_t **buf);
+#ifndef NDEBUG /* only used in assertions */
+static herr_t H5C__decode_cache_image_entry(const H5F_t *f,
+ const H5C_t *cache_ptr, const uint8_t **buf, unsigned entry_num);
+#endif /* NDEBUG */ /* only used in assertions */
+static herr_t H5C__destroy_pf_entry_child_flush_deps(H5C_t *cache_ptr,
+ H5C_cache_entry_t *pf_entry_ptr, H5C_cache_entry_t **fd_children);
+static herr_t H5C__encode_cache_image_header(const H5F_t *f,
+ const H5C_t *cache_ptr, uint8_t **buf);
+static herr_t H5C__encode_cache_image_entry(H5F_t *f, H5C_t *cache_ptr,
+ uint8_t **buf, unsigned entry_num);
+static herr_t H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr);
+static void H5C__prep_for_file_close__compute_fd_heights_real(
+ H5C_cache_entry_t *entry_ptr, uint32_t fd_height);
+static herr_t H5C__prep_for_file_close__setup_image_entries_array(H5C_t *cache_ptr);
+static herr_t H5C__prep_for_file_close__scan_entries(const H5F_t *f,
+ H5C_t *cache_ptr);
+static herr_t H5C__reconstruct_cache_contents(H5F_t *f, hid_t dxpl_id,
+ H5C_t *cache_ptr);
+static H5C_cache_entry_t *H5C__reconstruct_cache_entry(const H5F_t *f,
+ H5C_t *cache_ptr, const uint8_t **buf);
+static herr_t H5C__write_cache_image_superblock_msg(H5F_t *f, hid_t dxpl_id,
+ hbool_t create);
+static herr_t H5C__read_cache_image(H5F_t * f, hid_t dxpl_id, H5C_t *cache_ptr);
+static herr_t H5C__write_cache_image(H5F_t *f, hid_t dxpl_id, const H5C_t *cache_ptr);
+static herr_t H5C__construct_cache_image_buffer(H5F_t *f, H5C_t *cache_ptr);
+static herr_t H5C__free_image_entries_array(H5C_t *cache_ptr);
+
+
+/*********************/
+/* Package Variables */
+/*********************/
+
+/* Declare a free list to manage H5C_cache_entry_t objects */
+H5FL_DEFINE(H5C_cache_entry_t);
+
+
+/*****************************/
+/* Library Private Variables */
+/*****************************/
+
+
+/*******************/
+/* Local Variables */
+/*******************/
+
+
+/*-------------------------------------------------------------------------
+ *
+ * Function: H5C_cache_image_pending()
+ *
+ * Purpose: Tests to see if the load of a metadata cache image
+ * is pending (i.e. will be executed on the next
+ * protect or insert).
+ *
+ * Returns TRUE if a cache image load is pending, and FALSE
+ * if not. Throws an assertion failure on error.
+ *
+ * Return: TRUE if a cache image load is pending, and FALSE otherwise.
+ *
+ * Programmer: John Mainzer, 6/18/16
+ *
+ *-------------------------------------------------------------------------
+ */
+hbool_t
+H5C_cache_image_pending(const H5C_t *cache_ptr)
+{
+ hbool_t ret_value = TRUE; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ /* Sanity checks */
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+
+ ret_value = (cache_ptr->load_image && !cache_ptr->image_loaded);
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_cache_image_pending() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_cache_image_status()
+ *
+ * Purpose: Examine the metadata cache associated with the supplied
+ * instance of H5F_t to determine whether the load of a
+ * cache image has either been queued or executed, and if
+ * construction of a cache image has been requested.
+ *
+ * This done, it sets *load_ci_ptr to TRUE if a cache image
+ * has either been loaded or a load has been requested, and
+ * to FALSE otherwise.
+ *
+ * Similarly, set *write_ci_ptr to TRUE if construction of
+ * a cache image has been requested, and to FALSE otherwise.
+ *
+ * Return: SUCCEED on success, and FAIL on failure.
+ *
+ * Programmer: John Mainzer
+ * 12/29/16
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_cache_image_status(H5F_t * f, hbool_t *load_ci_ptr, hbool_t *write_ci_ptr)
+{
+ H5C_t * cache_ptr;
+
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(load_ci_ptr);
+ HDassert(write_ci_ptr);
+
+ *load_ci_ptr = cache_ptr->load_image || cache_ptr->image_loaded;
+ *write_ci_ptr = cache_ptr->image_ctl.generate_image;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5C_cache_image_status() */
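Callers elsewhere in the library can combine the two queries above to decide whether an operation must wait for the image load. A minimal sketch; dxpl_id and the enclosing error handling are assumed from the caller's context:

    hbool_t load_ci = FALSE;    /* image load queued or already done? */
    hbool_t write_ci = FALSE;   /* image write requested on close?    */

    if(H5C_cache_image_status(f, &load_ci, &write_ci) < 0)
        HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "can't query cache image status")

    /* If a load is still pending, force it in before proceeding */
    if(load_ci && H5C_cache_image_pending(f->shared->cache))
        if(H5C_force_cache_image_load(f, dxpl_id) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, FAIL, "can't force cache image load")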
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__construct_cache_image_buffer()
+ *
+ * Purpose: Allocate a buffer of size cache_ptr->image_len, and
+ * load it with an image of the metadata cache image block.
+ *
+ * Note that by the time this function is called, the cache
+ * should have removed all entries from its data structures.
+ *
+ * Return: SUCCEED on success, and FAIL on failure.
+ *
+ * Programmer: John Mainzer
+ * 8/5/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__construct_cache_image_buffer(H5F_t * f, H5C_t *cache_ptr)
+{
+ uint8_t * p; /* Pointer into image buffer */
+ uint32_t chksum;
+ unsigned u; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(cache_ptr == f->shared->cache);
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->close_warning_received);
+ HDassert(cache_ptr->image_ctl.generate_image);
+ HDassert(cache_ptr->num_entries_in_image > 0);
+ HDassert(cache_ptr->index_len == 0);
+ HDassert(cache_ptr->image_data_len > 0);
+ HDassert(cache_ptr->image_data_len <= cache_ptr->image_len);
+
+ /* Allocate the buffer in which to construct the cache image block */
+ if(NULL == (cache_ptr->image_buffer = H5MM_malloc(cache_ptr->image_len + 1)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for cache image buffer")
+
+ /* Construct the cache image block header image */
+ p = (uint8_t *)cache_ptr->image_buffer;
+ if(H5C__encode_cache_image_header(f, cache_ptr, &p) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTENCODE, FAIL, "header image construction failed")
+ HDassert((size_t)(p - (uint8_t *)cache_ptr->image_buffer) < cache_ptr->image_data_len);
+
+ /* Construct the cache entry images */
+ for(u = 0; u < cache_ptr->num_entries_in_image; u++)
+ if(H5C__encode_cache_image_entry(f, cache_ptr, &p, u) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTENCODE, FAIL, "entry image construction failed")
+ HDassert((size_t)(p - (uint8_t *)cache_ptr->image_buffer) < cache_ptr->image_data_len);
+
+ /* Construct the adaptive resize status image -- not yet */
+
+ /* Compute the checksum and encode */
+ chksum = H5_checksum_metadata(cache_ptr->image_buffer, (size_t)(cache_ptr->image_data_len - H5F_SIZEOF_CHKSUM), 0);
+ UINT32ENCODE(p, chksum);
+ HDassert((size_t)(p - (uint8_t *)cache_ptr->image_buffer) == cache_ptr->image_data_len);
+ HDassert((size_t)(p - (uint8_t *)cache_ptr->image_buffer) <= cache_ptr->image_len);
+
+#ifndef NDEBUG
+ /* validate the metadata cache image we just constructed by decoding it
+ * and comparing the result with the original data.
+ */
+ {
+ uint32_t old_chksum;
+ const uint8_t * q;
+ H5C_t * fake_cache_ptr = NULL;
+ unsigned v;
+ herr_t status; /* Status from decoding */
+
+ fake_cache_ptr = (H5C_t *)H5MM_malloc(sizeof(H5C_t));
+ HDassert(fake_cache_ptr);
+ fake_cache_ptr->magic = H5C__H5C_T_MAGIC;
+
+ /* needed for sanity checks */
+ fake_cache_ptr->image_len = cache_ptr->image_len;
+ q = (const uint8_t *)cache_ptr->image_buffer;
+ status = H5C__decode_cache_image_header(f, fake_cache_ptr, &q);
+ HDassert(status >= 0);
+
+ HDassert(NULL != p);
+ HDassert(fake_cache_ptr->num_entries_in_image == cache_ptr->num_entries_in_image);
+
+ fake_cache_ptr->image_entries = (H5C_image_entry_t *)H5MM_malloc(sizeof(H5C_image_entry_t) *
+ (size_t)(fake_cache_ptr->num_entries_in_image + 1));
+ HDassert(fake_cache_ptr->image_entries);
+
+ for(u = 0; u < fake_cache_ptr->num_entries_in_image; u++) {
+ (fake_cache_ptr->image_entries)[u].magic = H5C_IMAGE_ENTRY_T_MAGIC;
+ (fake_cache_ptr->image_entries)[u].image_ptr = NULL;
+
+ /* touch up f->shared->cache to satisfy sanity checks... */
+ f->shared->cache = fake_cache_ptr;
+ status = H5C__decode_cache_image_entry(f, fake_cache_ptr, &q, u);
+ HDassert(status >= 0);
+
+ /* ...and then return f->shared->cache to its correct value */
+ f->shared->cache = cache_ptr;
+
+ /* verify expected contents */
+ HDassert((cache_ptr->image_entries)[u].addr == (fake_cache_ptr->image_entries)[u].addr);
+ HDassert((cache_ptr->image_entries)[u].size == (fake_cache_ptr->image_entries)[u].size);
+ HDassert((cache_ptr->image_entries)[u].type_id == (fake_cache_ptr->image_entries)[u].type_id);
+ HDassert((cache_ptr->image_entries)[u].lru_rank == (fake_cache_ptr->image_entries)[u].lru_rank);
+ HDassert((cache_ptr->image_entries)[u].is_dirty == (fake_cache_ptr->image_entries)[u].is_dirty);
+ /* don't check image_fd_height as it is not stored in
+ * the metadata cache image block.
+ */
+ HDassert((cache_ptr->image_entries)[u].fd_child_count == (fake_cache_ptr->image_entries)[u].fd_child_count);
+ HDassert((cache_ptr->image_entries)[u].fd_dirty_child_count == (fake_cache_ptr->image_entries)[u].fd_dirty_child_count);
+ HDassert((cache_ptr->image_entries)[u].fd_parent_count == (fake_cache_ptr->image_entries)[u].fd_parent_count);
+
+ for(v = 0; v < (cache_ptr->image_entries)[u].fd_parent_count; v++)
+ HDassert((cache_ptr->image_entries)[u].fd_parent_addrs[v] == (fake_cache_ptr->image_entries)[u].fd_parent_addrs[v]);
+
+ /* free the fd_parent_addrs array if it exists */
+ if((fake_cache_ptr->image_entries)[u].fd_parent_addrs) {
+ HDassert((fake_cache_ptr->image_entries)[u].fd_parent_count > 0);
+ (fake_cache_ptr->image_entries)[u].fd_parent_addrs = (haddr_t *)H5MM_xfree((fake_cache_ptr->image_entries)[u].fd_parent_addrs);
+ (fake_cache_ptr->image_entries)[u].fd_parent_count = 0;
+ } /* end if */
+ else
+ HDassert((fake_cache_ptr->image_entries)[u].fd_parent_count == 0);
+
+ HDassert((cache_ptr->image_entries)[u].image_ptr);
+ HDassert((fake_cache_ptr->image_entries)[u].image_ptr);
+ HDassert(!HDmemcmp((cache_ptr->image_entries)[u].image_ptr,
+ (fake_cache_ptr->image_entries)[u].image_ptr,
+ (cache_ptr->image_entries)[u].size));
+
+ (fake_cache_ptr->image_entries)[u].image_ptr = H5MM_xfree((fake_cache_ptr->image_entries)[u].image_ptr);
+ } /* end for */
+
+ HDassert((size_t)(q - (const uint8_t *)cache_ptr->image_buffer) == cache_ptr->image_data_len - H5F_SIZEOF_CHKSUM);
+
+ /* compute the checksum */
+ old_chksum = chksum;
+ chksum = H5_checksum_metadata(cache_ptr->image_buffer, (size_t)(cache_ptr->image_data_len - H5F_SIZEOF_CHKSUM), 0);
+ HDassert(chksum == old_chksum);
+
+ fake_cache_ptr->image_entries = (H5C_image_entry_t *)H5MM_xfree(fake_cache_ptr->image_entries);
+ fake_cache_ptr = (H5C_t *)H5MM_xfree(fake_cache_ptr);
+ } /* end block */
+#endif /* NDEBUG */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__construct_cache_image_buffer() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__generate_cache_image()
+ *
+ * Purpose: Generate the cache image and write it to the file, if
+ * directed.
+ *
+ * Return: SUCCEED on success, and FAIL on failure.
+ *
+ * Programmer: Quincey Koziol
+ * 1/26/17
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C__generate_cache_image(H5F_t *f, hid_t dxpl_id, H5C_t *cache_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(cache_ptr == f->shared->cache);
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+
+ /* Construct cache image */
+ if(H5C__construct_cache_image_buffer(f, cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't create metadata cache image")
+
+ /* Free image entries array */
+ if(H5C__free_image_entries_array(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't free image entries array")
+
+ /* Write cache image block if so configured */
+ if(cache_ptr->image_ctl.flags & H5C_CI__GEN_MDC_IMAGE_BLK) {
+ if(H5C__write_cache_image(f, dxpl_id, cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't write metadata cache image block to file")
+
+ H5C__UPDATE_STATS_FOR_CACHE_IMAGE_CREATE(cache_ptr);
+ } /* end if */
+
+ /* Free cache image buffer */
+ HDassert(cache_ptr->image_buffer);
+ cache_ptr->image_buffer = H5MM_xfree(cache_ptr->image_buffer);
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__generate_cache_image() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__deserialize_prefetched_entry()
+ *
+ * Purpose: Deserialize the supplied prefetched entry, and return
+ * a pointer to the deserialized entry in *entry_ptr_ptr.
+ * If successful, remove the prefetched entry from the cache,
+ * and free it. Insert the deserialized entry into the cache.
+ *
+ * Note that the on disk image of the entry is not freed --
+ * a pointer to it is stored in the deserialized entries'
+ * image_ptr field, and its image_up_to_date field is set to
+ * TRUE unless the entry is dirtied by the deserialize call.
+ *
+ * If the prefetched entry is a flush dependency child,
+ * destroy that flush dependency prior to calling the
+ * deserialize callback. If appropriate, the flush dependency
+ * relationship will be recreated by the cache client.
+ *
+ * If the prefetched entry is a flush dependency parent,
+ * destroy the flush dependency relationship with all its
+ * children. As all these children must be prefetched entries,
+ * recreate these flush dependency relationships with
+ * deserialized entry after it is inserted in the cache.
+ *
+ * Since deserializing a prefetched entry is semantically
+ * equivalent to a load, issue an entry loaded notification
+ * if the notify callback is defined.
+ *
+ * Return: SUCCEED on success, and FAIL on failure.
+ *
+ * Note that *entry_ptr_ptr is undefined on failure.
+ *
+ * Programmer: John Mainzer, 8/10/15
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C__deserialize_prefetched_entry(H5F_t *f, hid_t dxpl_id, H5C_t *cache_ptr,
+ H5C_cache_entry_t **entry_ptr_ptr, const H5C_class_t *type,
+ haddr_t addr, void *udata)
+{
+ hbool_t dirty = FALSE; /* Flag indicating whether thing was
+ * dirtied during deserialize
+ */
+ size_t len; /* Size of image in file */
+ void * thing = NULL; /* Pointer to thing loaded */
+ H5C_cache_entry_t * pf_entry_ptr; /* pointer to the prefetched entry */
+ /* supplied in *entry_ptr_ptr. */
+ H5C_cache_entry_t * ds_entry_ptr; /* Alias for thing loaded, as cache
+ * entry
+ */
+ H5C_cache_entry_t** fd_children = NULL; /* Pointer to a dynamically */
+ /* allocated array of pointers to */
+ /* the flush dependency children of */
+ /* the prefetched entry, or NULL if */
+ /* that array does not exist. */
+ unsigned flush_flags = (H5C__FLUSH_INVALIDATE_FLAG |
+ H5C__FLUSH_CLEAR_ONLY_FLAG);
+ int i;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ /* sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(f->shared->cache);
+ HDassert(f->shared->cache == cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(entry_ptr_ptr);
+ HDassert(*entry_ptr_ptr);
+ pf_entry_ptr = *entry_ptr_ptr;
+ HDassert(pf_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(pf_entry_ptr->type);
+ HDassert(pf_entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID);
+ HDassert(pf_entry_ptr->prefetched);
+ HDassert(pf_entry_ptr->image_up_to_date);
+ HDassert(pf_entry_ptr->image_ptr);
+ HDassert(pf_entry_ptr->size > 0);
+ HDassert(pf_entry_ptr->addr == addr);
+ HDassert(type);
+ HDassert(type->id == pf_entry_ptr->prefetch_type_id);
+ HDassert(type->mem_type == cache_ptr->class_table_ptr[type->id]->mem_type);
+
+ /* verify absence of prohibited or unsupported type flag combinations */
+ HDassert(!(type->flags & H5C__CLASS_SKIP_READS));
+
+ /* Can't see how skip reads could be usefully combined with
+ * the speculative read flag. Hence disallow.
+ */
+ HDassert(!((type->flags & H5C__CLASS_SKIP_READS) &&
+ (type->flags & H5C__CLASS_SPECULATIVE_LOAD_FLAG)));
+ HDassert(H5F_addr_defined(addr));
+ HDassert(type->get_initial_load_size);
+ HDassert(type->deserialize);
+
+ /* if *pf_entry_ptr is a flush dependency child, destroy all such
+ * relationships now. The client will restore the relationship(s) with
+ * the deserialized entry if appropriate.
+ */
+ HDassert(pf_entry_ptr->fd_parent_count == pf_entry_ptr->flush_dep_nparents);
+ for(i = (int)(pf_entry_ptr->fd_parent_count) - 1; i >= 0; i--) {
+ HDassert(pf_entry_ptr->flush_dep_parent);
+ HDassert(pf_entry_ptr->flush_dep_parent[i]);
+ HDassert(pf_entry_ptr->flush_dep_parent[i]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(pf_entry_ptr->flush_dep_parent[i]->flush_dep_nchildren > 0);
+ HDassert(pf_entry_ptr->fd_parent_addrs);
+ HDassert(pf_entry_ptr->flush_dep_parent[i]->addr == pf_entry_ptr->fd_parent_addrs[i]);
+
+ if(H5C_destroy_flush_dependency(pf_entry_ptr->flush_dep_parent[i], pf_entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "can't destroy pf entry parent flush dependency")
+
+ pf_entry_ptr->fd_parent_addrs[i] = HADDR_UNDEF;
+ } /* end for */
+ HDassert(pf_entry_ptr->flush_dep_nparents == 0);
+
+ /* If *pf_entry_ptr is a flush dependency parent, destroy its flush
+ * dependency relationships with all its children (which must be
+ * prefetched entries as well).
+ *
+ * These flush dependency relationships will have to be restored
+ * after the deserialized entry is inserted into the cache in order
+ * to transfer these relationships to the new entry. Hence save the
+ * pointers to the flush dependency children of *pf_entry_ptr for later
+ * use.
+ */
+ if(pf_entry_ptr->fd_child_count > 0) {
+ if(NULL == (fd_children = (H5C_cache_entry_t **)H5MM_calloc(sizeof(H5C_cache_entry_t **) * (size_t)(pf_entry_ptr->fd_child_count + 1))))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for fd child ptr array")
+
+ if(H5C__destroy_pf_entry_child_flush_deps(cache_ptr, pf_entry_ptr, fd_children) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "can't destroy pf entry child flush dependency(s).")
+ } /* end if */
+
+ /* Since the size of the on disk image is known exactly, there is
+ * no need for either a call to the get_initial_load_size() callback,
+ * or retries if the H5C__CLASS_SPECULATIVE_LOAD_FLAG flag is set.
+ * Similarly, there is no need to clamp possible reads beyond
+ * EOF.
+ */
+ len = pf_entry_ptr->size;
+
+ /* Deserialize the prefetched on-disk image of the entry into the
+ * native memory form
+ */
+ if(NULL == (thing = type->deserialize(pf_entry_ptr->image_ptr, len, udata, &dirty)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, FAIL, "Can't deserialize image")
+ ds_entry_ptr = (H5C_cache_entry_t *)thing;
+
+ /* In general, an entry should be clean just after it is loaded.
+ *
+ * However, when this code is used in the metadata cache, it is
+ * possible that object headers will be dirty at this point, as
+ * the deserialize function will alter object headers if necessary to
+ * fix an old bug.
+ *
+ * In the following assert:
+ *
+ * HDassert( ( dirty == FALSE ) || ( type->id == 5 || type->id == 6 ) );
+ *
+ * note that type ids 5 & 6 are associated with object headers in the
+ * metadata cache.
+ *
+ * When we get to using H5C for other purposes, we may wish to
+ * tighten up the assert so that the loophole only applies to the
+ * metadata cache.
+ *
+ * Note that at present, dirty can't be set to true with prefetched
+ * entries. However this may change, so include this functionality
+ * against that possibility.
+ *
+ * Also, note that it is possible for a prefetched entry to be dirty --
+ * hence the value assigned to ds_entry_ptr->is_dirty below.
+ */
+
+ HDassert( ( dirty == FALSE ) || ( type->id == 5 || type->id == 6) );
+
+ ds_entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC;
+ ds_entry_ptr->cache_ptr = f->shared->cache;
+ ds_entry_ptr->addr = addr;
+ ds_entry_ptr->size = len;
+ HDassert(ds_entry_ptr->size < H5C_MAX_ENTRY_SIZE);
+ ds_entry_ptr->image_ptr = pf_entry_ptr->image_ptr;
+ ds_entry_ptr->image_up_to_date = !dirty;
+ ds_entry_ptr->type = type;
+ ds_entry_ptr->is_dirty = dirty | pf_entry_ptr->is_dirty;
+ ds_entry_ptr->dirtied = FALSE;
+ ds_entry_ptr->is_protected = FALSE;
+ ds_entry_ptr->is_read_only = FALSE;
+ ds_entry_ptr->ro_ref_count = 0;
+ ds_entry_ptr->is_pinned = FALSE;
+ ds_entry_ptr->in_slist = FALSE;
+ ds_entry_ptr->flush_marker = FALSE;
+#ifdef H5_HAVE_PARALLEL
+ ds_entry_ptr->clear_on_unprotect = FALSE;
+ ds_entry_ptr->flush_immediately = FALSE;
+ ds_entry_ptr->coll_access = FALSE;
+#endif /* H5_HAVE_PARALLEL */
+ ds_entry_ptr->flush_in_progress = FALSE;
+ ds_entry_ptr->destroy_in_progress = FALSE;
+
+ ds_entry_ptr->ring = pf_entry_ptr->ring;
+
+ /* Initialize flush dependency height fields */
+ ds_entry_ptr->flush_dep_parent = NULL;
+ ds_entry_ptr->flush_dep_nparents = 0;
+ ds_entry_ptr->flush_dep_parent_nalloc = 0;
+ ds_entry_ptr->flush_dep_nchildren = 0;
+ ds_entry_ptr->flush_dep_ndirty_children = 0;
+ ds_entry_ptr->flush_dep_nunser_children = 0;
+
+ /* Initialize fields supporting the hash table: */
+ ds_entry_ptr->ht_next = NULL;
+ ds_entry_ptr->ht_prev = NULL;
+ ds_entry_ptr->il_next = NULL;
+ ds_entry_ptr->il_prev = NULL;
+
+ /* Initialize fields supporting replacement policies: */
+ ds_entry_ptr->next = NULL;
+ ds_entry_ptr->prev = NULL;
+ ds_entry_ptr->aux_next = NULL;
+ ds_entry_ptr->aux_prev = NULL;
+#ifdef H5_HAVE_PARALLEL
+ pf_entry_ptr->coll_next = NULL;
+ pf_entry_ptr->coll_prev = NULL;
+#endif /* H5_HAVE_PARALLEL */
+
+ /* Initialize cache image related fields */
+ ds_entry_ptr->include_in_image = FALSE;
+ ds_entry_ptr->lru_rank = 0;
+ ds_entry_ptr->image_dirty = FALSE;
+ ds_entry_ptr->fd_parent_count = 0;
+ ds_entry_ptr->fd_parent_addrs = NULL;
+ ds_entry_ptr->fd_child_count = pf_entry_ptr->fd_child_count;
+ ds_entry_ptr->fd_dirty_child_count = 0;
+ ds_entry_ptr->image_fd_height = 0;
+ ds_entry_ptr->prefetched = FALSE;
+ ds_entry_ptr->prefetch_type_id = 0;
+ ds_entry_ptr->age = 0;
+ ds_entry_ptr->prefetched_dirty = pf_entry_ptr->prefetched_dirty;
+#ifndef NDEBUG /* debugging field */
+ ds_entry_ptr->serialization_count = 0;
+#endif /* NDEBUG */
+
+ H5C__RESET_CACHE_ENTRY_STATS(ds_entry_ptr);
+
+ /* Apply the tag to the newly deserialized entry */
+ if(H5C__tag_entry(cache_ptr, ds_entry_ptr, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTTAG, FAIL, "Cannot tag metadata entry")
+
+ /* We have successfully deserialized the prefetched entry.
+ *
+ * Before we return a pointer to the deserialized entry, we must remove
+ * the prefetched entry from the cache, discard it, and replace it with
+ * the deserialized entry. Note that we do not free the prefetched
+ * entries image, as that has been transferred to the deserialized
+ * entry.
+ *
+ * Also note that we have not yet restored any flush dependencies. This
+ * must wait until the deserialized entry is inserted in the cache.
+ *
+ * To delete the prefetched entry from the cache:
+ *
+ * 1) Set pf_entry_ptr->image_ptr to NULL. Since we have already
+ * transferred the buffer containing the image to *ds_entry_ptr,
+ * this is not a memory leak.
+ *
+ * 2) Call H5C__flush_single_entry() with the H5C__FLUSH_INVALIDATE_FLAG
+ * and H5C__FLUSH_CLEAR_ONLY_FLAG flags set.
+ */
+ pf_entry_ptr->image_ptr = NULL;
+ if(pf_entry_ptr->is_dirty) {
+ HDassert(pf_entry_ptr->in_slist);
+ flush_flags |= H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG;
+ } /* end if */
+
+ if(H5C__flush_single_entry(f, dxpl_id, pf_entry_ptr, flush_flags) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTEXPUNGE, FAIL, "can't expunge prefetched entry")
+
+#ifndef NDEBUG /* verify deletion */
+ H5C__SEARCH_INDEX(cache_ptr, addr, pf_entry_ptr, FAIL);
+
+ HDassert(NULL == pf_entry_ptr);
+#endif /* NDEBUG */
+
+ /* Insert the deserialized entry into the cache. */
+ H5C__INSERT_IN_INDEX(cache_ptr, ds_entry_ptr, FAIL)
+
+ HDassert(!ds_entry_ptr->in_slist);
+ if(ds_entry_ptr->is_dirty)
+ H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, ds_entry_ptr, FAIL)
+
+ H5C__UPDATE_RP_FOR_INSERTION(cache_ptr, ds_entry_ptr, FAIL)
+
+ /* Deserializing a prefetched entry is the conceptual equivalent of
+ * loading it from file. If the deserialized entry has a notify callback,
+ * send an "after load" notice now that the deserialized entry is fully
+ * integrated into the cache.
+ */
+ if(ds_entry_ptr->type->notify &&
+ (ds_entry_ptr->type->notify)(H5C_NOTIFY_ACTION_AFTER_LOAD, ds_entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTNOTIFY, FAIL, "can't notify client about entry loaded into cache")
+
+ /* Restore flush dependencies with the flush dependency children
+ * of the prefetched entry. Note that we must protect *ds_entry_ptr
+ * before the call to avoid triggering sanity check failures, and
+ * then unprotect it afterwards.
+ */
+ i = 0;
+ if(fd_children != NULL) {
+ H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, ds_entry_ptr, FAIL)
+ ds_entry_ptr->is_protected = TRUE;
+ while(fd_children[i] != NULL) {
+ /* Sanity checks */
+ HDassert((fd_children[i])->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert((fd_children[i])->prefetched);
+ HDassert((fd_children[i])->fd_parent_count > 0);
+ HDassert((fd_children[i])->fd_parent_addrs);
+
+#ifndef NDEBUG
+ {
+ int j;
+ hbool_t found;
+
+ j = 0;
+ found = FALSE;
+ while((j < (int)((fd_children[i])->fd_parent_count)) && (!found)) {
+ if((fd_children[i])->fd_parent_addrs[j] == ds_entry_ptr->addr)
+ found = TRUE;
+
+ j++;
+ } /* end while */
+ HDassert(found);
+ }
+#endif /* NDEBUG */
+
+ if(H5C_create_flush_dependency(ds_entry_ptr, fd_children[i]) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Can't restore child flush dependency")
+
+ i++;
+ } /* end while */
+
+ H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, ds_entry_ptr, FAIL);
+ ds_entry_ptr->is_protected = FALSE;
+ } /* end if ( fd_children != NULL ) */
+ HDassert((unsigned)i == ds_entry_ptr->fd_child_count);
+
+ ds_entry_ptr->fd_child_count = 0;
+ H5C__UPDATE_STATS_FOR_PREFETCH_HIT(cache_ptr)
+
+ /* finally, pass ds_entry_ptr back to the caller */
+ *entry_ptr_ptr = ds_entry_ptr;
+
+done:
+ if(fd_children)
+ fd_children = (H5C_cache_entry_t **)H5MM_xfree((void *)fd_children);
+
+ /* Release resources on error */
+ if(FAIL == ret_value)
+ if(thing && type->free_icr(thing) < 0)
+ HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "free_icr callback failed")
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__deserialize_prefetched_entry() */
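For context, the routine above is reached when a client protects an address that the cache holds only in prefetched form. A hedged sketch of that call site; the surrounding protect logic, including its NULL error return value, is elided and assumed:

    if(entry_ptr->prefetched) {
        /* The entry was read as part of the cache image block; convert the
         * raw image into the client's native form before handing it out.
         */
        if(H5C__deserialize_prefetched_entry(f, dxpl_id, cache_ptr, &entry_ptr, type, addr, udata) < 0)
            HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, NULL, "can't deserialize prefetched entry")

        HDassert(!entry_ptr->prefetched);
        HDassert(entry_ptr->addr == addr);
    } /* end if */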
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__free_image_entries_array
+ *
+ * Purpose: If the image entries array exists, free the image
+ * associated with each entry, and then free the image
+ * entries array proper.
+ *
+ * Note that by the time this function is called, the cache
+ * should have removed all entries from its data structures.
+ *
+ * Return: SUCCEED on success, and FAIL on failure.
+ *
+ * Programmer: John Mainzer
+ * 8/4/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__free_image_entries_array(H5C_t * cache_ptr)
+{
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity checks */
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->close_warning_received);
+ HDassert(cache_ptr->image_ctl.generate_image);
+ HDassert(cache_ptr->index_len == 0);
+
+ /* Check for entries to free */
+ if(cache_ptr->image_entries != NULL) {
+ unsigned u; /* Local index variable */
+
+ for(u = 0; u < cache_ptr->num_entries_in_image; u++) {
+ H5C_image_entry_t *ie_ptr; /* Image entry to release */
+
+ /* Get pointer to image entry */
+ ie_ptr = &((cache_ptr->image_entries)[u]);
+
+ /* Sanity checks */
+ HDassert(ie_ptr);
+ HDassert(ie_ptr->magic == H5C_IMAGE_ENTRY_T_MAGIC);
+ HDassert(ie_ptr->image_ptr);
+
+ /* Free the parent addrs array if appropriate */
+ if(ie_ptr->fd_parent_addrs) {
+ HDassert(ie_ptr->fd_parent_count > 0);
+
+ ie_ptr->fd_parent_addrs = (haddr_t *)H5MM_xfree(ie_ptr->fd_parent_addrs);
+ } /* end if */
+ else
+ HDassert(ie_ptr->fd_parent_count == 0);
+
+ /* Free the image */
+ ie_ptr->image_ptr = H5MM_xfree(ie_ptr->image_ptr);
+
+ /* Set magic field to bad magic so we can detect freed entries */
+ ie_ptr->magic = H5C_IMAGE_ENTRY_T_BAD_MAGIC;
+ } /* end for */
+
+ /* Free the image entries array */
+ cache_ptr->image_entries = (H5C_image_entry_t *)H5MM_xfree(cache_ptr->image_entries);
+ } /* end if */
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5C__free_image_entries_array() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_force_cache_image_load()
+ *
+ * Purpose: On rare occasions, it is necessary to run
+ * H5MF_tidy_self_referential_fsm_hack() prior to the first
+ * metadata cache access. This is a problem because, if there is
+ * a cache image at the end of the file, that routine will
+ * discard it.
+ *
+ * We solve this issue by calling this function, which will
+ * load the cache image and then call
+ * H5MF_tidy_self_referential_fsm_hack() to discard it.
+ *
+ * Return: SUCCEED on success, and FAIL on failure.
+ *
+ * Programmer: John Mainzer
+ * 1/11/17
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_force_cache_image_load(H5F_t *f, hid_t dxpl_id)
+{
+ H5C_t *cache_ptr;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->load_image);
+
+ /* Load the cache image, if requested */
+ if(cache_ptr->load_image) {
+ cache_ptr->load_image = FALSE;
+ if(H5C__load_cache_image(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, FAIL, "can't load cache image")
+ } /* end if */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_force_cache_image_load() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_get_cache_image_config
+ *
+ * Purpose: Copy the current configuration for cache image generation
+ * on file close into the instance of H5C_cache_image_ctl_t
+ * pointed to by config_ptr.
+ *
+ * Return: SUCCEED on success, and FAIL on failure.
+ *
+ * Programmer: John Mainzer
+ * 7/3/15
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_get_cache_image_config(const H5C_t * cache_ptr,
+ H5C_cache_image_ctl_t *config_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ if((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad cache_ptr on entry")
+ if(config_ptr == NULL)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad config_ptr on entry")
+
+ *config_ptr = cache_ptr->image_ctl;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_get_cache_image_config() */
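The internal getter above shadows the public file-access-property interface; an application requests a cache image through that interface before creating or opening the file. A sketch using the 1.10-era public names, which are assumptions insofar as they do not appear in this patch:

#include "hdf5.h"

/* Build a file access property list that asks for a metadata cache
 * image to be written when the file is closed.
 */
static hid_t
make_fapl_with_cache_image(void)
{
    H5AC_cache_image_config_t img_cfg;
    hid_t fapl_id = H5Pcreate(H5P_FILE_ACCESS);

    img_cfg.version            = H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION;
    img_cfg.generate_image     = 1;   /* write an image on close          */
    img_cfg.save_resize_status = 0;   /* resize status not stored (yet)   */
    img_cfg.entry_ageout       = H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE;

    if(fapl_id < 0 || H5Pset_mdc_image_config(fapl_id, &img_cfg) < 0)
        return -1;

    return fapl_id;
}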
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_image_stats
+ *
+ * Purpose: Prints statistics specific to the cache image.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 10/26/15
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+#if H5C_COLLECT_CACHE_STATS
+H5C_image_stats(H5C_t * cache_ptr, hbool_t print_header)
+#else /* H5C_COLLECT_CACHE_STATS */
+H5C_image_stats(H5C_t * cache_ptr, hbool_t H5_ATTR_UNUSED print_header)
+#endif /* H5C_COLLECT_CACHE_STATS */
+{
+#if H5C_COLLECT_CACHE_STATS
+ int i;
+ int64_t total_hits = 0;
+ int64_t total_misses = 0;
+ double hit_rate;
+ double prefetch_use_rate;
+#endif /* H5C_COLLECT_CACHE_STATS */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ if(!cache_ptr || cache_ptr->magic != H5C__H5C_T_MAGIC)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Bad cache_ptr")
+
+#if H5C_COLLECT_CACHE_STATS
+ for(i = 0; i <= cache_ptr->max_type_id; i++) {
+ total_hits += cache_ptr->hits[i];
+ total_misses += cache_ptr->misses[i];
+ } /* end for */
+
+ if((total_hits > 0) || (total_misses > 0))
+ hit_rate = (double)100.0f * ((double)(total_hits)) / ((double)(total_hits + total_misses));
+ else
+ hit_rate = 0.0f;
+
+ if(cache_ptr->prefetches > 0)
+ prefetch_use_rate = (double)100.0f * ((double)(cache_ptr->prefetch_hits)) /
+ ((double)(cache_ptr->prefetches));
+ else
+ prefetch_use_rate = 0.0f;
+
+ if(print_header) {
+ HDfprintf(stdout,
+ "\nhit prefetches prefetch image pf hit\n");
+ HDfprintf(stdout,
+ "rate: total: dirty: hits: flshs: evct: size: rate:\n");
+ } /* end if */
+
+ HDfprintf(stdout,
+ "%3.1lf %5lld %5lld %5lld %5lld %5lld %5lld %3.1lf\n",
+ hit_rate,
+ (long long)(cache_ptr->prefetches),
+ (long long)(cache_ptr->dirty_prefetches),
+ (long long)(cache_ptr->prefetch_hits),
+ (long long)(cache_ptr->flushes[H5AC_PREFETCHED_ENTRY_ID]),
+ (long long)(cache_ptr->evictions[H5AC_PREFETCHED_ENTRY_ID]),
+ (long long)(cache_ptr->last_image_size),
+ prefetch_use_rate);
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_image_stats() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__read_cache_image
+ *
+ * Purpose: Load the metadata cache image from the specified location
+ * in the file, and return it in the supplied buffer.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 8/16/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__read_cache_image(H5F_t *f, hid_t dxpl_id, H5C_t *cache_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(cache_ptr);
+ HDassert(H5F_addr_defined(cache_ptr->image_addr));
+ HDassert(cache_ptr->image_len > 0);
+ HDassert(cache_ptr->image_buffer);
+
+#ifdef H5_HAVE_PARALLEL
+{
+ H5AC_aux_t *aux_ptr = (H5AC_aux_t *)cache_ptr->aux_ptr;
+ int mpi_result;
+
+ if ( ( NULL == aux_ptr ) || ( aux_ptr->mpi_rank == 0 ) ) {
+
+ HDassert((NULL == aux_ptr) ||
+ (aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC));
+#endif /* H5_HAVE_PARALLEL */
+
+ /* Read the buffer (if serial access, or rank 0 of parallel access) */
+ if ( H5F_block_read(f, H5FD_MEM_SUPER, cache_ptr->image_addr,
+ cache_ptr->image_len, dxpl_id,
+ cache_ptr->image_buffer) < 0)
+
+ HGOTO_ERROR(H5E_CACHE, H5E_READERROR, FAIL, \
+ "Can't read metadata cache image block")
+
+ H5C__UPDATE_STATS_FOR_CACHE_IMAGE_READ(cache_ptr)
+
+#ifdef H5_HAVE_PARALLEL
+ if ( aux_ptr ) {
+
+ /* Broadcast cache image */
+ if ( MPI_SUCCESS !=
+ (mpi_result = MPI_Bcast(cache_ptr->image_buffer,
+ (int)cache_ptr->image_len, MPI_BYTE,
+ 0, aux_ptr->mpi_comm)) )
+
+ HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
+
+ } /* end if */
+ } /* end if */
+ else if ( aux_ptr ) {
+
+ /* Retrieve the contents of the metadata cache image from process 0 */
+ if ( MPI_SUCCESS !=
+ (mpi_result = MPI_Bcast(cache_ptr->image_buffer,
+ (int)cache_ptr->image_len, MPI_BYTE,
+ 0, aux_ptr->mpi_comm)) )
+
+ HMPI_GOTO_ERROR(FAIL, "can't receive cache image MPI_Bcast", \
+ mpi_result)
+ } /* end else-if */
+} /* end block */
+#endif /* H5_HAVE_PARALLEL */
+
+done:
+
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5C__read_cache_image() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__load_cache_image
+ *
+ * Purpose: Read the cache image superblock extension message and
+ * delete it if so directed.
+ *
+ * Then load the cache image block at the specified location,
+ * decode it, and insert its contents into the metadata
+ * cache.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 7/6/15
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C__load_cache_image(H5F_t *f, hid_t dxpl_id)
+{
+ H5C_t * cache_ptr;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+
+ /* If the image address is defined, load the image, decode it,
+ * and insert its contents into the metadata cache.
+ *
+ * Note that under normal operating conditions, it is an error if the
+ * image address is HADDR_UNDEF. However, to facilitate testing,
+ * we allow this special value of the image address which means that
+ * no image exists, and that the load operation should be skipped
+ * silently.
+ */
+ if(H5F_addr_defined(cache_ptr->image_addr)) {
+ /* Sanity checks */
+ HDassert(cache_ptr->image_len > 0);
+ HDassert(cache_ptr->image_buffer == NULL);
+
+ /* Allocate space for the image */
+ if(NULL == (cache_ptr->image_buffer = H5MM_malloc(cache_ptr->image_len + 1)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for cache image buffer")
+
+ /* Load the image from file */
+ if(H5C__read_cache_image(f, dxpl_id, cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_READERROR, FAIL, "Can't read metadata cache image block")
+
+ /* Reconstruct cache contents, from image */
+ if(H5C__reconstruct_cache_contents(f, dxpl_id, cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTDECODE, FAIL, "Can't reconstruct cache contents from image block")
+
+ /* Free the image buffer */
+ cache_ptr->image_buffer = H5MM_xfree(cache_ptr->image_buffer);
+
+ /* Update stats -- must do this now, as we are about
+ * to discard the size of the cache image.
+ */
+ H5C__UPDATE_STATS_FOR_CACHE_IMAGE_LOAD(cache_ptr)
+
+ cache_ptr->image_loaded = TRUE;
+ } /* end if */
+
+ /* If directed, free the on disk metadata cache image */
+ if(cache_ptr->delete_image) {
+ if(H5F_super_ext_remove_msg(f, dxpl_id, H5O_MDCI_MSG_ID) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove metadata cache image message from superblock extension")
+
+ /* Reset image block values */
+ cache_ptr->image_len = 0;
+ cache_ptr->image_data_len = 0;
+ cache_ptr->image_addr = HADDR_UNDEF;
+ } /* end if */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__load_cache_image() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_load_cache_image_on_next_protect()
+ *
+ * Purpose: Note the fact that a metadata cache image superblock
+ * extension message exists, along with the base address
+ * and length of the metadata cache image block.
+ *
+ * Once this notification is received the metadata cache
+ * image block must be read, decoded, and loaded into the
+ * cache on the next call to H5C_protect().
+ *
+ * Further, if the file is opened R/W, the metadata cache
+ * image superblock extension message must be deleted from
+ *              the superblock extension and the image block freed.
+ *
+ *              Conversely, if the file is opened R/O, the metadata
+ * cache image superblock extension message and image block
+ * must be left as is. Further, any dirty entries in the
+ * cache image block must be marked as clean to avoid
+ * attempts to write them on file close.
+ *
+ * Return: SUCCEED
+ *
+ * Programmer: John Mainzer
+ * 7/6/15
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_load_cache_image_on_next_protect(H5F_t *f, haddr_t addr, hsize_t len,
+ hbool_t rw)
+{
+ H5C_t *cache_ptr;
+
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+
+ /* Set information needed to load cache image */
+    cache_ptr->image_addr = addr;
+ cache_ptr->image_len = len;
+ cache_ptr->load_image = TRUE;
+ cache_ptr->delete_image = rw;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5C_load_cache_image_on_next_protect() */
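+
+/* A minimal usage sketch (illustrative only; not compiled).  It assumes a
+ * hypothetical caller that has just decoded an MDCI superblock extension
+ * message into addr and len; that decode is not shown here.
+ *
+ *     hbool_t rw = (H5F_INTENT(f) & H5F_ACC_RDWR) != 0;
+ *
+ *     if(H5C_load_cache_image_on_next_protect(f, addr, len, rw) < 0)
+ *         // handle error -- the current implementation always succeeds
+ *
+ * After this call, the next H5C_protect() (or a call to
+ * H5C_force_cache_image_load()) reads, decodes, and inserts the image via
+ * H5C__load_cache_image().
+ */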
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__image_entry_cmp
+ *
+ * Purpose: Comparison callback for qsort(3) on image entries.
+ * Entries are sorted first by flush dependency height,
+ * and then by LRU rank.
+ *
+ * Note: Entries with a _greater_ flush dependency height should
+ * be sorted earlier than entries with lower heights, since
+ *              leaves in the flush dependency graph are at height 0, and their
+ * parents need to be earlier in the image, so that they can
+ * construct their flush dependencies when decoded.
+ *
+ * Return: An integer less than, equal to, or greater than zero if the
+ * first entry is considered to be respectively less than,
+ * equal to, or greater than the second.
+ *
+ * Programmer: Quincey Koziol
+ * 1/20/16
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+H5C__image_entry_cmp(const void *_entry1, const void *_entry2)
+{
+ const H5C_image_entry_t *entry1 = (const H5C_image_entry_t *)_entry1; /* Pointer to first image entry to compare */
+ const H5C_image_entry_t *entry2 = (const H5C_image_entry_t *)_entry2; /* Pointer to second image entry to compare */
+ int ret_value = 0; /* Return value */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity checks */
+ HDassert(entry1);
+ HDassert(entry2);
+
+ if(entry1->image_fd_height > entry2->image_fd_height)
+ ret_value = -1;
+ else if(entry1->image_fd_height < entry2->image_fd_height)
+ ret_value = 1;
+ else {
+ /* Sanity check */
+ HDassert(entry1->lru_rank >= -1);
+ HDassert(entry2->lru_rank >= -1);
+
+ if(entry1->lru_rank < entry2->lru_rank)
+ ret_value = -1;
+ else if(entry1->lru_rank > entry2->lru_rank)
+ ret_value = 1;
+ } /* end else */
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__image_entry_cmp() */
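+
+/* A small worked example of the ordering imposed by H5C__image_entry_cmp()
+ * (hypothetical entries, shown as (image_fd_height, lru_rank) pairs):
+ *
+ *     A = (2, -1),  B = (1, 3),  C = (1, 7),  D = (0, 5)
+ *
+ * HDqsort() with this comparator yields A, B, C, D -- greater flush
+ * dependency heights first (so parents precede their children in the
+ * image), with ties broken by ascending LRU rank (B before C).
+ */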
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__prep_image_for_file_close
+ *
+ * Purpose: The objective of the call is to allow the metadata cache
+ * to do any preparatory work prior to generation of a
+ * cache image.
+ *
+ * In particular, the cache must
+ *
+ * 1) serialize all its entries,
+ *
+ * 2) compute the size of the metadata cache image,
+ *
+ * 3) allocate space for the metadata cache image, and
+ *
+ * 4) setup the metadata cache image superblock extension
+ * message with the address and size of the metadata
+ * cache image.
+ *
+ * The parallel case is complicated by the fact that
+ * while all metadata caches must contain the same set of
+ * dirty entries, there is no such requirement for clean
+ * entries or the order that entries appear in the LRU.
+ *
+ * Thus, there is no requirement that different processes
+ * will construct cache images of the same size.
+ *
+ * This is not a major issue as long as all processes include
+ * the same set of dirty entries in the cache -- as they
+ * currently do (note that this will change when we implement
+ * the ageout feature). Since only the process zero cache
+ * writes the cache image, all that is necessary is to
+ * broadcast the process zero cache size for use in the
+ * superblock extension messages and cache image block
+ * allocations.
+ *
+ * Note: At present, cache image is disabled in the
+ * parallel case as the new collective metadata write
+ * code must be modified to support cache image.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 7/3/15
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C__prep_image_for_file_close(H5F_t *f, hid_t dxpl_id, hbool_t *image_generated)
+{
+ H5C_t * cache_ptr = NULL;
+ haddr_t eoa_frag_addr = HADDR_UNDEF;
+ hsize_t eoa_frag_size = 0;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(f->shared->cache);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(image_generated);
+
+ /* If the file is opened and closed without any access to
+ * any group or data set, it is possible that the cache image (if
+ * it exists) has not been read yet. Do this now if required.
+ */
+ if(cache_ptr->load_image) {
+ cache_ptr->load_image = FALSE;
+ if(H5C__load_cache_image(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTLOAD, FAIL, "can't load cache image")
+ } /* end if */
+
+ /* Before we start to generate the cache image (if requested), verify
+ * that the superblock supports superblock extension messages, and
+ * silently cancel any request for a cache image if it does not.
+ *
+ * Ideally, we would do this when the cache image is requested,
+     * but the necessary information is not necessarily available at that
+     * time -- hence this last-minute check.
+ *
+ * Note that under some error conditions, the superblock will be
+ * undefined in this case as well -- if so, assume that the
+ * superblock does not support superblock extension messages.
+ */
+ if((NULL == f->shared->sblock) ||
+ (f->shared->sblock->super_vers < HDF5_SUPERBLOCK_VERSION_2)) {
+ H5C_cache_image_ctl_t default_image_ctl = H5C__DEFAULT_CACHE_IMAGE_CTL;
+
+ cache_ptr->image_ctl = default_image_ctl;
+ HDassert(!(cache_ptr->image_ctl.generate_image));
+ } /* end if */
+
+ /* Generate the cache image, if requested */
+ if(cache_ptr->image_ctl.generate_image) {
+ /* Create the cache image super block extension message.
+ *
+ * Note that the base address and length of the metadata cache
+ * image are undefined at this point, and thus will have to be
+ * updated later.
+ *
+ * Create the super block extension message now so that space
+ * is allocated for it (if necessary) before we allocate space
+ * for the cache image block.
+ *
+ * To simplify testing, do this only if the
+ * H5C_CI__GEN_MDCI_SBE_MESG bit is set in
+ * cache_ptr->image_ctl.flags.
+ */
+ if(cache_ptr->image_ctl.flags & H5C_CI__GEN_MDCI_SBE_MESG)
+ if(H5C__write_cache_image_superblock_msg(f, dxpl_id, TRUE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "creation of cache image SB mesg failed.")
+
+ /* Serialize the cache */
+ if(H5C__serialize_cache(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "serialization of the cache failed")
+
+ /* Scan the cache and record data needed to construct the
+ * cache image. In particular, for each entry we must record:
+ *
+ * 1) rank in LRU (if entry is in LRU)
+ *
+ * 2) Whether the entry is dirty prior to flush of
+ * cache just prior to close.
+ *
+ * 3) Addresses of flush dependency parents (if any).
+ *
+ * 4) Number of flush dependency children (if any).
+ *
+ * In passing, also compute the size of the metadata cache
+ * image. With the recent modifications of the free space
+ * manager code, this size should be correct.
+ */
+ if(H5C__prep_for_file_close__scan_entries(f, cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5C__prep_for_file_close__scan_entries failed")
+ HDassert(HADDR_UNDEF == cache_ptr->image_addr);
+
+#ifdef H5_HAVE_PARALLEL
+ /* In the parallel case, overwrite the image_len with the
+ * value computed by process 0.
+ */
+ if(cache_ptr->aux_ptr) { /* we have multiple processes */
+ int mpi_result;
+ unsigned p0_image_len;
+ H5AC_aux_t * aux_ptr;
+
+ aux_ptr = (H5AC_aux_t *)cache_ptr->aux_ptr;
+ if(aux_ptr->mpi_rank == 0) {
+ aux_ptr->p0_image_len = (unsigned)cache_ptr->image_data_len;
+ p0_image_len = aux_ptr->p0_image_len;
+
+ if(MPI_SUCCESS != (mpi_result = MPI_Bcast(&p0_image_len, 1, MPI_UNSIGNED, 0, aux_ptr->mpi_comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
+
+ HDassert(p0_image_len == aux_ptr->p0_image_len);
+ } /* end if */
+ else {
+ if(MPI_SUCCESS != (mpi_result = MPI_Bcast(&p0_image_len, 1, MPI_UNSIGNED, 0, aux_ptr->mpi_comm)))
+ HMPI_GOTO_ERROR(FAIL, "MPI_Bcast failed", mpi_result)
+
+ aux_ptr->p0_image_len = p0_image_len;
+ } /* end else */
+
+ /* Allocate space for a cache image of size equal to that
+ * computed by the process 0. This may be different from
+ * cache_ptr->image_data_len if mpi_rank != 0. However, since
+ * cache image write is suppressed on all processes other than
+ * process 0, this doesn't matter.
+ *
+ * Note that we allocate the cache image directly from the file
+ * driver so as to avoid unsettling the free space managers.
+ */
+ if(HADDR_UNDEF == (cache_ptr->image_addr = H5FD_alloc(f->shared->lf, dxpl_id, H5FD_MEM_SUPER, f,
+ (hsize_t)p0_image_len, &eoa_frag_addr, &eoa_frag_size)))
+ HGOTO_ERROR(H5E_CACHE, H5E_NOSPACE, FAIL, "can't allocate file space for metadata cache image")
+ } /* end if */
+ else
+#endif /* H5_HAVE_PARALLEL */
+        /* Allocate the cache image block.  Note that we allocate
+         * this space directly from the file driver so as to avoid
+ * unsettling the free space managers.
+ */
+ if(HADDR_UNDEF == (cache_ptr->image_addr = H5FD_alloc(f->shared->lf, dxpl_id, H5FD_MEM_SUPER, f,
+ (hsize_t)(cache_ptr->image_data_len), &eoa_frag_addr, &eoa_frag_size)))
+ HGOTO_ERROR(H5E_CACHE, H5E_NOSPACE, FAIL, "can't allocate file space for metadata cache image")
+
+ /* Make note of the eoa after allocation of the cache image
+ * block. This value is used for sanity checking when we
+ * shutdown the self referential free space managers after
+ * we destroy the metadata cache.
+ */
+ HDassert(HADDR_UNDEF == f->shared->eoa_post_mdci_fsalloc);
+ if(HADDR_UNDEF == (f->shared->eoa_post_mdci_fsalloc = H5FD_get_eoa(f->shared->lf, H5FD_MEM_DEFAULT)) )
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to get file size")
+
+ /* For now, drop any fragment left over from the allocation of the
+ * image block on the ground. A fragment should only be returned
+ * if the underlying file alignment is greater than 1.
+ *
+ * Clean this up eventually by extending the size of the cache
+         * image block to the next alignment boundary, and then setting
+ * the image_data_len to the actual size of the cache_image.
+ *
+ * On the off chance that there is some other way to get a
+         * fragment on a cache image allocation, leave the following
+ * assertion in the code so we will find out.
+ */
+ HDassert((eoa_frag_size == 0) || (f->shared->alignment != 1));
+
+ /* Eventually it will be possible for the length of the cache image
+ * block on file to be greater than the size of the data it
+ * contains. However, for now they must be the same. Set
+ * cache_ptr->image_len accordingly.
+ */
+ cache_ptr->image_len = cache_ptr->image_data_len;
+
+ /* update the metadata cache image superblock extension
+ * message with the new cache image block base address and
+ * length.
+ *
+ * to simplify testing, do this only if the
+ * H5C_CI__GEN_MDC_IMAGE_BLK bit is set in
+ * cache_ptr->image_ctl.flags.
+ */
+ if(cache_ptr->image_ctl.flags & H5C_CI__GEN_MDC_IMAGE_BLK)
+ if(H5C__write_cache_image_superblock_msg(f, dxpl_id, FALSE) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "update of cache image SB mesg failed")
+
+ /* At this point:
+ *
+ * 1) space in the file for the metadata cache image
+ * is allocated,
+ *
+ * 2) the metadata cache image superblock extension
+ * message exists and (if so configured) contains
+ * the correct data,
+ *
+ * 3) All entries in the cache that will appear in the
+ * cache image are serialized with up to date images.
+ *
+ * Since we just updated the cache image message,
+ * the super block extension message is dirty. However,
+ * since the superblock and the superblock extension
+ * can't be included in the cache image, this is a non-
+ * issue.
+ *
+         *   4) All entries in the cache that will be included in
+         *      the cache image are marked as such, and we have a count
+ * of same.
+ *
+ * 5) Flush dependency heights are calculated for all
+ * entries that will be included in the cache image.
+ *
+ * If there are any entries to be included in the metadata cache
+ * image, allocate, populate, and sort the image_entries array.
+ *
+ * If the metadata cache image will be empty, delete the
+ * metadata cache image superblock extension message, set
+ * cache_ptr->image_ctl.generate_image to FALSE. This will
+ * allow the file close to continue normally without the
+         * unnecessary generation of the metadata cache image.
+ */
+ if(cache_ptr->num_entries_in_image > 0) {
+ if(H5C__prep_for_file_close__setup_image_entries_array(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTINIT, FAIL, "can't setup image entries array.")
+
+ /* Sort the entries */
+ HDqsort(cache_ptr->image_entries, (size_t)cache_ptr->num_entries_in_image,
+ sizeof(H5C_image_entry_t), H5C__image_entry_cmp);
+ } /* end if */
+ else { /* cancel creation of metadata cache image */
+ HDassert(cache_ptr->image_entries == NULL);
+
+ /* To avoid breaking the control flow tests, only delete
+ * the mdci superblock extension message if the
+ * H5C_CI__GEN_MDC_IMAGE_BLK flag is set in
+ * cache_ptr->image_ctl.flags.
+ */
+ if(cache_ptr->image_ctl.flags & H5C_CI__GEN_MDC_IMAGE_BLK)
+ if(H5F_super_ext_remove_msg(f, dxpl_id, H5O_MDCI_MSG_ID) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTREMOVE, FAIL, "can't remove MDC image msg from superblock ext")
+
+ cache_ptr->image_ctl.generate_image = FALSE;
+ } /* end else */
+
+ /* Indicate that a cache image was generated */
+ *image_generated = TRUE;
+ } /* end if */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__prep_image_for_file_close() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_set_cache_image_config
+ *
+ * Purpose: If *config_ptr contains valid data, copy it into the
+ * image_ctl field of *cache_ptr. Make adjustments for
+ * changes in configuration as required.
+ *
+ * If the file is open read only, silently
+ * force the cache image configuration to its default
+ * (which disables construction of a cache image).
+ *
+ * Note that in addition to being inapplicable in the
+ * read only case, cache image is also inapplicable if
+ * the superblock does not support superblock extension
+ * messages. Unfortunately, this information need not
+ * be available at this point. Thus we check for this
+ * later, in H5C_prep_for_file_close() and cancel the
+ * cache image request if appropriate.
+ *
+ * Fail if the new configuration is invalid.
+ *
+ * Return: SUCCEED on success, and FAIL on failure.
+ *
+ * Programmer: John Mainzer
+ * 7/3/15
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_set_cache_image_config(const H5F_t *f, H5C_t *cache_ptr,
+ H5C_cache_image_ctl_t *config_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+    HDassert(f->shared->cache == cache_ptr);
+
+ /* Check arguments */
+ if((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad cache_ptr on entry")
+
+ /* Validate the config: */
+ if(H5C_validate_cache_image_config(config_ptr) < 0)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "invalid cache image configuration")
+
+#ifdef H5_HAVE_PARALLEL
+ /* The collective metadata write code is not currently compatible
+ * with cache image. Until this is fixed, suppress cache image silently
+ * if there is more than one process.
+ * JRM -- 11/8/16
+ */
+ if(cache_ptr->aux_ptr) {
+ H5C_cache_image_ctl_t default_image_ctl = H5C__DEFAULT_CACHE_IMAGE_CTL;
+
+ cache_ptr->image_ctl = default_image_ctl;
+ HDassert(!(cache_ptr->image_ctl.generate_image));
+ } /* end if */
+ else {
+#endif /* H5_HAVE_PARALLEL */
+ /* A cache image can only be generated if the file is opened read / write
+ * and the superblock supports superblock extension messages.
+ *
+ * However, the superblock version is not available at this point --
+ * hence we can only check the former requirement now. Do the latter
+     * check just before we construct the image.
+ *
+ * If the file is opened read / write, apply the supplied configuration.
+ *
+ * If it is not, set the image configuration to the default, which has
+ * the effect of silently disabling the cache image if it was requested.
+ */
+ if(H5F_INTENT(f) & H5F_ACC_RDWR)
+ cache_ptr->image_ctl = *config_ptr;
+ else {
+ H5C_cache_image_ctl_t default_image_ctl = H5C__DEFAULT_CACHE_IMAGE_CTL;
+
+ cache_ptr->image_ctl = default_image_ctl;
+ HDassert(!(cache_ptr->image_ctl.generate_image));
+ } /* end else */
+#ifdef H5_HAVE_PARALLEL
+ } /* end else */
+#endif /* H5_HAVE_PARALLEL */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_set_cache_image_config() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C_validate_cache_image_config()
+ *
+ * Purpose: Run a sanity check on the provided instance of struct
+ *              H5C_cache_image_ctl_t.
+ *
+ * Do nothing and return SUCCEED if no errors are detected,
+ * and flag an error and return FAIL otherwise.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 6/15/15
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_validate_cache_image_config(H5C_cache_image_ctl_t * ctl_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ if(ctl_ptr == NULL)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "NULL ctl_ptr on entry")
+ if(ctl_ptr->version != H5C__CURR_CACHE_IMAGE_CTL_VER)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Unknown cache image control version")
+
+ /* At present, we do not support inclusion of the adaptive resize
+ * configuration in the cache image. Thus the save_resize_status
+ * field must be FALSE.
+ */
+ if(ctl_ptr->save_resize_status != FALSE)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "unexpected value in save_resize_status field")
+
+ /* At present, we do not support prefetched entry ageouts. Thus
+ * the entry_ageout field must be set to
+ * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE.
+ */
+ if(ctl_ptr->entry_ageout != H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "unexpected value in entry_ageout field")
+
+ if((ctl_ptr->flags & ~H5C_CI__ALL_FLAGS) != 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "unknown flag set")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_validate_cache_image_config() */
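+
+/* For illustration only, a configuration that passes the checks above under
+ * the current restrictions.  Field values follow the tests in this function;
+ * generate_image is not examined here and is shown only for completeness.
+ *
+ *     H5C_cache_image_ctl_t ctl = H5C__DEFAULT_CACHE_IMAGE_CTL;
+ *
+ *     ctl.version            = H5C__CURR_CACHE_IMAGE_CTL_VER;
+ *     ctl.generate_image     = TRUE;
+ *     ctl.save_resize_status = FALSE;        // required for now
+ *     ctl.entry_ageout       = H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE;
+ *     ctl.flags              = H5C_CI__ALL_FLAGS;   // any subset is legal
+ *
+ *     if(H5C_validate_cache_image_config(&ctl) < 0)
+ *         // not reached for the values above
+ */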
+
+
+/*************************************************************************/
+/**************************** Private Functions: *************************/
+/*************************************************************************/
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__cache_image_block_entry_header_size
+ *
+ * Purpose:     Compute the size of an entry header in the metadata cache
+ *              image block, and return the value.
+ *
+ * Return:      Size of an entry header in the metadata cache image
+ *              block, in bytes.
+ *
+ * Programmer: John Mainzer
+ * 7/27/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static size_t
+H5C__cache_image_block_entry_header_size(const H5F_t * f)
+{
+ size_t ret_value = 0; /* Return value */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Set return value */
+ ret_value = (size_t)( 1 + /* type */
+ 1 + /* flags */
+ 1 + /* ring */
+ 1 + /* age */
+ 2 + /* dependency child count */
+ 2 + /* dirty dep child count */
+ 2 + /* dependency parent count */
+ 4 + /* index in LRU */
+ H5F_SIZEOF_ADDR(f) + /* entry offset */
+ H5F_SIZEOF_SIZE(f) ); /* entry length */
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__cache_image_block_entry_header_size() */
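+
+/* A worked example of the sum above, assuming 8-byte file addresses and
+ * 8-byte lengths (i.e. H5F_SIZEOF_ADDR(f) == H5F_SIZEOF_SIZE(f) == 8):
+ *
+ *     1 + 1 + 1 + 1 + 2 + 2 + 2 + 4 + 8 + 8 = 30 bytes per entry header.
+ *
+ * The actual value varies with the file's address and length sizes.
+ */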
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__cache_image_block_header_size
+ *
+ * Purpose: Compute the size of the header of the metadata cache
+ * image block, and return the value.
+ *
+ * Return: Size of the header section of the metadata cache image
+ * block in bytes.
+ *
+ * Programmer: John Mainzer
+ * 7/27/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static size_t
+H5C__cache_image_block_header_size(const H5F_t * f)
+{
+ size_t ret_value = 0; /* Return value */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Set return value */
+ ret_value = (size_t)( 4 + /* signature */
+ 1 + /* version */
+ 1 + /* flags */
+ H5F_SIZEOF_SIZE(f) + /* image data length */
+ 4 ); /* num_entries */
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__cache_image_block_header_size() */
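+
+/* The corresponding worked example for the block header, again assuming
+ * 8-byte lengths (H5F_SIZEOF_SIZE(f) == 8):
+ *
+ *     4 + 1 + 1 + 8 + 4 = 18 bytes for the cache image block header.
+ */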
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__decode_cache_image_header()
+ *
+ * Purpose: Decode the metadata cache image buffer header from the
+ * supplied buffer and load the data into the supplied instance
+ * of H5C_t. Advances the buffer pointer to the first byte
+ * after the header image, or unchanged on failure.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 8/6/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__decode_cache_image_header(const H5F_t *f, H5C_t *cache_ptr,
+ const uint8_t **buf)
+{
+ uint8_t version;
+ uint8_t flags;
+ hbool_t have_resize_status = FALSE;
+ size_t actual_header_len;
+ size_t expected_header_len;
+ const uint8_t * p;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(buf);
+ HDassert(*buf);
+
+ /* Point to buffer to decode */
+ p = *buf;
+
+ /* Check signature */
+ if(HDmemcmp(p, H5C__MDCI_BLOCK_SIGNATURE, (size_t)H5C__MDCI_BLOCK_SIGNATURE_LEN))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad metadata cache image header signature")
+ p += H5C__MDCI_BLOCK_SIGNATURE_LEN;
+
+ /* Check version */
+ version = *p++;
+ if(version != (uint8_t)H5C__MDCI_BLOCK_VERSION_0)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad metadata cache image version")
+
+ /* Decode flags */
+ flags = *p++;
+ if(flags & H5C__MDCI_HEADER_HAVE_RESIZE_STATUS)
+ have_resize_status = TRUE;
+ if(have_resize_status)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "MDC resize status not yet supported")
+
+ /* Read image data length */
+ H5F_DECODE_LENGTH(f, p, cache_ptr->image_data_len);
+
+ /* For now -- will become <= eventually */
+ if(cache_ptr->image_data_len != cache_ptr->image_len)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad metadata cache image data length")
+
+ /* Read num entries */
+ UINT32DECODE(p, cache_ptr->num_entries_in_image);
+ if(cache_ptr->num_entries_in_image == 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad metadata cache entry count")
+
+ /* Verify expected length of header */
+ actual_header_len = (size_t)(p - *buf);
+ expected_header_len = H5C__cache_image_block_header_size(f);
+ if(actual_header_len != expected_header_len)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad header image len")
+
+ /* Update buffer pointer */
+ *buf = p;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__decode_cache_image_header() */
+
+#ifndef NDEBUG
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__decode_cache_image_entry()
+ *
+ * Purpose: Decode the metadata cache image entry from the supplied
+ * buffer into the supplied instance of H5C_image_entry_t.
+ * This includes allocating a buffer for the entry image,
+ *              loading it, and setting ie_ptr->image_ptr to point to
+ * the buffer.
+ *
+ * Advances the buffer pointer to the first byte
+ * after the entry, or unchanged on failure.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 8/6/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__decode_cache_image_entry(const H5F_t *f, const H5C_t *cache_ptr,
+ const uint8_t **buf, unsigned entry_num)
+{
+ hbool_t is_dirty = FALSE;
+ hbool_t in_lru = FALSE; /* Only used in assertions */
+ hbool_t is_fd_parent = FALSE; /* Only used in assertions */
+ hbool_t is_fd_child = FALSE; /* Only used in assertions */
+ haddr_t addr;
+ hsize_t size = 0;
+ void * image_ptr;
+ uint8_t flags = 0;
+ uint8_t type_id;
+ uint8_t ring;
+ uint8_t age;
+ uint16_t fd_child_count;
+ uint16_t fd_dirty_child_count;
+ uint16_t fd_parent_count;
+ haddr_t * fd_parent_addrs = NULL;
+ int32_t lru_rank;
+ H5C_image_entry_t * ie_ptr = NULL;
+ const uint8_t * p;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(cache_ptr == f->shared->cache);
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(buf);
+ HDassert(*buf);
+ HDassert(entry_num < cache_ptr->num_entries_in_image);
+ ie_ptr = &((cache_ptr->image_entries)[entry_num]);
+ HDassert(ie_ptr);
+ HDassert(ie_ptr->magic == H5C_IMAGE_ENTRY_T_MAGIC);
+
+ /* Get pointer to buffer */
+ p = *buf;
+
+ /* Decode type id */
+ type_id = *p++;
+
+ /* Decode flags */
+ flags = *p++;
+ if(flags & H5C__MDCI_ENTRY_DIRTY_FLAG)
+ is_dirty = TRUE;
+ if(flags & H5C__MDCI_ENTRY_IN_LRU_FLAG)
+ in_lru = TRUE;
+ if(flags & H5C__MDCI_ENTRY_IS_FD_PARENT_FLAG)
+ is_fd_parent = TRUE;
+ if(flags & H5C__MDCI_ENTRY_IS_FD_CHILD_FLAG)
+ is_fd_child = TRUE;
+
+ /* Decode ring */
+ ring = *p++;
+ HDassert(ring > (uint8_t)(H5C_RING_UNDEFINED));
+ HDassert(ring < (uint8_t)(H5C_RING_NTYPES));
+
+ /* Decode age */
+ age = *p++;
+
+ /* Decode dependency child count */
+ UINT16DECODE(p, fd_child_count);
+ HDassert((is_fd_parent && fd_child_count > 0) || (!is_fd_parent && fd_child_count == 0));
+
+ /* Decode dirty dependency child count */
+ UINT16DECODE(p, fd_dirty_child_count);
+ if(fd_dirty_child_count > fd_child_count)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "invalid dirty flush dependency child count")
+
+ /* Decode dependency parent count */
+ UINT16DECODE(p, fd_parent_count);
+ HDassert((is_fd_child && fd_parent_count > 0) || (!is_fd_child && fd_parent_count == 0));
+
+ /* Decode index in LRU */
+ INT32DECODE(p, lru_rank);
+ HDassert((in_lru && lru_rank >= 0) || (!in_lru && lru_rank == -1));
+
+ /* Decode entry offset */
+ H5F_addr_decode(f, &p, &addr);
+ if(!H5F_addr_defined(addr))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "invalid entry offset")
+
+ /* Decode entry length */
+ H5F_DECODE_LENGTH(f, p, size);
+ if(size == 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "invalid entry size")
+
+ /* Verify expected length of entry image */
+ if((size_t)(p - *buf) != H5C__cache_image_block_entry_header_size(f))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADSIZE, FAIL, "Bad entry image len")
+
+ /* If parent count greater than zero, allocate array for parent
+ * addresses, and decode addresses into the array.
+ */
+ if(fd_parent_count > 0) {
+ int i; /* Local index variable */
+
+ if(NULL == (fd_parent_addrs = (haddr_t *)H5MM_malloc((size_t)(fd_parent_count) * H5F_SIZEOF_ADDR(f))))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for fd parent addrs buffer")
+
+ for(i = 0; i < fd_parent_count; i++) {
+ H5F_addr_decode(f, &p, &(fd_parent_addrs[i]));
+ if(!H5F_addr_defined(fd_parent_addrs[i]))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "invalid flush dependency parent offset")
+ } /* end for */
+ } /* end if */
+
+ /* Allocate buffer for entry image */
+ if(NULL == (image_ptr = H5MM_malloc(size + H5C_IMAGE_EXTRA_SPACE)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for on disk image buffer")
+
+#if H5C_DO_MEMORY_SANITY_CHECKS
+ HDmemcpy(((uint8_t *)image_ptr) + size, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
+#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
+
+ /* Copy the entry image from the cache image block */
+ HDmemcpy(image_ptr, p, size);
+ p += size;
+
+ /* Copy data into target */
+ ie_ptr->addr = addr;
+ ie_ptr->size = size;
+ ie_ptr->ring = (H5C_ring_t)ring;
+ ie_ptr->age = (int32_t)age;
+ ie_ptr->type_id = (int32_t)type_id;
+ ie_ptr->lru_rank = lru_rank;
+ ie_ptr->is_dirty = is_dirty;
+ ie_ptr->fd_child_count = (uint64_t)fd_child_count;
+ ie_ptr->fd_dirty_child_count = (uint64_t)fd_dirty_child_count;
+ ie_ptr->fd_parent_count = (uint64_t)fd_parent_count;
+ ie_ptr->fd_parent_addrs = fd_parent_addrs;
+ ie_ptr->image_ptr = image_ptr;
+
+ /* Update buffer pointer */
+ *buf = p;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__decode_cache_image_entry() */
+#endif /* NDEBUG */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__destroy_pf_entry_child_flush_deps()
+ *
+ * Purpose:     Destroy all flush dependencies in which the supplied
+ * prefetched entry is the parent. Note that the children
+ * in these flush dependencies must be prefetched entries as
+ * well.
+ *
+ * As this action is part of the process of transferring all
+ * such flush dependencies to the deserialized version of the
+ * prefetched entry, ensure that the data necessary to complete
+ * the transfer is retained.
+ *
+ * Note: The current implementation of this function is
+ * quite inefficient -- mostly due to the current
+ * implementation of flush dependencies. This should
+ * be fixed at some point.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 8/11/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__destroy_pf_entry_child_flush_deps(H5C_t *cache_ptr,
+ H5C_cache_entry_t *pf_entry_ptr, H5C_cache_entry_t **fd_children)
+{
+ H5C_cache_entry_t * entry_ptr;
+ unsigned entries_visited = 0;
+ int fd_children_found = 0;
+ hbool_t found;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(pf_entry_ptr);
+ HDassert(pf_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(pf_entry_ptr->type);
+ HDassert(pf_entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID);
+ HDassert(pf_entry_ptr->prefetched);
+ HDassert(pf_entry_ptr->fd_child_count > 0);
+ HDassert(fd_children);
+
+ /* Scan each entry on the index list */
+ entry_ptr = cache_ptr->il_head;
+ while(entry_ptr != NULL) {
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+
+ /* Here we look at entry_ptr->flush_dep_nparents and not
+ * entry_ptr->fd_parent_count as it is possible that some
+ * or all of the prefetched flush dependency child relationships
+ * have already been destroyed.
+ */
+ if(entry_ptr->prefetched && (entry_ptr->flush_dep_nparents > 0)) {
+ unsigned u; /* Local index variable */
+
+ /* Re-init */
+ u = 0;
+ found = FALSE;
+
+ /* Sanity checks */
+ HDassert(entry_ptr->type);
+ HDassert(entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID);
+ HDassert(entry_ptr->fd_parent_count >= entry_ptr->flush_dep_nparents);
+ HDassert(entry_ptr->fd_parent_addrs);
+ HDassert(entry_ptr->flush_dep_parent);
+
+ /* Look for correct entry */
+ while(!found && (u < entry_ptr->fd_parent_count)) {
+ /* Sanity check entry */
+ HDassert(entry_ptr->flush_dep_parent[u]);
+ HDassert(entry_ptr->flush_dep_parent[u]->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+
+ /* Correct entry? */
+ if(pf_entry_ptr == entry_ptr->flush_dep_parent[u])
+ found = TRUE;
+
+ u++;
+ } /* end while */
+
+ if(found) {
+ HDassert(NULL == fd_children[fd_children_found]);
+
+ /* Remove flush dependency */
+ fd_children[fd_children_found] = entry_ptr;
+ fd_children_found++;
+ if(H5C_destroy_flush_dependency(pf_entry_ptr, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "can't destroy pf entry child flush dependency")
+
+#ifndef NDEBUG
+ /* Sanity check -- verify that the address of the parent
+ * appears in entry_ptr->fd_parent_addrs. Must do a search,
+ * as with flush dependency creates and destroys,
+ * entry_ptr->fd_parent_addrs and entry_ptr->flush_dep_parent
+ * can list parents in different order.
+ */
+ found = FALSE;
+ u = 0;
+ while(!found && u < entry_ptr->fd_parent_count) {
+ if(pf_entry_ptr->addr == entry_ptr->fd_parent_addrs[u])
+ found = TRUE;
+ u++;
+ } /* end while */
+ HDassert(found);
+#endif /* NDEBUG */
+ } /* end if */
+ } /* end if */
+
+ entries_visited++;
+ entry_ptr = entry_ptr->il_next;
+ } /* end while */
+
+ /* Post-op sanity checks */
+ HDassert(NULL == fd_children[fd_children_found]);
+ HDassert((unsigned)fd_children_found == pf_entry_ptr->fd_child_count);
+ HDassert(entries_visited == cache_ptr->index_len);
+ HDassert(!pf_entry_ptr->is_pinned);
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__destroy_pf_entry_child_flush_deps() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__encode_cache_image_header()
+ *
+ * Purpose: Encode the metadata cache image buffer header in the
+ * supplied buffer. Updates buffer pointer to the first byte
+ * after the header image in the buffer, or unchanged on failure.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 8/6/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__encode_cache_image_header(const H5F_t *f, const H5C_t *cache_ptr,
+ uint8_t **buf)
+{
+ size_t actual_header_len;
+ size_t expected_header_len;
+ uint8_t flags = 0;
+ uint8_t * p; /* Pointer into cache image buffer */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->close_warning_received);
+ HDassert(cache_ptr->image_ctl.generate_image);
+ HDassert(cache_ptr->index_len == 0);
+ HDassert(cache_ptr->image_data_len > 0);
+ HDassert(cache_ptr->image_data_len <= cache_ptr->image_len);
+ HDassert(buf);
+ HDassert(*buf);
+
+ /* Set pointer into buffer */
+ p = *buf;
+
+ /* write signature */
+ HDmemcpy(p, H5C__MDCI_BLOCK_SIGNATURE, (size_t)H5C__MDCI_BLOCK_SIGNATURE_LEN);
+ p += H5C__MDCI_BLOCK_SIGNATURE_LEN;
+
+ /* write version */
+ *p++ = (uint8_t)H5C__MDCI_BLOCK_VERSION_0;
+
+ /* setup and write flags */
+
+ /* at present we don't support saving resize status */
+ HDassert(!cache_ptr->image_ctl.save_resize_status);
+ if(cache_ptr->image_ctl.save_resize_status)
+ flags |= H5C__MDCI_HEADER_HAVE_RESIZE_STATUS;
+
+ *p++ = flags;
+
+ /* Encode image data length */
+ /* this must be true at present */
+ HDassert(cache_ptr->image_len == cache_ptr->image_data_len);
+ H5F_ENCODE_LENGTH(f, p, cache_ptr->image_data_len);
+
+ /* write num entries */
+ UINT32ENCODE(p, cache_ptr->num_entries_in_image);
+
+ /* verify expected length of header */
+ actual_header_len = (size_t)(p - *buf);
+ expected_header_len = H5C__cache_image_block_header_size(f);
+ if(actual_header_len != expected_header_len)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad header image len")
+
+ /* Update buffer pointer */
+ *buf = p;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__encode_cache_image_header() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__encode_cache_image_entry()
+ *
+ * Purpose:     Encode the indicated metadata cache image entry in the
+ * supplied buffer. Updates buffer pointer to the first byte
+ * after the entry in the buffer, or unchanged on failure.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 8/6/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__encode_cache_image_entry(H5F_t *f, H5C_t *cache_ptr, uint8_t **buf,
+ unsigned entry_num)
+{
+ H5C_image_entry_t * ie_ptr; /* Pointer to entry to encode */
+ uint8_t flags = 0; /* Flags for entry */
+ uint8_t * p; /* Pointer into cache image buffer */
+ unsigned u; /* Local index value */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(cache_ptr == f->shared->cache);
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->close_warning_received);
+ HDassert(cache_ptr->image_ctl.generate_image);
+ HDassert(cache_ptr->index_len == 0);
+ HDassert(buf);
+ HDassert(*buf);
+ HDassert(entry_num < cache_ptr->num_entries_in_image);
+ ie_ptr = &((cache_ptr->image_entries)[entry_num]);
+ HDassert(ie_ptr->magic == H5C_IMAGE_ENTRY_T_MAGIC);
+
+ /* Get pointer to buffer to encode into */
+ p = *buf;
+
+ /* Encode type */
+ if((ie_ptr->type_id < 0) || (ie_ptr->type_id > 255))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADRANGE, FAIL, "type_id out of range.")
+ *p++ = (uint8_t)(ie_ptr->type_id);
+
+ /* Compose and encode flags */
+ if(ie_ptr->is_dirty)
+ flags |= H5C__MDCI_ENTRY_DIRTY_FLAG;
+ if(ie_ptr->lru_rank > 0)
+ flags |= H5C__MDCI_ENTRY_IN_LRU_FLAG;
+ if(ie_ptr->fd_child_count > 0)
+ flags |= H5C__MDCI_ENTRY_IS_FD_PARENT_FLAG;
+ if(ie_ptr->fd_parent_count > 0)
+ flags |= H5C__MDCI_ENTRY_IS_FD_CHILD_FLAG;
+ *p++ = flags;
+
+ /* Encode ring */
+ *p++ = (uint8_t)(ie_ptr->ring);
+
+ /* Encode age */
+ *p++ = (uint8_t)(ie_ptr->age);
+
+ /* Validate and encode dependency child count */
+ if(ie_ptr->fd_child_count > H5C__MDCI_MAX_FD_CHILDREN)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADRANGE, FAIL, "fd_child_count out of range")
+ UINT16ENCODE(p, (uint16_t)(ie_ptr->fd_child_count));
+
+ /* Validate and encode dirty dependency child count */
+ if(ie_ptr->fd_dirty_child_count > H5C__MDCI_MAX_FD_CHILDREN)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADRANGE, FAIL, "fd_dirty_child_count out of range")
+ UINT16ENCODE(p, (uint16_t)(ie_ptr->fd_dirty_child_count));
+
+ /* Validate and encode dependency parent count */
+ if(ie_ptr->fd_parent_count > H5C__MDCI_MAX_FD_PARENTS)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADRANGE, FAIL, "fd_parent_count out of range")
+ UINT16ENCODE(p, (uint16_t)(ie_ptr->fd_parent_count));
+
+ /* Encode index in LRU */
+ INT32ENCODE(p, ie_ptr->lru_rank);
+
+ /* Encode entry offset */
+ H5F_addr_encode(f, &p, ie_ptr->addr);
+
+ /* Encode entry length */
+ H5F_ENCODE_LENGTH(f, p, ie_ptr->size);
+
+ /* Verify expected length of entry image */
+ if((size_t)(p - *buf) != H5C__cache_image_block_entry_header_size(f))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Bad entry image len")
+
+ /* Encode dependency parent offsets -- if any */
+ for(u = 0; u < ie_ptr->fd_parent_count; u++)
+ H5F_addr_encode(f, &p, ie_ptr->fd_parent_addrs[u]);
+
+ /* Copy entry image */
+ HDmemcpy(p, ie_ptr->image_ptr, ie_ptr->size);
+ p += ie_ptr->size;
+
+ /* Update buffer pointer */
+ *buf = p;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__encode_cache_image_entry() */
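+
+/* For illustration, the flags byte for a hypothetical dirty entry that sits
+ * in the LRU and has flush dependency children but no parents would be
+ * composed as:
+ *
+ *     flags = H5C__MDCI_ENTRY_DIRTY_FLAG | H5C__MDCI_ENTRY_IN_LRU_FLAG |
+ *             H5C__MDCI_ENTRY_IS_FD_PARENT_FLAG;
+ *
+ * H5C__decode_cache_image_entry() recovers the same three booleans from this
+ * byte when the image is loaded.
+ */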
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__prep_for_file_close__compute_fd_heights
+ *
+ * Purpose: Recent modifications to flush dependency support in the
+ * metadata cache have removed the notion of flush dependency
+ * height. This is a problem for the cache image feature,
+ * as flush dependency height is used to order entries in the
+ * cache image so that flush dependency parents appear before
+ * flush dependency children. (Recall that the flush dependency
+ * height of an entry in a flush dependency relationship is the
+ * length of the longest path from the entry to a leaf entry --
+ *              that is, an entry with flush dependency parents, but no
+ *              flush dependency children.  With the introduction of the
+ *              possibility of multiple flush dependency parents, we have
+ *              a partial flush dependency lattice, not a flush dependency
+ *              tree.  But since the partial lattice is acyclic, the concept
+ *              of flush dependency height still makes sense.)
+ *
+ * The purpose of this function is to compute the flush
+ * dependency height of all entries that appear in the cache
+ * image.
+ *
+ * At present, entries are included or excluded from the
+ * cache image depending upon the ring in which they reside.
+ * Thus there is no chance that one side of a flush dependency
+ * will be in the cache image, and the other side not.
+ *
+ * However, once we start placing a limit on the size of the
+ * cache image, or start excluding prefetched entries from
+ * the cache image if they haven't been accessed in some
+ * number of file close / open cycles, this will no longer
+ * be the case.
+ *
+ * In particular, if a flush dependency child is dirty, and
+ * one of its flush dependency parents is dirty and not in
+ * the cache image, then the flush dependency child cannot
+ * be in the cache image without violating flush ordering.
+ *
+ * Observe that a clean flush dependency child can be either
+ * in or out of the cache image without effect on flush
+ * dependencies.
+ *
+ * Similarly, a flush dependency parent can always be part
+ * of a cache image, regardless of whether it is clean or
+ * dirty -- but remember that a flush dependency parent can
+ * also be a flush dependency child.
+ *
+ * Finally, note that for purposes of the cache image, flush
+ *              dependency height ends when a flush dependency relation
+ *              passes outside the cache image.
+ *
+ * On exit, the flush dependency height of each entry in the
+ * cache image should be calculated and stored in the cache
+ * entry. Entries will be removed from the cache image if
+ * necessary to maintain flush ordering.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 9/6/16
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__prep_for_file_close__compute_fd_heights(const H5C_t *cache_ptr)
+{
+ H5C_cache_entry_t * entry_ptr;
+ H5C_cache_entry_t * parent_ptr;
+ unsigned entries_removed_from_image = 0;
+ unsigned external_parent_fd_refs_removed = 0;
+ unsigned external_child_fd_refs_removed = 0;
+ hbool_t done = FALSE;
+ unsigned u; /* Local index variable */
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_STATIC
+
+ /* sanity checks */
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+
+ /* Remove from the cache image all dirty entries that are
+ * flush dependency children of dirty entries that are not in the
+ * cache image. Must do this, as if we fail to do so, the parent
+ * will be written to file before the child. Since it is possible
+ * that the child will have dirty children of its own, this may take
+ * multiple passes through the index list.
+ */
+ done = FALSE;
+ while(!done) {
+ done = TRUE;
+ entry_ptr = cache_ptr->il_head;
+ while(entry_ptr != NULL) {
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+
+ /* Should this entry be in the image */
+ if(entry_ptr->image_dirty && entry_ptr->include_in_image &&
+ (entry_ptr->fd_parent_count > 0)) {
+ HDassert(entry_ptr->flush_dep_parent != NULL);
+ for(u = 0; u < entry_ptr->flush_dep_nparents; u++ ) {
+ parent_ptr = entry_ptr->flush_dep_parent[u];
+
+ /* Sanity check parent */
+ HDassert(parent_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->ring == parent_ptr->ring);
+
+ if(parent_ptr->is_dirty && !parent_ptr->include_in_image &&
+ entry_ptr->include_in_image) {
+
+ /* Must remove child from image -- only do this once */
+ entries_removed_from_image++;
+ entry_ptr->include_in_image = FALSE;
+ } /* end if */
+ } /* for */
+ } /* end if */
+
+ entry_ptr = entry_ptr->il_next;
+ } /* while ( entry_ptr != NULL ) */
+ } /* while ( ! done ) */
+
+ /* at present, entries are included in the cache image if they reside
+ * in a specified set of rings. Thus it should be impossible for
+ * entries_removed_from_image to be positive. Assert that this is
+ * so. Note that this will change when we start aging entries out
+ * of the cache image.
+ */
+ HDassert(entries_removed_from_image == 0);
+
+    /* Next, remove, from entries in the cache image, references to
+ * flush dependency parents or children that are not in the cache image.
+ */
+ entry_ptr = cache_ptr->il_head;
+ while(entry_ptr != NULL) {
+ if(!entry_ptr->include_in_image && entry_ptr->flush_dep_nparents > 0) {
+ HDassert(entry_ptr->flush_dep_parent != NULL);
+
+ for(u = 0; u < entry_ptr->flush_dep_nparents; u++ ) {
+ parent_ptr = entry_ptr->flush_dep_parent[u];
+
+ /* Sanity check parent */
+ HDassert(parent_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->ring == parent_ptr->ring);
+
+ if(parent_ptr->include_in_image) {
+ /* Must remove reference to child */
+ HDassert(parent_ptr->fd_child_count > 0);
+ parent_ptr->fd_child_count--;
+
+ if(entry_ptr->is_dirty) {
+ HDassert(parent_ptr->fd_dirty_child_count > 0);
+ parent_ptr->fd_dirty_child_count--;
+ } /* end if */
+
+ external_child_fd_refs_removed++;
+ } /* end if */
+ } /* for */
+ } /* end if */
+ else if(entry_ptr->include_in_image && entry_ptr->flush_dep_nparents > 0) {
+ /* Sanity checks */
+ HDassert(entry_ptr->flush_dep_parent != NULL);
+ HDassert(entry_ptr->flush_dep_nparents == entry_ptr->fd_parent_count);
+ HDassert(entry_ptr->fd_parent_addrs);
+
+ for(u = 0; u < entry_ptr->flush_dep_nparents; u++ ) {
+ parent_ptr = entry_ptr->flush_dep_parent[u];
+
+ /* Sanity check parent */
+ HDassert(parent_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->ring == parent_ptr->ring);
+
+ if(!parent_ptr->include_in_image) {
+ /* Must remove reference to parent */
+ HDassert(entry_ptr->fd_parent_count > 0);
+                    entry_ptr->fd_parent_count--;
+
+ HDassert(parent_ptr->addr == entry_ptr->fd_parent_addrs[u]);
+
+ entry_ptr->fd_parent_addrs[u] = HADDR_UNDEF;
+ external_parent_fd_refs_removed++;
+ } /* end if */
+ } /* for */
+
+ /* Touch up fd_parent_addrs array if necessary */
+ if(entry_ptr->fd_parent_count == 0) {
+ H5MM_xfree(entry_ptr->fd_parent_addrs);
+ entry_ptr->fd_parent_addrs = NULL;
+ } /* end if */
+ else if(entry_ptr->flush_dep_nparents > entry_ptr->fd_parent_count) {
+ haddr_t * old_fd_parent_addrs = entry_ptr->fd_parent_addrs;
+ unsigned v;
+
+                if(NULL == (entry_ptr->fd_parent_addrs = (haddr_t *)H5MM_calloc(sizeof(haddr_t) * (size_t)(entry_ptr->fd_parent_count))))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for fd parent addr array")
+
+ v = 0;
+ for(u = 0; u < entry_ptr->flush_dep_nparents; u++) {
+ if(old_fd_parent_addrs[u] != HADDR_UNDEF) {
+ entry_ptr->fd_parent_addrs[v] = old_fd_parent_addrs[u];
+ v++;
+ } /* end if */
+ } /* end for */
+
+ HDassert(v == entry_ptr->fd_parent_count);
+ } /* end else-if */
+ } /* end else-if */
+
+ entry_ptr = entry_ptr->il_next;
+ } /* while (entry_ptr != NULL) */
+
+    /* At present, no external parent or child flush dependency links
+ * should exist -- hence the following assertions. This will change
+ * if we support ageout of entries in the cache image.
+ */
+ HDassert(external_child_fd_refs_removed == 0);
+ HDassert(external_parent_fd_refs_removed == 0);
+
+ /* At this point we should have removed all flush dependencies that
+ * cross cache image boundaries. Now compute the flush dependency
+ * heights for all entries in the image.
+ *
+ * Until I can think of a better way, do this via a depth first
+ * search implemented via a recursive function call.
+ *
+ * Note that entry_ptr->image_fd_height has already been initialized to 0
+ * for all entries that may appear in the cache image.
+ */
+ entry_ptr = cache_ptr->il_head;
+ while(entry_ptr != NULL) {
+ if(entry_ptr->include_in_image && entry_ptr->fd_child_count == 0 &&
+ entry_ptr->fd_parent_count > 0) {
+ for(u = 0; u < entry_ptr->fd_parent_count; u++) {
+ parent_ptr = entry_ptr->flush_dep_parent[u];
+
+ HDassert(parent_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ if(parent_ptr->include_in_image && parent_ptr->image_fd_height <= 0)
+ H5C__prep_for_file_close__compute_fd_heights_real(parent_ptr, 1);
+ } /* end for */
+ } /* end if */
+
+ entry_ptr = entry_ptr->il_next;
+ } /* while (entry_ptr != NULL) */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__prep_for_file_close__compute_fd_heights() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__prep_for_file_close__compute_fd_heights_real
+ *
+ * Purpose: H5C__prep_for_file_close__compute_fd_heights() prepares
+ * for the computation of flush dependency heights of all
+ *              entries in the cache image; this function actually does
+ * it.
+ *
+ * The basic observation behind this function is as follows:
+ *
+ * Suppose you have an entry E with a flush dependency
+ * height of X. Then the parents of E must all have
+ *              flush dependency height X + 1 or greater.
+ *
+ * Use this observation to compute flush dependency height
+ * of all entries in the cache image via the following
+ * recursive algorithm:
+ *
+ * 1) On entry, set the flush dependency height of the
+ * supplied cache entry to the supplied value.
+ *
+ * 2) Examine all the flush dependency parents of the
+ * supplied entry.
+ *
+ * If the parent is in the cache image, and has flush
+ * dependency height less than or equal to the flush
+ * dependency height of the current entry, call the
+ * recursive routine on the parent with flush dependency
+ * height equal to the flush dependency height of the
+ * child plus 1.
+ *
+ * Otherwise do nothing.
+ *
+ * Observe that if the flush dependency height of all entries
+ * in the image is initialized to zero, and if this recursive
+ * function is called with flush dependency height 0 on all
+ * entries in the cache image with FD parents in the image,
+ * but without FD children in the image, the correct flush
+ * dependency height should be set for all entries in the
+ * cache image.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 9/6/16
+ *
+ *-------------------------------------------------------------------------
+ */
+static void
+H5C__prep_for_file_close__compute_fd_heights_real(H5C_cache_entry_t *entry_ptr,
+ uint32_t fd_height)
+{
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity checks */
+ HDassert(entry_ptr);
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->include_in_image);
+ HDassert((entry_ptr->image_fd_height == 0) || (entry_ptr->image_fd_height < fd_height));
+ HDassert(((fd_height == 0) && (entry_ptr->fd_child_count == 0)) || ((fd_height > 0) && (entry_ptr->fd_child_count > 0)));
+
+ entry_ptr->image_fd_height = fd_height;
+ if(entry_ptr->flush_dep_nparents > 0) {
+ unsigned u;
+
+ HDassert(entry_ptr->flush_dep_parent);
+ for(u = 0; u < entry_ptr->fd_parent_count; u++) {
+ H5C_cache_entry_t *parent_ptr;
+
+ parent_ptr = entry_ptr->flush_dep_parent[u];
+ HDassert(parent_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+
+ if(parent_ptr->include_in_image && parent_ptr->image_fd_height <= fd_height)
+ H5C__prep_for_file_close__compute_fd_heights_real(parent_ptr, fd_height + 1);
+ } /* end for */
+ } /* end if */
+
+ FUNC_LEAVE_NOAPI_VOID
+} /* H5C__prep_for_file_close__compute_fd_heights_real() */
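
For illustration, the height-propagation rule documented above can be run on a toy dependency graph. A minimal standalone C sketch, not part of this patch; node_t, the fan-out limit, and the three-node chain are invented:

#include <stdio.h>

#define MAX_PARENTS 4

typedef struct node_t {
    struct node_t *parents[MAX_PARENTS];   /* flush dependency parents      */
    unsigned       nparents;               /* number of parents             */
    unsigned       nchildren;              /* number of children            */
    unsigned       height;                 /* computed FD height, init to 0 */
} node_t;

/* Mirror of the recursive rule: record the node's height, then push
 * height + 1 to any parent whose recorded height is not already larger.
 */
static void compute_height(node_t *node, unsigned height)
{
    unsigned u;

    node->height = height;
    for(u = 0; u < node->nparents; u++)
        if(node->parents[u]->height <= height)
            compute_height(node->parents[u], height + 1);
}

int main(void)
{
    /* chain: leaf -> mid -> root (only the leaf has no children) */
    node_t root = { {0}, 0, 1, 0 };
    node_t mid  = { {&root}, 1, 1, 0 };
    node_t leaf = { {&mid}, 1, 0, 0 };

    /* start at the entry with parents but no children, as the caller does */
    compute_height(&leaf, 0);

    printf("leaf=%u mid=%u root=%u\n", leaf.height, mid.height, root.height);
    return 0;    /* prints leaf=0 mid=1 root=2 */
}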
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__prep_for_file_close__setup_image_entries_array
+ *
+ * Purpose: Allocate space for the image_entries array, and load
+ * each instance of H5C_image_entry_t in the array with
+ * the data necessary to construct the metadata cache image.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 8/4/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__prep_for_file_close__setup_image_entries_array(H5C_t *cache_ptr)
+{
+ H5C_cache_entry_t * entry_ptr;
+ H5C_image_entry_t * image_entries = NULL;
+ uint32_t entries_visited = 0;
+ unsigned u; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->close_warning_received);
+ HDassert(cache_ptr->pl_len == 0);
+ HDassert(cache_ptr->num_entries_in_image > 0);
+ HDassert(cache_ptr->image_entries == NULL);
+
+ /* Allocate and initialize image_entries array */
+ if(NULL == (image_entries = (H5C_image_entry_t *)H5MM_calloc(sizeof(H5C_image_entry_t) * (size_t)(cache_ptr->num_entries_in_image + 1))))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for image_entries")
+
+ /* Initialize (non-zero/NULL/FALSE) fields */
+ for(u = 0; u <= cache_ptr->num_entries_in_image; u++) {
+ image_entries[u].magic = H5C_IMAGE_ENTRY_T_MAGIC;
+ image_entries[u].addr = HADDR_UNDEF;
+ image_entries[u].ring = H5C_RING_UNDEFINED;
+ image_entries[u].type_id = -1;
+ } /* end for */
+
+ /* Scan each entry on the index list and populate the image_entries array */
+ u = 0;
+ entry_ptr = cache_ptr->il_head;
+ while(entry_ptr != NULL) {
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+
+ if(entry_ptr->include_in_image) {
+ /* Since we have already serialized the cache, the following
+ * should hold.
+ */
+ HDassert(entry_ptr->image_up_to_date);
+ HDassert(entry_ptr->image_ptr);
+ HDassert(entry_ptr->type);
+
+ image_entries[u].addr = entry_ptr->addr;
+ image_entries[u].size = entry_ptr->size;
+ image_entries[u].ring = entry_ptr->ring;
+
+ /* When a prefetched entry is included in the image, store
+ * its underlying type id in the image entry, not
+ * H5AC_PREFETCHED_ENTRY_ID. In passing, also increment
+ * the age (up to H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX).
+ */
+ if(entry_ptr->type->id == H5AC_PREFETCHED_ENTRY_ID) {
+ image_entries[u].type_id = entry_ptr->prefetch_type_id;
+ image_entries[u].age = entry_ptr->age + 1;
+
+ if(image_entries[u].age > H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX)
+ image_entries[u].age = H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX;
+ } /* end if */
+ else {
+ image_entries[u].type_id = entry_ptr->type->id;
+ image_entries[u].age = 0;
+ } /* end else */
+
+ image_entries[u].lru_rank = entry_ptr->lru_rank;
+ image_entries[u].is_dirty = entry_ptr->is_dirty;
+ image_entries[u].image_fd_height = entry_ptr->image_fd_height;
+ image_entries[u].fd_parent_count = entry_ptr->fd_parent_count;
+ image_entries[u].fd_parent_addrs = entry_ptr->fd_parent_addrs;
+ image_entries[u].fd_child_count = entry_ptr->fd_child_count;
+ image_entries[u].fd_dirty_child_count =
+ entry_ptr->fd_dirty_child_count;
+ image_entries[u].image_ptr = entry_ptr->image_ptr;
+
+ /* Null out entry_ptr->fd_parent_addrs and set
+ * entry_ptr->fd_parent_count to zero so that ownership of the
+ * flush dependency parents address array is transferred to the
+ * image entry.
+ */
+ entry_ptr->fd_parent_count = 0;
+ entry_ptr->fd_parent_addrs = NULL;
+
+ u++;
+
+ HDassert(u <= cache_ptr->num_entries_in_image);
+ } /* end if */
+
+ entries_visited++;
+
+ entry_ptr = entry_ptr->il_next;
+ } /* end while */
+
+ /* Sanity checks */
+ HDassert(entries_visited == cache_ptr->index_len);
+ HDassert(u == cache_ptr->num_entries_in_image);
+
+ HDassert(image_entries[u].fd_parent_addrs == NULL);
+ HDassert(image_entries[u].image_ptr == NULL);
+
+ cache_ptr->image_entries = image_entries;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__prep_for_file_close__setup_image_entries_array() */
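
The nulling of fd_parent_addrs and zeroing of fd_parent_count above is the usual single-owner hand-off: after the copy, only the image entry may free the buffer. A minimal sketch of the same pattern, with invented names (owner_t, transfer):

#include <stdlib.h>

typedef struct {
    unsigned long *addrs;      /* heap buffer of addresses */
    unsigned       count;      /* number of valid entries  */
} owner_t;

static void transfer(owner_t *dst, owner_t *src)
{
    dst->addrs = src->addrs;   /* dst now owns the buffer            */
    dst->count = src->count;
    src->addrs = NULL;         /* src must never free or reuse it    */
    src->count = 0;
}

int main(void)
{
    owner_t entry = { malloc(3 * sizeof(unsigned long)), 3 };
    owner_t image = { NULL, 0 };

    transfer(&image, &entry);

    free(image.addrs);         /* exactly one free, by the new owner */
    free(entry.addrs);         /* safe no-op: pointer was nulled     */
    return 0;
}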
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__prep_for_file_close__scan_entries
+ *
+ * Purpose: Scan all entries in the metadata cache, and store all
+ * entry specific data required for construction of the
+ * metadata cache image block and likely to be discarded
+ * or modified during the cache flush on file close.
+ *
+ * In particular, make note of:
+ * entry rank in LRU
+ * whether the entry is dirty
+ * base address of entry flush dependency parent,
+ * if it exists.
+ * number of flush dependency children, if any.
+ *
+ * Also, determine which entries are to be included in the
+ * metadata cache image. At present, all entries other than
+ * the superblock, the superblock extension object header and
+ * its associated chunks (if any) are included.
+ *
+ * Finally, compute the size of the metadata cache image
+ * block.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 7/21/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__prep_for_file_close__scan_entries(const H5F_t *f, H5C_t *cache_ptr)
+{
+ H5C_cache_entry_t * entry_ptr;
+ hbool_t include_in_image;
+ unsigned entries_visited = 0;
+ int lru_rank = 1;
+ uint32_t num_entries_tentatively_in_image = 0;
+ uint32_t num_entries_in_image = 0;
+ size_t image_len;
+ size_t entry_header_len;
+ size_t fd_parents_list_len;
+ int i;
+ unsigned j;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(f->shared->sblock);
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->close_warning_received);
+ HDassert(cache_ptr->pl_len == 0);
+
+ /* Initialize image len to the size of the metadata cache image block
+ * header.
+ */
+ image_len = H5C__cache_image_block_header_size(f);
+ entry_header_len = H5C__cache_image_block_entry_header_size(f);
+
+ /* Scan each entry on the index list */
+ entry_ptr = cache_ptr->il_head;
+ while(entry_ptr != NULL) {
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+
+ /* Since we have already serialized the cache, the following
+ * should hold.
+ */
+ HDassert(entry_ptr->image_up_to_date);
+ HDassert(entry_ptr->image_ptr);
+
+ /* Initially, we mark all entries in the rings included
+ * in the cache image as being included in the
+ * image. Depending on circumstances, we may exclude some
+ * of these entries later.
+ */
+ if(entry_ptr->ring > H5C_MAX_RING_IN_IMAGE)
+ include_in_image = FALSE;
+ else
+ include_in_image = TRUE;
+ entry_ptr->include_in_image = include_in_image;
+
+ if(include_in_image) {
+ entry_ptr->lru_rank = -1;
+ entry_ptr->image_dirty = entry_ptr->is_dirty;
+ entry_ptr->image_fd_height = 0; /* will compute this later */
+
+ /* Initially, include all flush dependency parents in the
+ * list of flush dependencies to be stored in the
+ * image. We may remove some or all of these later.
+ */
+ if(entry_ptr->flush_dep_nparents > 0) {
+ /* The parent addresses array may already exist -- reallocate
+ * as needed.
+ */
+ if(entry_ptr->flush_dep_nparents == entry_ptr->fd_parent_count ) {
+ /* parent addresses array should already be allocated
+ * and of the correct size.
+ */
+ HDassert(entry_ptr->fd_parent_addrs);
+ } /* end if */
+ else if(entry_ptr->fd_parent_count > 0) {
+ HDassert(entry_ptr->fd_parent_addrs);
+ entry_ptr->fd_parent_addrs = (haddr_t *)H5MM_xfree(entry_ptr->fd_parent_addrs);
+ } /* end else-if */
+ else {
+ HDassert(entry_ptr->fd_parent_count == 0);
+ HDassert(entry_ptr->fd_parent_addrs == NULL);
+ } /* end else */
+
+ entry_ptr->fd_parent_count = entry_ptr->flush_dep_nparents;
+ if(NULL == entry_ptr->fd_parent_addrs)
+ if(NULL == (entry_ptr->fd_parent_addrs = (haddr_t *)H5MM_malloc(sizeof(haddr_t) * (size_t)(entry_ptr->fd_parent_count))))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, FAIL, "memory allocation failed for fd parent addrs buffer")
+
+ for(i = 0; i < (int)(entry_ptr->fd_parent_count); i++) {
+ entry_ptr->fd_parent_addrs[i] = entry_ptr->flush_dep_parent[i]->addr;
+ HDassert(H5F_addr_defined(entry_ptr->fd_parent_addrs[i]));
+ } /* end for */
+ } /* end if */
+ else if(entry_ptr->fd_parent_count > 0) {
+ HDassert(entry_ptr->fd_parent_addrs);
+ entry_ptr->fd_parent_addrs = (haddr_t *)H5MM_xfree(entry_ptr->fd_parent_addrs);
+ } /* end else-if */
+ else
+ HDassert(entry_ptr->fd_parent_addrs == NULL);
+
+ /* Initially, all flush dependency children are included in
+ * the count of flush dependency child relationships to be
+ * represented in the cache image. Some or all of these
+ * may be dropped from the image later.
+ */
+ if(entry_ptr->flush_dep_nchildren > 0) {
+ if(!entry_ptr->is_pinned)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "encountered unpinned fd parent?!?")
+
+ entry_ptr->fd_child_count = entry_ptr->flush_dep_nchildren;
+ entry_ptr->fd_dirty_child_count = entry_ptr->flush_dep_ndirty_children;
+ } /* end if */
+
+ num_entries_tentatively_in_image++;
+ } /* end if */
+
+ entries_visited++;
+ entry_ptr = entry_ptr->il_next;
+ } /* end while */
+ HDassert(entries_visited == cache_ptr->index_len);
+
+ /* Now compute the flush dependency heights of all flush dependency
+ * relationships to be represented in the image.
+ *
+ * If all entries in the target rings are included in the
+ * image, the flush dependency heights are simply the heights
+ * of all flush dependencies in the target rings.
+ *
+ * However, if we restrict appearance in the cache image either
+ * by the number of entries in the image, by restrictions on the
+ * number of times a prefetched entry can appear in an image, or
+ * by image size, it is possible that flush dependency parents or
+ * children of entries that are in the image may not be included
+ * in the image. In this case, we must prune all flush dependency
+ * relationships that cross the image boundary, and exclude from
+ * the image all dirty flush dependency children that have a
+ * dirty flush dependency parent that is not in the image.
+ * This is necessary to preserve the required flush ordering.
+ *
+ * These details are tended to by the following call to
+ * H5C__prep_for_file_close__compute_fd_heights(). Because the
+ * exact contents of the image cannot be known until after this
+ * call, computation of the image size is delayed.
+ */
+ if(H5C__prep_for_file_close__compute_fd_heights(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "computation of flush dependency heights failed?!?")
+
+ /* At this point, all entries that will appear in the cache
+ * image should be marked correctly. Compute the size of the
+ * cache image.
+ */
+ entries_visited = 0;
+ entry_ptr = cache_ptr->il_head;
+ while(entry_ptr != NULL) {
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+
+ if(entry_ptr->include_in_image) {
+ if(entry_ptr->fd_parent_count > 0)
+ fd_parents_list_len = (size_t)(H5F_SIZEOF_ADDR(f) * entry_ptr->fd_parent_count);
+ else
+ fd_parents_list_len = (size_t)0;
+
+ image_len += entry_header_len + fd_parents_list_len + entry_ptr->size;
+ num_entries_in_image++;
+ } /* end if */
+
+ entries_visited++;
+ entry_ptr = entry_ptr->il_next;
+ } /* end while */
+ HDassert(entries_visited == cache_ptr->index_len);
+ HDassert(num_entries_in_image <= num_entries_tentatively_in_image);
+
+ j = 0;
+ for(i = H5C_MAX_RING_IN_IMAGE + 1; i <= H5C_RING_SB; i++)
+ j += cache_ptr->index_ring_len[i];
+
+ /* This will change */
+ HDassert(entries_visited == (num_entries_tentatively_in_image + j));
+
+ cache_ptr->num_entries_in_image = num_entries_in_image;
+ entries_visited = 0;
+
+ /* Now scan the LRU list to set the lru_rank fields of all entries
+ * on the LRU.
+ *
+ * Note that we start with rank 1, and increment by 1 with each
+ * entry on the LRU.
+ *
+ * Note that manually pinned entries will have lru_rank -1,
+ * and no flush dependency. Putting these entries at the head of
+ * the reconstructed LRU should be appropriate.
+ */
+ entry_ptr = cache_ptr->LRU_head_ptr;
+ while(entry_ptr != NULL) {
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->type != NULL);
+
+ /* to avoid confusion, don't set lru_rank on epoch markers.
+ * Note that we still increment the lru_rank, so that the holes
+ * in the sequence of entries on the LRU will indicate the
+ * locations of epoch markers (if any) when we reconstruct
+ * the LRU.
+ *
+ * Do not set lru_rank or increment lru_rank for entries
+ * that will not be included in the cache image.
+ */
+ if(entry_ptr->type->id == H5AC_EPOCH_MARKER_ID)
+ lru_rank++;
+ else if(entry_ptr->include_in_image) {
+ entry_ptr->lru_rank = lru_rank;
+ lru_rank++;
+ } /* end else-if */
+
+ entries_visited++;
+ entry_ptr = entry_ptr->next;
+ } /* end while */
+ HDassert(entries_visited == cache_ptr->LRU_list_len);
+
+ image_len += H5F_SIZEOF_CHKSUM;
+ cache_ptr->image_data_len = image_len;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__prep_for_file_close__scan_entries() */
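
To make the size computation above concrete, here is a small standalone sketch. The byte counts are assumptions for illustration only; the real values come from H5C__cache_image_block_header_size(), H5C__cache_image_block_entry_header_size(), H5F_SIZEOF_ADDR() and H5F_SIZEOF_CHKSUM:

#include <stdio.h>
#include <stddef.h>

int main(void)
{
    const size_t block_header_len = 23;   /* assumed image block header size */
    const size_t entry_header_len = 32;   /* assumed per-entry header size   */
    const size_t addr_len         = 8;    /* assumed file address size       */
    const size_t chksum_len       = 4;    /* assumed checksum size           */

    /* two toy entries: (fd_parent_count, serialized entry size in bytes) */
    const struct { size_t parents, size; } entries[] = { {0, 512}, {2, 4096} };

    size_t image_len = block_header_len;
    size_t u;

    for(u = 0; u < sizeof(entries) / sizeof(entries[0]); u++)
        image_len += entry_header_len
                   + entries[u].parents * addr_len   /* fd parent addr list */
                   + entries[u].size;                /* entry image         */

    image_len += chksum_len;
    printf("image_data_len = %zu\n", image_len);     /* 23+32+512+32+16+4096+4 */
    return 0;
}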
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__reconstruct_cache_contents()
+ *
+ * Purpose: Scan the image buffer, and create a prefetched
+ * cache entry for every entry in the buffer. Insert the
+ * prefetched entries in the index and the LRU, and
+ * reconstruct any flush dependencies. Order the entries
+ * in the LRU as indicated by the stored lru_ranks.
+ *
+ * Return: SUCCEED on success, and FAIL on failure.
+ *
+ * Programmer: John Mainzer
+ * 8/14/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__reconstruct_cache_contents(H5F_t *f, hid_t dxpl_id, H5C_t *cache_ptr)
+{
+ H5C_cache_entry_t * pf_entry_ptr; /* Pointer to prefetched entry */
+ H5C_cache_entry_t * parent_ptr; /* Pointer to parent of prefetched entry */
+ const uint8_t * p; /* Pointer into image buffer */
+ unsigned u, v; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(cache_ptr == f->shared->cache);
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->image_buffer);
+ HDassert(cache_ptr->image_len > 0);
+
+ /* Decode metadata cache image header */
+ p = (uint8_t *)cache_ptr->image_buffer;
+ if(H5C__decode_cache_image_header(f, cache_ptr, &p) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTDECODE, FAIL, "cache image header decode failed")
+ HDassert((size_t)(p - (uint8_t *)cache_ptr->image_buffer) < cache_ptr->image_len);
+
+ /* The image_data_len and # of entries should be defined now */
+ HDassert(cache_ptr->image_data_len > 0);
+ HDassert(cache_ptr->image_data_len <= cache_ptr->image_len);
+ HDassert(cache_ptr->num_entries_in_image > 0);
+
+ /* Reconstruct entries in image */
+ for(u = 0; u < cache_ptr->num_entries_in_image; u++) {
+ /* Create the prefetched entry described by the ith
+ * entry in cache_ptr->image_entries.
+ */
+ if(NULL == (pf_entry_ptr = H5C__reconstruct_cache_entry(f, cache_ptr, &p)))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "reconstruction of cache entry failed")
+
+ /* Note that we make no checks on available cache space before
+ * inserting the reconstructed entry into the metadata cache.
+ *
+ * This is OK since the cache must be almost empty at the beginning
+ * of the process, and since we check cache size at the end of the
+ * reconstruction process.
+ */
+
+ /* Insert the prefetched entry in the index */
+ H5C__INSERT_IN_INDEX(cache_ptr, pf_entry_ptr, FAIL)
+
+ /* If dirty, insert the entry into the slist. */
+ if(pf_entry_ptr->is_dirty)
+ H5C__INSERT_ENTRY_IN_SLIST(cache_ptr, pf_entry_ptr, FAIL)
+
+ /* Append the entry to the LRU */
+ H5C__UPDATE_RP_FOR_INSERT_APPEND(cache_ptr, pf_entry_ptr, FAIL)
+
+ H5C__UPDATE_STATS_FOR_PREFETCH(cache_ptr, pf_entry_ptr->is_dirty)
+
+ /* If the prefetched entry is the child in one or more flush
+ * dependency relationships, recreate those flush dependencies.
+ */
+ for(v = 0; v < pf_entry_ptr->fd_parent_count; v++) {
+ /* Sanity checks */
+ HDassert(pf_entry_ptr->fd_parent_addrs);
+ HDassert(H5F_addr_defined(pf_entry_ptr->fd_parent_addrs[v]));
+
+ /* Find the parent entry */
+ parent_ptr = NULL;
+ H5C__SEARCH_INDEX(cache_ptr, pf_entry_ptr->fd_parent_addrs[v], parent_ptr, FAIL)
+ if(parent_ptr == NULL)
+ HGOTO_ERROR(H5E_CACHE, H5E_NOTFOUND, FAIL, "fd parent not in cache?!?")
+
+ /* Sanity checks */
+ HDassert(parent_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(parent_ptr->addr == pf_entry_ptr->fd_parent_addrs[v]);
+ HDassert(parent_ptr->lru_rank == -1);
+
+ /* Must protect parent entry to set up a flush dependency.
+ * Do this now, and then unprotect when done.
+ */
+ H5C__UPDATE_RP_FOR_PROTECT(cache_ptr, parent_ptr, FAIL)
+ parent_ptr->is_protected = TRUE;
+
+ /* Setup the flush dependency */
+ if(H5C_create_flush_dependency(parent_ptr, pf_entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTDEPEND, FAIL, "Can't restore flush dependency")
+
+ /* And now unprotect */
+ H5C__UPDATE_RP_FOR_UNPROTECT(cache_ptr, parent_ptr, FAIL)
+ parent_ptr->is_protected = FALSE;
+ } /* end for */
+ } /* end for */
+
+#ifndef NDEBUG
+ /* Scan the cache entries, and verify that each entry has
+ * the expected flush dependency status.
+ */
+ pf_entry_ptr = cache_ptr->il_head;
+ while(pf_entry_ptr != NULL) {
+ HDassert(pf_entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert((pf_entry_ptr->prefetched && pf_entry_ptr->type == H5AC_PREFETCHED_ENTRY)
+ || (!pf_entry_ptr->prefetched && pf_entry_ptr->type != H5AC_PREFETCHED_ENTRY));
+ if(pf_entry_ptr->type == H5AC_PREFETCHED_ENTRY)
+ HDassert(pf_entry_ptr->fd_parent_count == pf_entry_ptr->flush_dep_nparents);
+
+ for(v = 0; v < pf_entry_ptr->fd_parent_count; v++) {
+ parent_ptr = pf_entry_ptr->flush_dep_parent[v];
+ HDassert(parent_ptr);
+ HDassert(parent_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(pf_entry_ptr->fd_parent_addrs);
+ HDassert(pf_entry_ptr->fd_parent_addrs[v] == parent_ptr->addr);
+ HDassert(parent_ptr->flush_dep_nchildren > 0);
+ } /* end for */
+
+ if(pf_entry_ptr->type == H5AC_PREFETCHED_ENTRY) {
+ HDassert(pf_entry_ptr->fd_child_count == pf_entry_ptr->flush_dep_nchildren);
+ HDassert(pf_entry_ptr->fd_dirty_child_count == pf_entry_ptr->flush_dep_ndirty_children);
+ } /* end if */
+
+ pf_entry_ptr = pf_entry_ptr->il_next;
+ } /* end while */
+
+ /* Scan the LRU, and verify the expected ordering of the
+ * prefetched entries.
+ */
+ {
+ int lru_rank_holes = 0;
+ H5C_cache_entry_t *entry_ptr;
+ int i; /* Local index variable */
+
+ i = -1;
+ entry_ptr = cache_ptr->LRU_head_ptr;
+
+ while(entry_ptr != NULL) {
+
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->type != NULL);
+
+ if ( entry_ptr->prefetched ) {
+
+ HDassert(entry_ptr->lru_rank != 0);
+ HDassert((entry_ptr->lru_rank == -1) ||
+ (entry_ptr->lru_rank > i));
+
+ if ( ( entry_ptr->lru_rank > 1 ) &&
+ ( entry_ptr->lru_rank > i + 1 ) )
+
+ lru_rank_holes += entry_ptr->lru_rank - (i + 1);
+
+ i = entry_ptr->lru_rank;
+
+ } /* end if */
+
+ entry_ptr = entry_ptr->next;
+ } /* end while */
+
+ /* Holes in the sequences of LRU ranks can appear due to epoch
+ * markers. They are left in to allow re-insertion of the
+ * epoch markers on reconstruction of the cache -- thus
+ * the following sanity check will have to be revised when
+ * we add code to store and restore adaptive resize status.
+ */
+ HDassert(lru_rank_holes <= H5C__MAX_EPOCH_MARKERS);
+ } /* end block */
+#endif /* NDEBUG */
+
+ /* Check to see if the cache is oversize, and evict entries as
+ * necessary to remain within limits.
+ */
+ if(cache_ptr->index_size >= cache_ptr->max_cache_size) {
+ /* cache is oversized -- call H5C__make_space_in_cache() with zero
+ * space needed to repair the situation if possible.
+ */
+ hbool_t write_permitted = FALSE;
+
+ if(cache_ptr->check_write_permitted != NULL) {
+ if((cache_ptr->check_write_permitted)(f, &write_permitted) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, FAIL, "Can't get write_permitted")
+ } /* end if */
+ else
+ write_permitted = cache_ptr->write_permitted;
+
+ if(H5C__make_space_in_cache(f, dxpl_id, 0, write_permitted) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTPROTECT, FAIL, "H5C__make_space_in_cache failed")
+ } /* end if */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__reconstruct_cache_contents() */
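
The hole-counting sanity check in the NDEBUG block above can be exercised on its own. A minimal sketch; the rank sequence is invented (-1 marks a pinned entry that never received a rank):

#include <stdio.h>

int main(void)
{
    /* lru_rank values seen while walking the reconstructed LRU head-to-tail */
    const int ranks[] = { -1, 1, 2, 4, 5, 8 };
    int       holes = 0, prev = -1;
    size_t    u;

    for(u = 0; u < sizeof(ranks) / sizeof(ranks[0]); u++) {
        /* a gap in the rank sequence marks where an epoch marker sat */
        if(ranks[u] > 1 && ranks[u] > prev + 1)
            holes += ranks[u] - (prev + 1);
        prev = ranks[u];
    }

    printf("lru_rank holes = %d\n", holes);   /* 3: ranks 3, 6 and 7 missing */
    return 0;
}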
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__reconstruct_cache_entry()
+ *
+ * Purpose: Allocate a prefetched metadata cache entry and initialize
+ * it from image buffer.
+ *
+ * Return a pointer to the newly allocated cache entry,
+ * or NULL on failure.
+ *
+ * Return: Pointer to the new instance of H5C_cache_entry on success,
+ * or NULL on failure.
+ *
+ * Programmer: John Mainzer
+ * 8/14/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static H5C_cache_entry_t *
+H5C__reconstruct_cache_entry(const H5F_t *f, H5C_t *cache_ptr,
+ const uint8_t **buf)
+{
+ H5C_cache_entry_t *pf_entry_ptr = NULL; /* Reconstructed cache entry */
+ uint8_t flags = 0;
+ hbool_t is_dirty = FALSE;
+#ifndef NDEBUG /* only used in assertions */
+ hbool_t in_lru = FALSE;
+ hbool_t is_fd_parent = FALSE;
+ hbool_t is_fd_child = FALSE;
+#endif /* NDEBUG */ /* only used in assertions */
+ const uint8_t * p;
+ hbool_t file_is_rw;
+ H5C_cache_entry_t *ret_value = NULL; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->num_entries_in_image > 0);
+ HDassert(buf && *buf);
+
+ /* Key R/W access off of whether the image will be deleted */
+ file_is_rw = cache_ptr->delete_image;
+
+ /* Allocate space for the prefetched cache entry */
+ if(NULL == (pf_entry_ptr = H5FL_CALLOC(H5C_cache_entry_t)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "memory allocation failed for prefetched cache entry")
+
+ /* Get pointer to buffer */
+ p = *buf;
+
+ /* Decode type id */
+ pf_entry_ptr->prefetch_type_id = *p++;
+
+ /* Decode flags */
+ flags = *p++;
+ if(flags & H5C__MDCI_ENTRY_DIRTY_FLAG)
+ is_dirty = TRUE;
+#ifndef NDEBUG /* only used in assertions */
+ if(flags & H5C__MDCI_ENTRY_IN_LRU_FLAG)
+ in_lru = TRUE;
+ if(flags & H5C__MDCI_ENTRY_IS_FD_PARENT_FLAG)
+ is_fd_parent = TRUE;
+ if(flags & H5C__MDCI_ENTRY_IS_FD_CHILD_FLAG)
+ is_fd_child = TRUE;
+#endif /* NDEBUG */ /* only used in assertions */
+
+ /* Force dirty entries to clean if the file is read only -- must do
+ * this as otherwise the cache will attempt to write them on file
+ * close. Since the file is R/O, the metadata cache image superblock
+ * extension message and the cache image block will not be removed.
+ * Hence no danger in this for subsequent opens.
+ *
+ * However, if the dirty entry (marked clean for purposes of the R/O
+ * file open) is evicted and then referred to, the cache will read
+ * either invalid or obsolete data from the file. Handle this by
+ * setting the prefetched_dirty field, and hiding such entries from
+ * the eviction candidate selection algorithm.
+ */
+ pf_entry_ptr->is_dirty = (is_dirty && file_is_rw);
+
+ /* Decode ring */
+ pf_entry_ptr->ring = *p++;
+ HDassert(pf_entry_ptr->ring > (uint8_t)(H5C_RING_UNDEFINED));
+ HDassert(pf_entry_ptr->ring < (uint8_t)(H5C_RING_NTYPES));
+
+ /* Decode age */
+ pf_entry_ptr->age = *p++;
+
+ /* Decode dependency child count */
+ UINT16DECODE(p, pf_entry_ptr->fd_child_count);
+ HDassert((is_fd_parent && pf_entry_ptr->fd_child_count > 0) || (!is_fd_parent && pf_entry_ptr->fd_child_count == 0));
+
+ /* Decode dirty dependency child count */
+ UINT16DECODE(p, pf_entry_ptr->fd_dirty_child_count);
+ if(!file_is_rw)
+ pf_entry_ptr->fd_dirty_child_count = 0;
+ if(pf_entry_ptr->fd_dirty_child_count > pf_entry_ptr->fd_child_count)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "invalid dirty flush dependency child count")
+
+ /* Decode dependency parent count */
+ UINT16DECODE(p, pf_entry_ptr->fd_parent_count);
+ HDassert((is_fd_child && pf_entry_ptr->fd_parent_count > 0) || (!is_fd_child && pf_entry_ptr->fd_parent_count == 0));
+
+ /* Decode index in LRU */
+ INT32DECODE(p, pf_entry_ptr->lru_rank);
+ HDassert((in_lru && pf_entry_ptr->lru_rank >= 0) || (!in_lru && pf_entry_ptr->lru_rank == -1));
+
+ /* Decode entry offset */
+ H5F_addr_decode(f, &p, &pf_entry_ptr->addr);
+ if(!H5F_addr_defined(pf_entry_ptr->addr))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "invalid entry offset")
+
+ /* Decode entry length */
+ H5F_DECODE_LENGTH(f, p, pf_entry_ptr->size);
+ if(pf_entry_ptr->size == 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "invalid entry size")
+
+ /* Verify expected length of entry image */
+ if((size_t)(p - *buf) != H5C__cache_image_block_entry_header_size(f))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADSIZE, NULL, "Bad entry image len")
+
+ /* If parent count greater than zero, allocate array for parent
+ * addresses, and decode addresses into the array.
+ */
+ if(pf_entry_ptr->fd_parent_count > 0) {
+ unsigned u; /* Local index variable */
+
+ if(NULL == (pf_entry_ptr->fd_parent_addrs = (haddr_t *)H5MM_malloc((size_t)(pf_entry_ptr->fd_parent_count) * H5F_SIZEOF_ADDR(f))))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "memory allocation failed for fd parent addrs buffer")
+
+ for(u = 0; u < pf_entry_ptr->fd_parent_count; u++) {
+ H5F_addr_decode(f, &p, &(pf_entry_ptr->fd_parent_addrs[u]));
+ if(!H5F_addr_defined(pf_entry_ptr->fd_parent_addrs[u]))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, NULL, "invalid flush dependency parent offset")
+ } /* end for */
+ } /* end if */
+
+ /* Allocate buffer for entry image */
+ if(NULL == (pf_entry_ptr->image_ptr = H5MM_malloc(pf_entry_ptr->size + H5C_IMAGE_EXTRA_SPACE)))
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTALLOC, NULL, "memory allocation failed for on disk image buffer")
+#if H5C_DO_MEMORY_SANITY_CHECKS
+ HDmemcpy(((uint8_t *)pf_entry_ptr->image_ptr) + pf_entry_ptr->size, H5C_IMAGE_SANITY_VALUE, H5C_IMAGE_EXTRA_SPACE);
+#endif /* H5C_DO_MEMORY_SANITY_CHECKS */
+
+ /* Copy the entry image from the cache image block */
+ HDmemcpy(pf_entry_ptr->image_ptr, p, pf_entry_ptr->size);
+ p += pf_entry_ptr->size;
+
+ /* Initialize the rest of the fields in the prefetched entry */
+ /* (Only need to set non-zero/NULL/FALSE fields, due to calloc() above) */
+ pf_entry_ptr->magic = H5C__H5C_CACHE_ENTRY_T_MAGIC;
+ pf_entry_ptr->cache_ptr = cache_ptr;
+ pf_entry_ptr->image_up_to_date = TRUE;
+ pf_entry_ptr->type = H5AC_PREFETCHED_ENTRY;
+ pf_entry_ptr->prefetched = TRUE;
+ pf_entry_ptr->prefetched_dirty = is_dirty && (!file_is_rw);
+
+ /* Sanity checks */
+ HDassert(pf_entry_ptr->size > 0 && pf_entry_ptr->size < H5C_MAX_ENTRY_SIZE);
+
+ /* Update buffer pointer */
+ *buf = p;
+
+ ret_value = pf_entry_ptr;
+
+done:
+ if(NULL == ret_value && pf_entry_ptr)
+ pf_entry_ptr = H5FL_FREE(H5C_cache_entry_t, pf_entry_ptr);
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__reconstruct_cache_entry() */
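
The decode sequence above follows the standard walk-a-byte-pointer pattern. A minimal self-contained sketch; the three-field little-endian layout is invented and merely stands in for the real UINT16DECODE / H5F_addr_decode machinery:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    const uint8_t  buf[] = { 0x07, 0x01, 0x34, 0x12 };  /* type, flags, count */
    const uint8_t *p     = buf;
    uint8_t        type, flags;
    uint16_t       count;

    type  = *p++;                                       /* 1-byte type id     */
    flags = *p++;                                       /* 1-byte flag field  */
    count = (uint16_t)(p[0] | (p[1] << 8)); p += 2;     /* 16-bit LE count    */

    printf("type=%u flags=%u count=0x%04x consumed=%td bytes\n",
           (unsigned)type, (unsigned)flags, (unsigned)count, p - buf);
    return 0;
}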
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__write_cache_image_superblock_msg
+ *
+ * Purpose: Write the cache image superblock extension message,
+ * creating it if specified.
+ *
+ * In general, the size and location of the cache image block
+ * will be unknown at the time that the cache image superblock
+ * message is created. A subsequent call to this routine will
+ * be used to write the correct data.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer, 7/4/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__write_cache_image_superblock_msg(H5F_t *f, hid_t dxpl_id, hbool_t create)
+{
+ H5C_t * cache_ptr;
+ H5O_mdci_t mdci_msg; /* metadata cache image message */
+ /* to insert in the superblock */
+ /* extension. */
+ unsigned mesg_flags = H5O_MSG_FLAG_FAIL_IF_UNKNOWN_ALWAYS;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(f->shared->cache);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->close_warning_received);
+
+ /* Write data into the metadata cache image superblock extension message.
+ * Note that this data will be bogus when we first create the message.
+ * We will overwrite this data later in a second call to this function.
+ */
+ mdci_msg.addr = cache_ptr->image_addr;
+#ifdef H5_HAVE_PARALLEL
+ if(cache_ptr->aux_ptr) { /* we have multiple processes */
+ H5AC_aux_t * aux_ptr;
+
+ aux_ptr = (H5AC_aux_t *)cache_ptr->aux_ptr;
+ HDassert(aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC);
+ mdci_msg.size = aux_ptr->p0_image_len;
+ } /* end if */
+ else
+#endif /* H5_HAVE_PARALLEL */
+ mdci_msg.size = cache_ptr->image_len;
+
+ /* Write metadata cache image message to superblock extension */
+ if(H5F_super_ext_write_msg(f, dxpl_id, H5O_MDCI_MSG_ID, &mdci_msg, create, mesg_flags) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_WRITEERROR, FAIL, "can't write metadata cache image message to superblock extension")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__write_cache_image_superblock_msg() */
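
The create-then-overwrite flow described in the purpose comment amounts to reserving the record first and patching it once the real size and location are known. A minimal standalone sketch; the file name and record layout are invented:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    uint64_t placeholder = UINT64_MAX;    /* "undefined" marker                */
    uint64_t real_len    = 4715;          /* known only after the image sizing */
    FILE    *fp          = fopen("sketch.bin", "w+b");

    if(!fp)
        return 1;

    /* pass 1: reserve the slot with bogus data at message-creation time */
    fwrite(&placeholder, sizeof(placeholder), 1, fp);

    /* ... the cache image is sized and its address allocated here ... */

    /* pass 2: seek back and overwrite the record with the correct value */
    fseek(fp, 0L, SEEK_SET);
    fwrite(&real_len, sizeof(real_len), 1, fp);

    fclose(fp);
    return 0;
}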
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__write_cache_image
+ *
+ * Purpose: Write the supplied metadata cache image to the specified
+ * location in file.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 8/26/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__write_cache_image(H5F_t *f, hid_t dxpl_id, const H5C_t *cache_ptr)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(cache_ptr);
+ HDassert(H5F_addr_defined(cache_ptr->image_addr));
+ HDassert(cache_ptr->image_len > 0);
+ HDassert(cache_ptr->image_buffer);
+
+#ifdef H5_HAVE_PARALLEL
+{
+ H5AC_aux_t *aux_ptr = (H5AC_aux_t *)cache_ptr->aux_ptr;
+
+ if((NULL == aux_ptr) || (aux_ptr->mpi_rank == 0)) {
+ HDassert((NULL == aux_ptr) || (aux_ptr->magic == H5AC__H5AC_AUX_T_MAGIC));
+#endif /* H5_HAVE_PARALLEL */
+
+ /* Write the buffer (if serial access, or rank 0 for parallel access) */
+ if(H5F_block_write(f, H5FD_MEM_SUPER, cache_ptr->image_addr, cache_ptr->image_len, dxpl_id, cache_ptr->image_buffer) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't write metadata cache image block to file")
+#ifdef H5_HAVE_PARALLEL
+ } /* end if */
+} /* end block */
+#endif /* H5_HAVE_PARALLEL */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__write_cache_image() */
+
diff --git a/src/H5Clog.c b/src/H5Clog.c
index e3e4388..3353619 100644
--- a/src/H5Clog.c
+++ b/src/H5Clog.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Cmodule.h b/src/H5Cmodule.h
index 2c39eab..534404d 100644
--- a/src/H5Cmodule.h
+++ b/src/H5Cmodule.h
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Cmpio.c b/src/H5Cmpio.c
index ab94879..a75cd88 100644
--- a/src/H5Cmpio.c
+++ b/src/H5Cmpio.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -65,6 +63,11 @@
/* Local Prototypes */
/********************/
static herr_t H5C__collective_write(H5F_t *f, hid_t dxpl_id);
+static herr_t H5C__flush_candidate_entries(H5F_t *f, hid_t dxpl_id,
+ unsigned entries_to_flush[H5C_RING_NTYPES],
+ unsigned entries_to_clear[H5C_RING_NTYPES]);
+static herr_t H5C__flush_candidates_in_ring(H5F_t *f, hid_t dxpl_id,
+ H5C_ring_t ring, unsigned entries_to_flush, unsigned entries_to_clear);
/*********************/
@@ -164,96 +167,63 @@ static herr_t H5C__collective_write(H5F_t *f, hid_t dxpl_id);
* Programmer: John Mainzer
* 3/17/10
*
- * Changes: Ported code to detect next entry status changes as the
- * the result of a flush from the serial code in the scan of
- * the LRU. Also added code to detect and adapt to the
- * removal from the cache of the next entry in the scan of
- * the LRU.
- *
- * Note that at present, all of these changes should not
- * be required as the operations on entries as they are
- * flushed that can cause these condiditions are not premitted
- * in the parallel case. However, Quincey indicates that
- * this may change, and thus has requested the modification.
- *
- * Note the assert(FALSE) in the if statement whose body
- * restarts the scan of the LRU. As the body of the if
- * statement should be unreachable, it should never be
- * triggered until the constraints on the parallel case
- * are relaxed. Please remove the assertion at that time.
- *
- * Also added warning on the Pinned Entry List scan, as it
- * is potentially subject to the same issue. As there is
- * no cognate of this scan in the serial code, I don't have
- * a fix to port to it.
- *
- * JRM -- 4/10/19
- *
*-------------------------------------------------------------------------
*/
herr_t
H5C_apply_candidate_list(H5F_t * f,
hid_t dxpl_id,
H5C_t * cache_ptr,
- int num_candidates,
+ unsigned num_candidates,
haddr_t * candidates_list_ptr,
int mpi_rank,
int mpi_size)
{
- hbool_t restart_scan;
- hbool_t prev_is_dirty;
int i;
int m;
int n;
- int first_entry_to_flush;
- int last_entry_to_flush;
- int entries_to_clear = 0;
- int entries_to_flush = 0;
- int entries_to_flush_or_clear_last = 0;
- int entries_to_flush_collectively = 0;
- int entries_cleared = 0;
- int entries_flushed = 0;
- int entries_delayed = 0;
- int entries_flushed_or_cleared_last = 0;
- int entries_flushed_collectively = 0;
- int entries_examined = 0;
- int initial_list_len;
+ unsigned first_entry_to_flush;
+ unsigned last_entry_to_flush;
+ unsigned total_entries_to_clear = 0;
+ unsigned total_entries_to_flush = 0;
int * candidate_assignment_table = NULL;
+ unsigned entries_to_flush[H5C_RING_NTYPES];
+ unsigned entries_to_clear[H5C_RING_NTYPES];
haddr_t addr;
- H5C_cache_entry_t * clear_ptr = NULL;
- H5C_cache_entry_t * next_ptr = NULL;
H5C_cache_entry_t * entry_ptr = NULL;
- H5C_cache_entry_t * flush_ptr = NULL;
- H5C_cache_entry_t * delayed_ptr = NULL;
#if H5C_DO_SANITY_CHECKS
haddr_t last_addr;
#endif /* H5C_DO_SANITY_CHECKS */
#if H5C_APPLY_CANDIDATE_LIST__DEBUG
char tbl_buf[1024];
#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
- herr_t ret_value = SUCCEED; /* Return value */
+ unsigned u; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
- HDassert( cache_ptr != NULL );
- HDassert( cache_ptr->magic == H5C__H5C_T_MAGIC );
- HDassert( num_candidates > 0 );
- HDassert( num_candidates <= cache_ptr->slist_len );
- HDassert( candidates_list_ptr != NULL );
- HDassert( 0 <= mpi_rank );
- HDassert( mpi_rank < mpi_size );
+ /* Sanity checks */
+ HDassert(cache_ptr != NULL);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(num_candidates > 0);
+ HDassert(num_candidates <= cache_ptr->slist_len);
+ HDassert(candidates_list_ptr != NULL);
+ HDassert(0 <= mpi_rank);
+ HDassert(mpi_rank < mpi_size);
+
+ /* Initialize the entries_to_flush and entries_to_clear arrays */
+ HDmemset(entries_to_flush, 0, sizeof(entries_to_flush));
+ HDmemset(entries_to_clear, 0, sizeof(entries_to_clear));
#if H5C_APPLY_CANDIDATE_LIST__DEBUG
- HDfprintf(stdout, "%s:%d: setting up candidate assignment table.\n",
- FUNC, mpi_rank);
- for ( i = 0; i < 1024; i++ ) tbl_buf[i] = '\0';
+ HDfprintf(stdout, "%s:%d: setting up candidate assignment table.\n", FUNC, mpi_rank);
+
+ HDmemset(tbl_buf, 0, sizeof(tbl_buf));
+
sprintf(&(tbl_buf[0]), "candidate list = ");
- for ( i = 0; i < num_candidates; i++ )
- {
- sprintf(&(tbl_buf[HDstrlen(tbl_buf)]), " 0x%llx",
- (long long)(*(candidates_list_ptr + i)));
- }
+ for(u = 0; u < num_candidates; u++)
+ sprintf(&(tbl_buf[HDstrlen(tbl_buf)]), " 0x%llx", (long long)(*(candidates_list_ptr + u)));
sprintf(&(tbl_buf[HDstrlen(tbl_buf)]), "\n");
+
HDfprintf(stdout, "%s", tbl_buf);
#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
@@ -269,7 +239,6 @@ H5C_apply_candidate_list(H5F_t * f,
n = num_candidates / mpi_size;
m = num_candidates % mpi_size;
HDassert(n >= 0);
-
if(NULL == (candidate_assignment_table = (int *)H5MM_malloc(sizeof(int) * (size_t)(mpi_size + 1))))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed for candidate assignment table")
@@ -297,9 +266,8 @@ H5C_apply_candidate_list(H5F_t * f,
HDassert((candidate_assignment_table[mpi_size - 1] + n) == num_candidates);
#if H5C_DO_SANITY_CHECKS
- /* verify that the candidate assignment table has the expected form */
- for ( i = 1; i < mpi_size - 1; i++ )
- {
+ /* Verify that the candidate assignment table has the expected form */
+ for(i = 1; i < mpi_size - 1; i++) {
int a, b;
a = candidate_assignment_table[i] - candidate_assignment_table[i - 1];
@@ -323,433 +291,104 @@ H5C_apply_candidate_list(H5F_t * f,
sprintf(&(tbl_buf[HDstrlen(tbl_buf)]), "\n");
HDfprintf(stdout, "%s", tbl_buf);
- HDfprintf(stdout, "%s:%d: flush entries [%d, %d].\n",
+ HDfprintf(stdout, "%s:%d: flush entries [%u, %u].\n",
FUNC, mpi_rank, first_entry_to_flush, last_entry_to_flush);
HDfprintf(stdout, "%s:%d: marking entries.\n", FUNC, mpi_rank);
#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
- for(i = 0; i < num_candidates; i++) {
- addr = candidates_list_ptr[i];
- HDassert( H5F_addr_defined(addr) );
+ for(u = 0; u < num_candidates; u++) {
+ addr = candidates_list_ptr[u];
+ HDassert(H5F_addr_defined(addr));
#if H5C_DO_SANITY_CHECKS
- if ( i > 0 ) {
- if ( last_addr == addr ) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Duplicate entry in cleaned list.\n")
- } else if ( last_addr > addr ) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "candidate list not sorted.\n")
- }
- }
+ if(u > 0) {
+ if(last_addr == addr)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "duplicate entry in cleaned list")
+ else if(last_addr > addr)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "candidate list not sorted")
+ } /* end if */
last_addr = addr;
#endif /* H5C_DO_SANITY_CHECKS */
H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL)
- if(entry_ptr == NULL) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Listed candidate entry not in cache?!?!?.")
- } else if(!entry_ptr->is_dirty) {
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Listed entry not dirty?!?!?.")
- } else if ( entry_ptr->is_protected ) {
+ if(entry_ptr == NULL)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "listed candidate entry not in cache?!?!?")
+ if(!entry_ptr->is_dirty)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Listed entry not dirty?!?!?")
+ if(entry_ptr->is_protected)
/* For now at least, we can't deal with protected entries.
* If we encounter one, scream and die. If it becomes an
* issue, we should be able to work around this.
*/
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Listed entry is protected?!?!?.")
- } else {
- /* determine whether the entry is to be cleared or flushed,
- * and mark it accordingly. We will scan the protected and
- * pinned list shortly, and clear or flush according to these
- * markings.
- */
- if((i >= first_entry_to_flush) && (i <= last_entry_to_flush)) {
- entries_to_flush++;
- entry_ptr->flush_immediately = TRUE;
- } /* end if */
- else {
- entries_to_clear++;
- entry_ptr->clear_on_unprotect = TRUE;
- } /* end else */
-
- /* Entries marked as collectively accessed and are in the
- candidate list to clear from the cache have to be
- removed from the coll list. This is OK since the
- candidate list is collective and uniform across all
- ranks. */
- if(TRUE == entry_ptr->coll_access) {
- entry_ptr->coll_access = FALSE;
- H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL)
- } /* end if */
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Listed entry is protected?!?!?")
+
+ /* Sanity checks */
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->ring >= H5C_RING_USER);
+ HDassert(entry_ptr->ring <= H5C_RING_SB);
+ HDassert(!entry_ptr->flush_immediately);
+ HDassert(!entry_ptr->clear_on_unprotect);
+
+ /* Determine whether the entry is to be cleared or flushed,
+ * and mark it accordingly. We will scan the protected and
+ * pinned list shortly, and clear or flush according to these
+ * markings.
+ */
+ if(u >= first_entry_to_flush && u <= last_entry_to_flush) {
+ total_entries_to_flush++;
+ entries_to_flush[entry_ptr->ring]++;
+ entry_ptr->flush_immediately = TRUE;
+ } /* end if */
+ else {
+ total_entries_to_clear++;
+ entries_to_clear[entry_ptr->ring]++;
+ entry_ptr->clear_on_unprotect = TRUE;
} /* end else */
+
+ /* Entries marked as collectively accessed and are in the
+ * candidate list to clear from the cache have to be
+ * removed from the coll list. This is OK since the
+ * candidate list is collective and uniform across all
+ * ranks.
+ */
+ if(entry_ptr->coll_access) {
+ entry_ptr->coll_access = FALSE;
+ H5C__REMOVE_FROM_COLL_LIST(cache_ptr, entry_ptr, FAIL)
+ } /* end if */
} /* end for */
+#if H5C_DO_SANITY_CHECKS
+ m = 0;
+ n = 0;
+ for(i = 0; i < H5C_RING_NTYPES; i++) {
+ m += (int)entries_to_flush[i];
+ n += (int)entries_to_clear[i];
+ } /* end for */
+
+ HDassert((unsigned)m == total_entries_to_flush);
+ HDassert((unsigned)n == total_entries_to_clear);
+#endif /* H5C_DO_SANITY_CHECKS */
+
#if H5C_APPLY_CANDIDATE_LIST__DEBUG
- HDfprintf(stdout, "%s:%d: num candidates/to clear/to flush = %d/%d/%d.\n",
- FUNC, mpi_rank, (int)num_candidates, (int)entries_to_clear,
- (int)entries_to_flush);
+ HDfprintf(stdout, "%s:%d: num candidates/to clear/to flush = %u/%u/%u.\n",
+ FUNC, mpi_rank, num_candidates, total_entries_to_clear,
+ total_entries_to_flush);
#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
-
/* We have now marked all the entries on the candidate list for
* either flush or clear -- now scan the LRU and the pinned list
- * for these entries and do the deed.
+ * for these entries and do the deed. Do this via a call to
+ * H5C__flush_candidate_entries().
*
* Note that we are doing things in this round about manner so as
* to preserve the order of the LRU list to the best of our ability.
* If we don't do this, my experiments indicate that we will have a
* noticably poorer hit ratio as a result.
*/
-
-#if H5C_APPLY_CANDIDATE_LIST__DEBUG
- HDfprintf(stdout, "%s:%d: scanning LRU list. len = %d.\n", FUNC, mpi_rank,
- (int)(cache_ptr->LRU_list_len));
-#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
-
- /* ===================================================================== *
- * Now scan the LRU and PEL lists, flushing or clearing entries as
- * needed.
- *
- * The flush_me_last flag may dictate how or
- * when some entries can be flushed, and should be addressed here.
- * However, in their initial implementation, these flags only apply to the
- * superblock, so there's only a relatively small change to this function
- * to account for this one case where they come into play. If these flags
- * are ever expanded upon, this function and the following flushing steps
- * should be reworked to account for additional cases.
- * ===================================================================== */
-
- HDassert(entries_to_flush >= 0);
-
- restart_scan = FALSE;
- entries_examined = 0;
- initial_list_len = cache_ptr->LRU_list_len;
- entry_ptr = cache_ptr->LRU_tail_ptr;
-
- /* Examine each entry in the LRU list */
- while ( ( entry_ptr != NULL )
- &&
- ( entries_examined <= (entries_to_flush + 1) * initial_list_len )
- &&
- ( (entries_cleared + entries_flushed) < num_candidates ) ) {
-
- if ( entry_ptr->prev != NULL )
- prev_is_dirty = entry_ptr->prev->is_dirty;
-
- /* If this process needs to clear this entry. */
- if(entry_ptr->clear_on_unprotect) {
-
- HDassert(entry_ptr->is_dirty);
-
- next_ptr = entry_ptr->next;
- entry_ptr->clear_on_unprotect = FALSE;
- clear_ptr = entry_ptr;
- entry_ptr = entry_ptr->prev;
- entries_cleared++;
-
-#if ( H5C_APPLY_CANDIDATE_LIST__DEBUG > 1 )
- HDfprintf(stdout, "%s:%d: clearing 0x%llx.\n", FUNC, mpi_rank,
- (long long)clear_ptr->addr);
-#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
-
- /* reset entries_removed_counter and
- * last_entry_removed_ptr prior to the call to
- * H5C__flush_single_entry() so that we can spot
- * unexpected removals of entries from the cache,
- * and set the restart_scan flag if proceeding
- * would be likely to cause us to scan an entry
- * that is no longer in the cache.
- *
- * Note that as of this writing (April 2015) this
- * case cannot occur in the parallel case. However
- * Quincey is making noises about changing this, hence
- * the insertion of this test.
- *
- * Note also that there is no test code to verify
- * that this code actually works (although similar code
- * in the serial version exists and is tested).
- *
- * Implementing a test will likely require implementing
- * flush op like facilities in the parallel tests. At
- * a guess this will not be terribly painful, but it
- * will take a bit of time.
- */
- cache_ptr->entries_removed_counter = 0;
- cache_ptr->last_entry_removed_ptr = NULL;
-
- if(H5C__flush_single_entry(f, dxpl_id, clear_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__GENERATE_IMAGE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't clear entry.")
-
- if((cache_ptr->entries_removed_counter > 1) ||
- (cache_ptr->last_entry_removed_ptr == entry_ptr))
- restart_scan = TRUE;
- } /* end if */
-
- /* Else, if this process needs to flush this entry. */
- else if (entry_ptr->flush_immediately) {
-
- HDassert(entry_ptr->is_dirty);
-
- next_ptr = entry_ptr->next;
- entry_ptr->flush_immediately = FALSE;
- flush_ptr = entry_ptr;
- entry_ptr = entry_ptr->prev;
- entries_flushed++;
-
-#if ( H5C_APPLY_CANDIDATE_LIST__DEBUG > 1 )
- HDfprintf(stdout, "%s:%d: flushing 0x%llx.\n", FUNC, mpi_rank,
- (long long)flush_ptr->addr);
-#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
-
- /* reset entries_removed_counter and
- * last_entry_removed_ptr prior to the call to
- * H5C__flush_single_entry() so that we can spot
- * unexpected removals of entries from the cache,
- * and set the restart_scan flag if proceeding
- * would be likely to cause us to scan an entry
- * that is no longer in the cache.
- *
- * Note that as of this writing (April 2015) this
- * case cannot occur in the parallel case. However
- * Quincey is making noises about changing this, hence
- * the insertion of this test.
- *
- * Note also that there is no test code to verify
- * that this code actually works (although similar code
- * in the serial version exists and is tested).
- *
- * Implementing a test will likely require implementing
- * flush op like facilities in the parallel tests. At
- * a guess this will not be terribly painful, but it
- * will take a bit of time.
- */
- cache_ptr->entries_removed_counter = 0;
- cache_ptr->last_entry_removed_ptr = NULL;
-
- /* Add this entry to the list of entries to collectively write */
- if(H5C__flush_single_entry(f, dxpl_id, flush_ptr, H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry.")
-
- if((cache_ptr->entries_removed_counter > 1) ||
- (cache_ptr->last_entry_removed_ptr == entry_ptr))
- restart_scan = TRUE;
- } /* end else-if */
-
- /* Otherwise, no action to be taken on this entry. Grab the next. */
- else {
- entry_ptr = entry_ptr->prev;
-
- if ( entry_ptr != NULL )
- next_ptr = entry_ptr->next;
-
- } /* end else */
-
- if ( ( entry_ptr != NULL )
- &&
- ( ( restart_scan )
- ||
- ( entry_ptr->is_dirty != prev_is_dirty )
- ||
- ( entry_ptr->next != next_ptr )
- ||
- ( entry_ptr->is_protected )
- ||
- ( entry_ptr->is_pinned )
- )
- ) {
-
- /* something has happened to the LRU -- start over
- * from the tail.
- *
- * Recall that this code should be un-reachable at present,
- * as all the operations by entries on flush that could cause
- * it to be reachable are disallowed in the parallel case at
- * present. Hence the following assertion which should be
- * removed if the above changes.
- */
-
- HDassert( ! restart_scan );
- HDassert( entry_ptr->is_dirty == prev_is_dirty );
- HDassert( entry_ptr->next == next_ptr );
- HDassert( ! entry_ptr->is_protected );
- HDassert( ! entry_ptr->is_pinned );
-
- HDassert(FALSE); /* see comment above */
-
- restart_scan = FALSE;
- entry_ptr = cache_ptr->LRU_tail_ptr;
-/*
- H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr)
-*/
- }
-
- entries_examined++;
- } /* end while */
-
-#if H5C_APPLY_CANDIDATE_LIST__DEBUG
- HDfprintf(stdout, "%s:%d: entries examined/cleared/flushed = %d/%d/%d.\n",
- FUNC, mpi_rank, entries_examined,
- entries_cleared, entries_flushed);
-#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
-
- /* It is also possible that some of the cleared entries are on the
- * pinned list. Must scan that also.
- *
- * WARNING:
- *
- * As we now allow unpinning, and removal of other entries as a side
- * effect of flushing an entry, it is possible that the next entry
- * in a PEL scan could either be no longer pinned, or no longer in
- * the cache by the time we get to it.
- *
- * At present, this is not possible in this case, as we disallow such
- * operations in the parallel version of the library. However, Quincey
- * has been making noises about relaxing this. If and when he does,
- * we have a potential problem here.
- *
- * The same issue exists in the serial cache, and there are tests
- * to detect this problem when it occurs, and adjust to it. As seen
- * above in the LRU scan, I have ported such tests to the parallel
- * code where a close cognate exists in the serial code.
- *
- * I haven't done so here, as there are no PEL scans where the problem
- * can occur in the serial code. Needless to say, this will have to
- * be repaired if the constraints on pre_serialize and serialize
- * callbacks are relaxed in the parallel version of the metadata cache.
- *
- * JRM -- 4/1/15
- */
-
-#if H5C_APPLY_CANDIDATE_LIST__DEBUG
- HDfprintf(stdout, "%s:%d: scanning pinned entry list. len = %d\n",
- FUNC, mpi_rank, (int)(cache_ptr->pel_len));
-#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
-
- entry_ptr = cache_ptr->pel_head_ptr;
- while((entry_ptr != NULL) &&
- ((entries_cleared + entries_flushed + entries_delayed)
- < num_candidates)) {
-
- /* If entry is marked for flush or for clear */
- if((entry_ptr->clear_on_unprotect||entry_ptr->flush_immediately)) {
-
- /* If this entry needs to be flushed last */
- if (entry_ptr->flush_me_last) {
-
- /* At this time, only the superblock supports being
- flushed last. Conveniently, it also happens to be the only
- entry that supports being flushed collectively, as well. Also
- conveniently, it's always pinned, so we only need to check
- for it while scanning the PEL here. Finally, it's never
- included in a candidate list that excludes other dirty
- entries in a cache, so we can handle this relatively simple
- case here.
-
- For now, this function asserts this and saves the entry
- to flush it after scanning the rest of the PEL list.
-
- If there are ever more entries that either need to be
- flushed last and/or flushed collectively, this whole routine
- will need to be reworked to handle all additional cases. As
- it is the simple case of a single pinned entry needing
- flushed last and collectively is just a minor addition to
- this routine, but signficantly buffing up the usage of
- flush_me_last will require a more
- intense rework of this function and potentially the function
- of candidate lists as a whole. */
-
- entries_to_flush_or_clear_last++;
- entries_to_flush_collectively++;
- HDassert(entries_to_flush_or_clear_last == 1);
- HDassert(entries_to_flush_collectively == 1);
-
- /* Delay the entry. It will be flushed later. */
- delayed_ptr = entry_ptr;
- entries_delayed++;
- HDassert(entries_delayed == 1);
-
- } /* end if */
-
- /* Else, this process needs to clear this entry. */
- else if (entry_ptr->clear_on_unprotect) {
- HDassert(!entry_ptr->flush_immediately);
- entry_ptr->clear_on_unprotect = FALSE;
- clear_ptr = entry_ptr;
- entry_ptr = entry_ptr->next;
- entries_cleared++;
-
-#if ( H5C_APPLY_CANDIDATE_LIST__DEBUG > 1 )
- HDfprintf(stdout, "%s:%d: clearing 0x%llx.\n", FUNC, mpi_rank,
- (long long)clear_ptr->addr);
-#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
-
- if(H5C__flush_single_entry(f, dxpl_id, clear_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__GENERATE_IMAGE_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't clear entry.")
- } /* end else-if */
-
- /* Else, if this process needs to independently flush this entry. */
- else if (entry_ptr->flush_immediately) {
- entry_ptr->flush_immediately = FALSE;
- flush_ptr = entry_ptr;
- entry_ptr = entry_ptr->next;
- entries_flushed++;
-
-#if ( H5C_APPLY_CANDIDATE_LIST__DEBUG > 1 )
- HDfprintf(stdout, "%s:%d: flushing 0x%llx.\n", FUNC, mpi_rank,
- (long long)flush_ptr->addr);
-#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
-
- /* Add this entry to the list of entries to collectively write */
- if(H5C__flush_single_entry(f, dxpl_id, flush_ptr, H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't clear entry.")
- } /* end else-if */
- } /* end if */
-
- /* Otherwise, this entry is not marked for flush or clear. Grab the next. */
- else {
- entry_ptr = entry_ptr->next;
- } /* end else */
-
- } /* end while */
-
-#if H5C_APPLY_CANDIDATE_LIST__DEBUG
- HDfprintf(stdout,
- "%s:%d: pel entries examined/cleared/flushed = %d/%d/%d.\n",
- FUNC, mpi_rank, entries_examined,
- entries_cleared, entries_flushed);
- HDfprintf(stdout, "%s:%d: done.\n", FUNC, mpi_rank);
-
- HDfsync(stdout);
-#endif /* H5C_APPLY_CANDIDATE_LIST__DEBUG */
-
- /* ====================================================================== *
- * Now, handle all delayed entries. *
- * *
- * This can *only* be the superblock at this time, so it's relatively *
- * easy to deal with. We're collectively flushing the entry saved from *
- * above. This will need to be handled differently if there are ever more *
- * than one entry needing this special treatment.) *
- * ====================================================================== */
-
- if (delayed_ptr) {
-
- if (delayed_ptr->clear_on_unprotect) {
- if(H5C__flush_single_entry(f, dxpl_id, delayed_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__GENERATE_IMAGE_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry.")
-
- entry_ptr->clear_on_unprotect = FALSE;
- entries_cleared++;
- } else if (delayed_ptr->flush_immediately) {
- /* Add this entry to the list of entries to collectively write */
- if(H5C__flush_single_entry(f, dxpl_id, delayed_ptr, H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't flush entry collectively.")
-
- entry_ptr->flush_immediately = FALSE;
- entries_flushed++;
- } /* end if */
-
- entries_flushed_collectively++;
- entries_flushed_or_cleared_last++;
- } /* end if */
+ if(H5C__flush_candidate_entries(f, dxpl_id, entries_to_flush, entries_to_clear) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush candidates failed")
/* If we've deferred writing to do it collectively, take care of that now */
if(f->coll_md_write) {
@@ -758,28 +397,12 @@ H5C_apply_candidate_list(H5F_t * f,
/* Write collective list */
if(H5C__collective_write(f, dxpl_id) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_WRITEERROR, FAIL, "Can't write metadata collectively")
+ HGOTO_ERROR(H5E_CACHE, H5E_WRITEERROR, FAIL, "can't write metadata collectively")
} /* end if */
- /* ====================================================================== *
- * Finished flushing everything. *
- * ====================================================================== */
-
- HDassert((entries_flushed == entries_to_flush));
- HDassert((entries_cleared == entries_to_clear));
- HDassert((entries_flushed_or_cleared_last == entries_to_flush_or_clear_last));
- HDassert((entries_flushed_collectively == entries_to_flush_collectively));
-
- if((entries_flushed != entries_to_flush) ||
- (entries_cleared != entries_to_clear) ||
- (entries_flushed_or_cleared_last != entries_to_flush_or_clear_last) ||
- (entries_flushed_collectively != entries_to_flush_collectively))
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "entry count mismatch.")
-
done:
if(candidate_assignment_table != NULL)
candidate_assignment_table = (int *)H5MM_xfree((void *)candidate_assignment_table);
-
if(cache_ptr->coll_write_list) {
if(H5SL_close(cache_ptr->coll_write_list) < 0)
HDONE_ERROR(H5E_CACHE, H5E_CANTFREE, FAIL, "failed to destroy skip list")
@@ -836,7 +459,7 @@ H5C_construct_candidate_list__clean_cache(H5C_t * cache_ptr)
if(space_needed > 0) { /* we have work to do */
H5C_cache_entry_t *entry_ptr;
- int nominated_entries_count = 0;
+ unsigned nominated_entries_count = 0;
size_t nominated_entries_size = 0;
haddr_t nominated_addr;
@@ -857,7 +480,7 @@ H5C_construct_candidate_list__clean_cache(H5C_t * cache_ptr)
nominated_addr = entry_ptr->addr;
if(H5AC_add_candidate((H5AC_t *)cache_ptr, nominated_addr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC_add_candidate() failed(1).")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC_add_candidate() failed")
nominated_entries_size += entry_ptr->size;
nominated_entries_count++;
@@ -881,7 +504,7 @@ H5C_construct_candidate_list__clean_cache(H5C_t * cache_ptr)
nominated_addr = entry_ptr->addr;
if(H5AC_add_candidate((H5AC_t *)cache_ptr, nominated_addr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC_add_candidate() failed(2).")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC_add_candidate() failed")
nominated_entries_size += entry_ptr->size;
nominated_entries_count++;
@@ -950,7 +573,7 @@ H5C_construct_candidate_list__min_clean(H5C_t * cache_ptr)
if(space_needed > 0) { /* we have work to do */
H5C_cache_entry_t *entry_ptr;
- int nominated_entries_count = 0;
+ unsigned nominated_entries_count = 0;
size_t nominated_entries_size = 0;
HDassert( cache_ptr->slist_len > 0 );
@@ -973,7 +596,7 @@ H5C_construct_candidate_list__min_clean(H5C_t * cache_ptr)
nominated_addr = entry_ptr->addr;
if(H5AC_add_candidate((H5AC_t *)cache_ptr, nominated_addr) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC_add_candidate() failed.")
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "H5AC_add_candidate() failed")
nominated_entries_size += entry_ptr->size;
nominated_entries_count++;
@@ -1017,51 +640,30 @@ done:
* Programmer: John Mainzer
* 7/5/05
*
- * Changes: Tidied up code, removeing some old commented out
- * code that had been left in pending success of the
- * new version.
- *
- * Note that unlike H5C_apply_candidate_list(),
- * H5C_mark_entries_as_clean() makes all its calls to
- * H5C__flush_single_entry() with the
- * H5C__FLUSH_CLEAR_ONLY_FLAG set. As a result,
- * the pre_serialize() and serialize calls are not made.
- *
- * This then implies that (assuming such actions were
- * permitted in the parallel case) no loads, dirties,
- * resizes, or removals of other entries can occur as
- * a side effect of the flush. Hence, there is no need
- * for the checks for entry removal / status change
- * that I ported to H5C_apply_candidate_list().
- *
- * However, if (in addition to allowing such operations
- * in the parallel case), we allow such operations outside
- * of the pre_serialize / serialize routines, this may
- * cease to be the case -- requiring a review of this
- * function.
- *
*-------------------------------------------------------------------------
*/
herr_t
H5C_mark_entries_as_clean(H5F_t * f,
hid_t dxpl_id,
- int32_t ce_array_len,
+ unsigned ce_array_len,
haddr_t * ce_array_ptr)
{
H5C_t * cache_ptr;
- int entries_cleared;
- int entries_examined;
- int i;
- int initial_list_len;
+ unsigned entries_cleared;
+ unsigned pinned_entries_cleared;
+ hbool_t progress;
+ unsigned entries_examined;
+ unsigned initial_list_len;
haddr_t addr;
+ unsigned pinned_entries_marked = 0;
#if H5C_DO_SANITY_CHECKS
- int pinned_entries_marked = 0;
- int protected_entries_marked = 0;
- int other_entries_marked = 0;
+ unsigned protected_entries_marked = 0;
+ unsigned other_entries_marked = 0;
haddr_t last_addr;
#endif /* H5C_DO_SANITY_CHECKS */
H5C_cache_entry_t * clear_ptr = NULL;
H5C_cache_entry_t * entry_ptr = NULL;
+ unsigned u;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -1076,46 +678,30 @@ H5C_mark_entries_as_clean(H5F_t * f,
HDassert( ce_array_ptr != NULL );
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ( ( H5C_validate_protected_entry_list(cache_ptr) < 0 ) ||
- ( H5C_validate_pinned_entry_list(cache_ptr) < 0 ) ||
- ( H5C_validate_lru_list(cache_ptr) < 0 ) ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "an extreme sanity check failed on entry.\n");
- }
+ if(H5C_validate_protected_entry_list(cache_ptr) < 0 ||
+ H5C_validate_pinned_entry_list(cache_ptr) < 0 ||
+ H5C_validate_lru_list(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
- for ( i = 0; i < ce_array_len; i++ )
- {
- addr = ce_array_ptr[i];
+ for(u = 0; u < ce_array_len; u++) {
+ addr = ce_array_ptr[u];
#if H5C_DO_SANITY_CHECKS
- if ( i == 0 ) {
-
+ if(u == 0)
last_addr = addr;
-
- } else {
-
- if ( last_addr == addr ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "Duplicate entry in cleaned list.\n");
-
- } else if ( last_addr > addr ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "cleaned list not sorted.\n");
- }
- }
+ else {
+ if(last_addr == addr)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Duplicate entry in cleaned list")
+ if(last_addr > addr)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "cleaned list not sorted")
+ } /* end else */
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ( ( H5C_validate_protected_entry_list(cache_ptr) < 0 ) ||
- ( H5C_validate_pinned_entry_list(cache_ptr) < 0 ) ||
- ( H5C_validate_lru_list(cache_ptr) < 0 ) ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "an extreme sanity check failed in for loop.\n");
- }
+ if(H5C_validate_protected_entry_list(cache_ptr) < 0
+ || H5C_validate_pinned_entry_list(cache_ptr) < 0
+ || H5C_validate_lru_list(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed in for loop")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
#endif /* H5C_DO_SANITY_CHECKS */
@@ -1123,28 +709,24 @@ H5C_mark_entries_as_clean(H5F_t * f,
H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL)
- if ( entry_ptr == NULL ) {
+ if(entry_ptr == NULL) {
#if H5C_DO_SANITY_CHECKS
HDfprintf(stdout,
- "H5C_mark_entries_as_clean: entry[%d] = %ld not in cache.\n",
- (int)i,
- (long)addr);
+ "H5C_mark_entries_as_clean: entry[%u] = %a not in cache.\n",
+ u,
+ addr);
#endif /* H5C_DO_SANITY_CHECKS */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "Listed entry not in cache?!?!?.")
-
- } else if ( ! entry_ptr->is_dirty ) {
-
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Listed entry not in cache?!?!?")
+ } /* end if */
+ else if(!entry_ptr->is_dirty) {
#if H5C_DO_SANITY_CHECKS
HDfprintf(stdout,
- "H5C_mark_entries_as_clean: entry %ld is not dirty!?!\n",
- (long)addr);
+ "H5C_mark_entries_as_clean: entry %a is not dirty!?!\n",
+ addr);
#endif /* H5C_DO_SANITY_CHECKS */
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "Listed entry not dirty?!?!?.")
-
- } else {
-
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Listed entry not dirty?!?!?")
+ } /* end else-if */
+ else {
/* Mark the entry to be cleared on unprotect. We will
* scan the LRU list shortly, and clear all those entries
* not currently protected.
@@ -1158,19 +740,13 @@ H5C_mark_entries_as_clean(H5F_t * f,
} /* end if */
entry_ptr->clear_on_unprotect = TRUE;
+ if(entry_ptr->is_pinned)
+ pinned_entries_marked++;
#if H5C_DO_SANITY_CHECKS
- if ( entry_ptr->is_protected ) {
-
+ else if(entry_ptr->is_protected)
protected_entries_marked++;
-
- } else if ( entry_ptr->is_pinned ) {
-
- pinned_entries_marked++;
-
- } else {
-
+ else
other_entries_marked++;
- }
#endif /* H5C_DO_SANITY_CHECKS */
}
}
@@ -1200,31 +776,26 @@ H5C_mark_entries_as_clean(H5F_t * f,
* point.
* JRM -- 4/7/15
*/
-
entries_cleared = 0;
entries_examined = 0;
initial_list_len = cache_ptr->LRU_list_len;
entry_ptr = cache_ptr->LRU_tail_ptr;
-
- while ( ( entry_ptr != NULL ) &&
- ( entries_examined <= initial_list_len ) &&
- ( entries_cleared < ce_array_len ) )
- {
- if ( entry_ptr->clear_on_unprotect ) {
-
+ while(entry_ptr != NULL && entries_examined <= initial_list_len &&
+ entries_cleared < ce_array_len) {
+ if(entry_ptr->clear_on_unprotect) {
entry_ptr->clear_on_unprotect = FALSE;
clear_ptr = entry_ptr;
entry_ptr = entry_ptr->prev;
entries_cleared++;
- if(H5C__flush_single_entry(f, dxpl_id, clear_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't clear entry.")
- } else {
-
+ if(H5C__flush_single_entry(f, dxpl_id, clear_ptr,
+ (H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__GENERATE_IMAGE_FLAG | H5C__UPDATE_PAGE_BUFFER_FLAG)) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't clear entry")
+ } /* end if */
+ else
entry_ptr = entry_ptr->prev;
- }
entries_examined++;
- }
+ } /* end while */
#if H5C_DO_SANITY_CHECKS
HDassert( entries_cleared == other_entries_marked );
@@ -1233,25 +804,28 @@ H5C_mark_entries_as_clean(H5F_t * f,
/* It is also possible that some of the cleared entries are on the
* pinned list. Must scan that also.
*/
+ pinned_entries_cleared = 0;
+ progress = TRUE;
+ while((pinned_entries_cleared < pinned_entries_marked) && progress) {
+ progress = FALSE;
+ entry_ptr = cache_ptr->pel_head_ptr;
+ while(entry_ptr != NULL) {
+ if(entry_ptr->clear_on_unprotect && entry_ptr->flush_dep_ndirty_children == 0) {
+ entry_ptr->clear_on_unprotect = FALSE;
+ clear_ptr = entry_ptr;
+ entry_ptr = entry_ptr->next;
+ entries_cleared++;
+ pinned_entries_cleared++;
+ progress = TRUE;
- entry_ptr = cache_ptr->pel_head_ptr;
-
- while ( entry_ptr != NULL )
- {
- if ( entry_ptr->clear_on_unprotect ) {
-
- entry_ptr->clear_on_unprotect = FALSE;
- clear_ptr = entry_ptr;
- entry_ptr = entry_ptr->next;
- entries_cleared++;
-
- if(H5C__flush_single_entry(f, dxpl_id, clear_ptr, H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0 )
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Can't clear entry.")
- } else {
-
- entry_ptr = entry_ptr->next;
- }
- }
+ if(H5C__flush_single_entry(f, dxpl_id, clear_ptr,
+ (H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__GENERATE_IMAGE_FLAG | H5C__UPDATE_PAGE_BUFFER_FLAG)) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't clear entry")
+ } /* end if */
+ else
+ entry_ptr = entry_ptr->next;
+ } /* end while */
+ } /* end while */
#if H5C_DO_SANITY_CHECKS
HDassert( entries_cleared == pinned_entries_marked + other_entries_marked );
@@ -1262,33 +836,28 @@ H5C_mark_entries_as_clean(H5F_t * f,
( (ce_array_len - entries_cleared) <= cache_ptr->pl_len ) );
#if H5C_DO_SANITY_CHECKS
- i = 0;
+ u = 0;
entry_ptr = cache_ptr->pl_head_ptr;
while ( entry_ptr != NULL )
{
if ( entry_ptr->clear_on_unprotect ) {
- i++;
+ u++;
}
entry_ptr = entry_ptr->next;
}
- HDassert( (entries_cleared + i) == ce_array_len );
+ HDassert( (entries_cleared + u) == ce_array_len );
#endif /* H5C_DO_SANITY_CHECKS */
done:
-
#if H5C_DO_EXTREME_SANITY_CHECKS
- if ( ( H5C_validate_protected_entry_list(cache_ptr) < 0 ) ||
- ( H5C_validate_pinned_entry_list(cache_ptr) < 0 ) ||
- ( H5C_validate_lru_list(cache_ptr) < 0 ) ) {
-
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "an extreme sanity check failed on exit.\n");
- }
+ if(H5C_validate_protected_entry_list(cache_ptr) < 0
+ || H5C_validate_pinned_entry_list(cache_ptr) < 0
+ || H5C_validate_lru_list(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on exit")
#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
FUNC_LEAVE_NOAPI(ret_value)
-
} /* H5C_mark_entries_as_clean() */
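The rewritten pinned entry list scan above can no longer clear everything in a single pass: an entry whose flush_dep_ndirty_children count is nonzero is skipped and revisited once its children have been cleared, so the loop repeats for as long as it is still making progress. A minimal standalone sketch of that pattern follows; the sk_* types and fields are hypothetical stand-ins, not the HDF5 structures.

#include <stdbool.h>
#include <stddef.h>

typedef struct sk_entry_t {
    struct sk_entry_t *next;           /* next entry on the pinned list    */
    struct sk_entry_t *parent;         /* flush dependency parent, if any  */
    bool               clear_on_unprotect;
    unsigned           dirty_children; /* # of dirty flush dep children    */
} sk_entry_t;

/* Clearing an entry makes it clean, which may allow its flush dependency
 * parent to be cleared on a later pass.
 */
static void
sk_clear(sk_entry_t *entry)
{
    entry->clear_on_unprotect = false;
    if (entry->parent != NULL && entry->parent->dirty_children > 0)
        entry->parent->dirty_children--;
}

/* Clear all marked entries, rescanning the list until everything marked
 * has been cleared or a full pass makes no progress (which would indicate
 * a dependency cycle or an unmet precondition).
 */
unsigned
sk_clear_marked(sk_entry_t *pel_head, unsigned n_marked)
{
    unsigned cleared  = 0;
    bool     progress = true;

    while (cleared < n_marked && progress) {
        sk_entry_t *entry = pel_head;

        progress = false;
        while (entry != NULL) {
            if (entry->clear_on_unprotect && entry->dirty_children == 0) {
                sk_clear(entry);
                cleared++;
                progress = true;
            }
            entry = entry->next;
        }
    }
    return cleared;
}

The returned count plays the same role as the entries_cleared tally above: comparing it against n_marked gives the caller the same failure signal that the sanity-check assertions provide in the patch.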
@@ -1309,7 +878,7 @@ done:
herr_t
H5C_clear_coll_entries(H5C_t *cache_ptr, hbool_t partial)
{
- int32_t clear_cnt;
+ uint32_t clear_cnt;
H5C_cache_entry_t * entry_ptr = NULL;
herr_t ret_value = SUCCEED;
@@ -1379,7 +948,9 @@ H5C__collective_write(H5F_t *f, hid_t dxpl_id)
/* Get original transfer mode */
if(NULL == (plist = (H5P_genplist_t *)H5I_object(dxpl_id)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data transfer property list")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, \
+ "not a data transfer property list")
+
if(H5P_get(plist, H5D_XFER_IO_XFER_MODE_NAME, &orig_xfer_mode) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set MPI-I/O property")
@@ -1394,21 +965,34 @@ H5C__collective_write(H5F_t *f, hid_t dxpl_id)
int i;
if(H5P_set(plist, H5D_XFER_IO_XFER_MODE_NAME, &xfer_mode) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set MPI-I/O property")
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, \
+ "can't set MPI-I/O property")
/* Allocate arrays */
- if(NULL == (length_array = (int *)H5MM_malloc((size_t)count * sizeof(int))))
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "memory allocation failed for collective write table length array")
- if(NULL == (buf_array = (MPI_Aint *)H5MM_malloc((size_t)count * sizeof(MPI_Aint))))
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "memory allocation failed for collective buf table length array")
- if(NULL == (offset_array = (MPI_Aint *)H5MM_malloc((size_t)count * sizeof(MPI_Aint))))
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "memory allocation failed for collective offset table length array")
+ if ( NULL == (length_array =
+ (int *)H5MM_malloc((size_t)count * sizeof(int))) )
+
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, \
+ "memory allocation failed for collective write table length array")
+
+ if ( NULL == (buf_array =
+ (MPI_Aint *)H5MM_malloc((size_t)count * sizeof(MPI_Aint))) )
+
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, \
+ "memory allocation failed for collective buf table length array")
+
+ if(NULL == (offset_array =
+ (MPI_Aint *)H5MM_malloc((size_t)count * sizeof(MPI_Aint))) )
+
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, \
+ "memory allocation failed for collective offset table length array")
/* Fill arrays */
node = H5SL_first(cache_ptr->coll_write_list);
HDassert(node);
if(NULL == (entry_ptr = (H5C_cache_entry_t *)H5SL_item(node)))
- HGOTO_ERROR(H5E_CACHE, H5E_NOTFOUND, FAIL, "can't retrieve skip list item")
+ HGOTO_ERROR(H5E_CACHE, H5E_NOTFOUND, FAIL, \
+ "can't retrieve skip list item")
/* Set up initial array position & buffer base address */
length_array[0] = (int)entry_ptr->size;
@@ -1419,8 +1003,10 @@ H5C__collective_write(H5F_t *f, hid_t dxpl_id)
node = H5SL_next(node);
i = 1;
while(node) {
+
if(NULL == (entry_ptr = (H5C_cache_entry_t *)H5SL_item(node)))
- HGOTO_ERROR(H5E_CACHE, H5E_NOTFOUND, FAIL, "can't retrieve skip list item")
+ HGOTO_ERROR(H5E_CACHE, H5E_NOTFOUND, FAIL, \
+ "can't retrieve skip list item")
/* Set up array position */
length_array[i] = (int)entry_ptr->size;
@@ -1433,48 +1019,85 @@ H5C__collective_write(H5F_t *f, hid_t dxpl_id)
} /* end while */
/* Create memory MPI type */
- if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed(count, length_array, buf_array, MPI_BYTE, &btype)))
+ if(MPI_SUCCESS != (mpi_code =
+ MPI_Type_create_hindexed(count, length_array,
+ buf_array, MPI_BYTE,
+ &btype)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code)
+
btype_created = TRUE;
+
if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&btype)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
/* Create file MPI type */
- if(MPI_SUCCESS != (mpi_code = MPI_Type_create_hindexed(count, length_array, offset_array, MPI_BYTE, &ftype)))
+ if(MPI_SUCCESS != (mpi_code =
+ MPI_Type_create_hindexed(count, length_array,
+ offset_array, MPI_BYTE,
+ &ftype)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_create_hindexed failed", mpi_code)
+
ftype_created = TRUE;
+
if(MPI_SUCCESS != (mpi_code = MPI_Type_commit(&ftype)))
HMPI_GOTO_ERROR(FAIL, "MPI_Type_commit failed", mpi_code)
/* Pass buf type, file type to the file driver */
if(H5FD_mpi_setup_collective(dxpl_id, &btype, &ftype) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set MPI-I/O properties")
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, \
+ "can't set MPI-I/O properties")
/* Write data */
- if(H5F_block_write(f, H5FD_MEM_DEFAULT, (haddr_t)0, (size_t)1, dxpl_id, base_buf) < 0)
- HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to write entries collectively")
+ if(H5F_block_write(f, H5FD_MEM_DEFAULT, (haddr_t)0,
+ (size_t)1, dxpl_id, base_buf) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, \
+ "unable to write entries collectively")
+
} /* end if */
else {
MPI_Status mpi_stat;
- MPI_File mpi_fh_p;
+ MPI_File *mpi_fh_p;
MPI_File mpi_fh;
+ MPI_Info *info_p;
+ MPI_Info info;
if(H5F_get_mpi_handle(f, (MPI_File **)&mpi_fh_p) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "can't get mpi file handle")
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, \
+ "can't get mpi file handle")
+
mpi_fh = *(MPI_File*)mpi_fh_p;
- /* just to match up with the 1st MPI_File_set_view from H5FD_mpio_write() */
- if(MPI_SUCCESS != (mpi_code = MPI_File_set_view(mpi_fh, (MPI_Offset)0, MPI_BYTE, MPI_BYTE, "native", MPI_INFO_NULL)))
+ if (H5F_get_mpi_info(f, &info_p) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, \
+ "can't get mpi file info")
+
+ info = *info_p;
+
+ /* just to match up with the 1st MPI_File_set_view from
+ * H5FD_mpio_write()
+ */
+ if(MPI_SUCCESS != (mpi_code =
+ MPI_File_set_view(mpi_fh, (MPI_Offset)0, MPI_BYTE,
+ MPI_BYTE, "native",
+ info)))
HMPI_GOTO_ERROR(FAIL, "MPI_File_set_view failed", mpi_code)
/* just to match up with MPI_File_write_at_all from H5FD_mpio_write() */
HDmemset(&mpi_stat, 0, sizeof(MPI_Status));
- if(MPI_SUCCESS != (mpi_code = MPI_File_write_at_all(mpi_fh, (MPI_Offset)0, NULL, 0, MPI_BYTE, &mpi_stat)))
+ if(MPI_SUCCESS != (mpi_code =
+ MPI_File_write_at_all(mpi_fh, (MPI_Offset)0,
+ NULL, 0, MPI_BYTE, &mpi_stat)))
HMPI_GOTO_ERROR(FAIL, "MPI_File_write_at_all failed", mpi_code)
- /* just to match up with the 2nd MPI_File_set_view (reset) in H5FD_mpio_write() */
- if(MPI_SUCCESS != (mpi_code = MPI_File_set_view(mpi_fh, (MPI_Offset)0, MPI_BYTE, MPI_BYTE, "native", MPI_INFO_NULL)))
+ /* just to match up with the 2nd MPI_File_set_view (reset) in
+ * H5FD_mpio_write()
+ */
+ if(MPI_SUCCESS != (mpi_code =
+ MPI_File_set_view(mpi_fh, (MPI_Offset)0, MPI_BYTE,
+ MPI_BYTE, "native",
+ info)))
HMPI_GOTO_ERROR(FAIL, "MPI_File_set_view failed", mpi_code)
+
} /* end else */
done:
@@ -1493,10 +1116,517 @@ done:
if(orig_xfer_mode != H5FD_MPIO_COLLECTIVE) {
HDassert(plist);
if(H5P_set(plist, H5D_XFER_IO_XFER_MODE_NAME, &orig_xfer_mode) < 0)
- HDONE_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set MPI-I/O property")
+ HDONE_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, \
+ "can't set MPI-I/O property")
} /* end if */
FUNC_LEAVE_NOAPI(ret_value);
} /* end H5C__collective_write() */
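H5C__collective_write() builds matching memory and file hindexed datatypes from the per-entry lengths and offsets, installs them as the file view, and then has every rank, including those with nothing to write (the else branch above), take part in a single collective write. The standalone sketch below shows the same pattern outside HDF5; the file name, data layout, and the use of MPI_BOTTOM with absolute addresses are illustrative choices here, not what the library does internally.

#include <mpi.h>

int main(int argc, char **argv)
{
    int          rank, count, i;
    char         payload[2][8] = { "entryA", "entryB" };  /* fake entry images */
    int          lengths[2];
    MPI_Aint     mem_displs[2], file_offsets[2];
    MPI_Datatype btype = MPI_BYTE, ftype = MPI_BYTE;
    MPI_File     fh;
    MPI_Status   status;

    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    /* Pretend only even ranks own candidate entries to write */
    count = (rank % 2 == 0) ? 2 : 0;

    for (i = 0; i < count; i++) {
        lengths[i] = (int)sizeof(payload[i]);
        MPI_Get_address(payload[i], &mem_displs[i]);
        file_offsets[i] = (MPI_Aint)(rank * 16 + i * 8);   /* fake file addresses */
    }

    if (count > 0) {
        /* Memory type over absolute addresses, written relative to MPI_BOTTOM */
        MPI_Type_create_hindexed(count, lengths, mem_displs, MPI_BYTE, &btype);
        MPI_Type_commit(&btype);

        /* File type whose displacements are the entries' file offsets */
        MPI_Type_create_hindexed(count, lengths, file_offsets, MPI_BYTE, &ftype);
        MPI_Type_commit(&ftype);
    }

    MPI_File_open(MPI_COMM_WORLD, "sketch.bin",
                  MPI_MODE_CREATE | MPI_MODE_WRONLY, MPI_INFO_NULL, &fh);

    /* Every rank sets a (possibly trivial) view and joins the collective call,
     * mirroring the zero-count participation in the else branch above.
     */
    MPI_File_set_view(fh, 0, MPI_BYTE, ftype, "native", MPI_INFO_NULL);
    MPI_File_write_at_all(fh, 0, (count > 0) ? MPI_BOTTOM : NULL,
                          (count > 0) ? 1 : 0, btype, &status);

    if (count > 0) {
        MPI_Type_free(&btype);
        MPI_Type_free(&ftype);
    }
    MPI_File_close(&fh);
    MPI_Finalize();
    return 0;
}

Every rank must reach both MPI_File_set_view() and MPI_File_write_at_all(); skipping them on ranks that have no data would hang the collective, which is why the patch keeps the zero-count branch symmetric with the data-carrying one.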
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__flush_candidate_entries
+ *
+ * Purpose: Flush or clear (as indicated) the candidate entries that
+ * have been marked in the metadata cache. In so doing,
+ * observe rings and flush dependencies.
+ *
+ * Note that this function presumes that:
+ *
+ * 1) no candidate entries are protected,
+ *
+ * 2) all candidate entries are dirty, and
+ *
+ * 3) if a candidate entry has a dirty flush dependency
+ * child, that child is also a candidate entry.
+ *
+ * The function will fail if any of these preconditions are
+ * not met.
+ *
+ * Candidate entries are marked by setting either the
+ * flush_immediately or the clear_on_unprotect flags in the
+ * cache entry (but not both). Entries marked flush_immediately
+ * will be flushed, those marked clear_on_unprotect will be
+ * cleared.
+ *
+ * Note that this function is a modified version of
+ * H5C_flush_cache() -- any changes there may need to be
+ * reflected here and vice versa.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer
+ * 2/10/17
+ *
+ * Changes: None.
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__flush_candidate_entries(H5F_t *f, hid_t dxpl_id,
+ unsigned entries_to_flush[H5C_RING_NTYPES],
+ unsigned entries_to_clear[H5C_RING_NTYPES])
+{
+#if H5C_DO_SANITY_CHECKS
+ int i;
+ uint32_t index_len = 0;
+ size_t index_size = (size_t)0;
+ size_t clean_index_size = (size_t)0;
+ size_t dirty_index_size = (size_t)0;
+ size_t slist_size = (size_t)0;
+ uint32_t slist_len = 0;
+#endif /* H5C_DO_SANITY_CHECKS */
+ H5C_ring_t ring;
+ H5C_t * cache_ptr;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_STATIC
+
+ HDassert(f);
+ HDassert(f->shared);
+
+ cache_ptr = f->shared->cache;
+
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->slist_ptr);
+
+ HDassert(entries_to_flush[H5C_RING_UNDEFINED] == 0);
+ HDassert(entries_to_clear[H5C_RING_UNDEFINED] == 0);
+
+#if H5C_DO_SANITY_CHECKS
+ HDassert(cache_ptr->index_ring_len[H5C_RING_UNDEFINED] == 0);
+ HDassert(cache_ptr->index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->clean_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->dirty_index_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+ HDassert(cache_ptr->slist_ring_len[H5C_RING_UNDEFINED] == 0);
+ HDassert(cache_ptr->slist_ring_size[H5C_RING_UNDEFINED] == (size_t)0);
+
+ for(i = H5C_RING_USER; i < H5C_RING_NTYPES; i++) {
+ index_len += cache_ptr->index_ring_len[i];
+ index_size += cache_ptr->index_ring_size[i];
+ clean_index_size += cache_ptr->clean_index_ring_size[i];
+ dirty_index_size += cache_ptr->dirty_index_ring_size[i];
+
+ slist_len += cache_ptr->slist_ring_len[i];
+ slist_size += cache_ptr->slist_ring_size[i];
+ } /* end for */
+
+ HDassert(cache_ptr->index_len == index_len);
+ HDassert(cache_ptr->index_size == index_size);
+ HDassert(cache_ptr->clean_index_size == clean_index_size);
+ HDassert(cache_ptr->dirty_index_size == dirty_index_size);
+ HDassert(cache_ptr->slist_len == slist_len);
+ HDassert(cache_ptr->slist_size == slist_size);
+#endif /* H5C_DO_SANITY_CHECKS */
+
+#if H5C_DO_EXTREME_SANITY_CHECKS
+ if(H5C_validate_protected_entry_list(cache_ptr) < 0
+ || H5C_validate_pinned_entry_list(cache_ptr) < 0
+ || H5C_validate_lru_list(cache_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+
+ cache_ptr->flush_in_progress = TRUE;
+
+ /* flush each ring, starting from the outermost ring and
+ * working inward.
+ */
+ ring = H5C_RING_USER;
+ while(ring < H5C_RING_NTYPES) {
+ if(H5C__flush_candidates_in_ring(f, dxpl_id, ring, entries_to_flush[ring], entries_to_clear[ring]) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "flush candidates in ring failed")
+
+ ring++;
+ } /* end while */
+
+done:
+ cache_ptr->flush_in_progress = FALSE;
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__flush_candidate_entries() */
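The driver above walks the rings from the outermost (H5C_RING_USER) inward, so the innermost rings, which ultimately include the superblock, are handled last. A toy version of that driver is sketched below; the sk_* names and the reduced ring set are invented for illustration and only stand in for H5C_ring_t and H5C__flush_candidates_in_ring().

#include <stdio.h>

typedef enum {
    SK_RING_UNDEFINED = 0,  /* invalid / unset          */
    SK_RING_USER,           /* outermost: user metadata */
    SK_RING_FSM,            /* free space managers      */
    SK_RING_SBE,            /* superblock extension     */
    SK_RING_SB,             /* innermost: superblock    */
    SK_RING_NTYPES
} sk_ring_t;

/* Stand-in for H5C__flush_candidates_in_ring(): here it only reports. */
static int
sk_flush_ring(sk_ring_t ring, unsigned to_flush, unsigned to_clear)
{
    printf("ring %d: flush %u entries, clear %u entries\n",
           (int)ring, to_flush, to_clear);
    return 0;
}

/* Process rings from the outermost inward, failing fast like the driver above. */
int
sk_flush_candidates(const unsigned to_flush[SK_RING_NTYPES],
                    const unsigned to_clear[SK_RING_NTYPES])
{
    int ring;

    for (ring = SK_RING_USER; ring < SK_RING_NTYPES; ring++)
        if (sk_flush_ring((sk_ring_t)ring, to_flush[ring], to_clear[ring]) < 0)
            return -1;
    return 0;
}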
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__flush_candidates_in_ring
+ *
+ * Purpose: Flush or clear (as indicated) the candidate entries
+ * contained in the specified cache and ring. All candidate
+ * entries in rings outside the specified ring must have been
+ * flushed (or cleared) on entry.
+ *
+ * Note that this function presumes that:
+ *
+ * 1) no candidate entries are protected,
+ *
+ * 2) all candidate entries are dirty, and
+ *
+ * 3) if a candidate entry has a dirty flush dependency
+ * child, that child is also a candidate entry.
+ *
+ * The function will fail if any of these preconditions are
+ * not met.
+ *
+ * Candidate entries are marked by setting either the
+ * flush_immediately or the clear_on_unprotect flags in the
+ * cache entry (but not both). Entries marked flush_immediately
+ * will be flushed, those marked clear_on_unprotect will be
+ * cleared.
+ *
+ * Candidate entries residing in the LRU must be flushed
+ * (or cleared) in LRU order to avoid performance issues.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ * Programmer: John Mainzer
+ * 2/10/17
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__flush_candidates_in_ring(H5F_t *f, hid_t dxpl_id, H5C_ring_t ring,
+ unsigned entries_to_flush, unsigned entries_to_clear)
+{
+ H5C_t * cache_ptr;
+ hbool_t progress;
+ hbool_t restart_scan = FALSE;
+ unsigned entries_flushed = 0;
+ unsigned entries_cleared = 0;
+#if H5C_DO_SANITY_CHECKS
+ unsigned init_index_len;
+#endif /* H5C_DO_SANITY_CHECKS */
+ unsigned clear_flags = H5C__FLUSH_CLEAR_ONLY_FLAG |
+ H5C__GENERATE_IMAGE_FLAG |
+ H5C__UPDATE_PAGE_BUFFER_FLAG;
+ unsigned flush_flags = H5C__NO_FLAGS_SET;
+ unsigned op_flags;
+ H5C_cache_entry_t *op_ptr;
+ H5C_cache_entry_t *entry_ptr;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+ cache_ptr = f->shared->cache;
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+ HDassert(cache_ptr->slist_ptr);
+ HDassert(ring > H5C_RING_UNDEFINED);
+ HDassert(ring < H5C_RING_NTYPES);
+
+#if H5C_DO_EXTREME_SANITY_CHECKS
+ if((H5C_validate_protected_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_pinned_entry_list(cache_ptr) < 0) ||
+ (H5C_validate_lru_list(cache_ptr) < 0))
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "an extreme sanity check failed on entry")
+#endif /* H5C_DO_EXTREME_SANITY_CHECKS */
+
+#if H5C_DO_SANITY_CHECKS
+ /* index len should not change */
+ init_index_len = cache_ptr->index_len;
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ /* Examine entries in the LRU list, and flush or clear all entries
+ * so marked in the target ring.
+ *
+ * With the current implementation of flush dependencies, no entry
+ * in the LRU can have flush dependency children -- thus one pass
+ * through the LRU will be sufficient.
+ *
+ * It is possible that this will change -- hence the assertion.
+ */
+ restart_scan = FALSE;
+ entry_ptr = cache_ptr->LRU_tail_ptr;
+ while(((entries_flushed < entries_to_flush) || (entries_cleared < entries_to_clear))
+ && (entry_ptr != NULL)) {
+ hbool_t prev_is_dirty = FALSE;
+ H5C_cache_entry_t *next_ptr;
+
+ /* Entries in the LRU must not have flush dependency children */
+ HDassert(entry_ptr->flush_dep_nchildren == 0);
+
+ /* Remember dirty state of entry to advance to */
+ if(entry_ptr->prev != NULL)
+ prev_is_dirty = entry_ptr->prev->is_dirty;
+
+ /* If the entry is in the ring */
+ if(entry_ptr->ring == ring) {
+ /* If this process needs to clear this entry. */
+ if(entry_ptr->clear_on_unprotect) {
+ HDassert(entry_ptr->is_dirty);
+
+ /* Set entry and flags for operation */
+ op_ptr = entry_ptr;
+ op_flags = clear_flags;
+
+ /* Set next entry appropriately */
+ next_ptr = entry_ptr->next;
+
+ /* Reset entry flag */
+ entry_ptr->clear_on_unprotect = FALSE;
+ entries_cleared++;
+ } /* end if */
+ else if(entry_ptr->flush_immediately) {
+ HDassert(entry_ptr->is_dirty);
+
+ /* Set entry and flags for operation */
+ op_ptr = entry_ptr;
+ op_flags = flush_flags;
+
+ /* Set next entry appropriately */
+ next_ptr = entry_ptr->next;
+
+ /* Reset entry flag */
+ entry_ptr->flush_immediately = FALSE;
+ entries_flushed++;
+ } /* end else-if */
+ else {
+ /* No operation for this entry */
+ op_ptr = NULL;
+
+ /* Set next entry appropriately */
+ next_ptr = entry_ptr;
+ } /* end else */
+
+ /* Advance to next entry */
+ entry_ptr = entry_ptr->prev;
+
+ /* Check for operation */
+ if(op_ptr) {
+ /* reset entries_removed_counter and
+ * last_entry_removed_ptr prior to the call to
+ * H5C__flush_single_entry() so that we can spot
+ * unexpected removals of entries from the cache,
+ * and set the restart_scan flag if proceeding
+ * would be likely to cause us to scan an entry
+ * that is no longer in the cache.
+ *
+ * Note that as of this writing, this
+ * case cannot occur in the parallel case.
+ *
+ * Note also that there is no test code to verify
+ * that this code actually works (although similar code
+ * in the serial version exists and is tested).
+ */
+ cache_ptr->entries_removed_counter = 0;
+ cache_ptr->last_entry_removed_ptr = NULL;
+
+ if(H5C__flush_single_entry(f, dxpl_id, op_ptr, op_flags) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't flush entry")
+
+ if(cache_ptr->entries_removed_counter != 0
+ || cache_ptr->last_entry_removed_ptr != NULL)
+ restart_scan = TRUE;
+ } /* end if */
+ } /* end if */
+ else {
+ /* Remember "next" pointer (after advancing entries) */
+ next_ptr = entry_ptr;
+
+ /* Advance to next entry */
+ entry_ptr = entry_ptr->prev;
+ } /* end else */
+
+ /* Check for restarts, etc. */
+ if((entry_ptr != NULL) &&
+ (restart_scan || (entry_ptr->is_dirty != prev_is_dirty)
+ || (entry_ptr->next != next_ptr) || entry_ptr->is_protected
+ || entry_ptr->is_pinned)) {
+
+ /* Something has happened to the LRU -- start over
+ * from the tail.
+ *
+ * Recall that this code should be unreachable at present,
+ * as the entry operations on flush that could make it
+ * reachable are disallowed in the parallel case. Hence the
+ * following assertion, which should be removed if that
+ * changes.
+ */
+ HDassert(!restart_scan);
+ HDassert(entry_ptr->is_dirty == prev_is_dirty);
+ HDassert(entry_ptr->next == next_ptr);
+ HDassert(!entry_ptr->is_protected);
+ HDassert(!entry_ptr->is_pinned);
+
+ HDassert(FALSE); /* see comment above */
+
+ restart_scan = FALSE;
+ entry_ptr = cache_ptr->LRU_tail_ptr;
+
+ H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr)
+ } /* end if */
+ } /* end while */
+
+ /* It is also possible that some of the cleared entries are on the
+ * pinned list. Must scan that also.
+ *
+ * Observe that in the case of the pinned entry list, most of the
+ * entries will have flush dependency children. As entries with
+ * flush dependency children may not be flushed until all of their
+ * children are clean, multiple passes through the pinned entry list
+ * may be required.
+ *
+ * WARNING:
+ *
+ * As we now allow unpinning, and removal of other entries as a side
+ * effect of flushing an entry, it is possible that the next entry
+ * in a PEL scan could either be no longer pinned, or no longer in
+ * the cache by the time we get to it.
+ *
+ * At present, this should not be possible in this case, as we disallow
+ * such operations in the parallel version of the library. However,
+ * this may change, and to that end, I have included code to detect
+ * such changes and cause this function to fail if they are detected.
+ */
+ progress = TRUE;
+ while(progress && ((entries_flushed < entries_to_flush) || (entries_cleared < entries_to_clear))) {
+ progress = FALSE;
+ entry_ptr = cache_ptr->pel_head_ptr;
+ while((entry_ptr != NULL) &&
+ ((entries_flushed < entries_to_flush) || (entries_cleared < entries_to_clear))) {
+ H5C_cache_entry_t *prev_ptr;
+ hbool_t next_is_dirty = FALSE;
+
+ HDassert(entry_ptr->is_pinned);
+
+ /* Remember dirty state of entry to advance to */
+ if(entry_ptr->next != NULL)
+ next_is_dirty = entry_ptr->next->is_dirty;
+
+ if(entry_ptr->ring == ring && entry_ptr->flush_dep_ndirty_children == 0) {
+ if(entry_ptr->clear_on_unprotect) {
+ HDassert(entry_ptr->is_dirty);
+
+ /* Set entry and flags for operation */
+ op_ptr = entry_ptr;
+ op_flags = clear_flags;
+
+ /* Reset entry flag */
+ entry_ptr->clear_on_unprotect = FALSE;
+ entries_cleared++;
+ progress = TRUE;
+ } /* end if */
+ else if(entry_ptr->flush_immediately) {
+ HDassert(entry_ptr->is_dirty);
+
+ /* Set entry and flags for operation */
+ op_ptr = entry_ptr;
+ op_flags = flush_flags;
+
+ /* Reset entry flag */
+ entry_ptr->flush_immediately = FALSE;
+ entries_flushed++;
+ progress = TRUE;
+ } /* end else-if */
+ else
+ /* No operation for this entry */
+ op_ptr = NULL;
+
+ /* Check for operation */
+ if(op_ptr) {
+ /* reset entries_removed_counter and
+ * last_entry_removed_ptr prior to the call to
+ * H5C__flush_single_entry() so that we can spot
+ * unexpected removals of entries from the cache,
+ * and set the restart_scan flag if proceeding
+ * would be likely to cause us to scan an entry
+ * that is no longer in the cache.
+ *
+ * Note that as of this writing, this
+ * case cannot occur in the parallel case.
+ *
+ * Note also that there is no test code to verify
+ * that this code actually works (although similar code
+ * in the serial version exists and is tested).
+ */
+ cache_ptr->entries_removed_counter = 0;
+ cache_ptr->last_entry_removed_ptr = NULL;
+
+ /* Flush or clear the entry. Note that the entry is added to
+ * the collective write list only if that list exists.
+ *
+ * JRM -- 2/9/17
+ */
+ if(H5C__flush_single_entry(f, dxpl_id, op_ptr, op_flags) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't flush entry")
+
+ if(cache_ptr->entries_removed_counter != 0
+ || cache_ptr->last_entry_removed_ptr != NULL)
+ restart_scan = TRUE;
+ } /* end if */
+ } /* end if */
+
+ /* Remember "previous" pointer (after advancing entries) */
+ prev_ptr = entry_ptr;
+
+ /* Advance to next entry */
+ entry_ptr = entry_ptr->next;
+
+ /* Check for restarts, etc. */
+ if((entry_ptr != NULL) &&
+ (restart_scan || (entry_ptr->is_dirty != next_is_dirty)
+ || (entry_ptr->prev != prev_ptr) || entry_ptr->is_protected
+ || !entry_ptr->is_pinned)) {
+ /* Something has happened to the pinned entry list -- start
+ * over from the head.
+ *
+ * Recall that this code should be unreachable at present,
+ * as the entry operations on flush that could make it
+ * reachable are disallowed in the parallel case. Hence the
+ * following assertion, which should be removed if that
+ * changes.
+ */
+
+ HDassert(!restart_scan);
+ HDassert(entry_ptr->is_dirty == next_is_dirty);
+ HDassert(entry_ptr->prev == prev_ptr);
+ HDassert(!entry_ptr->is_protected);
+ HDassert(entry_ptr->is_pinned);
+
+ HDassert(FALSE); /* see comment above */
+
+ restart_scan = FALSE;
+
+ entry_ptr = cache_ptr->pel_head_ptr;
+
+ /* we don't keep stats for pinned entry list scan
+ * restarts. If this code ever becomes reachable,
+ * define the necessary field, and implement
+ * the following macro:
+ *
+ * H5C__UPDATE_STATS_FOR_PEL_SCAN_RESTART(cache_ptr)
+ */
+ } /* end if */
+ } /* end while ( ( entry_ptr != NULL ) &&
+ * ( ( entries_flushed < entries_to_flush ) ||
+ * ( entries_cleared < entries_to_clear ) ) )
+ */
+ } /* end while ( ( progress ) &&
+ * ( ( entries_flushed < entries_to_flush ) ||
+ * ( entries_cleared < entries_to_clear ) ) )
+ */
+
+#if H5C_DO_SANITY_CHECKS
+ HDassert(init_index_len == cache_ptr->index_len);
+#endif /* H5C_DO_SANITY_CHECKS */
+
+ if(entries_flushed != entries_to_flush || entries_cleared != entries_to_clear) {
+ entry_ptr = cache_ptr->il_head;
+ while(entry_ptr != NULL) {
+ HDassert(!entry_ptr->clear_on_unprotect || (entry_ptr->ring > ring));
+ HDassert(!entry_ptr->flush_immediately || (entry_ptr->ring > ring));
+ entry_ptr = entry_ptr->il_next;
+ } /* end while */
+
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "can't flush/clear all entries")
+ } /* end if */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C__flush_candidates_in_ring() */
#endif /* H5_HAVE_PARALLEL */
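Both scans in H5C__flush_candidates_in_ring() guard against the (currently impossible) case of a flush having side effects on the list being walked: the neighbour's expected state is snapshotted before the flush, and the scan restarts from the end of the list if the snapshot no longer matches. The simplified, self-contained model below sketches that guard; the sk_* names are invented, the toy flush merely moves the entry to the head of the list (standing in for the LRU update a real flush performs), and the entries_removed counter is only a hook for the eviction side effects the real code is worried about.

#include <stdbool.h>
#include <stddef.h>

typedef struct sk_entry_t {
    struct sk_entry_t *prev;
    struct sk_entry_t *next;
    bool               is_dirty;
    bool               marked;          /* candidate to flush */
} sk_entry_t;

typedef struct sk_lru_t {
    sk_entry_t *head;
    sk_entry_t *tail;
    unsigned    entries_removed;        /* side-effect detector */
} sk_lru_t;

/* Toy flush: mark the entry clean and move it to the head of the list.
 * A real flush callback could also evict *other* entries, which is what
 * entries_removed is meant to catch.
 */
static void
sk_flush(sk_lru_t *lru, sk_entry_t *entry)
{
    if (entry->prev) entry->prev->next = entry->next;
    if (entry->next) entry->next->prev = entry->prev;
    if (lru->head == entry) lru->head = entry->next;
    if (lru->tail == entry) lru->tail = entry->prev;

    entry->prev = NULL;
    entry->next = lru->head;
    if (lru->head) lru->head->prev = entry;
    lru->head = entry;
    if (lru->tail == NULL) lru->tail = entry;

    entry->is_dirty = false;
    entry->marked   = false;
}

void
sk_flush_marked(sk_lru_t *lru)
{
    sk_entry_t *entry = lru->tail;

    lru->entries_removed = 0;
    while (entry != NULL) {
        sk_entry_t *op            = entry->marked ? entry : NULL;
        /* After a flush the entry moves away, so the neighbour we advance
         * to should see entry->next in its place; with no flush, it
         * should still see this entry.
         */
        sk_entry_t *expected_next = op ? entry->next : entry;
        bool        prev_dirty    = (entry->prev != NULL) && entry->prev->is_dirty;

        entry = entry->prev;            /* advance before flushing */

        if (op != NULL) {
            lru->entries_removed = 0;
            sk_flush(lru, op);
        }

        /* If the list changed in a way we did not anticipate, restart
         * from the tail instead of chasing a possibly stale pointer.
         */
        if (entry != NULL &&
            (lru->entries_removed != 0 ||
             entry->is_dirty != prev_dirty ||
             entry->next != expected_next))
            entry = lru->tail;
    }
}

In the patch the corresponding mismatch is treated as fatal (HDassert(FALSE)), because the entry operations that could trigger it are disallowed in parallel builds at present.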
diff --git a/src/H5Cpkg.h b/src/H5Cpkg.h
index fae4d74..fdb14a5 100644
--- a/src/H5Cpkg.h
+++ b/src/H5Cpkg.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -45,6 +43,9 @@
/* Package Private Macros */
/**************************/
+/* Number of epoch markers active */
+#define H5C__MAX_EPOCH_MARKERS 10
+
/* Cache configuration settings */
#define H5C__HASH_TABLE_LEN (64 * 1024) /* must be a power of 2 */
#define H5C__H5C_T_MAGIC 0x005CAC0E
@@ -52,12 +53,6 @@
/* Initial allocated size of the "flush_dep_parent" array */
#define H5C_FLUSH_DEP_PARENT_INIT 8
-/* Cache client ID for epoch markers */
-/* Note that H5C__MAX_EPOCH_MARKERS is defined in H5Cprivate.h, not here because
- * it is needed to dimension arrays in H5C_t.
- */
-#define H5C__EPOCH_MARKER_TYPE H5C__MAX_NUM_TYPE_IDS
-
/****************************************************************************
*
* We maintain doubly linked lists of instances of H5C_cache_entry_t for a
@@ -153,6 +148,13 @@
*
* JRM - 9/8/05
*
+ * - Added macros supporting the index list -- a doubly linked list of
+ * all entries in the index. This list is necessary to reduce the
+ * cost of visiting all entries in the cache, which was previously
+ * done via a scan of the hash table.
+ *
+ * JRM - 10/15/15
+ *
****************************************************************************/
#if H5C_DO_SANITY_CHECKS
@@ -205,7 +207,6 @@ if ( ( (entry_ptr) == NULL ) || \
( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
( (head_ptr) != (tail_ptr) ) \
) || \
- ( (len) < 0 ) || \
( ( (len) == 1 ) && \
( ( (head_ptr) != (tail_ptr) ) || \
( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) \
@@ -371,7 +372,6 @@ if ( ( (entry_ptr) == NULL ) || \
( ( ( (hd_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
( (hd_ptr) != (tail_ptr) ) \
) || \
- ( (len) < 0 ) || \
( ( (len) == 1 ) && \
( ( (hd_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) || \
( (hd_ptr) == NULL ) || ( (hd_ptr)->size != (Size) ) \
@@ -459,6 +459,128 @@ if ( ( (entry_ptr) == NULL ) || \
} \
} /* H5C__AUX_DLL_REMOVE() */
+#if H5C_DO_SANITY_CHECKS
+
+#define H5C__IL_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) \
+if ( ( (hd_ptr) == NULL ) || \
+ ( (tail_ptr) == NULL ) || \
+ ( (entry_ptr) == NULL ) || \
+ ( (len) <= 0 ) || \
+ ( (Size) < (entry_ptr)->size ) || \
+ ( ( (Size) == (entry_ptr)->size ) && ( ! ( (len) == 1 ) ) ) || \
+ ( ( (entry_ptr)->il_prev == NULL ) && ( (hd_ptr) != (entry_ptr) ) ) || \
+ ( ( (entry_ptr)->il_next == NULL ) && ( (tail_ptr) != (entry_ptr) ) ) || \
+ ( ( (len) == 1 ) && \
+ ( ! ( ( (hd_ptr) == (entry_ptr) ) && ( (tail_ptr) == (entry_ptr) ) && \
+ ( (entry_ptr)->il_next == NULL ) && \
+ ( (entry_ptr)->il_prev == NULL ) && \
+ ( (Size) == (entry_ptr)->size ) \
+ ) \
+ ) \
+ ) \
+ ) { \
+ HDassert(0 && "il DLL pre remove SC failed"); \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "il DLL pre remove SC failed") \
+}
+
+#define H5C__IL_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) \
+if ( ( (entry_ptr) == NULL ) || \
+ ( (entry_ptr)->il_next != NULL ) || \
+ ( (entry_ptr)->il_prev != NULL ) || \
+ ( ( ( (hd_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
+ ( (hd_ptr) != (tail_ptr) ) \
+ ) || \
+ ( ( (len) == 1 ) && \
+ ( ( (hd_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) || \
+ ( (hd_ptr) == NULL ) || ( (hd_ptr)->size != (Size) ) \
+ ) \
+ ) || \
+ ( ( (len) >= 1 ) && \
+ ( ( (hd_ptr) == NULL ) || ( (hd_ptr)->il_prev != NULL ) || \
+ ( (tail_ptr) == NULL ) || ( (tail_ptr)->il_next != NULL ) \
+ ) \
+ ) \
+ ) { \
+ HDassert(0 && "IL DLL pre insert SC failed"); \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "IL DLL pre insert SC failed") \
+}
+
+#define H5C__IL_DLL_SC(head_ptr, tail_ptr, len, Size, fv) \
+if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
+ ( (head_ptr) != (tail_ptr) ) \
+ ) || \
+ ( ( (len) == 1 ) && \
+ ( ( (head_ptr) != (tail_ptr) ) || \
+ ( (head_ptr) == NULL ) || ( (head_ptr)->size != (Size) ) \
+ ) \
+ ) || \
+ ( ( (len) >= 1 ) && \
+ ( ( (head_ptr) == NULL ) || ( (head_ptr)->il_prev != NULL ) || \
+ ( (tail_ptr) == NULL ) || ( (tail_ptr)->il_next != NULL ) \
+ ) \
+ ) \
+ ) { \
+ HDassert(0 && "IL DLL sanity check failed"); \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "IL DLL sanity check failed") \
+}
+
+#else /* H5C_DO_SANITY_CHECKS */
+
+#define H5C__IL_DLL_PRE_REMOVE_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv)
+#define H5C__IL_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv)
+#define H5C__IL_DLL_SC(head_ptr, tail_ptr, len, Size, fv)
+
+#endif /* H5C_DO_SANITY_CHECKS */
+
+
+#define H5C__IL_DLL_APPEND(entry_ptr, head_ptr, tail_ptr, len, Size, fail_val)\
+{ \
+ H5C__IL_DLL_PRE_INSERT_SC(entry_ptr, head_ptr, tail_ptr, len, Size, \
+ fail_val) \
+ if ( (head_ptr) == NULL ) \
+ { \
+ (head_ptr) = (entry_ptr); \
+ (tail_ptr) = (entry_ptr); \
+ } \
+ else \
+ { \
+ (tail_ptr)->il_next = (entry_ptr); \
+ (entry_ptr)->il_prev = (tail_ptr); \
+ (tail_ptr) = (entry_ptr); \
+ } \
+ (len)++; \
+ (Size) += entry_ptr->size; \
+ H5C__IL_DLL_SC(head_ptr, tail_ptr, len, Size, fail_val) \
+} /* H5C__IL_DLL_APPEND() */
+
+#define H5C__IL_DLL_REMOVE(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \
+{ \
+ H5C__IL_DLL_PRE_REMOVE_SC(entry_ptr, head_ptr, tail_ptr, len, Size, fv) \
+ { \
+ if ( (head_ptr) == (entry_ptr) ) \
+ { \
+ (head_ptr) = (entry_ptr)->il_next; \
+ if ( (head_ptr) != NULL ) \
+ (head_ptr)->il_prev = NULL; \
+ } \
+ else \
+ (entry_ptr)->il_prev->il_next = (entry_ptr)->il_next; \
+ if ( (tail_ptr) == (entry_ptr) ) \
+ { \
+ (tail_ptr) = (entry_ptr)->il_prev; \
+ if ( (tail_ptr) != NULL ) \
+ (tail_ptr)->il_next = NULL; \
+ } \
+ else \
+ (entry_ptr)->il_next->il_prev = (entry_ptr)->il_prev; \
+ entry_ptr->il_next = NULL; \
+ entry_ptr->il_prev = NULL; \
+ (len)--; \
+ (Size) -= entry_ptr->size; \
+ } \
+ H5C__IL_DLL_SC(head_ptr, tail_ptr, len, Size, fv) \
+} /* H5C__IL_DLL_REMOVE() */
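The index-list macros above are ordinary doubly-linked-list maintenance plus length/size bookkeeping, wrapped in the optional sanity checks. For readability, the same two operations written as plain functions over hypothetical il_* types are sketched below (not HDF5 code; asserts stand in for the H5C__IL_DLL_*_SC checks).

#include <assert.h>
#include <stddef.h>

typedef struct il_entry_t {
    struct il_entry_t *il_next;
    struct il_entry_t *il_prev;
    size_t             size;
} il_entry_t;

typedef struct il_list_t {
    il_entry_t *head;
    il_entry_t *tail;
    unsigned    len;
    size_t      size;
} il_list_t;

/* Same bookkeeping as H5C__IL_DLL_APPEND */
void
il_append(il_list_t *list, il_entry_t *entry)
{
    assert(entry->il_next == NULL && entry->il_prev == NULL);

    if (list->head == NULL)
        list->head = list->tail = entry;
    else {
        list->tail->il_next = entry;
        entry->il_prev      = list->tail;
        list->tail          = entry;
    }
    list->len++;
    list->size += entry->size;
}

/* Same bookkeeping as H5C__IL_DLL_REMOVE */
void
il_remove(il_list_t *list, il_entry_t *entry)
{
    assert(list->len > 0 && list->size >= entry->size);

    if (list->head == entry) {
        list->head = entry->il_next;
        if (list->head != NULL)
            list->head->il_prev = NULL;
    }
    else
        entry->il_prev->il_next = entry->il_next;

    if (list->tail == entry) {
        list->tail = entry->il_prev;
        if (list->tail != NULL)
            list->tail->il_next = NULL;
    }
    else
        entry->il_next->il_prev = entry->il_prev;

    entry->il_next = entry->il_prev = NULL;
    list->len--;
    list->size -= entry->size;
}

Keeping len and size in step with the pointer updates is what lets the new index_len == il_len and index_size == il_size checks added to the hash-table sanity macros later in this header remain cheap.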
+
/***********************************************************************
*
@@ -471,23 +593,6 @@ if ( ( (entry_ptr) == NULL ) || \
* H5C__UPDATE_CACHE_HIT_RATE_STATS(), which is always active as
* the cache hit rate stats are always collected and available.
*
- * Changes:
- *
- * JRM -- 3/21/06
- * Added / updated macros for pinned entry related stats.
- *
- * JRM -- 8/9/06
- * More pinned entry stats related updates.
- *
- * JRM -- 3/31/07
- * Updated H5C__UPDATE_STATS_FOR_PROTECT() to keep stats on
- * read and write protects.
- *
- * MAM -- 1/15/09
- * Created H5C__UPDATE_MAX_INDEX_SIZE_STATS to contain
- * common code within macros that update the maximum
- * index, clean_index, and dirty_index statistics fields.
- *
***********************************************************************/
#define H5C__UPDATE_CACHE_HIT_RATE_STATS(cache_ptr, hit) \
@@ -528,7 +633,8 @@ if ( ( (entry_ptr) == NULL ) || \
((cache_ptr)->cache_flush_moves[(entry_ptr)->type->id])++; \
if ( entry_ptr->flush_in_progress ) \
((cache_ptr)->entry_flush_moves[(entry_ptr)->type->id])++; \
- (((cache_ptr)->moves)[(entry_ptr)->type->id])++;
+ (((cache_ptr)->moves)[(entry_ptr)->type->id])++; \
+ (cache_ptr)->entries_relocated_counter++;
#define H5C__UPDATE_STATS_FOR_ENTRY_SIZE_CHANGE(cache_ptr, entry_ptr, new_size)\
if ( cache_ptr->flush_in_progress ) \
@@ -570,8 +676,40 @@ if ( ( (entry_ptr) == NULL ) || \
#define H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr) \
((cache_ptr)->LRU_scan_restarts)++;
-#define H5C__UPDATE_STATS_FOR_HASH_BUCKET_SCAN_RESTART(cache_ptr) \
- ((cache_ptr)->hash_bucket_scan_restarts)++;
+#define H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr) \
+ ((cache_ptr)->index_scan_restarts)++;
+
+#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_CREATE(cache_ptr) \
+{ \
+ (cache_ptr)->images_created++; \
+}
+
+#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_READ(cache_ptr) \
+{ \
+ /* make sure image len is still good */ \
+ HDassert((cache_ptr)->image_len > 0); \
+ (cache_ptr)->images_read++; \
+}
+
+#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_LOAD(cache_ptr) \
+{ \
+ /* make sure image len is still good */ \
+ HDassert((cache_ptr)->image_len > 0); \
+ (cache_ptr)->images_loaded++; \
+ (cache_ptr)->last_image_size = (cache_ptr)->image_len; \
+}
+
+#define H5C__UPDATE_STATS_FOR_PREFETCH(cache_ptr, dirty) \
+{ \
+ (cache_ptr)->prefetches++; \
+ if ( dirty ) \
+ (cache_ptr)->dirty_prefetches++; \
+}
+
+#define H5C__UPDATE_STATS_FOR_PREFETCH_HIT(cache_ptr) \
+{ \
+ (cache_ptr)->prefetch_hits++; \
+}
#if H5C_COLLECT_CACHE_ENTRY_STATS
@@ -654,6 +792,7 @@ if ( ( (entry_ptr) == NULL ) || \
((cache_ptr)->max_size)[(entry_ptr)->type->id] ) \
((cache_ptr)->max_size)[(entry_ptr)->type->id] \
= (entry_ptr)->size; \
+ cache_ptr->entries_inserted_counter++; \
}
#define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) \
@@ -738,6 +877,7 @@ if ( ( (entry_ptr) == NULL ) || \
(cache_ptr)->max_slist_len = (cache_ptr)->slist_len; \
if ( (cache_ptr)->slist_size > (cache_ptr)->max_slist_size ) \
(cache_ptr)->max_slist_size = (cache_ptr)->slist_size; \
+ cache_ptr->entries_inserted_counter++; \
}
#define H5C__UPDATE_STATS_FOR_PROTECT(cache_ptr, entry_ptr, hit) \
@@ -794,7 +934,12 @@ if ( ( (entry_ptr) == NULL ) || \
#define H5C__UPDATE_STATS_FOR_UNPIN(cache_ptr, entry_ptr)
#define H5C__UPDATE_STATS_FOR_SLIST_SCAN_RESTART(cache_ptr)
#define H5C__UPDATE_STATS_FOR_LRU_SCAN_RESTART(cache_ptr)
-#define H5C__UPDATE_STATS_FOR_HASH_BUCKET_SCAN_RESTART(cache_ptr)
+#define H5C__UPDATE_STATS_FOR_INDEX_SCAN_RESTART(cache_ptr)
+#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_CREATE(cache_ptr)
+#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_READ(cache_ptr)
+#define H5C__UPDATE_STATS_FOR_CACHE_IMAGE_LOAD(cache_ptr)
+#define H5C__UPDATE_STATS_FOR_PREFETCH(cache_ptr, dirty)
+#define H5C__UPDATE_STATS_FOR_PREFETCH_HIT(cache_ptr)
#endif /* H5C_COLLECT_CACHE_STATS */
@@ -823,6 +968,14 @@ if ( ( (entry_ptr) == NULL ) || \
*
* JRM -- 9/1/15
*
+ * - Updated existing index macros and sanity check macros to
+ * maintain a doubly linked list of all entries in the index.
+ * This is necessary to reduce the computational cost of visiting
+ * all entries in the index, which used to be done by scanning
+ * the hash table.
+ *
+ * JRM -- 10/15/15
+ *
***********************************************************************/
/* H5C__HASH_TABLE_LEN is defined in H5Cpkg.h. It must be a power of two. */
@@ -856,13 +1009,14 @@ if ( ( (cache_ptr) == NULL ) || \
(cache_ptr)->index_size ) || \
( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
- (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) || \
+ ( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \
+ ( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \
HDassert(FALSE); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, \
- "Pre HT insert SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "pre HT insert SC failed") \
}
-#define H5C__POST_HT_INSERT_SC(cache_ptr, fail_val) \
+#define H5C__POST_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \
if ( ( (cache_ptr) == NULL ) || \
( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
( (cache_ptr)->index_size != \
@@ -877,10 +1031,11 @@ if ( ( (cache_ptr) == NULL ) || \
(cache_ptr)->index_size ) || \
( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
- (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) || \
+ ( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \
+ ( (cache_ptr)->index_size != (cache_ptr)->il_size) ) { \
HDassert(FALSE); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, \
- "Post HT insert SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "post HT insert SC failed") \
}
#define H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr) \
@@ -917,9 +1072,11 @@ if ( ( (cache_ptr) == NULL ) || \
(cache_ptr)->index_size ) || \
( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
- (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) || \
+ ( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \
+ ( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \
HDassert(FALSE); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Pre HT remove SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pre HT remove SC failed") \
}
#define H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr) \
@@ -941,9 +1098,11 @@ if ( ( (cache_ptr) == NULL ) || \
(cache_ptr)->index_size ) || \
( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
- (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) || \
+ ( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \
+ ( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \
HDassert(FALSE); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "Post HT remove SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "post HT remove SC failed") \
}
/* (Keep in sync w/H5C_TEST__PRE_HT_SEARCH_SC macro in test/cache_common.h -QAK) */
@@ -955,11 +1114,11 @@ if ( ( (cache_ptr) == NULL ) || \
( ! H5F_addr_defined(Addr) ) || \
( H5C__HASH_FCN(Addr) < 0 ) || \
( H5C__HASH_FCN(Addr) >= H5C__HASH_TABLE_LEN ) ) { \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "Pre HT search SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "pre HT search SC failed") \
}
/* (Keep in sync w/H5C_TEST__POST_SUC_HT_SEARCH_SC macro in test/cache_common.h -QAK) */
-#define H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, Addr, k, fail_val) \
+#define H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, k, fail_val) \
if ( ( (cache_ptr) == NULL ) || \
( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
( (cache_ptr)->index_len < 1 ) || \
@@ -967,7 +1126,6 @@ if ( ( (cache_ptr) == NULL ) || \
( (cache_ptr)->index_size < (entry_ptr)->size ) || \
( (cache_ptr)->index_size != \
((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
- ( H5F_addr_ne((entry_ptr)->addr, (Addr)) ) || \
( (entry_ptr)->size <= 0 ) || \
( ((cache_ptr)->index)[k] == NULL ) || \
( ( ((cache_ptr)->index)[k] != (entry_ptr) ) && \
@@ -978,8 +1136,7 @@ if ( ( (cache_ptr) == NULL ) || \
( (entry_ptr)->ht_prev->ht_next != (entry_ptr) ) ) || \
( ( (entry_ptr)->ht_next != NULL ) && \
( (entry_ptr)->ht_next->ht_prev != (entry_ptr) ) ) ) { \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, \
- "Post successful HT search SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "post successful HT search SC failed") \
}
/* (Keep in sync w/H5C_TEST__POST_HT_SHIFT_TO_FRONT macro in test/cache_common.h -QAK) */
@@ -987,8 +1144,7 @@ if ( ( (cache_ptr) == NULL ) || \
if ( ( (cache_ptr) == NULL ) || \
( ((cache_ptr)->index)[k] != (entry_ptr) ) || \
( (entry_ptr)->ht_prev != NULL ) ) { \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, \
- "Post HT shift to front SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, fail_val, "post HT shift to front SC failed") \
}
#define H5C__PRE_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
@@ -1019,10 +1175,11 @@ if ( ( (cache_ptr) == NULL ) || \
(cache_ptr)->index_size ) || \
( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
- (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) || \
+ ( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \
+ ( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \
HDassert(FALSE); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "Pre HT entry size change SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pre HT entry size change SC failed") \
}
#define H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
@@ -1048,10 +1205,11 @@ if ( ( (cache_ptr) == NULL ) || \
(cache_ptr)->index_size ) || \
( (cache_ptr)->index_ring_size[(entry_ptr)->ring] != \
((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
- (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
+ (cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) || \
+ ( (cache_ptr)->index_len != (cache_ptr)->il_len ) || \
+ ( (cache_ptr)->index_size != (cache_ptr)->il_size ) ) { \
HDassert(FALSE); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "Post HT entry size change SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "post HT entry size change SC failed") \
}
#define H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr) \
@@ -1078,8 +1236,7 @@ if ( \
((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
(cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
HDassert(FALSE); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "Pre HT update for entry clean SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pre HT update for entry clean SC failed") \
}
#define H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr) \
@@ -1106,8 +1263,7 @@ if ( \
((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
(cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
HDassert(FALSE); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "Pre HT update for entry dirty SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "pre HT update for entry dirty SC failed") \
}
#define H5C__POST_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr) \
@@ -1123,8 +1279,7 @@ if ( ( (cache_ptr)->index_size != \
((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
(cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
HDassert(FALSE); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "Post HT update for entry clean SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "post HT update for entry clean SC failed") \
}
#define H5C__POST_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr) \
@@ -1140,18 +1295,17 @@ if ( ( (cache_ptr)->index_size != \
((cache_ptr)->clean_index_ring_size[(entry_ptr)->ring] + \
(cache_ptr)->dirty_index_ring_size[(entry_ptr)->ring]) ) ) { \
HDassert(FALSE); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, \
- "Post HT update for entry dirty SC failed") \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "post HT update for entry dirty SC failed") \
}
#else /* H5C_DO_SANITY_CHECKS */
#define H5C__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val)
-#define H5C__POST_HT_INSERT_SC(cache_ptr, fail_val)
+#define H5C__POST_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val)
#define H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr)
#define H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr)
#define H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val)
-#define H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, Addr, k, fail_val)
+#define H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, k, fail_val)
#define H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val)
#define H5C__PRE_HT_UPDATE_FOR_ENTRY_CLEAN_SC(cache_ptr, entry_ptr)
#define H5C__PRE_HT_UPDATE_FOR_ENTRY_DIRTY_SC(cache_ptr, entry_ptr)
@@ -1165,71 +1319,77 @@ if ( ( (cache_ptr)->index_size != \
#endif /* H5C_DO_SANITY_CHECKS */
-#define H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, fail_val) \
-{ \
- int k; \
- H5C__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \
- k = H5C__HASH_FCN((entry_ptr)->addr); \
- if(((cache_ptr)->index)[k] != NULL) { \
- (entry_ptr)->ht_next = ((cache_ptr)->index)[k]; \
- (entry_ptr)->ht_next->ht_prev = (entry_ptr); \
- } \
- ((cache_ptr)->index)[k] = (entry_ptr); \
- (cache_ptr)->index_len++; \
- (cache_ptr)->index_size += (entry_ptr)->size; \
- ((cache_ptr)->index_ring_len[entry_ptr->ring])++; \
- ((cache_ptr)->index_ring_size[entry_ptr->ring]) \
- += (entry_ptr)->size; \
- if((entry_ptr)->is_dirty) { \
- (cache_ptr)->dirty_index_size += (entry_ptr)->size; \
- ((cache_ptr)->dirty_index_ring_size[entry_ptr->ring]) \
- += (entry_ptr)->size; \
- } else { \
- (cache_ptr)->clean_index_size += (entry_ptr)->size; \
- ((cache_ptr)->clean_index_ring_size[entry_ptr->ring]) \
- += (entry_ptr)->size; \
- } \
- if ((entry_ptr)->flush_me_last) { \
- (cache_ptr)->num_last_entries++; \
- HDassert((cache_ptr)->num_last_entries <= 2); \
- } \
- H5C__UPDATE_STATS_FOR_HT_INSERTION(cache_ptr) \
- H5C__POST_HT_INSERT_SC(cache_ptr, fail_val) \
+#define H5C__INSERT_IN_INDEX(cache_ptr, entry_ptr, fail_val) \
+{ \
+ int k; \
+ H5C__PRE_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \
+ k = H5C__HASH_FCN((entry_ptr)->addr); \
+ if(((cache_ptr)->index)[k] != NULL) { \
+ (entry_ptr)->ht_next = ((cache_ptr)->index)[k]; \
+ (entry_ptr)->ht_next->ht_prev = (entry_ptr); \
+ } \
+ ((cache_ptr)->index)[k] = (entry_ptr); \
+ (cache_ptr)->index_len++; \
+ (cache_ptr)->index_size += (entry_ptr)->size; \
+ ((cache_ptr)->index_ring_len[entry_ptr->ring])++; \
+ ((cache_ptr)->index_ring_size[entry_ptr->ring]) \
+ += (entry_ptr)->size; \
+ if((entry_ptr)->is_dirty) { \
+ (cache_ptr)->dirty_index_size += (entry_ptr)->size; \
+ ((cache_ptr)->dirty_index_ring_size[entry_ptr->ring]) \
+ += (entry_ptr)->size; \
+ } else { \
+ (cache_ptr)->clean_index_size += (entry_ptr)->size; \
+ ((cache_ptr)->clean_index_ring_size[entry_ptr->ring]) \
+ += (entry_ptr)->size; \
+ } \
+ if((entry_ptr)->flush_me_last) { \
+ (cache_ptr)->num_last_entries++; \
+ HDassert((cache_ptr)->num_last_entries <= 2); \
+ } \
+ H5C__IL_DLL_APPEND((entry_ptr), (cache_ptr)->il_head, \
+ (cache_ptr)->il_tail, (cache_ptr)->il_len, \
+ (cache_ptr)->il_size, fail_val) \
+ H5C__UPDATE_STATS_FOR_HT_INSERTION(cache_ptr) \
+ H5C__POST_HT_INSERT_SC(cache_ptr, entry_ptr, fail_val) \
}
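
The only functional additions in the rewritten insert macro above are the H5C__IL_DLL_APPEND call and the extra entry_ptr argument to the post-insert sanity check. For readers who want the shape of that append without chasing the macro definition, a minimal sketch follows; it assumes only the il_head/il_tail/il_len/il_size cache fields and the per-entry il_next/il_prev links that this patch introduces, and the helper name is illustrative rather than HDF5 API.

    /* Minimal sketch (not the HDF5 macro) of appending an entry to the
     * index list so that it tracks the hash table insertion above.
     */
    static void
    il_append_sketch(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr)
    {
        entry_ptr->il_next = NULL;
        entry_ptr->il_prev = cache_ptr->il_tail;
        if(cache_ptr->il_tail != NULL)
            cache_ptr->il_tail->il_next = entry_ptr;
        else
            cache_ptr->il_head = entry_ptr;     /* list was empty */
        cache_ptr->il_tail = entry_ptr;
        cache_ptr->il_len++;
        cache_ptr->il_size += entry_ptr->size;
    }
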
-#define H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr) \
-{ \
- int k; \
- H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr) \
- k = H5C__HASH_FCN((entry_ptr)->addr); \
- if ( (entry_ptr)->ht_next ) \
- (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \
- if ( (entry_ptr)->ht_prev ) \
- (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \
- if ( ((cache_ptr)->index)[k] == (entry_ptr) ) \
- ((cache_ptr)->index)[k] = (entry_ptr)->ht_next; \
- (entry_ptr)->ht_next = NULL; \
- (entry_ptr)->ht_prev = NULL; \
- (cache_ptr)->index_len--; \
- (cache_ptr)->index_size -= (entry_ptr)->size; \
- ((cache_ptr)->index_ring_len[entry_ptr->ring])--; \
- ((cache_ptr)->index_ring_size[entry_ptr->ring]) \
- -= (entry_ptr)->size; \
- if((entry_ptr)->is_dirty) { \
- (cache_ptr)->dirty_index_size -= (entry_ptr)->size; \
- ((cache_ptr)->dirty_index_ring_size[entry_ptr->ring]) \
- -= (entry_ptr)->size; \
- } else { \
- (cache_ptr)->clean_index_size -= (entry_ptr)->size; \
- ((cache_ptr)->clean_index_ring_size[entry_ptr->ring]) \
- -= (entry_ptr)->size; \
- } \
- if((entry_ptr)->flush_me_last) { \
- (cache_ptr)->num_last_entries--; \
- HDassert((cache_ptr)->num_last_entries <= 1); \
- } \
- H5C__UPDATE_STATS_FOR_HT_DELETION(cache_ptr) \
- H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr) \
+#define H5C__DELETE_FROM_INDEX(cache_ptr, entry_ptr, fail_val) \
+{ \
+ int k; \
+ H5C__PRE_HT_REMOVE_SC(cache_ptr, entry_ptr) \
+ k = H5C__HASH_FCN((entry_ptr)->addr); \
+ if((entry_ptr)->ht_next) \
+ (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \
+ if((entry_ptr)->ht_prev) \
+ (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \
+ if(((cache_ptr)->index)[k] == (entry_ptr)) \
+ ((cache_ptr)->index)[k] = (entry_ptr)->ht_next; \
+ (entry_ptr)->ht_next = NULL; \
+ (entry_ptr)->ht_prev = NULL; \
+ (cache_ptr)->index_len--; \
+ (cache_ptr)->index_size -= (entry_ptr)->size; \
+ ((cache_ptr)->index_ring_len[entry_ptr->ring])--; \
+ ((cache_ptr)->index_ring_size[entry_ptr->ring]) \
+ -= (entry_ptr)->size; \
+ if((entry_ptr)->is_dirty) { \
+ (cache_ptr)->dirty_index_size -= (entry_ptr)->size; \
+ ((cache_ptr)->dirty_index_ring_size[entry_ptr->ring]) \
+ -= (entry_ptr)->size; \
+ } else { \
+ (cache_ptr)->clean_index_size -= (entry_ptr)->size; \
+ ((cache_ptr)->clean_index_ring_size[entry_ptr->ring]) \
+ -= (entry_ptr)->size; \
+ } \
+ if((entry_ptr)->flush_me_last) { \
+ (cache_ptr)->num_last_entries--; \
+ HDassert((cache_ptr)->num_last_entries <= 1); \
+ } \
+ H5C__IL_DLL_REMOVE((entry_ptr), (cache_ptr)->il_head, \
+ (cache_ptr)->il_tail, (cache_ptr)->il_len, \
+ (cache_ptr)->il_size, fail_val) \
+ H5C__UPDATE_STATS_FOR_HT_DELETION(cache_ptr) \
+ H5C__POST_HT_REMOVE_SC(cache_ptr, entry_ptr) \
}
#define H5C__SEARCH_INDEX(cache_ptr, Addr, entry_ptr, fail_val) \
@@ -1239,51 +1399,51 @@ if ( ( (cache_ptr)->index_size != \
H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val) \
k = H5C__HASH_FCN(Addr); \
entry_ptr = ((cache_ptr)->index)[k]; \
- while ( ( entry_ptr ) && ( H5F_addr_ne(Addr, (entry_ptr)->addr) ) ) { \
+ while(entry_ptr) { \
+ if(H5F_addr_eq(Addr, (entry_ptr)->addr)) { \
+ H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, k, fail_val) \
+ if(entry_ptr != ((cache_ptr)->index)[k]) { \
+ if((entry_ptr)->ht_next) \
+ (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \
+ HDassert((entry_ptr)->ht_prev != NULL); \
+ (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \
+ ((cache_ptr)->index)[k]->ht_prev = (entry_ptr); \
+ (entry_ptr)->ht_next = ((cache_ptr)->index)[k]; \
+ (entry_ptr)->ht_prev = NULL; \
+ ((cache_ptr)->index)[k] = (entry_ptr); \
+ H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val) \
+ } \
+ break; \
+ } \
(entry_ptr) = (entry_ptr)->ht_next; \
(depth)++; \
} \
- if ( entry_ptr ) { \
- H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, Addr, k, fail_val) \
- if ( entry_ptr != ((cache_ptr)->index)[k] ) { \
- if ( (entry_ptr)->ht_next ) \
- (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \
- HDassert( (entry_ptr)->ht_prev != NULL ); \
- (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \
- ((cache_ptr)->index)[k]->ht_prev = (entry_ptr); \
- (entry_ptr)->ht_next = ((cache_ptr)->index)[k]; \
- (entry_ptr)->ht_prev = NULL; \
- ((cache_ptr)->index)[k] = (entry_ptr); \
- H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val) \
- } \
- } \
H5C__UPDATE_STATS_FOR_HT_SEARCH(cache_ptr, (entry_ptr != NULL), depth) \
}
#define H5C__SEARCH_INDEX_NO_STATS(cache_ptr, Addr, entry_ptr, fail_val) \
{ \
int k; \
- int depth = 0; \
H5C__PRE_HT_SEARCH_SC(cache_ptr, Addr, fail_val) \
k = H5C__HASH_FCN(Addr); \
entry_ptr = ((cache_ptr)->index)[k]; \
- while ( ( entry_ptr ) && ( H5F_addr_ne(Addr, (entry_ptr)->addr) ) ) { \
- (entry_ptr) = (entry_ptr)->ht_next; \
- (depth)++; \
- } \
- if ( entry_ptr ) { \
- H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, Addr, k, fail_val) \
- if ( entry_ptr != ((cache_ptr)->index)[k] ) { \
- if ( (entry_ptr)->ht_next ) \
- (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \
- HDassert( (entry_ptr)->ht_prev != NULL ); \
- (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \
- ((cache_ptr)->index)[k]->ht_prev = (entry_ptr); \
- (entry_ptr)->ht_next = ((cache_ptr)->index)[k]; \
- (entry_ptr)->ht_prev = NULL; \
- ((cache_ptr)->index)[k] = (entry_ptr); \
- H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val) \
+ while(entry_ptr) { \
+ if(H5F_addr_eq(Addr, (entry_ptr)->addr)) { \
+ H5C__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, k, fail_val) \
+ if(entry_ptr != ((cache_ptr)->index)[k]) { \
+ if((entry_ptr)->ht_next) \
+ (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \
+ HDassert((entry_ptr)->ht_prev != NULL); \
+ (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \
+ ((cache_ptr)->index)[k]->ht_prev = (entry_ptr); \
+ (entry_ptr)->ht_next = ((cache_ptr)->index)[k]; \
+ (entry_ptr)->ht_prev = NULL; \
+ ((cache_ptr)->index)[k] = (entry_ptr); \
+ H5C__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k, fail_val) \
+ } \
+ break; \
} \
+ (entry_ptr) = (entry_ptr)->ht_next; \
} \
}
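
Both search macros above now fold the move-to-front step into the chain walk instead of performing it after the loop, but the observable behavior is unchanged. As a rough functional sketch of what either macro does (sanity checks and statistics omitted; the helper name is illustrative only):

    /* Walk the bucket's chain; on a hit, move the entry to the front of
     * that chain so that subsequent lookups of hot entries stay short.
     */
    static H5C_cache_entry_t *
    search_index_sketch(H5C_t *cache_ptr, haddr_t addr)
    {
        int                 k         = H5C__HASH_FCN(addr);
        H5C_cache_entry_t  *entry_ptr = (cache_ptr->index)[k];

        while(entry_ptr != NULL) {
            if(H5F_addr_eq(addr, entry_ptr->addr)) {
                if(entry_ptr != (cache_ptr->index)[k]) {
                    /* unlink from its current position in the chain */
                    if(entry_ptr->ht_next)
                        entry_ptr->ht_next->ht_prev = entry_ptr->ht_prev;
                    entry_ptr->ht_prev->ht_next = entry_ptr->ht_next;

                    /* relink at the head of the bucket */
                    (cache_ptr->index)[k]->ht_prev = entry_ptr;
                    entry_ptr->ht_next = (cache_ptr->index)[k];
                    entry_ptr->ht_prev = NULL;
                    (cache_ptr->index)[k] = entry_ptr;
                }
                return entry_ptr;
            }
            entry_ptr = entry_ptr->ht_next;
        }
        return NULL;    /* miss */
    }
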
@@ -1320,20 +1480,23 @@ if ( ( (cache_ptr)->index_size != \
(cache_ptr)->index_size += (new_size); \
((cache_ptr)->index_ring_size[entry_ptr->ring]) -= (old_size); \
((cache_ptr)->index_ring_size[entry_ptr->ring]) += (new_size); \
- if ( was_clean ) { \
+ if(was_clean) { \
(cache_ptr)->clean_index_size -= (old_size); \
((cache_ptr)->clean_index_ring_size[entry_ptr->ring])-= (old_size); \
} else { \
(cache_ptr)->dirty_index_size -= (old_size); \
((cache_ptr)->dirty_index_ring_size[entry_ptr->ring])-= (old_size); \
} \
- if ( (entry_ptr)->is_dirty ) { \
+ if((entry_ptr)->is_dirty) { \
(cache_ptr)->dirty_index_size += (new_size); \
((cache_ptr)->dirty_index_ring_size[entry_ptr->ring])+= (new_size); \
} else { \
(cache_ptr)->clean_index_size += (new_size); \
((cache_ptr)->clean_index_ring_size[entry_ptr->ring])+= (new_size); \
} \
+ H5C__DLL_UPDATE_FOR_SIZE_CHANGE((cache_ptr)->il_len, \
+ (cache_ptr)->il_size, \
+ (old_size), (new_size)) \
H5C__POST_HT_ENTRY_SIZE_CHANGE_SC(cache_ptr, old_size, new_size, \
entry_ptr) \
}
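
Read together, the size-change bookkeeping above removes old_size from whichever of the clean or dirty totals applied before the change (was_clean), adds new_size to whichever applies now ((entry_ptr)->is_dirty), and then mirrors the net change onto the new index list totals. A condensed sketch of the same accounting, with ring bookkeeping omitted and a hypothetical helper name:

    static void
    index_size_change_sketch(H5C_t *cache_ptr, H5C_cache_entry_t *entry_ptr,
        size_t old_size, size_t new_size, hbool_t was_clean)
    {
        cache_ptr->index_size -= old_size;
        cache_ptr->index_size += new_size;

        if(was_clean)
            cache_ptr->clean_index_size -= old_size;
        else
            cache_ptr->dirty_index_size -= old_size;

        if(entry_ptr->is_dirty)
            cache_ptr->dirty_index_size += new_size;
        else
            cache_ptr->clean_index_size += new_size;

        /* the index list is redundant with the index proper, so its
         * size field tracks the same change
         */
        cache_ptr->il_size -= old_size;
        cache_ptr->il_size += new_size;
    }
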
@@ -1433,8 +1596,7 @@ if ( ( (cache_ptr)->index_size != \
(cache_ptr)->slist_size ); \
\
if(H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, &(entry_ptr)->addr) < 0) \
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), \
- "Can't insert entry in skip list") \
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), "can't insert entry in skip list") \
\
(entry_ptr)->in_slist = TRUE; \
(cache_ptr)->slist_changed = TRUE; \
@@ -1469,8 +1631,7 @@ if ( ( (cache_ptr)->index_size != \
(cache_ptr)->slist_size ); \
\
if(H5SL_insert((cache_ptr)->slist_ptr, entry_ptr, &(entry_ptr)->addr) < 0) \
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), \
- "Can't insert entry in skip list") \
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, (fail_val), "can't insert entry in skip list") \
\
(entry_ptr)->in_slist = TRUE; \
(cache_ptr)->slist_changed = TRUE; \
@@ -1522,8 +1683,7 @@ if ( ( (cache_ptr)->index_size != \
\
if ( H5SL_remove((cache_ptr)->slist_ptr, &(entry_ptr)->addr) \
!= (entry_ptr) ) \
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, \
- "Can't delete entry from skip list.") \
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "can't delete entry from skip list") \
\
HDassert( (cache_ptr)->slist_len > 0 ); \
if(!(during_flush)) \
@@ -1560,8 +1720,7 @@ if ( ( (cache_ptr)->index_size != \
\
if ( H5SL_remove((cache_ptr)->slist_ptr, &(entry_ptr)->addr) \
!= (entry_ptr) ) \
- HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, \
- "Can't delete entry from skip list.") \
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "can't delete entry from skip list") \
\
HDassert( (cache_ptr)->slist_len > 0 ); \
if(!(during_flush)) \
@@ -2080,6 +2239,120 @@ if ( ( (cache_ptr)->index_size != \
/*-------------------------------------------------------------------------
*
+ * Macro: H5C__UPDATE_RP_FOR_INSERT_APPEND
+ *
+ * Purpose: Update the replacement policy data structures for an
+ * insertion of the specified cache entry.
+ *
+ * Unlike H5C__UPDATE_RP_FOR_INSERTION below, mark the
+ * new entry as the LEAST recently used entry, not the
+ * most recently used.
+ *
+ * For now at least, this macro should only be used in
+ * the reconstruction of the metadata cache from a cache
+ * image block.
+ *
+ * At present, we only support the modified LRU policy, so
+ * this macro deals with that case unconditionally. If
+ * we ever support other replacement policies, the macro
+ * should switch on the current policy and act accordingly.
+ *
+ * Return: N/A
+ *
+ * Programmer: John Mainzer, 8/15/15
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
+
+#define H5C__UPDATE_RP_FOR_INSERT_APPEND(cache_ptr, entry_ptr, fail_val) \
+{ \
+ HDassert( (cache_ptr) ); \
+ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
+ HDassert( (entry_ptr) ); \
+ HDassert( !((entry_ptr)->is_protected) ); \
+ HDassert( !((entry_ptr)->is_read_only) ); \
+ HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
+ HDassert( (entry_ptr)->size > 0 ); \
+ \
+ if ( (entry_ptr)->is_pinned ) { \
+ \
+ H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \
+ (cache_ptr)->pel_tail_ptr, \
+ (cache_ptr)->pel_len, \
+ (cache_ptr)->pel_size, (fail_val)) \
+ \
+ } else { \
+ \
+ /* modified LRU specific code */ \
+ \
+ /* insert the entry at the tail of the LRU list. */ \
+ \
+ H5C__DLL_APPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
+ (cache_ptr)->LRU_tail_ptr, \
+ (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_size, (fail_val)) \
+ \
+ /* insert the entry at the tail of the clean or dirty LRU list as \
+ * appropriate. \
+ */ \
+ \
+ if ( entry_ptr->is_dirty ) { \
+ H5C__AUX_DLL_APPEND((entry_ptr), (cache_ptr)->dLRU_head_ptr, \
+ (cache_ptr)->dLRU_tail_ptr, \
+ (cache_ptr)->dLRU_list_len, \
+ (cache_ptr)->dLRU_list_size, (fail_val)) \
+ } else { \
+ H5C__AUX_DLL_APPEND((entry_ptr), (cache_ptr)->cLRU_head_ptr, \
+ (cache_ptr)->cLRU_tail_ptr, \
+ (cache_ptr)->cLRU_list_len, \
+ (cache_ptr)->cLRU_list_size, (fail_val)) \
+ } \
+ \
+ /* End modified LRU specific code. */ \
+ } \
+}
+
+#else /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
+
+#define H5C__UPDATE_RP_FOR_INSERT_APPEND(cache_ptr, entry_ptr, fail_val) \
+{ \
+ HDassert( (cache_ptr) ); \
+ HDassert( (cache_ptr)->magic == H5C__H5C_T_MAGIC ); \
+ HDassert( (entry_ptr) ); \
+ HDassert( !((entry_ptr)->is_protected) ); \
+ HDassert( !((entry_ptr)->is_read_only) ); \
+ HDassert( ((entry_ptr)->ro_ref_count) == 0 ); \
+ HDassert( (entry_ptr)->size > 0 ); \
+ \
+ if ( (entry_ptr)->is_pinned ) { \
+ \
+ H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->pel_head_ptr, \
+ (cache_ptr)->pel_tail_ptr, \
+ (cache_ptr)->pel_len, \
+ (cache_ptr)->pel_size, (fail_val)) \
+ \
+ } else { \
+ \
+ /* modified LRU specific code */ \
+ \
+ /* insert the entry at the tail of the LRU list. */ \
+ \
+ H5C__DLL_APPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
+ (cache_ptr)->LRU_tail_ptr, \
+ (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_size, (fail_val)) \
+ \
+ /* End modified LRU specific code. */ \
+ } \
+}
+
+#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
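+
The only difference between H5C__UPDATE_RP_FOR_INSERT_APPEND above and the pre-existing H5C__UPDATE_RP_FOR_INSERTION is which end of the LRU list receives the entry: entries reconstructed from a cache image go to the tail, making them the first eviction candidates, while normal insertions go to the head. A minimal illustration of the two list operations (generic structs, not HDF5 code):

    struct lru_node { struct lru_node *next, *prev; };
    struct lru_list { struct lru_node *head, *tail; };

    /* normal insertion: entry becomes most recently used */
    static void lru_prepend(struct lru_list *l, struct lru_node *n)
    {
        n->prev = NULL;
        n->next = l->head;
        if(l->head) l->head->prev = n; else l->tail = n;
        l->head = n;
    }

    /* cache image reconstruction: entry becomes least recently used */
    static void lru_append(struct lru_list *l, struct lru_node *n)
    {
        n->next = NULL;
        n->prev = l->tail;
        if(l->tail) l->tail->next = n; else l->head = n;
        l->tail = n;
    }
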
+
+
+/*-------------------------------------------------------------------------
+ *
* Macro: H5C__UPDATE_RP_FOR_INSERTION
*
* Purpose: Update the replacement policy data structures for an
@@ -2278,7 +2551,6 @@ if ( ( (cache_ptr)->index_size != \
(cache_ptr)->pel_tail_ptr, \
(cache_ptr)->pel_len, \
(cache_ptr)->pel_size, (fail_val)) \
- HDassert( (cache_ptr)->pel_len >= 0 ); \
\
} else { \
\
@@ -2341,7 +2613,6 @@ if ( ( (cache_ptr)->index_size != \
(cache_ptr)->pel_tail_ptr, \
(cache_ptr)->pel_len, \
(cache_ptr)->pel_size, (fail_val)) \
- HDassert( (cache_ptr)->pel_len >= 0 ); \
\
} else { \
\
@@ -2685,41 +2956,40 @@ if ( ( (cache_ptr)->index_size != \
H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr, \
(cache_ptr)->pel_tail_ptr, (cache_ptr)->pel_len, \
(cache_ptr)->pel_size, (fail_val)) \
- HDassert( (cache_ptr)->pel_len >= 0 ); \
\
- /* modified LRU specific code */ \
+ /* modified LRU specific code */ \
\
- /* insert the entry at the head of the LRU list. */ \
+ /* insert the entry at the head of the LRU list. */ \
\
- H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
- (cache_ptr)->LRU_tail_ptr, \
- (cache_ptr)->LRU_list_len, \
- (cache_ptr)->LRU_list_size, (fail_val)) \
+ H5C__DLL_PREPEND((entry_ptr), (cache_ptr)->LRU_head_ptr, \
+ (cache_ptr)->LRU_tail_ptr, \
+ (cache_ptr)->LRU_list_len, \
+ (cache_ptr)->LRU_list_size, (fail_val)) \
\
- /* Similarly, insert the entry at the head of either the clean \
- * or dirty LRU list as appropriate. \
- */ \
+ /* Similarly, insert the entry at the head of either the clean \
+ * or dirty LRU list as appropriate. \
+ */ \
\
- if ( (entry_ptr)->is_dirty ) { \
+ if ( (entry_ptr)->is_dirty ) { \
\
- H5C__AUX_DLL_PREPEND((entry_ptr), \
- (cache_ptr)->dLRU_head_ptr, \
- (cache_ptr)->dLRU_tail_ptr, \
- (cache_ptr)->dLRU_list_len, \
- (cache_ptr)->dLRU_list_size, \
- (fail_val)) \
+ H5C__AUX_DLL_PREPEND((entry_ptr), \
+ (cache_ptr)->dLRU_head_ptr, \
+ (cache_ptr)->dLRU_tail_ptr, \
+ (cache_ptr)->dLRU_list_len, \
+ (cache_ptr)->dLRU_list_size, \
+ (fail_val)) \
\
- } else { \
+ } else { \
\
- H5C__AUX_DLL_PREPEND((entry_ptr), \
- (cache_ptr)->cLRU_head_ptr, \
- (cache_ptr)->cLRU_tail_ptr, \
- (cache_ptr)->cLRU_list_len, \
- (cache_ptr)->cLRU_list_size, \
- (fail_val)) \
- } \
+ H5C__AUX_DLL_PREPEND((entry_ptr), \
+ (cache_ptr)->cLRU_head_ptr, \
+ (cache_ptr)->cLRU_tail_ptr, \
+ (cache_ptr)->cLRU_list_len, \
+ (cache_ptr)->cLRU_list_size, \
+ (fail_val)) \
+ } \
\
- /* End modified LRU specific code. */ \
+ /* End modified LRU specific code. */ \
\
} /* H5C__UPDATE_RP_FOR_UNPIN */
@@ -2742,7 +3012,6 @@ if ( ( (cache_ptr)->index_size != \
H5C__DLL_REMOVE((entry_ptr), (cache_ptr)->pel_head_ptr, \
(cache_ptr)->pel_tail_ptr, (cache_ptr)->pel_len, \
(cache_ptr)->pel_size, (fail_val)) \
- HDassert( (cache_ptr)->pel_len >= 0 ); \
\
/* modified LRU specific code */ \
\
@@ -2915,22 +3184,22 @@ if ( ( (hd_ptr) == NULL ) || \
( (len) <= 0 ) || \
( (Size) < (entry_ptr)->size ) || \
( ( (Size) == (entry_ptr)->size ) && ( ! ( (len) == 1 ) ) ) || \
- ( ( (entry_ptr)->coll_prev == NULL ) && ( (hd_ptr) != (entry_ptr) ) ) || \
+ ( ( (entry_ptr)->coll_prev == NULL ) && ( (hd_ptr) != (entry_ptr) ) ) || \
( ( (entry_ptr)->coll_next == NULL ) && ( (tail_ptr) != (entry_ptr) ) ) || \
( ( (len) == 1 ) && \
( ! ( ( (hd_ptr) == (entry_ptr) ) && ( (tail_ptr) == (entry_ptr) ) && \
- ( (entry_ptr)->coll_next == NULL ) && \
- ( (entry_ptr)->coll_prev == NULL ) && \
+ ( (entry_ptr)->coll_next == NULL ) && \
+ ( (entry_ptr)->coll_prev == NULL ) && \
( (Size) == (entry_ptr)->size ) \
) \
) \
) \
) { \
- HDassert(0 && "coll DLL pre remove SC failed"); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "coll DLL pre remove SC failed") \
+ HDassert(0 && "coll DLL pre remove SC failed"); \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "coll DLL pre remove SC failed") \
}
-#define H5C__COLL_DLL_SC(head_ptr, tail_ptr, len, Size, fv) \
+#define H5C__COLL_DLL_SC(head_ptr, tail_ptr, len, Size, fv) \
if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
( (head_ptr) != (tail_ptr) ) \
) || \
@@ -2942,36 +3211,35 @@ if ( ( ( ( (head_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
) \
) || \
( ( (len) >= 1 ) && \
- ( ( (head_ptr) == NULL ) || ( (head_ptr)->coll_prev != NULL ) || \
- ( (tail_ptr) == NULL ) || ( (tail_ptr)->coll_next != NULL ) \
+ ( ( (head_ptr) == NULL ) || ( (head_ptr)->coll_prev != NULL ) || \
+ ( (tail_ptr) == NULL ) || ( (tail_ptr)->coll_next != NULL ) \
) \
) \
) { \
- HDassert(0 && "COLL DLL sanity check failed"); \
+ HDassert(0 && "COLL DLL sanity check failed"); \
HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "COLL DLL sanity check failed") \
}
#define H5C__COLL_DLL_PRE_INSERT_SC(entry_ptr, hd_ptr, tail_ptr, len, Size, fv) \
if ( ( (entry_ptr) == NULL ) || \
- ( (entry_ptr)->coll_next != NULL ) || \
- ( (entry_ptr)->coll_prev != NULL ) || \
+ ( (entry_ptr)->coll_next != NULL ) || \
+ ( (entry_ptr)->coll_prev != NULL ) || \
( ( ( (hd_ptr) == NULL ) || ( (tail_ptr) == NULL ) ) && \
( (hd_ptr) != (tail_ptr) ) \
) || \
- ( (len) < 0 ) || \
( ( (len) == 1 ) && \
( ( (hd_ptr) != (tail_ptr) ) || ( (Size) <= 0 ) || \
( (hd_ptr) == NULL ) || ( (hd_ptr)->size != (Size) ) \
) \
) || \
( ( (len) >= 1 ) && \
- ( ( (hd_ptr) == NULL ) || ( (hd_ptr)->coll_prev != NULL ) || \
- ( (tail_ptr) == NULL ) || ( (tail_ptr)->coll_next != NULL ) \
+ ( ( (hd_ptr) == NULL ) || ( (hd_ptr)->coll_prev != NULL ) || \
+ ( (tail_ptr) == NULL ) || ( (tail_ptr)->coll_next != NULL ) \
) \
) \
) { \
- HDassert(0 && "COLL DLL pre insert SC failed"); \
- HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "COLL DLL pre insert SC failed") \
+ HDassert(0 && "COLL DLL pre insert SC failed"); \
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, (fv), "COLL DLL pre insert SC failed") \
}
#else /* H5C_DO_SANITY_CHECKS */
@@ -3274,10 +3542,8 @@ typedef struct H5C_tag_info_t {
* types are stored in the type_name_table discussed below, and
* indexed by the ids.
*
- * type_name_table_ptr: Pointer to an array of pointer to char of length
- * max_type_id + 1. The strings pointed to by the entries
- * in the array are the names of the entry types associated
- * with the indexing type IDs.
+ * class_table_ptr: Pointer to an array of pointers to H5C_class_t of
+ * length max_type_id + 1. The array entries are the classes of
+ * the entry types associated with the indexing type IDs.
*
* max_cache_size: Nominal maximum number of bytes that may be stored in the
* cache. This value should be viewed as a soft limit, as the
@@ -3309,6 +3575,9 @@ typedef struct H5C_tag_info_t {
* clean data so as to avoid case b) above. Again, this is
* a soft limit.
*
+ * close_warning_received: Boolean flag indicating that a file closing
+ * warning has been received.
+ *
*
* In addition to the call back functions required for each entry, the
* cache requires the following call back functions for this instance of
@@ -3345,6 +3614,17 @@ typedef struct H5C_tag_info_t {
* The cache requires an index to facilitate searching for entries. The
* following fields support that index.
*
+ * Addendum: JRM -- 10/14/15
+ *
+ * We sometimes need to visit all entries in the cache. In the past, this
+ * was done by scanning the hash table. However, this is expensive, and
+ * we have come to scan the hash table often enough that it has become a
+ * performance issue. To repair this, I have added code to maintain a
+ * list of all entries in the index -- call this list the index list.
+ *
+ * The index list is maintained by the same macros that maintain the
+ * index, and must have the same length and size as the index proper.
+ *
* index_len: Number of entries currently in the hash table used to index
* the cache.
*
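
The practical payoff of the index list described in the addendum above is that visiting every entry becomes a single list walk instead of a scan over all H5C__HASH_TABLE_LEN buckets. A sketch of such a walk using the il_head/il_next fields this patch introduces (the function itself is illustrative, not part of the patch):

    static size_t
    total_index_bytes_sketch(const H5C_t *cache_ptr)
    {
        const H5C_cache_entry_t *entry_ptr;
        size_t                   total = 0;

        /* one pass over the index list visits every cached entry */
        for(entry_ptr = cache_ptr->il_head; entry_ptr != NULL;
                entry_ptr = entry_ptr->il_next)
            total += entry_ptr->size;

        /* il_size and index_size are documented below as redundant,
         * so the walk should reproduce both
         */
        HDassert(total == cache_ptr->il_size);
        HDassert(total == cache_ptr->index_size);

        return total;
    }
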
@@ -3412,6 +3692,36 @@ typedef struct H5C_tag_info_t {
* changing the H5C__HASH_FCN macro and the deletion of the
* H5C__HASH_MASK #define. No other changes should be required.
*
+ * il_len: Number of entries on the index list.
+ *
+ * This must always be equal to index_len. As such, this
+ * field is redundant. However, the existing linked list
+ * management macros expect to maintain a length field, so
+ * this field exists primarily to avoid adding complexity to
+ * these macros.
+ *
+ * il_size: Number of bytes of cache entries currently stored in the
+ * index list.
+ *
+ * This must always be equal to index_size. As such, this
+ * field is redundant. However, the existing linked list
+ * management macros expect to maintain a size field, so
+ * this field exists primarily to avoid adding complexity to
+ * these macros.
+ *
+ * il_head: Pointer to the head of the doubly linked list of entries in
+ * the index list. Note that cache entries on this list are
+ * linked by their il_next and il_prev fields.
+ *
+ * This field is NULL if the index is empty.
+ *
+ * il_tail: Pointer to the tail of the doubly linked list of entries in
+ * the index list. Note that cache entries on this list are
+ * linked by their il_next and il_prev fields.
+ *
+ * This field is NULL if the index is empty.
+ *
+ *
* With the addition of the take ownership flag, it is possible that
* an entry may be removed from the cache as the result of the flush of
* a second entry. In general, this causes little trouble, but it is
@@ -3783,7 +4093,39 @@ typedef struct H5C_tag_info_t {
*
* size_decreased: Boolean flag set to TRUE whenever the maximum cache
* size is decreased. The flag triggers a call to
- * H5C_make_space_in_cache() on the next call to H5C_protect().
+ * H5C__make_space_in_cache() on the next call to H5C_protect().
+ *
+ * resize_in_progress: As the metadata cache has become re-entrant, it is
+ * possible that a protect may trigger a call to
+ * H5C__auto_adjust_cache_size(), which may trigger a flush,
+ * which may trigger a protect, which will result in another
+ * call to H5C__auto_adjust_cache_size().
+ *
+ * The resize_in_progress boolean flag is used to detect this,
+ * and to prevent the infinite recursion that would otherwise
+ * occur.
+ *
+ * Note that this issue is not hypothetical -- this field
+ * was added 12/29/15 to fix a bug exposed in the testing
+ * of changes to the file driver info superblock extension
+ * management code needed to support rings.
+ *
+ * msic_in_progress: As the metadata cache has become re-entrant, and as
+ * the free space manager code has become more tightly
+ * integrated with the metadata cache, it is possible that
+ * a call to H5C_insert_entry() may trigger a call to
+ * H5C_make_space_in_cache(), which, via H5C__flush_single_entry()
+ * H5C__make_space_in_cache(), which, via H5C__flush_single_entry()
+ * and client callbacks, may trigger an infinite recursion
+ * of calls to H5C__make_space_in_cache().
+ * The msic_in_progress boolean flag is used to detect this,
+ * and prevent the infinite regression that would otherwise
+ * and prevent the infinite recursion that would otherwise
+ *
+ * Note that this issue is not hypothetical -- this field
+ * was added 2/16/17 to address this issue when it was
+ * exposed by modifications to test/fheap.c to cause it to
+ * use paged allocation.
*
* resize_ctl: Instance of H5C_auto_size_ctl_t containing configuration
* data for automatic cache resizing.
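
Both resize_in_progress and msic_in_progress are conventional re-entrancy guards: the outermost call does the work, and any nested call that re-enters via a flush or protect returns without recursing. A generic sketch of the pattern (function name and body are illustrative only; the real code also handles errors and statistics):

    static herr_t
    auto_adjust_sketch(H5C_t *cache_ptr)
    {
        if(cache_ptr->resize_in_progress)
            return SUCCEED;                 /* nested call: do nothing */

        cache_ptr->resize_in_progress = TRUE;

        /* ... resizing work that may flush entries, which may protect
         * entries, which may re-enter this function ...
         */

        cache_ptr->resize_in_progress = FALSE;

        return SUCCEED;
    }
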
@@ -3860,6 +4202,146 @@ typedef struct H5C_tag_info_t {
* this field will be reset every automatic resize epoch.
*
*
+ * Metadata cache image management related fields.
+ *
+ * image_ctl: Instance of H5C_cache_image_ctl_t containing configuration
+ * data for generation of a cache image on file close.
+ *
+ * serialization_in_progress: Boolean field that is set to TRUE iff
+ * the cache is in the process of being serialized. This
+ * field is needed to support the H5C_serialization_in_progress()
+ * call, which is in turn required for sanity checks in some
+ * cache clients.
+ *
+ * load_image: Boolean flag indicating that the metadata cache image
+ * superblock extension message exists and should be
+ * read, and the image block read and decoded on the next
+ * call to H5C_protect().
+ *
+ * image_loaded: Boolean flag indicating that the metadata cache has
+ * loaded the metadata cache image as directed by the
+ * MDC cache image superblock extension message.
+ *
+ * delete_image: Boolean flag indicating whether the metadata cache image
+ * superblock message should be deleted and the cache image
+ * file space freed after they have been read and decoded.
+ *
+ * This flag should be set to TRUE iff the file is opened
+ * R/W and there is a cache image to be read.
+ *
+ * image_addr: haddr_t containing the base address of the on disk
+ * metadata cache image, or HADDR_UNDEF if that value is
+ * undefined. Note that this field is used both in the
+ * construction and write, and the read and decode of
+ * metadata cache image blocks.
+ *
+ * image_len: hsize_t containing the size of the on disk metadata cache
+ * image, or zero if that value is undefined. Note that this
+ * field is used both in the construction and write, and the
+ * read and decode of metadata cache image blocks.
+ *
+ * image_data_len: size_t containing the number of bytes of data in the
+ * on disk metadata cache image, or zero if that value is
+ * undefined.
+ *
+ * In most cases, this value is the same as the image_len
+ * above. It exists to allow for metadata cache image blocks
+ * that are larger than the actual image. Thus in all
+ * cases image_data_len <= image_len.
+ *
+ * To create the metadata cache image, we must first serialize all the
+ * entries in the metadata cache. This is done by a scan of the index.
+ * As entries must be serialized in increasing flush dependency height
+ * order, we scan the index repeatedly, once for each flush dependency
+ * height in increasing order.
+ *
+ * This operation is complicated by the fact that entries other than the
+ * target may be inserted, loaded, relocated, or removed from the cache
+ * (either by eviction or the take ownership flag) as the result of a
+ * pre_serialize or serialize callback. While entry removals are not
+ * a problem for the scan of the index, insertions, loads, and relocations
+ * are. Hence the entries loaded, inserted, and relocated counters
+ * listed below have been implemented to allow these conditions to be
+ * detected and dealt with by restarting the scan.
+ *
+ * The serialization operation is further complicated by the fact that
+ * the flush dependency height of a given entry may increase (as the
+ * result of an entry load or insert) or decrease (as the result of an
+ * entry removal -- via either eviction or the take ownership flag). The
+ * entry_fd_height_change_counter field is maintained to allow detection
+ * of this condition, and a restart of the scan when it occurs.
+ *
+ * Note that all these new fields would work just as well as booleans.
+ *
+ * entries_loaded_counter: Number of entries loaded into the cache
+ * since the last time this field was reset.
+ *
+ * entries_inserted_counter: Number of entries inserted into the cache
+ * since the last time this field was reset.
+ *
+ * entries_relocated_counter: Number of entries whose base address has
+ * been changed since the last time this field was reset.
+ *
+ * entry_fd_height_change_counter: Number of entries whose flush dependency
+ * height has changed since the last time this field was reset.
+ *
+ * The following fields are used to assemble the cache image prior to
+ * writing it to disk.
+ *
+ * num_entries_in_image: Unsigned integer field containing the number of entries
+ * to be copied into the metadata cache image. Note that
+ * this value will be less than the number of entries in
+ * the cache, as the superblock and its related entries
+ * are not written to the metadata cache image.
+ *
+ * image_entries: Pointer to a dynamically allocated array of instance of
+ * H5C_image_entry_t of length num_entries_in_image, or NULL
+ * if that array does not exist. This array is used to
+ * assemble entry data to be included in the image, and to
+ * sort them by flush dependency height and LRU rank.
+ *
+ * image_buffer: Pointer to the dynamically allocated buffer of length
+ * image_len in which the metadata cache image is assembled,
+ * or NULL if that buffer does not exist.
+ *
+ *
+ * Free Space Manager Related fields:
+ *
+ * The free space managers must be informed when we are about to close
+ * or flush the file so that they order themselves accordingly. This used
+ * to be done much later in the close process, but with cache image and
+ * page buffering, this is no longer viable, as we must finalize the on
+ * disk image of all metadata much sooner.
+ *
+ * This is handled by the H5MF_settle_raw_data_fsm() and
+ * H5MF_settle_meta_data_fsm() routines. As these calls are expensive,
+ * the following fields are used to track whether the target free space
+ * managers are clean.
+ *
+ * They are also used in sanity checking, as once a free space manager is
+ * settled, it should not become unsettled (i.e. be asked to allocate or
+ * free file space) either ever (in the case of a file close) or until the
+ * flush is complete.
+ *
+ * rdfsm_settled: Boolean flag indicating whether the raw data free space
+ * manager is settled -- i.e. whether the correct space has
+ * been allocated for it in the file.
+ *
+ * Note that the name of this field is deceptive. In the
+ * multi file case, the flag applies to all free space
+ * managers that are not involved in allocating space for
+ * free space manager metadata.
+ *
+ * mdfsm_settled: Boolean flag indicating whether the meta data free space
+ * manager is settled -- i.e. whether the correct space has
+ * been allocated for it in the file.
+ *
+ * Note that the name of this field is deceptive. In the
+ * multi file case, the flag applies only to free space
+ * managers that are involved in allocating space for free
+ * space managers.
+ *
+ *
* Statistics collection fields:
*
* When enabled, these fields are used to collect statistics as described
@@ -4036,23 +4518,85 @@ typedef struct H5C_tag_info_t {
* max_pel_size: Largest value attained by the pel_size field in the
* current epoch.
*
- * calls_to_msic: Total number of calls to H5C_make_space_in_cache
+ * calls_to_msic: Total number of calls to H5C__make_space_in_cache
*
* total_entries_skipped_in_msic: Number of clean entries skipped while
- * enforcing the min_clean_fraction in H5C_make_space_in_cache().
+ * enforcing the min_clean_fraction in H5C__make_space_in_cache().
+ *
+ * total_dirty_pf_entries_skipped_in_msic: Number of dirty prefetched entries
+ * skipped in H5C__make_space_in_cache(). Note that this can
+ * only occur when a file is opened R/O with a cache image
+ * containing dirty entries.
*
* total_entries_scanned_in_msic: Number of clean entries skipped while
- * enforcing the min_clean_fraction in H5C_make_space_in_cache().
+ * enforcing the min_clean_fraction in H5C__make_space_in_cache().
*
* max_entries_skipped_in_msic: Maximum number of clean entries skipped
- * in any one call to H5C_make_space_in_cache().
+ * in any one call to H5C__make_space_in_cache().
+ *
+ * max_dirty_pf_entries_skipped_in_msic: Maximum number of dirty prefetched
+ * entries skipped in any one call to H5C__make_space_in_cache().
+ * Note that this can only occur when the file is opened
+ * R/O with a cache image containing dirty entries.
*
* max_entries_scanned_in_msic: Maximum number of entries scanned over
- * in any one call to H5C_make_space_in_cache().
+ * in any one call to H5C__make_space_in_cache().
*
* entries_scanned_to_make_space: Number of entries scanned only when looking
* for entries to evict in order to make space in cache.
*
+ *
+ * The following fields track statistics on cache images.
+ *
+ * images_created: Integer field containing the number of cache images
+ * created since the last time statistics were reset.
+ *
+ * At present, this field must always be either 0 or 1.
+ * Further, since cache images are only created at file
+ * close, this field should only be set at that time.
+ *
+ * images_read: Integer field containing the number of cache images
+ * read from file. Note that reading an image is different
+ * from loading it -- reading the image means just that,
+ * while loading the image refers to decoding it and loading
+ * it into the metadata cache.
+ *
+ * In the serial case, images_read should always equal
+ * images_loaded. However, in the parallel case, the
+ * image should only be read by process 0. All other
+ * processes should receive the cache image via a broadcast
+ * from process 0.
+ *
+ * images_loaded: Integer field containing the number of cache images
+ * loaded since the last time statistics were reset.
+ *
+ * At present, this field must always be either 0 or 1.
+ * Further, since cache images are only loaded at the
+ * time of the first protect or on file close, this value
+ * should only change on those events.
+ *
+ * last_image_size: Size of the most recently loaded metadata cache image
+ * loaded into the cache, or zero if no image has been
+ * loaded.
+ *
+ * At present, at most one cache image can be loaded into
+ * the metadata cache for any given file, and this image
+ * will be loaded either on the first protect, or on file
+ * close if no entry is protected before then.
+ *
+ *
+ * Fields for tracking prefetched entries. Note that flushes and evictions
+ * of prefetched entries are tracked in the flushes and evictions arrays
+ * discussed above.
+ *
+ * prefetches: Number of prefetched entries that are loaded into the
+ * cache.
+ *
+ * dirty_prefetches: Number of dirty prefetched entries that are loaded
+ * into the cache.
+ *
+ * prefetch_hits: Number of prefetched entries that are actually used.
+ *
*
* As entries are now capable of moving, loading, dirtying, and deleting
* other entries in their pre_serialize and serialize callbacks, it has
@@ -4074,10 +4618,14 @@ typedef struct H5C_tag_info_t {
* avoid potential issues with change of status of the next
* entry in the scan.
*
- * hash_bucket_scan_restarts: Number of times a scan of a hash bucket list
- * (that contains calls to H5C__flush_single_entry()) has been
- * restarted to avoid potential issues with change of status
- * of the next entry in the scan.
+ * index_scan_restarts: Number of times a scan of the index has been
+ * restarted to avoid potential issues with load, insertion
+ * or change in flush dependency height of an entry other
+ * than the target entry as the result of call(s) to the
+ * pre_serialize or serialize callbacks.
+ *
+ * Note that at present, this condition can only be triggered
+ * by a call to H5C_serialize_single_entry().
*
* The remaining stats are collected only when both H5C_COLLECT_CACHE_STATS
* and H5C_COLLECT_CACHE_ENTRY_STATS are true.
@@ -4119,6 +4667,11 @@ typedef struct H5C_tag_info_t {
* field is intended to allow marking of output with the
* process's mpi rank.
*
+ * get_entry_ptr_from_addr_counter: Counter used to track the number of
+ * times the H5C_get_entry_ptr_from_addr() function has been
+ * called successfully. This field is only defined when
+ * NDEBUG is not #defined.
+ *
****************************************************************************/
struct H5C_t {
uint32_t magic;
@@ -4129,24 +4682,29 @@ struct H5C_t {
FILE * log_file_ptr;
void * aux_ptr;
int32_t max_type_id;
- const char * (* type_name_table_ptr);
+ const H5C_class_t * const *class_table_ptr;
size_t max_cache_size;
size_t min_clean_size;
H5C_write_permitted_func_t check_write_permitted;
hbool_t write_permitted;
H5C_log_flush_func_t log_flush;
hbool_t evictions_enabled;
+ hbool_t close_warning_received;
/* Fields for maintaining [hash table] index of entries */
- int32_t index_len;
+ uint32_t index_len;
size_t index_size;
- int32_t index_ring_len[H5C_RING_NTYPES];
+ uint32_t index_ring_len[H5C_RING_NTYPES];
size_t index_ring_size[H5C_RING_NTYPES];
size_t clean_index_size;
size_t clean_index_ring_size[H5C_RING_NTYPES];
size_t dirty_index_size;
size_t dirty_index_ring_size[H5C_RING_NTYPES];
- H5C_cache_entry_t * index[H5C__HASH_TABLE_LEN];
+ H5C_cache_entry_t * index[H5C__HASH_TABLE_LEN];
+ uint32_t il_len;
+ size_t il_size;
+ H5C_cache_entry_t * il_head;
+ H5C_cache_entry_t * il_tail;
/* Fields to detect entries removed during scans */
int64_t entries_removed_counter;
@@ -4155,15 +4713,15 @@ struct H5C_t {
/* Fields for maintaining list of in-order entries, for flushing */
hbool_t slist_changed;
- int32_t slist_len;
+ uint32_t slist_len;
size_t slist_size;
- int32_t slist_ring_len[H5C_RING_NTYPES];
+ uint32_t slist_ring_len[H5C_RING_NTYPES];
size_t slist_ring_size[H5C_RING_NTYPES];
H5SL_t * slist_ptr;
- int32_t num_last_entries;
+ uint32_t num_last_entries;
#if H5C_DO_SANITY_CHECKS
- int64_t slist_len_increase;
- int64_t slist_size_increase;
+ int32_t slist_len_increase;
+ ssize_t slist_size_increase;
#endif /* H5C_DO_SANITY_CHECKS */
/* Fields for maintaining list of tagged entries */
@@ -4171,38 +4729,38 @@ struct H5C_t {
hbool_t ignore_tags;
/* Fields for tracking protected entries */
- int32_t pl_len;
+ uint32_t pl_len;
size_t pl_size;
H5C_cache_entry_t * pl_head_ptr;
H5C_cache_entry_t * pl_tail_ptr;
/* Fields for tracking pinned entries */
- int32_t pel_len;
+ uint32_t pel_len;
size_t pel_size;
H5C_cache_entry_t * pel_head_ptr;
H5C_cache_entry_t * pel_tail_ptr;
/* Fields for complete LRU list of entries */
- int32_t LRU_list_len;
+ uint32_t LRU_list_len;
size_t LRU_list_size;
H5C_cache_entry_t * LRU_head_ptr;
H5C_cache_entry_t * LRU_tail_ptr;
/* Fields for clean LRU list of entries */
- int32_t cLRU_list_len;
+ uint32_t cLRU_list_len;
size_t cLRU_list_size;
H5C_cache_entry_t * cLRU_head_ptr;
H5C_cache_entry_t * cLRU_tail_ptr;
/* Fields for dirty LRU list of entries */
- int32_t dLRU_list_len;
+ uint32_t dLRU_list_len;
size_t dLRU_list_size;
H5C_cache_entry_t * dLRU_head_ptr;
H5C_cache_entry_t * dLRU_tail_ptr;
#ifdef H5_HAVE_PARALLEL
/* Fields for collective metadata reads */
- int32_t coll_list_len;
+ uint32_t coll_list_len;
size_t coll_list_size;
H5C_cache_entry_t * coll_head_ptr;
H5C_cache_entry_t * coll_tail_ptr;
@@ -4219,6 +4777,8 @@ struct H5C_t {
hbool_t resize_enabled;
hbool_t cache_full;
hbool_t size_decreased;
+ hbool_t resize_in_progress;
+ hbool_t msic_in_progress;
H5C_auto_size_ctl_t resize_ctl;
/* Fields for epoch markers used in automatic cache size adjustment */
@@ -4234,6 +4794,27 @@ struct H5C_t {
int64_t cache_hits;
int64_t cache_accesses;
+ /* fields supporting generation of a cache image on file close */
+ H5C_cache_image_ctl_t image_ctl;
+ hbool_t serialization_in_progress;
+ hbool_t load_image;
+ hbool_t image_loaded;
+ hbool_t delete_image;
+ haddr_t image_addr;
+ hsize_t image_len;
+ hsize_t image_data_len;
+ int64_t entries_loaded_counter;
+ int64_t entries_inserted_counter;
+ int64_t entries_relocated_counter;
+ int64_t entry_fd_height_change_counter;
+ uint32_t num_entries_in_image;
+ H5C_image_entry_t * image_entries;
+ void * image_buffer;
+
+ /* Free Space Manager Related fields */
+ hbool_t rdfsm_settled;
+ hbool_t mdfsm_settled;
+
#if H5C_COLLECT_CACHE_STATS
/* stats fields */
int64_t hits[H5C__MAX_NUM_TYPE_IDS + 1];
@@ -4267,35 +4848,48 @@ struct H5C_t {
int64_t total_successful_ht_search_depth;
int64_t failed_ht_searches;
int64_t total_failed_ht_search_depth;
- int32_t max_index_len;
+ uint32_t max_index_len;
size_t max_index_size;
size_t max_clean_index_size;
size_t max_dirty_index_size;
/* Fields for in-order skip list */
- int32_t max_slist_len;
+ uint32_t max_slist_len;
size_t max_slist_size;
/* Fields for protected entry list */
- int32_t max_pl_len;
+ uint32_t max_pl_len;
size_t max_pl_size;
/* Fields for pinned entry list */
- int32_t max_pel_len;
+ uint32_t max_pel_len;
size_t max_pel_size;
/* Fields for tracking 'make space in cache' (msic) operations */
int64_t calls_to_msic;
int64_t total_entries_skipped_in_msic;
+ int64_t total_dirty_pf_entries_skipped_in_msic;
int64_t total_entries_scanned_in_msic;
int32_t max_entries_skipped_in_msic;
+ int32_t max_dirty_pf_entries_skipped_in_msic;
int32_t max_entries_scanned_in_msic;
int64_t entries_scanned_to_make_space;
/* Fields for tracking skip list scan restarts */
int64_t slist_scan_restarts;
int64_t LRU_scan_restarts;
- int64_t hash_bucket_scan_restarts;
+ int64_t index_scan_restarts;
+
+ /* Fields for tracking cache image operations */
+ int32_t images_created;
+ int32_t images_read;
+ int32_t images_loaded;
+ hsize_t last_image_size;
+
+ /* Fields for tracking prefetched entries */
+ int64_t prefetches;
+ int64_t dirty_prefetches;
+ int64_t prefetch_hits;
#if H5C_COLLECT_CACHE_ENTRY_STATS
int32_t max_accesses[H5C__MAX_NUM_TYPE_IDS + 1];
@@ -4308,6 +4902,10 @@ struct H5C_t {
#endif /* H5C_COLLECT_CACHE_STATS */
char prefix[H5C__PREFIX_LEN];
+
+#ifndef NDEBUG
+ int64_t get_entry_ptr_from_addr_counter;
+#endif /* NDEBUG */
};
/* Define typedef for tagged cache entry iteration callbacks */
@@ -4318,18 +4916,29 @@ typedef int (*H5C_tag_iter_cb_t)(H5C_cache_entry_t *entry, void *ctx);
/* Package Private Variables */
/*****************************/
-/* Metadata cache epoch class */
-H5_DLLVAR const H5C_class_t H5C__epoch_marker_class;
-
/******************************/
/* Package Private Prototypes */
/******************************/
+H5_DLL herr_t H5C__prep_image_for_file_close(H5F_t *f, hid_t dxpl_id,
+ hbool_t *image_generated);
+H5_DLL herr_t H5C__deserialize_prefetched_entry(H5F_t *f, hid_t dxpl_id,
+ H5C_t * cache_ptr, H5C_cache_entry_t** entry_ptr_ptr,
+ const H5C_class_t * type, haddr_t addr, void * udata);
/* General routines */
-H5_DLL herr_t H5C__flush_single_entry(const H5F_t *f, hid_t dxpl_id,
+H5_DLL herr_t H5C__flush_single_entry(H5F_t *f, hid_t dxpl_id,
H5C_cache_entry_t *entry_ptr, unsigned flags);
+H5_DLL herr_t H5C__generate_cache_image(H5F_t *f, hid_t dxpl_id, H5C_t *cache_ptr);
+H5_DLL herr_t H5C__load_cache_image(H5F_t *f, hid_t dxpl_id);
+H5_DLL herr_t H5C__mark_flush_dep_serialized(H5C_cache_entry_t * entry_ptr);
+H5_DLL herr_t H5C__mark_flush_dep_unserialized(H5C_cache_entry_t * entry_ptr);
+H5_DLL herr_t H5C__make_space_in_cache(H5F_t * f, hid_t dxpl_id,
+ size_t space_needed, hbool_t write_permitted);
H5_DLL herr_t H5C__flush_marked_entries(H5F_t * f, hid_t dxpl_id);
+H5_DLL herr_t H5C__generate_image(H5F_t *f, H5C_t *cache_ptr,
+ H5C_cache_entry_t *entry_ptr, hid_t dxpl_id);
+H5_DLL herr_t H5C__serialize_cache(H5F_t *f, hid_t dxpl_id);
H5_DLL herr_t H5C__iter_tagged_entries(H5C_t *cache, haddr_t tag, hbool_t match_global,
H5C_tag_iter_cb_t cb, void *cb_ctx);
diff --git a/src/H5Cprefetched.c b/src/H5Cprefetched.c
new file mode 100644
index 0000000..6237d57
--- /dev/null
+++ b/src/H5Cprefetched.c
@@ -0,0 +1,350 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: H5Cprefetched.c
+ * December 28 2016
+ * Quincey Koziol
+ *
+ * Purpose: Metadata cache prefetched entry callbacks.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+
+/***********/
+/* Headers */
+/***********/
+#include "H5private.h" /* Generic Functions */
+#include "H5ACprivate.h" /* Metadata cache */
+#include "H5FLprivate.h" /* Free Lists */
+#include "H5MMprivate.h" /* Memory management */
+
+
+/****************/
+/* Local Macros */
+/****************/
+
+
+/******************/
+/* Local Typedefs */
+/******************/
+
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+/****************************************************************************
+ *
+ * Declarations for prefetched cache entry callbacks.
+ *
+ ****************************************************************************/
+static herr_t H5C__prefetched_entry_get_initial_load_size(void *udata_ptr,
+ size_t *image_len_ptr);
+static herr_t H5C__prefetched_entry_get_final_load_size(const void *image_ptr,
+ size_t image_len, void *udata_ptr, size_t *actual_len_ptr);
+static htri_t H5C__prefetched_entry_verify_chksum(const void *image_ptr,
+ size_t len, void *udata_ptr);
+static void * H5C__prefetched_entry_deserialize(const void *image_ptr,
+ size_t len, void *udata, hbool_t *dirty_ptr);
+static herr_t H5C__prefetched_entry_image_len(const void *thing,
+ size_t *image_len_ptr);
+static herr_t H5C__prefetched_entry_pre_serialize(H5F_t *f,
+ hid_t dxpl_id, void *thing, haddr_t addr, size_t len,
+ haddr_t *new_addr_ptr, size_t *new_len_ptr, unsigned *flags_ptr);
+static herr_t H5C__prefetched_entry_serialize(const H5F_t *f, void *image_ptr,
+ size_t len, void *thing);
+static herr_t H5C__prefetched_entry_notify(H5C_notify_action_t action,
+ void *thing);
+static herr_t H5C__prefetched_entry_free_icr(void *thing);
+static herr_t H5C__prefetched_entry_fsf_size(const void *thing,
+ size_t *fsf_size_ptr);
+
+
+/*********************/
+/* Package Variables */
+/*********************/
+
+/* Declare external the free list for H5C_cache_entry_t's */
+H5FL_EXTERN(H5C_cache_entry_t);
+
+
+/*****************************/
+/* Library Private Variables */
+/*****************************/
+
+
+/*******************/
+/* Local Variables */
+/*******************/
+
+
+const H5AC_class_t H5AC_PREFETCHED_ENTRY[1] = {{
+ /* id = */ H5AC_PREFETCHED_ENTRY_ID,
+ /* name = */ "prefetched entry",
+ /* mem_type = */ H5FD_MEM_DEFAULT, /* value doesn't matter */
+ /* flags = */ H5AC__CLASS_NO_FLAGS_SET,
+ /* get_initial_load_size = */ H5C__prefetched_entry_get_initial_load_size,
+ /* get_final_load_size = */ H5C__prefetched_entry_get_final_load_size,
+ /* verify_chksum = */ H5C__prefetched_entry_verify_chksum,
+ /* deserialize = */ H5C__prefetched_entry_deserialize,
+ /* image_len = */ H5C__prefetched_entry_image_len,
+ /* pre_serialize = */ H5C__prefetched_entry_pre_serialize,
+ /* serialize = */ H5C__prefetched_entry_serialize,
+ /* notify = */ H5C__prefetched_entry_notify,
+ /* free_icr = */ H5C__prefetched_entry_free_icr,
+ /* fsf_size = */ H5C__prefetched_entry_fsf_size,
+}};
+
+
+
+/***************************************************************************
+ * With two exceptions, these functions should never be called, and thus
+ * there is little point in documenting them separately as they all simply
+ * throw an error.
+ *
+ * See header comments for the two exceptions (free_icr and notify).
+ *
+ * JRM - 8/13/15
+ *
+ ***************************************************************************/
+
+static herr_t
+H5C__prefetched_entry_get_initial_load_size(void H5_ATTR_UNUSED *udata_ptr,
+ size_t H5_ATTR_UNUSED *image_len_ptr)
+{
+ FUNC_ENTER_STATIC_NOERR /* Yes, even though this pushes an error on the stack */
+
+ HERROR(H5E_CACHE, H5E_SYSTEM, "called unreachable fcn.");
+
+ FUNC_LEAVE_NOAPI(FAIL)
+} /* end H5C__prefetched_entry_get_initial_load_size() */
+
+static herr_t
+H5C__prefetched_entry_get_final_load_size(const void H5_ATTR_UNUSED *image_ptr,
+ size_t H5_ATTR_UNUSED image_len, void H5_ATTR_UNUSED *udata_ptr,
+ size_t H5_ATTR_UNUSED *actual_len_ptr)
+{
+ FUNC_ENTER_STATIC_NOERR /* Yes, even though this pushes an error on the stack */
+
+ HERROR(H5E_CACHE, H5E_SYSTEM, "called unreachable fcn.");
+
+ FUNC_LEAVE_NOAPI(FAIL)
+} /* end H5C__prefetched_entry_get_final_load_size() */
+
+static htri_t
+H5C__prefetched_entry_verify_chksum(const void H5_ATTR_UNUSED *image_ptr,
+ size_t H5_ATTR_UNUSED len, void H5_ATTR_UNUSED *udata_ptr)
+{
+ FUNC_ENTER_STATIC_NOERR /* Yes, even though this pushes an error on the stack */
+
+ HERROR(H5E_CACHE, H5E_SYSTEM, "called unreachable fcn.");
+
+ FUNC_LEAVE_NOAPI(FAIL)
+} /* end H5C__prefetched_verify_chksum() */
+
+
+static void *
+H5C__prefetched_entry_deserialize(const void H5_ATTR_UNUSED * image_ptr,
+ size_t H5_ATTR_UNUSED len, void H5_ATTR_UNUSED * udata,
+ hbool_t H5_ATTR_UNUSED * dirty_ptr)
+{
+ FUNC_ENTER_STATIC_NOERR /* Yes, even though this pushes an error on the stack */
+
+ HERROR(H5E_CACHE, H5E_SYSTEM, "called unreachable fcn.");
+
+ FUNC_LEAVE_NOAPI(NULL)
+} /* end H5C__prefetched_entry_deserialize() */
+
+
+static herr_t
+H5C__prefetched_entry_image_len(const void H5_ATTR_UNUSED *thing,
+ size_t H5_ATTR_UNUSED *image_len_ptr)
+{
+ FUNC_ENTER_STATIC_NOERR /* Yes, even though this pushes an error on the stack */
+
+ HERROR(H5E_CACHE, H5E_SYSTEM, "called unreachable fcn.");
+
+ FUNC_LEAVE_NOAPI(FAIL)
+} /* end H5C__prefetched_entry_image_len() */
+
+
+static herr_t
+H5C__prefetched_entry_pre_serialize(H5F_t H5_ATTR_UNUSED *f,
+ hid_t H5_ATTR_UNUSED dxpl_id, void H5_ATTR_UNUSED *thing,
+ haddr_t H5_ATTR_UNUSED addr, size_t H5_ATTR_UNUSED len,
+ haddr_t H5_ATTR_UNUSED *new_addr_ptr,
+ size_t H5_ATTR_UNUSED *new_len_ptr,
+ unsigned H5_ATTR_UNUSED *flags_ptr)
+{
+ FUNC_ENTER_STATIC_NOERR /* Yes, even though this pushes an error on the stack */
+
+ HERROR(H5E_CACHE, H5E_SYSTEM, "called unreachable fcn.");
+
+ FUNC_LEAVE_NOAPI(FAIL)
+} /* end H5C__prefetched_entry_pre_serialize() */
+
+
+static herr_t
+H5C__prefetched_entry_serialize(const H5F_t H5_ATTR_UNUSED *f,
+ void H5_ATTR_UNUSED *image_ptr,
+ size_t H5_ATTR_UNUSED len, void H5_ATTR_UNUSED *thing)
+{
+ FUNC_ENTER_STATIC_NOERR /* Yes, even though this pushes an error on the stack */
+
+ HERROR(H5E_CACHE, H5E_SYSTEM, "called unreachable fcn.");
+
+ FUNC_LEAVE_NOAPI(FAIL)
+} /* end H5C__prefetched_entry_serialize() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__prefetched_entry_notify
+ *
+ * Purpose: On H5AC_NOTIFY_ACTION_BEFORE_EVICT, check to see if the
+ * target entry is a child in a flush dependency relationship.
+ * If it is, destroy that flush dependency relationship.
+ *
+ * Ignore on all other notifications.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: John Mainzer
+ * 8/13/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__prefetched_entry_notify(H5C_notify_action_t action, void *_thing)
+{
+ H5C_cache_entry_t * entry_ptr = (H5C_cache_entry_t *)_thing;
+ unsigned u;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(entry_ptr);
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->prefetched);
+
+ switch(action) {
+ case H5C_NOTIFY_ACTION_AFTER_INSERT:
+ case H5C_NOTIFY_ACTION_AFTER_LOAD:
+ case H5C_NOTIFY_ACTION_AFTER_FLUSH:
+ case H5C_NOTIFY_ACTION_ENTRY_DIRTIED:
+ case H5C_NOTIFY_ACTION_ENTRY_CLEANED:
+ case H5C_NOTIFY_ACTION_CHILD_DIRTIED:
+ case H5C_NOTIFY_ACTION_CHILD_CLEANED:
+ case H5C_NOTIFY_ACTION_CHILD_UNSERIALIZED:
+ case H5C_NOTIFY_ACTION_CHILD_SERIALIZED:
+ /* do nothing */
+ break;
+
+ case H5C_NOTIFY_ACTION_BEFORE_EVICT:
+ for(u = 0; u < entry_ptr->flush_dep_nparents; u++) {
+ H5C_cache_entry_t * parent_ptr;
+
+ /* Sanity checks */
+ HDassert(entry_ptr->flush_dep_parent);
+ parent_ptr = entry_ptr->flush_dep_parent[u];
+ HDassert(parent_ptr);
+ HDassert(parent_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(parent_ptr->flush_dep_nchildren > 0);
+
+ /* Destroy flush dependency with flush dependency parent */
+ if(H5C_destroy_flush_dependency(parent_ptr, entry_ptr) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTUNDEPEND, FAIL, "unable to destroy prefetched entry flush dependency")
+
+ if(parent_ptr->prefetched) {
+ /* In prefetched entries, the fd_child_count field is
+ * used in sanity checks elsewhere. Thus update this
+ * field to reflect the destruction of the flush
+ * dependency relationship.
+ */
+ HDassert(parent_ptr->fd_child_count > 0);
+ (parent_ptr->fd_child_count)--;
+ } /* end if */
+ } /* end for */
+ break;
+
+ default:
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "unknown action from metadata cache")
+ break;
+ } /* end switch */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5C__prefetched_entry_notify() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5C__prefetched_entry_free_icr
+ *
+ * Purpose: Free the in core representation of the prefetched entry.
+ * Verify that the image buffer associated with the entry
+ * has been either transferred or freed.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: John Mainzer
+ * 8/13/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5C__prefetched_entry_free_icr(void *_thing)
+{
+ H5C_cache_entry_t *entry_ptr = (H5C_cache_entry_t *)_thing;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(entry_ptr);
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC);
+ HDassert(entry_ptr->prefetched);
+
+ /* Release array for flush dependency parent addresses */
+ if(entry_ptr->fd_parent_addrs != NULL) {
+ HDassert(entry_ptr->fd_parent_count > 0);
+ entry_ptr->fd_parent_addrs = (haddr_t *)H5MM_xfree((void *)entry_ptr->fd_parent_addrs);
+ } /* end if */
+ else
+ HDassert(entry_ptr->fd_parent_count == 0);
+
+ if(entry_ptr->image_ptr != NULL)
+ HGOTO_ERROR(H5E_CACHE, H5E_SYSTEM, FAIL, "prefetched entry image buffer still attached?")
+
+ entry_ptr = H5FL_FREE(H5C_cache_entry_t, entry_ptr);
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5C__prefetched_entry_free_icr() */
+
+
+static herr_t
+H5C__prefetched_entry_fsf_size(const void H5_ATTR_UNUSED *thing,
+ size_t H5_ATTR_UNUSED *fsf_size_ptr)
+{
+ FUNC_ENTER_STATIC_NOERR /* Yes, even though this pushes an error on the stack */
+
+ HERROR(H5E_CACHE, H5E_SYSTEM, "called unreachable fcn.");
+
+ FUNC_LEAVE_NOAPI(FAIL)
+} /* end H5C__prefetched_entry_fsf_size() */
+
diff --git a/src/H5Cprivate.h b/src/H5Cprivate.h
index ed75bec..5335f80 100644
--- a/src/H5Cprivate.h
+++ b/src/H5Cprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -41,7 +39,7 @@
/**************************/
/* Cache configuration settings */
-#define H5C__MAX_NUM_TYPE_IDS 29
+#define H5C__MAX_NUM_TYPE_IDS 30
#define H5C__PREFIX_LEN 32
/* This sanity checking constant was picked out of the air. Increase
@@ -66,11 +64,11 @@
#endif /* H5_HAVE_PARALLEL */
/* Flags for cache client class behavior */
-#define H5C__CLASS_NO_FLAGS_SET ((unsigned)0x0)
-#define H5C__CLASS_SPECULATIVE_LOAD_FLAG ((unsigned)0x1)
+#define H5C__CLASS_NO_FLAGS_SET ((unsigned)0x0)
+#define H5C__CLASS_SPECULATIVE_LOAD_FLAG ((unsigned)0x1)
/* The following flags may only appear in test code */
-#define H5C__CLASS_SKIP_READS ((unsigned)0x2)
-#define H5C__CLASS_SKIP_WRITES ((unsigned)0x4)
+#define H5C__CLASS_SKIP_READS ((unsigned)0x2)
+#define H5C__CLASS_SKIP_WRITES ((unsigned)0x4)
/* Flags for pre-serialize callback */
#define H5C__SERIALIZE_NO_FLAGS_SET ((unsigned)0)
@@ -114,9 +112,7 @@
/* Cache configuration versions */
#define H5C__CURR_AUTO_SIZE_CTL_VER 1
#define H5C__CURR_AUTO_RESIZE_RPT_FCN_VER 1
-
-/* Number of epoch markers active */
-#define H5C__MAX_EPOCH_MARKERS 10
+#define H5C__CURR_CACHE_IMAGE_CTL_VER 1
/* Default configuration settings */
#define H5C__DEF_AR_UPPER_THRESHHOLD 0.9999f
@@ -187,6 +183,7 @@
* H5C__TAKE_OWNERSHIP_FLAG
* H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG
* H5C__GENERATE_IMAGE_FLAG
+ * H5C__UPDATE_PAGE_BUFFER_FLAG
*/
#define H5C__NO_FLAGS_SET 0x00000
#define H5C__SET_FLUSH_MARKER_FLAG 0x00001
@@ -207,6 +204,7 @@
#define H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG 0x08000
#define H5C__DURING_FLUSH_FLAG 0x10000 /* Set when the entire cache is being flushed */
#define H5C__GENERATE_IMAGE_FLAG 0x20000 /* Set during parallel I/O */
+#define H5C__UPDATE_PAGE_BUFFER_FLAG 0x40000 /* Set during parallel I/O */
/* Debugging/sanity checking/statistics settings */
#ifndef NDEBUG
@@ -562,7 +560,7 @@ typedef struct H5C_t H5C_t;
*
* The typedef for the pre-serialize callback is as follows:
*
- * typedef herr_t (*H5C_pre_serialize_func_t)(const H5F_t *f,
+ * typedef herr_t (*H5C_pre_serialize_func_t)(H5F_t *f,
* hid_t dxpl_id,
* void * thing,
* haddr_t addr,
@@ -870,7 +868,9 @@ typedef enum H5C_notify_action_t {
H5C_NOTIFY_ACTION_ENTRY_DIRTIED, /* Entry has been marked dirty. */
H5C_NOTIFY_ACTION_ENTRY_CLEANED, /* Entry has been marked clean. */
H5C_NOTIFY_ACTION_CHILD_DIRTIED, /* Dependent child has been marked dirty. */
- H5C_NOTIFY_ACTION_CHILD_CLEANED /* Dependent child has been marked clean. */
+ H5C_NOTIFY_ACTION_CHILD_CLEANED, /* Dependent child has been marked clean. */
+ H5C_NOTIFY_ACTION_CHILD_UNSERIALIZED, /* Dependent child has been marked unserialized. */
+ H5C_NOTIFY_ACTION_CHILD_SERIALIZED /* Dependent child has been marked serialized. */
} H5C_notify_action_t;
/* Cache client callback function pointers */
@@ -881,7 +881,7 @@ typedef htri_t (*H5C_verify_chksum_func_t)(const void *image_ptr, size_t len, vo
typedef void *(*H5C_deserialize_func_t)(const void *image_ptr,
size_t len, void *udata_ptr, hbool_t *dirty_ptr);
typedef herr_t (*H5C_image_len_func_t)(const void *thing, size_t *image_len_ptr);
-typedef herr_t (*H5C_pre_serialize_func_t)(const H5F_t *f, hid_t dxpl_id,
+typedef herr_t (*H5C_pre_serialize_func_t)(H5F_t *f, hid_t dxpl_id,
void *thing, haddr_t addr, size_t len, haddr_t *new_addr_ptr,
size_t *new_len_ptr, unsigned *flags_ptr);
typedef herr_t (*H5C_serialize_func_t)(const H5F_t *f, void *image_ptr,
@@ -940,11 +940,13 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t *cache_ptr, haddr_t addr,
* ring.
*
* Free space managers managing file space must be flushed next,
- * and are assigned to the second outermost ring.
+ * and are assigned to the second and third outermost rings. Two rings
+ * are used here as the raw data free space manager must be flushed before
+ * the metadata free space manager.
*
* The object header and associated chunks used to implement superblock
* extension messages must be flushed next, and are thus assigned to
- * the third outermost ring.
+ * the fourth outermost ring.
*
* The superblock proper must be flushed last, and is thus assigned to
* the innermost ring.
@@ -958,12 +960,13 @@ typedef herr_t (*H5C_log_flush_func_t)(H5C_t *cache_ptr, haddr_t addr,
* debugging.
*/
-#define H5C_RING_UNDEFINED 0 /* shouldn't appear in the cache */
-#define H5C_RING_USER 1 /* outermost ring */
-#define H5C_RING_FSM 2
-#define H5C_RING_SBE 4 /* temporarily merged with H5C_RING_SB */
-#define H5C_RING_SB 4 /* innermost ring */
-#define H5C_RING_NTYPES 5
+#define H5C_RING_UNDEFINED 0 /* shouldn't appear in the cache */
+#define H5C_RING_USER 1 /* outermost ring */
+#define H5C_RING_RDFSM 2
+#define H5C_RING_MDFSM 3
+#define H5C_RING_SBE 4
+#define H5C_RING_SB 5 /* innermost ring */
+#define H5C_RING_NTYPES 6
typedef int H5C_ring_t;
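
The ring constants above define the flush order, from the outermost ring (H5C_RING_USER) to the innermost (H5C_RING_SB). A minimal sketch of that traversal follows, assuming a flush_one_ring() helper that stands in for the cache's internal per-ring flush routine (not shown in this hunk):

static herr_t
flush_all_rings_sketch(H5C_t *cache_ptr, hid_t dxpl_id)
{
    H5C_ring_t ring;

    /* Walk the rings in flush order: H5C_RING_USER (outermost) first,
     * H5C_RING_SB (innermost) last.  flush_one_ring() is a hypothetical
     * stand-in for the cache's internal per-ring flush routine.
     */
    for(ring = H5C_RING_USER; ring < H5C_RING_NTYPES; ring++)
        if(flush_one_ring(cache_ptr, dxpl_id, ring) < 0)
            return FAIL;

    return SUCCEED;
} /* end flush_all_rings_sketch() */
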
@@ -992,16 +995,16 @@ typedef int H5C_ring_t;
* just before the entry is freed.
*
* This is necessary, as the LRU list can be changed out
- * from under H5C_make_space_in_cache() by the serialize
+ * from under H5C__make_space_in_cache() by the serialize
* callback which may change the size of an existing entry,
* and/or load a new entry while serializing the target entry.
*
* This in turn can cause a recursive call to
- * H5C_make_space_in_cache() which may either flush or evict
+ * H5C__make_space_in_cache() which may either flush or evict
* the next entry that the first invocation of that function
* was about to examine.
*
- * The magic field allows H5C_make_space_in_cache() to
+ * The magic field allows H5C__make_space_in_cache() to
* detect this case, and re-start its scan from the bottom
* of the LRU when this situation occurs.
*
@@ -1063,9 +1066,9 @@ typedef int H5C_ring_t;
*
* is_read_only: Boolean flag that is only meaningful if is_protected is
* TRUE. In this circumstance, it indicates whether the
- * entry has been protected read only, or read/write.
+ * entry has been protected read-only, or read/write.
*
- * If the entry has been protected read only (i.e. is_protected
+ * If the entry has been protected read-only (i.e. is_protected
* and is_read_only are both TRUE), we allow the entry to be
* protected more than once.
*
@@ -1075,7 +1078,7 @@ typedef int H5C_ring_t;
* the entry is actually unprotected.
*
* ro_ref_count: Integer field used to maintain a count of the number of
- * outstanding read only protects on this entry. This field
+ * outstanding read-only protects on this entry. This field
* must be zero whenever either is_protected or is_read_only
* are TRUE.
*
@@ -1214,6 +1217,19 @@ typedef int H5C_ring_t;
* either dirty or have a nonzero flush_dep_ndirty_children. If
* this field is nonzero, then this entry cannot be flushed.
*
+ * flush_dep_nunser_children: Number of flush dependency children
+ * that are either unserialized, or have a nonzero number of
+ * unserialized children.
+ *
+ * Note that since there is no requirement that a clean entry
+ * be serialized, it is possible for flush_dep_nunser_children
+ * to be greater than flush_dep_ndirty_children.
+ *
+ * This field exists to facilitate correct ordering of entry
+ * serializations when it is necessary to serialize all the
+ * entries in the metadata cache. Thus, during cache
+ * serialization, no entry can be serialized unless this
+ * field contains 0.
*
* Fields supporting the hash table:
*
@@ -1221,6 +1237,15 @@ typedef int H5C_ring_t;
* If there are multiple entries in any hash bin, they are stored in a doubly
* linked list.
*
+ * Addendum: JRM -- 10/14/15
+ *
+ * We have come to scan all entries in the cache frequently enough that
+ * the cost of doing so by scanning the hash table has become unacceptable.
+ * To reduce this cost, the index now also maintains a doubly linked list
+ * of all entries in the index. This list is known as the index list.
+ * The il_next and il_prev fields discussed below were added to support
+ * the index list.
+ *
* ht_next: Next pointer used by the hash table to store multiple
* entries in a single hash bin. This field points to the
* next entry in the doubly linked list of entries in the
@@ -1231,6 +1256,16 @@ typedef int H5C_ring_t;
* previous entry in the doubly linked list of entries in
* the hash bin, or NULL if there is no previuos entry.
*
+ * il_next: Next pointer used by the index to maintain a doubly linked
+ * list of all entries in the index (and thus in the cache).
+ * This field contains a pointer to the next entry in the
+ * index list, or NULL if there is no next entry.
+ *
+ * il_prev: Prev pointer used by the index to maintain a doubly linked
+ * list of all entries in the index (and thus in the cache).
+ * This field contains a pointer to the previous entry in the
+ * index list, or NULL if there is no previous entry.
+ *
*
* Fields supporting replacement policies:
*
@@ -1304,6 +1339,227 @@ typedef int H5C_ring_t;
* In either case, when there is no previous item, it should
* be NULL.
*
+ * Fields supporting the cache image feature:
+ *
+ * The following fields are used to store data about the entry which must
+ * be stored in the cache image block, but which will typically be either
+ * lost or heavily altered in the process of serializing the cache and
+ * preparing its contents to be copied into the cache image block.
+ *
+ * Some fields are also used in loading the contents of the metadata cache
+ * image back into the cache, and in managing such entries until they are
+ * either protected by the library (at which point they become regular
+ * entries) or are evicted. See discussion of the prefetched field for
+ * further details.
+ *
+ * include_in_image: Boolean flag indicating whether this entry should
+ * be included in the metadata cache image. This field should
+ * always be false prior to the H5C_prep_for_file_close() call.
+ * During that call, it should be set to TRUE for all entries
+ * that are to be included in the metadata cache image. At
+ * present, only the superblock, the superblock extension
+ * object header and its chunks (if any) are omitted from
+ * the image.
+ *
+ * lru_rank: Rank of the entry in the LRU just prior to file close.
+ *
+ * Note that the first entry on the LRU has lru_rank 1,
+ * and that entries not on the LRU at that time will have
+ * either lru_rank -1 (if pinned) or 0 (if loaded during
+ * the process of flushing the cache).
+ *
+ * image_dirty: Boolean flag indicating whether the entry should be marked
+ * as dirty in the metadata cache image. The flag is set to
+ * TRUE iff the entry is dirty when H5C_prep_for_file_close()
+ * is called.
+ *
+ * fd_parent_count: If the entry is a child in one or more flush dependency
+ * relationships, this field contains the number of flush
+ * dependency parents.
+ *
+ * In all other cases, the field is set to zero.
+ *
+ * Note that while this count is initially taken from the
+ * flush dependency fields above, if the entry is in the
+ * cache image (i.e. include_in_image is TRUE), any parents
+ * that are not in the image are removed from this count and
+ * from the fd_parent_addrs array below.
+ *
+ * Finally observe that if the entry is dirty and in the
+ * cache image, and its parent is dirty and not in the cache
+ * image, then the entry must be removed from the cache image
+ * to avoid violating the flush dependency flush ordering.
+ *
+ * fd_parent_addrs: If the entry is a child in one or more flush dependency
+ * relationships when H5C_prep_for_file_close() is called, this
+ * field must contain a pointer to an array of size
+ * fd_parent_count containing the on disk addresses of the
+ * parents.
+ *
+ * In all other cases, the field is set to NULL.
+ *
+ * Note that while this list of addresses is initially taken
+ * from the flush dependency fields above, if the entry is in the
+ * cache image (i.e. include_in_image is TRUE), any parents
+ * that are not in the image are removed from this list, and
+ * from the fd_parent_count above.
+ *
+ * Finally observe that if the entry is dirty and in the
+ * cache image, and its parent is dirty and not in the cache
+ * image, then the entry must be removed from the cache image
+ * to avoid violating the flush dependency flush ordering.
+ *
+ * fd_child_count: If the entry is a parent in a flush dependency
+ * relationship, this field contains the number of flush
+ * dependency children.
+ *
+ * In all other cases, the field is set to zero.
+ *
+ * Note that while this count is initially taken from the
+ * flush dependency fields above, if the entry is in the
+ * cache image (i.e. include_in_image is TRUE), any children
+ * that are not in the image are removed from this count.
+ *
+ * fd_dirty_child_count: If the entry is a parent in a flush dependency
+ * relationship, this field contains the number of dirty flush
+ * dependency children.
+ *
+ * In all other cases, the field is set to zero.
+ *
+ * Note that while this count is initially taken from the
+ * flush dependency fields above, if the entry is in the
+ * cache image (i.e. include_in_image is TRUE), any dirty
+ * children that are not in the image are removed from this
+ * count.
+ *
+ * image_fd_height: Flush dependency height of the entry in the cache image.
+ *
+ * The flush dependency height of any entry involved in a
+ * flush dependency relationship is defined to be the
+ * longest flush dependency path from that entry to an entry
+ * with no flush dependency children.
+ *
+ * Since the image_fd_height is used to order entries in the
+ * cache image so that fd parents precede fd children, for
+ * purposes of this field, an entry is at flush dependency
+ * level 0 if it either has no children, or if all of its
+ * children are not in the cache image.
+ *
+ * Note that if a child in a flush dependency relationship is
+ * dirty and in the cache image, and its parent is dirty and
+ * not in the cache image, then the child must be excluded
+ * from the cache image to maintain flush ordering.
+ *
+ * prefetched: Boolean flag indicating that the on disk image of the entry
+ * has been loaded into the cache prior to any request for the
+ * entry by the rest of the library.
+ *
+ * As of this writing (8/10/15), this can only happen through
+ * the load of a cache image block, although other scenarios
+ * are contemplated for the use of this feature. Note that
+ * unlike the usual prefetch situation, this means that a
+ * prefetched entry can be dirty, and/or can be a party to
+ * flush dependency relationship(s). This complicates matters
+ * somewhat.
+ *
+ * The essential feature of a prefetched entry is that it
+ * consists only of a buffer containing the on disk image of
+ * the entry. Thus it must be deserialized before it can
+ * be passed back to the library on a protect call. This
+ * task is handled by H5C_deserialized_prefetched_entry().
+ * In essence, this routine calls the deserialize callback
+ * provided in the protect call with the on disk image,
+ * deletes the prefetched entry from the cache, and replaces
+ * it with the deserialized entry returned by the deserialize
+ * callback.
+ *
+ * Further, if the prefetched entry is a flush dependency parent,
+ * all its flush dependency children (which must also be
+ * prefetched entries), must be transferred to the new cache
+ * entry returned by the deserialization callback.
+ *
+ * Finally, if the prefetched entry is a flush dependency child,
+ * this flush dependency must be destroyed prior to the
+ * deserialize call.
+ *
+ * In addition to the above special processing on the first
+ * protect call on a prefetched entry (after which it is no longer
+ * a prefetched entry), prefetched entries also require special
+ * treatment on flush and evict.
+ *
+ * On flush, a dirty prefetched entry must simply be written
+ * to disk and marked clean without any call to any client
+ * callback.
+ *
+ * On eviction, if a prefetched entry is a flush dependency
+ * child, that flush dependency relationship must be destroyed
+ * just prior to the eviction. If the flush dependency code
+ * is working properly, it should be impossible for any entry
+ * that is a flush dependency parent to be evicted.
+ *
+ * prefetch_type_id: Integer field containing the type ID of the prefetched
+ * entry. This ID must match the ID of the type provided in any
+ * protect call on the prefetched entry.
+ *
+ * The value of this field is undefined if prefetched is FALSE.
+ *
+ * age: Number of times a prefetched entry has appeared in
+ * subsequent cache images. The field exists to allow
+ * imposition of a limit on how many times a prefetched
+ * entry can appear in subsequent cache images without being
+ * converted to a regular entry.
+ *
+ * This field must be zero if prefetched is FALSE.
+ *
+ * prefetched_dirty: Boolean field that must be set to FALSE unless the
+ * following conditions hold:
+ *
+ * 1) The file has been opened R/O.
+ *
+ * 2) The entry is either a prefetched entry, or was
+ * re-constructed from a prefetched entry.
+ *
+ * 3) The base prefetched entry was marked dirty.
+ *
+ * This field exists to solve the following problem with
+ * files containing cache images that are opened R/O.
+ *
+ * If the cache image contains a dirty entry, that entry
+ * must be marked clean when it is inserted into the cache
+ * in the read-only case, as otherwise the metadata cache
+ * will attempt to flush it on file close -- which is poor
+ * form in the read-only case.
+ *
+ * However, since the entry is marked clean, it is possible
+ * that the metadata cache will evict it if the size of the
+ * metadata in the file exceeds the size of the metadata cache,
+ * and the application visits much of this data.
+ *
+ * If this happens, and the metadata cache is then asked for
+ * this entry, it will attempt to read it from file, and will
+ * obtain either obsolete or invalid data depending on whether
+ * the entry has ever been written to its assigned location in
+ * the file.
+ *
+ * With this background, the purpose of this field should be
+ * obvious -- when set, it allows the eviction candidate
+ * selection code to skip over the entry, thus avoiding the
+ * issue.
+ *
+ * Since the issue only arises in the R/O case, there is
+ * no possible interaction with SWMR. There are also
+ * potential interactions with Evict On Close -- at present,
+ * we deal with this by disabling EOC in the R/O case.
+ *
+ * serialization_count: Integer field used to maintain a count of the
+ * number of times each entry is serialized during cache
+ * serialization. While no entry should be serialized more than
+ * once in any serialization call, an assertion fails if any
+ * flush dependency parent is serialized more than once during
+ * a single cache serialization.
+ *
+ * This is a debugging field, and thus is maintained only if
+ * NDEBUG is undefined.
*
* Fields supporting tagged entries:
*
@@ -1347,12 +1603,12 @@ typedef int H5C_ring_t;
****************************************************************************/
typedef struct H5C_cache_entry_t {
uint32_t magic;
- H5C_t * cache_ptr;
+ H5C_t *cache_ptr;
haddr_t addr;
size_t size;
- void * image_ptr;
+ void *image_ptr;
hbool_t image_up_to_date;
- const H5C_class_t * type;
+ const H5C_class_t *type;
hbool_t is_dirty;
hbool_t dirtied;
hbool_t is_protected;
@@ -1379,27 +1635,48 @@ typedef struct H5C_cache_entry_t {
unsigned flush_dep_parent_nalloc;
unsigned flush_dep_nchildren;
unsigned flush_dep_ndirty_children;
+ unsigned flush_dep_nunser_children;
hbool_t pinned_from_client;
hbool_t pinned_from_cache;
/* fields supporting the hash table: */
- struct H5C_cache_entry_t * ht_next;
- struct H5C_cache_entry_t * ht_prev;
+ struct H5C_cache_entry_t *ht_next;
+ struct H5C_cache_entry_t *ht_prev;
+ struct H5C_cache_entry_t *il_next;
+ struct H5C_cache_entry_t *il_prev;
/* fields supporting replacement policies: */
- struct H5C_cache_entry_t * next;
- struct H5C_cache_entry_t * prev;
- struct H5C_cache_entry_t * aux_next;
- struct H5C_cache_entry_t * aux_prev;
+ struct H5C_cache_entry_t *next;
+ struct H5C_cache_entry_t *prev;
+ struct H5C_cache_entry_t *aux_next;
+ struct H5C_cache_entry_t *aux_prev;
#ifdef H5_HAVE_PARALLEL
- struct H5C_cache_entry_t * coll_next;
- struct H5C_cache_entry_t * coll_prev;
+ struct H5C_cache_entry_t *coll_next;
+ struct H5C_cache_entry_t *coll_prev;
#endif /* H5_HAVE_PARALLEL */
+ /* fields supporting cache image */
+ hbool_t include_in_image;
+ int32_t lru_rank;
+ hbool_t image_dirty;
+ uint64_t fd_parent_count;
+ haddr_t *fd_parent_addrs;
+ uint64_t fd_child_count;
+ uint64_t fd_dirty_child_count;
+ uint32_t image_fd_height;
+ hbool_t prefetched;
+ int prefetch_type_id;
+ int32_t age;
+ hbool_t prefetched_dirty;
+
+#ifndef NDEBUG /* debugging field */
+ int serialization_count;
+#endif /* NDEBUG */
+
/* fields supporting tag lists */
- struct H5C_cache_entry_t * tl_next;
- struct H5C_cache_entry_t * tl_prev;
- struct H5C_tag_info_t * tag_info;
+ struct H5C_cache_entry_t *tl_next;
+ struct H5C_cache_entry_t *tl_prev;
+ struct H5C_tag_info_t *tag_info;
#if H5C_COLLECT_CACHE_ENTRY_STATS
/* cache entry stats fields */
@@ -1410,6 +1687,168 @@ typedef struct H5C_cache_entry_t {
#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */
} H5C_cache_entry_t;
+
+/****************************************************************************
+ *
+ * structure H5C_image_entry_t
+ *
+ * Instances of the H5C_image_entry_t structure are used to store data on
+ * metadata cache entries used in the construction of the metadata cache
+ * image block. In essence this structure is a greatly simplified version
+ * of H5C_cache_entry_t.
+ *
+ * The fields of this structure are discussed individually below:
+ *
+ * JRM - 8/5/15
+ *
+ * magic: Unsigned 32 bit integer that must always be set to
+ * H5C_IMAGE_ENTRY_T_MAGIC when the entry is valid.
+ * The field must be set to H5C_IMAGE_ENTRY_T_BAD_MAGIC
+ * just before the entry is freed.
+ *
+ * addr: Base address of the cache entry on disk.
+ *
+ * size: Length of the cache entry on disk in bytes.
+ *
+ * ring: Instance of H5C_ring_t indicating the flush ordering ring
+ * to which this entry is assigned.
+ *
+ * age: Number of times this prefetched entry has appeared in
+ * the current sequence of cache images. This field is
+ * initialized to 0 if the instance of H5C_image_entry_t
+ * is constructed from a regular entry.
+ *
+ * If the instance is constructed from a prefetched entry
+ * currently residing in the metadata cache, the field is
+ * set to 1 + the age of the prefetched entry, or to
+ * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX if that sum exceeds
+ * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX.
+ *
+ * type_id: Integer field containing the type ID of the entry.
+ *
+ * lru_rank: Rank of the entry in the LRU just prior to file close.
+ *
+ * Note that the first entry on the LRU has lru_rank 1,
+ * and that entries not on the LRU at that time will have
+ * either lru_rank -1 (if pinned) or 0 (if loaded during
+ * the process of flushing the cache).
+ *
+ * is_dirty: Boolean flag indicating whether the contents of the cache
+ * entry have been modified since the last time the entry was
+ * written to disk as a regular piece of metadata.
+ *
+ * image_fd_height: Flush dependency height of the entry in the cache image.
+ *
+ * The flush dependency height of any entry involved in a
+ * flush dependency relationship is defined to be the
+ * longest flush dependency path from that entry to an entry
+ * with no flush dependency children.
+ *
+ * Since the image_fd_height is used to order entries in the
+ * cache image so that fd parents precede fd children, for
+ * purposes of this field, an entry is at flush dependency
+ * level 0 if it either has no children, or if all of its
+ * children are not in the cache image.
+ *
+ * Note that if a child in a flush dependency relationship is
+ * dirty and in the cache image, and its parent is dirty and
+ * not in the cache image, then the child must be excluded
+ * from the cache image to maintain flush ordering.
+ *
+ * fd_parent_count: If the entry is a child in one or more flush dependency
+ * relationships, this field contains the number of flush
+ * dependency parents.
+ *
+ * In all other cases, the field is set to zero.
+ *
+ * Note that while this count is initially taken from the
+ * flush dependency fields in the associated instance of
+ * H5C_cache_entry_t, if the entry is in the cache image
+ * (i.e. include_in_image is TRUE), any parents that are
+ * not in the image are removed from this count and
+ * from the fd_parent_addrs array below.
+ *
+ * Finally observe that if the entry is dirty and in the
+ * cache image, and its parent is dirty and not in the cache
+ * image, then the entry must be removed from the cache image
+ * to avoid violating the flush dependency flush ordering.
+ * This should have happened before the construction of
+ * the instance of H5C_image_entry_t.
+ *
+ * fd_parent_addrs: If the entry is a child in one or more flush dependency
+ * relationships when H5C_prep_for_file_close() is called, this
+ * field must contain a pointer to an array of size
+ * fd_parent_count containing the on disk addresses of the
+ * parents.
+ *
+ * In all other cases, the field is set to NULL.
+ *
+ * Note that while this list of addresses is initially taken
+ * from the flush dependency fields in the associated instance of
+ * H5C_cache_entry_t, if the entry is in the cache image
+ * (i.e. include_in_image is TRUE), any parents that are not
+ * in the image are removed from this list, and from the
+ * fd_parent_count above.
+ *
+ * Finally observe that if the entry is dirty and in the
+ * cache image, and its parent is dirty and not in the cache
+ * image, then the entry must be removed from the cache image
+ * to avoid violating the flush dependency flush ordering.
+ * This should have happened before the construction of
+ * the instance of H5C_image_entry_t.
+ *
+ * fd_child_count: If the entry is a parent in a flush dependency
+ * relationship, this field contains the number of flush
+ * dependency children.
+ *
+ * In all other cases, the field is set to zero.
+ *
+ * Note that while this count is initially taken from the
+ * flush dependency fields in the associated instance of
+ * H5C_cache_entry_t, if the entry is in the cache image
+ * (i.e. include_in_image is TRUE), any children
+ * that are not in the image are removed from this count.
+ *
+ * fd_dirty_child_count: If the entry is a parent in a flush dependency
+ * relationship, this field contains the number of dirty flush
+ * dependency children.
+ *
+ * In all other cases, the field is set to zero.
+ *
+ * Note that while this count is initially taken from the
+ * flush dependency fields in the associated instance of
+ * H5C_cache_entry_t, if the entry is in the cache image
+ * (i.e. include_in_image is TRUE), any dirty children
+ * that are not in the image are removed from this count.
+ *
+ * image_ptr: Pointer to void. When not NULL, this field points to a
+ * dynamically allocated block of size bytes in which the
+ * on disk image of the metadata cache entry is stored.
+ *
+ * If the entry is dirty, the pre-serialize and serialize
+ * callbacks must be used to update this image before it is
+ * written to disk.
+ *
+ *
+ ****************************************************************************/
+
+typedef struct H5C_image_entry_t {
+ uint32_t magic;
+ haddr_t addr;
+ size_t size;
+ H5C_ring_t ring;
+ int32_t age;
+ int32_t type_id;
+ int32_t lru_rank;
+ hbool_t is_dirty;
+ unsigned image_fd_height;
+ uint64_t fd_parent_count;
+ haddr_t *fd_parent_addrs;
+ uint64_t fd_child_count;
+ uint64_t fd_dirty_child_count;
+ void *image_ptr;
+} H5C_image_entry_t;
+
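
For illustration only, the sketch below copies the image-related fields of a cache entry into an H5C_image_entry_t. It is a simplified sketch under stated assumptions: the ring member of H5C_cache_entry_t and the id member of H5C_class_t are not shown in this hunk, and ownership of fd_parent_addrs and image_ptr is left to the cache's real bookkeeping.

static void
fill_image_entry_sketch(const H5C_cache_entry_t *entry_ptr,
    H5C_image_entry_t *ie_ptr)
{
    /* Magic value referenced in the comment above */
    ie_ptr->magic                = H5C_IMAGE_ENTRY_T_MAGIC;
    ie_ptr->addr                 = entry_ptr->addr;
    ie_ptr->size                 = entry_ptr->size;
    ie_ptr->ring                 = entry_ptr->ring;             /* 'ring' member assumed */
    ie_ptr->age                  = entry_ptr->age;
    ie_ptr->type_id              = entry_ptr->type->id;         /* 'id' member of H5C_class_t assumed */
    ie_ptr->lru_rank             = entry_ptr->lru_rank;
    ie_ptr->is_dirty             = entry_ptr->image_dirty;      /* see image_dirty above */
    ie_ptr->image_fd_height      = entry_ptr->image_fd_height;
    ie_ptr->fd_parent_count      = entry_ptr->fd_parent_count;
    ie_ptr->fd_parent_addrs      = entry_ptr->fd_parent_addrs;  /* ownership not handled here */
    ie_ptr->fd_child_count       = entry_ptr->fd_child_count;
    ie_ptr->fd_dirty_child_count = entry_ptr->fd_dirty_child_count;
    ie_ptr->image_ptr            = entry_ptr->image_ptr;        /* ownership not handled here */
} /* end fill_image_entry_sketch() */
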
/****************************************************************************
*
* structure H5C_auto_size_ctl_t
@@ -1699,12 +2138,98 @@ typedef struct H5C_auto_size_ctl_t {
double empty_reserve;
} H5C_auto_size_ctl_t;
+/****************************************************************************
+ *
+ * structure H5C_cache_image_ctl_t
+ *
+ * Instances of H5C_cache_image_ctl_t are used to get and set the control
+ * fields for generation of a metadata cache image on file close.
+ *
+ * At present control of construction of a cache image is via a FAPL
+ * property at file open / create.
+ *
+ * The fields of the structure are discussed individually below:
+ *
+ * version: Integer field containing the version number of this version
+ * of the H5C_cache_image_ctl_t structure. Any instance of
+ * H5C_cache_image_ctl_t passed to the cache must have a known
+ * version number, or an error will be flagged.
+ *
+ * generate_image: Boolean flag indicating whether a cache image should
+ * be created on file close.
+ *
+ * save_resize_status: Boolean flag indicating whether the cache image
+ * should include the adaptive cache resize configuration and status.
+ * Note that this field is ignored at present.
+ *
+ * entry_ageout: Integer field indicating the maximum number of
+ * times a prefetched entry can appear in subsequent cache images.
+ * This field exists to allow the user to avoid the buildup of
+ * infrequently used entries in long sequences of cache images.
+ *
+ * The value of this field must lie in the range
+ * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE (-1) to
+ * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX (100).
+ *
+ * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE means that no limit
+ * is imposed on the number of times a prefetched entry can appear
+ * in subsequent cache images.
+ *
+ * A value of 0 prevents prefetched entries from being included
+ * in cache images.
+ *
+ * Positive integers restrict prefetched entries to the specified
+ * number of appearances.
+ *
+ * Note that the number of subsequent cache images that a prefetched
+ * entry has appeared in is tracked in an 8 bit field. Thus, while
+ * H5AC__CACHE_IMAGE__ENTRY_AGEOUT__MAX can be increased from its
+ * current value, any value in excess of 255 will be the functional
+ * equivalent of H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE.
+ *
+ * flags: Unsigned integer containing flags controlling which aspects of the
+ * cache image functionality are actually executed. The primary impetus
+ * behind this field is to allow development of tests for partial
+ * implementations that will require little if any modification to run
+ * with the full implementation. In normal operation, all flags should
+ * be set.
+ *
+ ****************************************************************************/
+
+#define H5C_CI__GEN_MDCI_SBE_MESG ((unsigned)0x0001)
+#define H5C_CI__GEN_MDC_IMAGE_BLK ((unsigned)0x0002)
+#define H5C_CI__SUPRESS_ENTRY_WRITES ((unsigned)0x0004)
+#define H5C_CI__WRITE_CACHE_IMAGE ((unsigned)0x0008)
+
+/* This #define must set all defined H5C_CI flags. It is
+ * used in the default value for instances of H5C_cache_image_ctl_t.
+ * This value will only be modified in test code.
+ */
+#define H5C_CI__ALL_FLAGS ((unsigned)0x000F)
+
+#define H5C__DEFAULT_CACHE_IMAGE_CTL \
+{ \
+ /* version = */ H5C__CURR_CACHE_IMAGE_CTL_VER, \
+ /* generate_image = */ FALSE, \
+ /* save_resize_status = */ FALSE, \
+ /* entry_ageout = */ H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE, \
+ /* flags = */ H5C_CI__ALL_FLAGS \
+}
+
+typedef struct H5C_cache_image_ctl_t {
+ int32_t version;
+ hbool_t generate_image;
+ hbool_t save_resize_status;
+ int32_t entry_ageout;
+ unsigned flags;
+} H5C_cache_image_ctl_t;
+
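
A minimal sketch of how the control structure above might be used, starting from the default initializer and applying it through the H5C_validate_cache_image_config() and H5C_set_cache_image_config() prototypes declared later in this header; the f and cache_ptr arguments are assumed to be supplied by the caller.

static herr_t
enable_cache_image_sketch(const H5F_t *f, H5C_t *cache_ptr)
{
    H5C_cache_image_ctl_t ctl = H5C__DEFAULT_CACHE_IMAGE_CTL;

    ctl.generate_image = TRUE;  /* write a cache image block on file close */
    ctl.entry_ageout   = 10;    /* let a prefetched entry appear in at most 10 images */

    /* Reject out-of-range version / ageout values before applying */
    if(H5C_validate_cache_image_config(&ctl) < 0)
        return FAIL;

    return H5C_set_cache_image_config(f, cache_ptr, &ctl);
} /* end enable_cache_image_sketch() */
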
/***************************************/
/* Library-private Function Prototypes */
/***************************************/
H5_DLL H5C_t *H5C_create(size_t max_cache_size, size_t min_clean_size,
- int max_type_id, const char *(*type_name_table_ptr),
+ int max_type_id, const H5C_class_t * const *class_table_ptr,
H5C_write_permitted_func_t check_write_permitted, hbool_t write_permitted,
H5C_log_flush_func_t log_flush, void *aux_ptr);
H5_DLL herr_t H5C_set_up_logging(H5C_t *cache_ptr, const char log_location[], hbool_t start_immediately);
@@ -1724,6 +2249,7 @@ H5_DLL herr_t H5C_expunge_entry(H5F_t *f, hid_t dxpl_id,
const H5C_class_t *type, haddr_t addr, unsigned flags);
H5_DLL herr_t H5C_flush_cache(H5F_t *f, hid_t dxpl_id, unsigned flags);
H5_DLL herr_t H5C_flush_tagged_entries(H5F_t * f, hid_t dxpl_id, haddr_t tag);
+H5_DLL herr_t H5C_force_cache_image_load(H5F_t * f, hid_t dxpl_id);
H5_DLL herr_t H5C_evict_tagged_entries(H5F_t * f, hid_t dxpl_id, haddr_t tag, hbool_t match_global);
H5_DLL herr_t H5C_expunge_tag_type_metadata(H5F_t *f, hid_t dxpl_id, haddr_t tag, int type_id, unsigned flags);
H5_DLL herr_t H5C_get_tag(const void *thing, /*OUT*/ haddr_t *tag);
@@ -1733,42 +2259,53 @@ herr_t H5C_verify_tag(int id, haddr_t tag);
H5_DLL herr_t H5C_flush_to_min_clean(H5F_t *f, hid_t dxpl_id);
H5_DLL herr_t H5C_get_cache_auto_resize_config(const H5C_t *cache_ptr,
H5C_auto_size_ctl_t *config_ptr);
+H5_DLL herr_t H5C_get_cache_image_config(const H5C_t * cache_ptr,
+ H5C_cache_image_ctl_t *config_ptr);
H5_DLL herr_t H5C_get_cache_size(H5C_t *cache_ptr, size_t *max_size_ptr,
size_t *min_clean_size_ptr, size_t *cur_size_ptr,
- int32_t *cur_num_entries_ptr);
+ uint32_t *cur_num_entries_ptr);
H5_DLL herr_t H5C_get_cache_hit_rate(H5C_t *cache_ptr, double *hit_rate_ptr);
H5_DLL herr_t H5C_get_entry_status(const H5F_t *f, haddr_t addr,
size_t *size_ptr, hbool_t *in_cache_ptr, hbool_t *is_dirty_ptr,
hbool_t *is_protected_ptr, hbool_t *is_pinned_ptr, hbool_t *is_corked_ptr,
- hbool_t *is_flush_dep_parent_ptr, hbool_t *is_flush_dep_child_ptr);
+ hbool_t *is_flush_dep_parent_ptr, hbool_t *is_flush_dep_child_ptr,
+ hbool_t *image_up_to_date_ptr);
H5_DLL herr_t H5C_get_evictions_enabled(const H5C_t *cache_ptr, hbool_t *evictions_enabled_ptr);
H5_DLL void * H5C_get_aux_ptr(const H5C_t *cache_ptr);
H5_DLL FILE *H5C_get_trace_file_ptr(const H5C_t *cache_ptr);
H5_DLL FILE *H5C_get_trace_file_ptr_from_entry(const H5C_cache_entry_t *entry_ptr);
+H5_DLL herr_t H5C_image_stats(H5C_t * cache_ptr, hbool_t print_header);
H5_DLL herr_t H5C_insert_entry(H5F_t *f, hid_t dxpl_id, const H5C_class_t *type,
haddr_t addr, void *thing, unsigned int flags);
+H5_DLL herr_t H5C_load_cache_image_on_next_protect(H5F_t *f, haddr_t addr,
+ hsize_t len, hbool_t rw);
H5_DLL herr_t H5C_mark_entry_dirty(void *thing);
H5_DLL herr_t H5C_mark_entry_clean(void *thing);
+H5_DLL herr_t H5C_mark_entry_unserialized(void *thing);
+H5_DLL herr_t H5C_mark_entry_serialized(void *thing);
H5_DLL herr_t H5C_move_entry(H5C_t *cache_ptr, const H5C_class_t *type,
haddr_t old_addr, haddr_t new_addr);
H5_DLL herr_t H5C_pin_protected_entry(void *thing);
+H5_DLL herr_t H5C_prep_for_file_close(H5F_t *f, hid_t dxpl_id);
H5_DLL herr_t H5C_create_flush_dependency(void *parent_thing, void *child_thing);
H5_DLL void * H5C_protect(H5F_t *f, hid_t dxpl_id, const H5C_class_t *type,
haddr_t addr, void *udata, unsigned flags);
H5_DLL herr_t H5C_reset_cache_hit_rate_stats(H5C_t *cache_ptr);
H5_DLL herr_t H5C_resize_entry(void *thing, size_t new_size);
H5_DLL herr_t H5C_set_cache_auto_resize_config(H5C_t *cache_ptr, H5C_auto_size_ctl_t *config_ptr);
+H5_DLL herr_t H5C_set_cache_image_config(const H5F_t *f, H5C_t *cache_ptr,
+ H5C_cache_image_ctl_t *config_ptr);
H5_DLL herr_t H5C_set_evictions_enabled(H5C_t *cache_ptr, hbool_t evictions_enabled);
H5_DLL herr_t H5C_set_prefix(H5C_t *cache_ptr, char *prefix);
H5_DLL herr_t H5C_set_trace_file_ptr(H5C_t *cache_ptr, FILE *trace_file_ptr);
H5_DLL herr_t H5C_stats(H5C_t *cache_ptr, const char *cache_name,
hbool_t display_detailed_stats);
H5_DLL void H5C_stats__reset(H5C_t *cache_ptr);
-H5_DLL herr_t H5C_dump_cache(H5C_t *cache_ptr, const char *cache_name);
H5_DLL herr_t H5C_unpin_entry(void *thing);
H5_DLL herr_t H5C_destroy_flush_dependency(void *parent_thing, void *child_thing);
H5_DLL herr_t H5C_unprotect(H5F_t *f, hid_t dxpl_id, haddr_t addr, void *thing,
unsigned int flags);
+H5_DLL herr_t H5C_validate_cache_image_config(H5C_cache_image_ctl_t * ctl_ptr);
H5_DLL herr_t H5C_validate_resize_config(H5C_auto_size_ctl_t *config_ptr,
unsigned int tests);
H5_DLL herr_t H5C_ignore_tags(H5C_t *cache_ptr);
@@ -1776,18 +2313,43 @@ H5_DLL hbool_t H5C_get_ignore_tags(const H5C_t *cache_ptr);
H5_DLL herr_t H5C_retag_entries(H5C_t * cache_ptr, haddr_t src_tag, haddr_t dest_tag);
H5_DLL herr_t H5C_cork(H5C_t *cache_ptr, haddr_t obj_addr, unsigned action, hbool_t *corked);
H5_DLL herr_t H5C_get_entry_ring(const H5F_t *f, haddr_t addr, H5C_ring_t *ring);
-H5_DLL herr_t H5C_remove_entry(void * thing);
+H5_DLL herr_t H5C_unsettle_entry_ring(void *thing);
+H5_DLL herr_t H5C_unsettle_ring(H5F_t * f, H5C_ring_t ring);
+H5_DLL herr_t H5C_remove_entry(void *thing);
+H5_DLL herr_t H5C_cache_image_status(H5F_t * f, hbool_t *load_ci_ptr,
+ hbool_t *write_ci_ptr);
+H5_DLL hbool_t H5C_cache_image_pending(const H5C_t *cache_ptr);
+H5_DLL herr_t H5C_get_mdc_image_info(H5C_t *cache_ptr, haddr_t *image_addr, hsize_t *image_len);
#ifdef H5_HAVE_PARALLEL
H5_DLL herr_t H5C_apply_candidate_list(H5F_t *f, hid_t dxpl_id,
- H5C_t *cache_ptr, int num_candidates, haddr_t *candidates_list_ptr,
+ H5C_t *cache_ptr, unsigned num_candidates, haddr_t *candidates_list_ptr,
int mpi_rank, int mpi_size);
H5_DLL herr_t H5C_construct_candidate_list__clean_cache(H5C_t *cache_ptr);
H5_DLL herr_t H5C_construct_candidate_list__min_clean(H5C_t *cache_ptr);
H5_DLL herr_t H5C_clear_coll_entries(H5C_t * cache_ptr, hbool_t partial);
-H5_DLL herr_t H5C_mark_entries_as_clean(H5F_t *f, hid_t dxpl_id, int32_t ce_array_len,
+H5_DLL herr_t H5C_mark_entries_as_clean(H5F_t *f, hid_t dxpl_id, unsigned ce_array_len,
haddr_t *ce_array_ptr);
#endif /* H5_HAVE_PARALLEL */
+#ifndef NDEBUG /* debugging functions */
+H5_DLL herr_t H5C_dump_cache(H5C_t *cache_ptr, const char *cache_name);
+H5_DLL herr_t H5C_dump_cache_LRU(H5C_t *cache_ptr, const char *cache_name);
+H5_DLL hbool_t H5C_get_serialization_in_progress(const H5C_t *cache_ptr);
+H5_DLL hbool_t H5C_cache_is_clean(const H5C_t *cache_ptr, H5C_ring_t inner_ring);
+H5_DLL herr_t H5C_dump_cache_skip_list(H5C_t *cache_ptr, char *calling_fcn);
+#ifdef H5_HAVE_PARALLEL
+H5_DLL herr_t H5C_dump_coll_write_list(H5C_t * cache_ptr, char * calling_fcn);
+#endif /* H5_HAVE_PARALLEL */
+H5_DLL herr_t H5C_get_entry_ptr_from_addr(H5C_t *cache_ptr, haddr_t addr,
+ void **entry_ptr_ptr);
+H5_DLL herr_t H5C_flush_dependency_exists(H5C_t *cache_ptr, haddr_t parent_addr,
+ haddr_t child_addr, hbool_t *fd_exists_ptr);
+H5_DLL herr_t H5C_verify_entry_type(H5C_t *cache_ptr, haddr_t addr,
+ const H5C_class_t *expected_type, hbool_t *in_cache_ptr,
+ hbool_t *type_ok_ptr);
+H5_DLL herr_t H5C_validate_index_list(H5C_t *cache_ptr);
+#endif /* NDEBUG */
+
#endif /* !_H5Cprivate_H */
diff --git a/src/H5Cpublic.h b/src/H5Cpublic.h
index 39ebbe3..62107d9 100644
--- a/src/H5Cpublic.h
+++ b/src/H5Cpublic.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Cquery.c b/src/H5Cquery.c
index be0d4fa..6c927b0 100644
--- a/src/H5Cquery.c
+++ b/src/H5Cquery.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -131,7 +129,7 @@ H5C_get_cache_size(H5C_t * cache_ptr,
size_t * max_size_ptr,
size_t * min_clean_size_ptr,
size_t * cur_size_ptr,
- int32_t * cur_num_entries_ptr)
+ uint32_t * cur_num_entries_ptr)
{
herr_t ret_value = SUCCEED; /* Return value */
@@ -231,7 +229,8 @@ H5C_get_entry_status(const H5F_t *f,
hbool_t * is_pinned_ptr,
hbool_t * is_corked_ptr,
hbool_t * is_flush_dep_parent_ptr,
- hbool_t * is_flush_dep_child_ptr)
+ hbool_t * is_flush_dep_child_ptr,
+ hbool_t * image_up_to_date_ptr)
{
H5C_t * cache_ptr;
H5C_cache_entry_t * entry_ptr = NULL;
@@ -280,6 +279,8 @@ H5C_get_entry_status(const H5F_t *f,
*is_flush_dep_parent_ptr = (entry_ptr->flush_dep_nchildren > 0);
if(is_flush_dep_child_ptr != NULL)
*is_flush_dep_child_ptr = (entry_ptr->flush_dep_nparents > 0);
+ if(image_up_to_date_ptr != NULL )
+ *image_up_to_date_ptr = entry_ptr->image_up_to_date;
} /* end else */
done:
@@ -441,7 +442,8 @@ H5C_get_entry_ring(const H5F_t *f, haddr_t addr, H5C_ring_t *ring)
/* Locate the entry at the address */
H5C__SEARCH_INDEX(cache_ptr, addr, entry_ptr, FAIL)
- HDassert(entry_ptr);
+ if(entry_ptr == NULL)
+ HGOTO_ERROR(H5E_CACHE, H5E_NOTFOUND, FAIL, "can't find entry in index")
/* Return the ring value */
*ring = entry_ptr->ring;
@@ -450,3 +452,33 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5C_get_entry_ring() */
+/*-------------------------------------------------------------------------
+ * Function: H5C_get_mdc_image_info
+ *
+ * Purpose: To retrieve the address and size of the cache image in the file.
+ *
+ * Return: SUCCEED on success, and FAIL on failure.
+ *
+ * Programmer: Vailin Choi; March 2017
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5C_get_mdc_image_info(H5C_t * cache_ptr, haddr_t *image_addr, hsize_t *image_len)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ if((cache_ptr == NULL) || (cache_ptr->magic != H5C__H5C_T_MAGIC))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "bad cache_ptr on entry")
+ if(image_addr == NULL || image_len == NULL)
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "bad image_addr or image_len on entry")
+
+ *image_addr = cache_ptr->image_addr;
+ *image_len = cache_ptr->image_len;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5C_get_mdc_image_info() */
+
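
A brief usage sketch for this query, assuming the caller already holds a valid cache pointer; the plain fprintf() and the casts are for illustration only.

#include <stdio.h>

static void
report_cache_image_sketch(H5C_t *cache_ptr)
{
    haddr_t image_addr = HADDR_UNDEF;
    hsize_t image_len  = 0;

    /* Retrieve the file address and length of the cache image block */
    if(H5C_get_mdc_image_info(cache_ptr, &image_addr, &image_len) >= 0)
        fprintf(stderr, "cache image at address %llu, length %llu bytes\n",
                (unsigned long long)image_addr, (unsigned long long)image_len);
} /* end report_cache_image_sketch() */
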
diff --git a/src/H5Ctag.c b/src/H5Ctag.c
index 157a838..a9b2ec0 100644
--- a/src/H5Ctag.c
+++ b/src/H5Ctag.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -58,8 +56,18 @@
typedef struct {
H5F_t * f; /* File pointer for evicting entry */
hid_t dxpl_id; /* DXPL for evicting entry */
- hbool_t evicted_entries_last_pass; /* Flag to indicate that an entry was evicted when iterating over cache */
- hbool_t pinned_entries_need_evicted; /* Flag to indicate that a pinned entry was attempted to be evicted */
+ hbool_t evicted_entries_last_pass; /* Flag to indicate that an entry
+ * was evicted when iterating over
+ * cache
+ */
+ hbool_t pinned_entries_need_evicted;/* Flag to indicate that a pinned
+ * entry was attempted to be evicted
+ */
+ hbool_t skipped_pf_dirty_entries; /* Flag indicating that one or more
+ * entries marked prefetched_dirty
+ * were encountered and not
+ * evicted.
+ */
} H5C_tag_iter_evict_ctx_t;
/* Typedef for tagged entry iterator callback context - expunge tag type metadata */
@@ -465,12 +473,14 @@ H5C__evict_tagged_entries_cb(H5C_cache_entry_t *entry, void *_ctx)
entry and we'll loop back around again (as evicting other
entries will hopefully unpin this entry) */
ctx->pinned_entries_need_evicted = TRUE;
- else {
+ else if(!entry->prefetched_dirty) {
/* Evict the Entry */
if(H5C__flush_single_entry(ctx->f, ctx->dxpl_id, entry, H5C__FLUSH_INVALIDATE_FLAG | H5C__FLUSH_CLEAR_ONLY_FLAG | H5C__DEL_FROM_SLIST_ON_DESTROY_FLAG) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, H5_ITER_ERROR, "Entry eviction failed.")
ctx->evicted_entries_last_pass = TRUE;
- } /* end else */
+ } else {
+ ctx->skipped_pf_dirty_entries = TRUE;
+ }
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -516,6 +526,7 @@ H5C_evict_tagged_entries(H5F_t * f, hid_t dxpl_id, haddr_t tag, hbool_t match_gl
/* Reset pinned/evicted tracking flags */
ctx.pinned_entries_need_evicted = FALSE;
ctx.evicted_entries_last_pass = FALSE;
+ ctx.skipped_pf_dirty_entries = FALSE;
/* Iterate through entries in the cache */
if(H5C__iter_tagged_entries(cache, tag, match_global, H5C__evict_tagged_entries_cb, &ctx) < 0)
@@ -524,8 +535,32 @@ H5C_evict_tagged_entries(H5F_t * f, hid_t dxpl_id, haddr_t tag, hbool_t match_gl
/* Keep doing this until we have stopped evicted entries */
} while(TRUE == ctx.evicted_entries_last_pass);
- /* Fail if we have finished evicting entries and pinned entries still need evicted */
- if(ctx.pinned_entries_need_evicted)
+ /* In most cases, fail if we have finished evicting entries and pinned
+ * entries still need to be evicted.
+ *
+ * However, things can get strange if the file was opened R/O and
+ * the file contains a cache image and the cache image contains dirty
+ * entries.
+ *
+ * Since the file was opened read only, dirty entries in the cache
+ * image were marked as clean when they were inserted into the metadata
+ * cache. This is necessary, as if they are marked dirty, the metadata
+ * cache will attempt to write them on file close, which is frowned
+ * upon when the file is opened R/O.
+ *
+ * On the other hand, such entries (marked prefetched_dirty) must not
+ * be evicted, as, should the cache be asked to re-load them, the cache
+ * will attempt to read them from the file, and at best load an outdated
+ * version.
+ *
+ * To avoid this, H5C__evict_tagged_entries_cb has been modified to
+ * skip such entries. However, by doing so, it may prevent pinned
+ * entries from becoming unpinned.
+ *
+ * Thus we must ignore ctx.pinned_entries_need_evicted if
+ * ctx.skipped_pf_dirty_entries is TRUE.
+ */
+ if((!ctx.skipped_pf_dirty_entries) && (ctx.pinned_entries_need_evicted))
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "Pinned entries still need evicted?!")
done:
diff --git a/src/H5Ctest.c b/src/H5Ctest.c
index 876b63a..2cd0a5d 100644
--- a/src/H5Ctest.c
+++ b/src/H5Ctest.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5D.c b/src/H5D.c
index 44e4baa..fc2024a 100644
--- a/src/H5D.c
+++ b/src/H5D.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/****************/
@@ -1089,3 +1087,43 @@ done:
FUNC_LEAVE_API(ret_value)
} /* H5Dget_chunk_index_type() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5Dget_chunk_storage_size
+ *
+ * Purpose: Returns the size of an allocated chunk.
+ *
+ * Return: Non-negative on success, negative on failure
+ *
+ * Programmer: Matthew Strong (GE Healthcare)
+ * 20 October 2016
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Dget_chunk_storage_size(hid_t dset_id, const hsize_t *offset, hsize_t *chunk_nbytes)
+{
+ H5D_t *dset = NULL;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE3("e", "i*h*h", dset_id, offset, chunk_nbytes);
+
+ /* Check arguments */
+ if(NULL == (dset = (H5D_t *)H5I_object_verify(dset_id, H5I_DATASET)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset")
+ if(NULL == offset)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid argument (null)")
+ if(NULL == chunk_nbytes)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid argument (null)")
+
+ if(H5D_CHUNKED != dset->shared->layout.type)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a chunked dataset")
+
+ /* Call private function */
+ if(H5D__get_chunk_storage_size(dset, H5P_DATASET_XFER_DEFAULT, offset, chunk_nbytes) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't get storage size of chunk")
+
+done:
+ FUNC_LEAVE_API(ret_value);
+} /* H5Dget_chunk_storage_size() */
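
A short usage sketch for the new public routine; "chunked.h5" and "dset" are placeholder names, the dataset is assumed to be chunked and already written, and error handling is kept minimal.

#include "hdf5.h"
#include <stdio.h>

int
main(void)
{
    hid_t   file_id, dset_id;
    hsize_t offset[2] = {0, 0};       /* logical offset of the target chunk */
    hsize_t chunk_nbytes = 0;

    if((file_id = H5Fopen("chunked.h5", H5F_ACC_RDONLY, H5P_DEFAULT)) < 0)
        return 1;
    if((dset_id = H5Dopen2(file_id, "dset", H5P_DEFAULT)) < 0)
        return 1;

    /* Size of the allocated (possibly filtered) chunk at offset (0, 0) */
    if(H5Dget_chunk_storage_size(dset_id, offset, &chunk_nbytes) >= 0)
        printf("chunk at (0, 0) occupies %llu bytes on disk\n",
               (unsigned long long)chunk_nbytes);

    H5Dclose(dset_id);
    H5Fclose(file_id);
    return 0;
}
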
diff --git a/src/H5Dbtree.c b/src/H5Dbtree.c
index 8ef14b9..8177e13 100644
--- a/src/H5Dbtree.c
+++ b/src/H5Dbtree.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Robb Matzke <matzke@llnl.gov>
diff --git a/src/H5Dbtree2.c b/src/H5Dbtree2.c
index f687a5d..2e19392 100644
--- a/src/H5Dbtree2.c
+++ b/src/H5Dbtree2.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Dchunk.c b/src/H5Dchunk.c
index 7a646af..33fc036 100644
--- a/src/H5Dchunk.c
+++ b/src/H5Dchunk.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Quincey Koziol <koziol@hdfgroup.org>
@@ -305,6 +303,9 @@ static herr_t H5D__chunk_collective_fill(const H5D_t *dset, hid_t dxpl_id,
H5D_chunk_coll_info_t *chunk_info, size_t chunk_size, const void *fill_buf);
#endif /* H5_HAVE_PARALLEL */
+static int
+H5D__chunk_dump_index_cb(const H5D_chunk_rec_t *chunk_rec, void *_udata);
+
/*********************/
/* Package Variables */
/*********************/
@@ -371,8 +372,7 @@ H5FL_EXTERN(H5S_sel_iter_t);
/*-------------------------------------------------------------------------
* Function: H5D__chunk_direct_write
*
- * Purpose: Internal routine to write a chunk
- * directly into the file.
+ * Purpose: Internal routine to write a chunk directly into the file.
*
* Return: Non-negative on success/Negative on failure
*
@@ -410,7 +410,7 @@ H5D__chunk_direct_write(const H5D_t *dset, hid_t dxpl_id, uint32_t filters,
/* Allocate dataspace and initialize it if it hasn't been. */
if(!(*layout->ops->is_space_alloc)(&layout->storage)) {
- /* Allocate storage */
+ /* Allocate storage */
if(H5D__alloc_storage(&io_info, H5D_ALLOC_WRITE, FALSE, NULL) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to initialize storage")
}
@@ -445,11 +445,17 @@ H5D__chunk_direct_write(const H5D_t *dset, hid_t dxpl_id, uint32_t filters,
/* Set up the size of chunk for user data */
udata.chunk_block.length = data_size;
- /* Create the chunk it if it doesn't exist, or reallocate the chunk
- * if its size changed.
- */
- if(H5D__chunk_file_alloc(&idx_info, &old_chunk, &udata.chunk_block, &need_insert, scaled) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "unable to allocate chunk")
+ if (0 == idx_info.pline->nused && H5F_addr_defined(old_chunk.offset)) {
+ /* If there are no filters and we are overwriting an existing chunk, we can reuse its allocation as-is */
+ need_insert = FALSE;
+ }
+ else {
+ /* Otherwise, create the chunk if it doesn't exist, or reallocate the chunk
+ * if its size has changed.
+ */
+ if (H5D__chunk_file_alloc(&idx_info, &old_chunk, &udata.chunk_block, &need_insert, scaled) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTALLOC, FAIL, "unable to allocate chunk")
+ }
/* Make sure the address of the chunk is returned. */
if(!H5F_addr_defined(udata.chunk_block.offset))
@@ -467,7 +473,7 @@ H5D__chunk_direct_write(const H5D_t *dset, hid_t dxpl_id, uint32_t filters,
HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache")
if(H5D__chunk_cache_evict(dset, io_info.md_dxpl_id, dxpl_cache, rdcc->slot[udata.idx_hint], FALSE) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTREMOVE, FAIL, "unable to evict chunk")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTREMOVE, FAIL, "unable to evict chunk")
} /* end if */
/* Write the data to the file */
@@ -493,6 +499,246 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5D__chunk_direct_read
+ *
+ * Purpose: Internal routine to read a chunk directly from the file.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Matthew Strong (GE Healthcare)
+ * 14 February 2016
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5D__chunk_direct_read(const H5D_t *dset, hid_t dxpl_id, hsize_t *offset,
+ uint32_t* filters, void *buf)
+{
+ const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset layout */
+ const H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /* raw data chunk cache */
+ H5D_chunk_ud_t udata; /* User data for querying chunk info */
+ hsize_t scaled[H5S_MAX_RANK]; /* Scaled coordinates for this chunk */
+ H5D_io_info_t io_info; /* to hold the dset and two dxpls (meta and raw data) */
+ H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */
+ H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache; /* Data transfer property cache */
+ hbool_t md_dxpl_generated = FALSE; /* bool to indicate whether we should free the md_dxpl_id at exit */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC_TAG(dxpl_id, dset->oloc.addr, FAIL)
+
+ /* Check args */
+ HDassert(dset && H5D_CHUNKED == layout->type);
+ HDassert(offset);
+ HDassert(filters);
+ HDassert(buf);
+
+ *filters = 0;
+
+ io_info.dset = dset;
+ io_info.raw_dxpl_id = dxpl_id;
+ io_info.md_dxpl_id = dxpl_id;
+
+ /* set the dxpl IO type for sanity checking at the FD layer */
+#ifdef H5_DEBUG_BUILD
+ if(H5D_set_io_info_dxpls(&io_info, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, FAIL, "can't set metadata and raw data dxpls")
+ md_dxpl_generated = TRUE;
+#endif /* H5_DEBUG_BUILD */
+
+ /* Allocate dataspace and initialize it if it hasn't been. */
+ if(!(*layout->ops->is_space_alloc)(&layout->storage))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "storage is not initialized")
+
+ /* Calculate the index of this chunk */
+ H5VM_chunk_scaled(dset->shared->ndims, offset, layout->u.chunk.dim, scaled);
+ scaled[dset->shared->ndims] = 0;
+
+ /* Reset fields about the chunk we are looking for */
+ udata.filter_mask = 0;
+ udata.chunk_block.offset = HADDR_UNDEF;
+ udata.chunk_block.length = 0;
+ udata.idx_hint = UINT_MAX;
+
+ /* Find out the file address of the chunk */
+ if(H5D__chunk_lookup(dset, io_info.md_dxpl_id, scaled, &udata) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
+
+ /* Sanity check */
+ HDassert((H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length > 0) ||
+ (!H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length == 0));
+
+ /* Check if the requested chunk exists in the chunk cache */
+ if(UINT_MAX != udata.idx_hint) {
+ H5D_rdcc_ent_t *ent = rdcc->slot[udata.idx_hint];
+ hbool_t flush;
+
+ /* Sanity checks */
+ HDassert(udata.idx_hint < rdcc->nslots);
+ HDassert(rdcc->slot[udata.idx_hint]);
+
+ flush = (ent->dirty == TRUE) ? TRUE : FALSE;
+
+ /* Fill the DXPL cache values for later use */
+ if(H5D__get_dxpl_cache(io_info.raw_dxpl_id, &dxpl_cache) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache")
+
+ /* Flush the chunk to disk and clear the cache entry */
+ if(H5D__chunk_cache_evict(dset, io_info.md_dxpl_id, dxpl_cache, rdcc->slot[udata.idx_hint], flush) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTREMOVE, FAIL, "unable to evict chunk")
+
+ /* Reset fields about the chunk we are looking for */
+ udata.filter_mask = 0;
+ udata.chunk_block.offset = HADDR_UNDEF;
+ udata.chunk_block.length = 0;
+ udata.idx_hint = UINT_MAX;
+
+ /* Get the new file address / chunk size after flushing */
+ if(H5D__chunk_lookup(dset, io_info.md_dxpl_id, scaled, &udata) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
+ }
+
+ /* Make sure the address of the chunk is returned. */
+ if(!H5F_addr_defined(udata.chunk_block.offset))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "chunk address isn't defined")
+
+ /* Read the chunk data into the supplied buffer */
+ if(H5F_block_read(dset->oloc.file, H5FD_MEM_DRAW, udata.chunk_block.offset, udata.chunk_block.length, io_info.raw_dxpl_id, buf) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "unable to read raw data chunk")
+
+ /* Return the filter mask */
+ *filters = udata.filter_mask;
+
+done:
+#ifdef H5_DEBUG_BUILD
+ if(md_dxpl_generated && H5I_dec_ref(io_info.md_dxpl_id) < 0)
+ HDONE_ERROR(H5E_DATASET, H5E_CANTDEC, FAIL, "can't close metadata dxpl")
+#endif /* H5_DEBUG_BUILD */
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5D__chunk_direct_read() */
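As an orientation note, H5D__chunk_lookup above is keyed on the chunk's scaled coordinates rather than on the raw element offset; H5VM_chunk_scaled essentially divides each offset component by the corresponding chunk dimension. A stand-alone sketch of that computation, with illustrative names (this is not the library's internal implementation):

    /* Convert an element offset into per-dimension chunk indices ("scaled" coordinates). */
    static void
    chunk_scaled(unsigned ndims, const unsigned long long *offset,
                 const unsigned long long *chunk_dim, unsigned long long *scaled)
    {
        unsigned u;

        for(u = 0; u < ndims; u++)
            scaled[u] = offset[u] / chunk_dim[u];   /* offset is assumed chunk-aligned */
    }
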
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5D__get_chunk_storage_size
+ *
+ * Purpose: Internal routine to report the on-disk storage size of a chunk.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Matthew Strong (GE Healthcare)
+ * 20 October 2016
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5D__get_chunk_storage_size(H5D_t *dset, hid_t dxpl_id, const hsize_t *offset, hsize_t *storage_size)
+{
+ const H5O_layout_t *layout = &(dset->shared->layout); /* Dataset layout */
+ const H5D_rdcc_t *rdcc = &(dset->shared->cache.chunk); /* raw data chunk cache */
+ hsize_t scaled[H5S_MAX_RANK]; /* Scaled coordinates for this chunk */
+ H5D_io_info_t io_info; /* to hold the dset and two dxpls (meta and raw data) */
+ H5D_chunk_ud_t udata; /* User data for querying chunk info */
+ H5D_dxpl_cache_t _dxpl_cache; /* Data transfer property cache buffer */
+ H5D_dxpl_cache_t *dxpl_cache = &_dxpl_cache; /* Data transfer property cache */
+ hbool_t md_dxpl_generated = FALSE; /* bool to indicate whether we should free the md_dxpl_id at exit */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC_TAG(dxpl_id, dset->oloc.addr, FAIL)
+
+ /* Check args */
+ HDassert(dset && H5D_CHUNKED == layout->type);
+ HDassert(offset);
+ HDassert(storage_size);
+
+ *storage_size = 0;
+
+ io_info.dset = dset;
+ io_info.raw_dxpl_id = dxpl_id;
+ io_info.md_dxpl_id = dxpl_id;
+
+ /* set the dxpl IO type for sanity checking at the FD layer */
+#ifdef H5_DEBUG_BUILD
+ if(H5D_set_io_info_dxpls(&io_info, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTCOPY, FAIL, "can't set metadata and raw data dxpls")
+ md_dxpl_generated = TRUE;
+#endif /* H5_DEBUG_BUILD */
+
+ /* Allocate dataspace and initialize it if it hasn't been. */
+ if(!(*layout->ops->is_space_alloc)(&layout->storage))
+ HGOTO_DONE(SUCCEED)
+
+ /* Calculate the index of this chunk */
+ H5VM_chunk_scaled(dset->shared->ndims, offset, layout->u.chunk.dim, scaled);
+ scaled[dset->shared->ndims] = 0;
+
+ /* Reset fields about the chunk we are looking for */
+ udata.chunk_block.offset = HADDR_UNDEF;
+ udata.chunk_block.length = 0;
+ udata.idx_hint = UINT_MAX;
+
+ /* Find out the file address of the chunk */
+ if(H5D__chunk_lookup(dset, io_info.md_dxpl_id, scaled, &udata) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
+
+ /* Sanity check */
+ HDassert((H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length > 0) ||
+ (!H5F_addr_defined(udata.chunk_block.offset) && udata.chunk_block.length == 0));
+
+ /* Error out if the requested chunk is neither in the cache nor on disk */
+ if(!H5F_addr_defined(udata.chunk_block.offset) && UINT_MAX == udata.idx_hint)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "chunk storage is not allocated")
+
+ /* Check if there are filters registered to the dataset */
+ if(dset->shared->dcpl_cache.pline.nused > 0) {
+ /* Check if the requested chunk exists in the chunk cache */
+ if(UINT_MAX != udata.idx_hint) {
+ H5D_rdcc_ent_t *ent = rdcc->slot[udata.idx_hint];
+
+ /* Sanity checks */
+ HDassert(udata.idx_hint < rdcc->nslots);
+ HDassert(rdcc->slot[udata.idx_hint]);
+
+ /* If the cached chunk is dirty, it must be flushed to get an accurate size */
+ if(ent->dirty == TRUE) {
+ /* Fill the DXPL cache values for later use */
+ if(H5D__get_dxpl_cache(dxpl_id, &dxpl_cache) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "can't fill dxpl cache")
+
+ /* Flush the chunk to disk and clear the cache entry */
+ if(H5D__chunk_cache_evict(dset, io_info.md_dxpl_id, dxpl_cache, rdcc->slot[udata.idx_hint], TRUE) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTREMOVE, FAIL, "unable to evict chunk")
+
+ /* Reset fields about the chunk we are looking for */
+ udata.chunk_block.offset = HADDR_UNDEF;
+ udata.chunk_block.length = 0;
+ udata.idx_hint = UINT_MAX;
+
+ /* Get the new file address / chunk size after flushing */
+ if(H5D__chunk_lookup(dset, io_info.md_dxpl_id, scaled, &udata) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "error looking up chunk address")
+ }
+ }
+
+ /* Make sure the address of the chunk is returned. */
+ if(!H5F_addr_defined(udata.chunk_block.offset))
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTGET, FAIL, "chunk address isn't defined")
+
+ /* Return the chunk size on disk */
+ *storage_size = udata.chunk_block.length;
+ }
+ /* No filters are registered, so return the chunk size from the storage layout */
+ else
+ *storage_size = dset->shared->layout.u.chunk.size;
+
+done:
+#ifdef H5_DEBUG_BUILD
+ if(md_dxpl_generated && H5I_dec_ref(io_info.md_dxpl_id) < 0)
+ HDONE_ERROR(H5E_DATASET, H5E_CANTDEC, FAIL, "can't close metadata dxpl")
+#endif /* H5_DEBUG_BUILD */
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5D__get_chunk_storage_size */
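The routine above splits on the filter pipeline: with filters, only the chunk index (or a freshly flushed cache entry) knows the possibly-compressed on-disk size, while without filters every allocated chunk occupies exactly layout.u.chunk.size bytes, i.e. the number of elements per chunk times the datatype size. A small illustrative helper (hypothetical, not library code) showing that unfiltered fallback value:

    /* Unfiltered chunks all have the same on-disk size:
     * the number of elements per chunk times the element size. */
    static unsigned long long
    unfiltered_chunk_nbytes(unsigned ndims, const unsigned long long *chunk_dim,
                            size_t type_size)
    {
        unsigned long long nbytes = (unsigned long long)type_size;
        unsigned u;

        for(u = 0; u < ndims; u++)
            nbytes *= chunk_dim[u];

        return nbytes;
    }
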
+
+
+/*-------------------------------------------------------------------------
* Function: H5D__chunk_set_info_real
*
* Purpose: Internal routine to set the information about chunks for a dataset
@@ -521,22 +767,22 @@ H5D__chunk_set_info_real(H5O_layout_chunk_t *layout, unsigned ndims,
/* Compute the # of chunks in dataset dimensions */
for(u = 0, layout->nchunks = 1, layout->max_nchunks = 1; u < ndims; u++) {
/* Round up to the next integer # of chunks, to accomodate partial chunks */
- layout->chunks[u] = ((curr_dims[u] + layout->dim[u]) - 1) / layout->dim[u];
+ layout->chunks[u] = ((curr_dims[u] + layout->dim[u]) - 1) / layout->dim[u];
if(H5S_UNLIMITED == max_dims[u])
layout->max_chunks[u] = H5S_UNLIMITED;
else
layout->max_chunks[u] = ((max_dims[u] + layout->dim[u]) - 1) / layout->dim[u];
/* Accumulate the # of chunks */
- layout->nchunks *= layout->chunks[u];
- layout->max_nchunks *= layout->max_chunks[u];
+ layout->nchunks *= layout->chunks[u];
+ layout->max_nchunks *= layout->max_chunks[u];
} /* end for */
/* Get the "down" sizes for each dimension */
if(H5VM_array_down(ndims, layout->chunks, layout->down_chunks) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't compute 'down' chunk size value")
if(H5VM_array_down(ndims, layout->max_chunks, layout->max_down_chunks) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't compute 'down' chunk size value")
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't compute 'down' chunk size value")
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -2385,14 +2631,14 @@ H5D__chunk_dest(H5D_t *dset, hid_t dxpl_id)
/* Flush all the cached chunks */
for(ent = rdcc->head; ent; ent = next) {
- next = ent->next;
- if(H5D__chunk_cache_evict(dset, dxpl_id, dxpl_cache, ent, TRUE) < 0)
- nerrors++;
+ next = ent->next;
+ if(H5D__chunk_cache_evict(dset, dxpl_id, dxpl_cache, ent, TRUE) < 0)
+ nerrors++;
} /* end for */
/* Continue even if there are failures. */
if(nerrors)
- HDONE_ERROR(H5E_IO, H5E_CANTFLUSH, FAIL, "unable to flush one or more raw data chunks")
+ HDONE_ERROR(H5E_IO, H5E_CANTFLUSH, FAIL, "unable to flush one or more raw data chunks")
/* Release cache structures */
if(rdcc->slot)
@@ -2710,7 +2956,7 @@ H5D__chunk_lookup(const H5D_t *dset, hid_t dxpl_id, const hsize_t *scaled,
/* Check for chunk in cache */
if(dset->shared->cache.chunk.nslots > 0) {
/* Determine the chunk's location in the hash table */
- idx = H5D__chunk_hash_val(dset->shared, scaled);
+ idx = H5D__chunk_hash_val(dset->shared, scaled);
/* Get the chunk cache entry for that location */
ent = dset->shared->cache.chunk.slot[idx];
@@ -2734,7 +2980,7 @@ H5D__chunk_lookup(const H5D_t *dset, hid_t dxpl_id, const hsize_t *scaled,
udata->idx_hint = idx;
udata->chunk_block.offset = ent->chunk_block.offset;
udata->chunk_block.length = ent->chunk_block.length;;
- udata->chunk_idx = ent->chunk_idx;
+ udata->chunk_idx = ent->chunk_idx;
} /* end if */
else {
/* Invalidate idx_hint, to signal that the chunk is not in cache */
@@ -2999,27 +3245,27 @@ H5D__chunk_cache_evict(const H5D_t *dset, hid_t dxpl_id, const H5D_dxpl_cache_t
HDassert(ent->idx < rdcc->nslots);
if(flush) {
- /* Flush */
- if(H5D__chunk_flush_entry(dset, dxpl_id, dxpl_cache, ent, TRUE) < 0)
- HDONE_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "cannot flush indexed storage buffer")
+ /* Flush */
+ if(H5D__chunk_flush_entry(dset, dxpl_id, dxpl_cache, ent, TRUE) < 0)
+ HDONE_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "cannot flush indexed storage buffer")
} /* end if */
else {
/* Don't flush, just free chunk */
- if(ent->chunk != NULL)
- ent->chunk = (uint8_t *)H5D__chunk_mem_xfree(ent->chunk,
+ if(ent->chunk != NULL)
+ ent->chunk = (uint8_t *)H5D__chunk_mem_xfree(ent->chunk,
((ent->edge_chunk_state & H5D_RDCC_DISABLE_FILTERS) ? NULL
: &(dset->shared->dcpl_cache.pline)));
} /* end else */
/* Unlink from list */
if(ent->prev)
- ent->prev->next = ent->next;
+ ent->prev->next = ent->next;
else
- rdcc->head = ent->next;
+ rdcc->head = ent->next;
if(ent->next)
- ent->next->prev = ent->prev;
+ ent->next->prev = ent->prev;
else
- rdcc->tail = ent->prev;
+ rdcc->tail = ent->prev;
ent->prev = ent->next = NULL;
/* Unlink from temporary list */
@@ -6060,9 +6306,9 @@ H5D__chunk_stats(const H5D_t *dset, hbool_t headers)
#endif
if (headers) {
- if (rdcc->nhits>0 || rdcc->nmisses>0) {
- miss_rate = 100.0 * rdcc->nmisses /
- (rdcc->nhits + rdcc->nmisses);
+ if (rdcc->stats.nhits>0 || rdcc->stats.nmisses>0) {
+ miss_rate = 100.0 * rdcc->stats.nmisses /
+ (rdcc->stats.nhits + rdcc->stats.nmisses);
} else {
miss_rate = 0.0;
}
@@ -6073,8 +6319,8 @@ H5D__chunk_stats(const H5D_t *dset, hbool_t headers)
}
fprintf(H5DEBUG(AC), " %-18s %8u %8u %7s %8d+%-9ld\n",
- "raw data chunks", rdcc->nhits, rdcc->nmisses, ascii,
- rdcc->ninits, (long)(rdcc->nflushes)-(long)(rdcc->ninits));
+ "raw data chunks", rdcc->stats.nhits, rdcc->stats.nmisses, ascii,
+ rdcc->stats.ninits, (long)(rdcc->stats.nflushes)-(long)(rdcc->stats.ninits));
}
done:
diff --git a/src/H5Dcompact.c b/src/H5Dcompact.c
index 224a1d1..99a25b6 100644
--- a/src/H5Dcompact.c
+++ b/src/H5Dcompact.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -370,9 +368,11 @@ H5D__compact_flush(H5D_t *dset, hid_t dxpl_id)
/* Check if the buffered compact information is dirty */
if(dset->shared->layout.storage.u.compact.dirty) {
- if(H5O_msg_write(&(dset->oloc), H5O_LAYOUT_ID, 0, H5O_UPDATE_TIME, &(dset->shared->layout), dxpl_id) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "unable to update layout message")
dset->shared->layout.storage.u.compact.dirty = FALSE;
+ if(H5O_msg_write(&(dset->oloc), H5O_LAYOUT_ID, 0, H5O_UPDATE_TIME, &(dset->shared->layout), dxpl_id) < 0) {
+ dset->shared->layout.storage.u.compact.dirty = TRUE;
+ HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "unable to update layout message")
+ }
} /* end if */
done:
diff --git a/src/H5Dcontig.c b/src/H5Dcontig.c
index a24abe6..0ee4d28 100644
--- a/src/H5Dcontig.c
+++ b/src/H5Dcontig.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Ddbg.c b/src/H5Ddbg.c
index a6c130e..b1efb20 100644
--- a/src/H5Ddbg.c
+++ b/src/H5Ddbg.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/****************/
diff --git a/src/H5Ddeprec.c b/src/H5Ddeprec.c
index c6a03c2..8d9461c 100644
--- a/src/H5Ddeprec.c
+++ b/src/H5Ddeprec.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Dearray.c b/src/H5Dearray.c
index e9dbd0d..1df0a58 100644
--- a/src/H5Dearray.c
+++ b/src/H5Dearray.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Quincey Koziol <koziol@hdfgroup.org>
@@ -1137,7 +1135,7 @@ H5D__earray_idx_get_addr(const H5D_chk_idx_info_t *idx_info, H5D_chunk_ud_t *uda
H5VM_swizzle_coords(hsize_t, swizzled_coords, idx_info->layout->u.earray.unlim_dim);
/* Calculate the index of this chunk */
- idx = H5VM_chunk_index(ndims, swizzled_coords, idx_info->layout->u.earray.swizzled_dim, idx_info->layout->u.earray.swizzled_down_chunks);
+ idx = H5VM_chunk_index(ndims, swizzled_coords, idx_info->layout->u.earray.swizzled_dim, idx_info->layout->u.earray.swizzled_max_down_chunks);
} /* end if */
else {
/* Calculate the index of this chunk */
@@ -1202,7 +1200,8 @@ H5D__earray_idx_resize(H5O_layout_chunk_t *layout)
/* "Swizzle" constant dimensions for this dataset */
if(layout->u.earray.unlim_dim > 0) {
- hsize_t swizzled_chunks[H5O_LAYOUT_NDIMS]; /* Swizzled form of # of chunks in each dimension */
+ hsize_t swizzled_chunks[H5O_LAYOUT_NDIMS]; /* Swizzled form of # of chunks in each dimension */
+ hsize_t swizzled_max_chunks[H5O_LAYOUT_NDIMS]; /* Swizzled form of max # of chunks in each dimension */
/* Get the swizzled chunk dimensions */
HDmemcpy(layout->u.earray.swizzled_dim, layout->dim, (layout->ndims - 1) * sizeof(layout->dim[0]));
@@ -1215,6 +1214,14 @@ H5D__earray_idx_resize(H5O_layout_chunk_t *layout)
/* Get the swizzled "down" sizes for each dimension */
if(H5VM_array_down((layout->ndims - 1), swizzled_chunks, layout->u.earray.swizzled_down_chunks) < 0)
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't compute swizzled 'down' chunk size value")
+
+ /* Get the swizzled max number of chunks in each dimension */
+ HDmemcpy(swizzled_max_chunks, layout->max_chunks, (layout->ndims - 1) * sizeof(swizzled_max_chunks[0]));
+ H5VM_swizzle_coords(hsize_t, swizzled_max_chunks, layout->u.earray.unlim_dim);
+
+ /* Get the swizzled max "down" sizes for each dimension */
+ if(H5VM_array_down((layout->ndims - 1), swizzled_max_chunks, layout->u.earray.swizzled_max_down_chunks) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't compute swizzled 'down' chunk size value")
} /* end if */
done:
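The change above makes the extensible-array index use the "down" products of the maximum number of chunks per dimension (swizzled so the unlimited dimension comes first), so a chunk's linear index stays stable when the dataset is extended. For orientation, the "down" products computed by H5VM_array_down are ordinary row-major strides; a stand-alone sketch with illustrative names (not the library's implementation):

    /* down[i] = product of dims[i+1 .. ndims-1]; down[ndims-1] = 1.
     * A coordinate's linear index is then sum(coord[i] * down[i]). */
    static void
    array_down(unsigned ndims, const unsigned long long *dims, unsigned long long *down)
    {
        unsigned long long acc = 1;
        unsigned u;

        for(u = ndims; u > 0; u--) {
            down[u - 1] = acc;
            acc *= dims[u - 1];
        }
    }
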
@@ -1414,7 +1421,7 @@ H5D__earray_idx_remove(const H5D_chk_idx_info_t *idx_info, H5D_chunk_common_ud_t
H5VM_swizzle_coords(hsize_t, swizzled_coords, idx_info->layout->u.earray.unlim_dim);
/* Calculate the index of this chunk */
- idx = H5VM_chunk_index(ndims, swizzled_coords, idx_info->layout->u.earray.swizzled_dim, idx_info->layout->u.earray.swizzled_down_chunks);
+ idx = H5VM_chunk_index(ndims, swizzled_coords, idx_info->layout->u.earray.swizzled_dim, idx_info->layout->u.earray.swizzled_max_down_chunks);
} /* end if */
else {
/* Calculate the index of this chunk */
diff --git a/src/H5Defl.c b/src/H5Defl.c
index 387cbe3..5536ba3 100644
--- a/src/H5Defl.c
+++ b/src/H5Defl.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Dfarray.c b/src/H5Dfarray.c
index 6b95e12..d183a8c 100644
--- a/src/H5Dfarray.c
+++ b/src/H5Dfarray.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Vailin Choi <vchoi@hdfgroup.org>
diff --git a/src/H5Dfill.c b/src/H5Dfill.c
index 50c964b..922ac98 100644
--- a/src/H5Dfill.c
+++ b/src/H5Dfill.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Dint.c b/src/H5Dint.c
index 5a11581..1813ca6 100644
--- a/src/H5Dint.c
+++ b/src/H5Dint.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/****************/
@@ -3645,7 +3643,9 @@ H5D__refresh(hid_t dset_id, H5D_t *dset, hid_t dxpl_id)
FUNC_ENTER_NOAPI_NOINIT
+ /* Sanity check */
HDassert(dset);
+ HDassert(dset->shared);
/* If the layout is virtual... */
if(dset->shared->layout.type == H5D_VIRTUAL) {
@@ -3666,9 +3666,6 @@ H5D__refresh(hid_t dset_id, H5D_t *dset, hid_t dxpl_id)
done:
/* Release hold on virtual datasets' files */
if(virt_dsets_held) {
- /* Sanity check */
- HDassert(dset->shared->layout.type == H5D_VIRTUAL);
-
/* Release the hold on source datasets' files */
if(H5D__virtual_release_source_dset_files(head) < 0)
HDONE_ERROR(H5E_DATASET, H5E_CANTDEC, FAIL, "can't release VDS source files held open")
diff --git a/src/H5Dio.c b/src/H5Dio.c
index f5087da..572e6cf 100644
--- a/src/H5Dio.c
+++ b/src/H5Dio.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/****************/
@@ -127,9 +125,13 @@ H5Dread(hid_t dset_id, hid_t mem_type_id, hid_t mem_space_id,
hid_t file_space_id, hid_t plist_id, void *buf/*out*/)
{
H5D_t *dset = NULL;
- const H5S_t *mem_space = NULL;
- const H5S_t *file_space = NULL;
- herr_t ret_value = SUCCEED; /* Return value */
+ const H5S_t *mem_space = NULL;
+ const H5S_t *file_space = NULL;
+ H5P_genplist_t *plist; /* Property list pointer */
+ hsize_t *direct_offset = NULL;
+ hbool_t direct_read = FALSE;
+ uint32_t direct_filters = 0;
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_API(FAIL)
H5TRACE6("e", "iiiiix", dset_id, mem_type_id, mem_space_id, file_space_id,
@@ -137,28 +139,29 @@ H5Dread(hid_t dset_id, hid_t mem_type_id, hid_t mem_space_id,
/* check arguments */
if(NULL == (dset = (H5D_t *)H5I_object_verify(dset_id, H5I_DATASET)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset")
if(NULL == dset->oloc.file)
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset")
if(mem_space_id < 0 || file_space_id < 0)
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data space")
if(H5S_ALL != mem_space_id) {
- if(NULL == (mem_space = (const H5S_t *)H5I_object_verify(mem_space_id, H5I_DATASPACE)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data space")
+ if(NULL == (mem_space = (const H5S_t *)H5I_object_verify(mem_space_id, H5I_DATASPACE)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data space")
- /* Check for valid selection */
- if(H5S_SELECT_VALID(mem_space) != TRUE)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "selection+offset not within extent")
+ /* Check for valid selection */
+ if(H5S_SELECT_VALID(mem_space) != TRUE)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "selection+offset not within extent")
} /* end if */
+
if(H5S_ALL != file_space_id) {
- if(NULL == (file_space = (const H5S_t *)H5I_object_verify(file_space_id, H5I_DATASPACE)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data space")
+ if(NULL == (file_space = (const H5S_t *)H5I_object_verify(file_space_id, H5I_DATASPACE)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data space")
- /* Check for valid selection */
- if(H5S_SELECT_VALID(file_space) != TRUE)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "selection+offset not within extent")
+ /* Check for valid selection */
+ if(H5S_SELECT_VALID(file_space) != TRUE)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "selection+offset not within extent")
} /* end if */
/* Get the default dataset transfer property list if the user didn't provide one */
@@ -168,9 +171,55 @@ H5Dread(hid_t dset_id, hid_t mem_type_id, hid_t mem_space_id,
if(TRUE != H5P_isa_class(plist_id, H5P_DATASET_XFER))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not xfer parms")
- /* read raw data */
- if(H5D__read(dset, mem_type_id, mem_space, file_space, plist_id, buf/*out*/) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "can't read data")
+ /* Get the dataset transfer property list */
+ if(NULL == (plist = (H5P_genplist_t *)H5I_object(plist_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset transfer property list")
+
+ /* Retrieve the 'direct read' flag */
+ if(H5P_get(plist, H5D_XFER_DIRECT_CHUNK_READ_FLAG_NAME, &direct_read) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "error getting flag for direct chunk read")
+
+ if(direct_read) {
+ unsigned u;
+ hsize_t dims[H5O_LAYOUT_NDIMS];
+ hsize_t internal_offset[H5O_LAYOUT_NDIMS];
+
+ if(H5D_CHUNKED != dset->shared->layout.type)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a chunked dataset")
+
+ /* Get the direct chunk offset property */
+ if(H5P_get(plist, H5D_XFER_DIRECT_CHUNK_READ_OFFSET_NAME, &direct_offset) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "error getting direct offset from xfer properties")
+
+ /* The library's chunking code requires that the offset array terminate with a zero,
+ * so transfer the user's offset array to an internal offset array */
+ for(u = 0; u < dset->shared->ndims; u++) {
+ /* Make sure the offset doesn't exceed the dataset's dimensions */
+ if(direct_offset[u] > dset->shared->curr_dims[u])
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "offset exceeds dimensions of dataset")
+
+ /* Make sure the offset falls right on a chunk boundary */
+ if(direct_offset[u] % dset->shared->layout.u.chunk.dim[u])
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "offset doesn't fall on chunks's boundary")
+
+ internal_offset[u] = direct_offset[u];
+ } /* end for */
+
+ /* Terminate the offset with a zero */
+ internal_offset[dset->shared->ndims] = 0;
+
+ /* Read the raw chunk */
+ if(H5D__chunk_direct_read(dset, plist_id, internal_offset, &direct_filters, buf) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "can't read chunk directly")
+ /* Set the chunk filter mask property */
+ if(H5P_set(plist, H5D_XFER_DIRECT_CHUNK_READ_FILTERS_NAME, &direct_filters) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "error setting filter mask xfer property")
+ }
+ else {
+ /* read raw data */
+ if(H5D__read(dset, mem_type_id, mem_space, file_space, plist_id, buf/*out*/) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_READERROR, FAIL, "can't read data")
+ }
done:
FUNC_LEAVE_API(ret_value)
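The new branch in H5Dread is driven entirely by dataset-transfer properties, mirroring the existing direct chunk write path. The sketch below shows how a caller (for example, a high-level wrapper) might exercise it; it assumes the three H5D_XFER_DIRECT_CHUNK_READ_* properties are registered on the transfer property list class elsewhere in this patch (not shown in these hunks) and uses the generic H5Pset/H5Pget calls. The buffer must be large enough for one raw (possibly filtered) chunk; the new H5Dget_chunk_storage_size call can be used to size it.

    #include "hdf5.h"

    /* Read one raw (possibly filtered) chunk of dset_id into buf,
     * returning the chunk's filter mask in *filters.  Sketch only. */
    static herr_t
    read_raw_chunk(hid_t dset_id, const hsize_t offset[], uint32_t *filters, void *buf)
    {
        hid_t    dxpl_id = H5Pcreate(H5P_DATASET_XFER);
        hbool_t  direct_read = TRUE;
        hsize_t *offset_ptr = (hsize_t *)offset;   /* property stores a pointer to the offset array */
        herr_t   status;

        /* Enable the direct chunk read path and select the chunk by its (chunk-aligned) logical offset */
        H5Pset(dxpl_id, H5D_XFER_DIRECT_CHUNK_READ_FLAG_NAME, &direct_read);
        H5Pset(dxpl_id, H5D_XFER_DIRECT_CHUNK_READ_OFFSET_NAME, &offset_ptr);

        /* Dataspace arguments are ignored on this path; the raw chunk lands in buf */
        status = H5Dread(dset_id, H5T_NATIVE_UCHAR, H5S_ALL, H5S_ALL, dxpl_id, buf);

        /* The filter mask recorded for the chunk is returned through the DXPL */
        if(status >= 0)
            H5Pget(dxpl_id, H5D_XFER_DIRECT_CHUNK_READ_FILTERS_NAME, filters);

        H5Pclose(dxpl_id);
        return status;
    }
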
@@ -247,28 +296,28 @@ H5Dwrite(hid_t dset_id, hid_t mem_type_id, hid_t mem_space_id,
/* Check dataspace selections if this is not a direct write */
if(!direct_write) {
if(mem_space_id < 0 || file_space_id < 0)
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataspace")
-
- if(H5S_ALL != mem_space_id) {
- if(NULL == (mem_space = (const H5S_t *)H5I_object_verify(mem_space_id, H5I_DATASPACE)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataspace")
-
- /* Check for valid selection */
- if(H5S_SELECT_VALID(mem_space) != TRUE)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "memory selection+offset not within extent")
- } /* end if */
- if(H5S_ALL != file_space_id) {
- if(NULL == (file_space = (const H5S_t *)H5I_object_verify(file_space_id, H5I_DATASPACE)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataspace")
-
- /* Check for valid selection */
- if(H5S_SELECT_VALID(file_space) != TRUE)
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "file selection+offset not within extent")
- } /* end if */
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataspace")
+
+ if(H5S_ALL != mem_space_id) {
+ if(NULL == (mem_space = (const H5S_t *)H5I_object_verify(mem_space_id, H5I_DATASPACE)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataspace")
+
+ /* Check for valid selection */
+ if(H5S_SELECT_VALID(mem_space) != TRUE)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "memory selection+offset not within extent")
+ } /* end if */
+ if(H5S_ALL != file_space_id) {
+ if(NULL == (file_space = (const H5S_t *)H5I_object_verify(file_space_id, H5I_DATASPACE)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataspace")
+
+ /* Check for valid selection */
+ if(H5S_SELECT_VALID(file_space) != TRUE)
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADRANGE, FAIL, "file selection+offset not within extent")
+ } /* end if */
}
if(H5D__pre_write(dset, direct_write, mem_type_id, mem_space, file_space, dxpl_id, buf) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "can't prepare for writing data")
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "can't prepare for writing data")
done:
FUNC_LEAVE_API(ret_value)
@@ -302,15 +351,15 @@ H5D__pre_write(H5D_t *dset, hbool_t direct_write, hid_t mem_type_id,
uint32_t direct_filters;
hsize_t *direct_offset;
uint32_t direct_datasize;
- hsize_t internal_offset[H5O_LAYOUT_NDIMS];
- unsigned u; /* Local index variable */
+ hsize_t internal_offset[H5O_LAYOUT_NDIMS];
+ unsigned u; /* Local index variable */
/* Get the dataset transfer property list */
if(NULL == (plist = (H5P_genplist_t *)H5I_object(dxpl_id)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataset transfer property list")
if(H5D_CHUNKED != dset->shared->layout.type)
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a chunked dataset")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a chunked dataset")
/* Retrieve parameters for direct chunk write */
if(H5P_get(plist, H5D_XFER_DIRECT_CHUNK_WRITE_FILTERS_NAME, &direct_filters) < 0)
@@ -320,19 +369,19 @@ H5D__pre_write(H5D_t *dset, hbool_t direct_write, hid_t mem_type_id,
if(H5P_get(plist, H5D_XFER_DIRECT_CHUNK_WRITE_DATASIZE_NAME, &direct_datasize) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "error getting data size for direct chunk write")
- /* The library's chunking code requires the offset terminates with a zero. So transfer the
+ /* The library's chunking code requires the offset terminates with a zero. So transfer the
* offset array to an internal offset array */
- for(u = 0; u < dset->shared->ndims; u++) {
- /* Make sure the offset doesn't exceed the dataset's dimensions */
+ for(u = 0; u < dset->shared->ndims; u++) {
+ /* Make sure the offset doesn't exceed the dataset's dimensions */
if(direct_offset[u] > dset->shared->curr_dims[u])
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "offset exceeds dimensions of dataset")
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "offset exceeds dimensions of dataset")
/* Make sure the offset fall right on a chunk's boundary */
- if(direct_offset[u] % dset->shared->layout.u.chunk.dim[u])
- HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "offset doesn't fall on chunks's boundary")
+ if(direct_offset[u] % dset->shared->layout.u.chunk.dim[u])
+ HGOTO_ERROR(H5E_DATASPACE, H5E_BADTYPE, FAIL, "offset doesn't fall on chunks's boundary")
- internal_offset[u] = direct_offset[u];
- } /* end for */
+ internal_offset[u] = direct_offset[u];
+ } /* end for */
/* Terminate the offset with a zero */
internal_offset[dset->shared->ndims] = 0;
@@ -344,7 +393,7 @@ H5D__pre_write(H5D_t *dset, hbool_t direct_write, hid_t mem_type_id,
else { /* Normal write */
/* write raw data */
if(H5D__write(dset, mem_type_id, mem_space, file_space, dxpl_id, buf) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "can't write data")
+ HGOTO_ERROR(H5E_DATASET, H5E_WRITEERROR, FAIL, "can't write data")
} /* end else */
done:
diff --git a/src/H5Dlayout.c b/src/H5Dlayout.c
index cb2eca2..ec18e86 100644
--- a/src/H5Dlayout.c
+++ b/src/H5Dlayout.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/****************/
@@ -595,6 +593,7 @@ herr_t
H5D__layout_oh_read(H5D_t *dataset, hid_t dxpl_id, hid_t dapl_id, H5P_genplist_t *plist)
{
htri_t msg_exists; /* Whether a particular type of message exists */
+ hbool_t layout_copied = FALSE; /* Flag to indicate that layout message was copied */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
@@ -624,6 +623,7 @@ H5D__layout_oh_read(H5D_t *dataset, hid_t dxpl_id, hid_t dapl_id, H5P_genplist_t
*/
if(NULL == H5O_msg_read(&(dataset->oloc), H5O_LAYOUT_ID, &(dataset->shared->layout), dxpl_id))
HGOTO_ERROR(H5E_DATASET, H5E_CANTINIT, FAIL, "unable to read data layout message")
+ layout_copied = TRUE;
/* Check for external file list message (which might not exist) */
if((msg_exists = H5O_msg_exists(&(dataset->oloc), H5O_EFL_ID, dxpl_id)) < 0)
@@ -657,10 +657,16 @@ H5D__layout_oh_read(H5D_t *dataset, hid_t dxpl_id, hid_t dapl_id, H5P_genplist_t
HGOTO_ERROR(H5E_DATASET, H5E_CANTSET, FAIL, "can't set layout")
/* Set chunk sizes */
- if(H5D__chunk_set_sizes(dataset) < 0)
- HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "unable to set chunk sizes")
+ if(H5D_CHUNKED == dataset->shared->layout.type) {
+ if(H5D__chunk_set_sizes(dataset) < 0)
+ HGOTO_ERROR(H5E_DATASET, H5E_BADVALUE, FAIL, "unable to set chunk sizes")
+ }
done:
+ if(ret_value < 0 && layout_copied) {
+ if(H5O_msg_reset(H5O_LAYOUT_ID, &dataset->shared->layout) < 0)
+ HDONE_ERROR(H5E_DATASET, H5E_CANTRESET, FAIL, "unable to reset layout info")
+ }
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5D__layout_oh_read() */
diff --git a/src/H5Dmodule.h b/src/H5Dmodule.h
index 9b0c411..b259b69 100644
--- a/src/H5Dmodule.h
+++ b/src/H5Dmodule.h
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Dmpio.c b/src/H5Dmpio.c
index 441cc96..0389c72 100644
--- a/src/H5Dmpio.c
+++ b/src/H5Dmpio.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Dnone.c b/src/H5Dnone.c
index 0cadac2..97ec5d6 100644
--- a/src/H5Dnone.c
+++ b/src/H5Dnone.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Vailin Choi <vchoi@hdfgroup.org>
diff --git a/src/H5Doh.c b/src/H5Doh.c
index 5cdf4bc..9abbdff 100644
--- a/src/H5Doh.c
+++ b/src/H5Doh.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/****************/
diff --git a/src/H5Dpkg.h b/src/H5Dpkg.h
index f54a9f2..a6857b9 100644
--- a/src/H5Dpkg.h
+++ b/src/H5Dpkg.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -580,6 +578,7 @@ H5_DLL herr_t H5D__get_space_status(H5D_t *dset, H5D_space_status_t *allocation,
H5_DLL herr_t H5D__alloc_storage(const H5D_io_info_t *io_info, H5D_time_alloc_t time_alloc,
hbool_t full_overwrite, hsize_t old_dim[]);
H5_DLL herr_t H5D__get_storage_size(H5D_t *dset, hid_t dxpl_id, hsize_t *storage_size);
+H5_DLL herr_t H5D__get_chunk_storage_size(H5D_t *dset, hid_t dxpl_id, const hsize_t *offset, hsize_t *storage_size);
H5_DLL haddr_t H5D__get_offset(const H5D_t *dset);
H5_DLL void *H5D__vlen_get_buf_size_alloc(size_t size, void *info);
H5_DLL herr_t H5D__vlen_get_buf_size(void *elem, hid_t type_id, unsigned ndim,
@@ -687,6 +686,8 @@ H5_DLL herr_t H5D__chunk_delete(H5F_t *f, hid_t dxpl_id, H5O_t *oh,
H5O_storage_t *store);
H5_DLL herr_t H5D__chunk_direct_write(const H5D_t *dset, hid_t dxpl_id, uint32_t filters,
hsize_t *offset, uint32_t data_size, const void *buf);
+H5_DLL herr_t H5D__chunk_direct_read(const H5D_t *dset, hid_t dxpl_id, hsize_t *offset,
+ uint32_t *filters, void *buf);
#ifdef H5D_CHUNK_DEBUG
H5_DLL herr_t H5D__chunk_stats(const H5D_t *dset, hbool_t headers);
#endif /* H5D_CHUNK_DEBUG */
@@ -776,6 +777,7 @@ H5_DLL htri_t H5D__mpio_opt_possible(const H5D_io_info_t *io_info,
#ifdef H5D_TESTING
H5_DLL herr_t H5D__layout_version_test(hid_t did, unsigned *version);
H5_DLL herr_t H5D__layout_contig_size_test(hid_t did, hsize_t *size);
+H5_DLL herr_t H5D__layout_compact_dirty_test(hid_t did, hbool_t *dirty);
H5_DLL herr_t H5D__layout_idx_type_test(hid_t did, H5D_chunk_index_t *idx_type);
H5_DLL herr_t H5D__layout_type_test(hid_t did, H5D_layout_t *layout_type);
H5_DLL herr_t H5D__current_cache_size_test(hid_t did, size_t *nbytes_used, int *nused);
diff --git a/src/H5Dprivate.h b/src/H5Dprivate.h
index ea2b8c8..5d565bb 100644
--- a/src/H5Dprivate.h
+++ b/src/H5Dprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Dpublic.h b/src/H5Dpublic.h
index 81ae67d..baa844a 100644
--- a/src/H5Dpublic.h
+++ b/src/H5Dpublic.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -42,6 +40,11 @@
#define H5D_XFER_DIRECT_CHUNK_WRITE_FILTERS_NAME "direct_chunk_filters"
#define H5D_XFER_DIRECT_CHUNK_WRITE_OFFSET_NAME "direct_chunk_offset"
#define H5D_XFER_DIRECT_CHUNK_WRITE_DATASIZE_NAME "direct_chunk_datasize"
+
+/* Property names for direct chunk read */
+#define H5D_XFER_DIRECT_CHUNK_READ_FLAG_NAME "direct_chunk_read_flag"
+#define H5D_XFER_DIRECT_CHUNK_READ_OFFSET_NAME "direct_chunk_read_offset"
+#define H5D_XFER_DIRECT_CHUNK_READ_FILTERS_NAME "direct_chunk_read_filters"
/*******************/
/* Public Typedefs */
@@ -148,6 +151,7 @@ H5_DLL hid_t H5Dget_type(hid_t dset_id);
H5_DLL hid_t H5Dget_create_plist(hid_t dset_id);
H5_DLL hid_t H5Dget_access_plist(hid_t dset_id);
H5_DLL hsize_t H5Dget_storage_size(hid_t dset_id);
+H5_DLL herr_t H5Dget_chunk_storage_size(hid_t dset_id, const hsize_t *offset, hsize_t *chunk_nbytes);
H5_DLL haddr_t H5Dget_offset(hid_t dset_id);
H5_DLL herr_t H5Dread(hid_t dset_id, hid_t mem_type_id, hid_t mem_space_id,
hid_t file_space_id, hid_t plist_id, void *buf/*out*/);
diff --git a/src/H5Dscatgath.c b/src/H5Dscatgath.c
index 55111f0..4625c7a 100644
--- a/src/H5Dscatgath.c
+++ b/src/H5Dscatgath.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/****************/
diff --git a/src/H5Dselect.c b/src/H5Dselect.c
index 53829e5..b4d0515 100644
--- a/src/H5Dselect.c
+++ b/src/H5Dselect.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
 /* Programmer: Quincey Koziol <koziol@ncsa.uiuc.edu>
diff --git a/src/H5Dsingle.c b/src/H5Dsingle.c
index 04b8971..e999449 100644
--- a/src/H5Dsingle.c
+++ b/src/H5Dsingle.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Vailin Choi <vchoi@hdfgroup.org>
diff --git a/src/H5Dtest.c b/src/H5Dtest.c
index 56f14f7..c2b6199 100644
--- a/src/H5Dtest.c
+++ b/src/H5Dtest.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Quincey Koziol <koziol@ncsa.uiuc.edu>
@@ -144,6 +142,47 @@ done:
/*--------------------------------------------------------------------------
NAME
+ H5D__layout_compact_dirty_test
+ PURPOSE
+ Determine the "dirty" flag of a compact layout for a dataset's layout information
+ USAGE
+ herr_t H5D__layout_compact_dirty_test(did, dirty)
+ hid_t did; IN: Dataset to query
+ hbool_t *dirty; OUT: Pointer to location to place "dirty" info
+ RETURNS
+ Non-negative on success, negative on failure
+ DESCRIPTION
+ Checks the "dirty" flag of a compact dataset.
+ GLOBAL VARIABLES
+ COMMENTS, BUGS, ASSUMPTIONS
+ DO NOT USE THIS FUNCTION FOR ANYTHING EXCEPT TESTING
+ EXAMPLES
+ REVISION LOG
+--------------------------------------------------------------------------*/
+herr_t
+H5D__layout_compact_dirty_test(hid_t did, hbool_t *dirty)
+{
+ H5D_t *dset; /* Pointer to dataset to query */
+ herr_t ret_value = SUCCEED; /* return value */
+
+ FUNC_ENTER_PACKAGE
+
+ /* Check args */
+ if(NULL == (dset = (H5D_t *)H5I_object_verify(did, H5I_DATASET)))
+ HGOTO_ERROR(H5E_DATASET, H5E_BADTYPE, FAIL, "not a dataset")
+
+ if(dirty) {
+ HDassert(dset->shared->layout.type == H5D_COMPACT);
+ *dirty = dset->shared->layout.storage.u.compact.dirty;
+ } /* end if */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5D__layout_compact_dirty_test() */
+
+
+/*--------------------------------------------------------------------------
+ NAME
H5D__layout_type_test
PURPOSE
Determine the storage layout type for a dataset
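
The H5D__layout_compact_dirty_test() routine added above is package-private and, as its own comment warns, meant only for the test suite. A sketch of how a test might consume it, following the H5D_FRIEND / H5D_TESTING convention the library's tests use to reach package headers; it only builds inside the HDF5 source tree, and `did` is assumed to be an already-open dataset with compact layout:

#define H5D_FRIEND          /* suppress error about including H5Dpkg.h */
#define H5D_TESTING
#include "H5Dpkg.h"         /* declares H5D__layout_compact_dirty_test() */

/* Returns 1 if the compact dataset `did` still holds unflushed data,
 * 0 if it is clean, -1 on error. */
static int compact_is_dirty(hid_t did)
{
    hbool_t dirty = FALSE;

    if(H5D__layout_compact_dirty_test(did, &dirty) < 0)
        return -1;

    return dirty ? 1 : 0;
}
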
diff --git a/src/H5Dvirtual.c b/src/H5Dvirtual.c
index c85b1e9..3be2353 100644
--- a/src/H5Dvirtual.c
+++ b/src/H5Dvirtual.c
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5E.c b/src/H5E.c
index aa4511b..741f0dd 100644
--- a/src/H5E.c
+++ b/src/H5E.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5EA.c b/src/H5EA.c
index e35a4ed..c524d49 100644
--- a/src/H5EA.c
+++ b/src/H5EA.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5EAcache.c b/src/H5EAcache.c
index 1119e39..def38af 100644
--- a/src/H5EAcache.c
+++ b/src/H5EAcache.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -554,6 +552,8 @@ H5EA__cache_hdr_notify(H5AC_notify_action_t action, void *_thing))
case H5AC_NOTIFY_ACTION_ENTRY_CLEANED:
case H5AC_NOTIFY_ACTION_CHILD_DIRTIED:
case H5AC_NOTIFY_ACTION_CHILD_CLEANED:
+ case H5AC_NOTIFY_ACTION_CHILD_UNSERIALIZED:
+ case H5AC_NOTIFY_ACTION_CHILD_SERIALIZED:
/* do nothing */
break;
@@ -958,6 +958,8 @@ H5EA__cache_iblock_notify(H5AC_notify_action_t action, void *_thing))
case H5AC_NOTIFY_ACTION_ENTRY_CLEANED:
case H5AC_NOTIFY_ACTION_CHILD_DIRTIED:
case H5AC_NOTIFY_ACTION_CHILD_CLEANED:
+ case H5AC_NOTIFY_ACTION_CHILD_UNSERIALIZED:
+ case H5AC_NOTIFY_ACTION_CHILD_SERIALIZED:
/* do nothing */
break;
@@ -1381,6 +1383,8 @@ H5EA__cache_sblock_notify(H5AC_notify_action_t action, void *_thing))
case H5AC_NOTIFY_ACTION_ENTRY_CLEANED:
case H5AC_NOTIFY_ACTION_CHILD_DIRTIED:
case H5AC_NOTIFY_ACTION_CHILD_CLEANED:
+ case H5AC_NOTIFY_ACTION_CHILD_UNSERIALIZED:
+ case H5AC_NOTIFY_ACTION_CHILD_SERIALIZED:
/* do nothing */
break;
@@ -1792,6 +1796,8 @@ H5EA__cache_dblock_notify(H5AC_notify_action_t action, void *_thing))
case H5AC_NOTIFY_ACTION_ENTRY_CLEANED:
case H5AC_NOTIFY_ACTION_CHILD_DIRTIED:
case H5AC_NOTIFY_ACTION_CHILD_CLEANED:
+ case H5AC_NOTIFY_ACTION_CHILD_UNSERIALIZED:
+ case H5AC_NOTIFY_ACTION_CHILD_SERIALIZED:
/* do nothing */
break;
@@ -2171,6 +2177,8 @@ H5EA__cache_dblk_page_notify(H5AC_notify_action_t action, void *_thing))
case H5AC_NOTIFY_ACTION_ENTRY_CLEANED:
case H5AC_NOTIFY_ACTION_CHILD_DIRTIED:
case H5AC_NOTIFY_ACTION_CHILD_CLEANED:
+ case H5AC_NOTIFY_ACTION_CHILD_UNSERIALIZED:
+ case H5AC_NOTIFY_ACTION_CHILD_SERIALIZED:
/* do nothing */
break;
diff --git a/src/H5EAdbg.c b/src/H5EAdbg.c
index ef45881..e67a5a8 100644
--- a/src/H5EAdbg.c
+++ b/src/H5EAdbg.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5EAdblkpage.c b/src/H5EAdblkpage.c
index 927dcb6..2b07356 100644
--- a/src/H5EAdblkpage.c
+++ b/src/H5EAdblkpage.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5EAdblock.c b/src/H5EAdblock.c
index c288c69..7df0ee9 100644
--- a/src/H5EAdblock.c
+++ b/src/H5EAdblock.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5EAhdr.c b/src/H5EAhdr.c
index 76d8733..92d7c4d 100644
--- a/src/H5EAhdr.c
+++ b/src/H5EAhdr.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5EAiblock.c b/src/H5EAiblock.c
index a3723c5..e25e3ef 100644
--- a/src/H5EAiblock.c
+++ b/src/H5EAiblock.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5EAint.c b/src/H5EAint.c
index 9f910d0..2baf1f4 100644
--- a/src/H5EAint.c
+++ b/src/H5EAint.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5EAmodule.h b/src/H5EAmodule.h
index 405b232..d3e06b7 100644
--- a/src/H5EAmodule.h
+++ b/src/H5EAmodule.h
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5EApkg.h b/src/H5EApkg.h
index 093403c..e162fab 100644
--- a/src/H5EApkg.h
+++ b/src/H5EApkg.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -378,21 +376,6 @@ typedef struct H5EA__ctx_cb_t {
/* Package Private Variables */
/*****************************/
-/* H5EA header inherits cache-like properties from H5AC */
-H5_DLLVAR const H5AC_class_t H5AC_EARRAY_HDR[1];
-
-/* H5EA index block inherits cache-like properties from H5AC */
-H5_DLLVAR const H5AC_class_t H5AC_EARRAY_IBLOCK[1];
-
-/* H5EA index block inherits cache-like properties from H5AC */
-H5_DLLVAR const H5AC_class_t H5AC_EARRAY_SBLOCK[1];
-
-/* H5EA data block inherits cache-like properties from H5AC */
-H5_DLLVAR const H5AC_class_t H5AC_EARRAY_DBLOCK[1];
-
-/* H5EA data block page inherits cache-like properties from H5AC */
-H5_DLLVAR const H5AC_class_t H5AC_EARRAY_DBLK_PAGE[1];
-
/* Internal extensible array testing class */
H5_DLLVAR const H5EA_class_t H5EA_CLS_TEST[1];
diff --git a/src/H5EAprivate.h b/src/H5EAprivate.h
index 66d88e0..cda1d46 100644
--- a/src/H5EAprivate.h
+++ b/src/H5EAprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5EAsblock.c b/src/H5EAsblock.c
index 2fa87e0..4e291c0 100644
--- a/src/H5EAsblock.c
+++ b/src/H5EAsblock.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5EAstat.c b/src/H5EAstat.c
index 0c27681..72c4d14 100644
--- a/src/H5EAstat.c
+++ b/src/H5EAstat.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5EAtest.c b/src/H5EAtest.c
index 905aa7f..422ea68 100644
--- a/src/H5EAtest.c
+++ b/src/H5EAtest.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Quincey Koziol <koziol@hdfgroup.org>
diff --git a/src/H5Edeprec.c b/src/H5Edeprec.c
index 1a13c01..f579773 100644
--- a/src/H5Edeprec.c
+++ b/src/H5Edeprec.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Eint.c b/src/H5Eint.c
index 8eea147..110c6bb 100644
--- a/src/H5Eint.c
+++ b/src/H5Eint.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Emodule.h b/src/H5Emodule.h
index d624409..2d1bcd0 100644
--- a/src/H5Emodule.h
+++ b/src/H5Emodule.h
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Epkg.h b/src/H5Epkg.h
index 3af653a..90f4f80 100644
--- a/src/H5Epkg.h
+++ b/src/H5Epkg.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Eprivate.h b/src/H5Eprivate.h
index db413e3..57e7485 100644
--- a/src/H5Eprivate.h
+++ b/src/H5Eprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Epublic.h b/src/H5Epublic.h
index 17a35d9..3eae2da 100644
--- a/src/H5Epublic.h
+++ b/src/H5Epublic.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5F.c b/src/H5F.c
index a43009b..78fce2a 100644
--- a/src/H5F.c
+++ b/src/H5F.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/****************/
@@ -406,7 +404,7 @@ H5Fis_hdf5(const char *name)
HGOTO_ERROR(H5E_ARGS, H5E_BADRANGE, FAIL, "no file name specified")
/* call the private is_HDF5 function */
- if((ret_value = H5F_is_hdf5(name, H5AC_ind_read_dxpl_id)) < 0)
+ if((ret_value = H5F__is_hdf5(name, H5AC_ind_read_dxpl_id, H5AC_rawdata_dxpl_id)) < 0)
HGOTO_ERROR(H5E_FILE, H5E_NOTHDF5, FAIL, "unable open file")
done:
@@ -445,6 +443,8 @@ done:
hid_t
H5Fcreate(const char *filename, unsigned flags, hid_t fcpl_id, hid_t fapl_id)
{
+ hbool_t ci_load = FALSE; /* whether MDC ci load requested */
+ hbool_t ci_write = FALSE; /* whether MDC CI write requested */
H5F_t *new_file = NULL; /*file struct for new file */
hid_t dxpl_id = H5AC_ind_read_dxpl_id; /*dxpl used by library */
hid_t ret_value; /*return value */
@@ -490,6 +490,12 @@ H5Fcreate(const char *filename, unsigned flags, hid_t fcpl_id, hid_t fapl_id)
if(NULL == (new_file = H5F_open(filename, flags, fcpl_id, fapl_id, dxpl_id)))
HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, FAIL, "unable to create file")
+ /* Check to see if both SWMR and cache image are requested. Fail if so */
+ if(H5C_cache_image_status(new_file, &ci_load, &ci_write) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "can't get MDC cache image status")
+ if((ci_load || ci_write) && (flags & (H5F_ACC_SWMR_READ | H5F_ACC_SWMR_WRITE)))
+ HGOTO_ERROR(H5E_FILE, H5E_UNSUPPORTED, FAIL, "can't have both SWMR and cache image")
+
/* Get an atom for the file */
if((ret_value = H5I_register(H5I_FILE, new_file, TRUE)) < 0)
HGOTO_ERROR(H5E_ATOM, H5E_CANTREGISTER, FAIL, "unable to atomize file")
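
The check added to H5Fcreate() here (and mirrored in H5Fopen() below) rejects a setup that enables both SWMR access and a metadata cache image. A hedged sketch of the combination that is now refused; the cache-image configuration struct and macro names are assumed to match their declarations in H5ACpublic.h and should be checked there:

#include "hdf5.h"

int swmr_plus_cache_image(void)
{
    H5AC_cache_image_config_t img_cfg;
    hid_t fapl, file;

    img_cfg.version            = H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION;
    img_cfg.generate_image     = 1;     /* ask for a cache image on close */
    img_cfg.save_resize_status = 0;
    img_cfg.entry_ageout       = H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE;

    fapl = H5Pcreate(H5P_FILE_ACCESS);
    H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
    H5Pset_mdc_image_config(fapl, &img_cfg);

    /* The new check in H5Fcreate() refuses SWMR + cache image. */
    file = H5Fcreate("swmr_ci.h5", H5F_ACC_TRUNC | H5F_ACC_SWMR_WRITE,
                     H5P_DEFAULT, fapl);

    if(file >= 0)
        H5Fclose(file);             /* not expected on this code path */
    H5Pclose(fapl);

    return (file < 0) ? 0 : -1;     /* 0: creation was refused as intended */
}
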
@@ -498,9 +504,8 @@ H5Fcreate(const char *filename, unsigned flags, hid_t fcpl_id, hid_t fapl_id)
new_file->file_id = ret_value;
done:
- if(ret_value < 0 && new_file)
- if(H5F_close(new_file) < 0)
- HDONE_ERROR(H5E_FILE, H5E_CANTCLOSEFILE, FAIL, "problems closing file")
+ if(ret_value < 0 && new_file && H5F_try_close(new_file, NULL) < 0)
+ HDONE_ERROR(H5E_FILE, H5E_CANTCLOSEFILE, FAIL, "problems closing file")
FUNC_LEAVE_API(ret_value)
} /* end H5Fcreate() */
@@ -528,7 +533,7 @@ done:
* Modifications:
* Robb Matzke, 1997-07-18
* File struct creation and destruction is through H5F_new() and
- * H5F_dest(). Reading the root symbol table entry is done with
+ * H5F__dest(). Reading the root symbol table entry is done with
* H5G_decode().
*
* Robb Matzke, 1997-09-23
@@ -549,6 +554,8 @@ done:
hid_t
H5Fopen(const char *filename, unsigned flags, hid_t fapl_id)
{
+ hbool_t ci_load = FALSE; /* whether MDC ci load requested */
+ hbool_t ci_write = FALSE; /* whether MDC CI write requested */
H5F_t *new_file = NULL; /*file struct for new file */
hid_t dxpl_id = H5AC_ind_read_dxpl_id; /*dxpl used by library */
hid_t ret_value; /*return value */
@@ -578,6 +585,12 @@ H5Fopen(const char *filename, unsigned flags, hid_t fapl_id)
if(NULL == (new_file = H5F_open(filename, flags, H5P_FILE_CREATE_DEFAULT, fapl_id, dxpl_id)))
HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, FAIL, "unable to open file")
+ /* Check to see if both SWMR and cache image are requested. Fail if so */
+ if(H5C_cache_image_status(new_file, &ci_load, &ci_write) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "can't get MDC cache image status")
+ if((ci_load || ci_write) && (flags & (H5F_ACC_SWMR_READ | H5F_ACC_SWMR_WRITE)))
+ HGOTO_ERROR(H5E_FILE, H5E_UNSUPPORTED, FAIL, "can't have both SWMR and cache image")
+
/* Get an atom for the file */
if((ret_value = H5I_register(H5I_FILE, new_file, TRUE)) < 0)
HGOTO_ERROR(H5E_ATOM, H5E_CANTREGISTER, FAIL, "unable to atomize file handle")
@@ -698,12 +711,12 @@ H5Fflush(hid_t object_id, H5F_scope_t scope)
/* Flush other files, depending on scope */
if(H5F_SCOPE_GLOBAL == scope) {
/* Call the flush routine for mounted file hierarchies */
- if(H5F_flush_mounts(f, H5AC_ind_read_dxpl_id) < 0)
+ if(H5F_flush_mounts(f, H5AC_ind_read_dxpl_id, H5AC_rawdata_dxpl_id) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "unable to flush mounted file hierarchy")
} /* end if */
else {
/* Call the flush routine, for this file */
- if(H5F_flush(f, H5AC_ind_read_dxpl_id, FALSE) < 0)
+ if(H5F__flush(f, H5AC_ind_read_dxpl_id, H5AC_rawdata_dxpl_id, FALSE) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "unable to flush file's cached information")
} /* end else */
} /* end if */
@@ -758,7 +771,7 @@ H5Fclose(hid_t file_id)
if((nref = H5I_get_ref(file_id, FALSE)) < 0)
HGOTO_ERROR(H5E_ATOM, H5E_CANTGET, FAIL, "can't get ID ref count")
if(nref == 1)
- if(H5F_flush(f, H5AC_ind_read_dxpl_id, FALSE) < 0)
+ if(H5F__flush(f, H5AC_ind_read_dxpl_id, H5AC_rawdata_dxpl_id, FALSE) < 0)
HGOTO_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush cache")
} /* end if */
@@ -823,7 +836,7 @@ H5Freopen(hid_t file_id)
done:
if(ret_value < 0 && new_file)
- if(H5F_dest(new_file, H5AC_ind_read_dxpl_id, FALSE) < 0)
+ if(H5F__dest(new_file, H5AC_ind_read_dxpl_id, H5AC_rawdata_dxpl_id, FALSE) < 0)
HDONE_ERROR(H5E_FILE, H5E_CANTCLOSEFILE, FAIL, "can't close file")
FUNC_LEAVE_API(ret_value)
@@ -1027,7 +1040,7 @@ H5Fget_file_image(hid_t file_id, void *buf_ptr, size_t buf_len)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "not a file ID")
/* call private get_file_image function */
- if((ret_value = H5F_get_file_image(file, buf_ptr, buf_len, H5AC_ind_read_dxpl_id)) < 0)
+ if((ret_value = H5F_get_file_image(file, buf_ptr, buf_len, H5AC_ind_read_dxpl_id, H5AC_rawdata_dxpl_id)) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to get file image")
done:
@@ -1177,7 +1190,7 @@ H5Fget_mdc_size(hid_t file_id, size_t *max_size_ptr, size_t *min_clean_size_ptr,
size_t *cur_size_ptr, int *cur_num_entries_ptr)
{
H5F_t *file; /* File object for file ID */
- int32_t cur_num_entries;
+ uint32_t cur_num_entries;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_API(FAIL)
@@ -1593,6 +1606,8 @@ done:
herr_t
H5Fstart_swmr_write(hid_t file_id)
{
+ hbool_t ci_load = FALSE; /* whether MDC ci load requested */
+ hbool_t ci_write = FALSE; /* whether MDC CI write requested */
H5F_t *file = NULL; /* File info */
size_t grp_dset_count=0; /* # of open objects: groups & datasets */
size_t nt_attr_count=0; /* # of opened named datatypes + opened attributes */
@@ -1602,7 +1617,7 @@ H5Fstart_swmr_write(hid_t file_id)
H5G_name_t *obj_paths=NULL; /* Group hierarchy path */
size_t u; /* Local index variable */
hbool_t setup = FALSE; /* Boolean flag to indicate whether SWMR setting is enabled */
- H5F_io_info_t fio_info; /* I/O info for operation */
+ H5F_io_info2_t fio_info; /* I/O info for operation */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_API(FAIL)
@@ -1626,8 +1641,14 @@ H5Fstart_swmr_write(hid_t file_id)
HDassert(file->shared->sblock->status_flags & H5F_SUPER_WRITE_ACCESS);
+ /* Check to see if cache image is enabled. Fail if so */
+ if(H5C_cache_image_status(file, &ci_load, &ci_write) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "can't get MDC cache image status")
+ if(ci_load || ci_write )
+ HGOTO_ERROR(H5E_FILE, H5E_UNSUPPORTED, FAIL, "can't have both SWMR and MDC cache image")
+
/* Flush data buffers */
- if(H5F_flush(file, H5AC_ind_read_dxpl_id, FALSE) < 0)
+ if(H5F__flush(file, H5AC_ind_read_dxpl_id, H5AC_rawdata_dxpl_id, FALSE) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "unable to flush file's cached information")
/* Get the # of opened named datatypes and attributes */
@@ -1681,7 +1702,9 @@ H5Fstart_swmr_write(hid_t file_id)
/* Set up I/O info for operation */
fio_info.f = file;
- if(NULL == (fio_info.dxpl = (H5P_genplist_t *)H5I_object(H5AC_ind_read_dxpl_id)))
+ if(NULL == (fio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(H5AC_ind_read_dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ if(NULL == (fio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(H5AC_rawdata_dxpl_id)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
/* Flush and reset the accumulator */
@@ -1868,7 +1891,48 @@ H5Fget_mdc_logging_status(hid_t file_id, hbool_t *is_enabled,
done:
FUNC_LEAVE_API(ret_value)
-} /* H5Fstop_mdc_logging() */
+} /* H5Fget_mdc_logging_status() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5Fset_latest_format
+ *
+ * Purpose: Enable switching the "latest format" flag while a file is open.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * Monday, September 21, 2015
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Fset_latest_format(hid_t file_id, hbool_t latest_format)
+{
+ H5F_t *f; /* File */
+ unsigned latest_flags; /* Latest format flags for file */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE2("e", "ib", file_id, latest_format);
+
+ /* Check args */
+ if(NULL == (f = (H5F_t *)H5I_object_verify(file_id, H5I_FILE)))
+ HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, FAIL, "not a file ID")
+
+ /* Check if the value is changing */
+ latest_flags = H5F_USE_LATEST_FLAGS(f, H5F_LATEST_ALL_FLAGS);
+ if(latest_format != (H5F_LATEST_ALL_FLAGS == latest_flags)) {
+ /* Call the flush routine, for this file */
+ if(H5F__flush(f, H5AC_ind_read_dxpl_id, H5AC_rawdata_dxpl_id, FALSE) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "unable to flush file's cached information")
+
+ /* Toggle the 'latest format' flag */
+ H5F_SET_LATEST_FLAGS(f, latest_format ? H5F_LATEST_ALL_FLAGS : 0);
+ } /* end if */
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* end H5Fset_latest_format() */
/*-------------------------------------------------------------------------
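
The new H5Fset_latest_format() call defined above flushes the file's cached metadata and then toggles the 'latest format' flags on an already-open file. A minimal caller sketch, with a hypothetical file name:

#include "hdf5.h"

/* Switch an open file to the latest on-disk format before creating
 * objects that benefit from the newer structures. */
int enable_latest_format(const char *name)
{
    hid_t file = H5Fopen(name, H5F_ACC_RDWR, H5P_DEFAULT);

    if(file < 0)
        return -1;

    /* Flushes cached metadata, then flips the 'latest format' flags. */
    if(H5Fset_latest_format(file, 1) < 0) {
        H5Fclose(file);
        return -1;
    }

    return (H5Fclose(file) < 0) ? -1 : 0;
}
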
@@ -1909,7 +1973,9 @@ H5Fformat_convert(hid_t fid)
/* Check for persistent freespace manager, which needs to be downgraded */
if(!(f->shared->fs_strategy == H5F_FILE_SPACE_STRATEGY_DEF &&
- f->shared->fs_threshold == H5F_FREE_SPACE_THRESHOLD_DEF)) {
+ f->shared->fs_persist == H5F_FREE_SPACE_PERSIST_DEF &&
+ f->shared->fs_threshold == H5F_FREE_SPACE_THRESHOLD_DEF &&
+ f->shared->fs_page_size == H5F_FILE_SPACE_PAGE_SIZE_DEF)) {
/* Check to remove free-space manager info message from superblock extension */
if(H5F_addr_defined(f->shared->sblock->ext_addr))
if(H5F_super_ext_remove_msg(f, H5AC_ind_read_dxpl_id, H5O_FSINFO_ID) < 0)
@@ -1921,7 +1987,9 @@ H5Fformat_convert(hid_t fid)
/* Set non-persistent freespace manager */
f->shared->fs_strategy = H5F_FILE_SPACE_STRATEGY_DEF;
+ f->shared->fs_persist = H5F_FREE_SPACE_PERSIST_DEF;
f->shared->fs_threshold = H5F_FREE_SPACE_THRESHOLD_DEF;
+ f->shared->fs_page_size = H5F_FILE_SPACE_PAGE_SIZE_DEF;
/* Indicate that the superblock should be marked dirty */
mark_dirty = TRUE;
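
These hunks widen the condition under which H5Fformat_convert() downgrades a file: any non-default free-space strategy, persistence setting, threshold, or page size now triggers removal of the persistent free-space manager info from the superblock extension. A small sketch of the public entry point, for a hypothetical caller that wants the file readable by older library versions:

#include "hdf5.h"

int convert_for_old_readers(const char *name)
{
    hid_t file = H5Fopen(name, H5F_ACC_RDWR, H5P_DEFAULT);

    if(file < 0)
        return -1;

    /* Removes persistent free-space manager info and reverts the
     * free-space settings to their defaults, marking the superblock dirty. */
    if(H5Fformat_convert(file) < 0) {
        H5Fclose(file);
        return -1;
    }

    return (H5Fclose(file) < 0) ? -1 : 0;
}
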
@@ -1940,3 +2008,119 @@ done:
FUNC_LEAVE_API(ret_value)
} /* end H5Fformat_convert() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5Freset_page_buffering_stats
+ *
+ * Purpose: Resets statistics for the page buffer layer.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: Mohamad Chaarawi
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Freset_page_buffering_stats(hid_t file_id)
+{
+ H5F_t *file; /* File to reset stats on */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE1("e", "i", file_id);
+
+ /* Check args */
+ if(NULL == (file = (H5F_t *)H5I_object(file_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "invalid file identifier")
+ if(NULL == file->shared->page_buf)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "page buffering not enabled on file")
+
+ /* Reset the statistics */
+ if(H5PB_reset_stats(file->shared->page_buf) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "can't reset stats for page buffering")
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* H5Freset_page_buffering_stats() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5Fget_page_buffering_stats
+ *
+ * Purpose: Retrieves statistics for the page buffer layer.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: Mohamad Chaarawi
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Fget_page_buffering_stats(hid_t file_id, unsigned accesses[2], unsigned hits[2],
+ unsigned misses[2], unsigned evictions[2], unsigned bypasses[2])
+{
+ H5F_t *file; /* File object for file ID */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE6("e", "i*Iu*Iu*Iu*Iu*Iu", file_id, accesses, hits, misses, evictions,
+ bypasses);
+
+ /* Check args */
+ if(NULL == (file = (H5F_t *)H5I_object_verify(file_id, H5I_FILE)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "not a file ID")
+ if(NULL == file->shared->page_buf)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "page buffering not enabled on file")
+ if(NULL == accesses || NULL == hits || NULL == misses || NULL == evictions || NULL == bypasses)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "NULL input parameters for stats")
+
+ /* Get the statistics */
+ if(H5PB_get_stats(file->shared->page_buf, accesses, hits, misses, evictions, bypasses) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "can't retrieve stats for page buffering")
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* H5Fget_page_buffering_stats() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5Fget_mdc_image_info
+ *
+ * Purpose: Retrieves the image_addr and image_len for the cache image in the file.
+ * image_addr: --base address of the on disk metadata cache image
+ * --HADDR_UNDEF if no cache image
+ * image_len: --size of the on disk metadata cache image
+ * --zero if no cache image
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: Vailin Choi; March 2017
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Fget_mdc_image_info(hid_t file_id, haddr_t *image_addr, hsize_t *image_len)
+{
+ H5F_t *file; /* File object for file ID */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE3("e", "i*a*h", file_id, image_addr, image_len);
+
+ /* Check args */
+ if(NULL == (file = (H5F_t *)H5I_object_verify(file_id, H5I_FILE)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "not a file ID")
+ if(NULL == image_addr || NULL == image_len)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "NULL image addr or image len")
+
+ /* Go get the address and size of the cache image */
+ if(H5AC_get_mdc_image_info(file->shared->cache, image_addr, image_len) < 0)
+ HGOTO_ERROR(H5E_CACHE, H5E_CANTGET, FAIL, "can't retrieve cache image info")
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* H5Fget_mdc_image_info() */
+
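
The three new public routines above expose page-buffer statistics and the location of an on-disk metadata cache image. A hedged usage sketch; it assumes page buffering was enabled on the file's FAPL (for example via H5Pset_page_buffer_size()) and that the two slots of each statistics array separate metadata pages from raw-data pages:

#include <stdio.h>
#include "hdf5.h"

static void report_file_caches(hid_t file)
{
    unsigned accesses[2], hits[2], misses[2], evictions[2], bypasses[2];
    haddr_t  img_addr = HADDR_UNDEF;
    hsize_t  img_len  = 0;

    /* Counters since the file was opened (or since the last reset). */
    if(H5Fget_page_buffering_stats(file, accesses, hits, misses,
                                   evictions, bypasses) >= 0)
        printf("pages: %u/%u accesses, %u/%u hits (slot 0 vs. slot 1)\n",
               accesses[0], accesses[1], hits[0], hits[1]);

    /* Start the counters over for the next measurement interval. */
    (void)H5Freset_page_buffering_stats(file);

    /* HADDR_UNDEF / length 0 means the file carries no cache image. */
    if(H5Fget_mdc_image_info(file, &img_addr, &img_len) >= 0 && img_len > 0)
        printf("cache image at address %llu, %llu bytes\n",
               (unsigned long long)img_addr, (unsigned long long)img_len);
}
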
diff --git a/src/H5FA.c b/src/H5FA.c
index 90144e7..d421eec 100644
--- a/src/H5FA.c
+++ b/src/H5FA.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5FAcache.c b/src/H5FAcache.c
index 11e8571..19fdb74 100644
--- a/src/H5FAcache.c
+++ b/src/H5FAcache.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -475,6 +473,8 @@ H5FA__cache_hdr_notify(H5AC_notify_action_t action, void *_thing))
case H5AC_NOTIFY_ACTION_ENTRY_CLEANED:
case H5AC_NOTIFY_ACTION_CHILD_DIRTIED:
case H5AC_NOTIFY_ACTION_CHILD_CLEANED:
+ case H5AC_NOTIFY_ACTION_CHILD_UNSERIALIZED:
+ case H5AC_NOTIFY_ACTION_CHILD_SERIALIZED:
/* do nothing */
break;
@@ -879,6 +879,8 @@ H5FA__cache_dblock_notify(H5AC_notify_action_t action, void *_thing))
case H5AC_NOTIFY_ACTION_ENTRY_CLEANED:
case H5AC_NOTIFY_ACTION_CHILD_DIRTIED:
case H5AC_NOTIFY_ACTION_CHILD_CLEANED:
+ case H5AC_NOTIFY_ACTION_CHILD_UNSERIALIZED:
+ case H5AC_NOTIFY_ACTION_CHILD_SERIALIZED:
/* do nothing */
break;
@@ -1254,6 +1256,8 @@ H5FA__cache_dblk_page_notify(H5AC_notify_action_t action, void *_thing))
case H5AC_NOTIFY_ACTION_ENTRY_CLEANED:
case H5AC_NOTIFY_ACTION_CHILD_DIRTIED:
case H5AC_NOTIFY_ACTION_CHILD_CLEANED:
+ case H5AC_NOTIFY_ACTION_CHILD_UNSERIALIZED:
+ case H5AC_NOTIFY_ACTION_CHILD_SERIALIZED:
/* do nothing */
break;
diff --git a/src/H5FAdbg.c b/src/H5FAdbg.c
index 2dc51c5..7444eae 100644
--- a/src/H5FAdbg.c
+++ b/src/H5FAdbg.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5FAdblkpage.c b/src/H5FAdblkpage.c
index 09278f8..baf4286 100644
--- a/src/H5FAdblkpage.c
+++ b/src/H5FAdblkpage.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5FAdblock.c b/src/H5FAdblock.c
index 440447d..432bf58 100644
--- a/src/H5FAdblock.c
+++ b/src/H5FAdblock.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5FAhdr.c b/src/H5FAhdr.c
index 52b90d1..6809f02 100644
--- a/src/H5FAhdr.c
+++ b/src/H5FAhdr.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5FAint.c b/src/H5FAint.c
index 331227b..9d3bce8 100644
--- a/src/H5FAint.c
+++ b/src/H5FAint.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5FAmodule.h b/src/H5FAmodule.h
index e46b071..f675faf 100644
--- a/src/H5FAmodule.h
+++ b/src/H5FAmodule.h
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5FApkg.h b/src/H5FApkg.h
index ccef562..6f0a43a 100644
--- a/src/H5FApkg.h
+++ b/src/H5FApkg.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -249,15 +247,6 @@ typedef struct H5FA_dblk_page_cache_ud_t {
/* Package Private Variables */
/*****************************/
-/* H5FA header inherits cache-like properties from H5AC */
-H5_DLLVAR const H5AC_class_t H5AC_FARRAY_HDR[1];
-
-/* H5FA data block inherits cache-like properties from H5AC */
-H5_DLLVAR const H5AC_class_t H5AC_FARRAY_DBLOCK[1];
-
-/* H5FA data block page inherits cache-like properties from H5AC */
-H5_DLLVAR const H5AC_class_t H5AC_FARRAY_DBLK_PAGE[1];
-
/* Internal fixed array testing class */
H5_DLLVAR const H5FA_class_t H5FA_CLS_TEST[1];
diff --git a/src/H5FAprivate.h b/src/H5FAprivate.h
index 612f3a2..1e44126 100644
--- a/src/H5FAprivate.h
+++ b/src/H5FAprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5FAstat.c b/src/H5FAstat.c
index 72fa0de..3c06855 100644
--- a/src/H5FAstat.c
+++ b/src/H5FAstat.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5FAtest.c b/src/H5FAtest.c
index 091f284..3c6d8e4 100644
--- a/src/H5FAtest.c
+++ b/src/H5FAtest.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5FD.c b/src/H5FD.c
index 99a9288..dcfaa6d 100644
--- a/src/H5FD.c
+++ b/src/H5FD.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -498,8 +496,7 @@ H5FD_sb_encode(H5FD_t *file, char *name/*out*/, uint8_t *buf)
FUNC_ENTER_NOAPI(FAIL)
HDassert(file && file->cls);
- if(file->cls->sb_encode &&
- (file->cls->sb_encode)(file, name/*out*/, buf/*out*/) < 0)
+ if(file->cls->sb_encode && (file->cls->sb_encode)(file, name/*out*/, buf/*out*/) < 0)
HGOTO_ERROR(H5E_VFL, H5E_CANTINIT, FAIL, "driver sb_encode request failed")
done:
@@ -1531,7 +1528,7 @@ herr_t
H5FDread(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr, size_t size,
void *buf/*out*/)
{
- H5P_genplist_t *dxpl; /* DXPL object */
+ H5FD_io_info_t fdio_info; /* File driver I/O object */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_API(FAIL)
@@ -1550,13 +1547,24 @@ H5FDread(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr, size_t size
if(!buf)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "null result buffer")
- /* Get the DXPL plist object for DXPL ID */
- if(NULL == (dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ /* Set up the file driver I/O info object */
+ fdio_info.file = file;
+ if(H5FD_MEM_DRAW == type) {
+ if(NULL == (fdio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(H5AC_ind_read_dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ if(NULL == (fdio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ } /* end if */
+ else {
+ if(NULL == (fdio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ if(NULL == (fdio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(H5AC_rawdata_dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ } /* end else */
/* Do the real work */
/* (Note compensating for base address addition in internal routine) */
- if(H5FD_read(file, dxpl, type, addr - file->base_addr, size, buf) < 0)
+ if(H5FD_read(&fdio_info, type, addr - file->base_addr, size, buf) < 0)
HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "file read request failed")
done:
@@ -1585,7 +1593,7 @@ herr_t
H5FDwrite(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr, size_t size,
const void *buf)
{
- H5P_genplist_t *dxpl; /* DXPL object */
+ H5FD_io_info_t fdio_info; /* File driver I/O object */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_API(FAIL)
@@ -1603,13 +1611,24 @@ H5FDwrite(H5FD_t *file, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr, size_t siz
if(!buf)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "null buffer")
- /* Get the DXPL plist object for DXPL ID */
- if(NULL == (dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ /* Set up the file driver I/O info object */
+ fdio_info.file = file;
+ if(H5FD_MEM_DRAW == type) {
+ if(NULL == (fdio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(H5AC_ind_read_dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ if(NULL == (fdio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ } /* end if */
+ else {
+ if(NULL == (fdio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ if(NULL == (fdio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(H5AC_rawdata_dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ } /* end else */
/* The real work */
/* (Note compensating for base address addition in internal routine) */
- if(H5FD_write(file, dxpl, type, addr - file->base_addr, size, buf) < 0)
+ if(H5FD_write(&fdio_info, type, addr - file->base_addr, size, buf) < 0)
HGOTO_ERROR(H5E_VFL, H5E_WRITEERROR, FAIL, "file write request failed")
done:
@@ -2033,3 +2052,27 @@ H5FD_get_base_addr(const H5FD_t *file)
FUNC_LEAVE_NOAPI(file->base_addr)
} /* end H5FD_get_base_addr() */
+
+/*--------------------------------------------------------------------------
+ * Function: H5FD_set_paged_aggr
+ *
+ * Purpose: Set "paged_aggr" for the file.
+ *
+ * Return: Non-negative on success; negative on failure.
+ *
+ * Programmer: Vailin Choi; April 2013
+ *
+ *--------------------------------------------------------------------------
+ */
+herr_t
+H5FD_set_paged_aggr(H5FD_t *file, hbool_t paged)
+{
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ HDassert(file);
+
+ /* Indicate whether paged aggregation for handling file space is enabled or not */
+ file->paged_aggr = paged;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5FD_set_paged_aggr() */
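The H5FDread()/H5FDwrite() hunks above replace the single caller-supplied DXPL with an H5FD_io_info_t carrying both a metadata and a raw-data DXPL: the caller's dxpl_id fills whichever slot matches the memory type, and the other slot falls back to a library default (H5AC_ind_read_dxpl_id or H5AC_rawdata_dxpl_id). A minimal standalone sketch of that pairing pattern follows; the enum, struct, and default IDs are simplified stand-ins, not the library's internal definitions.

#include <stdio.h>

typedef enum { MEM_SUPER, MEM_DRAW } mem_type_t;     /* metadata vs. raw data */

typedef struct {
    int meta_dxpl;   /* transfer property list used for metadata I/O */
    int raw_dxpl;    /* transfer property list used for raw-data I/O */
} io_info_t;

#define DEFAULT_META_DXPL 100    /* stand-in for H5AC_ind_read_dxpl_id */
#define DEFAULT_RAW_DXPL  200    /* stand-in for H5AC_rawdata_dxpl_id  */

/* The caller's DXPL fills the slot that matches the memory type; the other
 * slot gets the library default, so the internal read/write routine always
 * has both on hand. */
static void setup_io_info(io_info_t *info, mem_type_t type, int caller_dxpl)
{
    if (MEM_DRAW == type) {
        info->meta_dxpl = DEFAULT_META_DXPL;
        info->raw_dxpl  = caller_dxpl;
    }
    else {
        info->meta_dxpl = caller_dxpl;
        info->raw_dxpl  = DEFAULT_RAW_DXPL;
    }
}

int main(void)
{
    io_info_t info;

    setup_io_info(&info, MEM_DRAW, 42);
    printf("meta=%d raw=%d\n", info.meta_dxpl, info.raw_dxpl);   /* meta=100 raw=42 */
    return 0;
}

H5FD_read()/H5FD_write() in the H5FDint.c diff below then pick raw_dxpl or meta_dxpl from this pair based on the same H5FD_MEM_DRAW test.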
diff --git a/src/H5FDcore.c b/src/H5FDcore.c
index 9090679..d100a8b 100644
--- a/src/H5FDcore.c
+++ b/src/H5FDcore.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5FDcore.h b/src/H5FDcore.h
index 16cce89..5fe2912 100644
--- a/src/H5FDcore.h
+++ b/src/H5FDcore.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5FDdirect.c b/src/H5FDdirect.c
index 2034ba0..1487cda 100644
--- a/src/H5FDdirect.c
+++ b/src/H5FDdirect.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5FDdirect.h b/src/H5FDdirect.h
index 5ceb91f..805f3be 100644
--- a/src/H5FDdirect.h
+++ b/src/H5FDdirect.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5FDdrvr_module.h b/src/H5FDdrvr_module.h
index 8bb83a1..59a419e 100644
--- a/src/H5FDdrvr_module.h
+++ b/src/H5FDdrvr_module.h
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5FDfamily.c b/src/H5FDfamily.c
index f0d4dba..98ece84 100644
--- a/src/H5FDfamily.c
+++ b/src/H5FDfamily.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -1357,7 +1355,7 @@ H5FD_family_lock(H5FD_t *_file, hbool_t rw)
if(u < file->nmembs) {
unsigned v; /* Local index variable */
- for(v = 0; v < v; v++) {
+ for(v = 0; v < u; v++) {
if(H5FD_unlock(file->memb[v]) < 0)
/* Push error, but keep going */
HDONE_ERROR(H5E_IO, H5E_CANTUNLOCK, FAIL, "unable to unlock member files")
diff --git a/src/H5FDfamily.h b/src/H5FDfamily.h
index 80969c3..1584cf6 100644
--- a/src/H5FDfamily.h
+++ b/src/H5FDfamily.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5FDint.c b/src/H5FDint.c
index 744c3d1..bc322d6 100644
--- a/src/H5FDint.c
+++ b/src/H5FDint.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -93,12 +91,9 @@
*-------------------------------------------------------------------------
*/
herr_t
-H5FD_locate_signature(H5FD_t *file,
-#ifndef H5_DEBUG_BUILD
-const
-#endif /* H5_DEBUG_BUILD */
-H5P_genplist_t *dxpl, haddr_t *sig_addr)
+H5FD_locate_signature(H5FD_io_info_t *fdio_info, haddr_t *sig_addr)
{
+ H5FD_t *file;
haddr_t addr, eoa, eof;
uint8_t buf[H5F_SIGNATURE_LEN];
unsigned n, maxpow;
@@ -106,6 +101,10 @@ H5P_genplist_t *dxpl, haddr_t *sig_addr)
FUNC_ENTER_NOAPI_NOINIT
+ HDassert(fdio_info);
+ file = fdio_info->file;
+ HDassert(file);
+
/* Find the least N such that 2^N is larger than the file size */
eof = H5FD_get_eof(file, H5FD_MEM_SUPER);
eoa = H5FD_get_eoa(file, H5FD_MEM_SUPER);
@@ -124,7 +123,7 @@ H5P_genplist_t *dxpl, haddr_t *sig_addr)
addr = (8 == n) ? 0 : (haddr_t)1 << n;
if(H5FD_set_eoa(file, H5FD_MEM_SUPER, addr + H5F_SIGNATURE_LEN) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTINIT, FAIL, "unable to set EOA value for file signature")
- if(H5FD_read(file, dxpl, H5FD_MEM_SUPER, addr, (size_t)H5F_SIGNATURE_LEN, buf) < 0)
+ if(H5FD_read(fdio_info, H5FD_MEM_SUPER, addr, (size_t)H5F_SIGNATURE_LEN, buf) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTINIT, FAIL, "unable to read file signature")
if(!HDmemcmp(buf, H5F_SIGNATURE, (size_t)H5F_SIGNATURE_LEN))
break;
@@ -162,29 +161,36 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5FD_read(H5FD_t *file,
-#ifndef H5_DEBUG_BUILD
-const
-#endif /* H5_DEBUG_BUILD */
-H5P_genplist_t *dxpl, H5FD_mem_t type, haddr_t addr,
+H5FD_read(H5FD_io_info_t *fdio_info, H5FD_mem_t type, haddr_t addr,
size_t size, void *buf/*out*/)
{
+ H5FD_t *file;
+ H5P_genplist_t *io_dxpl;
haddr_t eoa = HADDR_UNDEF;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
+ HDassert(fdio_info);
+ file = fdio_info->file;
HDassert(file && file->cls);
- HDassert(TRUE == H5P_class_isa(H5P_CLASS(dxpl), H5P_CLS_DATASET_XFER_g));
+ HDassert(TRUE == H5P_class_isa(H5P_CLASS(fdio_info->meta_dxpl), H5P_CLS_DATASET_XFER_g));
+ HDassert(TRUE == H5P_class_isa(H5P_CLASS(fdio_info->raw_dxpl), H5P_CLS_DATASET_XFER_g));
HDassert(buf);
+ /* Set up proper DXPL for I/O */
+ if(H5FD_MEM_DRAW == type)
+ io_dxpl = fdio_info->raw_dxpl;
+ else
+ io_dxpl = fdio_info->meta_dxpl;
+
/* Sanity check the dxpl type against the mem type */
#ifdef H5_DEBUG_BUILD
{
H5FD_dxpl_type_t dxpl_type; /* Property indicating the type of the internal dxpl */
/* get the dxpl type */
- if(H5P_get(dxpl, H5FD_DXPL_TYPE_NAME, &dxpl_type) < 0)
+ if(H5P_get(io_dxpl, H5FD_DXPL_TYPE_NAME, &dxpl_type) < 0)
HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't retrieve dxpl type")
/* we shouldn't be here if the dxpl is labeled with NO I/O */
@@ -219,7 +225,7 @@ H5P_genplist_t *dxpl, H5FD_mem_t type, haddr_t addr,
HGOTO_ERROR(H5E_ARGS, H5E_OVERFLOW, FAIL, "addr overflow, addr = %llu, size = %llu, eoa = %llu", (unsigned long long)(addr + file->base_addr), (unsigned long long)size, (unsigned long long)eoa)
/* Dispatch to driver */
- if((file->cls->read)(file, type, H5P_PLIST_ID(dxpl), addr + file->base_addr, size, buf) < 0)
+ if((file->cls->read)(file, type, H5P_PLIST_ID(io_dxpl), addr + file->base_addr, size, buf) < 0)
HGOTO_ERROR(H5E_VFL, H5E_READERROR, FAIL, "driver read request failed")
done:
@@ -241,29 +247,36 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5FD_write(H5FD_t *file,
-#ifndef H5_DEBUG_BUILD
-const
-#endif /* H5_DEBUG_BUILD */
-H5P_genplist_t *dxpl, H5FD_mem_t type, haddr_t addr,
+H5FD_write(const H5FD_io_info_t *fdio_info, H5FD_mem_t type, haddr_t addr,
size_t size, const void *buf)
{
+ H5FD_t *file;
+ H5P_genplist_t *io_dxpl;
haddr_t eoa = HADDR_UNDEF;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
+ HDassert(fdio_info);
+ file = fdio_info->file;
HDassert(file && file->cls);
- HDassert(TRUE == H5P_class_isa(H5P_CLASS(dxpl), H5P_CLS_DATASET_XFER_g));
+ HDassert(TRUE == H5P_class_isa(H5P_CLASS(fdio_info->meta_dxpl), H5P_CLS_DATASET_XFER_g));
+ HDassert(TRUE == H5P_class_isa(H5P_CLASS(fdio_info->raw_dxpl), H5P_CLS_DATASET_XFER_g));
HDassert(buf);
+ /* Set up proper DXPL for I/O */
+ if(H5FD_MEM_DRAW == type)
+ io_dxpl = fdio_info->raw_dxpl;
+ else
+ io_dxpl = fdio_info->meta_dxpl;
+
/* Sanity check the dxpl type against the mem type */
#ifdef H5_DEBUG_BUILD
{
H5FD_dxpl_type_t dxpl_type; /* Property indicating the type of the internal dxpl */
/* get the dxpl type */
- if(H5P_get(dxpl, H5FD_DXPL_TYPE_NAME, &dxpl_type) < 0)
+ if(H5P_get(io_dxpl, H5FD_DXPL_TYPE_NAME, &dxpl_type) < 0)
HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, "can't retrieve dxpl type")
/* we shouldn't be here if the dxpl is labeled with NO I/O */
@@ -291,7 +304,7 @@ H5P_genplist_t *dxpl, H5FD_mem_t type, haddr_t addr,
(unsigned long long)(addr+ file->base_addr), (unsigned long long)size, (unsigned long long)eoa)
/* Dispatch to driver */
- if((file->cls->write)(file, type, H5P_PLIST_ID(dxpl), addr + file->base_addr, size, buf) < 0)
+ if((file->cls->write)(file, type, H5P_PLIST_ID(io_dxpl), addr + file->base_addr, size, buf) < 0)
HGOTO_ERROR(H5E_VFL, H5E_WRITEERROR, FAIL, "driver write request failed")
done:
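The H5FD_locate_signature() hunk above only changes how the DXPLs are passed; the probe order itself is unchanged: the superblock signature is looked for at byte 0 and then at successive powers of two (512, 1024, 2048, ...) until the end of file. Below is a standalone sketch of that search using plain stdio instead of the VFD layer; locate_signature() and its arguments are hypothetical names, not HDF5 API.

#include <stdio.h>
#include <string.h>

#define SIG     "\211HDF\r\n\032\n"   /* the 8-byte HDF5 file signature */
#define SIG_LEN 8

/* Probe byte 0, then 512, 1024, 2048, ... for the signature.
 * Returns the offset on success, or -1 if no signature is found. */
static long locate_signature(FILE *fp, long file_size)
{
    unsigned char buf[SIG_LEN];
    unsigned n;

    for (n = 8; n < 8 * sizeof(long) - 2; n++) {
        long addr = (8 == n) ? 0L : (1L << n);   /* 0, 512, 1024, 2048, ... */

        if (addr + SIG_LEN > file_size)
            break;                               /* ran past the end of the file */
        if (fseek(fp, addr, SEEK_SET) != 0 ||
            fread(buf, 1, SIG_LEN, fp) != SIG_LEN)
            return -1;
        if (memcmp(buf, SIG, SIG_LEN) == 0)
            return addr;                         /* found the superblock */
    }
    return -1;
}

int main(int argc, char *argv[])
{
    long offset = -1;

    if (argc > 1) {
        FILE *fp = fopen(argv[1], "rb");

        if (fp != NULL) {
            fseek(fp, 0L, SEEK_END);
            offset = locate_signature(fp, ftell(fp));
            fclose(fp);
        }
    }
    printf("signature offset: %ld\n", offset);
    return 0;
}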
diff --git a/src/H5FDlog.c b/src/H5FDlog.c
index 03228d2..75333c2 100644
--- a/src/H5FDlog.c
+++ b/src/H5FDlog.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -934,13 +932,7 @@ H5FD_log_alloc(H5FD_t *_file, H5FD_mem_t type, hid_t H5_ATTR_UNUSED dxpl_id, hsi
/* Compute the address for the block to allocate */
addr = file->eoa;
- /* Check if we need to align this block */
- if(size >= file->pub.threshold) {
- /* Check for an already aligned block */
- if(addr % file->pub.alignment != 0)
- addr = ((addr / file->pub.alignment) + 1) * file->pub.alignment;
- } /* end if */
-
+ /* Extend the end-of-allocated space address */
file->eoa = addr + size;
/* Retain the (first) flavor of the information written to the file */
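The alignment check deleted from H5FD_log_alloc() here is not dropped from the library; the H5FDspace.c hunks near the end of this patch fold the same computation into H5FD_alloc_real(), so drivers that do not set H5FD_FEAT_USE_ALLOC_SIZE share one implementation. A standalone sketch of that arithmetic with hypothetical names: for eoa = 1000, alignment = 512 and a 100-byte request, the block lands at 1024, a 24-byte fragment is recorded at 1000, and the new EOA is 1124.

#include <assert.h>
#include <stdio.h>

typedef unsigned long long addr_t;

/* Align an allocation the way H5FD_alloc_real() does after this patch:
 * bump the start address up to the next multiple of 'alignment' when the
 * request is at least 'threshold' bytes, and report the skipped bytes as a
 * free-space fragment the caller can recycle. */
static addr_t aligned_alloc_addr(addr_t *eoa, addr_t size, addr_t alignment,
                                 addr_t threshold, addr_t *frag_addr,
                                 addr_t *frag_size)
{
    addr_t extra = 0;

    if (alignment > 1 && size >= threshold) {
        addr_t mis_align = *eoa % alignment;

        if (mis_align > 0) {
            extra = alignment - mis_align;   /* bytes needed to reach alignment */
            *frag_addr = *eoa;               /* fragment starts at the old EOA  */
            *frag_size = extra;
        }
    }

    addr_t ret = *eoa + extra;               /* aligned start of the new block  */
    *eoa = ret + size;                       /* advance end-of-allocated space  */
    return ret;
}

int main(void)
{
    addr_t eoa = 1000, frag_addr = 0, frag_size = 0;
    addr_t addr = aligned_alloc_addr(&eoa, 100, 512, 1, &frag_addr, &frag_size);

    assert(addr == 1024 && frag_addr == 1000 && frag_size == 24 && eoa == 1124);
    printf("addr=%llu frag=%llu+%llu eoa=%llu\n", addr, frag_addr, frag_size, eoa);
    return 0;
}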
diff --git a/src/H5FDlog.h b/src/H5FDlog.h
index 11044e2..a69bb18 100644
--- a/src/H5FDlog.h
+++ b/src/H5FDlog.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5FDmodule.h b/src/H5FDmodule.h
index 6358e86..ea1a9fd 100644
--- a/src/H5FDmodule.h
+++ b/src/H5FDmodule.h
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5FDmpi.c b/src/H5FDmpi.c
index 3f0160e..98e1b1a 100644
--- a/src/H5FDmpi.c
+++ b/src/H5FDmpi.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -148,6 +146,45 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5FD_get_mpi_info
+ *
+ * Purpose: Retrieves the file's MPI info
+ *
+ * Return: Success: SUCCEED
+ *
+ * Failure: Negative
+ *
+ * Programmer: John Mainzer
+ * 4/4/17
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5FD_get_mpi_info(H5FD_t *file, void** mpi_info)
+{
+ const H5FD_class_mpi_t *cls;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+ HDassert(file);
+ cls = (const H5FD_class_mpi_t *)(file->cls);
+ HDassert(cls);
+ HDassert(cls->get_mpi_info); /* All MPI drivers must implement this */
+
+ /* Dispatch to driver */
+ if ((ret_value=(cls->get_mpi_info)(file, mpi_info)) < 0)
+ HGOTO_ERROR(H5E_VFL, H5E_CANTGET, FAIL, \
+ "driver get_mpi_info request failed")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5FD_get_mpi_info() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5FD_mpi_MPIOff_to_haddr
*
* Purpose: Convert an MPI_Offset value to haddr_t.
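The new H5FD_get_mpi_info() above is a thin dispatcher: it asserts that the MPI driver class implements the new get_mpi_info callback and forwards the call, and the H5FDmpio.c hunk further down simply hands back the address of the MPI_Info cached in the file struct. A compilable standalone model of that dispatch; the struct and function names are simplified stand-ins, not the real H5FD_class_mpi_t machinery.

#include <assert.h>
#include <stddef.h>

typedef struct mpi_driver_class {
    int (*get_mpi_info)(void *file, void **mpi_info);   /* new callback slot */
} mpi_driver_class_t;

typedef struct mpio_file {
    const mpi_driver_class_t *cls;
    int                       info;   /* stand-in for the cached MPI_Info */
} mpio_file_t;

/* Driver side (cf. H5FD_mpio_get_info below): expose the cached info by address. */
static int mpio_get_info(void *_file, void **mpi_info)
{
    mpio_file_t *file = (mpio_file_t *)_file;

    if (NULL == mpi_info)
        return -1;
    *mpi_info = &file->info;
    return 0;
}

static const mpi_driver_class_t mpio_class = { mpio_get_info };

/* Generic layer (cf. H5FD_get_mpi_info above): check the callback, then dispatch. */
static int get_mpi_info(mpio_file_t *file, void **mpi_info)
{
    assert(file && file->cls && file->cls->get_mpi_info);
    return (file->cls->get_mpi_info)(file, mpi_info);
}

int main(void)
{
    mpio_file_t f = { &mpio_class, 7 };
    void       *info = NULL;

    if (get_mpi_info(&f, &info) == 0)
        assert(*(int *)info == 7);
    return 0;
}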
diff --git a/src/H5FDmpi.h b/src/H5FDmpi.h
index 784fe70..2d62c79 100644
--- a/src/H5FDmpi.h
+++ b/src/H5FDmpi.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5FDmpio.c b/src/H5FDmpio.c
index a3a404f..ace91f8 100644
--- a/src/H5FDmpio.c
+++ b/src/H5FDmpio.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -95,6 +93,7 @@ static herr_t H5FD_mpio_truncate(H5FD_t *_file, hid_t dxpl_id, hbool_t closing);
static int H5FD_mpio_mpi_rank(const H5FD_t *_file);
static int H5FD_mpio_mpi_size(const H5FD_t *_file);
static MPI_Comm H5FD_mpio_communicator(const H5FD_t *_file);
+static herr_t H5FD_mpio_get_info(H5FD_t *_file, void** mpi_info);
/* The MPIO file driver information */
static const H5FD_class_mpi_t H5FD_mpio_g = {
@@ -134,7 +133,8 @@ static const H5FD_class_mpi_t H5FD_mpio_g = {
}, /* End of superclass information */
H5FD_mpio_mpi_rank, /*get_rank */
H5FD_mpio_mpi_size, /*get_size */
- H5FD_mpio_communicator /*get_comm */
+ H5FD_mpio_communicator, /*get_comm */
+ H5FD_mpio_get_info /*get_info */
};
#ifdef H5FDmpio_DEBUG
@@ -1308,6 +1308,39 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5FD_mpio_get_info
+ *
+ * Purpose: Returns the file info of the MPIO file driver.
+ *
+ * Returns: Non-negative on success or negative on failure.
+ *
+ * Programmer: John Mainzer
+ * April 4, 2017
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+*/
+static herr_t
+H5FD_mpio_get_info(H5FD_t *_file, void** mpi_info)
+{
+ H5FD_mpio_t *file = (H5FD_mpio_t *)_file;
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+ if(!mpi_info)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "mpi info not valid")
+
+ *mpi_info = &(file->info);
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+
+} /* H5FD_mpio_get_info() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5FD_mpio_read
*
* Purpose: Reads SIZE bytes of data from FILE beginning at address ADDR
diff --git a/src/H5FDmpio.h b/src/H5FDmpio.h
index 858e8ba..6ee0a1a 100644
--- a/src/H5FDmpio.h
+++ b/src/H5FDmpio.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5FDmulti.c b/src/H5FDmulti.c
index 5e27c53..8ae23d2 100644
--- a/src/H5FDmulti.c
+++ b/src/H5FDmulti.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -75,11 +73,11 @@ static hid_t H5FD_MULTI_g = 0;
/* Driver-specific file access properties */
typedef struct H5FD_multi_fapl_t {
- H5FD_mem_t memb_map[H5FD_MEM_NTYPES]; /*memory usage map */
- hid_t memb_fapl[H5FD_MEM_NTYPES];/*member access properties */
- char *memb_name[H5FD_MEM_NTYPES];/*name generators */
- haddr_t memb_addr[H5FD_MEM_NTYPES];/*starting addr per member */
- hbool_t relax; /*less stringent error checking */
+ H5FD_mem_t memb_map[H5FD_MEM_NTYPES]; /*memory usage map */
+ hid_t memb_fapl[H5FD_MEM_NTYPES]; /*member access properties */
+ char *memb_name[H5FD_MEM_NTYPES]; /*name generators */
+ haddr_t memb_addr[H5FD_MEM_NTYPES]; /*starting addr per member */
+ hbool_t relax; /*less stringent error checking */
} H5FD_multi_fapl_t;
/*
@@ -89,17 +87,17 @@ typedef struct H5FD_multi_fapl_t {
* copied into the parent file struct in H5F_open().
*/
typedef struct H5FD_multi_t {
- H5FD_t pub; /*public stuff, must be first */
- H5FD_multi_fapl_t fa; /*driver-specific file access properties*/
- haddr_t memb_next[H5FD_MEM_NTYPES];/*addr of next member */
- H5FD_t *memb[H5FD_MEM_NTYPES]; /*member pointers */
- haddr_t memb_eoa[H5FD_MEM_NTYPES]; /*EOA for individual files,
- *end of allocated addresses. v1.6 library
- *have the EOA for the entire file. But it's
- *meaningless for MULTI file. We replaced it
- *with the EOAs for individual files */
- unsigned flags; /*file open flags saved for debugging */
- char *name; /*name passed to H5Fopen or H5Fcreate */
+ H5FD_t pub; /*public stuff, must be first */
+ H5FD_multi_fapl_t fa; /*driver-specific file access properties */
+ haddr_t memb_next[H5FD_MEM_NTYPES]; /*addr of next member */
+ H5FD_t *memb[H5FD_MEM_NTYPES]; /*member pointers */
+ haddr_t memb_eoa[H5FD_MEM_NTYPES]; /*EOA for individual files,
+ *end of allocated addresses. The v1.6 library
+ *has the EOA for the entire file, but it's
+ *meaningless for the MULTI file. We replaced it
+ *with the EOAs for individual files */
+ unsigned flags; /*file open flags saved for debugging */
+ char *name; /*name passed to H5Fopen or H5Fcreate */
} H5FD_multi_t;
/* Driver specific data transfer properties */
@@ -233,9 +231,9 @@ H5FD_multi_init(void)
/* Clear the error stack */
H5Eclear2(H5E_DEFAULT);
- if (H5I_VFL!=H5Iget_type(H5FD_MULTI_g)) {
- H5FD_MULTI_g = H5FDregister(&H5FD_multi_g);
- }
+ if(H5I_VFL!=H5Iget_type(H5FD_MULTI_g))
+ H5FD_MULTI_g = H5FDregister(&H5FD_multi_g);
+
return H5FD_MULTI_g;
}
@@ -285,7 +283,8 @@ H5Pset_fapl_split(hid_t fapl, const char *meta_ext, hid_t meta_plist_id,
H5FD_mem_t memb_map[H5FD_MEM_NTYPES];
hid_t memb_fapl[H5FD_MEM_NTYPES];
const char *memb_name[H5FD_MEM_NTYPES];
- char meta_name[H5FD_MULT_MAX_FILE_NAME_LEN], raw_name[H5FD_MULT_MAX_FILE_NAME_LEN];
+ char meta_name[H5FD_MULT_MAX_FILE_NAME_LEN];
+ char raw_name[H5FD_MULT_MAX_FILE_NAME_LEN];
haddr_t memb_addr[H5FD_MEM_NTYPES];
/*NO TRACE*/
@@ -1111,6 +1110,7 @@ H5FD_multi_close(H5FD_t *_file)
if (file->fa.memb_fapl[mt]>=0) (void)H5Idec_ref(file->fa.memb_fapl[mt]);
if (file->fa.memb_name[mt]) free(file->fa.memb_name[mt]);
} END_MEMBERS;
+
free(file->name);
free(file);
return 0;
@@ -1149,14 +1149,18 @@ H5FD_multi_cmp(const H5FD_t *_f1, const H5FD_t *_f2)
ALL_MEMBERS(mt) {
out_mt = mt;
- if (f1->memb[mt] && f2->memb[mt]) break;
- if (!cmp) {
- if (f1->memb[mt]) cmp = -1;
- else if (f2->memb[mt]) cmp = 1;
+ if(f1->memb[mt] && f2->memb[mt])
+ break;
+ if(!cmp) {
+ if(f1->memb[mt])
+ cmp = -1;
+ else if(f2->memb[mt])
+ cmp = 1;
}
} END_MEMBERS;
assert(cmp || out_mt<H5FD_MEM_NTYPES);
- if (out_mt>=H5FD_MEM_NTYPES) return cmp;
+ if(out_mt>=H5FD_MEM_NTYPES)
+ return cmp;
return H5FDcmp(f1->memb[out_mt], f2->memb[out_mt]);
}
@@ -1186,8 +1190,10 @@ H5FD_multi_query(const H5FD_t *_f, unsigned long *flags /* out */)
/* Set the VFL feature flags that this driver supports */
if(flags) {
*flags = 0;
- *flags |= H5FD_FEAT_DATA_SIEVE; /* OK to perform data sieving for faster raw data reads & writes */
- *flags |= H5FD_FEAT_AGGREGATE_SMALLDATA; /* OK to aggregate "small" raw data allocations */
+ *flags |= H5FD_FEAT_DATA_SIEVE; /* OK to perform data sieving for faster raw data reads & writes */
+ *flags |= H5FD_FEAT_AGGREGATE_SMALLDATA; /* OK to aggregate "small" raw data allocations */
+ *flags |= H5FD_FEAT_USE_ALLOC_SIZE; /* OK to just pass the allocation size to the alloc callback */
+ *flags |= H5FD_FEAT_PAGED_AGGR; /* OK to use special file space mapping for paged aggregation */
} /* end if */
return(0);
@@ -1532,6 +1538,14 @@ H5FD_multi_alloc(H5FD_t *_file, H5FD_mem_t type, hid_t dxpl_id, hsize_t size)
mmt = file->fa.memb_map[type];
if (H5FD_MEM_DEFAULT==mmt) mmt = type;
+ /* XXX: NEED to work on this again */
+ if(file->pub.paged_aggr) {
+ ALL_MEMBERS(mt) {
+ if(file->memb[mt])
+ file->memb[mt]->paged_aggr = file->pub.paged_aggr;
+ } END_MEMBERS;
+ }
+
if (HADDR_UNDEF==(addr=H5FDalloc(file->memb[mmt], mmt, dxpl_id, size)))
H5Epush_ret(func, H5E_ERR_CLS, H5E_INTERNAL, H5E_BADVALUE, "member file can't alloc", HADDR_UNDEF)
addr += file->fa.memb_addr[mmt];
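The H5FD_multi_query() hunk above is what lets the centralized allocation code later in this patch (H5FDspace.c) leave alignment and file-space mapping to the multi/split driver: the driver advertises H5FD_FEAT_USE_ALLOC_SIZE and H5FD_FEAT_PAGED_AGGR, and the library branches on those bits. A condensed standalone model follows; the flag values mirror the H5FDpublic.h additions below, everything else is a simplified stand-in.

#include <stdio.h>

#define FEAT_USE_ALLOC_SIZE 0x00002000UL   /* pass the raw allocation size to the driver */
#define FEAT_PAGED_AGGR     0x00004000UL   /* driver needs the paged-aggregation mapping */

/* Driver-side query callback, in the spirit of H5FD_multi_query() above. */
static int multi_query(unsigned long *flags)
{
    if (flags)
        *flags = FEAT_USE_ALLOC_SIZE | FEAT_PAGED_AGGR;
    return 0;
}

int main(void)
{
    unsigned long flags = 0;

    multi_query(&flags);
    if (flags & FEAT_USE_ALLOC_SIZE)
        puts("library skips alignment; the driver's alloc callback handles it");
    if (flags & FEAT_PAGED_AGGR)
        puts("driver gets the special file-space mapping for paged aggregation");
    return 0;
}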
diff --git a/src/H5FDmulti.h b/src/H5FDmulti.h
index e819e74..0bd5718 100644
--- a/src/H5FDmulti.h
+++ b/src/H5FDmulti.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5FDpkg.h b/src/H5FDpkg.h
index 45bcfd8..31dcf8d 100644
--- a/src/H5FDpkg.h
+++ b/src/H5FDpkg.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5FDprivate.h b/src/H5FDprivate.h
index 427b42c..0fc2135 100644
--- a/src/H5FDprivate.h
+++ b/src/H5FDprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -53,6 +51,7 @@ typedef struct H5FD_class_mpi_t {
int (*get_rank)(const H5FD_t *file); /* Get the MPI rank of a process */
int (*get_size)(const H5FD_t *file); /* Get the MPI size of a communicator */
MPI_Comm (*get_comm)(const H5FD_t *file); /* Get the communicator for a file */
+ herr_t (*get_mpi_info)(H5FD_t *file, void** mpi_info); /* get MPI_Info for a file */
} H5FD_class_mpi_t;
#endif
@@ -115,6 +114,19 @@ typedef enum {
#define H5FD_DXPL_TYPE_NAME "H5P_dxpl_type"
#endif /* H5_DEBUG_BUILD */
+/* I/O Info for an operation */
+typedef struct H5FD_io_info_t {
+ H5FD_t *file; /* File driver object */
+#ifndef H5_DEBUG_BUILD
+ const
+#endif /* H5_DEBUG_BUILD */
+ H5P_genplist_t *meta_dxpl; /* Metadata DXPL object */
+#ifndef H5_DEBUG_BUILD
+ const
+#endif /* H5_DEBUG_BUILD */
+ H5P_genplist_t *raw_dxpl; /* Raw data DXPL object */
+} H5FD_io_info_t;
+
/*****************************/
/* Library Private Variables */
@@ -126,15 +138,10 @@ typedef enum {
/******************************/
/* Forward declarations for prototype arguments */
-struct H5P_genplist_t;
struct H5F_t;
H5_DLL int H5FD_term_interface(void);
-H5_DLL herr_t H5FD_locate_signature(H5FD_t *file,
-#ifndef H5_DEBUG_BUILD
-const
-#endif /* H5_DEBUG_BUILD */
-H5P_genplist_t *dxpl, haddr_t *sig_addr);
+H5_DLL herr_t H5FD_locate_signature(H5FD_io_info_t *fdio_info, haddr_t *sig_addr);
H5_DLL H5FD_class_t *H5FD_get_class(hid_t id);
H5_DLL hsize_t H5FD_sb_size(H5FD_t *file);
H5_DLL herr_t H5FD_sb_encode(H5FD_t *file, char *name/*out*/, uint8_t *buf);
@@ -146,12 +153,12 @@ H5_DLL H5FD_t *H5FD_open(const char *name, unsigned flags, hid_t fapl_id,
haddr_t maxaddr);
H5_DLL herr_t H5FD_close(H5FD_t *file);
H5_DLL int H5FD_cmp(const H5FD_t *f1, const H5FD_t *f2);
-H5_DLL haddr_t H5FD_alloc(H5FD_t *file, hid_t dxpl_id, H5FD_mem_t type, struct H5F_t *f,
- hsize_t size, haddr_t *align_addr, hsize_t *align_size);
+H5_DLL haddr_t H5FD_alloc(H5FD_t *file, hid_t dxpl_id, H5FD_mem_t type,
+ struct H5F_t *f, hsize_t size, haddr_t *frag_addr, hsize_t *frag_size);
H5_DLL herr_t H5FD_free(H5FD_t *file, hid_t dxpl_id, H5FD_mem_t type, struct H5F_t *f,
haddr_t addr, hsize_t size);
H5_DLL htri_t H5FD_try_extend(H5FD_t *file, H5FD_mem_t type, struct H5F_t *f,
- haddr_t blk_end, hsize_t extra_requested);
+ hid_t dxpl_id, haddr_t blk_end, hsize_t extra_requested);
H5_DLL haddr_t H5FD_get_eoa(const H5FD_t *file, H5FD_mem_t type);
H5_DLL herr_t H5FD_set_eoa(H5FD_t *file, H5FD_mem_t type, haddr_t addr);
H5_DLL haddr_t H5FD_get_eof(const H5FD_t *file, H5FD_mem_t type);
@@ -159,17 +166,9 @@ H5_DLL haddr_t H5FD_get_maxaddr(const H5FD_t *file);
H5_DLL herr_t H5FD_get_feature_flags(const H5FD_t *file, unsigned long *feature_flags);
H5_DLL herr_t H5FD_set_feature_flags(H5FD_t *file, unsigned long feature_flags);
H5_DLL herr_t H5FD_get_fs_type_map(const H5FD_t *file, H5FD_mem_t *type_map);
-H5_DLL herr_t H5FD_read(H5FD_t *file,
-#ifndef H5_DEBUG_BUILD
-const
-#endif /* H5_DEBUG_BUILD */
-H5P_genplist_t *dxpl, H5FD_mem_t type,
+H5_DLL herr_t H5FD_read(H5FD_io_info_t *fdio_info, H5FD_mem_t type,
haddr_t addr, size_t size, void *buf/*out*/);
-H5_DLL herr_t H5FD_write(H5FD_t *file,
-#ifndef H5_DEBUG_BUILD
-const
-#endif /* H5_DEBUG_BUILD */
-H5P_genplist_t *dxpl, H5FD_mem_t type,
+H5_DLL herr_t H5FD_write(const H5FD_io_info_t *fdio_info, H5FD_mem_t type,
haddr_t addr, size_t size, const void *buf);
H5_DLL herr_t H5FD_flush(H5FD_t *file, hid_t dxpl_id, hbool_t closing);
H5_DLL herr_t H5FD_truncate(H5FD_t *file, hid_t dxpl_id, hbool_t closing);
@@ -179,6 +178,7 @@ H5_DLL herr_t H5FD_get_fileno(const H5FD_t *file, unsigned long *filenum);
H5_DLL herr_t H5FD_get_vfd_handle(H5FD_t *file, hid_t fapl, void** file_handle);
H5_DLL herr_t H5FD_set_base_addr(H5FD_t *file, haddr_t base_addr);
H5_DLL haddr_t H5FD_get_base_addr(const H5FD_t *file);
+H5_DLL herr_t H5FD_set_paged_aggr(H5FD_t *file, hbool_t paged);
/* Function prototypes for MPI based VFDs*/
#ifdef H5_HAVE_PARALLEL
@@ -201,6 +201,7 @@ H5_DLL herr_t H5FD_get_mpio_atomicity(H5FD_t *file, hbool_t *flag);
H5_DLL int H5FD_mpi_get_rank(const H5FD_t *file);
H5_DLL int H5FD_mpi_get_size(const H5FD_t *file);
H5_DLL MPI_Comm H5FD_mpi_get_comm(const H5FD_t *_file);
+H5_DLL herr_t H5FD_get_mpi_info(H5FD_t *file, void** file_info);
#endif /* H5_HAVE_PARALLEL */
#endif /* !_H5FDprivate_H */
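H5FD_io_info_t above marks its DXPL members const only outside debug builds, presumably because the H5_DEBUG_BUILD-only dxpl-type checks in H5FD_read()/H5FD_write() need a non-const plist for H5P_get(). A minimal standalone illustration of the conditionally-const member pattern; DEBUG_BUILD and plist_t are hypothetical stand-ins.

#include <stdio.h>

typedef struct plist_t { int type; } plist_t;

typedef struct io_info_t {
#ifndef DEBUG_BUILD
    const
#endif
    plist_t *meta_dxpl;   /* const in release builds, mutable when DEBUG_BUILD is set */
#ifndef DEBUG_BUILD
    const
#endif
    plist_t *raw_dxpl;
} io_info_t;

int main(void)
{
    plist_t   meta = { 1 }, raw = { 2 };
    io_info_t info = { &meta, &raw };

    printf("meta type=%d raw type=%d\n", info.meta_dxpl->type, info.raw_dxpl->type);
    return 0;
}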
diff --git a/src/H5FDpublic.h b/src/H5FDpublic.h
index 436be10..3032e8a 100644
--- a/src/H5FDpublic.h
+++ b/src/H5FDpublic.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -239,6 +237,19 @@ typedef enum H5F_mem_t H5FD_mem_t;
* driver supports the single-writer/multiple-readers I/O pattern.
*/
#define H5FD_FEAT_SUPPORTS_SWMR_IO 0x00001000
+ /*
+ * Defining H5FD_FEAT_USE_ALLOC_SIZE for a VFL driver
+ * means that the library will just pass the allocation size to the
+ * driver's allocation callback, which will eventually handle alignment.
+ * This is specifically used for the multi/split driver.
+ */
+#define H5FD_FEAT_USE_ALLOC_SIZE 0x00002000
+ /*
+ * Defining H5FD_FEAT_PAGED_AGGR for a VFL driver
+ * means that the driver needs special file space mapping for paged aggregation.
+ * This is specifically used for the multi/split driver.
+ */
+#define H5FD_FEAT_PAGED_AGGR 0x00004000
/* Forward declaration */
typedef struct H5FD_t H5FD_t;
@@ -307,6 +318,7 @@ struct H5FD_t {
/* Space allocation management fields */
hsize_t threshold; /* Threshold for alignment */
hsize_t alignment; /* Allocation alignment */
+ hbool_t paged_aggr; /* Paged aggregation for file space is enabled or not */
};
/* Define enum for the source of file image callbacks */
diff --git a/src/H5FDsec2.c b/src/H5FDsec2.c
index 0ca5efb..26913e2 100644
--- a/src/H5FDsec2.c
+++ b/src/H5FDsec2.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5FDsec2.h b/src/H5FDsec2.h
index 0c62597..a4ade0b 100644
--- a/src/H5FDsec2.h
+++ b/src/H5FDsec2.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5FDspace.c b/src/H5FDspace.c
index edc83e6..e451d6b 100644
--- a/src/H5FDspace.c
+++ b/src/H5FDspace.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -19,7 +17,7 @@
* Jan 3 2008
* Quincey Koziol <koziol@hdfgroup.org>
*
- * Purpose: Space allocation routines for the file.
+ * Purpose: Space allocation routines for the file driver code.
*
*-------------------------------------------------------------------------
*/
@@ -99,7 +97,7 @@ H5FL_DEFINE(H5FD_free_t);
*-------------------------------------------------------------------------
*/
static haddr_t
-H5FD_extend(H5FD_t *file, H5FD_mem_t type, hbool_t new_block, hsize_t size, haddr_t *frag_addr, hsize_t *frag_size)
+H5FD_extend(H5FD_t *file, H5FD_mem_t type, hsize_t size)
{
hsize_t orig_size = size; /* Original allocation size */
haddr_t eoa; /* Address of end-of-allocated space */
@@ -117,40 +115,18 @@ H5FD_extend(H5FD_t *file, H5FD_mem_t type, hbool_t new_block, hsize_t size, hadd
/* Get current end-of-allocated space address */
eoa = file->cls->get_eoa(file, type);
- /* Compute extra space to allocate, if this is a new block and should be aligned */
- extra = 0;
- if(new_block && file->alignment > 1 && orig_size >= file->threshold) {
- hsize_t mis_align; /* Amount EOA is misaligned */
-
- /* Check for EOA already aligned */
- if((mis_align = (eoa % file->alignment)) > 0) {
- extra = file->alignment - mis_align;
- if(frag_addr)
- *frag_addr = eoa - file->base_addr; /* adjust for file's base address */
- if(frag_size)
- *frag_size = extra;
- } /* end if */
- } /* end if */
-
- /* Add in extra allocation amount */
- size += extra;
-
/* Check for overflow when extending */
if(H5F_addr_overflow(eoa, size) || (eoa + size) > file->maxaddr)
HGOTO_ERROR(H5E_VFL, H5E_NOSPACE, HADDR_UNDEF, "file allocation request failed")
- /* Set the [possibly aligned] address to return */
- ret_value = eoa + extra;
+ /* Set the [NOT aligned] address to return */
+ ret_value = eoa;
/* Extend the end-of-allocated space address */
eoa += size;
if(file->cls->set_eoa(file, type, eoa) < 0)
HGOTO_ERROR(H5E_VFL, H5E_NOSPACE, HADDR_UNDEF, "file allocation request failed")
- /* Post-condition sanity check */
- if(new_block && file->alignment && orig_size >= file->threshold)
- HDassert(!(ret_value % file->alignment));
-
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5FD_extend() */
@@ -160,6 +136,8 @@ done:
* Function: H5FD_alloc_real
*
* Purpose: Allocate space in the file with the VFD
+ * Note: the handling of alignment is moved up from each driver to
+ * this routine.
*
* Return: Success: The format address of the new file memory.
* Failure: The undefined address HADDR_UNDEF
@@ -170,8 +148,14 @@ done:
*-------------------------------------------------------------------------
*/
haddr_t
-H5FD_alloc_real(H5FD_t *file, hid_t dxpl_id, H5FD_mem_t type, hsize_t size, haddr_t *frag_addr, hsize_t *frag_size)
+H5FD_alloc_real(H5FD_t *file, hid_t dxpl_id, H5FD_mem_t type, hsize_t size,
+ haddr_t *frag_addr, hsize_t *frag_size)
{
+ hsize_t orig_size = size; /* Original allocation size */
+ haddr_t eoa; /* Address of end-of-allocated space */
+ hsize_t extra; /* Extra space to allocate, to align request */
+ unsigned long flags = 0; /* Driver feature flags */
+ hbool_t use_alloc_size; /* Just pass alloc size to the driver */
haddr_t ret_value = HADDR_UNDEF; /* Return value */
FUNC_ENTER_NOAPI(HADDR_UNDEF)
@@ -185,16 +169,53 @@ HDfprintf(stderr, "%s: type = %u, size = %Hu\n", FUNC, (unsigned)type, size);
HDassert(type >= H5FD_MEM_DEFAULT && type < H5FD_MEM_NTYPES);
HDassert(size > 0);
+ /* Check for query driver and call it */
+ if(file->cls->query)
+ (file->cls->query)(file, &flags);
+
+ /* Check for the driver feature flag */
+ use_alloc_size = flags & H5FD_FEAT_USE_ALLOC_SIZE;
+
+ /* Get current end-of-allocated space address */
+ eoa = file->cls->get_eoa(file, type);
+
+ /* Compute extra space to allocate, if this should be aligned */
+ extra = 0;
+ if(!file->paged_aggr && file->alignment > 1 && orig_size >= file->threshold) {
+ hsize_t mis_align; /* Amount EOA is misaligned */
+
+ /* Check for EOA already aligned */
+ if((mis_align = (eoa % file->alignment)) > 0) {
+ extra = file->alignment - mis_align;
+ if(frag_addr)
+ *frag_addr = eoa - file->base_addr; /* adjust for file's base address */
+ if(frag_size)
+ *frag_size = extra;
+ } /* end if */
+ } /* end if */
+
/* Dispatch to driver `alloc' callback or extend the end-of-address marker */
+ /* For the multi/split driver: the size passed down to the alloc callback is the original size from H5FD_alloc() */
+    /* For all other drivers: the size passed down to the alloc callback is the requested size plus any alignment padding */
if(file->cls->alloc) {
- if((ret_value = (file->cls->alloc)(file, type, dxpl_id, size)) == HADDR_UNDEF)
+ ret_value = (file->cls->alloc)(file, type, dxpl_id, use_alloc_size ? size : size + extra);
+ if(!H5F_addr_defined(ret_value))
HGOTO_ERROR(H5E_VFL, H5E_NOSPACE, HADDR_UNDEF, "driver allocation request failed")
} /* end if */
else {
- if((ret_value = H5FD_extend(file, type, TRUE, size, frag_addr, frag_size)) == HADDR_UNDEF)
+ ret_value = H5FD_extend(file, type, size + extra);
+ if(!H5F_addr_defined(ret_value))
HGOTO_ERROR(H5E_VFL, H5E_NOSPACE, HADDR_UNDEF, "driver eoa update request failed")
} /* end else */
+ /* Set the [possibly aligned] address to return */
+ if(!use_alloc_size)
+ ret_value += extra;
+
+ /* Post-condition sanity check */
+ if(!file->paged_aggr && file->alignment > 1 && orig_size >= file->threshold)
+ HDassert(!(ret_value % file->alignment));
+
/* Convert absolute file offset to relative address */
ret_value -= file->base_addr;
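
A standalone sketch (not part of this patch) of the alignment arithmetic that H5FD_alloc_real() now performs centrally on behalf of every driver; the paged-aggregation check is omitted and the names are illustrative only:

    #include <stdint.h>

    /* Return the padding needed so that an allocation of 'size' bytes,
     * placed after 'eoa', starts on an 'alignment' boundary.  Requests
     * smaller than 'threshold' are never padded, mirroring the check in
     * H5FD_alloc_real() above.
     */
    static uint64_t
    align_extra(uint64_t eoa, uint64_t alignment, uint64_t threshold, uint64_t size)
    {
        uint64_t extra = 0;

        if(alignment > 1 && size >= threshold) {
            uint64_t mis_align = eoa % alignment;   /* how far past a boundary EOA is */

            if(mis_align > 0)
                extra = alignment - mis_align;      /* pad up to the next boundary */
        }

        return extra;   /* caller allocates size + extra and returns eoa + extra */
    }
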
@@ -243,9 +264,9 @@ H5FD_alloc(H5FD_t *file, hid_t dxpl_id, H5FD_mem_t type, H5F_t *f, hsize_t size,
if(!H5F_addr_defined(ret_value))
HGOTO_ERROR(H5E_VFL, H5E_CANTALLOC, HADDR_UNDEF, "real 'alloc' request failed")
- /* Mark superblock dirty in cache, so change to EOA will get encoded */
- if(H5F_super_dirty(f) < 0)
- HGOTO_ERROR(H5E_VFL, H5E_CANTMARKDIRTY, HADDR_UNDEF, "unable to mark superblock as dirty")
+ /* Mark EOA info dirty in cache, so change will get encoded */
+ if(H5F_eoa_dirty(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_VFL, H5E_CANTMARKDIRTY, HADDR_UNDEF, "unable to mark EOA info as dirty")
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -350,8 +371,8 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5FD_free(H5FD_t *file, hid_t dxpl_id, H5FD_mem_t type, H5F_t *f, haddr_t addr,
- hsize_t size)
+H5FD_free(H5FD_t *file, hid_t dxpl_id, H5FD_mem_t type, H5F_t *f,
+ haddr_t addr, hsize_t size)
{
herr_t ret_value = SUCCEED; /* Return value */
@@ -367,9 +388,9 @@ H5FD_free(H5FD_t *file, hid_t dxpl_id, H5FD_mem_t type, H5F_t *f, haddr_t addr,
if(H5FD_free_real(file, dxpl_id, type, addr, size) < 0)
HGOTO_ERROR(H5E_VFL, H5E_CANTFREE, FAIL, "real 'free' request failed")
- /* Mark superblock dirty in cache, so change to EOA will get encoded */
- if(H5F_super_dirty(f) < 0)
- HGOTO_ERROR(H5E_VFL, H5E_CANTMARKDIRTY, FAIL, "unable to mark superblock as dirty")
+ /* Mark EOA info dirty in cache, so change will get encoded */
+ if(H5F_eoa_dirty(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_VFL, H5E_CANTMARKDIRTY, FAIL, "unable to mark EOA info as dirty")
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -395,8 +416,8 @@ done:
*-------------------------------------------------------------------------
*/
htri_t
-H5FD_try_extend(H5FD_t *file, H5FD_mem_t type, H5F_t *f, haddr_t blk_end,
- hsize_t extra_requested)
+H5FD_try_extend(H5FD_t *file, H5FD_mem_t type, H5F_t *f, hid_t dxpl_id,
+ haddr_t blk_end, hsize_t extra_requested)
{
haddr_t eoa; /* End of allocated space in file */
htri_t ret_value = FALSE; /* Return value */
@@ -420,12 +441,12 @@ H5FD_try_extend(H5FD_t *file, H5FD_mem_t type, H5F_t *f, haddr_t blk_end,
/* Check if the block is exactly at the end of the file */
if(H5F_addr_eq(blk_end, eoa)) {
/* Extend the object by extending the underlying file */
- if(HADDR_UNDEF == H5FD_extend(file, type, FALSE, extra_requested, NULL, NULL))
+ if(HADDR_UNDEF == H5FD_extend(file, type, extra_requested))
HGOTO_ERROR(H5E_VFL, H5E_CANTEXTEND, FAIL, "driver extend request failed")
- /* Mark superblock dirty in cache, so change to EOA will get encoded */
- if(H5F_super_dirty(f) < 0)
- HGOTO_ERROR(H5E_VFL, H5E_CANTMARKDIRTY, FAIL, "unable to mark superblock as dirty")
+ /* Mark EOA info dirty in cache, so change will get encoded */
+ if(H5F_eoa_dirty(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_VFL, H5E_CANTMARKDIRTY, FAIL, "unable to mark EOA info as dirty")
/* Indicate success */
HGOTO_DONE(TRUE)
diff --git a/src/H5FDstdio.c b/src/H5FDstdio.c
index 4c62bcc..5023af3 100644
--- a/src/H5FDstdio.c
+++ b/src/H5FDstdio.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Robb Matzke <matzke@llnl.gov>
@@ -601,13 +599,6 @@ H5FD_stdio_alloc(H5FD_t *_file, H5FD_mem_t /*UNUSED*/ type, hid_t /*UNUSED*/ dxp
/* Compute the address for the block to allocate */
addr = file->eoa;
- /* Check if we need to align this block */
- if(size >= file->pub.threshold) {
- /* Check for an already aligned block */
- if((addr % file->pub.alignment) != 0)
- addr = ((addr / file->pub.alignment) + 1) * file->pub.alignment;
- } /* end if */
-
file->eoa = addr + size;
return addr;
diff --git a/src/H5FDstdio.h b/src/H5FDstdio.h
index 8281705..f99aacf 100644
--- a/src/H5FDstdio.h
+++ b/src/H5FDstdio.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5FDtest.c b/src/H5FDtest.c
index fc9188b..f528dfb 100644
--- a/src/H5FDtest.c
+++ b/src/H5FDtest.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5FDwindows.c b/src/H5FDwindows.c
index 8dd7555..76c4f18 100644
--- a/src/H5FDwindows.c
+++ b/src/H5FDwindows.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "H5private.h" /* Generic Functions */
diff --git a/src/H5FDwindows.h b/src/H5FDwindows.h
index 7a9d2cf..5cf68a1 100644
--- a/src/H5FDwindows.h
+++ b/src/H5FDwindows.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5FL.c b/src/H5FL.c
index db51809..0e67414 100644
--- a/src/H5FL.c
+++ b/src/H5FL.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5FLmodule.h b/src/H5FLmodule.h
index cfa585c..48b8d2b 100644
--- a/src/H5FLmodule.h
+++ b/src/H5FLmodule.h
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5FLprivate.h b/src/H5FLprivate.h
index f44d359..4aa44f9 100644
--- a/src/H5FLprivate.h
+++ b/src/H5FLprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5FO.c b/src/H5FO.c
index a77e268..627ee64 100644
--- a/src/H5FO.c
+++ b/src/H5FO.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5FOprivate.h b/src/H5FOprivate.h
index 4648f02..aa85c29 100644
--- a/src/H5FOprivate.h
+++ b/src/H5FOprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5FS.c b/src/H5FS.c
index 2193d84..b789dfd 100644
--- a/src/H5FS.c
+++ b/src/H5FS.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -135,7 +133,7 @@ HDfprintf(stderr, "%s: Creating free space manager, nclasses = %Zu\n", FUNC, ncl
fspace->swmr_write = (H5F_INTENT(f) & H5F_ACC_SWMR_WRITE) > 0;
fspace->alignment = alignment;
- fspace->threshold = threshold;
+ fspace->align_thres = threshold;
/* Check if the free space tracker is supposed to be persistant */
if(fs_addr) {
@@ -227,7 +225,7 @@ HDfprintf(stderr, "%s: fspace->rc = %u\n", FUNC, fspace->rc);
HGOTO_ERROR(H5E_FSPACE, H5E_CANTINC, NULL, "unable to increment ref. count on free space header")
fspace->alignment = alignment;
- fspace->threshold = threshold;
+ fspace->align_thres = threshold;
/* Unlock free space header */
if(H5AC_unprotect(f, dxpl_id, H5AC_FSPACE_HDR, fs_addr, fspace, H5AC__NO_FLAGS_SET) < 0)
@@ -365,7 +363,7 @@ HDfprintf(stderr, "%s: Expunging free space section info from cache\n", FUNC);
if(H5AC_expunge_entry(f, dxpl_id, H5AC_FSPACE_SINFO, fspace->sect_addr, cache_flags) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTREMOVE, FAIL, "unable to remove free space section info from cache")
- }
+ } /* end block */
#ifdef H5FS_DEBUG
HDfprintf(stderr, "%s: Done expunging free space section info from cache\n", FUNC);
@@ -873,10 +871,7 @@ H5FS_alloc_sect(H5F_t *f, H5FS_t *fspace, hid_t dxpl_id)
HDassert(fspace);
if(!H5F_addr_defined(fspace->sect_addr) && fspace->sinfo && fspace->serial_sect_count > 0) {
- /* Allocate space for section info from aggregator/vfd (or temp. address space) */
- /* (The original version called H5MF_alloc(), but that may cause sect_size to change again) */
- /* (This routine is only called during file close operations, so don't allocate from temp. address space) */
- if(HADDR_UNDEF == (fspace->sect_addr = H5MF_aggr_vfd_alloc(f, H5FD_MEM_FSPACE_SINFO, dxpl_id, fspace->sect_size)))
+ if(HADDR_UNDEF == (fspace->sect_addr = H5MF_alloc(f, H5FD_MEM_FSPACE_SINFO, dxpl_id, fspace->sect_size)))
HGOTO_ERROR(H5E_FSPACE, H5E_NOSPACE, FAIL, "file allocation failed for section info")
fspace->alloc_sect_size = fspace->sect_size;
@@ -888,6 +883,10 @@ H5FS_alloc_sect(H5F_t *f, H5FS_t *fspace, hid_t dxpl_id)
if(H5AC_insert_entry(f, dxpl_id, H5AC_FSPACE_SINFO, fspace->sect_addr, fspace->sinfo, H5AC__NO_FLAGS_SET) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTINIT, FAIL, "can't add free space sections to cache")
+ /* Since space has been allocated for the section info and the sinfo
+ * has been inserted into the cache, relinquish owership (i.e. float)
+ * the section info.
+ */
fspace->sinfo = NULL;
} /* end if */
@@ -909,7 +908,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5FS_free(H5F_t *f, H5FS_t *fspace, hid_t dxpl_id)
+H5FS_free(H5F_t *f, H5FS_t *fspace, hid_t dxpl_id, hbool_t free_file_space)
{
haddr_t saved_addr; /* Previous address of item */
unsigned cache_flags; /* Flags for unprotecting cache entries */
@@ -923,6 +922,7 @@ H5FS_free(H5F_t *f, H5FS_t *fspace, hid_t dxpl_id)
cache_flags = H5AC__DELETED_FLAG | H5AC__TAKE_OWNERSHIP_FLAG;;
+ /* Free space for section info */
if(H5F_addr_defined(fspace->sect_addr)) {
hsize_t saved_size; /* Size of previous section info */
unsigned sinfo_status = 0; /* Section info cache status */
@@ -955,7 +955,8 @@ H5FS_free(H5F_t *f, H5FS_t *fspace, hid_t dxpl_id)
/* Free space for the free-space manager section info */
if(!H5F_IS_TMP_ADDR(f, saved_addr)) {
- if(H5MF_xfree(f, H5FD_MEM_FSPACE_SINFO, dxpl_id, saved_addr, saved_size) < 0)
+ if(free_file_space &&
+ H5MF_xfree(f, H5FD_MEM_FSPACE_SINFO, dxpl_id, saved_addr, saved_size) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTFREE, FAIL, "unable to release free space sections")
} /* end if */
@@ -964,6 +965,7 @@ H5FS_free(H5F_t *f, H5FS_t *fspace, hid_t dxpl_id)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTMARKDIRTY, FAIL, "unable to mark free space header as dirty")
} /* end if */
+ /* Free space for header */
if(H5F_addr_defined(fspace->addr)) {
unsigned hdr_status = 0; /* Header entry status */
@@ -996,7 +998,8 @@ H5FS_free(H5F_t *f, H5FS_t *fspace, hid_t dxpl_id)
fspace->addr = HADDR_UNDEF;
/* Free space for the free-space manager header */
- if(H5MF_xfree(f, H5FD_MEM_FSPACE_HDR, dxpl_id, saved_addr, (hsize_t)H5FS_HEADER_SIZE(f)) < 0)
+ if(free_file_space &&
+ H5MF_xfree(f, H5FD_MEM_FSPACE_HDR, dxpl_id, saved_addr, (hsize_t)H5FS_HEADER_SIZE(f)) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTFREE, FAIL, "unable to free free space header")
} /* end if */
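
The new free_file_space flag makes the two H5MF_xfree() calls above conditional, so a caller can now discard the free space manager's in-memory and cache state without also returning its header and section info space to the file. A hedged caller sketch (the surrounding context is assumed, not taken from this patch):

    /* Tear down the free space manager but leave its file space alone,
     * e.g. when that space will be handled separately later in the
     * close path.
     */
    if(H5FS_free(f, fspace, dxpl_id, FALSE) < 0)
        HGOTO_ERROR(H5E_FSPACE, H5E_CANTRELEASE, FAIL, "can't release free space manager")
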
@@ -1170,6 +1173,23 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5FS_sinfo_dest() */
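+
+/*-------------------------------------------------------------------------
+ * Function: H5FS_get_sect_count
+
+ * Purpose: Retrieve the total number of serializable sections tracked
+ * by a free space manager
+
+ * Return: SUCCEED (cannot fail)
+
+ *-------------------------------------------------------------------------
+ */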
+herr_t
+H5FS_get_sect_count(const H5FS_t *frsp, hsize_t *tot_sect_count)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ /* Check arguments. */
+ HDassert(frsp);
+ HDassert(tot_sect_count);
+
+ /* Report statistics for free space */
+ *tot_sect_count = frsp->serial_sect_count;
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5FS_get_sect_count() */
+
#ifdef H5FS_DEBUG_ASSERT
/*-------------------------------------------------------------------------
diff --git a/src/H5FScache.c b/src/H5FScache.c
index 42eccff..628fba0 100644
--- a/src/H5FScache.c
+++ b/src/H5FScache.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -37,6 +35,7 @@
#include "H5private.h" /* Generic Functions */
#include "H5ACprivate.h" /* Metadata cache */
#include "H5Eprivate.h" /* Error handling */
+#include "H5Fprivate.h" /* File */
#include "H5FSpkg.h" /* File free space */
#include "H5MFprivate.h" /* File memory management */
#include "H5VMprivate.h" /* Vectors and arrays */
@@ -83,11 +82,12 @@ static htri_t H5FS__cache_hdr_verify_chksum(const void *image_ptr, size_t len, v
static void *H5FS__cache_hdr_deserialize(const void *image, size_t len,
void *udata, hbool_t *dirty);
static herr_t H5FS__cache_hdr_image_len(const void *thing, size_t *image_len);
-static herr_t H5FS__cache_hdr_pre_serialize(const H5F_t *f, hid_t dxpl_id,
+static herr_t H5FS__cache_hdr_pre_serialize(H5F_t *f, hid_t dxpl_id,
void *thing, haddr_t addr, size_t len, haddr_t *new_addr, size_t *new_len,
unsigned *flags);
static herr_t H5FS__cache_hdr_serialize(const H5F_t *f, void *image,
size_t len, void *thing);
+static herr_t H5FS__cache_hdr_notify(H5AC_notify_action_t action, void *thing);
static herr_t H5FS__cache_hdr_free_icr(void *thing);
static herr_t H5FS__cache_sinfo_get_initial_load_size(void *udata, size_t *image_len);
@@ -95,7 +95,7 @@ static htri_t H5FS__cache_sinfo_verify_chksum(const void *image_ptr, size_t len,
static void *H5FS__cache_sinfo_deserialize(const void *image, size_t len,
void *udata, hbool_t *dirty);
static herr_t H5FS__cache_sinfo_image_len(const void *thing, size_t *image_len);
-static herr_t H5FS__cache_sinfo_pre_serialize(const H5F_t *f, hid_t dxpl_id,
+static herr_t H5FS__cache_sinfo_pre_serialize(H5F_t *f, hid_t dxpl_id,
void *thing, haddr_t addr, size_t len, haddr_t *new_addr, size_t *new_len,
unsigned *flags);
static herr_t H5FS__cache_sinfo_serialize(const H5F_t *f, void *image,
@@ -121,7 +121,7 @@ const H5AC_class_t H5AC_FSPACE_HDR[1] = {{
H5FS__cache_hdr_image_len, /* 'image_len' callback */
H5FS__cache_hdr_pre_serialize, /* 'pre_serialize' callback */
H5FS__cache_hdr_serialize, /* 'serialize' callback */
- NULL, /* 'notify' callback */
+ H5FS__cache_hdr_notify, /* 'notify' callback */
H5FS__cache_hdr_free_icr, /* 'free_icr' callback */
NULL, /* 'fsf_size' callback */
}};
@@ -295,7 +295,7 @@ H5FS__cache_hdr_deserialize(const void *_image, size_t len, void *_udata,
/* # of section classes */
/* (only check if we actually have some classes) */
UINT16DECODE(image, nclasses);
- if(fspace->nclasses > 0 && fspace->nclasses != nclasses)
+ if(fspace->nclasses > 0 && nclasses > fspace->nclasses)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTLOAD, NULL, "section class count mismatch")
/* Shrink percent */
@@ -404,7 +404,7 @@ H5FS__cache_hdr_image_len(const void *_thing, size_t *image_len)
*-------------------------------------------------------------------------
*/
static herr_t
-H5FS__cache_hdr_pre_serialize(const H5F_t *f, hid_t dxpl_id, void *_thing,
+H5FS__cache_hdr_pre_serialize(H5F_t *f, hid_t dxpl_id, void *_thing,
haddr_t addr, size_t H5_ATTR_UNUSED len, haddr_t *new_addr, size_t *new_len,
unsigned *flags)
{
@@ -519,7 +519,6 @@ H5FS__cache_hdr_pre_serialize(const H5F_t *f, hid_t dxpl_id, void *_thing,
HDassert(fspace->sect_size > 0);
if(!H5F_addr_defined(fspace->sect_addr)) { /* case 1 */
-
haddr_t tag = HADDR_UNDEF;
/* allocate file space for the section info, and insert it
@@ -596,9 +595,11 @@ H5FS__cache_hdr_pre_serialize(const H5F_t *f, hid_t dxpl_id, void *_thing,
* real file space lest the header be written to file with
* a nonsense section info address.
*/
- HDassert(fspace->serial_sect_count > 0);
- HDassert(fspace->sect_size > 0);
- HDassert(fspace->alloc_sect_size == (size_t)fspace->sect_size);
+ if(!H5F_POINT_OF_NO_RETURN(f)) {
+ HDassert(fspace->serial_sect_count > 0);
+ HDassert(fspace->sect_size > 0);
+ HDassert(fspace->alloc_sect_size == (size_t)fspace->sect_size);
+ } /* end if */
if(H5F_IS_TMP_ADDR(f, fspace->sect_addr)) {
unsigned sect_status = 0;
@@ -702,10 +703,12 @@ H5FS__cache_hdr_serialize(const H5F_t *f, void *_image, size_t len,
* The following asserts are a cursory check on this.
*/
HDassert((! H5F_addr_defined(fspace->sect_addr)) || (! H5F_IS_TMP_ADDR(f, fspace->sect_addr)));
- HDassert((! H5F_addr_defined(fspace->sect_addr)) ||
- ((fspace->serial_sect_count > 0) &&
- (fspace->sect_size > 0) &&
- (fspace->alloc_sect_size == (size_t)fspace->sect_size)));
+
+ if(!H5F_POINT_OF_NO_RETURN(f))
+ HDassert((! H5F_addr_defined(fspace->sect_addr)) ||
+ ((fspace->serial_sect_count > 0) &&
+ (fspace->sect_size > 0) &&
+ (fspace->alloc_sect_size == (size_t)fspace->sect_size)));
/* Magic number */
HDmemcpy(image, H5FS_HDR_MAGIC, (size_t)H5_SIZEOF_MAGIC);
@@ -769,6 +772,65 @@ H5FS__cache_hdr_serialize(const H5F_t *f, void *_image, size_t len,
/*-------------------------------------------------------------------------
+ * Function: H5FS__cache_hdr_notify
+ *
+ * Purpose: Handle cache action notifications
+ *
+ * Return: SUCCEED/FAIL
+ *
+ * Programmer: Quincey Koziol
+ * koziol@lbl.gov
+ * January 3, 2017
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5FS__cache_hdr_notify(H5AC_notify_action_t action, void *_thing)
+{
+ H5FS_t *fspace = (H5FS_t *)_thing; /* Pointer to the object */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+ /* Sanity check */
+ HDassert(fspace);
+
+ /* Determine which action to take */
+ switch(action) {
+ case H5AC_NOTIFY_ACTION_AFTER_INSERT:
+ case H5AC_NOTIFY_ACTION_AFTER_LOAD:
+ case H5AC_NOTIFY_ACTION_AFTER_FLUSH:
+ /* do nothing */
+ break;
+
+ case H5AC_NOTIFY_ACTION_ENTRY_DIRTIED:
+ if(H5AC_unsettle_entry_ring(fspace) < 0)
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTFLUSH, FAIL, "unable to mark FSM ring as unsettled")
+ break;
+
+ case H5AC_NOTIFY_ACTION_ENTRY_CLEANED:
+ case H5AC_NOTIFY_ACTION_CHILD_DIRTIED:
+ case H5AC_NOTIFY_ACTION_CHILD_CLEANED:
+ case H5AC_NOTIFY_ACTION_CHILD_UNSERIALIZED:
+ case H5AC_NOTIFY_ACTION_CHILD_SERIALIZED:
+ case H5AC_NOTIFY_ACTION_BEFORE_EVICT:
+ /* do nothing */
+ break;
+
+ default:
+#ifdef NDEBUG
+ HGOTO_ERROR(H5E_FSPACE, H5E_BADVALUE, FAIL, "unknown action from metadata cache")
+#else /* NDEBUG */
+ HDassert(0 && "Unknown action?!?");
+#endif /* NDEBUG */
+ } /* end switch */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5FS__cache_hdr_notify() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5FS__cache_hdr_free_icr
*
* Purpose: Destroys a free space header in memory.
@@ -1002,7 +1064,7 @@ H5FS__cache_sinfo_deserialize(const void *_image, size_t len, void *_udata,
/* Insert section in free space manager, unless requested not to */
if(!(des_flags & H5FS_DESERIALIZE_NO_ADD))
- if(H5FS_sect_add(udata->f, udata->dxpl_id, udata->fspace, new_sect, H5FS_ADD_DESERIALIZING, NULL) < 0)
+ if(H5FS_sect_add(udata->f, udata->dxpl_id, udata->fspace, new_sect, H5FS_ADD_DESERIALIZING, udata) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTINSERT, NULL, "can't add section to free space manager")
} /* end for */
} while(image < (((const uint8_t *)_image + old_sect_size) - H5FS_SIZEOF_CHKSUM));
@@ -1092,7 +1154,7 @@ H5FS__cache_sinfo_image_len(const void *_thing, size_t *image_len)
*-------------------------------------------------------------------------
*/
static herr_t
-H5FS__cache_sinfo_pre_serialize(const H5F_t *f, hid_t dxpl_id, void *_thing,
+H5FS__cache_sinfo_pre_serialize(H5F_t *f, hid_t dxpl_id, void *_thing,
haddr_t addr, size_t len, haddr_t *new_addr, size_t *new_len, unsigned *flags)
{
H5FS_sinfo_t *sinfo = (H5FS_sinfo_t *)_thing; /* Pointer to the object */
@@ -1118,8 +1180,9 @@ H5FS__cache_sinfo_pre_serialize(const H5F_t *f, hid_t dxpl_id, void *_thing,
HDassert(new_len);
HDassert(flags);
- /* we shouldn't be called if the section info is empty */
- HDassert(fspace->serial_sect_count > 0);
+ /* we shouldn't be called if the section info is empty, unless we hit the point of no return. */
+ if(!H5F_POINT_OF_NO_RETURN(f))
+ HDassert(fspace->serial_sect_count > 0);
sinfo_addr = addr; /* this will change if we relocate the section data */
@@ -1283,6 +1346,8 @@ H5FS__cache_sinfo_notify(H5AC_notify_action_t action, void *_thing)
case H5AC_NOTIFY_ACTION_ENTRY_CLEANED:
case H5AC_NOTIFY_ACTION_CHILD_DIRTIED:
case H5AC_NOTIFY_ACTION_CHILD_CLEANED:
+ case H5AC_NOTIFY_ACTION_CHILD_UNSERIALIZED:
+ case H5AC_NOTIFY_ACTION_CHILD_SERIALIZED:
/* do nothing */
break;
diff --git a/src/H5FSdbg.c b/src/H5FSdbg.c
index fbdeb70..450216b 100644
--- a/src/H5FSdbg.c
+++ b/src/H5FSdbg.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5FSint.c b/src/H5FSint.c
index 60cedd5..1a41172 100644
--- a/src/H5FSint.c
+++ b/src/H5FSint.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5FSmodule.h b/src/H5FSmodule.h
index b435b79..b2869dd 100644
--- a/src/H5FSmodule.h
+++ b/src/H5FSmodule.h
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5FSpkg.h b/src/H5FSpkg.h
index b3b1548..df1d92f 100644
--- a/src/H5FSpkg.h
+++ b/src/H5FSpkg.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -187,8 +185,8 @@ struct H5FS_t {
/* must be either H5C__NO_FLAGS_SET (i.e r/w) */
/* or H5AC__READ_ONLY_FLAG (i.e. r/o). */
size_t max_cls_serial_size; /* Max. additional size of serialized form of section */
- hsize_t threshold; /* Threshold for alignment */
hsize_t alignment; /* Alignment */
+ hsize_t align_thres; /* Threshold for alignment */
/* Memory data structures (not stored directly) */
@@ -200,12 +198,6 @@ struct H5FS_t {
/* Package Private Variables */
/*****************************/
-/* H5FS header inherits cache-like properties from H5AC */
-H5_DLLVAR const H5AC_class_t H5AC_FSPACE_HDR[1];
-
-/* H5FS section info inherits cache-like properties from H5AC */
-H5_DLLVAR const H5AC_class_t H5AC_FSPACE_SINFO[1];
-
/* Declare a free list to manage the H5FS_node_t struct */
H5FL_EXTERN(H5FS_node_t);
diff --git a/src/H5FSprivate.h b/src/H5FSprivate.h
index 20fdff1..c0467a6 100644
--- a/src/H5FSprivate.h
+++ b/src/H5FSprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -67,6 +65,12 @@
* managed sections is in flux)
*/
+#define H5FS_PAGE_END_NO_ADD 0x08 /* For "small" page free space:
+ * don't add the section to the free
+ * space manager when it sits at the
+ * end of a page and its size is <=
+ * the "small" threshold
+ */
+
/* Flags for deserialize callback */
#define H5FS_DESERIALIZE_NO_ADD 0x01 /* Don't add section to free space
* manager after it's deserialized
@@ -98,11 +102,11 @@ typedef struct H5FS_section_class_t {
herr_t (*term_cls)(struct H5FS_section_class_t *); /* Routine to terminate class-specific settings */
/* Object methods */
- herr_t (*add)(H5FS_section_info_t *, unsigned *, void *); /* Routine called when section is about to be added to manager */
+ herr_t (*add)(H5FS_section_info_t **, unsigned *, void *); /* Routine called when section is about to be added to manager */
herr_t (*serialize)(const struct H5FS_section_class_t *, const H5FS_section_info_t *, uint8_t *); /* Routine to serialize a "live" section into a buffer */
H5FS_section_info_t *(*deserialize)(const struct H5FS_section_class_t *, hid_t dxpl_id, const uint8_t *, haddr_t, hsize_t, unsigned *); /* Routine to deserialize a buffer into a "live" section */
htri_t (*can_merge)(const H5FS_section_info_t *, const H5FS_section_info_t *, void *); /* Routine to determine if two nodes are mergable */
- herr_t (*merge)(H5FS_section_info_t *, H5FS_section_info_t *, void *); /* Routine to merge two nodes */
+ herr_t (*merge)(H5FS_section_info_t **, H5FS_section_info_t *, void *); /* Routine to merge two nodes */
htri_t (*can_shrink)(const H5FS_section_info_t *, void *); /* Routine to determine if node can shrink container */
herr_t (*shrink)(H5FS_section_info_t **, void *); /* Routine to shrink container */
herr_t (*free)(H5FS_section_info_t *); /* Routine to free node */
@@ -182,7 +186,8 @@ H5_DLL herr_t H5FS_delete(H5F_t *f, hid_t dxpl_id, haddr_t fs_addr);
H5_DLL herr_t H5FS_close(H5F_t *f, hid_t dxpl_id, H5FS_t *fspace);
H5_DLL herr_t H5FS_alloc_hdr(H5F_t *f, H5FS_t *fspace, haddr_t *fs_addr, hid_t dxpl_id);
H5_DLL herr_t H5FS_alloc_sect(H5F_t *f, H5FS_t *fspace, hid_t dxpl_id);
-H5_DLL herr_t H5FS_free(H5F_t *f, H5FS_t *fspace, hid_t dxpl_id);
+H5_DLL herr_t H5FS_free(H5F_t *f, H5FS_t *fspace, hid_t dxpl_id,
+ hbool_t free_file_space);
/* Free space section routines */
H5_DLL herr_t H5FS_sect_add(H5F_t *f, hid_t dxpl_id, H5FS_t *fspace,
@@ -190,7 +195,7 @@ H5_DLL herr_t H5FS_sect_add(H5F_t *f, hid_t dxpl_id, H5FS_t *fspace,
H5_DLL htri_t H5FS_sect_try_merge(H5F_t *f, hid_t dxpl_id, H5FS_t *fspace,
H5FS_section_info_t *sect, unsigned flags, void *op_data);
H5_DLL htri_t H5FS_sect_try_extend(H5F_t *f, hid_t dxpl_id, H5FS_t *fspace,
- haddr_t addr, hsize_t size, hsize_t extra_requested);
+ haddr_t addr, hsize_t size, hsize_t extra_requested, unsigned flags, void *op_data);
H5_DLL herr_t H5FS_sect_remove(H5F_t *f, hid_t dxpl_id, H5FS_t *fspace,
H5FS_section_info_t *node);
H5_DLL htri_t H5FS_sect_find(H5F_t *f, hid_t dxpl_id, H5FS_t *fspace,
@@ -200,11 +205,18 @@ H5_DLL herr_t H5FS_sect_stats(const H5FS_t *fspace, hsize_t *tot_space,
hsize_t *nsects);
H5_DLL herr_t H5FS_sect_change_class(H5F_t *f, hid_t dxpl_id, H5FS_t *fspace,
H5FS_section_info_t *sect, uint16_t new_class);
-H5_DLL htri_t H5FS_sect_try_shrink_eoa(const H5F_t *f, hid_t dxpl_id, const H5FS_t *fspace, void *op_data);
-H5_DLL herr_t H5FS_sect_query_last_sect(const H5FS_t *fspace, haddr_t *sect_addr, hsize_t *sect_size);
+H5_DLL htri_t H5FS_sect_try_shrink_eoa(H5F_t *f, hid_t dxpl_id, H5FS_t *fspace,
+ void *op_data);
/* Statistics routine */
H5_DLL herr_t H5FS_stat_info(const H5F_t *f, const H5FS_t *frsp, H5FS_stat_t *stats);
+H5_DLL herr_t H5FS_get_sect_count(const H5FS_t *frsp, hsize_t *tot_sect_count);
+
+/* free space manager settling routines */
+H5_DLL herr_t H5FS_vfd_alloc_hdr_and_section_info_if_needed(H5F_t *f,
+ hid_t dxpl_id,
+ H5FS_t *fspace,
+ haddr_t *fs_addr_ptr);
/* Debugging routines for dumping file structures */
H5_DLL herr_t H5FS_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr,
diff --git a/src/H5FSpublic.h b/src/H5FSpublic.h
index 87debe8..3090d0d 100644
--- a/src/H5FSpublic.h
+++ b/src/H5FSpublic.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5FSsection.c b/src/H5FSsection.c
index 766a823..8f911a4 100644
--- a/src/H5FSsection.c
+++ b/src/H5FSsection.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -25,6 +23,8 @@
/* Module Setup */
/****************/
+#define H5F_FRIEND /*suppress error about including H5Fpkg */
+
#include "H5FSmodule.h" /* This source code file is part of the H5FS module */
@@ -33,6 +33,7 @@
/***********/
#include "H5private.h" /* Generic Functions */
#include "H5Eprivate.h" /* Error handling */
+#include "H5Fpkg.h" /* File access */
#include "H5FSpkg.h" /* File free space */
#include "H5MFprivate.h" /* File memory management */
#include "H5VMprivate.h" /* Vectors and arrays */
@@ -1210,11 +1211,13 @@ H5FS_sect_merge(H5FS_t *fspace, H5FS_section_info_t **sect, void *op_data)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTRELEASE, FAIL, "can't remove section from internal data structures")
/* Merge the two sections together */
- if((*tmp_sect_cls->merge)(tmp_sect, *sect, op_data) < 0)
+ if((*tmp_sect_cls->merge)(&tmp_sect, *sect, op_data) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTINSERT, FAIL, "can't merge two sections")
/* Retarget section pointer to 'less than' node that was merged into */
*sect = tmp_sect;
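+        /* The merge may have caused the section to be deleted (particularly in the paged allocation case) */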
+ if(*sect == NULL)
+ HGOTO_DONE(ret_value);
/* Indicate successful merge occurred */
modified = TRUE;
@@ -1254,9 +1257,13 @@ H5FS_sect_merge(H5FS_t *fspace, H5FS_section_info_t **sect, void *op_data)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTRELEASE, FAIL, "can't remove section from internal data structures")
/* Merge the two sections together */
- if((*sect_cls->merge)(*sect, tmp_sect, op_data) < 0)
+ if((*sect_cls->merge)(sect, tmp_sect, op_data) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTINSERT, FAIL, "can't merge two sections")
+ /* It's possible that the merge caused the section to be deleted (particularly in the paged allocation case) */
+ if(*sect == NULL)
+ HGOTO_DONE(ret_value);
+
/* Indicate successful merge occurred */
modified = TRUE;
} /* end if */
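
Because a merge can now delete the surviving section outright, the 'add' and 'merge' class callbacks take H5FS_section_info_t ** instead of H5FS_section_info_t *. A hedged sketch of what an updated merge callback might look like (section_should_be_dropped is a hypothetical helper, not part of HDF5):

    /* Merge sect2 into *sect1; if the merged section should no longer be
     * tracked (e.g. it now spans a whole page under paged aggregation),
     * release it and set *sect1 to NULL so the caller can notice.
     */
    static herr_t
    example_sect_merge(H5FS_section_info_t **sect1, H5FS_section_info_t *sect2, void *udata)
    {
        (*sect1)->size += sect2->size;          /* absorb the adjoining section */
        /* ... free sect2 here ... */

        if(section_should_be_dropped(*sect1, udata)) {   /* hypothetical check */
            /* ... free *sect1 here ... */
            *sect1 = NULL;                      /* signal deletion to H5FS_sect_merge() */
        }

        return SUCCEED;
    }
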
@@ -1383,7 +1390,7 @@ HDfprintf(stderr, "%s: *sect = {%a, %Hu, %u, %s}\n", FUNC, sect->addr, sect->siz
/* Call "add" section class callback, if there is one */
cls = &fspace->sect_cls[sect->type];
if(cls->add) {
- if((*cls->add)(sect, &flags, op_data) < 0)
+ if((*cls->add)(&sect, &flags, op_data) < 0)
HGOTO_ERROR(H5E_FSPACE, H5E_CANTINSERT, FAIL, "'add' section class callback failed")
} /* end if */
@@ -1411,7 +1418,7 @@ HDfprintf(stderr, "%s: fspace->tot_space = %Hu\n", FUNC, fspace->tot_space);
#endif /* H5FS_SINFO_DEBUG */
/* Mark free space sections as changed */
/* (if adding sections while deserializing sections, don't set the flag) */
- if(!(flags & H5FS_ADD_DESERIALIZING))
+ if(!(flags & (H5FS_ADD_DESERIALIZING | H5FS_PAGE_END_NO_ADD)))
sinfo_modified = TRUE;
done:
@@ -1444,7 +1451,7 @@ HDfprintf(stderr, "%s: Leaving, ret_value = %d\n", FUNC, ret_value);
*/
htri_t
H5FS_sect_try_extend(H5F_t *f, hid_t dxpl_id, H5FS_t *fspace, haddr_t addr,
- hsize_t size, hsize_t extra_requested)
+ hsize_t size, hsize_t extra_requested, unsigned flags, void *op_data)
{
hbool_t sinfo_valid = FALSE; /* Whether the section info is valid */
hbool_t sinfo_modified = FALSE; /* Whether the section info was modified */
@@ -1528,10 +1535,16 @@ if(_section_)
/* Adjust section by amount requested */
sect->addr += extra_requested;
sect->size -= extra_requested;
-
- /* Re-add adjusted section to free sections data structures */
- if(H5FS_sect_link(fspace, sect, 0) < 0)
- HGOTO_ERROR(H5E_FSPACE, H5E_CANTINSERT, FAIL, "can't insert free space section into skip list")
+ if(cls->add)
+ if((*cls->add)(&sect, &flags, op_data) < 0)
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTINSERT, FAIL, "'add' section class callback failed")
+
+ /* Re-adding the section could cause it to disappear (particularly when paging) */
+ if(sect) {
+ /* Re-add adjusted section to free sections data structures */
+ if(H5FS_sect_link(fspace, sect, 0) < 0)
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTINSERT, FAIL, "can't insert free space section into skip list")
+ } /* end if */
} /* end if */
else {
/* Sanity check */
@@ -1667,7 +1680,7 @@ HDfprintf(stderr, "%s: fspace->sinfo->nbins = %u\n", FUNC, fspace->sinfo->nbins)
HDfprintf(stderr, "%s: bin = %u\n", FUNC, bin);
#endif /* QAK */
alignment = fspace->alignment;
- if(!((alignment > 1) && (request >= fspace->threshold)))
+ if(!((alignment > 1) && (request >= fspace->align_thres)))
alignment = 0; /* no alignment */
do {
@@ -1725,10 +1738,10 @@ HDfprintf(stderr, "%s: bin = %u\n", FUNC, bin);
HDassert(alignment);
HDassert(cls);
- if ((mis_align = curr_sect->addr % alignment))
+ if((mis_align = curr_sect->addr % alignment))
frag_size = alignment - mis_align;
- if ((curr_sect->size >= (request + frag_size)) && (cls->split)) {
+ if((curr_sect->size >= (request + frag_size)) && (cls->split)) {
/* remove the section with aligned address */
if(NULL == (*node = (H5FS_section_info_t *)H5SL_remove(curr_fspace_node->sect_list, &curr_sect->addr)))
HGOTO_ERROR(H5E_FSPACE, H5E_CANTREMOVE, FAIL, "can't remove free space node from skip list")
@@ -1745,17 +1758,17 @@ HDfprintf(stderr, "%s: bin = %u\n", FUNC, bin);
* NODE's addr & size are updated to point to the remaining aligned section
* split_sect is re-added to free-space
*/
- if (mis_align) {
+ if(mis_align) {
split_sect = cls->split(*node, frag_size);
if((H5FS_sect_link(fspace, split_sect, 0) < 0))
HGOTO_ERROR(H5E_FSPACE, H5E_CANTINSERT, FAIL, "can't insert free space section into skip list")
/* sanity check */
HDassert(split_sect->addr < (*node)->addr);
HDassert(request <= (*node)->size);
- }
+ } /* end if */
/* Indicate that we found a node for the request */
HGOTO_DONE(TRUE)
- }
+ } /* end if */
/* Get the next section node in the list */
curr_sect_node = H5SL_next(curr_sect_node);
@@ -1831,7 +1844,7 @@ HDfprintf(stderr, "%s: fspace->ghost_sect_count = %Hu\n", FUNC, fspace->ghost_se
#ifdef QAK
HDfprintf(stderr, "%s: (*node)->size = %Hu, (*node)->addr = %a, (*node)->type = %u\n", FUNC, (*node)->size, (*node)->addr, (*node)->type);
#endif /* QAK */
- }
+ } /* end if */
} /* end if */
done:
@@ -2349,7 +2362,7 @@ HDfprintf(stderr, "%s: sect->size = %Hu, sect->addr = %a, sect->type = %u\n", "H
*-------------------------------------------------------------------------
*/
htri_t
-H5FS_sect_try_shrink_eoa(const H5F_t *f, hid_t dxpl_id, const H5FS_t *fspace, void *op_data)
+H5FS_sect_try_shrink_eoa(H5F_t *f, hid_t dxpl_id, H5FS_t *fspace, void *op_data)
{
hbool_t sinfo_valid = FALSE; /* Whether the section info is valid */
hbool_t section_removed = FALSE; /* Whether a section was removed */
@@ -2406,42 +2419,264 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5FS_sect_query_last_sect
- *
- * Purpose: Retrieve info about the last section on the merge list
- *
- * Return: Success: non-negative
- * Failure: negative
+ * Function: H5FS_vfd_alloc_hdr_and_section_info_if_needed
+ *
+ * Purpose: This function is part of a hack to patch over a design
+ * flaw in the free space managers for file space allocation.
+ * Specifically, if a free space manager allocates space for
+ * its own section info, it is possible for it to
+ * go into an infinite loop as it:
+ *
+ * 1) computes the size of the section info
+ *
+ * 2) allocates file space for the section info
+ *
+ * 3) notices that the size of the section info
+ * has changed
+ *
+ * 4) deallocates the section info file space and
+ * returns to 1) above.
+ *
+ * Similarly, if it allocates space for its own header, it
+ *              can go into an infinite loop as it:
+ *
+ * 1) allocates space for the header
+ *
+ * 2) notices that the free space manager is empty
+ * and thus should not have file space allocated
+ * to its header
+ *
+ * 3) frees the space allocated to the header
+ *
+ * 4) notices that the free space manager is not
+ * empty and thus must have file space allocated
+ *                 to it, and so returns to 1) above.
+ *
+ * In a nutshell, the solution applied in this hack is to
+ * deallocate file space for the free space manager(s) that
+ * handle FSM header and/or section info file space allocations,
+ * wait until all other allocation/deallocation requests have
+ * been handled, and then test to see if the free space manager(s)
+ * in question are empty. If they are, do nothing. If they
+ *              are not, allocate space for them at the end of the file, bypassing the
+ * usual file space allocation calls, and thus avoiding the
+ * potential infinite loops.
+ *
+ * The purpose of this function is to allocate file space for
+ * the header and section info of the target free space manager
+ * directly from the VFD if needed. In this case the function
+ * also re-inserts the header and section info in the metadata
+ * cache with this allocation.
+ *
+ * When paged allocation is not enabled, allocation of space
+ * for the free space manager header and section info is
+ * straight forward -- we simply allocate the space directly
+ * from file driver.
+ *
+ * Note that if f->shared->alignment > 1, and EOA is not a
+ * multiple of the alignment, it is possible that performing
+ * these allocations may generate a fragment of file space in
+ * addition to the space allocated for the section info. This
+ * excess space is dropped on the floor. As shall be seen,
+ * it will usually be reclaimed later.
+ *
+ *              When paged allocation is enabled, things are more difficult,
+ * as there is the possibility that page buffering will be
+ * enabled when the free space managers are read. To allow
+ * for this, we must ensure that space allocated for the
+ * free space manager header and section info is either larger
+ * than a page, or resides completely within a page.
+ *
+ * Do this by allocating space for the free space header and
+ * section info starting at page boundaries, and extending
+ * allocation to the next page boundary. This of course wastes
+ * space, but see below.
+ *
+ * On the first free space allocation / deallocation after the
+ * next file open, we will read the self referential free space
+ * managers, float them and reduce the EOA to its value prior
+ * to allocation of file space for the self referential free
+ * space managers on the preceding file close. This EOA value
+ * is stored in the free space manager superblock extension
+ * message.
+ *
+ * Return: Success: non-negative
+ * Failure: negative
*
- * Programmer: Vailin Choi
+ * Programmer: John Mainzer
+ * 6/6/16
*
*-------------------------------------------------------------------------
*/
herr_t
-H5FS_sect_query_last_sect(const H5FS_t *fspace, haddr_t *sect_addr, hsize_t *sect_size)
+H5FS_vfd_alloc_hdr_and_section_info_if_needed(H5F_t *f, hid_t dxpl_id,
+ H5FS_t *fspace, haddr_t *fs_addr_ptr)
{
- FUNC_ENTER_NOAPI_NOINIT_NOERR
+ hsize_t hdr_alloc_size;
+ hsize_t sinfo_alloc_size;
+ haddr_t sect_addr = HADDR_UNDEF; /* address of sinfo */
+ haddr_t eoa_frag_addr = HADDR_UNDEF; /* Address of fragment at EOA */
+ hsize_t eoa_frag_size = 0; /* Size of fragment at EOA */
+ haddr_t eoa = HADDR_UNDEF; /* Initial EOA for the file */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
/* Check arguments. */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(f->shared->lf);
HDassert(fspace);
+ HDassert(fs_addr_ptr);
- if(fspace->sinfo && fspace->sinfo->merge_list) {
- H5SL_node_t *last_node; /* Last node in merge list */
+ /* the section info should be unlocked */
+ HDassert(fspace->sinfo_lock_count == 0);
- /* Check for last node in the merge list */
- if(NULL != (last_node = H5SL_last(fspace->sinfo->merge_list))) {
- H5FS_section_info_t *tmp_sect; /* Temporary free space section */
+ /* no space should be allocated */
+ HDassert(*fs_addr_ptr == HADDR_UNDEF);
+ HDassert(fspace->addr == HADDR_UNDEF);
+ HDassert(fspace->sect_addr == HADDR_UNDEF);
+ HDassert(fspace->alloc_sect_size == 0);
- /* Get the pointer to the last section, from the last node */
- tmp_sect = (H5FS_section_info_t *)H5SL_item(last_node);
- HDassert(tmp_sect);
- if(sect_addr)
- *sect_addr = tmp_sect->addr;
- if(sect_size)
- *sect_size = tmp_sect->size;
- } /* end if */
+ /* persistent free space managers must be enabled */
+ HDassert(f->shared->fs_persist);
+
+ /* At present, only the following two file space strategies enable the
+ * free-space managers. If this changes, the assertion below should be
+ * revisited.
+ */
+ HDassert((f->shared->fs_strategy == H5F_FSPACE_STRATEGY_FSM_AGGR) ||
+ (f->shared->fs_strategy == H5F_FSPACE_STRATEGY_PAGE));
+
+ if(fspace->serial_sect_count > 0) {
+ /* the section info is floating, so fspace->sinfo should be defined */
+ HDassert(fspace->sinfo);
+
+ /* start by allocating file space for the header */
+
+ /* Get the EOA for the file -- needed for the sanity check below */
+ if(HADDR_UNDEF == (eoa = H5F_get_eoa(f, H5FD_MEM_FSPACE_HDR)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "Unable to get eoa")
+
+ /* check for overlap into temporary allocation space */
+ if(H5F_IS_TMP_ADDR(f, (eoa + fspace->sect_size)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_BADRANGE, FAIL, "hdr file space alloc will overlap into 'temporary' file space")
+
+ hdr_alloc_size = H5FS_HEADER_SIZE(f);
+
+ /* if page allocation is enabled, extend the hdr_alloc_size to the
+ * next page boundary.
+ */
+ if(H5F_PAGED_AGGR(f)) {
+ HDassert(0 == (eoa % f->shared->fs_page_size));
+
+ hdr_alloc_size = ((hdr_alloc_size / f->shared->fs_page_size) + 1) * f->shared->fs_page_size;
+
+ HDassert(hdr_alloc_size >= H5FS_HEADER_SIZE(f));
+ HDassert((hdr_alloc_size % f->shared->fs_page_size) == 0);
+ } /* end if */
+
+ /* allocate space for the hdr */
+ if(HADDR_UNDEF == (fspace->addr = H5FD_alloc(f->shared->lf, dxpl_id,
+ H5FD_MEM_FSPACE_HDR, f,
+ hdr_alloc_size,
+ &eoa_frag_addr,
+ &eoa_frag_size)))
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTALLOC, FAIL, "can't allocate file space for hdr")
+
+ /* if the file alignment is 1, there should be no
+ * eoa fragment. Otherwise, drop any fragment on the floor.
+ */
+ HDassert((eoa_frag_size == 0) || (f->shared->alignment != 1));
+
+ /* Cache the new free space header (pinned) */
+ if(H5AC_insert_entry(f, dxpl_id, H5AC_FSPACE_HDR, fspace->addr, fspace, H5AC__PIN_ENTRY_FLAG) < 0)
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTINIT, FAIL, "can't add free space header to cache")
+
+ *fs_addr_ptr = fspace->addr;
+
+ /* now allocate file space for the section info */
+
+ /* Get the EOA for the file -- needed for the sanity check below */
+ if(HADDR_UNDEF == (eoa = H5F_get_eoa(f, H5FD_MEM_FSPACE_SINFO)))
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTGET, FAIL, "Unable to get eoa")
+
+ /* check for overlap into temporary allocation space */
+ if(H5F_IS_TMP_ADDR(f, (eoa + fspace->sect_size)))
+ HGOTO_ERROR(H5E_FSPACE, H5E_BADRANGE, FAIL, "sinfo file space alloc will overlap into 'temporary' file space")
+
+ sinfo_alloc_size = fspace->sect_size;
+
+ /* if paged allocation is enabled, extend the sinfo_alloc_size to the
+ * next page boundary.
+ */
+ if(H5F_PAGED_AGGR(f)) {
+ HDassert(0 == (eoa % f->shared->fs_page_size));
+
+ sinfo_alloc_size = ((sinfo_alloc_size / f->shared->fs_page_size) + 1) * f->shared->fs_page_size;
+
+ HDassert(sinfo_alloc_size >= fspace->sect_size);
+ HDassert((sinfo_alloc_size % f->shared->fs_page_size) == 0);
+ } /* end if */
+
+ /* allocate space for the section info */
+ if(HADDR_UNDEF == (sect_addr = H5FD_alloc(f->shared->lf, dxpl_id,
+ H5FD_MEM_FSPACE_SINFO, f,
+ sinfo_alloc_size,
+ &eoa_frag_addr,
+ &eoa_frag_size)))
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTALLOC, FAIL, "can't allocate file space")
+
+ /* if the file alignment is 1, there should be no
+ * eoa fragment. Otherwise, drop the fragment on the floor.
+ */
+ HDassert((eoa_frag_size == 0) || (f->shared->alignment != 1));
+
+ /* update fspace->alloc_sect_size and fspace->sect_addr to reflect
+ * the allocation
+ */
+ fspace->alloc_sect_size = fspace->sect_size;
+ fspace->sect_addr = sect_addr;
+
+ /* insert the new section info into the metadata cache. */
+
+ /* Question: Do we need to worry about this insertion causing an
+ * eviction from the metadata cache? Think on this. If so, add a
+ * flag to H5AC_insert() to force it to skip the call to make space in
+ * cache.
+ *
+ * On reflection, no.
+ *
+ * On a regular file close, any eviction will not change the
+ * contents of the free space manager(s), as all entries
+ * should have correct file space allocated by the time this
+ * function is called.
+ *
+ * In the cache image case, the selection of entries for inclusion
+ * in the cache image will not take place until after this call.
+ * (Recall that this call is made during the metadata fsm settle
+ * routine, which is called during the serialization routine in
+ * the cache image case. Entries are not selected for inclusion
+ * in the image until after the cache is serialized.)
+ *
+ * JRM -- 11/4/16
+ */
+ if(H5AC_insert_entry(f, dxpl_id, H5AC_FSPACE_SINFO, sect_addr, fspace->sinfo, H5AC__NO_FLAGS_SET) < 0)
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTINIT, FAIL, "can't add free space sinfo to cache")
+
+ /* We have changed the sinfo address -- Mark free space header dirty */
+ if(H5AC_mark_entry_dirty(fspace) < 0)
+ HGOTO_ERROR(H5E_FSPACE, H5E_CANTMARKDIRTY, FAIL, "unable to mark free space header as dirty")
+
+ /* since space has been allocated for the section info and the sinfo
+ * has been inserted into the cache, relinquish ownership (i.e., float)
+ * the section info.
+ */
+ fspace->sinfo = NULL;
} /* end if */
- FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5FS_sect_query_last_sect() */
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5FS_vfd_alloc_hdr_and_section_info_if_needed() */
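Both hdr_alloc_size and sinfo_alloc_size above are extended to the next page boundary when paged aggregation is in use. A minimal standalone sketch of that rounding (the helper name round_up_to_next_page is hypothetical, and uint64_t stands in for hsize_t):

#include <assert.h>
#include <stdint.h>

/* Hypothetical helper mirroring the rounding used above: extend an
 * allocation request to the next page boundary.  As in the code above,
 * a request that already ends exactly on a boundary is still pushed to
 * the following page, so the result is always strictly larger than the
 * request.
 */
static uint64_t
round_up_to_next_page(uint64_t alloc_size, uint64_t page_size)
{
    assert(page_size > 0);
    return ((alloc_size / page_size) + 1) * page_size;
}

/* Example: with a 4096-byte page, a 100-byte header request becomes
 * 4096 bytes and a 4096-byte request becomes 8192 bytes.
 */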
diff --git a/src/H5FSstat.c b/src/H5FSstat.c
index 3200849..e2ad5bb 100644
--- a/src/H5FSstat.c
+++ b/src/H5FSstat.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5FStest.c b/src/H5FStest.c
index 06f5166..5ab0219 100644
--- a/src/H5FStest.c
+++ b/src/H5FStest.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Faccum.c b/src/H5Faccum.c
index 48f9bdd..e84cfda 100644
--- a/src/H5Faccum.c
+++ b/src/H5Faccum.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -112,18 +110,25 @@ H5FL_BLK_DEFINE_STATIC(meta_accum);
*-------------------------------------------------------------------------
*/
herr_t
-H5F__accum_read(const H5F_io_info_t *fio_info, H5FD_mem_t map_type, haddr_t addr,
+H5F__accum_read(const H5F_io_info2_t *fio_info, H5FD_mem_t map_type, haddr_t addr,
size_t size, void *buf/*out*/)
{
+ H5FD_io_info_t fdio_info; /* File driver I/O info */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
HDassert(fio_info);
HDassert(fio_info->f);
- HDassert(fio_info->dxpl);
+ HDassert(fio_info->meta_dxpl);
+ HDassert(fio_info->raw_dxpl);
HDassert(buf);
+ /* Translate to file driver I/O info object */
+ fdio_info.file = fio_info->f->shared->lf;
+ fdio_info.meta_dxpl = fio_info->meta_dxpl;
+ fdio_info.raw_dxpl = fio_info->raw_dxpl;
+
/* Check if this information is in the metadata accumulator */
if((fio_info->f->shared->feature_flags & H5FD_FEAT_ACCUMULATE_METADATA) && map_type != H5FD_MEM_DRAW) {
H5F_meta_accum_t *accum; /* Alias for file's metadata accumulator */
@@ -178,7 +183,7 @@ H5F__accum_read(const H5F_io_info_t *fio_info, H5FD_mem_t map_type, haddr_t addr
accum->dirty_off += amount_before;
/* Dispatch to driver */
- if(H5FD_read(fio_info->f->shared->lf, fio_info->dxpl, map_type, addr, amount_before, accum->buf) < 0)
+ if(H5FD_read(&fdio_info, map_type, addr, amount_before, accum->buf) < 0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "driver read request failed")
} /* end if */
else
@@ -192,7 +197,7 @@ H5F__accum_read(const H5F_io_info_t *fio_info, H5FD_mem_t map_type, haddr_t addr
H5_CHECKED_ASSIGN(amount_after, size_t, ((addr + size) - (accum->loc + accum->size)), hsize_t);
/* Dispatch to driver */
- if(H5FD_read(fio_info->f->shared->lf, fio_info->dxpl, map_type, (accum->loc + accum->size), amount_after, (accum->buf + accum->size + amount_before)) < 0)
+ if(H5FD_read(&fdio_info, map_type, (accum->loc + accum->size), amount_after, (accum->buf + accum->size + amount_before)) < 0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "driver read request failed")
} /* end if */
@@ -206,13 +211,13 @@ H5F__accum_read(const H5F_io_info_t *fio_info, H5FD_mem_t map_type, haddr_t addr
/* Current read doesn't overlap with metadata accumulator, read it from file */
else {
/* Dispatch to driver */
- if(H5FD_read(fio_info->f->shared->lf, fio_info->dxpl, map_type, addr, size, buf) < 0)
+ if(H5FD_read(&fdio_info, map_type, addr, size, buf) < 0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "driver read request failed")
} /* end else */
} /* end if */
else {
/* Read the data */
- if(H5FD_read(fio_info->f->shared->lf, fio_info->dxpl, map_type, addr, size, buf) < 0)
+ if(H5FD_read(&fdio_info, map_type, addr, size, buf) < 0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "driver read request failed")
/* Check for overlap w/dirty accumulator */
@@ -255,7 +260,7 @@ H5F__accum_read(const H5F_io_info_t *fio_info, H5FD_mem_t map_type, haddr_t addr
} /* end if */
else {
/* Read the data */
- if(H5FD_read(fio_info->f->shared->lf, fio_info->dxpl, map_type, addr, size, buf) < 0)
+ if(H5FD_read(&fdio_info, map_type, addr, size, buf) < 0)
HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "driver read request failed")
} /* end else */
@@ -278,7 +283,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5F__accum_adjust(H5F_meta_accum_t *accum, const H5F_io_info_t *fio_info,
+H5F__accum_adjust(H5F_meta_accum_t *accum, const H5FD_io_info_t *fdio_info,
H5F_accum_adjust_t adjust, size_t size)
{
herr_t ret_value = SUCCEED; /* Return value */
@@ -286,7 +291,7 @@ H5F__accum_adjust(H5F_meta_accum_t *accum, const H5F_io_info_t *fio_info,
FUNC_ENTER_STATIC
HDassert(accum);
- HDassert(fio_info);
+ HDassert(fdio_info);
HDassert(H5F_ACCUM_APPEND == adjust || H5F_ACCUM_PREPEND == adjust);
HDassert(size > 0);
HDassert(size <= H5F_ACCUM_MAX_SIZE);
@@ -344,7 +349,7 @@ H5F__accum_adjust(H5F_meta_accum_t *accum, const H5F_io_info_t *fio_info,
/* Check if the dirty region overlaps the region to eliminate from the accumulator */
if((accum->size - shrink_size) < (accum->dirty_off + accum->dirty_len)) {
/* Write out the dirty region from the metadata accumulator, with dispatch to driver */
- if(H5FD_write(fio_info->f->shared->lf, fio_info->dxpl, H5FD_MEM_DEFAULT, (accum->loc + accum->dirty_off), accum->dirty_len, (accum->buf + accum->dirty_off)) < 0)
+ if(H5FD_write(fdio_info, H5FD_MEM_DEFAULT, (accum->loc + accum->dirty_off), accum->dirty_len, (accum->buf + accum->dirty_off)) < 0)
HGOTO_ERROR(H5E_FILE, H5E_WRITEERROR, FAIL, "file write failed")
/* Reset accumulator dirty flag */
@@ -355,7 +360,7 @@ H5F__accum_adjust(H5F_meta_accum_t *accum, const H5F_io_info_t *fio_info,
/* Check if the dirty region overlaps the region to eliminate from the accumulator */
if(shrink_size > accum->dirty_off) {
/* Write out the dirty region from the metadata accumulator, with dispatch to driver */
- if(H5FD_write(fio_info->f->shared->lf, fio_info->dxpl, H5FD_MEM_DEFAULT, (accum->loc + accum->dirty_off), accum->dirty_len, (accum->buf + accum->dirty_off)) < 0)
+ if(H5FD_write(fdio_info, H5FD_MEM_DEFAULT, (accum->loc + accum->dirty_off), accum->dirty_len, (accum->buf + accum->dirty_off)) < 0)
HGOTO_ERROR(H5E_FILE, H5E_WRITEERROR, FAIL, "file write failed")
/* Reset accumulator dirty flag */
@@ -417,9 +422,10 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5F__accum_write(const H5F_io_info_t *fio_info, H5FD_mem_t map_type, haddr_t addr,
+H5F__accum_write(const H5F_io_info2_t *fio_info, H5FD_mem_t map_type, haddr_t addr,
size_t size, const void *buf)
{
+ H5FD_io_info_t fdio_info; /* File driver I/O info */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -427,9 +433,15 @@ H5F__accum_write(const H5F_io_info_t *fio_info, H5FD_mem_t map_type, haddr_t add
HDassert(fio_info);
HDassert(fio_info->f);
HDassert(H5F_INTENT(fio_info->f) & H5F_ACC_RDWR);
- HDassert(fio_info->dxpl);
+ HDassert(fio_info->meta_dxpl);
+ HDassert(fio_info->raw_dxpl);
HDassert(buf);
+ /* Translate to file driver I/O info object */
+ fdio_info.file = fio_info->f->shared->lf;
+ fdio_info.meta_dxpl = fio_info->meta_dxpl;
+ fdio_info.raw_dxpl = fio_info->raw_dxpl;
+
/* Check for accumulating metadata */
if((fio_info->f->shared->feature_flags & H5FD_FEAT_ACCUMULATE_METADATA) && map_type != H5FD_MEM_DRAW) {
H5F_meta_accum_t *accum; /* Alias for file's metadata accumulator */
@@ -446,7 +458,7 @@ H5F__accum_write(const H5F_io_info_t *fio_info, H5FD_mem_t map_type, haddr_t add
/* Check if the new metadata adjoins the beginning of the current accumulator */
if((addr + size) == accum->loc) {
/* Check if we need to adjust accumulator size */
- if(H5F__accum_adjust(accum, fio_info, H5F_ACCUM_PREPEND, size) < 0)
+ if(H5F__accum_adjust(accum, &fdio_info, H5F_ACCUM_PREPEND, size) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTRESIZE, FAIL, "can't adjust metadata accumulator")
/* Move the existing metadata to the proper location */
@@ -471,7 +483,7 @@ H5F__accum_write(const H5F_io_info_t *fio_info, H5FD_mem_t map_type, haddr_t add
/* Check if the new metadata adjoins the end of the current accumulator */
else if(addr == (accum->loc + accum->size)) {
/* Check if we need to adjust accumulator size */
- if(H5F__accum_adjust(accum, fio_info, H5F_ACCUM_APPEND, size) < 0)
+ if(H5F__accum_adjust(accum, &fdio_info, H5F_ACCUM_APPEND, size) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTRESIZE, FAIL, "can't adjust metadata accumulator")
/* Copy the new metadata to the end */
@@ -531,7 +543,7 @@ H5F__accum_write(const H5F_io_info_t *fio_info, H5FD_mem_t map_type, haddr_t add
H5_CHECKED_ASSIGN(add_size, size_t, (accum->loc - addr), hsize_t);
/* Check if we need to adjust accumulator size */
- if(H5F__accum_adjust(accum, fio_info, H5F_ACCUM_PREPEND, add_size) < 0)
+ if(H5F__accum_adjust(accum, &fdio_info, H5F_ACCUM_PREPEND, add_size) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTRESIZE, FAIL, "can't adjust metadata accumulator")
/* Calculate the proper offset of the existing metadata */
@@ -571,7 +583,7 @@ H5F__accum_write(const H5F_io_info_t *fio_info, H5FD_mem_t map_type, haddr_t add
H5_CHECKED_ASSIGN(add_size, size_t, (addr + size) - (accum->loc + accum->size), hsize_t);
/* Check if we need to adjust accumulator size */
- if(H5F__accum_adjust(accum, fio_info, H5F_ACCUM_APPEND, add_size) < 0)
+ if(H5F__accum_adjust(accum, &fdio_info, H5F_ACCUM_APPEND, add_size) < 0)
HGOTO_ERROR(H5E_IO, H5E_CANTRESIZE, FAIL, "can't adjust metadata accumulator")
/* Compute offset of dirty region (after adjusting accumulator) */
@@ -637,7 +649,7 @@ H5F__accum_write(const H5F_io_info_t *fio_info, H5FD_mem_t map_type, haddr_t add
else {
/* Write out the existing metadata accumulator, with dispatch to driver */
if(accum->dirty) {
- if(H5FD_write(fio_info->f->shared->lf, fio_info->dxpl, H5FD_MEM_DEFAULT, accum->loc + accum->dirty_off, accum->dirty_len, accum->buf + accum->dirty_off) < 0)
+ if(H5FD_write(&fdio_info, H5FD_MEM_DEFAULT, accum->loc + accum->dirty_off, accum->dirty_len, accum->buf + accum->dirty_off) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "file write failed")
/* Reset accumulator dirty flag */
@@ -733,7 +745,7 @@ H5F__accum_write(const H5F_io_info_t *fio_info, H5FD_mem_t map_type, haddr_t add
HGOTO_ERROR(H5E_IO, H5E_CANTRESET, FAIL, "can't reset accumulator")
/* Write the data */
- if(H5FD_write(fio_info->f->shared->lf, fio_info->dxpl, map_type, addr, size, buf) < 0)
+ if(H5FD_write(&fdio_info, map_type, addr, size, buf) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "file write failed")
/* Check for overlap w/accumulator */
@@ -818,7 +830,7 @@ H5F__accum_write(const H5F_io_info_t *fio_info, H5FD_mem_t map_type, haddr_t add
} /* end if */
else {
/* Write the data */
- if(H5FD_write(fio_info->f->shared->lf, fio_info->dxpl, map_type, addr, size, buf) < 0)
+ if(H5FD_write(&fdio_info, map_type, addr, size, buf) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "file write failed")
} /* end else */
@@ -842,10 +854,11 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5F__accum_free(const H5F_io_info_t *fio_info, H5FD_mem_t H5_ATTR_UNUSED type, haddr_t addr,
+H5F__accum_free(const H5F_io_info2_t *fio_info, H5FD_mem_t H5_ATTR_UNUSED type, haddr_t addr,
hsize_t size)
{
H5F_meta_accum_t *accum; /* Alias for file's metadata accumulator */
+ H5FD_io_info_t fdio_info; /* File driver I/O info */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
@@ -853,11 +866,17 @@ H5F__accum_free(const H5F_io_info_t *fio_info, H5FD_mem_t H5_ATTR_UNUSED type, h
/* check arguments */
HDassert(fio_info);
HDassert(fio_info->f);
- HDassert(fio_info->dxpl);
+ HDassert(fio_info->meta_dxpl);
+ HDassert(fio_info->raw_dxpl);
/* Set up alias for file's metadata accumulator info */
accum = &fio_info->f->shared->accum;
+ /* Translate to file driver I/O info object */
+ fdio_info.file = fio_info->f->shared->lf;
+ fdio_info.meta_dxpl = fio_info->meta_dxpl;
+ fdio_info.raw_dxpl = fio_info->raw_dxpl;
+
/* Adjust the metadata accumulator to remove the freed block, if it overlaps */
if((fio_info->f->shared->feature_flags & H5FD_FEAT_ACCUMULATE_METADATA)
&& H5F_addr_overlap(addr, size, accum->loc, accum->size)) {
@@ -930,7 +949,7 @@ H5F__accum_free(const H5F_io_info_t *fio_info, H5FD_mem_t H5_ATTR_UNUSED type, h
/* Check if block to free is entirely before dirty region */
if(H5F_addr_le(tail_addr, dirty_start)) {
/* Write out the entire dirty region of the accumulator */
- if(H5FD_write(fio_info->f->shared->lf, fio_info->dxpl, H5FD_MEM_DEFAULT, dirty_start, accum->dirty_len, accum->buf + accum->dirty_off) < 0)
+ if(H5FD_write(&fdio_info, H5FD_MEM_DEFAULT, dirty_start, accum->dirty_len, accum->buf + accum->dirty_off) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "file write failed")
} /* end if */
/* Block to free overlaps with some/all of dirty region */
@@ -945,7 +964,7 @@ H5F__accum_free(const H5F_io_info_t *fio_info, H5FD_mem_t H5_ATTR_UNUSED type, h
HDassert(write_size > 0);
/* Write out the unfreed dirty region of the accumulator */
- if(H5FD_write(fio_info->f->shared->lf, fio_info->dxpl, H5FD_MEM_DEFAULT, dirty_start + dirty_delta, write_size, accum->buf + accum->dirty_off + dirty_delta) < 0)
+ if(H5FD_write(&fdio_info, H5FD_MEM_DEFAULT, dirty_start + dirty_delta, write_size, accum->buf + accum->dirty_off + dirty_delta) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "file write failed")
} /* end if */
@@ -965,7 +984,7 @@ H5F__accum_free(const H5F_io_info_t *fio_info, H5FD_mem_t H5_ATTR_UNUSED type, h
HDassert(write_size > 0);
/* Write out the unfreed end of the dirty region of the accumulator */
- if(H5FD_write(fio_info->f->shared->lf, fio_info->dxpl, H5FD_MEM_DEFAULT, dirty_start + dirty_delta, write_size, accum->buf + accum->dirty_off + dirty_delta) < 0)
+ if(H5FD_write(&fdio_info, H5FD_MEM_DEFAULT, dirty_start + dirty_delta, write_size, accum->buf + accum->dirty_off + dirty_delta) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "file write failed")
} /* end if */
@@ -1006,7 +1025,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5F__accum_flush(const H5F_io_info_t *fio_info)
+H5F__accum_flush(const H5F_io_info2_t *fio_info)
{
herr_t ret_value = SUCCEED; /* Return value */
@@ -1014,12 +1033,20 @@ H5F__accum_flush(const H5F_io_info_t *fio_info)
HDassert(fio_info);
HDassert(fio_info->f);
- HDassert(fio_info->dxpl);
+ HDassert(fio_info->meta_dxpl);
+ HDassert(fio_info->raw_dxpl);
/* Check if we need to flush out the metadata accumulator */
if((fio_info->f->shared->feature_flags & H5FD_FEAT_ACCUMULATE_METADATA) && fio_info->f->shared->accum.dirty) {
+ H5FD_io_info_t fdio_info; /* File driver I/O info */
+
+ /* Translate to file driver I/O info object */
+ fdio_info.file = fio_info->f->shared->lf;
+ fdio_info.meta_dxpl = fio_info->meta_dxpl;
+ fdio_info.raw_dxpl = fio_info->raw_dxpl;
+
/* Flush the metadata contents */
- if(H5FD_write(fio_info->f->shared->lf, fio_info->dxpl, H5FD_MEM_DEFAULT, fio_info->f->shared->accum.loc + fio_info->f->shared->accum.dirty_off, fio_info->f->shared->accum.dirty_len, fio_info->f->shared->accum.buf + fio_info->f->shared->accum.dirty_off) < 0)
+ if(H5FD_write(&fdio_info, H5FD_MEM_DEFAULT, fio_info->f->shared->accum.loc + fio_info->f->shared->accum.dirty_off, fio_info->f->shared->accum.dirty_len, fio_info->f->shared->accum.buf + fio_info->f->shared->accum.dirty_off) < 0)
HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "file write failed")
/* Reset the dirty flag */
@@ -1045,7 +1072,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5F__accum_reset(const H5F_io_info_t *fio_info, hbool_t flush)
+H5F__accum_reset(const H5F_io_info2_t *fio_info, hbool_t flush)
{
herr_t ret_value = SUCCEED; /* Return value */
@@ -1053,7 +1080,6 @@ H5F__accum_reset(const H5F_io_info_t *fio_info, hbool_t flush)
HDassert(fio_info);
HDassert(fio_info->f);
- HDassert(fio_info->dxpl);
/* Flush any dirty data in accumulator, if requested */
if(flush)
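The same translation from the file-level I/O info to the driver-level I/O info recurs at the top of H5F__accum_read, H5F__accum_write, H5F__accum_free and H5F__accum_flush above. A condensed sketch of the pattern, with the field names as they appear in this patch (the helper function name is illustrative only; the real routines do this inline):

/* Illustrative helper only: build the driver-level H5FD_io_info_t, which
 * carries separate metadata and raw data transfer property lists, from
 * the file-level H5F_io_info2_t.
 */
static void
build_fdio_info(const H5F_io_info2_t *fio_info, H5FD_io_info_t *fdio_info)
{
    fdio_info->file      = fio_info->f->shared->lf;  /* low-level file driver   */
    fdio_info->meta_dxpl = fio_info->meta_dxpl;      /* metadata transfer plist */
    fdio_info->raw_dxpl  = fio_info->raw_dxpl;       /* raw data transfer plist */
}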
diff --git a/src/H5Fcwfs.c b/src/H5Fcwfs.c
index 32c73b0..04b86d2 100644
--- a/src/H5Fcwfs.c
+++ b/src/H5Fcwfs.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Fdbg.c b/src/H5Fdbg.c
index 11accc6..535b43d 100644
--- a/src/H5Fdbg.c
+++ b/src/H5Fdbg.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Quincey Koziol <koziol@ncsa.uiuc.edu>
diff --git a/src/H5Fdeprec.c b/src/H5Fdeprec.c
index 4a3bce0..03f5df8 100644
--- a/src/H5Fdeprec.c
+++ b/src/H5Fdeprec.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Fefc.c b/src/H5Fefc.c
index 42bf5d8..5652d15 100644
--- a/src/H5Fefc.c
+++ b/src/H5Fefc.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Ffake.c b/src/H5Ffake.c
index e191003..6072f2e 100644
--- a/src/H5Ffake.c
+++ b/src/H5Ffake.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "H5Fmodule.h" /* This source code file is part of the H5F module */
diff --git a/src/H5Fint.c b/src/H5Fint.c
index fd79dc2..fe532b2 100644
--- a/src/H5Fint.c
+++ b/src/H5Fint.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/****************/
@@ -76,6 +74,8 @@ typedef struct H5F_olist_t {
static int H5F_get_objects_cb(void *obj_ptr, hid_t obj_id, void *key);
static herr_t H5F_build_actual_name(const H5F_t *f, const H5P_genplist_t *fapl,
const char *name, char ** /*out*/ actual_name);
/* Declare a free list to manage the H5F_t struct */
+static herr_t H5F__flush_phase1(H5F_t *f, hid_t meta_dxpl_id);
+static herr_t H5F__flush_phase2(H5F_t *f, hid_t meta_dxpl_id, hid_t raw_dxpl_id, hbool_t closing);
/*********************/
@@ -180,12 +180,22 @@ H5F_get_access_plist(H5F_t *f, hbool_t app_ref)
efc_size = H5F_efc_max_nfiles(f->shared->efc);
if(H5P_set(new_plist, H5F_ACS_EFC_SIZE_NAME, &efc_size) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't set elink file cache size")
+ if(f->shared->page_buf != NULL) {
+ if(H5P_set(new_plist, H5F_ACS_PAGE_BUFFER_SIZE_NAME, &(f->shared->page_buf->max_size)) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't set page buffer size")
+ if(H5P_set(new_plist, H5F_ACS_PAGE_BUFFER_MIN_META_PERC_NAME, &(f->shared->page_buf->min_meta_perc)) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't set minimum metadata fraction of page buffer")
+ if(H5P_set(new_plist, H5F_ACS_PAGE_BUFFER_MIN_RAW_PERC_NAME, &(f->shared->page_buf->min_raw_perc)) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't set minimum raw data fraction of page buffer")
+ } /* end if */
#ifdef H5_HAVE_PARALLEL
if(H5P_set(new_plist, H5_COLL_MD_READ_FLAG_NAME, &(f->coll_md_read)) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't set collective metadata read flag")
if(H5P_set(new_plist, H5F_ACS_COLL_MD_WRITE_FLAG_NAME, &(f->coll_md_write)) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't set collective metadata read flag")
#endif /* H5_HAVE_PARALLEL */
+ if(H5P_set(new_plist, H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_NAME, &(f->shared->mdc_initCacheImageCfg)) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set initial metadata cache image config")
/* Prepare the driver property */
driver_prop.driver_id = f->shared->lf->driver_id;
@@ -491,7 +501,7 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5F_is_hdf5
+ * Function: H5F__is_hdf5
*
* Purpose: Check the file signature to detect an HDF5 file.
*
@@ -505,17 +515,14 @@ done:
*
* Programmer: Unknown
*
- * Modifications:
- * Robb Matzke, 1999-08-02
- * Rewritten to use the virtual file layer.
*-------------------------------------------------------------------------
*/
htri_t
-H5F_is_hdf5(const char *name, hid_t dxpl_id)
+H5F__is_hdf5(const char *name, hid_t meta_dxpl_id, hid_t raw_dxpl_id)
{
H5FD_t *file = NULL; /* Low-level file struct */
+ H5FD_io_info_t fdio_info; /* File driver I/O info */
haddr_t sig_addr; /* Address of hdf5 file signature */
- H5P_genplist_t *xfer_plist= NULL; /* Dataset transfer property list object */
htri_t ret_value = FAIL; /* Return value */
FUNC_ENTER_NOAPI_NOINIT
@@ -524,12 +531,15 @@ H5F_is_hdf5(const char *name, hid_t dxpl_id)
if(NULL == (file = H5FD_open(name, H5F_ACC_RDONLY, H5P_FILE_ACCESS_DEFAULT, HADDR_UNDEF)))
HGOTO_ERROR(H5E_IO, H5E_CANTINIT, FAIL, "unable to open file")
- /* Get the property list object */
- if(NULL == (xfer_plist = (H5P_genplist_t *)H5I_object(dxpl_id)))
+ /* Set up the file driver info */
+ fdio_info.file = file;
+ if(NULL == (fdio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(meta_dxpl_id)))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADATOM, FAIL, "can't get new property list object")
+ if(NULL == (fdio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(raw_dxpl_id)))
HGOTO_ERROR(H5E_CACHE, H5E_BADATOM, FAIL, "can't get new property list object")
/* The file is an hdf5 file if the hdf5 file signature can be found */
- if(H5FD_locate_signature(file, xfer_plist, &sig_addr) < 0)
+ if(H5FD_locate_signature(&fdio_info, &sig_addr) < 0)
HGOTO_ERROR(H5E_FILE, H5E_NOTHDF5, FAIL, "unable to locate file signature")
ret_value = (HADDR_UNDEF != sig_addr);
@@ -540,7 +550,7 @@ done:
HDONE_ERROR(H5E_IO, H5E_CANTCLOSEFILE, FAIL, "unable to close file")
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5F_is_hdf5() */
+} /* end H5F__is_hdf5() */
/*-------------------------------------------------------------------------
@@ -593,11 +603,26 @@ H5F_new(H5F_file_t *shared, unsigned flags, hid_t fcpl_id, hid_t fapl_id, H5FD_t
f->shared->flags = flags;
f->shared->sohm_addr = HADDR_UNDEF;
f->shared->sohm_vers = HDF5_SHAREDHEADER_VERSION;
- for(u = 0; u < NELMTS(f->shared->fs_addr); u++)
- f->shared->fs_addr[u] = HADDR_UNDEF;
f->shared->accum.loc = HADDR_UNDEF;
f->shared->lf = lf;
+ /* Initialization for handling file space */
+ for(u = 0; u < NELMTS(f->shared->fs_addr); u++) {
+ f->shared->fs_state[u] = H5F_FS_STATE_CLOSED;
+ f->shared->fs_addr[u] = HADDR_UNDEF;
+ f->shared->fs_man[u] = NULL;
+ } /* end for */
+ f->shared->first_alloc_dealloc = FALSE;
+ f->shared->eoa_pre_fsm_fsalloc = HADDR_UNDEF;
+ f->shared->eoa_post_fsm_fsalloc = HADDR_UNDEF;
+ f->shared->eoa_post_mdci_fsalloc = HADDR_UNDEF;
+
+ /* Initialization for handling file space (for paged aggregation) */
+ f->shared->pgend_meta_thres = H5F_FILE_SPACE_PGEND_META_THRES;
+
+ /* initialize point of no return */
+ f->shared->point_of_no_return = FALSE;
+
/*
* Copy the file creation and file access property lists into the
* new file handle. We do this early because some values might need
@@ -617,8 +642,19 @@ H5F_new(H5F_file_t *shared, unsigned flags, hid_t fcpl_id, hid_t fapl_id, H5FD_t
HDassert(f->shared->sohm_nindexes < 255);
if(H5P_get(plist, H5F_CRT_FILE_SPACE_STRATEGY_NAME, &f->shared->fs_strategy) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get file space strategy")
+ if(H5P_get(plist, H5F_CRT_FREE_SPACE_PERSIST_NAME, &f->shared->fs_persist) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get file space persisting status")
if(H5P_get(plist, H5F_CRT_FREE_SPACE_THRESHOLD_NAME, &f->shared->fs_threshold) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get free-space section threshold")
+ if(H5P_get(plist, H5F_CRT_FILE_SPACE_PAGE_SIZE_NAME, &f->shared->fs_page_size) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get file space page size")
+ HDassert(f->shared->fs_page_size >= H5F_FILE_SPACE_PAGE_SIZE_MIN);
+
+ /* Temporary for multi/split drivers: fail file creation when
+ * persisting free space or using the paged aggregation strategy */
+ if(H5F_HAS_FEATURE(f, H5FD_FEAT_PAGED_AGGR))
+ if(f->shared->fs_strategy == H5F_FSPACE_STRATEGY_PAGE || f->shared->fs_persist)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't open with this strategy or persistent fs")
/* Get the FAPL values to cache */
if(NULL == (plist = (H5P_genplist_t *)H5I_object(fapl_id)))
@@ -665,6 +701,8 @@ H5F_new(H5F_file_t *shared, unsigned flags, hid_t fcpl_id, hid_t fapl_id, H5FD_t
if(H5P_get(plist, H5F_ACS_COLL_MD_WRITE_FLAG_NAME, &(f->coll_md_write)) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get collective metadata write flag")
#endif /* H5_HAVE_PARALLEL */
+ if(H5P_get(plist, H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_NAME, &(f->shared->mdc_initCacheImageCfg)) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get initial metadata cache resize config")
/* Get the VFD values to cache */
f->shared->maxaddr = H5FD_get_maxaddr(lf);
@@ -748,7 +786,7 @@ H5F_new(H5F_file_t *shared, unsigned flags, hid_t fcpl_id, hid_t fapl_id, H5FD_t
* The cache might be created with a different number of elements and
* the access property list should be updated to reflect that.
*/
- if(H5AC_create(f, &(f->shared->mdc_initCacheCfg)) < 0)
+ if(H5AC_create(f, &(f->shared->mdc_initCacheCfg), &(f->shared->mdc_initCacheImageCfg)) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, NULL, "unable to create metadata cache")
/* Create the file's "open object" information */
@@ -790,7 +828,7 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5F_dest
+ * Function: H5F__dest
*
* Purpose: Destroys a file structure. This function flushes the cache
* but doesn't do any other cleanup other than freeing memory
@@ -806,11 +844,11 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5F_dest(H5F_t *f, hid_t dxpl_id, hbool_t flush)
+H5F__dest(H5F_t *f, hid_t meta_dxpl_id, hid_t raw_dxpl_id, hbool_t flush)
{
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_PACKAGE
/* Sanity check */
HDassert(f);
@@ -818,16 +856,41 @@ H5F_dest(H5F_t *f, hid_t dxpl_id, hbool_t flush)
if(1 == f->shared->nrefs) {
int actype; /* metadata cache type (enum value) */
- H5F_io_info_t fio_info; /* I/O info for operation */
+ H5F_io_info2_t fio_info; /* I/O info for operation */
- /* Flush at this point since the file will be closed.
+ /* Flush at this point since the file will be closed (phase 1).
* Only try to flush the file if it was opened with write access, and if
* the caller requested a flush.
*/
if((H5F_ACC_RDWR & H5F_INTENT(f)) && flush)
- if(H5F_flush(f, dxpl_id, TRUE) < 0)
+ if(H5F__flush_phase1(f, meta_dxpl_id) < 0)
/* Push error, but keep going*/
- HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush cache")
+ HDONE_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "unable to flush cached data (phase 1)")
+
+ /* Notify the metadata cache that the file is about to be closed.
+ * This allows the cache to set up for creating a metadata cache
+ * image if this has been requested.
+ */
+ if(H5AC_prep_for_file_close(f, meta_dxpl_id) < 0)
+ /* Push error, but keep going */
+ HDONE_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "metadata cache prep for close failed")
+
+ /* Flush at this point since the file will be closed (phase 2).
+ * Only try to flush the file if it was opened with write access, and if
+ * the caller requested a flush.
+ */
+ if((H5F_ACC_RDWR & H5F_INTENT(f)) && flush)
+ if(H5F__flush_phase2(f, meta_dxpl_id, raw_dxpl_id, TRUE) < 0)
+ /* Push error, but keep going */
+ HDONE_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "unable to flush cached data (phase 2)")
+
+ /* With the shutdown modifications, the contents of the metadata cache
+ * should be clean at this point, with the possible exception of the
+ * superblock and superblock extension.
+ *
+ * Verify this.
+ */
+ HDassert(H5AC_cache_is_clean(f, H5AC_RING_MDFSM));
/* Release the external file cache */
if(f->shared->efc) {
@@ -837,20 +900,42 @@ H5F_dest(H5F_t *f, hid_t dxpl_id, hbool_t flush)
f->shared->efc = NULL;
} /* end if */
+ /* With the shutdown modifications, the contents of the metadata cache
+ * should be clean at this point, with the possible exception of the
+ * superblock and superblock extension.
+ *
+ * Verify this.
+ */
+ HDassert(H5AC_cache_is_clean(f, H5AC_RING_MDFSM));
+
/* Release objects that depend on the superblock being initialized */
if(f->shared->sblock) {
/* Shutdown file free space manager(s) */
- /* (We should release the free space information now (before truncating
- * the file and before the metadata cache is shut down) since the
- * free space manager is holding some data structures in memory
- * and also because releasing free space can shrink the file's
- * 'eoa' value)
+ /* (We should release the free space information now (before
+ * truncating the file and before the metadata cache is shut
+ * down) since the free space manager is holding some data
+ * structures in memory and also because releasing free space
+ * can shrink the file's 'eoa' value)
+ *
+ * Update 11/1/16:
+ *
+ * With recent library shutdown modifications, the free space
+ * managers should be settled and written to file at this point
+ * (assuming they are persistent). In this case, closing the
+ * free space managers should have no effect on EOA.
+ *
+ * -- JRM
*/
if(H5F_ACC_RDWR & H5F_INTENT(f)) {
- if(H5MF_close(f, dxpl_id) < 0)
+ if(H5MF_close(f, meta_dxpl_id) < 0)
/* Push error, but keep going*/
HDONE_ERROR(H5E_FILE, H5E_CANTRELEASE, FAIL, "can't release file free space info")
+ /* at this point, only the superblock and superblock
+ * extension should be dirty.
+ */
+ HDassert(H5AC_cache_is_clean(f, H5AC_RING_MDFSM));
+
/* Flush the file again (if requested), as shutting down the
* free space manager may dirty some data structures again.
*/
@@ -858,14 +943,32 @@ H5F_dest(H5F_t *f, hid_t dxpl_id, hbool_t flush)
/* Clear status_flags */
f->shared->sblock->status_flags &= (uint8_t)(~H5F_SUPER_WRITE_ACCESS);
f->shared->sblock->status_flags &= (uint8_t)(~H5F_SUPER_SWMR_WRITE_ACCESS);
- /* Mark superblock dirty in cache, so change will get encoded */
- /* Push error, but keep going*/
- if(H5F_super_dirty(f) < 0)
+
+ /* Mark EOA info dirty in cache, so change will get encoded */
+ if(H5F_eoa_dirty(f, meta_dxpl_id) < 0)
+ /* Push error, but keep going*/
HDONE_ERROR(H5E_FILE, H5E_CANTMARKDIRTY, FAIL, "unable to mark superblock as dirty")
- if(H5F_flush(f, dxpl_id, TRUE) < 0)
+ /* Release any space allocated to space aggregators,
+ * so that the eoa value corresponds to the end of the
+ * space written to in the file.
+ *
+ * At most, this should change the superblock or the
+ * superblock extension messages.
+ */
+ if(H5MF_free_aggrs(f, meta_dxpl_id) < 0)
/* Push error, but keep going*/
- HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush cache")
+ HDONE_ERROR(H5E_FILE, H5E_CANTRELEASE, FAIL, "can't release file space")
+
+ /* Truncate the file to the current allocated size */
+ if(H5FD_truncate(f->shared->lf, meta_dxpl_id, TRUE) < 0)
+ /* Push error, but keep going*/
+ HDONE_ERROR(H5E_FILE, H5E_WRITEERROR, FAIL, "low level truncate failed")
+
+ /* at this point, only the superblock and superblock
+ * extension should be dirty.
+ */
+ HDassert(H5AC_cache_is_clean(f, H5AC_RING_MDFSM));
} /* end if */
} /* end if */
@@ -883,6 +986,13 @@ H5F_dest(H5F_t *f, hid_t dxpl_id, hbool_t flush)
HDONE_ERROR(H5E_FSPACE, H5E_CANTUNPIN, FAIL, "unable to unpin superblock")
f->shared->sblock = NULL;
} /* end if */
+
+ /* with the possible exception of the superblock and superblock
+ * extension, the metadata cache should be clean at this point.
+ *
+ * Verify this.
+ */
+ HDassert(H5AC_cache_is_clean(f, H5AC_RING_MDFSM));
/* Remove shared file struct from list of open files */
if(H5F_sfile_remove(f->shared) < 0)
@@ -890,10 +1000,22 @@ H5F_dest(H5F_t *f, hid_t dxpl_id, hbool_t flush)
HDONE_ERROR(H5E_FILE, H5E_CANTRELEASE, FAIL, "problems closing file")
/* Shutdown the metadata cache */
- if(H5AC_dest(f, dxpl_id))
+ if(H5AC_dest(f, meta_dxpl_id))
/* Push error, but keep going*/
HDONE_ERROR(H5E_FILE, H5E_CANTRELEASE, FAIL, "problems closing file")
+ /* Set up I/O info for operation */
+ fio_info.f = f;
+ if(NULL == (fio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(meta_dxpl_id)))
+ HDONE_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ if(NULL == (fio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(H5AC_rawdata_dxpl_id)))
+ HDONE_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+
+ /* Shutdown the page buffer cache */
+ if(H5PB_dest(&fio_info) < 0)
+ /* Push error, but keep going*/
+ HDONE_ERROR(H5E_FILE, H5E_CANTRELEASE, FAIL, "problems closing page buffer cache")
+
/* Clean up the metadata cache log location string */
if(f->shared->mdc_log_location)
f->shared->mdc_log_location = (char *)H5MM_xfree(f->shared->mdc_log_location);
@@ -910,11 +1032,6 @@ H5F_dest(H5F_t *f, hid_t dxpl_id, hbool_t flush)
f->shared->root_grp = NULL;
} /* end if */
- /* Set up I/O info for operation */
- fio_info.f = f;
- if(NULL == (fio_info.dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
- HDONE_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
-
/* Destroy other components of the file */
if(H5F__accum_reset(&fio_info, TRUE) < 0)
/* Push error, but keep going*/
@@ -1052,7 +1169,7 @@ H5F_dest(H5F_t *f, hid_t dxpl_id, hbool_t flush)
*/
H5F_t *
H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id,
- hid_t dxpl_id)
+ hid_t meta_dxpl_id)
{
H5F_t *file = NULL; /*the success return value */
H5F_file_t *shared = NULL; /*shared part of `file' */
@@ -1061,6 +1178,10 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id,
H5FD_class_t *drvr; /*file driver class info */
H5P_genplist_t *a_plist; /*file access property list */
H5F_close_degree_t fc_degree; /*file close degree */
+ hid_t raw_dxpl_id = H5AC_rawdata_dxpl_id; /* Raw data dxpl used by library */
+ size_t page_buf_size;
+ unsigned page_buf_min_meta_perc;
+ unsigned page_buf_min_raw_perc;
hbool_t set_flag = FALSE; /*set the status_flags in the superblock */
hbool_t clear = FALSE; /*clear the status_flags */
hbool_t evict_on_close; /* evict on close value from plist */
@@ -1207,6 +1328,29 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id,
shared = file->shared;
lf = shared->lf;
+ /* Get the file access property list, for future queries */
+ if(NULL == (a_plist = (H5P_genplist_t *)H5I_object(fapl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not file access property list")
+
+ /* Check if page buffering is enabled */
+ if(H5P_get(a_plist, H5F_ACS_PAGE_BUFFER_SIZE_NAME, &page_buf_size) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "can't get page buffer size")
+ if(page_buf_size) {
+#ifdef H5_HAVE_PARALLEL
+ /* Collective metadata writes are not supported with page buffering */
+ if(file->coll_md_write)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "collective metadata writes are not supported with page buffering")
+
+ /* Temporary: fail file create when page buffering feature is enabled for parallel */
+ HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "page buffering is disabled for parallel")
+#endif /* H5_HAVE_PARALLEL */
+ /* Query for other page buffer cache properties */
+ if(H5P_get(a_plist, H5F_ACS_PAGE_BUFFER_MIN_META_PERC_NAME, &page_buf_min_meta_perc) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "can't get minimum metadata fraction of page buffer")
+ if(H5P_get(a_plist, H5F_ACS_PAGE_BUFFER_MIN_RAW_PERC_NAME, &page_buf_min_raw_perc) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "can't get minimum raw data fraction of page buffer")
+ } /* end if */
+
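For context, a sketch of how an application would exercise the properties queried above, assuming the public counterparts of these settings are H5Pset_file_space_strategy, H5Pset_file_space_page_size and H5Pset_page_buffer_size; error checking is omitted and the chosen sizes are arbitrary:

#include "hdf5.h"

/* Sketch only: create a file that uses paged aggregation with a 4 KiB
 * file space page, persistent free space managers, and a 1 MiB page
 * buffer reserving at least 50% for metadata and 25% for raw data.
 */
static hid_t
create_paged_file(const char *name)
{
    hid_t fcpl = H5Pcreate(H5P_FILE_CREATE);
    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
    hid_t file;

    H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 1, (hsize_t)1);
    H5Pset_file_space_page_size(fcpl, (hsize_t)4096);
    H5Pset_page_buffer_size(fapl, (size_t)(1024 * 1024), 50, 25);

    file = H5Fcreate(name, H5F_ACC_TRUNC, fcpl, fapl);

    H5Pclose(fcpl);
    H5Pclose(fapl);

    return file;
}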
/*
* Read or write the file superblock, depending on whether the file is
* empty or not.
@@ -1217,33 +1361,38 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id,
* to create & write the superblock.
*/
+ /* Create the page buffer before initializing the superblock */
+ if(page_buf_size)
+ if(H5PB_create(file, page_buf_size, page_buf_min_meta_perc, page_buf_min_raw_perc) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, NULL, "unable to create page buffer")
+
/* Initialize information about the superblock and allocate space for it */
/* (Writes superblock extension messages, if there are any) */
- if(H5F__super_init(file, dxpl_id) < 0)
+ if(H5F__super_init(file, meta_dxpl_id) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, NULL, "unable to allocate file superblock")
/* Create and open the root group */
/* (This must be after the space for the superblock is allocated in
* the file, since the superblock must be at offset 0)
*/
- if(H5G_mkroot(file, dxpl_id, TRUE) < 0)
+ if(H5G_mkroot(file, meta_dxpl_id, TRUE) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, NULL, "unable to create/open root group")
} /* end if */
else if (1 == shared->nrefs) {
-
/* Read the superblock if it hasn't been read before. */
- if(H5F__super_read(file, dxpl_id, TRUE) < 0)
+ if(H5F__super_read(file, meta_dxpl_id, raw_dxpl_id, TRUE) < 0)
HGOTO_ERROR(H5E_FILE, H5E_READERROR, NULL, "unable to read superblock")
+ /* Create the page buffer before initializing the superblock */
+ if(page_buf_size)
+ if(H5PB_create(file, page_buf_size, page_buf_min_meta_perc, page_buf_min_raw_perc) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, NULL, "unable to create page buffer")
+
/* Open the root group */
- if(H5G_mkroot(file, dxpl_id, FALSE) < 0)
+ if(H5G_mkroot(file, meta_dxpl_id, FALSE) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "unable to read root group")
} /* end if */
- /* Get the file access property list, for future queries */
- if(NULL == (a_plist = (H5P_genplist_t *)H5I_object(fapl_id)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not file access property list")
-
/*
* Decide the file close degree. If it's the first time to open the
* file, set the degree to access property list value; if it's the
@@ -1282,12 +1431,11 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id,
*/
if(H5P_get(a_plist, H5F_ACS_EVICT_ON_CLOSE_FLAG_NAME, &evict_on_close) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, NULL, "can't get evict on close value")
-
- if(shared->nrefs == 1) {
+ if(shared->nrefs == 1)
shared->evict_on_close = evict_on_close;
- } else if(shared->nrefs > 1) {
+ else if(shared->nrefs > 1) {
if(shared->evict_on_close != evict_on_close)
- HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, NULL, "file evict-on-close value doesn't match")
+ HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "file evict-on-close value doesn't match")
} /* end if */
/* Formulate the absolute path for later search of target file for external links */
@@ -1315,7 +1463,7 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id,
/* Flush the superblock */
if(H5F_super_dirty(file) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTMARKDIRTY, NULL, "unable to mark superblock as dirty")
- if(H5F_flush_tagged_metadata(file, H5AC__SUPERBLOCK_TAG, H5AC_ind_read_dxpl_id) < 0)
+ if(H5F_flush_tagged_metadata(file, H5AC__SUPERBLOCK_TAG, meta_dxpl_id) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTFLUSH, NULL, "unable to flush superblock")
/* Remove the file lock for SWMR_WRITE */
@@ -1327,7 +1475,6 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id,
else { /* H5F_ACC_RDONLY: check consistency of status_flags */
/* Skip check of status_flags for file with < superblock version 3 */
if(file->shared->sblock->super_vers >= HDF5_SUPERBLOCK_VERSION_3) {
-
if(H5F_INTENT(file) & H5F_ACC_SWMR_READ) {
if((file->shared->sblock->status_flags & H5F_SUPER_WRITE_ACCESS &&
!(file->shared->sblock->status_flags & H5F_SUPER_SWMR_WRITE_ACCESS))
@@ -1335,12 +1482,10 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id,
(!(file->shared->sblock->status_flags & H5F_SUPER_WRITE_ACCESS) &&
file->shared->sblock->status_flags & H5F_SUPER_SWMR_WRITE_ACCESS))
HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "file is not already open for SWMR writing")
-
} /* end if */
else if((file->shared->sblock->status_flags & H5F_SUPER_WRITE_ACCESS) ||
(file->shared->sblock->status_flags & H5F_SUPER_SWMR_WRITE_ACCESS))
HGOTO_ERROR(H5E_FILE, H5E_CANTOPENFILE, NULL, "file is already open for write (may use <h5clear file> to clear file consistency flags)")
-
} /* version 3 superblock */
} /* end else */
} /* end if set_flag */
@@ -1350,38 +1495,37 @@ H5F_open(const char *name, unsigned flags, hid_t fcpl_id, hid_t fapl_id,
done:
if((NULL == ret_value) && file)
- if(H5F_dest(file, dxpl_id, FALSE) < 0)
+ if(H5F__dest(file, meta_dxpl_id, raw_dxpl_id, FALSE) < 0)
HDONE_ERROR(H5E_FILE, H5E_CANTCLOSEFILE, NULL, "problems closing file")
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5F_open() */
/*-------------------------------------------------------------------------
- * Function: H5F_flush
+ * Function: H5F_flush_phase1
*
- * Purpose: Flushes cached data.
+ * Purpose: First phase of flushing cached data.
*
* Return: Non-negative on success/Negative on failure
*
- * Programmer: Robb Matzke
- * matzke@llnl.gov
- * Aug 29 1997
+ * Programmer: Quincey Koziol
+ * koziol@lbl.gov
+ * Jan 1 2017
*
*-------------------------------------------------------------------------
*/
-herr_t
-H5F_flush(H5F_t *f, hid_t dxpl_id, hbool_t closing)
+static herr_t
+H5F__flush_phase1(H5F_t *f, hid_t meta_dxpl_id)
{
- H5F_io_info_t fio_info; /* I/O info for operation */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_STATIC
/* Sanity check arguments */
HDassert(f);
/* Flush any cached dataset storage raw data */
- if(H5D_flush(f, dxpl_id) < 0)
+ if(H5D_flush(f, meta_dxpl_id) < 0)
/* Push error, but keep going*/
HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush dataset cache")
@@ -1391,27 +1535,60 @@ H5F_flush(H5F_t *f, hid_t dxpl_id, hbool_t closing)
/* (needs to happen before cache flush, with superblock write, since the
* 'eoa' value is written in superblock -QAK)
*/
- if(H5MF_free_aggrs(f, dxpl_id) < 0)
+ if(H5MF_free_aggrs(f, meta_dxpl_id) < 0)
/* Push error, but keep going*/
HDONE_ERROR(H5E_FILE, H5E_CANTRELEASE, FAIL, "can't release file space")
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5F__flush_phase1() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5F__flush_phase2
+ *
+ * Purpose: Second phase of flushing cached data.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * koziol@lbl.gov
+ * Jan 1 2017
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5F__flush_phase2(H5F_t *f, hid_t meta_dxpl_id, hid_t raw_dxpl_id, hbool_t closing)
+{
+ H5F_io_info2_t fio_info; /* I/O info for operation */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity check arguments */
+ HDassert(f);
+
/* Flush the entire metadata cache */
- if(H5AC_flush(f, dxpl_id) < 0)
+ if(H5AC_flush(f, meta_dxpl_id) < 0)
/* Push error, but keep going*/
HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush metadata cache")
/* Truncate the file to the current allocated size */
- if(H5FD_truncate(f->shared->lf, dxpl_id, closing) < 0)
+ if(H5FD_truncate(f->shared->lf, meta_dxpl_id, closing) < 0)
+ /* Push error, but keep going*/
HDONE_ERROR(H5E_FILE, H5E_WRITEERROR, FAIL, "low level truncate failed")
/* Flush the entire metadata cache again since the EOA could have changed in the truncate call. */
- if(H5AC_flush(f, dxpl_id) < 0)
+ if(H5AC_flush(f, meta_dxpl_id) < 0)
/* Push error, but keep going*/
HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush metadata cache")
/* Set up I/O info for operation */
fio_info.f = f;
- if(NULL == (fio_info.dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
+ if(NULL == (fio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(meta_dxpl_id)))
+ /* Push error, but keep going*/
+ HDONE_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ if(NULL == (fio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(raw_dxpl_id)))
+ /* Push error, but keep going*/
HDONE_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
/* Flush out the metadata accumulator */
@@ -1419,14 +1596,55 @@ H5F_flush(H5F_t *f, hid_t dxpl_id, hbool_t closing)
/* Push error, but keep going*/
HDONE_ERROR(H5E_IO, H5E_CANTFLUSH, FAIL, "unable to flush metadata accumulator")
+ /* Flush the page buffer */
+ if(H5PB_flush(&fio_info) < 0)
+ /* Push error, but keep going*/
+ HDONE_ERROR(H5E_IO, H5E_CANTFLUSH, FAIL, "page buffer flush failed")
+
/* Flush file buffers to disk. */
- if(H5FD_flush(f->shared->lf, dxpl_id, closing) < 0)
+ if(H5FD_flush(f->shared->lf, meta_dxpl_id, closing) < 0)
/* Push error, but keep going*/
- HDONE_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "low level flush failed")
+ HDONE_ERROR(H5E_IO, H5E_CANTFLUSH, FAIL, "low level flush failed")
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5F__flush_phase2() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5F__flush
+ *
+ * Purpose: Flushes cached data.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Robb Matzke
+ * matzke@llnl.gov
+ * Aug 29 1997
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5F__flush(H5F_t *f, hid_t meta_dxpl_id, hid_t raw_dxpl_id, hbool_t closing)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ /* Sanity check arguments */
+ HDassert(f);
+
+ /* First phase of flushing data */
+ if(H5F__flush_phase1(f, meta_dxpl_id) < 0)
+ /* Push error, but keep going*/
+ HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush file data")
+
+ /* Second phase of flushing data */
+ if(H5F__flush_phase2(f, meta_dxpl_id, raw_dxpl_id, closing) < 0)
+ /* Push error, but keep going*/
+ HDONE_ERROR(H5E_CACHE, H5E_CANTFLUSH, FAIL, "unable to flush file data")
-done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5F_flush() */
+} /* end H5F__flush() */
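
/* Illustrative sketch (not part of this patch): how an internal caller is
 * expected to drive the reworked flush entry point, with metadata going
 * through the independent-read DXPL and raw data through the dedicated
 * raw-data DXPL, mirroring the H5F_try_close() change later in this diff.
 * example_flush_file() is a hypothetical helper, not a library routine. */
static herr_t
example_flush_file(H5F_t *f)
{
    /* Run both flush phases (raw data first, then metadata cache, page
     * buffer and low-level file buffers) without closing the file */
    if(H5F__flush(f, H5AC_ind_read_dxpl_id, H5AC_rawdata_dxpl_id, FALSE) < 0)
        return FAIL;

    return SUCCEED;
}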
/*-------------------------------------------------------------------------
@@ -1651,7 +1869,7 @@ H5F_try_close(H5F_t *f, hbool_t *was_closed /*out*/)
if(H5F_efc_try_close(f) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTRELEASE, FAIL, "can't attempt to close EFC")
- /* Delay flush until the shared file struct is closed, in H5F_dest. If the
+ /* Delay flush until the shared file struct is closed, in H5F__dest. If the
* application called H5Fclose, it would have been flushed in that function
* (unless it will have been flushed in H5F_dest anyways). */
@@ -1660,7 +1878,7 @@ H5F_try_close(H5F_t *f, hbool_t *was_closed /*out*/)
* shared H5F_file_t struct. If the reference count for the H5F_file_t
* struct reaches zero then destroy it also.
*/
- if(H5F_dest(f, H5AC_ind_read_dxpl_id, TRUE) < 0)
+ if(H5F__dest(f, H5AC_ind_read_dxpl_id, H5AC_rawdata_dxpl_id, TRUE) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTCLOSEFILE, FAIL, "problems closing file")
/* Since we closed the file, this should be set to TRUE */
@@ -2218,7 +2436,8 @@ H5F_set_store_msg_crt_idx(H5F_t *f, hbool_t flag)
*-------------------------------------------------------------------------
*/
ssize_t
-H5F_get_file_image(H5F_t *file, void *buf_ptr, size_t buf_len, hid_t dxpl_id)
+H5F_get_file_image(H5F_t *file, void *buf_ptr, size_t buf_len, hid_t meta_dxpl_id,
+ hid_t raw_dxpl_id)
{
H5FD_t *fd_ptr; /* file driver */
haddr_t eoa; /* End of file address */
@@ -2285,10 +2504,10 @@ H5F_get_file_image(H5F_t *file, void *buf_ptr, size_t buf_len, hid_t dxpl_id)
/* test to see if a buffer was provided -- if not, we are done */
if(buf_ptr != NULL) {
+ H5FD_io_info_t fdio_info; /* File driver I/O info */
size_t space_needed; /* size of file image */
hsize_t tmp;
size_t tmp_size;
- H5P_genplist_t *xfer_plist= NULL; /* Dataset transfer property list object */
/* Check for buffer too small */
if((haddr_t)buf_len < eoa)
@@ -2296,13 +2515,16 @@ H5F_get_file_image(H5F_t *file, void *buf_ptr, size_t buf_len, hid_t dxpl_id)
space_needed = (size_t)eoa;
- /* Get the property list object */
- if(NULL == (xfer_plist = (H5P_genplist_t *)H5I_object(dxpl_id)))
- HGOTO_ERROR(H5E_CACHE, H5E_BADATOM, FAIL, "can't get new property list object")
+ /* Set up file driver I/O info object */
+ fdio_info.file = fd_ptr;
+ if(NULL == (fdio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(meta_dxpl_id)))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADATOM, FAIL, "can't get property list object")
+ if(NULL == (fdio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(raw_dxpl_id)))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADATOM, FAIL, "can't get property list object")
/* read in the file image */
/* (Note compensation for base address addition in internal routine) */
- if(H5FD_read(fd_ptr, xfer_plist, H5FD_MEM_DEFAULT, 0, space_needed, buf_ptr) < 0)
+ if(H5FD_read(&fdio_info, H5FD_MEM_DEFAULT, 0, space_needed, buf_ptr) < 0)
HGOTO_ERROR(H5E_FILE, H5E_READERROR, FAIL, "file image read request failed")
/* Offset to "status_flags" in the superblock */
@@ -2504,6 +2726,38 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5F__set_eoa() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5F__set_paged_aggr
+ *
+ * Purpose: Quick and dirty routine to set the file's paged_aggr mode
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol <koziol@hdfgroup.org>
+ * June 19, 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5F__set_paged_aggr(const H5F_t *f, hbool_t paged)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_PACKAGE
+
+ /* Sanity check */
+ HDassert(f);
+ HDassert(f->shared);
+
+ /* Dispatch to driver */
+ if(H5FD_set_paged_aggr(f->shared->lf, paged) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "driver set paged aggr mode failed")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5F__set_paged_aggr() */
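
/* Illustrative sketch (not part of this patch): a hypothetical call site for
 * the new setter. Once the file-space strategy is known, e.g. after the
 * superblock has been read, the paged-aggregation flag is pushed down to the
 * file driver via the H5F_PAGED_AGGR() macro added in H5Fprivate.h below. */
static herr_t
example_push_paged_aggr(H5F_t *f)
{
    /* Tell the VFD whether this file uses paged aggregation */
    if(H5F__set_paged_aggr(f, (hbool_t)H5F_PAGED_AGGR(f)) < 0)
        return FAIL;

    return SUCCEED;
}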
+
#ifdef H5_HAVE_PARALLEL
/*-------------------------------------------------------------------------
@@ -2534,3 +2788,33 @@ H5F_set_coll_md_read(H5F_t *f, H5P_coll_md_read_flag_t cmr)
} /* H5F_set_coll_md_read() */
#endif /* H5_HAVE_PARALLEL */
+
+/*-------------------------------------------------------------------------
+ * Function: H5F_set_latest_flags
+ *
+ * Purpose: Set the latest_flags field with a new value.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: Quincey Koziol
+ * 4/26/16
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5F_set_latest_flags(H5F_t *f, unsigned flags)
+{
+ /* Use FUNC_ENTER_NOAPI_NOINIT_NOERR here to avoid performance issues */
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ /* Sanity check */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(0 == ((~flags) & H5F_LATEST_ALL_FLAGS));
+
+ f->shared->latest_flags = flags;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5F_set_latest_flags() */
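
/* Illustrative sketch (not part of this patch): a hypothetical internal
 * caller switching a file to the newest object formats; H5F_LATEST_ALL_FLAGS
 * is the flag set referenced by the sanity check above. */
static void
example_enable_latest_format(H5F_t *f)
{
    /* Request the latest available formats for objects created from now on */
    (void)H5F_set_latest_flags(f, H5F_LATEST_ALL_FLAGS);
}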
+
diff --git a/src/H5Fio.c b/src/H5Fio.c
index e215666..81fa514 100644
--- a/src/H5Fio.c
+++ b/src/H5Fio.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -39,6 +37,7 @@
#include "H5Fpkg.h" /* File access */
#include "H5FDprivate.h" /* File drivers */
#include "H5Iprivate.h" /* IDs */
+#include "H5PBprivate.h" /* Page Buffer */
/****************/
@@ -96,15 +95,11 @@ herr_t
H5F_block_read(const H5F_t *f, H5FD_mem_t type, haddr_t addr, size_t size,
hid_t dxpl_id, void *buf/*out*/)
{
- H5F_io_info_t fio_info; /* I/O info for operation */
+ H5F_io_info2_t fio_info; /* I/O info for operation */
H5FD_mem_t map_type; /* Mapped memory type */
- hid_t my_dxpl_id = dxpl_id; /* transfer property to use for I/O */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
-#ifdef QAK
-HDfprintf(stderr, "%s: read from addr = %a, size = %Zu\n", FUNC, addr, size);
-#endif /* QAK */
HDassert(f);
HDassert(f->shared);
@@ -118,20 +113,24 @@ HDfprintf(stderr, "%s: read from addr = %a, size = %Zu\n", FUNC, addr, size);
/* Treat global heap as raw data */
map_type = (type == H5FD_MEM_GHEAP) ? H5FD_MEM_DRAW : type;
-#ifdef H5_DEBUG_BUILD
- /* GHEAP type is treated as RAW, so update the dxpl type property too */
- if(H5FD_MEM_GHEAP == type)
- my_dxpl_id = H5AC_rawdata_dxpl_id;
-#endif /* H5_DEBUG_BUILD */
-
- /* Set up I/O info for operation */
+ /* Set up the I/O info object */
fio_info.f = f;
- if(NULL == (fio_info.dxpl = (H5P_genplist_t *)H5I_object(my_dxpl_id)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ if(H5FD_MEM_DRAW == type) {
+ if(NULL == (fio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(H5AC_ind_read_dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ if(NULL == (fio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ } /* end if */
+ else {
+ if(NULL == (fio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ if(NULL == (fio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(H5AC_rawdata_dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ } /* end else */
- /* Pass through metadata accumulator layer */
- if(H5F__accum_read(&fio_info, map_type, addr, size, buf) < 0)
- HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "read through metadata accumulator failed")
+ /* Pass through page buffer layer */
+ if(H5PB_read(&fio_info, map_type, addr, size, buf) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_READERROR, FAIL, "read through page buffer failed")
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -157,15 +156,11 @@ herr_t
H5F_block_write(const H5F_t *f, H5FD_mem_t type, haddr_t addr, size_t size,
hid_t dxpl_id, const void *buf)
{
- H5F_io_info_t fio_info; /* I/O info for operation */
+ H5F_io_info2_t fio_info; /* I/O info for operation */
H5FD_mem_t map_type; /* Mapped memory type */
- hid_t my_dxpl_id = dxpl_id; /* transfer property to use for I/O */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
-#ifdef QAK
-HDfprintf(stderr, "%s: write to addr = %a, size = %Zu\n", FUNC, addr, size);
-#endif /* QAK */
HDassert(f);
HDassert(f->shared);
@@ -180,20 +175,24 @@ HDfprintf(stderr, "%s: write to addr = %a, size = %Zu\n", FUNC, addr, size);
/* Treat global heap as raw data */
map_type = (type == H5FD_MEM_GHEAP) ? H5FD_MEM_DRAW : type;
-#ifdef H5_DEBUG_BUILD
- /* GHEAP type is treated as RAW, so update the dxpl type property too */
- if(H5FD_MEM_GHEAP == type)
- my_dxpl_id = H5AC_rawdata_dxpl_id;
-#endif /* H5_DEBUG_BUILD */
-
- /* Set up I/O info for operation */
+ /* Set up the I/O info object */
fio_info.f = f;
- if(NULL == (fio_info.dxpl = (H5P_genplist_t *)H5I_object(my_dxpl_id)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ if(H5FD_MEM_DRAW == type) {
+ if(NULL == (fio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(H5AC_ind_read_dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ if(NULL == (fio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ } /* end if */
+ else {
+ if(NULL == (fio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ if(NULL == (fio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(H5AC_rawdata_dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ } /* end else */
- /* Pass through metadata accumulator layer */
- if(H5F__accum_write(&fio_info, map_type, addr, size, buf) < 0)
- HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "write through metadata accumulator failed")
+ /* Pass through page buffer layer */
+ if(H5PB_write(&fio_info, map_type, addr, size, buf) < 0)
+ HGOTO_ERROR(H5E_IO, H5E_WRITEERROR, FAIL, "write through page buffer failed")
done:
FUNC_LEAVE_NOAPI(ret_value)
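
/* Illustrative sketch (not part of this patch): the DXPL-pairing rule used by
 * H5F_block_read()/H5F_block_write() above, factored into a hypothetical
 * helper. 'type' is the caller-supplied memory type before the global-heap
 * mapping; the caller's dxpl_id stays on its own channel and the
 * library-global DXPL fills the other slot of the H5F_io_info2_t. */
static herr_t
example_setup_io_info(const H5F_t *f, H5FD_mem_t type, hid_t dxpl_id,
    H5F_io_info2_t *fio_info)
{
    /* Raw-data requests keep dxpl_id as the raw DXPL; all other (metadata)
     * requests keep it as the metadata DXPL */
    hid_t meta_id = (H5FD_MEM_DRAW == type) ? H5AC_ind_read_dxpl_id : dxpl_id;
    hid_t raw_id = (H5FD_MEM_DRAW == type) ? dxpl_id : H5AC_rawdata_dxpl_id;

    fio_info->f = f;
    if(NULL == (fio_info->meta_dxpl = (H5P_genplist_t *)H5I_object(meta_id)))
        return FAIL;
    if(NULL == (fio_info->raw_dxpl = (H5P_genplist_t *)H5I_object(raw_id)))
        return FAIL;

    return SUCCEED;
}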
@@ -216,7 +215,7 @@ done:
herr_t
H5F_flush_tagged_metadata(H5F_t * f, haddr_t tag, hid_t dxpl_id)
{
- H5F_io_info_t fio_info; /* I/O info for operation */
+ H5F_io_info2_t fio_info; /* I/O info for operation */
herr_t ret_value = SUCCEED;
FUNC_ENTER_NOAPI(FAIL)
@@ -227,10 +226,10 @@ H5F_flush_tagged_metadata(H5F_t * f, haddr_t tag, hid_t dxpl_id)
/* Set up I/O info for operation */
fio_info.f = f;
-
- if(NULL == (fio_info.dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
+ if(NULL == (fio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ if(NULL == (fio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(H5AC_rawdata_dxpl_id)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
-
/* Flush and reset the accumulator */
if(H5F__accum_reset(&fio_info, TRUE) < 0)
@@ -302,7 +301,7 @@ H5F__evict_cache_entries(H5F_t *f, hid_t dxpl_id)
#ifndef NDEBUG
{
unsigned status = 0;
- int32_t cur_num_entries;
+ uint32_t cur_num_entries;
/* Retrieve status of the superblock */
if(H5AC_get_entry_status(f, (haddr_t)0, &status) < 0)
diff --git a/src/H5Fmodule.h b/src/H5Fmodule.h
index 4bb2506..0481512 100644
--- a/src/H5Fmodule.h
+++ b/src/H5Fmodule.h
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Fmount.c b/src/H5Fmount.c
index e3d4952..3cd5c21 100644
--- a/src/H5Fmount.c
+++ b/src/H5Fmount.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "H5Fmodule.h" /* This source code file is part of the H5F module */
@@ -614,7 +612,7 @@ H5F_mount_count_ids(H5F_t *f, unsigned *nopen_files, unsigned *nopen_objs)
*-------------------------------------------------------------------------
*/
static herr_t
-H5F_flush_mounts_recurse(H5F_t *f, hid_t dxpl_id)
+H5F_flush_mounts_recurse(H5F_t *f, hid_t meta_dxpl_id, hid_t raw_dxpl_id)
{
unsigned nerrors = 0; /* Errors from recursive flushes */
unsigned u; /* Index variable */
@@ -627,11 +625,11 @@ H5F_flush_mounts_recurse(H5F_t *f, hid_t dxpl_id)
/* Flush all child files, not stopping for errors */
for(u = 0; u < f->shared->mtab.nmounts; u++)
- if(H5F_flush_mounts_recurse(f->shared->mtab.child[u].file, dxpl_id) < 0)
+ if(H5F_flush_mounts_recurse(f->shared->mtab.child[u].file, meta_dxpl_id, raw_dxpl_id) < 0)
nerrors++;
/* Call the "real" flush routine, for this file */
- if(H5F_flush(f, dxpl_id, FALSE) < 0)
+ if(H5F__flush(f, meta_dxpl_id, raw_dxpl_id, FALSE) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "unable to flush file's cached information")
/* Check flush errors for children - errors are already on the stack */
@@ -656,7 +654,7 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5F_flush_mounts(H5F_t *f, hid_t dxpl_id)
+H5F_flush_mounts(H5F_t *f, hid_t meta_dxpl_id, hid_t raw_dxpl_id)
{
herr_t ret_value = SUCCEED; /* Return value */
@@ -670,7 +668,7 @@ H5F_flush_mounts(H5F_t *f, hid_t dxpl_id)
f = f->parent;
/* Flush the mounted file hierarchy */
- if(H5F_flush_mounts_recurse(f, dxpl_id) < 0)
+ if(H5F_flush_mounts_recurse(f, meta_dxpl_id, raw_dxpl_id) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTFLUSH, FAIL, "unable to flush mounted file hierarchy")
done:
diff --git a/src/H5Fmpi.c b/src/H5Fmpi.c
index 5434aa5..2ce454a 100644
--- a/src/H5Fmpi.c
+++ b/src/H5Fmpi.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -356,5 +354,31 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5F_mpi_retrieve_comm */
+/*-------------------------------------------------------------------------
+ * Function: H5F_get_mpi_info
+ *
+ * Purpose: Retrieves MPI File info.
+ *
+ * Return: Success: Non-negative
+ * Failure: Negative
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5F_get_mpi_info(const H5F_t *f, MPI_Info **f_info)
+{
+ herr_t ret_value = SUCCEED;
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ HDassert(f && f->shared);
+
+ /* Dispatch to driver */
+ if((ret_value = H5FD_get_mpi_info(f->shared->lf, (void **)f_info)) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "can't get mpi file info")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5F_get_mpi_info() */
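
/* Illustrative sketch (not part of this patch): hypothetical use of the new
 * accessor. The returned pointer refers to the MPI_Info object cached by the
 * MPI file driver, so it is only inspected here. */
static herr_t
example_query_mpi_info(const H5F_t *f)
{
    MPI_Info *f_info = NULL;

    if(H5F_get_mpi_info(f, &f_info) < 0)
        return FAIL;

    /* ... inspect *f_info with MPI_Info_get_nkeys() / MPI_Info_get() ... */

    return SUCCEED;
}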
#endif /* H5_HAVE_PARALLEL */
diff --git a/src/H5Fpkg.h b/src/H5Fpkg.h
index 1adf74b..7a5c126 100644
--- a/src/H5Fpkg.h
+++ b/src/H5Fpkg.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -42,6 +40,7 @@
#include "H5FSprivate.h" /* File free space */
#include "H5Gprivate.h" /* Groups */
#include "H5Oprivate.h" /* Object header messages */
+#include "H5PBprivate.h" /* Page buffer */
#include "H5UCprivate.h" /* Reference counted object functions */
@@ -68,8 +67,8 @@
/* Macro to abstract checking whether file is using a free space manager */
#define H5F_HAVE_FREE_SPACE_MANAGER(F) \
- ((F)->shared->fs_strategy == H5F_FILE_SPACE_ALL || \
- (F)->shared->fs_strategy == H5F_FILE_SPACE_ALL_PERSIST)
+ ((F)->shared->fs_strategy == H5F_FSPACE_STRATEGY_FSM_AGGR || \
+ (F)->shared->fs_strategy == H5F_FSPACE_STRATEGY_PAGE)
/* Macros for encoding/decoding superblock */
#define H5F_MAX_DRVINFOBLOCK_SIZE 1024 /* Maximum size of superblock driver info buffer */
@@ -200,13 +199,6 @@ typedef struct H5F_meta_accum_t {
hbool_t dirty; /* Flag to indicate that the accumulated metadata is dirty */
} H5F_meta_accum_t;
-/* Enum for free space manager state */
-typedef enum H5F_fs_state_t {
- H5F_FS_STATE_CLOSED, /* Free space manager is closed */
- H5F_FS_STATE_OPEN, /* Free space manager has been opened */
- H5F_FS_STATE_DELETING /* Free space manager is being deleted */
-} H5F_fs_state_t;
-
/* A record of the mount table */
typedef struct H5F_mount_t {
struct H5G_t *group; /* Mount point group held open */
@@ -259,6 +251,14 @@ struct H5F_file_t {
* block is present. At all other times
* it should be NULL.
*/
+ hbool_t drvinfo_sb_msg_exists; /* Convenience field used to track
+ * whether the driver info superblock
+ * extension message has been created
+ * yet. This field should be TRUE iff the
+ * superblock extension exists and contains
+ * a driver info message. Under all other
+ * circumstances, it must be set to FALSE.
+ */
unsigned nrefs; /* Ref count for times file is opened */
unsigned flags; /* Access Permissions for file */
H5F_mtab_t mtab; /* File mount table */
@@ -273,12 +273,19 @@ struct H5F_file_t {
unsigned long feature_flags; /* VFL Driver feature Flags */
haddr_t maxaddr; /* Maximum address for file */
+ H5PB_t *page_buf; /* The page buffer cache */
H5AC_t *cache; /* The object cache */
H5AC_cache_config_t
mdc_initCacheCfg; /* initial configuration for the */
/* metadata cache. This structure is */
/* fixed at creation time and should */
/* not change thereafter. */
+ H5AC_cache_image_config_t
+ mdc_initCacheImageCfg; /* initial configuration for the */
+ /* generate metadata cache image on */
+ /* close option. This structure is */
+ /* fixed at creation time and should */
+ /* not change thereafter. */
hbool_t use_mdc_logging; /* Set when metadata logging is desired */
hbool_t start_mdc_log_on_access; /* set when mdc logging should */
/* begin on file access/create */
@@ -302,19 +309,38 @@ struct H5F_file_t {
H5UC_t *grp_btree_shared; /* Ref-counted group B-tree node info */
/* File space allocation information */
- H5F_file_space_type_t fs_strategy; /* File space handling strategy */
+ H5F_fspace_strategy_t fs_strategy; /* File space handling strategy */
hsize_t fs_threshold; /* Free space section threshold */
+ hbool_t fs_persist; /* Free-space persist or not */
hbool_t use_tmp_space; /* Whether temp. file space allocation is allowed */
haddr_t tmp_addr; /* Next address to use for temp. space in the file */
+ hbool_t point_of_no_return; /* flag to indicate that we can't go back and delete a freespace header when it's used up */
+
+ H5F_fs_state_t fs_state[H5F_MEM_PAGE_NTYPES]; /* State of free space manager for each type */
+ haddr_t fs_addr[H5F_MEM_PAGE_NTYPES]; /* Address of free space manager info for each type */
+ H5FS_t *fs_man[H5F_MEM_PAGE_NTYPES]; /* Free space manager for each file space type */
+ hbool_t first_alloc_dealloc; /* TRUE iff free space managers */
+ /* are persistent and have not */
+ /* been accessed for either */
+ /* allocation or deallocation */
+ /* since file open. */
+ haddr_t eoa_pre_fsm_fsalloc; /* eoa pre file space allocation */
+ /* for self referential FSMs */
+ haddr_t eoa_post_fsm_fsalloc; /* eoa post file space allocation */
+ /* for self referential FSMs */
+ haddr_t eoa_post_mdci_fsalloc; /* eoa post file space allocation */
+ /* for metadata cache image, or */
+ /* HADDR_UNDEF if no cache image. */
+
+ /* Free-space aggregation info */
unsigned fs_aggr_merge[H5FD_MEM_NTYPES]; /* Flags for whether free space can merge with aggregator(s) */
- H5F_fs_state_t fs_state[H5FD_MEM_NTYPES]; /* State of free space manager for each type */
- haddr_t fs_addr[H5FD_MEM_NTYPES]; /* Address of free space manager info for each type */
- H5FS_t *fs_man[H5FD_MEM_NTYPES]; /* Free space manager for each file space type */
- H5FD_mem_t fs_type_map[H5FD_MEM_NTYPES]; /* Mapping of "real" file space type into tracked type */
- H5F_blk_aggr_t meta_aggr; /* Metadata aggregation info */
- /* (if aggregating metadata allocations) */
- H5F_blk_aggr_t sdata_aggr; /* "Small data" aggregation info */
- /* (if aggregating "small data" allocations) */
+ H5FD_mem_t fs_type_map[H5FD_MEM_NTYPES]; /* Mapping of "real" file space type into tracked type */
+ H5F_blk_aggr_t meta_aggr; /* Metadata aggregation info (if aggregating metadata allocations) */
+ H5F_blk_aggr_t sdata_aggr; /* "Small data" aggregation info (if aggregating "small data" allocations) */
+
+ /* Paged aggregation info */
+ hsize_t fs_page_size; /* File space page size */
+ size_t pgend_meta_thres; /* Do not track page end meta section <= this threshold */
/* Metadata accumulator information */
H5F_meta_accum_t accum; /* Metadata accumulator info */
@@ -361,9 +387,6 @@ H5FL_EXTERN(H5F_t);
/* Declare a free list to manage the H5F_file_t struct */
H5FL_EXTERN(H5F_file_t);
-H5_DLLVAR const H5AC_class_t H5AC_SUPERBLOCK[1];
-H5_DLLVAR const H5AC_class_t H5AC_DRVRINFO[1];
-
/******************************/
/* Package Private Prototypes */
@@ -372,11 +395,12 @@ H5_DLLVAR const H5AC_class_t H5AC_DRVRINFO[1];
/* General routines */
H5F_t *H5F_new(H5F_file_t *shared, unsigned flags, hid_t fcpl_id,
hid_t fapl_id, H5FD_t *lf);
-herr_t H5F_dest(H5F_t *f, hid_t dxpl_id, hbool_t flush);
-H5_DLL herr_t H5F_flush(H5F_t *f, hid_t dxpl_id, hbool_t closing);
-H5_DLL htri_t H5F_is_hdf5(const char *name, hid_t dxpl_id);
+H5_DLL herr_t H5F__dest(H5F_t *f, hid_t meta_dxpl_id, hid_t raw_dxpl_id, hbool_t flush);
+H5_DLL herr_t H5F__flush(H5F_t *f, hid_t meta_dxpl_id, hid_t raw_dxpl_id, hbool_t closing);
+H5_DLL htri_t H5F__is_hdf5(const char *name, hid_t meta_dxpl_id, hid_t raw_dxpl_id);
H5_DLL herr_t H5F_get_objects(const H5F_t *f, unsigned types, size_t max_index, hid_t *obj_id_list, hbool_t app_ref, size_t *obj_id_count_ptr);
-H5_DLL ssize_t H5F_get_file_image(H5F_t *f, void *buf_ptr, size_t buf_len, hid_t dxpl_id);
+H5_DLL ssize_t H5F_get_file_image(H5F_t *f, void *buf_ptr, size_t buf_len,
+ hid_t meta_dxpl_id, hid_t raw_dxpl_id);
H5_DLL herr_t H5F_close(H5F_t *f);
/* File mount related routines */
@@ -386,26 +410,28 @@ H5_DLL herr_t H5F_mount_count_ids(H5F_t *f, unsigned *nopen_files, unsigned *nop
/* Superblock related routines */
H5_DLL herr_t H5F__super_init(H5F_t *f, hid_t dxpl_id);
-H5_DLL herr_t H5F__super_read(H5F_t *f, hid_t dxpl_id, hbool_t initial_read);
+H5_DLL herr_t H5F__super_read(H5F_t *f, hid_t meta_dxpl_id, hid_t raw_dxpl_id,
+ hbool_t initial_read);
H5_DLL herr_t H5F__super_size(H5F_t *f, hid_t dxpl_id, hsize_t *super_size, hsize_t *super_ext_size);
H5_DLL herr_t H5F__super_free(H5F_super_t *sblock);
/* Superblock extension related routines */
H5_DLL herr_t H5F_super_ext_open(H5F_t *f, haddr_t ext_addr, H5O_loc_t *ext_ptr);
-H5_DLL herr_t H5F_super_ext_write_msg(H5F_t *f, hid_t dxpl_id, unsigned id, void *mesg, hbool_t may_create);
+H5_DLL herr_t H5F_super_ext_write_msg(H5F_t *f, hid_t dxpl_id, unsigned id,
+ void *mesg, hbool_t may_create, unsigned mesg_flags);
H5_DLL herr_t H5F_super_ext_remove_msg(H5F_t *f, hid_t dxpl_id, unsigned id);
H5_DLL herr_t H5F_super_ext_close(H5F_t *f, H5O_loc_t *ext_ptr, hid_t dxpl_id,
hbool_t was_created);
/* Metadata accumulator routines */
-H5_DLL herr_t H5F__accum_read(const H5F_io_info_t *fio_info, H5FD_mem_t type,
+H5_DLL herr_t H5F__accum_read(const H5F_io_info2_t *fio_info, H5FD_mem_t type,
haddr_t addr, size_t size, void *buf);
-H5_DLL herr_t H5F__accum_write(const H5F_io_info_t *fio_info, H5FD_mem_t type,
+H5_DLL herr_t H5F__accum_write(const H5F_io_info2_t *fio_info, H5FD_mem_t type,
haddr_t addr, size_t size, const void *buf);
-H5_DLL herr_t H5F__accum_free(const H5F_io_info_t *fio_info, H5FD_mem_t type,
+H5_DLL herr_t H5F__accum_free(const H5F_io_info2_t *fio_info, H5FD_mem_t type,
haddr_t addr, hsize_t size);
-H5_DLL herr_t H5F__accum_flush(const H5F_io_info_t *fio_info);
-H5_DLL herr_t H5F__accum_reset(const H5F_io_info_t *fio_info, hbool_t flush);
+H5_DLL herr_t H5F__accum_flush(const H5F_io_info2_t *fio_info);
+H5_DLL herr_t H5F__accum_reset(const H5F_io_info2_t *fio_info, hbool_t flush);
/* Shared file list related routines */
H5_DLL herr_t H5F_sfile_add(H5F_file_t *shared);
@@ -419,9 +445,16 @@ H5_DLL herr_t H5F_efc_release(H5F_efc_t *efc);
H5_DLL herr_t H5F_efc_destroy(H5F_efc_t *efc);
H5_DLL herr_t H5F_efc_try_close(H5F_t *f);
+/* Space allocation routines */
+H5_DLL haddr_t H5F_alloc(H5F_t *f, hid_t dxpl_id, H5F_mem_t type, hsize_t size, haddr_t *frag_addr, hsize_t *frag_size);
+H5_DLL herr_t H5F_free(H5F_t *f, hid_t dxpl_id, H5F_mem_t type, haddr_t addr, hsize_t size);
+H5_DLL htri_t H5F_try_extend(H5F_t *f, hid_t dxpl_id, H5FD_mem_t type,
+ haddr_t blk_end, hsize_t extra_requested);
+
/* Functions that get/retrieve values from VFD layer */
H5_DLL herr_t H5F__set_eoa(const H5F_t *f, H5F_mem_t type, haddr_t addr);
H5_DLL herr_t H5F__set_base_addr(const H5F_t *f, haddr_t addr);
+H5_DLL herr_t H5F__set_paged_aggr(const H5F_t *f, hbool_t paged);
/* Functions that flush or evict */
H5_DLL herr_t H5F__evict_cache_entries(H5F_t *f, hid_t dxpl_id);
diff --git a/src/H5Fprivate.h b/src/H5Fprivate.h
index a6d1c4a..6f68a62 100644
--- a/src/H5Fprivate.h
+++ b/src/H5Fprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -315,14 +313,21 @@
#define H5F_SET_STORE_MSG_CRT_IDX(F, FL) ((F)->shared->store_msg_crt_idx = (FL))
#define H5F_GRP_BTREE_SHARED(F) ((F)->shared->grp_btree_shared)
#define H5F_SET_GRP_BTREE_SHARED(F, RC) (((F)->shared->grp_btree_shared = (RC)) ? SUCCEED : FAIL)
#define H5F_USE_TMP_SPACE(F) ((F)->shared->use_tmp_space)
#define H5F_IS_TMP_ADDR(F, ADDR) (H5F_addr_le((F)->shared->tmp_addr, (ADDR)))
+#define H5F_SET_LATEST_FLAGS(F, FL) ((F)->shared->latest_flags = (FL))
#ifdef H5_HAVE_PARALLEL
#define H5F_COLL_MD_READ(F) ((F)->coll_md_read)
#endif /* H5_HAVE_PARALLEL */
#define H5F_USE_MDC_LOGGING(F) ((F)->shared->use_mdc_logging)
#define H5F_START_MDC_LOG_ON_ACCESS(F) ((F)->shared->start_mdc_log_on_access)
#define H5F_MDC_LOG_LOCATION(F) ((F)->shared->mdc_log_location)
+#define H5F_ALIGNMENT(F) ((F)->shared->alignment)
+#define H5F_THRESHOLD(F) ((F)->shared->threshold)
+#define H5F_PGEND_META_THRES(F) ((F)->shared->pgend_meta_thres)
+#define H5F_POINT_OF_NO_RETURN(F) ((F)->shared->point_of_no_return)
+#define H5F_FIRST_ALLOC_DEALLOC(F) ((F)->shared->first_alloc_dealloc)
+#define H5F_EOA_PRE_FSM_FSALLOC(F) ((F)->shared->eoa_pre_fsm_fsalloc)
#else /* H5F_MODULE */
#define H5F_INTENT(F) (H5F_get_intent(F))
#define H5F_OPEN_NAME(F) (H5F_get_open_name(F))
@@ -367,12 +372,19 @@
#define H5F_SET_GRP_BTREE_SHARED(F, RC) (H5F_set_grp_btree_shared((F), (RC)))
#define H5F_USE_TMP_SPACE(F) (H5F_use_tmp_space(F))
#define H5F_IS_TMP_ADDR(F, ADDR) (H5F_is_tmp_addr((F), (ADDR)))
+#define H5F_SET_LATEST_FLAGS(F, FL) (H5F_set_latest_flags((F), (FL)))
#ifdef H5_HAVE_PARALLEL
#define H5F_COLL_MD_READ(F) (H5F_coll_md_read(F))
#endif /* H5_HAVE_PARALLEL */
#define H5F_USE_MDC_LOGGING(F) (H5F_use_mdc_logging(F))
#define H5F_START_MDC_LOG_ON_ACCESS(F) (H5F_start_mdc_log_on_access(F))
#define H5F_MDC_LOG_LOCATION(F) (H5F_mdc_log_location(F))
+#define H5F_ALIGNMENT(F) (H5F_get_alignment(F))
+#define H5F_THRESHOLD(F) (H5F_get_threshold(F))
+#define H5F_PGEND_META_THRES(F) (H5F_get_pgend_meta_thres(F))
+#define H5F_POINT_OF_NO_RETURN(F) (H5F_get_point_of_no_return(F))
+#define H5F_FIRST_ALLOC_DEALLOC(F) (H5F_get_first_alloc_dealloc(F))
+#define H5F_EOA_PRE_FSM_FSALLOC(F) (H5F_get_eoa_pre_fsm_fsalloc(F))
#endif /* H5F_MODULE */
@@ -446,7 +458,9 @@
#define H5F_CRT_SHMSG_LIST_MAX_NAME "shmsg_list_max" /* Shared message list maximum size */
#define H5F_CRT_SHMSG_BTREE_MIN_NAME "shmsg_btree_min" /* Shared message B-tree minimum size */
#define H5F_CRT_FILE_SPACE_STRATEGY_NAME "file_space_strategy" /* File space handling strategy */
+#define H5F_CRT_FREE_SPACE_PERSIST_NAME "free_space_persist" /* Free-space persisting status */
#define H5F_CRT_FREE_SPACE_THRESHOLD_NAME "free_space_threshold" /* Free space section threshold */
+#define H5F_CRT_FILE_SPACE_PAGE_SIZE_NAME "file_space_page_size" /* File space page size */
@@ -481,6 +495,10 @@
#define H5F_ACS_EVICT_ON_CLOSE_FLAG_NAME "evict_on_close_flag" /* Whether or not the metadata cache will evict objects on close */
#define H5F_ACS_CORE_WRITE_TRACKING_PAGE_SIZE_NAME "core_write_tracking_page_size" /* The page size in kiB when core VFD write tracking is enabled */
#define H5F_ACS_COLL_MD_WRITE_FLAG_NAME "collective_metadata_write" /* property indicating whether metadata writes are done collectively or not */
+#define H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_NAME "mdc_initCacheImageCfg" /* Initial metadata cache image creation configuration */
+#define H5F_ACS_PAGE_BUFFER_SIZE_NAME "page_buffer_size" /* the maximum size for the page buffer cache */
+#define H5F_ACS_PAGE_BUFFER_MIN_META_PERC_NAME "page_buffer_min_meta_perc" /* the min metadata percentage for the page buffer cache */
+#define H5F_ACS_PAGE_BUFFER_MIN_RAW_PERC_NAME "page_buffer_min_raw_perc" /* the min raw data percentage for the page buffer cache */
/* ======================== File Mount properties ====================*/
#define H5F_MNT_SYM_LOCAL_NAME "local" /* Whether absolute symlinks local to file. */
@@ -519,14 +537,39 @@
/* See format specification on version 1 B-trees */
/* Default file space handling strategy */
-#define H5F_FILE_SPACE_STRATEGY_DEF H5F_FILE_SPACE_ALL
+#define H5F_FILE_SPACE_STRATEGY_DEF H5F_FSPACE_STRATEGY_FSM_AGGR
+
+/* Default free-space persisting status for free-space managers */
+#define H5F_FREE_SPACE_PERSIST_DEF FALSE
+
/* Default free space section threshold used by free-space managers */
#define H5F_FREE_SPACE_THRESHOLD_DEF 1
+/* For paged aggregation: default file space page size when not set */
+#define H5F_FILE_SPACE_PAGE_SIZE_DEF 4096
+/* For paged aggregation: minimum value for file space page size */
+#define H5F_FILE_SPACE_PAGE_SIZE_MIN 512
+
+/* For paged aggregation: drop free-space with size <= this threshold for small meta section */
+#define H5F_FILE_SPACE_PGEND_META_THRES 10
+
+/* Default for alignment (can be set via H5Pset_alignment()) */
+#define H5F_ALIGN_DEF 1
+/* Default for threshold for alignment (can be set via H5Pset_alignment()) */
+#define H5F_ALIGN_THRHD_DEF 1
+/* Default size for meta data aggregation block (can be set via H5Pset_meta_block_size()) */
+#define H5F_META_BLOCK_SIZE_DEF 2048
+/* Default size for small data aggregation block (can be set via H5Pset_small_data_block_size()) */
+#define H5F_SDATA_BLOCK_SIZE_DEF 2048
+
+/* Check for file using paged aggregation */
+#define H5F_PAGED_AGGR(F) ((F)->shared->fs_strategy == H5F_FSPACE_STRATEGY_PAGE && (F)->shared->fs_page_size)
+
/* Metadata read attempt values */
#define H5F_METADATA_READ_ATTEMPTS 1 /* Default # of read attempts for non-SWMR access */
#define H5F_SWMR_METADATA_READ_ATTEMPTS 100 /* Default # of read attempts for SWMR access */
+
/* Macros to define signatures of all objects in the file */
/* Size of signature information (on disk) */
@@ -624,18 +667,54 @@ typedef struct H5F_object_flush_t {
void *udata; /* User data */
} H5F_object_flush_t;
-/* I/O Info for an operation */
+/* I/O Info for an operation (old) */
typedef struct H5F_io_info_t {
const H5F_t *f; /* File object */
const struct H5P_genplist_t *dxpl; /* DXPL object */
} H5F_io_info_t;
+/* I/O Info for an operation */
+/* (Migrate toward this one, so that both raw data & metadata DXPLs are available) */
+typedef struct H5F_io_info2_t {
+ const H5F_t *f; /* File object */
+ const struct H5P_genplist_t *meta_dxpl; /* Metadata DXPL object */
+ const struct H5P_genplist_t *raw_dxpl; /* Raw data DXPL object */
+} H5F_io_info2_t;
+
/* Concise info about a block of bytes in a file */
typedef struct H5F_block_t {
haddr_t offset; /* Offset of the block in the file */
hsize_t length; /* Length of the block in the file */
} H5F_block_t;
+/* Enum for free space manager state */
+typedef enum H5F_fs_state_t {
+ H5F_FS_STATE_CLOSED = 0, /* Free space manager is closed */
+ H5F_FS_STATE_OPEN = 1, /* Free space manager has been opened */
+ H5F_FS_STATE_DELETING = 2 /* Free space manager is being deleted */
+} H5F_fs_state_t;
+
+/* For paged aggregation */
+/* The values 0 to 6 are the same as H5F_mem_t */
+typedef enum H5F_mem_page_t {
+ H5F_MEM_PAGE_DEFAULT = 0, /* Not used */
+ H5F_MEM_PAGE_SUPER = 1,
+ H5F_MEM_PAGE_BTREE = 2,
+ H5F_MEM_PAGE_DRAW = 3,
+ H5F_MEM_PAGE_GHEAP = 4,
+ H5F_MEM_PAGE_LHEAP = 5,
+ H5F_MEM_PAGE_OHDR = 6,
+ H5F_MEM_PAGE_LARGE_SUPER = 7,
+ H5F_MEM_PAGE_LARGE_BTREE = 8,
+ H5F_MEM_PAGE_LARGE_DRAW = 9,
+ H5F_MEM_PAGE_LARGE_GHEAP = 10,
+ H5F_MEM_PAGE_LARGE_LHEAP = 11,
+ H5F_MEM_PAGE_LARGE_OHDR = 12,
+ H5F_MEM_PAGE_NTYPES = 13 /* Sentinel value - must be last */
+} H5F_mem_page_t;
+
+#define H5F_MEM_PAGE_META H5F_MEM_PAGE_SUPER /* Small-sized meta data */
+#define H5F_MEM_PAGE_GENERIC H5F_MEM_PAGE_LARGE_SUPER /* Large-sized generic: meta and raw */
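
/* Illustrative sketch (not part of this patch): because the first seven
 * H5F_mem_page_t values coincide with H5F_mem_t, a raw H5F_mem_t can be used
 * directly as the "small" page class, and the matching "large" class sits a
 * fixed offset above it. This is only a hypothetical helper showing the enum
 * layout, not the library's actual small/large mapping routine. */
static H5F_mem_page_t
example_page_type(H5F_mem_t type, hbool_t large)
{
    HDassert(type > H5FD_MEM_DEFAULT && type < H5FD_MEM_NTYPES);

    if(large)
        return (H5F_mem_page_t)((int)type + ((int)H5F_MEM_PAGE_LARGE_SUPER - (int)H5F_MEM_PAGE_SUPER));

    return (H5F_mem_page_t)type;
}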
/*****************************/
/* Library-private Variables */
@@ -670,6 +749,10 @@ H5_DLL hid_t H5F_get_access_plist(H5F_t *f, hbool_t app_ref);
H5_DLL hid_t H5F_get_id(H5F_t *file, hbool_t app_ref);
H5_DLL herr_t H5F_get_obj_count(const H5F_t *f, unsigned types, hbool_t app_ref, size_t *obj_id_count_ptr);
H5_DLL herr_t H5F_get_obj_ids(const H5F_t *f, unsigned types, size_t max_objs, hid_t *oid_list, hbool_t app_ref, size_t *obj_id_count_ptr);
+H5_DLL hsize_t H5F_get_pgend_meta_thres(const H5F_t *f);
+H5_DLL hbool_t H5F_get_point_of_no_return(const H5F_t *f);
+H5_DLL hbool_t H5F_get_first_alloc_dealloc(const H5F_t *f);
+H5_DLL haddr_t H5F_get_eoa_pre_fsm_fsalloc(const H5F_t *f);
/* Functions than retrieve values set/cached from the superblock/FCPL */
H5_DLL haddr_t H5F_get_base_addr(const H5F_t *f);
@@ -699,6 +782,9 @@ H5_DLL struct H5UC_t *H5F_grp_btree_shared(const H5F_t *f);
H5_DLL herr_t H5F_set_grp_btree_shared(H5F_t *f, struct H5UC_t *rc);
H5_DLL hbool_t H5F_use_tmp_space(const H5F_t *f);
H5_DLL hbool_t H5F_is_tmp_addr(const H5F_t *f, haddr_t addr);
+H5_DLL herr_t H5F_set_latest_flags(H5F_t *f, unsigned flags);
+H5_DLL hsize_t H5F_get_alignment(const H5F_t *f);
+H5_DLL hsize_t H5F_get_threshold(const H5F_t *f);
#ifdef H5_HAVE_PARALLEL
H5_DLL H5P_coll_md_read_flag_t H5F_coll_md_read(const H5F_t *f);
H5_DLL void H5F_set_coll_md_read(H5F_t *f, H5P_coll_md_read_flag_t flag);
@@ -718,7 +804,7 @@ H5_DLL herr_t H5F_get_vfd_handle(const H5F_t *file, hid_t fapl, void **file_hand
H5_DLL hbool_t H5F_is_mount(const H5F_t *file);
H5_DLL hbool_t H5F_has_mount(const H5F_t *file);
H5_DLL herr_t H5F_traverse_mount(struct H5O_loc_t *oloc/*in,out*/);
-H5_DLL herr_t H5F_flush_mounts(H5F_t *f, hid_t dxpl_id);
+H5_DLL herr_t H5F_flush_mounts(H5F_t *f, hid_t meta_dxpl_id, hid_t raw_dxpl_id);
/* Functions that operate on blocks of bytes wrt super block */
H5_DLL herr_t H5F_block_read(const H5F_t *f, H5FD_mem_t type, haddr_t addr,
@@ -755,6 +841,7 @@ H5_DLL herr_t H5F_fake_free(H5F_t *f);
/* Superblock related routines */
H5_DLL herr_t H5F_super_dirty(H5F_t *f);
+H5_DLL herr_t H5F_eoa_dirty(H5F_t *f, hid_t dxpl_id);
/* Parallel I/O (i.e. MPI) related routines */
#ifdef H5_HAVE_PARALLEL
@@ -763,6 +850,7 @@ H5_DLL int H5F_mpi_get_rank(const H5F_t *f);
H5_DLL MPI_Comm H5F_mpi_get_comm(const H5F_t *f);
H5_DLL int H5F_mpi_get_size(const H5F_t *f);
H5_DLL herr_t H5F_mpi_retrieve_comm(hid_t loc_id, hid_t acspl_id, MPI_Comm *mpi_comm);
+H5_DLL herr_t H5F_get_mpi_info(const H5F_t *f, MPI_Info **f_info);
#endif /* H5_HAVE_PARALLEL */
/* External file cache routines */
diff --git a/src/H5Fpublic.h b/src/H5Fpublic.h
index a79da75..1594cb2 100644
--- a/src/H5Fpublic.h
+++ b/src/H5Fpublic.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -182,6 +180,17 @@ typedef enum H5F_libver_t {
} H5F_libver_t;
/* File space handling strategy */
+typedef enum H5F_fspace_strategy_t {
+ H5F_FSPACE_STRATEGY_FSM_AGGR = 0, /* Mechanisms: free-space managers, aggregators, and virtual file drivers */
+ /* This is the library default when not set */
+ H5F_FSPACE_STRATEGY_PAGE = 1, /* Mechanisms: free-space managers with embedded paged aggregation and virtual file drivers */
+ H5F_FSPACE_STRATEGY_AGGR = 2, /* Mechanisms: aggregators and virtual file drivers */
+ H5F_FSPACE_STRATEGY_NONE = 3, /* Mechanisms: virtual file drivers */
+ H5F_FSPACE_STRATEGY_NTYPES /* must be last */
+} H5F_fspace_strategy_t;
+
+/* Deprecated: File space handling strategy for release 1.10.0 */
+/* They are mapped to H5F_fspace_strategy_t as defined above from release 1.10.1 onwards */
typedef enum H5F_file_space_type_t {
H5F_FILE_SPACE_DEFAULT = 0, /* Default (or current) free space strategy setting */
H5F_FILE_SPACE_ALL_PERSIST = 1, /* Persistent free space managers, aggregators, virtual file driver */
@@ -246,12 +255,18 @@ H5_DLL herr_t H5Fstart_swmr_write(hid_t file_id);
H5_DLL ssize_t H5Fget_free_sections(hid_t file_id, H5F_mem_t type,
size_t nsects, H5F_sect_info_t *sect_info/*out*/);
H5_DLL herr_t H5Fclear_elink_file_cache(hid_t file_id);
+H5_DLL herr_t H5Fset_latest_format(hid_t file_id, hbool_t latest_format);
H5_DLL herr_t H5Fstart_mdc_logging(hid_t file_id);
H5_DLL herr_t H5Fstop_mdc_logging(hid_t file_id);
H5_DLL herr_t H5Fget_mdc_logging_status(hid_t file_id,
/*OUT*/ hbool_t *is_enabled,
/*OUT*/ hbool_t *is_currently_logging);
H5_DLL herr_t H5Fformat_convert(hid_t fid);
+H5_DLL herr_t H5Freset_page_buffering_stats(hid_t file_id);
+H5_DLL herr_t H5Fget_page_buffering_stats(hid_t file_id, unsigned accesses[2],
+ unsigned hits[2], unsigned misses[2], unsigned evictions[2], unsigned bypasses[2]);
+H5_DLL herr_t H5Fget_mdc_image_info(hid_t file_id, haddr_t *image_addr, hsize_t *image_size);
+
#ifdef H5_HAVE_PARALLEL
H5_DLL herr_t H5Fset_mpi_atomicity(hid_t file_id, hbool_t flag);
H5_DLL herr_t H5Fget_mpi_atomicity(hid_t file_id, hbool_t *flag);
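
/* Illustrative sketch (not part of this patch): application-level use of the
 * new public API. It creates a file with the paged file-space strategy and a
 * page buffer, then queries the page-buffering statistics declared above.
 * The H5Pset_* calls are the FCPL/FAPL setters assumed to accompany the
 * property names added elsewhere in this patch, and array index 0 is taken to
 * be metadata while index 1 is raw data. */
#include <stdio.h>
#include "hdf5.h"

static int
example_page_buffer_stats(const char *filename)
{
    hid_t fcpl = H5Pcreate(H5P_FILE_CREATE);
    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
    hid_t file_id = -1;
    unsigned accesses[2], hits[2], misses[2], evictions[2], bypasses[2];

    /* Paged aggregation with persistent free-space managers and 4 KiB pages */
    H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, (hbool_t)1, (hsize_t)1);
    H5Pset_file_space_page_size(fcpl, (hsize_t)4096);

    /* 64 KiB page buffer with at least 20% metadata and 20% raw data pages */
    H5Pset_page_buffer_size(fapl, (size_t)(64 * 1024), 20, 20);

    if((file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl)) < 0)
        return -1;

    /* ... create and write some objects here ... */

    if(H5Fget_page_buffering_stats(file_id, accesses, hits, misses, evictions, bypasses) >= 0)
        printf("page buffer: %u metadata / %u raw data accesses\n", accesses[0], accesses[1]);
    (void)H5Freset_page_buffering_stats(file_id);

    H5Fclose(file_id);
    H5Pclose(fapl);
    H5Pclose(fcpl);

    return 0;
}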
diff --git a/src/H5Fquery.c b/src/H5Fquery.c
index 14dd655..41cf4d2 100644
--- a/src/H5Fquery.c
+++ b/src/H5Fquery.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -1235,3 +1233,150 @@ H5F_mdc_log_location(const H5F_t *f)
FUNC_LEAVE_NOAPI(f->shared->mdc_log_location)
} /* end H5F_mdc_log_location() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5F_get_alignment
+ *
+ * Purpose: Retrieve the 'alignment' for the file.
+ *
+ * Return: Success: Non-negative, the 'alignment'
+ *
+ * Failure: (can't happen)
+ *
+ * Programmer: Vailin Choi; Dec 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+hsize_t
+H5F_get_alignment(const H5F_t *f)
+{
+ /* Use FUNC_ENTER_NOAPI_NOINIT_NOERR here to avoid performance issues */
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ HDassert(f);
+ HDassert(f->shared);
+
+ FUNC_LEAVE_NOAPI(f->shared->alignment)
+} /* end H5F_get_alignment() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5F_get_threshold
+ *
+ * Purpose: Retrieve the 'threshold' for alignment in the file.
+ *
+ * Return: Success: Non-negative, the 'threshold'
+ *
+ * Failure: (can't happen)
+ *
+ * Programmer: Vailin Choi; Dec 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+hsize_t
+H5F_get_threshold(const H5F_t *f)
+{
+ /* Use FUNC_ENTER_NOAPI_NOINIT_NOERR here to avoid performance issues */
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ HDassert(f);
+ HDassert(f->shared);
+
+ FUNC_LEAVE_NOAPI(f->shared->threshold)
+} /* end H5F_get_threshold() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5F_get_pgend_meta_thres
+ *
+ * Purpose: Retrieve the 'page end meta threshold size' for the file.
+ *
+ * Return: Success: Non-negative, the 'pgend_meta_thres'
+ *
+ * Failure: (can't happen)
+ *
+ * Programmer: Vailin Choi; Dec 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+hsize_t
+H5F_get_pgend_meta_thres(const H5F_t *f)
+{
+ /* Use FUNC_ENTER_NOAPI_NOINIT_NOERR here to avoid performance issues */
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ HDassert(f);
+ HDassert(f->shared);
+
+ FUNC_LEAVE_NOAPI(f->shared->pgend_meta_thres)
+} /* end H5F_get_pgend_meta_thres() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5F_get_point_of_no_return
+ *
+ * Purpose: Retrieve the 'point of no return' value for the file.
+ *
+ * Return: Success: Non-negative, the 'point_of_no_return'
+ * Failure: (can't happen)
+ *
+ *-------------------------------------------------------------------------
+ */
+hbool_t
+H5F_get_point_of_no_return(const H5F_t *f)
+{
+ /* Use FUNC_ENTER_NOAPI_NOINIT_NOERR here to avoid performance issues */
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ HDassert(f);
+ HDassert(f->shared);
+
+ FUNC_LEAVE_NOAPI(f->shared->point_of_no_return)
+} /* end H5F_get_point_of_no_return() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5F_get_first_alloc_dealloc
+ *
+ * Purpose: Retrieve the 'first alloc / dealloc' value for the file.
+ *
+ * Return: Success: Non-negative, the 'first_alloc_dealloc'
+ * Failure: (can't happen)
+ *
+ *-------------------------------------------------------------------------
+ */
+hbool_t
+H5F_get_first_alloc_dealloc(const H5F_t *f)
+{
+ /* Use FUNC_ENTER_NOAPI_NOINIT_NOERR here to avoid performance issues */
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ HDassert(f);
+ HDassert(f->shared);
+
+ FUNC_LEAVE_NOAPI(f->shared->first_alloc_dealloc)
+} /* end H5F_get_first_alloc_dealloc() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5F_get_eoa_pre_fsm_fsalloc
+ *
+ * Purpose: Retrieve the 'EOA pre-FSM fsalloc' value for the file.
+ *
+ * Return: Success: Non-negative, the 'EOA pre-FSM fsalloc'
+ * Failure: (can't happen)
+ *
+ *-------------------------------------------------------------------------
+ */
+haddr_t
+H5F_get_eoa_pre_fsm_fsalloc(const H5F_t *f)
+{
+ /* Use FUNC_ENTER_NOAPI_NOINIT_NOERR here to avoid performance issues */
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ HDassert(f);
+ HDassert(f->shared);
+
+ FUNC_LEAVE_NOAPI(f->shared->eoa_pre_fsm_fsalloc)
+} /* end H5F_get_eoa_pre_fsm_fsalloc() */
+
diff --git a/src/H5Fsfile.c b/src/H5Fsfile.c
index 4fb9cd9..e0c830b 100644
--- a/src/H5Fsfile.c
+++ b/src/H5Fsfile.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "H5Fmodule.h" /* This source code file is part of the H5F module */
diff --git a/src/H5Fspace.c b/src/H5Fspace.c
new file mode 100644
index 0000000..0962f69
--- /dev/null
+++ b/src/H5Fspace.c
@@ -0,0 +1,224 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: H5Fspace.c
+ * Dec 30 2013
+ * Quincey Koziol <koziol@hdfgroup.org>
+ *
+ * Purpose: Space allocation routines for the file.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/****************/
+/* Module Setup */
+/****************/
+
+#include "H5Fmodule.h" /* This source code file is part of the H5F module */
+
+
+/***********/
+/* Headers */
+/***********/
+#include "H5private.h" /* Generic Functions */
+#include "H5Eprivate.h" /* Error handling */
+#include "H5Fpkg.h" /* File access */
+#include "H5FDprivate.h" /* File drivers */
+
+
+/****************/
+/* Local Macros */
+/****************/
+
+
+/******************/
+/* Local Typedefs */
+/******************/
+
+
+/********************/
+/* Package Typedefs */
+/********************/
+
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+
+/*********************/
+/* Package Variables */
+/*********************/
+
+
+/*****************************/
+/* Library Private Variables */
+/*****************************/
+
+
+/*******************/
+/* Local Variables */
+/*******************/
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5F_alloc
+ *
+ * Purpose: Wrapper for H5FD_alloc, to make certain EOA changes are
+ * reflected in superblock.
+ *
+ * Note: When the metadata cache routines are updated to allow
+ * marking an entry dirty without a H5F_t*, this routine should
+ * be changed to take a H5F_super_t* directly.
+ *
+ * Return: Success: The format address of the new file memory.
+ * Failure: The undefined address HADDR_UNDEF
+ *
+ * Programmer: Quincey Koziol
+ * Monday, December 30, 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+haddr_t
+H5F_alloc(H5F_t *f, hid_t dxpl_id, H5F_mem_t type, hsize_t size, haddr_t *frag_addr, hsize_t *frag_size)
+{
+ haddr_t ret_value = 0; /* Return value */
+
+ FUNC_ENTER_NOAPI(HADDR_UNDEF)
+
+ /* check args */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(f->shared->lf);
+ HDassert(type >= H5FD_MEM_DEFAULT && type < H5FD_MEM_NTYPES);
+ HDassert(size > 0);
+
+ /* Check whether the file can use temporary addresses */
+ if(f->shared->use_tmp_space) {
+ haddr_t eoa; /* Current EOA for the file */
+
+ /* Get the EOA for the file */
+ if(HADDR_UNDEF == (eoa = H5F_get_eoa(f, type)))
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, HADDR_UNDEF, "unable to get EOA")
+
+ /* Check for overlapping into file's temporary allocation space */
+ if(H5F_addr_gt((eoa + size), f->shared->tmp_addr))
+ HGOTO_ERROR(H5E_FILE, H5E_BADRANGE, HADDR_UNDEF, "'normal' file space allocation request will overlap into 'temporary' file space")
+ } /* end if */
+
+ /* Call the file driver 'alloc' routine */
+ ret_value = H5FD_alloc(f->shared->lf, dxpl_id, type, f, size, frag_addr, frag_size);
+ if(!H5F_addr_defined(ret_value))
+ HGOTO_ERROR(H5E_FILE, H5E_CANTALLOC, HADDR_UNDEF, "file driver 'alloc' request failed")
+
+ /* Mark EOA dirty */
+ if(H5F_eoa_dirty(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTMARKDIRTY, HADDR_UNDEF, "unable to mark EOA as dirty")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5F_alloc() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5F_free
+ *
+ * Purpose: Wrapper for H5FD_free, to make certain EOA changes are
+ * reflected in superblock.
+ *
+ * Note: When the metadata cache routines are updated to allow
+ * marking an entry dirty without a H5F_t*, this routine should
+ * be changed to take a H5F_super_t* directly.
+ *
+ * Return: Success: Non-negative
+ * Failure: Negative
+ *
+ * Programmer: Quincey Koziol
+ * Monday, December 30, 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5F_free(H5F_t *f, hid_t dxpl_id, H5FD_mem_t type, haddr_t addr, hsize_t size)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Check args */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(f->shared->lf);
+ HDassert(type >= H5FD_MEM_DEFAULT && type < H5FD_MEM_NTYPES);
+ HDassert(size > 0);
+
+ /* Call the file driver 'free' routine */
+ if(H5FD_free(f->shared->lf, dxpl_id, type, f, addr, size) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTFREE, FAIL, "file driver 'free' request failed")
+
+ /* Mark EOA dirty */
+ if(H5F_eoa_dirty(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTMARKDIRTY, FAIL, "unable to mark EOA as dirty")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5F_free() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5F_try_extend
+ *
+ * Purpose: Extend a block at the end of the file, if possible.
+ *
+ * Note: When the metadata cache routines are updated to allow
+ * marking an entry dirty without a H5F_t*, this routine should
+ * be changed to take a H5F_super_t* directly.
+ *
+ * Return: Success: TRUE(1) - Block was extended
+ * FALSE(0) - Block could not be extended
+ * Failure: FAIL
+ *
+ * Programmer: Quincey Koziol
+ * Monday, 30 December, 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+htri_t
+H5F_try_extend(H5F_t *f, hid_t dxpl_id, H5FD_mem_t type, haddr_t blk_end, hsize_t extra_requested)
+{
+ htri_t ret_value = FALSE; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* check args */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(f->shared->lf);
+ HDassert(type >= H5FD_MEM_DEFAULT && type < H5FD_MEM_NTYPES);
+ HDassert(extra_requested > 0);
+
+ /* Extend the object by extending the underlying file */
+ if((ret_value = H5FD_try_extend(f->shared->lf, type, f, dxpl_id, blk_end, extra_requested)) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTEXTEND, FAIL, "driver try extend request failed")
+
+ /* H5FD_try_extend() updates driver message and marks the superblock
+ * dirty, so no need to do it again here.
+ */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5F_try_extend() */
+
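
The three routines above are thin wrappers over the file-driver layer that also keep the on-disk EOA information in sync. Below is a minimal, library-internal sketch of how a caller might use them; the helper name, the H5FD_MEM_DRAW memory type, and the 4 KiB size are illustrative assumptions, and the H5F_t/haddr_t declarations come from the same private headers included in H5Fspace.c above.

/* Illustrative sketch only (library-internal, assumes H5private.h, H5Fpkg.h
 * and H5FDprivate.h as included above): allocate a raw-data block through
 * H5F_alloc(), which forwards to H5FD_alloc() and marks the EOA dirty so the
 * superblock (or driver info message) records the new EOA, then release the
 * block again with H5F_free().
 */
static herr_t
example_alloc_and_free(H5F_t *f, hid_t dxpl_id)
{
    haddr_t addr;                      /* Address of the allocated block */
    haddr_t frag_addr = HADDR_UNDEF;   /* Fragment created by alignment, if any */
    hsize_t frag_size = 0;             /* Size of that fragment */

    /* Hypothetical 4 KiB raw-data allocation */
    if(HADDR_UNDEF == (addr = H5F_alloc(f, dxpl_id, H5FD_MEM_DRAW, (hsize_t)4096, &frag_addr, &frag_size)))
        return FAIL;

    /* ... the block is used here ... */

    /* Give the space back; the EOA is marked dirty again */
    if(H5F_free(f, dxpl_id, H5FD_MEM_DRAW, addr, (hsize_t)4096) < 0)
        return FAIL;

    return SUCCEED;
} /* end example_alloc_and_free() */
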
diff --git a/src/H5Fsuper.c b/src/H5Fsuper.c
index 2a82618..7c70a64 100644
--- a/src/H5Fsuper.c
+++ b/src/H5Fsuper.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/****************/
@@ -29,6 +27,7 @@
#include "H5Fpkg.h" /* File access */
#include "H5FDprivate.h" /* File drivers */
#include "H5Iprivate.h" /* IDs */
+#include "H5MFprivate.h" /* File memory management */
#include "H5MMprivate.h" /* Memory management */
#include "H5Pprivate.h" /* Property lists */
#include "H5SMprivate.h" /* Shared Object Header Messages */
@@ -53,6 +52,7 @@
/* Local Prototypes */
/********************/
static herr_t H5F_super_ext_create(H5F_t *f, hid_t dxpl_id, H5O_loc_t *ext_ptr);
+static herr_t H5F__update_super_ext_driver_msg(H5F_t *f, hid_t dxpl_id);
/*********************/
@@ -222,6 +222,86 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5F__update_super_ext_driver_msg
+ *
+ * Purpose: Update the superblock extension file driver info message if
+ * we are using a V 2 superblock. Observe that the function
+ * we are using a version 2 superblock. Observe that the function
+ * This is necessary, as the function is called whenever the
+ * EOA is updated, and were it to create the file driver info
+ * message, it would find itself in an infinite recursion.
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: John Mainzer
+ * 11/10/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5F__update_super_ext_driver_msg(H5F_t *f, hid_t dxpl_id)
+{
+ H5F_super_t *sblock; /* Pointer to the super block */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity check */
+ HDassert(f);
+ HDassert(f->shared);
+ sblock = f->shared->sblock;
+ HDassert(sblock);
+ HDassert(sblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(sblock->cache_info.type == H5AC_SUPERBLOCK);
+
+ /* Update the driver information message in the superblock extension
+ * if appropriate.
+ */
+ if(sblock->super_vers >= HDF5_SUPERBLOCK_VERSION_2) {
+ if(H5F_addr_defined(sblock->ext_addr)) {
+ /* Check for ignoring the driver info for this file */
+ if(!H5F_HAS_FEATURE(f, H5FD_FEAT_IGNORE_DRVRINFO)) {
+ size_t driver_size; /* Size of driver info block (bytes)*/
+
+ /* Check for driver info */
+ H5_CHECKED_ASSIGN(driver_size, size_t, H5FD_sb_size(f->shared->lf), hsize_t);
+
+ /* Nothing to do unless driver info is present and the driver info
+ * superblock extension message has already been created.
+ */
+ if(driver_size > 0) {
+ H5O_drvinfo_t drvinfo; /* Driver info */
+ uint8_t dbuf[H5F_MAX_DRVINFOBLOCK_SIZE]; /* Driver info block encoding buffer */
+
+ /* Sanity check */
+ HDassert(driver_size <= H5F_MAX_DRVINFOBLOCK_SIZE);
+
+ /* Encode driver-specific data */
+ if(H5FD_sb_encode(f->shared->lf, drvinfo.name, dbuf) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "unable to encode driver information")
+
+ /* Write the message to the superblock extension.
+ *
+ * Note that the superblock extension and the
+ * file driver info message must already exist.
+ */
+ drvinfo.len = driver_size;
+ drvinfo.buf = dbuf;
+ if(H5F_super_ext_write_msg(f, dxpl_id, H5O_DRVINFO_ID, &drvinfo, FALSE, H5O_MSG_NO_FLAGS_SET) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_WRITEERROR, FAIL, "unable to update driver info header message")
+ } /* end if driver_size > 0 */
+ } /* end if !H5F_HAS_FEATURE(f, H5FD_FEAT_IGNORE_DRVRINFO) */
+ } /* end if superblock extension exists */
+ } /* end if sblock->super_vers >= HDF5_SUPERBLOCK_VERSION_2 */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5F__update_super_ext_driver_msg() */
+
+
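
For context, the update path above matters for virtual file drivers that record driver info in the file, such as the family or multi VFDs. The following is a hedged, self-contained sketch using the public API (the file name and member size are arbitrary); with the latest library-version bounds the superblock is version 2 or later, so the driver info is kept in the superblock extension message that H5F__update_super_ext_driver_msg() re-encodes whenever the EOA changes.

#include "hdf5.h"

int
main(void)
{
    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
    hid_t file;

    /* Family VFD with 1 MiB members; driver info is recorded in the file */
    H5Pset_fapl_family(fapl, (hsize_t)(1024 * 1024), H5P_DEFAULT);

    /* Request the latest file format, giving a version >= 2 superblock */
    H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);

    file = H5Fcreate("family_%d.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);

    /* ... creating and writing datasets grows the file and, through the
     * EOA-dirty path, keeps the driver info message up to date ... */

    H5Fclose(file);
    H5Pclose(fapl);

    return 0;
}
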
+/*-------------------------------------------------------------------------
* Function: H5F__super_read
*
* Purpose: Reads the superblock from the file or from the BUF. If
@@ -239,13 +319,14 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5F__super_read(H5F_t *f, hid_t dxpl_id, hbool_t initial_read)
+H5F__super_read(H5F_t *f, hid_t meta_dxpl_id, hid_t raw_dxpl_id, hbool_t initial_read)
{
H5P_genplist_t *dxpl = NULL; /* DXPL object */
H5AC_ring_t ring, orig_ring = H5AC_RING_INV;
H5F_super_t * sblock = NULL; /* Superblock structure */
H5F_superblock_cache_ud_t udata; /* User data for cache callbacks */
H5P_genplist_t *c_plist; /* File creation property list */
+ H5FD_io_info_t fdio_info; /* File driver I/O info */
unsigned sblock_flags = H5AC__NO_FLAGS_SET; /* flags used in superblock unprotect call */
haddr_t super_addr; /* Absolute address of superblock */
haddr_t eof; /* End of file address */
@@ -253,7 +334,7 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id, hbool_t initial_read)
hbool_t skip_eof_check = FALSE; /* Whether to skip checking the EOF value */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_PACKAGE_TAG(dxpl_id, H5AC__SUPERBLOCK_TAG, FAIL)
+ FUNC_ENTER_PACKAGE_TAG(meta_dxpl_id, H5AC__SUPERBLOCK_TAG, FAIL)
/* initialize the drvinfo to NULL -- we will overwrite this if there
* is a driver information block
@@ -261,13 +342,19 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id, hbool_t initial_read)
f->shared->drvinfo = NULL;
/* Get the DXPL plist object for DXPL ID */
- if(NULL == (dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
+ if(NULL == (dxpl = (H5P_genplist_t *)H5I_object(meta_dxpl_id)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
if((H5P_get(dxpl, H5AC_RING_NAME, &orig_ring)) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "unable to get property value");
+ /* Set up file driver I/O info */
+ fdio_info.file = f->shared->lf;
+ fdio_info.meta_dxpl = dxpl;
+ if(NULL == (fdio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(raw_dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+
/* Find the superblock */
- if(H5FD_locate_signature(f->shared->lf, dxpl, &super_addr) < 0)
+ if(H5FD_locate_signature(&fdio_info, &super_addr) < 0)
HGOTO_ERROR(H5E_FILE, H5E_NOTHDF5, FAIL, "unable to locate file signature")
if(HADDR_UNDEF == super_addr)
HGOTO_ERROR(H5E_FILE, H5E_NOTHDF5, FAIL, "file signature not found")
@@ -315,7 +402,7 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id, hbool_t initial_read)
HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "unable to set property value");
/* Look up the superblock */
- if(NULL == (sblock = (H5F_super_t *)H5AC_protect(f, dxpl_id, H5AC_SUPERBLOCK, (haddr_t)0, &udata, rw_flags)))
+ if(NULL == (sblock = (H5F_super_t *)H5AC_protect(f, meta_dxpl_id, H5AC_SUPERBLOCK, (haddr_t)0, &udata, rw_flags)))
HGOTO_ERROR(H5E_FILE, H5E_CANTPROTECT, FAIL, "unable to load superblock")
if(H5F_INTENT(f) & H5F_ACC_SWMR_WRITE)
@@ -475,7 +562,7 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id, hbool_t initial_read)
HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "set end of space allocation request failed")
/* Look up the driver info block */
- if(NULL == (drvinfo = (H5O_drvinfo_t *)H5AC_protect(f, dxpl_id, H5AC_DRVRINFO, sblock->driver_addr, &drvrinfo_udata, rw_flags)))
+ if(NULL == (drvinfo = (H5O_drvinfo_t *)H5AC_protect(f, meta_dxpl_id, H5AC_DRVRINFO, sblock->driver_addr, &drvrinfo_udata, rw_flags)))
HGOTO_ERROR(H5E_FILE, H5E_CANTPROTECT, FAIL, "unable to load driver info block")
/* Loading the driver info block is enough to set up the right info */
@@ -490,7 +577,7 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id, hbool_t initial_read)
drvinfo_flags |= H5AC__PIN_ENTRY_FLAG;
/* Release the driver info block */
- if(H5AC_unprotect(f, dxpl_id, H5AC_DRVRINFO, sblock->driver_addr, drvinfo, drvinfo_flags) < 0)
+ if(H5AC_unprotect(f, meta_dxpl_id, H5AC_DRVRINFO, sblock->driver_addr, drvinfo, drvinfo_flags) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTUNPROTECT, FAIL, "unable to release driver info block")
/* save a pointer to the driver information cache entry */
@@ -531,14 +618,14 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id, hbool_t initial_read)
HGOTO_ERROR(H5E_FILE, H5E_CANTOPENOBJ, FAIL, "unable to open file's superblock extension")
/* Check for the extension having a 'driver info' message */
- if((status = H5O_msg_exists(&ext_loc, H5O_DRVINFO_ID, dxpl_id)) < 0)
+ if((status = H5O_msg_exists(&ext_loc, H5O_DRVINFO_ID, meta_dxpl_id)) < 0)
HGOTO_ERROR(H5E_FILE, H5E_EXISTS, FAIL, "unable to read object header")
if(status) {
/* Check for ignoring the driver info for this file */
if(!udata.ignore_drvrinfo) {
/* Retrieve the 'driver info' structure */
- if(NULL == H5O_msg_read(&ext_loc, H5O_DRVINFO_ID, &drvinfo, dxpl_id))
+ if(NULL == H5O_msg_read(&ext_loc, H5O_DRVINFO_ID, &drvinfo, meta_dxpl_id))
HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "driver info message not present")
/* Validate and decode driver information */
@@ -549,19 +636,22 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id, hbool_t initial_read)
/* Reset driver info message */
H5O_msg_reset(H5O_DRVINFO_ID, &drvinfo);
+
+ HDassert(FALSE == f->shared->drvinfo_sb_msg_exists);
+ f->shared->drvinfo_sb_msg_exists = TRUE;
} /* end else */
} /* end if */
/* Read in the shared OH message information if there is any */
- if(H5SM_get_info(&ext_loc, c_plist, dxpl_id) < 0)
+ if(H5SM_get_info(&ext_loc, c_plist, meta_dxpl_id) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to read SOHM table information")
/* Check for the extension having a 'v1 B-tree "K"' message */
- if((status = H5O_msg_exists(&ext_loc, H5O_BTREEK_ID, dxpl_id)) < 0)
+ if((status = H5O_msg_exists(&ext_loc, H5O_BTREEK_ID, meta_dxpl_id)) < 0)
HGOTO_ERROR(H5E_FILE, H5E_EXISTS, FAIL, "unable to read object header")
if(status) {
/* Retrieve the 'v1 B-tree "K"' structure */
- if(NULL == H5O_msg_read(&ext_loc, H5O_BTREEK_ID, &btreek, dxpl_id))
+ if(NULL == H5O_msg_read(&ext_loc, H5O_BTREEK_ID, &btreek, meta_dxpl_id))
HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "v1 B-tree 'K' info message not present")
/* Set non-default v1 B-tree 'K' value info from file */
@@ -577,39 +667,133 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id, hbool_t initial_read)
} /* end if */
/* Check for the extension having a 'free-space manager info' message */
- if((status = H5O_msg_exists(&ext_loc, H5O_FSINFO_ID, dxpl_id)) < 0)
+ if((status = H5O_msg_exists(&ext_loc, H5O_FSINFO_ID, meta_dxpl_id)) < 0)
HGOTO_ERROR(H5E_FILE, H5E_EXISTS, FAIL, "unable to read object header")
if(status) {
- H5O_fsinfo_t fsinfo; /* Free-space manager info message from superblock extension */
+ H5O_fsinfo_t fsinfo; /* File space info message from superblock extension */
+ uint8_t flags; /* Message flags */
- /* Retrieve the 'free-space manager info' structure */
- if(NULL == H5O_msg_read(&ext_loc, H5O_FSINFO_ID, &fsinfo, dxpl_id))
- HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to get free-space manager info message")
+ /* Get message flags */
+ if(H5O_msg_get_flags(&ext_loc, H5O_FSINFO_ID, meta_dxpl_id, &flags) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to get message flags for free-space manager info message")
- /* Check for non-default info */
- if(f->shared->fs_strategy != fsinfo.strategy) {
- f->shared->fs_strategy = fsinfo.strategy;
+ /* If message is NOT marked "unknown"--set up file space info */
+ if(!(flags & H5O_MSG_FLAG_WAS_UNKNOWN)) {
- /* Set non-default strategy in the property list */
- if(H5P_set(c_plist, H5F_CRT_FILE_SPACE_STRATEGY_NAME, &fsinfo.strategy) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set file space strategy")
- } /* end if */
- if(f->shared->fs_threshold != fsinfo.threshold) {
- f->shared->fs_threshold = fsinfo.threshold;
+ /* Retrieve the 'file space info' structure */
+ if(NULL == H5O_msg_read(&ext_loc, H5O_FSINFO_ID, &fsinfo, meta_dxpl_id))
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to get free-space manager info message")
- /* Set non-default threshold in the property list */
- if(H5P_set(c_plist, H5F_CRT_FREE_SPACE_THRESHOLD_NAME, &fsinfo.threshold) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set file space strategy")
- } /* end if */
+ /* Update changed values */
+ if(f->shared->fs_strategy != fsinfo.strategy) {
+ f->shared->fs_strategy = fsinfo.strategy;
+
+ /* Set non-default strategy in the property list */
+ if(H5P_set(c_plist, H5F_CRT_FILE_SPACE_STRATEGY_NAME, &fsinfo.strategy) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set file space strategy")
+ } /* end if */
+ if(f->shared->fs_persist != fsinfo.persist) {
+ f->shared->fs_persist = fsinfo.persist;
+
+ /* Set non-default free-space persist flag in the property list */
+ if(H5P_set(c_plist, H5F_CRT_FREE_SPACE_PERSIST_NAME, &fsinfo.persist) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set free-space persist flag")
+ } /* end if */
+ if(f->shared->fs_threshold != fsinfo.threshold) {
+ f->shared->fs_threshold = fsinfo.threshold;
+
+ /* Set non-default threshold in the property list */
+ if(H5P_set(c_plist, H5F_CRT_FREE_SPACE_THRESHOLD_NAME, &fsinfo.threshold) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set free-space threshold")
+ } /* end if */
+
+ HDassert(f->shared->fs_page_size >= H5F_FILE_SPACE_PAGE_SIZE_MIN);
+ HDassert(fsinfo.page_size >= H5F_FILE_SPACE_PAGE_SIZE_MIN);
+ if(f->shared->fs_page_size != fsinfo.page_size) {
+ f->shared->fs_page_size = fsinfo.page_size;
+
+ /* Set file space page size in the property list */
+ if(H5P_set(c_plist, H5F_CRT_FILE_SPACE_PAGE_SIZE_NAME, &fsinfo.page_size) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set file space page size")
+ } /* end if */
+ if(f->shared->pgend_meta_thres != fsinfo.pgend_meta_thres)
+ /* Initialize page end meta threshold */
+ f->shared->pgend_meta_thres = fsinfo.pgend_meta_thres;
+
+ if(f->shared->eoa_pre_fsm_fsalloc != fsinfo.eoa_pre_fsm_fsalloc)
+ f->shared->eoa_pre_fsm_fsalloc = fsinfo.eoa_pre_fsm_fsalloc;
+
+ /* f->shared->eoa_pre_fsm_fsalloc must always be HADDR_UNDEF
+ * in the absence of persistent free space managers.
+ */
+ HDassert((!f->shared->fs_persist) || (f->shared->eoa_pre_fsm_fsalloc != HADDR_UNDEF));
+ HDassert(!f->shared->first_alloc_dealloc);
+
+ if((f->shared->eoa_pre_fsm_fsalloc != HADDR_UNDEF) &&
+ (H5F_INTENT(f) & H5F_ACC_RDWR))
+ f->shared->first_alloc_dealloc = TRUE;
+
+ f->shared->fs_addr[0] = HADDR_UNDEF;
+ for(u = 1; u < NELMTS(f->shared->fs_addr); u++)
+ f->shared->fs_addr[u] = fsinfo.fs_addr[u - 1];
+
+ if(fsinfo.mapped && (rw_flags & H5AC__READ_ONLY_FLAG) == 0) {
+
+ /* Do the same kluge until we know for sure. VC */
+#if 1 /* bug fix test code -- tidy this up if all goes well */ /* JRM */
+ /* KLUGE ALERT!!
+ *
+ * H5F_super_ext_write_msg() expects f->shared->sblock to
+ * be set -- verify that it is NULL, and then set it.
+ * Set it back to NULL when we are done.
+ */
+ HDassert(f->shared->sblock == NULL);
+ f->shared->sblock = sblock;
+#endif /* JRM */
+
+ if(H5F_super_ext_remove_msg(f, meta_dxpl_id, H5O_FSINFO_ID) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTDELETE, FAIL, "error in removing message from superblock extension")
+
+ if(H5F_super_ext_write_msg(f, meta_dxpl_id, H5O_FSINFO_ID, &fsinfo, TRUE, H5O_MSG_FLAG_MARK_IF_UNKNOWN) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_WRITEERROR, FAIL, "error in writing fsinfo message to superblock extension")
+#if 1 /* bug fix test code -- tidy this up if all goes well */ /* JRM */
+ f->shared->sblock = NULL;
+#endif /* JRM */
+
+ }
+ } /* end if not marked "unknown" */
+ } /* end if */
+
+ /* Check for the extension having a 'metadata cache image' message */
+ if((status = H5O_msg_exists(&ext_loc, H5O_MDCI_MSG_ID, meta_dxpl_id)) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_EXISTS, FAIL, "unable to read object header")
+ if(status) {
+ hbool_t rw = ((rw_flags & H5AC__READ_ONLY_FLAG) == 0);
+ H5O_mdci_t mdci_msg;
+
+ /* If the metadata cache image superblock extension message exists,
+ * read its contents and pass the data on to the metadata cache.
+ * Given this data, the cache will load and decode the metadata
+ * cache image block and load its contents into the cache on the
+ * next protect call.
+ *
+ * Further, if the file is opened R/W, the metadata cache will
+ * delete the metadata cache image superblock extension and free
+ * the cache image block. Don't do this now, as f->shared
+ * is not fully set up, which complicates matters.
+ */
- /* Set free-space manager addresses */
- f->shared->fs_addr[0] = HADDR_UNDEF;
- for(u = 1; u < NELMTS(f->shared->fs_addr); u++)
- f->shared->fs_addr[u] = fsinfo.fs_addr[u-1];
+ /* Retrieve the 'metadata cache image message' structure */
+ if(NULL == H5O_msg_read(&ext_loc, H5O_MDCI_MSG_ID, &mdci_msg, meta_dxpl_id))
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to get metadata cache image message")
+
+ /* Indicate to the cache that there's an image to load on first protect call */
+ if(H5AC_load_cache_image_on_next_protect(f, mdci_msg.addr, mdci_msg.size, rw) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTLOAD, FAIL, "call to H5AC_load_cache_image_on_next_protect failed");
} /* end if */
/* Close superblock extension */
- if(H5F_super_ext_close(f, &ext_loc, dxpl_id, FALSE) < 0)
+ if(H5F_super_ext_close(f, &ext_loc, meta_dxpl_id, FALSE) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTCLOSEOBJ, FAIL, "unable to close file's superblock extension")
} /* end if */
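
The new 'metadata cache image' branch above is only exercised when cache imaging was enabled at file-close time, which writes the H5O_MDCI_MSG_ID message that this code reads back. A hedged sketch using the public API, assuming the released H5Pset_mdc_image_config() interface and H5AC_cache_image_config_t layout:

#include "hdf5.h"

int
main(void)
{
    H5AC_cache_image_config_t img_cfg;
    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
    hid_t file;

    /* Ask the metadata cache to serialize itself into a cache image on
     * close; the image address and size are then stored in the metadata
     * cache image superblock extension message and handed to the cache
     * by the code above on the next open. */
    img_cfg.version = H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION;
    img_cfg.generate_image = 1;
    img_cfg.save_resize_status = 0;
    img_cfg.entry_ageout = H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE;
    H5Pset_mdc_image_config(fapl, &img_cfg);

    file = H5Fcreate("cache_image.h5", H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
    /* ... normal I/O ... */
    H5Fclose(file);    /* cache image written here */
    H5Pclose(fapl);

    return 0;
}
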
@@ -653,8 +837,7 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id, hbool_t initial_read)
HDassert(f->shared->sblock == NULL);
f->shared->sblock = sblock;
#endif /* JRM */
-
- if(H5F_super_ext_write_msg(f, dxpl_id, H5O_DRVINFO_ID, &drvinfo, FALSE) < 0)
+ if(H5F_super_ext_write_msg(f, meta_dxpl_id, H5O_DRVINFO_ID, &drvinfo, FALSE, H5O_MSG_NO_FLAGS_SET) < 0)
HGOTO_ERROR(H5E_FILE, H5E_WRITEERROR, FAIL, "error in writing message to superblock extension")
#if 1 /* bug fix test code -- tidy this up if all goes well */ /* JRM */
@@ -666,7 +849,7 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id, hbool_t initial_read)
/* Check for eliminating the driver info block */
else if(H5F_HAS_FEATURE(f, H5FD_FEAT_IGNORE_DRVRINFO)) {
/* Remove the driver info message from the superblock extension */
- if(H5F_super_ext_remove_msg(f, dxpl_id, H5O_DRVINFO_ID) < 0)
+ if(H5F_super_ext_remove_msg(f, meta_dxpl_id, H5O_DRVINFO_ID) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTRELEASE, FAIL, "error in removing message from superblock extension")
/* Check if the superblock extension was removed */
@@ -678,13 +861,17 @@ H5F__super_read(H5F_t *f, hid_t dxpl_id, hbool_t initial_read)
/* Set the pointer to the pinned superblock */
f->shared->sblock = sblock;
+ /* Set the page aggregation mode */
+ if(H5F__set_paged_aggr(f, (hbool_t)H5F_PAGED_AGGR(f)) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "failed to set paged_aggr status for file driver")
+
done:
/* Reset the ring in the DXPL */
if(H5AC_reset_ring(dxpl, orig_ring) < 0)
HDONE_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set property value")
/* Release the superblock */
- if(sblock && H5AC_unprotect(f, dxpl_id, H5AC_SUPERBLOCK, (haddr_t)0, sblock, sblock_flags) < 0)
+ if(sblock && H5AC_unprotect(f, meta_dxpl_id, H5AC_SUPERBLOCK, (haddr_t)0, sblock, sblock_flags) < 0)
HDONE_ERROR(H5E_FILE, H5E_CANTUNPROTECT, FAIL, "unable to close superblock")
/* If we have failed, make sure no entries are left in the
@@ -697,7 +884,7 @@ done:
HDONE_ERROR(H5E_FILE, H5E_CANTUNPIN, FAIL, "unable to unpin driver info")
/* Evict the driver info block from the cache */
- if(H5AC_expunge_entry(f, dxpl_id, H5AC_DRVRINFO, sblock->driver_addr, H5AC__NO_FLAGS_SET) < 0)
+ if(H5AC_expunge_entry(f, meta_dxpl_id, H5AC_DRVRINFO, sblock->driver_addr, H5AC__NO_FLAGS_SET) < 0)
HDONE_ERROR(H5E_FILE, H5E_CANTEXPUNGE, FAIL, "unable to expunge driver info block")
} /* end if */
@@ -708,7 +895,7 @@ done:
HDONE_ERROR(H5E_FILE, H5E_CANTUNPIN, FAIL, "unable to unpin superblock")
/* Evict the superblock from the cache */
- if(H5AC_expunge_entry(f, dxpl_id, H5AC_SUPERBLOCK, (haddr_t)0, H5AC__NO_FLAGS_SET) < 0)
+ if(H5AC_expunge_entry(f, meta_dxpl_id, H5AC_SUPERBLOCK, (haddr_t)0, H5AC__NO_FLAGS_SET) < 0)
HDONE_ERROR(H5E_FILE, H5E_CANTEXPUNGE, FAIL, "unable to expunge superblock")
} /* end if */
} /* end if */
@@ -750,6 +937,7 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id)
H5O_loc_t ext_loc; /* Superblock extension object location */
hbool_t need_ext; /* Whether the superblock extension is needed */
hbool_t ext_created = FALSE; /* Whether the extension has been created */
+ hbool_t non_default_fs_settings = FALSE; /* Whether the file has non-default free-space settings */
herr_t ret_value = SUCCEED; /* Return Value */
FUNC_ENTER_PACKAGE_TAG(dxpl_id, H5AC__SUPERBLOCK_TAG, FAIL)
@@ -776,17 +964,27 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id)
if(H5P_get(plist, H5F_CRT_BTREE_RANK_NAME, &sblock->btree_k[0]) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "unable to get rank for btree internal nodes")
+ /* Check for non-default free-space settings */
+ if(!(f->shared->fs_strategy == H5F_FILE_SPACE_STRATEGY_DEF &&
+ f->shared->fs_persist == H5F_FREE_SPACE_PERSIST_DEF &&
+ f->shared->fs_threshold == H5F_FREE_SPACE_THRESHOLD_DEF &&
+ f->shared->fs_page_size == H5F_FILE_SPACE_PAGE_SIZE_DEF))
+ non_default_fs_settings = TRUE;
+
/* Bump superblock version if latest superblock version support is enabled */
if(H5F_USE_LATEST_FLAGS(f, H5F_LATEST_SUPERBLOCK))
super_vers = HDF5_SUPERBLOCK_VERSION_LATEST;
/* Bump superblock version to create superblock extension for SOHM info */
else if(f->shared->sohm_nindexes > 0)
super_vers = HDF5_SUPERBLOCK_VERSION_2;
- /* Bump superblock version to create superblock extension for
- * non-default file space strategy or non-default free-space threshold
+ /*
+ * Bump superblock version to create superblock extension for:
+ * -- non-default file space strategy or
+ * -- non-default persisting free-space or
+ * -- non-default free-space threshold or
+ * -- non-default page_size
*/
- else if(f->shared->fs_strategy != H5F_FILE_SPACE_STRATEGY_DEF ||
- f->shared->fs_threshold != H5F_FREE_SPACE_THRESHOLD_DEF)
+ else if(non_default_fs_settings)
super_vers = HDF5_SUPERBLOCK_VERSION_2;
/* Check for non-default indexed storage B-tree internal 'K' value
* and set the version # of the superblock to 1 if it is a non-default
@@ -805,6 +1003,9 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id)
HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "unable to set superblock version")
} /* end if */
+ if(H5FD_set_paged_aggr(f->shared->lf, (hbool_t)H5F_PAGED_AGGR(f)) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "failed to set paged_aggr status for file driver")
+
/*
* The superblock starts immediately after the user-defined
* header, which we have already insured is a proper size. The
@@ -816,9 +1017,12 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id)
/* Sanity check the userblock size vs. the file's allocation alignment */
if(userblock_size > 0) {
- if(userblock_size < f->shared->alignment)
+ /* Set up the alignment to use for page or aggr fs */
+ hsize_t alignment = H5F_PAGED_AGGR(f) ? f->shared->fs_page_size : f->shared->alignment;
+
+ if(userblock_size < alignment)
HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, FAIL, "userblock size must be > file object alignment")
- if(0 != (userblock_size % f->shared->alignment))
+ if(0 != (userblock_size % alignment))
HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, FAIL, "userblock size must be an integral multiple of file object alignment")
} /* end if */
@@ -845,8 +1049,18 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id)
/* Compute the size of the driver information block */
H5_CHECKED_ASSIGN(driver_size, size_t, H5FD_sb_size(f->shared->lf), hsize_t);
+
+ /* The following code sets driver_size to the value needed
+ * for the driver info block, and sets the driver info block
+ * address regardless of the version of the superblock.
+ */
if(driver_size > 0) {
- driver_size += H5F_DRVINFOBLOCK_HDR_SIZE;
+ /* Add in the driver info header, for older superblocks */
+ /* Superblock versions >= 2 will put the driver info in a message
+ * and don't need the header -QAK, 1/4/2017
+ */
+ if(super_vers < HDF5_SUPERBLOCK_VERSION_2)
+ driver_size += H5F_DRVINFOBLOCK_HDR_SIZE;
/*
* The file driver information block begins immediately after the
@@ -864,10 +1078,6 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id)
if(super_vers < HDF5_SUPERBLOCK_VERSION_2)
superblock_size += driver_size;
- /* Reserve space in the file for the superblock, instead of allocating it */
- if(H5F__set_eoa(f, H5FD_MEM_SUPER, superblock_size) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "unable to set EOA value for superblock")
-
/* Set the ring type in the DXPL */
if(H5AC_set_ring(dxpl_id, H5AC_RING_SB, &dxpl, &orig_ring) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set ring value")
@@ -880,6 +1090,10 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id)
/* Keep a copy of the superblock info */
f->shared->sblock = sblock;
+ /* Allocate space for the superblock */
+ if(HADDR_UNDEF == H5MF_alloc(f, H5FD_MEM_SUPER, dxpl_id, superblock_size))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "file allocation failed for superblock")
+
/* set the drvinfo filed to NULL -- will overwrite this later if needed */
f->shared->drvinfo = NULL;
@@ -893,8 +1107,7 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id)
need_ext = TRUE;
} /* end if */
/* Files with non-default free space settings always need the superblock extension */
- else if(f->shared->fs_strategy != H5F_FILE_SPACE_STRATEGY_DEF ||
- f->shared->fs_threshold != H5F_FREE_SPACE_THRESHOLD_DEF) {
+ else if(non_default_fs_settings) {
HDassert(super_vers >= HDF5_SUPERBLOCK_VERSION_2);
need_ext = TRUE;
} /* end if */
@@ -977,21 +1190,29 @@ H5F__super_init(H5F_t *f, hid_t dxpl_id)
info.buf = dbuf;
if(H5O_msg_create(&ext_loc, H5O_DRVINFO_ID, H5O_MSG_FLAG_DONTSHARE, H5O_UPDATE_TIME, &info, dxpl_id) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "unable to update driver info header message")
+
+ HDassert(FALSE == f->shared->drvinfo_sb_msg_exists);
+ f->shared->drvinfo_sb_msg_exists = TRUE;
} /* end if */
- /* Check for non-default free space settings */
- if(f->shared->fs_strategy != H5F_FILE_SPACE_STRATEGY_DEF ||
- f->shared->fs_threshold != H5F_FREE_SPACE_THRESHOLD_DEF) {
- H5FD_mem_t type; /* Memory type for iteration */
- H5O_fsinfo_t fsinfo; /* Free space manager info message */
+ /* Check for non-default free-space info settings */
+ if(non_default_fs_settings) {
+ H5F_mem_page_t ptype;
+ H5O_fsinfo_t fsinfo; /* Free space manager info message */
+
+ /* Write free-space manager info message to superblock extension object header if needed */
+ fsinfo.strategy = f->shared->fs_strategy;
+ fsinfo.persist = f->shared->fs_persist;
+ fsinfo.threshold = f->shared->fs_threshold;
+ fsinfo.page_size = f->shared->fs_page_size;
+ fsinfo.pgend_meta_thres = f->shared->pgend_meta_thres;
+ fsinfo.eoa_pre_fsm_fsalloc = HADDR_UNDEF;
+ fsinfo.mapped = FALSE;
- /* Write free-space manager info message to superblock extension object header if needed */
- fsinfo.strategy = f->shared->fs_strategy;
- fsinfo.threshold = f->shared->fs_threshold;
- for(type = H5FD_MEM_SUPER; type < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, type))
- fsinfo.fs_addr[type-1] = HADDR_UNDEF;
+ for(ptype = H5F_MEM_PAGE_SUPER; ptype < H5F_MEM_PAGE_NTYPES; H5_INC_ENUM(H5F_mem_page_t, ptype))
+ fsinfo.fs_addr[ptype - 1] = HADDR_UNDEF;
- if(H5O_msg_create(&ext_loc, H5O_FSINFO_ID, H5O_MSG_FLAG_DONTSHARE, H5O_UPDATE_TIME, &fsinfo, dxpl_id) < 0)
+ if(H5O_msg_create(&ext_loc, H5O_FSINFO_ID, H5O_MSG_FLAG_DONTSHARE | H5O_MSG_FLAG_MARK_IF_UNKNOWN, H5O_UPDATE_TIME, &fsinfo, dxpl_id) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "unable to update free-space info header message")
} /* end if */
} /* end if */
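
The non_default_fs_settings path above is what a user reaches through the file-creation property list. A hedged sketch with the public API as it shipped in the HDF5 1.10 series (the exact enum and function names are assumptions relative to this development snapshot); any non-default setting here forces a version 2 superblock and the free-space info extension message written above.

#include "hdf5.h"

int
main(void)
{
    hid_t fcpl = H5Pcreate(H5P_FILE_CREATE);
    hid_t file;

    /* Paged aggregation with persistent free-space managers and an
     * 8 KiB file-space page size */
    H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 1, (hsize_t)1);
    H5Pset_file_space_page_size(fcpl, (hsize_t)8192);

    file = H5Fcreate("paged.h5", H5F_ACC_TRUNC, fcpl, H5P_DEFAULT);

    H5Fclose(file);
    H5Pclose(fcpl);

    return 0;
}
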
@@ -1078,20 +1299,20 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5F_super_dirty
+ * Function: H5F_eoa_dirty
*
- * Purpose: Mark the file's superblock dirty
+ * Purpose: Mark the file's EOA info dirty
*
* Return: Success: non-negative on success
* Failure: Negative
*
* Programmer: Quincey Koziol
- * August 14, 2009
+ * January 4, 2017
*
*-------------------------------------------------------------------------
*/
herr_t
-H5F_super_dirty(H5F_t *f)
+H5F_eoa_dirty(H5F_t *f, hid_t dxpl_id)
{
herr_t ret_value = SUCCEED; /* Return value */
@@ -1103,16 +1324,56 @@ H5F_super_dirty(H5F_t *f)
HDassert(f->shared->sblock);
/* Mark superblock dirty in cache, so change to EOA will get encoded */
- if(H5AC_mark_entry_dirty(f->shared->sblock) < 0)
+ if(H5F_super_dirty(f) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTMARKDIRTY, FAIL, "unable to mark superblock as dirty")
- /* if the driver information block exists, mark it dirty as well
+ /* If the driver information block exists, mark it dirty as well
* so that the change in eoa will be reflected there as well if
* appropriate.
*/
- if ( f->shared->drvinfo )
+ if(f->shared->drvinfo) {
if(H5AC_mark_entry_dirty(f->shared->drvinfo) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTMARKDIRTY, FAIL, "unable to mark drvinfo as dirty")
+ } /* end if */
+ /* If the driver info is stored as a message, update that instead */
+ else if(f->shared->drvinfo_sb_msg_exists) {
+ if(H5F__update_super_ext_driver_msg(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTMARKDIRTY, FAIL, "unable to mark drvinfo message as dirty")
+ } /* end if */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5F_eoa_dirty() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5F_super_dirty
+ *
+ * Purpose: Mark the file's superblock dirty
+ *
+ * Return: Success: non-negative on success
+ * Failure: Negative
+ *
+ * Programmer: Quincey Koziol
+ * August 14, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5F_super_dirty(H5F_t *f)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity check */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(f->shared->sblock);
+
+ /* Mark superblock dirty in cache, so change to EOA will get encoded */
+ if(H5AC_mark_entry_dirty(f->shared->sblock) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTMARKDIRTY, FAIL, "unable to mark superblock as dirty")
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -1229,7 +1490,8 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5F_super_ext_write_msg(H5F_t *f, hid_t dxpl_id, unsigned id, void *mesg, hbool_t may_create)
+H5F_super_ext_write_msg(H5F_t *f, hid_t dxpl_id, unsigned id, void *mesg,
+ hbool_t may_create, unsigned mesg_flags)
{
H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
@@ -1274,7 +1536,7 @@ H5F_super_ext_write_msg(H5F_t *f, hid_t dxpl_id, unsigned id, void *mesg, hbool_
HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "Message should not exist")
/* Create the message with ID in the superblock extension */
- if(H5O_msg_create(&ext_loc, id, H5O_MSG_FLAG_DONTSHARE, H5O_UPDATE_TIME, mesg, dxpl_id) < 0)
+ if(H5O_msg_create(&ext_loc, id, (mesg_flags | H5O_MSG_FLAG_DONTSHARE), H5O_UPDATE_TIME, mesg, dxpl_id) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "unable to create the message in object header")
} /* end if */
else {
@@ -1282,7 +1544,7 @@ H5F_super_ext_write_msg(H5F_t *f, hid_t dxpl_id, unsigned id, void *mesg, hbool_
HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "Message should exist")
/* Update the message with ID in the superblock extension */
- if(H5O_msg_write(&ext_loc, id, H5O_MSG_FLAG_DONTSHARE, H5O_UPDATE_TIME, mesg, dxpl_id) < 0)
+ if(H5O_msg_write(&ext_loc, id, (mesg_flags | H5O_MSG_FLAG_DONTSHARE), H5O_UPDATE_TIME, mesg, dxpl_id) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "unable to write the message in object header")
} /* end else */
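
With the new signature, every call site passes an explicit message-flags argument that is OR'd with H5O_MSG_FLAG_DONTSHARE. A minimal library-internal sketch mirroring the H5F__super_read() change above (the helper name and the fsinfo argument are illustrative):

/* Illustrative sketch only (library-internal): rewrite the free-space info
 * message with H5O_MSG_FLAG_MARK_IF_UNKNOWN so that an older library opening
 * the file marks the message as "unknown" instead of misinterpreting it.
 */
static herr_t
example_rewrite_fsinfo(H5F_t *f, hid_t dxpl_id, H5O_fsinfo_t *fsinfo)
{
    /* may_create == TRUE: create the message if it does not exist yet */
    if(H5F_super_ext_write_msg(f, dxpl_id, H5O_FSINFO_ID, fsinfo, TRUE, H5O_MSG_FLAG_MARK_IF_UNKNOWN) < 0)
        return FAIL;

    return SUCCEED;
} /* end example_rewrite_fsinfo() */
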
diff --git a/src/H5Fsuper_cache.c b/src/H5Fsuper_cache.c
index 6cfd9c7..76866db 100644
--- a/src/H5Fsuper_cache.c
+++ b/src/H5Fsuper_cache.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -74,9 +72,6 @@ static htri_t H5F__cache_superblock_verify_chksum(const void *image_ptr, size_t
static void *H5F__cache_superblock_deserialize(const void *image, size_t len,
void *udata, hbool_t *dirty);
static herr_t H5F__cache_superblock_image_len(const void *thing, size_t *image_len);
-static herr_t H5F__cache_superblock_pre_serialize(const H5F_t *f,
- hid_t dxpl_id, void *thing, haddr_t addr, size_t len,
- haddr_t *new_addr, size_t *new_len, unsigned *flags);
static herr_t H5F__cache_superblock_serialize(const H5F_t *f, void *image, size_t len,
void *thing);
static herr_t H5F__cache_superblock_free_icr(void *thing);
@@ -91,6 +86,14 @@ static herr_t H5F__cache_drvrinfo_serialize(const H5F_t *f, void *image, size_t
void *thing);
static herr_t H5F__cache_drvrinfo_free_icr(void *thing);
+/* Local encode/decode routines */
+static herr_t H5F__superblock_prefix_decode(H5F_super_t *sblock,
+ const uint8_t **image_ref, const H5F_superblock_cache_ud_t *udata,
+ hbool_t extend_eoa);
+static herr_t H5F__drvrinfo_prefix_decode(H5O_drvinfo_t *drvinfo, char *drv_name,
+ const uint8_t **image_ref, H5F_drvrinfo_cache_ud_t *udata,
+ hbool_t extend_eoa);
+
/*********************/
/* Package Variables */
@@ -107,7 +110,7 @@ const H5AC_class_t H5AC_SUPERBLOCK[1] = {{
H5F__cache_superblock_verify_chksum, /* 'verify_chksum' callback */
H5F__cache_superblock_deserialize, /* 'deserialize' callback */
H5F__cache_superblock_image_len, /* 'image_len' callback */
- H5F__cache_superblock_pre_serialize,/* 'pre_serialize' callback */
+ NULL, /* 'pre_serialize' callback */
H5F__cache_superblock_serialize, /* 'serialize' callback */
NULL, /* 'notify' callback */
H5F__cache_superblock_free_icr, /* 'free_icr' callback */
@@ -148,6 +151,158 @@ H5FL_EXTERN(H5F_super_t);
/*-------------------------------------------------------------------------
+ * Function: H5F__superblock_prefix_decode
+ *
+ * Purpose: Decode a superblock prefix
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * December 15, 2016
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5F__superblock_prefix_decode(H5F_super_t *sblock, const uint8_t **image_ref,
+ const H5F_superblock_cache_ud_t *udata, hbool_t extend_eoa)
+{
+ const uint8_t *image = (const uint8_t *)*image_ref; /* Pointer into raw data buffer */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Check arguments */
+ HDassert(sblock);
+ HDassert(image_ref);
+ HDassert(image);
+ HDassert(udata);
+ HDassert(udata->f);
+
+ /* Skip over signature (already checked when locating the superblock) */
+ image += H5F_SIGNATURE_LEN;
+
+ /* Superblock version */
+ sblock->super_vers = *image++;
+ if(sblock->super_vers > HDF5_SUPERBLOCK_VERSION_LATEST)
+ HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, FAIL, "bad superblock version number")
+
+ /* Sanity check */
+ HDassert(((size_t)(image - (const uint8_t *)*image_ref)) == H5F_SUPERBLOCK_FIXED_SIZE);
+
+ /* Determine the size of addresses & size of offsets, for computing the
+ * variable-sized portion of the superblock.
+ */
+ if(sblock->super_vers < HDF5_SUPERBLOCK_VERSION_2) {
+ sblock->sizeof_addr = image[4];
+ sblock->sizeof_size = image[5];
+ } /* end if */
+ else {
+ sblock->sizeof_addr = image[0];
+ sblock->sizeof_size = image[1];
+ } /* end else */
+ if(sblock->sizeof_addr != 2 && sblock->sizeof_addr != 4 &&
+ sblock->sizeof_addr != 8 && sblock->sizeof_addr != 16 && sblock->sizeof_addr != 32)
+ HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, FAIL, "bad byte number in an address")
+ if(sblock->sizeof_size != 2 && sblock->sizeof_size != 4 &&
+ sblock->sizeof_size != 8 && sblock->sizeof_size != 16 && sblock->sizeof_size != 32)
+ HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, FAIL, "bad byte number for object size")
+
+ /* Check for extending the EOA for the file */
+ if(extend_eoa) {
+ size_t variable_size; /* Variable size of superblock */
+
+ /* Determine the size of the variable-length part of the superblock */
+ variable_size = (size_t)H5F_SUPERBLOCK_VARLEN_SIZE(sblock->super_vers, sblock->sizeof_addr, sblock->sizeof_size);
+ HDassert(variable_size > 0);
+
+ /* Make certain we can read the variable-sized portion of the superblock */
+ if(H5F__set_eoa(udata->f, H5FD_MEM_SUPER, (haddr_t)(H5F_SUPERBLOCK_FIXED_SIZE + variable_size)) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "set end of space allocation request failed")
+ } /* end if */
+
+ /* Update the image buffer pointer */
+ *image_ref = image;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5F__superblock_prefix_decode() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5F__drvrinfo_prefix_decode
+ *
+ * Purpose: Decode a driver info prefix
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * December 15, 2016
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5F__drvrinfo_prefix_decode(H5O_drvinfo_t *drvrinfo, char *drv_name,
+ const uint8_t **image_ref, H5F_drvrinfo_cache_ud_t *udata,
+ hbool_t extend_eoa)
+{
+ const uint8_t *image = (const uint8_t *)*image_ref; /* Pointer into raw data buffer */
+ unsigned drv_vers; /* Version of driver info block */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity check */
+ HDassert(drvrinfo);
+ HDassert(image_ref);
+ HDassert(image);
+ HDassert(udata);
+ HDassert(udata->f);
+
+ /* Version number */
+ drv_vers = *image++;
+ if(drv_vers != HDF5_DRIVERINFO_VERSION_0)
+ HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, FAIL, "bad driver information block version number")
+
+ image += 3; /* reserved bytes */
+
+ /* Driver info size */
+ UINT32DECODE(image, drvrinfo->len);
+
+ /* Driver name and/or version */
+ if(drv_name) {
+ HDmemcpy(drv_name, (const char *)image, (size_t)8);
+ drv_name[8] = '\0';
+ image += 8; /* advance past name/version */
+ } /* end if */
+
+ /* Extend the EOA if required so that we can read the complete driver info block */
+ if(extend_eoa) {
+ haddr_t eoa; /* Current EOA for the file */
+ haddr_t min_eoa; /* Minimum EOA needed for reading the driver info */
+
+ /* Get current EOA... */
+ eoa = H5FD_get_eoa(udata->f->shared->lf, H5FD_MEM_SUPER);
+ if(!H5F_addr_defined(eoa))
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "driver get_eoa request failed")
+
+ /* ... if it is too small, extend it. */
+ min_eoa = udata->driver_addr + H5F_DRVINFOBLOCK_HDR_SIZE + drvrinfo->len;
+
+ /* If it grew, set it */
+ if(H5F_addr_gt(min_eoa, eoa))
+ if(H5FD_set_eoa(udata->f->shared->lf, H5FD_MEM_SUPER, min_eoa) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "set end of space allocation request failed")
+ } /* end if */
+
+ /* Update the image buffer pointer */
+ *image_ref = image;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5F__drvrinfo_prefix_decode() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5F__cache_superblock_get_initial_load_size
*
* Purpose: Compute the size of the data structure on disk.
@@ -195,10 +350,7 @@ H5F__cache_superblock_get_final_load_size(const void *_image, size_t image_len,
{
const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
H5F_superblock_cache_ud_t *udata = (H5F_superblock_cache_ud_t *)_udata; /* User data */
- unsigned super_vers; /* Superblock version */
- uint8_t sizeof_addr; /* Size of offsets in the file (in bytes) */
- uint8_t sizeof_size; /* Size of lengths in the file (in bytes) */
- size_t variable_size; /* Variable size of superblock */
+ H5F_super_t sblock; /* Temporary file superblock */
htri_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -206,56 +358,20 @@ H5F__cache_superblock_get_final_load_size(const void *_image, size_t image_len,
/* Check arguments */
HDassert(image);
HDassert(udata);
- HDassert(udata->f);
HDassert(actual_len);
HDassert(*actual_len == image_len);
-
- /* Skip over file signature */
- image += H5F_SIGNATURE_LEN;
-
- /* Superblock version */
- super_vers = *image++;
- if(super_vers > HDF5_SUPERBLOCK_VERSION_LATEST)
- HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, FAIL, "bad superblock version number")
-
- /* Save the version to be used in verify_chksum callback */
- udata->super_vers = super_vers;
-
- /* Sanity check */
- HDassert(((size_t)(image - (const uint8_t *)_image)) == H5F_SUPERBLOCK_FIXED_SIZE);
HDassert(image_len >= H5F_SUPERBLOCK_FIXED_SIZE + 6);
- /* Determine the size of addresses & size of offsets, for computing the
- * variable-sized portion of the superblock.
- */
- if(super_vers < HDF5_SUPERBLOCK_VERSION_2) {
- sizeof_addr = image[4];
- sizeof_size = image[5];
- } /* end if */
- else {
- sizeof_addr = image[0];
- sizeof_size = image[1];
- } /* end else */
- if(sizeof_addr != 2 && sizeof_addr != 4 &&
- sizeof_addr != 8 && sizeof_addr != 16 && sizeof_addr != 32)
- HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, FAIL, "bad byte number in an address")
- if(sizeof_size != 2 && sizeof_size != 4 &&
- sizeof_size != 8 && sizeof_size != 16 && sizeof_size != 32)
- HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, FAIL, "bad byte number for object size")
-
- /* Determine the size of the variable-length part of the superblock */
- variable_size = (size_t)H5F_SUPERBLOCK_VARLEN_SIZE(super_vers, sizeof_addr, sizeof_size);
- HDassert(variable_size > 0);
+ /* Deserialize the file superblock's prefix */
+ if(H5F__superblock_prefix_decode(&sblock, &image, udata, TRUE) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTDECODE, FAIL, "can't decode file superblock prefix")
- /* Sanity check */
- HDassert(image_len == (H5F_SUPERBLOCK_FIXED_SIZE + H5F_SUPERBLOCK_MINIMAL_VARLEN_SIZE));
-
- /* Make certain we can read the variable-sized portion of the superblock */
- if(H5F__set_eoa(udata->f, H5FD_MEM_SUPER, (haddr_t)(H5F_SUPERBLOCK_FIXED_SIZE + variable_size)) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "set end of space allocation request failed")
+ /* Save the version to be used in verify_chksum callback */
+ udata->super_vers = sblock.super_vers;
/* Set the final size for the cache image */
- *actual_len = H5F_SUPERBLOCK_FIXED_SIZE + variable_size;
+ *actual_len = H5F_SUPERBLOCK_FIXED_SIZE +
+ (size_t)H5F_SUPERBLOCK_VARLEN_SIZE(sblock.super_vers, sblock.sizeof_addr, sblock.sizeof_size);
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -325,10 +441,6 @@ H5F__cache_superblock_deserialize(const void *_image, size_t len, void *_udata,
H5F_super_t *sblock = NULL; /* File's superblock */
H5F_superblock_cache_ud_t *udata = (H5F_superblock_cache_ud_t *)_udata; /* User data */
const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
- size_t variable_size; /* Variable size of superblock */
- unsigned super_vers; /* Superblock version */
- uint8_t sizeof_addr; /* Size of offsets in the file (in bytes) */
- uint8_t sizeof_size; /* Size of lengths in the file (in bytes) */
H5F_super_t *ret_value = NULL; /* Return value */
FUNC_ENTER_STATIC
@@ -337,54 +449,18 @@ H5F__cache_superblock_deserialize(const void *_image, size_t len, void *_udata,
HDassert(image);
HDassert(udata);
HDassert(udata->f);
+ HDassert(len >= H5F_SUPERBLOCK_FIXED_SIZE + 6);
/* Allocate space for the superblock */
if(NULL == (sblock = H5FL_CALLOC(H5F_super_t)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
- /* Skip over signature (already checked when locating the superblock) */
- image += H5F_SIGNATURE_LEN;
-
- /* Superblock version */
- super_vers = *image++;
- if(super_vers > HDF5_SUPERBLOCK_VERSION_LATEST)
- HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad superblock version number")
-
- /* Record the superblock version */
- sblock->super_vers = super_vers;
-
- /* Sanity check */
- HDassert(((size_t)(image - (const uint8_t *)_image)) == H5F_SUPERBLOCK_FIXED_SIZE);
- HDassert(len >= H5F_SUPERBLOCK_FIXED_SIZE + 6);
-
- /* Determine the size of addresses & size of offsets, for computing the
- * variable-sized portion of the superblock.
- */
- if(super_vers < HDF5_SUPERBLOCK_VERSION_2) {
- sizeof_addr = image[4];
- sizeof_size = image[5];
- } /* end if */
- else {
- sizeof_addr = image[0];
- sizeof_size = image[1];
- } /* end else */
- if(sizeof_addr != 2 && sizeof_addr != 4 &&
- sizeof_addr != 8 && sizeof_addr != 16 && sizeof_addr != 32)
- HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad byte number in an address")
- if(sizeof_size != 2 && sizeof_size != 4 &&
- sizeof_size != 8 && sizeof_size != 16 && sizeof_size != 32)
- HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad byte number for object size")
- sblock->sizeof_addr = sizeof_addr;
- sblock->sizeof_size = sizeof_size;
-
- /* Determine the size of the variable-length part of the superblock */
- variable_size = (size_t)H5F_SUPERBLOCK_VARLEN_SIZE(super_vers, sizeof_addr, sizeof_size);
- HDassert(variable_size > 0);
-
- HDassert(len == (H5F_SUPERBLOCK_FIXED_SIZE + variable_size));
+ /* Deserialize the file superblock's prefix */
+ if(H5F__superblock_prefix_decode(sblock, &image, udata, FALSE) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTDECODE, NULL, "can't decode file superblock prefix")
/* Check for older version of superblock format */
- if(super_vers < HDF5_SUPERBLOCK_VERSION_2) {
+ if(sblock->super_vers < HDF5_SUPERBLOCK_VERSION_2) {
uint32_t status_flags; /* File status flags */
unsigned sym_leaf_k; /* Symbol table leaf node's 'K' value */
unsigned snode_btree_k; /* B-tree symbol table internal node 'K' value */
@@ -405,21 +481,13 @@ H5F__cache_superblock_deserialize(const void *_image, size_t len, void *_udata,
if(HDF5_SHAREDHEADER_VERSION != *image++)
HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad shared-header format version number")
- /* Size of file addresses */
- sizeof_addr = *image++;
- if(sizeof_addr != 2 && sizeof_addr != 4 &&
- sizeof_addr != 8 && sizeof_addr != 16 && sizeof_addr != 32)
- HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad byte number in an address")
- sblock->sizeof_addr = sizeof_addr;
- udata->f->shared->sizeof_addr = sizeof_addr; /* Keep a local copy also */
-
- /* Size of file sizes */
- sizeof_size = *image++;
- if(sizeof_size != 2 && sizeof_size != 4 &&
- sizeof_size != 8 && sizeof_size != 16 && sizeof_size != 32)
- HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad byte number for object size")
- sblock->sizeof_size = sizeof_size;
- udata->f->shared->sizeof_size = sizeof_size; /* Keep a local copy also */
+ /* Skip over size of file addresses (already decoded) */
+ image++;
+ udata->f->shared->sizeof_addr = sblock->sizeof_addr; /* Keep a local copy also */
+
+ /* Skip over size of file sizes (already decoded) */
+ image++;
+ udata->f->shared->sizeof_size = sblock->sizeof_size; /* Keep a local copy also */
/* Skip over reserved byte */
image++;
@@ -452,11 +520,11 @@ H5F__cache_superblock_deserialize(const void *_image, size_t len, void *_udata,
* If the superblock version # is greater than 0, read in the indexed
* storage B-tree internal 'K' value
*/
- if(super_vers > HDF5_SUPERBLOCK_VERSION_DEF) {
+ if(sblock->super_vers > HDF5_SUPERBLOCK_VERSION_DEF) {
UINT16DECODE(image, chunk_btree_k);
/* Reserved bytes are present only in version 1 */
- if(super_vers == HDF5_SUPERBLOCK_VERSION_1)
+ if(sblock->super_vers == HDF5_SUPERBLOCK_VERSION_1)
image += 2; /* reserved */
} /* end if */
else
@@ -498,21 +566,13 @@ H5F__cache_superblock_deserialize(const void *_image, size_t len, void *_udata,
else {
uint32_t read_chksum; /* Checksum read from file */
- /* Size of file addresses */
- sizeof_addr = *image++;
- if(sizeof_addr != 2 && sizeof_addr != 4 &&
- sizeof_addr != 8 && sizeof_addr != 16 && sizeof_addr != 32)
- HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad byte number in an address")
- sblock->sizeof_addr = sizeof_addr;
- udata->f->shared->sizeof_addr = sizeof_addr; /* Keep a local copy also */
-
- /* Size of file sizes */
- sizeof_size = *image++;
- if(sizeof_size != 2 && sizeof_size != 4 &&
- sizeof_size != 8 && sizeof_size != 16 && sizeof_size != 32)
- HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad byte number for object size")
- sblock->sizeof_size = sizeof_size;
- udata->f->shared->sizeof_size = sizeof_size; /* Keep a local copy also */
+ /* Skip over size of file addresses (already decoded) */
+ image++;
+ udata->f->shared->sizeof_addr = sblock->sizeof_addr; /* Keep a local copy also */
+
+ /* Skip over size of file sizes (already decoded) */
+ image++;
+ udata->f->shared->sizeof_size = sblock->sizeof_size; /* Keep a local copy also */
/* File status flags (not really used yet) */
sblock->status_flags = *image++;
@@ -588,120 +648,6 @@ H5F__cache_superblock_image_len(const void *_thing, size_t *image_len)
/*-------------------------------------------------------------------------
- * Function: H5FS__cache_hdf_pre_serialize
- *
- * Purpose: The current use of this function is a cludge to repair an
- * oversight in the conversion of the superblock code to use the
- * version 3 cache.
- *
- * In the V2 metadata cache callbacks, the superblock dirver info
- * message was updated in the flush routine. Note that this
- * operation only applies to version 2 or later superblocks.
- *
- * Somehow, this functionality was lost in the conversion to use
- * the V3 cache, causing failures with the multi file driver
- * (and possibly the family file driver as well).
- *
- * Performing this operation is impossible in the current
- * serialize routine, as the dxpl_id is not available. While
- * I am pretty sure that this is not the correct place for this
- * functionality, as I can see it causing problems with both
- * journaling and possibly parallel HDF5 as well, I am placing
- * code for the necessary update in the pre_serialize call for
- * now for testing purposes. We will almost certainly want to
- * change this.
- *
- * Return: Success: SUCCEED
- * Failure: FAIL
- *
- * Programmer: John Mainzer
- * 10/82/14
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-H5F__cache_superblock_pre_serialize(const H5F_t *f, hid_t dxpl_id,
- void *_thing, haddr_t H5_ATTR_UNUSED addr, size_t H5_ATTR_UNUSED len,
- haddr_t H5_ATTR_UNUSED *new_addr, size_t H5_ATTR_UNUSED *new_len,
- unsigned H5_ATTR_UNUSED *flags)
-{
- H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
- H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
- H5F_super_t *sblock = (H5F_super_t *)_thing; /* Pointer to the super block */
- herr_t ret_value = SUCCEED; /* Return value */
-
- FUNC_ENTER_NOAPI_NOINIT
-
- /* Sanity check */
- HDassert(f);
- HDassert(sblock);
- HDassert(sblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert(sblock->cache_info.type == H5AC_SUPERBLOCK);
- HDassert(flags);
-
- if(sblock->super_vers >= HDF5_SUPERBLOCK_VERSION_2) {
- /* WARNING: This code almost certainly doesn't belong here. Must
- * discuss with Quincey where to put it. Note issues
- * for journaling and possibly parallel.
- *
- * -- JRM
- */
- /* Update the driver information message in the superblock extension
- * if appropriate.
- */
- if(H5F_addr_defined(sblock->ext_addr)) {
- size_t driver_size; /* Size of driver info block (bytes)*/
- H5O_loc_t ext_loc; /* "Object location" for superblock extension */
-
- HDassert(sblock->super_vers >= HDF5_SUPERBLOCK_VERSION_2);
-
- /* Open the superblock extension's object header */
- if(H5F_super_ext_open((H5F_t *)f, sblock->ext_addr, &ext_loc) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTOPENOBJ, FAIL, "unable to open file's superblock extension")
-
- /* Check for ignoring the driver info for this file */
- if(!H5F_HAS_FEATURE(f, H5FD_FEAT_IGNORE_DRVRINFO)) {
- /* Check for driver info message */
- H5_CHECKED_ASSIGN(driver_size, size_t, H5FD_sb_size(f->shared->lf), hsize_t);
- if(driver_size > 0) {
- H5O_drvinfo_t drvinfo; /* Driver info */
- uint8_t dbuf[H5F_MAX_DRVINFOBLOCK_SIZE]; /* Driver info block encoding buffer */
-
- /* Sanity check */
- HDassert(driver_size <= H5F_MAX_DRVINFOBLOCK_SIZE);
-
- /* Encode driver-specific data */
- if(H5FD_sb_encode(f->shared->lf, drvinfo.name, dbuf) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "unable to encode driver information")
-
- /* Set the ring type in the DXPL */
- if(H5AC_set_ring(dxpl_id, H5AC_RING_SBE, &dxpl, &orig_ring) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set ring value")
-
- /* Write driver info information to the superblock extension */
- drvinfo.len = driver_size;
- drvinfo.buf = dbuf;
- if(H5O_msg_write(&ext_loc, H5O_DRVINFO_ID, H5O_MSG_FLAG_DONTSHARE, H5O_UPDATE_TIME, &drvinfo, dxpl_id) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_WRITEERROR, FAIL, "unable to update driver info header message")
- } /* end if */
- } /* end if */
-
- /* Close the superblock extension object header */
- if(H5F_super_ext_close((H5F_t *)f, &ext_loc, dxpl_id, FALSE) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTCLOSEOBJ, FAIL, "unable to close file's superblock extension")
- } /* end if */
- } /* end if */
-
-done:
- /* Reset the ring in the DXPL */
- if(H5AC_reset_ring(dxpl, orig_ring) < 0)
- HDONE_ERROR(H5E_FILE, H5E_CANTSET, FAIL, "unable to set property value")
-
- FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5FS_cache_superblock_pre_serialize() */
-
-
-/*-------------------------------------------------------------------------
* Function: H5F__cache_superblock_serialize
*
* Purpose: Flushes a dirty object to disk.
@@ -927,50 +873,24 @@ H5F__cache_drvrinfo_get_final_load_size(const void *_image, size_t image_len,
{
const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
H5F_drvrinfo_cache_ud_t *udata = (H5F_drvrinfo_cache_ud_t *)_udata; /* User data */
- unsigned drv_vers; /* Version of driver info block */
- size_t drvinfo_len; /* Length of encoded buffer */
- haddr_t eoa; /* Current EOA for the file */
- haddr_t min_eoa; /* Minimum EOA needed for reading the driver info */
- htri_t ret_value = SUCCEED; /* Return value */
+ H5O_drvinfo_t drvrinfo; /* Driver info */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
/* Check arguments */
HDassert(image);
HDassert(udata);
- HDassert(udata->f);
HDassert(actual_len);
HDassert(*actual_len == image_len);
-
- /* Version number */
- drv_vers = *image++;
- if(drv_vers != HDF5_DRIVERINFO_VERSION_0)
- HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, FAIL, "bad driver information block version number")
-
- image += 3; /* reserved bytes */
-
- /* Driver info size */
- UINT32DECODE(image, drvinfo_len);
-
- /* Sanity check */
HDassert(image_len == H5F_DRVINFOBLOCK_HDR_SIZE);
- /* Extend the EOA if required so that we can read the complete driver info block */
-
- /* Get current EOA... */
- if((eoa = H5FD_get_eoa(udata->f->shared->lf, H5FD_MEM_SUPER)) == HADDR_UNDEF)
- HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "driver get_eoa request failed")
-
- /* ... if it is too small, extend it. */
- min_eoa = udata->driver_addr + H5F_DRVINFOBLOCK_HDR_SIZE + drvinfo_len;
-
- /* If it grew, set it */
- if(H5F_addr_gt(min_eoa, eoa))
- if(H5FD_set_eoa(udata->f->shared->lf, H5FD_MEM_SUPER, min_eoa) < 0)
- HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "set end of space allocation request failed")
+ /* Deserialize the file driver info's prefix */
+ if(H5F__drvrinfo_prefix_decode(&drvrinfo, NULL, &image, udata, TRUE) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTDECODE, FAIL, "can't decode file driver info prefix")
/* Set the final size for the cache image */
- *actual_len = H5F_DRVINFOBLOCK_HDR_SIZE + drvinfo_len;
+ *actual_len = H5F_DRVINFOBLOCK_HDR_SIZE + drvrinfo.len;
done:
FUNC_LEAVE_NOAPI(ret_value)
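
For orientation, the prefix that H5F__drvrinfo_prefix_decode() is asked to parse here appears to match the layout the removed lines used to read by hand: one version byte, three reserved bytes, a 4-byte length, and an 8-character driver name. The standalone sketch below reconstructs that byte layout only; the struct and function names are hypothetical, and the real helper takes extra udata and flag arguments (with the name copy optional), so treat this as an illustration rather than the library routine:

    #include <stdint.h>
    #include <string.h>

    typedef struct {
        unsigned version;   /* driver info block version (0 is the only one defined) */
        uint32_t len;       /* length of the driver-specific data that follows       */
        char     name[9];   /* 8-character driver name/version, NUL-terminated       */
    } drvrinfo_prefix_t;

    static int
    drvrinfo_prefix_decode(drvrinfo_prefix_t *p, const uint8_t **image_ref)
    {
        const uint8_t *image = *image_ref;

        p->version = *image++;                  /* version byte   */
        if(p->version != 0)
            return -1;
        image += 3;                             /* reserved bytes */

        /* 4-byte length, little-endian, matching what UINT32DECODE produced */
        p->len = (uint32_t)image[0] | ((uint32_t)image[1] << 8) |
                 ((uint32_t)image[2] << 16) | ((uint32_t)image[3] << 24);
        image += 4;

        memcpy(p->name, image, 8);              /* driver name and/or version */
        p->name[8] = '\0';
        image += 8;

        *image_ref = image;
        return 0;
    }

The get_final_load_size() caller above only needs len (to set *actual_len), while the deserialize caller below also wants the name, which presumably is what the NULL/TRUE versus drv_name/FALSE argument pairs select.
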
@@ -982,7 +902,7 @@ done:
*
* Purpose: Loads an object from the disk.
*
- * Return: Success: Pointer to a new B-tree.
+ * Return: Success: Pointer to a new driver info struct
* Failure: NULL
*
* Programmer: Quincey Koziol
@@ -999,7 +919,6 @@ H5F__cache_drvrinfo_deserialize(const void *_image, size_t len, void *_udata,
H5F_drvrinfo_cache_ud_t *udata = (H5F_drvrinfo_cache_ud_t *)_udata; /* User data */
const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
char drv_name[9]; /* Name of driver */
- unsigned drv_vers; /* Version of driver info block */
H5O_drvinfo_t *ret_value = NULL; /* Return value */
FUNC_ENTER_STATIC
@@ -1014,21 +933,11 @@ H5F__cache_drvrinfo_deserialize(const void *_image, size_t len, void *_udata,
if(NULL == (drvinfo = (H5O_drvinfo_t *)H5MM_calloc(sizeof(H5O_drvinfo_t))))
HGOTO_ERROR(H5E_FILE, H5E_CANTALLOC, NULL, "memory allocation failed for driver info message")
- /* Version number */
- drv_vers = *image++;
- if(drv_vers != HDF5_DRIVERINFO_VERSION_0)
- HGOTO_ERROR(H5E_FILE, H5E_BADVALUE, NULL, "bad driver information block version number")
-
- image += 3; /* reserved bytes */
-
- /* Driver info size */
- UINT32DECODE(image, drvinfo->len);
-
- /* Driver name and/or version */
- HDstrncpy(drv_name, (const char *)image, (size_t)8);
- drv_name[8] = '\0';
- image += 8; /* advance past name/version */
+ /* Deserialize the file driver info's prefix */
+ if(H5F__drvrinfo_prefix_decode(drvinfo, drv_name, &image, udata, FALSE) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTDECODE, NULL, "can't decode file driver info prefix")
+ /* Sanity check */
HDassert(len == (H5F_DRVINFOBLOCK_HDR_SIZE + drvinfo->len));
/* Validate and decode driver information */
diff --git a/src/H5Ftest.c b/src/H5Ftest.c
index e3760b7..dd69b1e 100644
--- a/src/H5Ftest.c
+++ b/src/H5Ftest.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5G.c b/src/H5G.c
index 1a18dc0..10eb5ed 100644
--- a/src/H5G.c
+++ b/src/H5G.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Gbtree2.c b/src/H5Gbtree2.c
index ff7e200..71d15e5 100644
--- a/src/H5Gbtree2.c
+++ b/src/H5Gbtree2.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Gcache.c b/src/H5Gcache.c
index d153560..65115a5 100644
--- a/src/H5Gcache.c
+++ b/src/H5Gcache.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Gcompact.c b/src/H5Gcompact.c
index 41ca4f5..d5698a8 100644
--- a/src/H5Gcompact.c
+++ b/src/H5Gcompact.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Gdense.c b/src/H5Gdense.c
index e8fa237..4ae6800 100644
--- a/src/H5Gdense.c
+++ b/src/H5Gdense.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Gdeprec.c b/src/H5Gdeprec.c
index a775c58..228fb9c 100644
--- a/src/H5Gdeprec.c
+++ b/src/H5Gdeprec.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Gent.c b/src/H5Gent.c
index f64eaf0..b781fae 100644
--- a/src/H5Gent.c
+++ b/src/H5Gent.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Gint.c b/src/H5Gint.c
index 72cc1a4..6d33716 100644
--- a/src/H5Gint.c
+++ b/src/H5Gint.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Glink.c b/src/H5Glink.c
index 5a195c8..d246ee7 100644
--- a/src/H5Glink.c
+++ b/src/H5Glink.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Gloc.c b/src/H5Gloc.c
index 3fcbd09..c011cdf 100644
--- a/src/H5Gloc.c
+++ b/src/H5Gloc.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Gmodule.h b/src/H5Gmodule.h
index 52ac067..19ea982 100644
--- a/src/H5Gmodule.h
+++ b/src/H5Gmodule.h
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Gname.c b/src/H5Gname.c
index aff9d31..7b53668 100644
--- a/src/H5Gname.c
+++ b/src/H5Gname.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Gnode.c b/src/H5Gnode.c
index 89d1df8..20924ee 100644
--- a/src/H5Gnode.c
+++ b/src/H5Gnode.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Gobj.c b/src/H5Gobj.c
index 92ad0af..d2dc83b 100644
--- a/src/H5Gobj.c
+++ b/src/H5Gobj.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Goh.c b/src/H5Goh.c
index bde540c..977fc2d 100644
--- a/src/H5Goh.c
+++ b/src/H5Goh.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/****************/
diff --git a/src/H5Gpkg.h b/src/H5Gpkg.h
index c994da3..76bf08b 100644
--- a/src/H5Gpkg.h
+++ b/src/H5Gpkg.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -312,9 +310,6 @@ typedef struct H5G_copy_file_ud_t {
*/
H5_DLLVAR H5B_class_t H5B_SNODE[1];
-/* The cache subclass */
-H5_DLLVAR const H5AC_class_t H5AC_SNODE[1];
-
/* The v2 B-tree class for indexing 'name' field on links */
H5_DLLVAR const H5B2_class_t H5G_BT2_NAME[1];
diff --git a/src/H5Gprivate.h b/src/H5Gprivate.h
index 2ef99fd..c48e687 100644
--- a/src/H5Gprivate.h
+++ b/src/H5Gprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Gpublic.h b/src/H5Gpublic.h
index 9d8ade5..ab6f200 100644
--- a/src/H5Gpublic.h
+++ b/src/H5Gpublic.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Groot.c b/src/H5Groot.c
index ba25f31..fbc4f1a 100644
--- a/src/H5Groot.c
+++ b/src/H5Groot.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Gstab.c b/src/H5Gstab.c
index ed3dd84..a239cfe 100644
--- a/src/H5Gstab.c
+++ b/src/H5Gstab.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Robb Matzke <matzke@llnl.gov>
diff --git a/src/H5Gtest.c b/src/H5Gtest.c
index 0e0a897..7271cdc 100644
--- a/src/H5Gtest.c
+++ b/src/H5Gtest.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Quincey Koziol <koziol@ncsa.uiuc.edu>
diff --git a/src/H5Gtraverse.c b/src/H5Gtraverse.c
index d6c90e4..ff1206e 100644
--- a/src/H5Gtraverse.c
+++ b/src/H5Gtraverse.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5HF.c b/src/H5HF.c
index efd57ae..6c09969 100644
--- a/src/H5HF.c
+++ b/src/H5HF.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5HFbtree2.c b/src/H5HFbtree2.c
index d4a30b8..2d35368 100644
--- a/src/H5HFbtree2.c
+++ b/src/H5HFbtree2.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5HFcache.c b/src/H5HFcache.c
index 04f1459..f957e2e 100644
--- a/src/H5HFcache.c
+++ b/src/H5HFcache.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -69,6 +67,7 @@
/********************/
/* Local encode/decode routines */
+static herr_t H5HF__hdr_prefix_decode(H5HF_hdr_t *hdr, const uint8_t **image_ref);
static herr_t H5HF__dtable_encode(H5F_t *f, uint8_t **pp, const H5HF_dtable_t *dtable);
static herr_t H5HF__dtable_decode(H5F_t *f, const uint8_t **pp, H5HF_dtable_t *dtable);
@@ -80,7 +79,7 @@ static htri_t H5HF__cache_hdr_verify_chksum(const void *image_ptr, size_t len, v
static void *H5HF__cache_hdr_deserialize(const void *image, size_t len,
void *udata, hbool_t *dirty);
static herr_t H5HF__cache_hdr_image_len(const void *thing, size_t *image_len);
-static herr_t H5HF__cache_hdr_pre_serialize(const H5F_t *f, hid_t dxpl_id,
+static herr_t H5HF__cache_hdr_pre_serialize(H5F_t *f, hid_t dxpl_id,
void *thing, haddr_t addr, size_t len, haddr_t *new_addr, size_t *new_len,
unsigned *flags);
static herr_t H5HF__cache_hdr_serialize(const H5F_t *f, void *image,
@@ -92,12 +91,12 @@ static htri_t H5HF__cache_iblock_verify_chksum(const void *image_ptr, size_t len
static void *H5HF__cache_iblock_deserialize(const void *image, size_t len,
void *udata, hbool_t *dirty);
static herr_t H5HF__cache_iblock_image_len(const void *thing, size_t *image_len);
-static herr_t H5HF__cache_iblock_pre_serialize(const H5F_t *f, hid_t dxpl_id,
+static herr_t H5HF__cache_iblock_pre_serialize(H5F_t *f, hid_t dxpl_id,
void *thing, haddr_t addr, size_t len, haddr_t *new_addr, size_t *new_len,
unsigned *flags);
static herr_t H5HF__cache_iblock_serialize(const H5F_t *f, void *image,
size_t len, void *thing);
-static herr_t H5HF__cache_iblock_notify(H5C_notify_action_t action, void *thing);
+static herr_t H5HF__cache_iblock_notify(H5AC_notify_action_t action, void *thing);
static herr_t H5HF__cache_iblock_free_icr(void *thing);
static herr_t H5HF__cache_dblock_get_initial_load_size(void *udata, size_t *image_len);
@@ -105,24 +104,27 @@ static htri_t H5HF__cache_dblock_verify_chksum(const void *image_ptr, size_t len
static void *H5HF__cache_dblock_deserialize(const void *image, size_t len,
void *udata, hbool_t *dirty);
static herr_t H5HF__cache_dblock_image_len(const void *thing, size_t *image_len);
-static herr_t H5HF__cache_dblock_pre_serialize(const H5F_t *f, hid_t dxpl_id,
+static herr_t H5HF__cache_dblock_pre_serialize(H5F_t *f, hid_t dxpl_id,
void *thing, haddr_t addr, size_t len, haddr_t *new_addr, size_t *new_len,
unsigned *flags);
static herr_t H5HF__cache_dblock_serialize(const H5F_t *f, void *image,
size_t len, void *thing);
-static herr_t H5HF__cache_dblock_notify(H5C_notify_action_t action, void *thing);
+static herr_t H5HF__cache_dblock_notify(H5AC_notify_action_t action, void *thing);
static herr_t H5HF__cache_dblock_free_icr(void *thing);
/* Debugging Function Prototypes */
#ifndef NDEBUG
-static herr_t H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, H5HF_hdr_t *hdr,
- hbool_t *clean);
-static herr_t H5HF__cache_verify_iblock_descendants_clean(H5F_t *f,
- H5HF_indirect_t *iblock, unsigned *iblock_status, hbool_t *clean);
-static herr_t H5HF__cache_verify_iblocks_dblocks_clean(H5F_t *f,
- H5HF_indirect_t *iblock, hbool_t *clean, hbool_t *has_dblocks);
-static herr_t H5HF__cache_verify_descendant_iblocks_clean(H5F_t *f,
- H5HF_indirect_t *iblock, hbool_t *clean, hbool_t *has_iblocks);
+static herr_t H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, hid_t dxpl_id,
+ H5HF_hdr_t *hdr, hbool_t *fd_clean, hbool_t *clean);
+static herr_t H5HF__cache_verify_iblock_descendants_clean(H5F_t *f,
+ hid_t dxpl_id, haddr_t fd_parent_addr, H5HF_indirect_t *iblock,
+ unsigned *iblock_status, hbool_t *fd_clean, hbool_t *clean);
+static herr_t H5HF__cache_verify_iblocks_dblocks_clean(H5F_t *f,
+ haddr_t fd_parent_addr, H5HF_indirect_t *iblock, hbool_t *fd_clean,
+ hbool_t *clean, hbool_t *has_dblocks);
+static herr_t H5HF__cache_verify_descendant_iblocks_clean(H5F_t *f,
+ hid_t dxpl_id, haddr_t fd_parent_addr, H5HF_indirect_t *iblock,
+ hbool_t *fd_clean, hbool_t *clean, hbool_t *has_iblocks);
#endif /* NDEBUG */
@@ -200,6 +202,52 @@ H5FL_BLK_DEFINE(direct_block);
/*-------------------------------------------------------------------------
+ * Function: H5HF__hdr_prefix_decode()
+ *
+ * Purpose: Decode a fractal heap header's prefix
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: Quincey Koziol
+ * December 15, 2016
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5HF__hdr_prefix_decode(H5HF_hdr_t *hdr, const uint8_t **image_ref)
+{
+ const uint8_t *image = *image_ref; /* Pointer into supplied image */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(hdr);
+ HDassert(image);
+
+ /* Magic number */
+ if(HDmemcmp(image, H5HF_HDR_MAGIC, (size_t)H5_SIZEOF_MAGIC))
+ HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, FAIL, "wrong fractal heap header signature")
+ image += H5_SIZEOF_MAGIC;
+
+ /* Version */
+ if(*image++ != H5HF_HDR_VERSION)
+ HGOTO_ERROR(H5E_HEAP, H5E_VERSION, FAIL, "wrong fractal heap header version")
+
+ /* General heap information */
+ UINT16DECODE(image, hdr->id_len); /* Heap ID length */
+ UINT16DECODE(image, hdr->filter_len); /* I/O filters' encoded length */
+
+ /* Update the image buffer pointer */
+ *image_ref = image;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5HF__hdr_prefix_decode() */
+
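+
+As a quick, self-contained illustration of the nine prefix bytes this helper walks (the 4-byte H5HF_HDR_MAGIC signature "FRHP", a 1-byte version, and two little-endian 16-bit fields decoded by UINT16DECODE); the sample values here are made up:
+
+    #include <stdint.h>
+    #include <stdio.h>
+    #include <string.h>
+
+    int
+    main(void)
+    {
+        /* "FRHP" signature, version 0, id_len = 18, filter_len = 0 (sample values) */
+        const uint8_t prefix[9] = { 'F', 'R', 'H', 'P', 0, 18, 0, 0, 0 };
+        const uint8_t *p = prefix;
+        uint16_t id_len, filter_len;
+
+        if(memcmp(p, "FRHP", 4) != 0)
+            return 1;                               /* wrong signature */
+        p += 4;
+        if(*p++ != 0)
+            return 1;                               /* wrong version   */
+        id_len = (uint16_t)(p[0] | (p[1] << 8));    /* UINT16DECODE byte order */
+        p += 2;
+        filter_len = (uint16_t)(p[0] | (p[1] << 8));
+
+        printf("id_len=%u filter_len=%u\n", (unsigned)id_len, (unsigned)filter_len);
+        return 0;
+    }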
+
+/*-------------------------------------------------------------------------
* Function: H5HF__dtable_decode
*
* Purpose: Decodes the metadata for a doubling table
@@ -360,10 +408,10 @@ static herr_t
H5HF__cache_hdr_get_final_load_size(const void *_image, size_t image_len,
void *_udata, size_t *actual_len)
{
- const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
- H5HF_hdr_cache_ud_t *udata = (H5HF_hdr_cache_ud_t *)_udata; /* pointer to user data */
- unsigned filter_len; /* Size of I/O filter information (in bytes) */
- htri_t ret_value = SUCCEED; /* Return value */
+ H5HF_hdr_t hdr; /* Temporary fractal heap header */
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into supplied image */
+ H5HF_hdr_cache_ud_t *udata = (H5HF_hdr_cache_ud_t *)_udata; /* User data for callback */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -373,25 +421,16 @@ H5HF__cache_hdr_get_final_load_size(const void *_image, size_t image_len,
HDassert(actual_len);
HDassert(*actual_len == image_len);
- /* Magic number */
- if(HDmemcmp(image, H5HF_HDR_MAGIC, (size_t)H5_SIZEOF_MAGIC))
- HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, FAIL, "wrong fractal heap header signature")
- image += H5_SIZEOF_MAGIC;
-
- /* Version */
- if(*image++ != H5HF_HDR_VERSION)
- HGOTO_ERROR(H5E_HEAP, H5E_VERSION, FAIL, "wrong fractal heap header version")
-
- /* General heap information */
- image += 2; /* Heap ID length */
- UINT16DECODE(image, filter_len); /* I/O filters' encoded length */
+ /* Deserialize the fractal heap header's prefix */
+ if(H5HF__hdr_prefix_decode(&hdr, &image) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTDECODE, FAIL, "can't decode fractal heap header prefix")
/* Check for I/O filter info on this heap */
- if(filter_len > 0)
+ if(hdr.filter_len > 0)
/* Compute the extra heap header size */
*actual_len += (size_t)(H5F_SIZEOF_SIZE(udata->f) /* Size of size for filtered root direct block */
+ (unsigned)4 /* Size of filter mask for filtered root direct block */
- + filter_len); /* Size of encoded I/O filter info */
+ + hdr.filter_len); /* Size of encoded I/O filter info */
done:
FUNC_LEAVE_NOAPI(ret_value)
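
A worked example of the adjustment above, with assumed numbers (an 8-byte H5F_SIZEOF_SIZE and a 12-byte encoded I/O filter pipeline): the loaded header image grows by 8 + 4 + 12 = 24 bytes, i.e. the filtered root direct block's size field, its 4-byte filter mask, and the encoded filter information. A heap with filter_len == 0 leaves *actual_len equal to the initial image_len.
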
@@ -473,18 +512,9 @@ H5HF__cache_hdr_deserialize(const void *_image, size_t len, void *_udata,
if(NULL == (hdr = H5HF_hdr_alloc(udata->f)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
- /* Magic number */
- if(HDmemcmp(image, H5HF_HDR_MAGIC, (size_t)H5_SIZEOF_MAGIC))
- HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, NULL, "wrong fractal heap header signature")
- image += H5_SIZEOF_MAGIC;
-
- /* Version */
- if(*image++ != H5HF_HDR_VERSION)
- HGOTO_ERROR(H5E_HEAP, H5E_VERSION, NULL, "wrong fractal heap header version")
-
- /* General heap information */
- UINT16DECODE(image, hdr->id_len); /* Heap ID length */
- UINT16DECODE(image, hdr->filter_len); /* I/O filters' encoded length */
+ /* Deserialize the fractal heap header's prefix */
+ if(H5HF__hdr_prefix_decode(hdr, &image) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTDECODE, NULL, "can't decode fractal heap header prefix")
/* Heap status flags */
/* (bit 0: "huge" object IDs have wrapped) */
@@ -635,7 +665,7 @@ H5HF__cache_hdr_image_len(const void *_thing, size_t *image_len)
*-------------------------------------------------------------------------
*/
static herr_t
-H5HF__cache_hdr_pre_serialize(const H5F_t *f, hid_t H5_ATTR_UNUSED dxpl_id,
+H5HF__cache_hdr_pre_serialize(H5F_t *f, hid_t H5_ATTR_UNUSED dxpl_id,
void *_thing, haddr_t addr, size_t len, haddr_t H5_ATTR_UNUSED *new_addr,
size_t H5_ATTR_UNUSED *new_len, unsigned *flags)
{
@@ -658,6 +688,7 @@ H5HF__cache_hdr_pre_serialize(const H5F_t *f, hid_t H5_ATTR_UNUSED dxpl_id,
#ifndef NDEBUG
{
hbool_t descendants_clean = TRUE;
+ hbool_t fd_children_clean = TRUE;
/* Verify that flush dependencies are working correctly. Do this
* by verifying that either:
@@ -672,10 +703,22 @@ H5HF__cache_hdr_pre_serialize(const H5F_t *f, hid_t H5_ATTR_UNUSED dxpl_id,
* constraint is met by default.
*
* Do this with a call to H5HF__cache_verify_hdr_descendants_clean().
+ *
+ * Note that descendants need not be clean if the pre_serialize call
+ * is made during a cache serialization instead of an entry or cache
+ * flush.
+ *
+ * Note also that with the recent change in the definition of flush
+ * dependency, not all descendants need be clean -- only direct flush
+ * dependency children.
+ *
+ * Finally, observe that the H5HF__cache_verify_hdr_descendants_clean()
+ * call still looks for dirty descendants. At present we do not check
+ * this value.
*/
- if(H5HF__cache_verify_hdr_descendants_clean((H5F_t *)f, hdr, &descendants_clean) < 0)
+ if(H5HF__cache_verify_hdr_descendants_clean((H5F_t *)f, dxpl_id, hdr, &fd_children_clean, &descendants_clean) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "can't verify hdr descendants clean.")
- HDassert(descendants_clean);
+ HDassert(fd_children_clean);
}
#endif /* NDEBUG */
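
The assertion above can be pictured with a minimal model of the revised rule (plain C with an invented entry_t; this is not the metadata cache API): a flush dependency parent may be flushed when its immediate children are clean, regardless of what deeper descendants look like.

    #include <stdbool.h>
    #include <stddef.h>

    typedef struct entry {
        bool           is_dirty;
        struct entry **fd_children;    /* immediate flush dependency children */
        size_t         n_fd_children;
    } entry_t;

    static bool
    fd_children_clean(const entry_t *parent)
    {
        for(size_t u = 0; u < parent->n_fd_children; u++)
            if(parent->fd_children[u]->is_dirty)
                return false;          /* a dirty direct child blocks the flush */
        return true;                   /* dirty grandchildren do not            */
    }
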
@@ -1147,8 +1190,9 @@ H5HF__cache_iblock_image_len(const void *_thing, size_t *image_len)
* and if so, to move it to real file space before the entry is
* serialized.
*
- * In debug compiles, this function also verifies that all children
- * of this indirect block are either clean or are not in cache.
+ * In debug compiles, this function also verifies that all
+ * immediate flush dependency children of this indirect block
+ * are either clean or are not in cache.
*
* Return: Success: SUCCEED
* Failure: FAIL
@@ -1159,7 +1203,7 @@ H5HF__cache_iblock_image_len(const void *_thing, size_t *image_len)
*-------------------------------------------------------------------------
*/
static herr_t
-H5HF__cache_iblock_pre_serialize(const H5F_t *f, hid_t dxpl_id, void *_thing,
+H5HF__cache_iblock_pre_serialize(H5F_t *f, hid_t dxpl_id, void *_thing,
haddr_t addr, size_t H5_ATTR_UNUSED len, haddr_t *new_addr,
size_t H5_ATTR_UNUSED *new_len, unsigned *flags)
{
@@ -1188,10 +1232,12 @@ H5HF__cache_iblock_pre_serialize(const H5F_t *f, hid_t dxpl_id, void *_thing,
#ifndef NDEBUG
{
hbool_t descendants_clean = TRUE;
+ hbool_t fd_children_clean = TRUE;
unsigned iblock_status = 0;
/* verify that flush dependencies are working correctly. Do this
- * by verifying that all children of this iblock are clean.
+ * by verifying that all immediate flush dependency children of this
+ * iblock are clean.
*/
if(H5AC_get_entry_status(f, iblock->addr, &iblock_status) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't get iblock status")
@@ -1201,9 +1247,9 @@ H5HF__cache_iblock_pre_serialize(const H5F_t *f, hid_t dxpl_id, void *_thing,
* there is no need to check to see if it is pinned or protected, or to
* protect it if it is not.
*/
- if(H5HF__cache_verify_iblock_descendants_clean((H5F_t *)f, iblock, &iblock_status, &descendants_clean) < 0)
+ if(H5HF__cache_verify_iblock_descendants_clean((H5F_t *)f, dxpl_id, iblock->addr, iblock, &iblock_status, &fd_children_clean, &descendants_clean) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "can't verify descendants clean.")
- HDassert(descendants_clean);
+ HDassert(fd_children_clean);
}
#endif /* NDEBUG */
@@ -1253,7 +1299,7 @@ H5HF__cache_iblock_pre_serialize(const H5F_t *f, hid_t dxpl_id, void *_thing,
} /* end if */
*new_addr = iblock_addr;
- *flags = H5C__SERIALIZE_MOVED_FLAG;
+ *flags = H5AC__SERIALIZE_MOVED_FLAG;
} /* end if */
else
*flags = 0;
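
The surrounding logic, reduced to its shape (made-up names; the real code uses the file space manager and H5AC__SERIALIZE_MOVED_FLAG): a pre_serialize callback that finds its entry still sitting in temporary file space allocates real space, reports the new address, and raises the moved flag so the cache can update its index.

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdint.h>

    typedef uint64_t addr_t;
    #define SERIALIZE_MOVED_FLAG 0x1u    /* stand-in for H5AC__SERIALIZE_MOVED_FLAG */

    /* Hypothetical allocator standing in for the real file space manager */
    static addr_t alloc_real_space(size_t len) { (void)len; return 4096; }

    static int
    pre_serialize_sketch(addr_t cur_addr, bool at_tmp_addr, size_t len,
        addr_t *new_addr, unsigned *flags)
    {
        *flags = 0;
        if(at_tmp_addr) {                          /* entry lives in temporary space */
            *new_addr = alloc_real_space(len);
            if(*new_addr != cur_addr)
                *flags |= SERIALIZE_MOVED_FLAG;    /* tell the cache it moved */
        }
        return 0;
    }
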
@@ -1400,7 +1446,7 @@ H5HF__cache_iblock_serialize(const H5F_t *f, void *_image, size_t len,
*-------------------------------------------------------------------------
*/
static herr_t
-H5HF__cache_iblock_notify(H5C_notify_action_t action, void *_thing)
+H5HF__cache_iblock_notify(H5AC_notify_action_t action, void *_thing)
{
H5HF_indirect_t *iblock = (H5HF_indirect_t *)_thing; /* Indirect block info */
herr_t ret_value = SUCCEED; /* Return value */
@@ -1463,6 +1509,8 @@ H5HF__cache_iblock_notify(H5C_notify_action_t action, void *_thing)
case H5AC_NOTIFY_ACTION_ENTRY_CLEANED:
case H5AC_NOTIFY_ACTION_CHILD_DIRTIED:
case H5AC_NOTIFY_ACTION_CHILD_CLEANED:
+ case H5AC_NOTIFY_ACTION_CHILD_UNSERIALIZED:
+ case H5AC_NOTIFY_ACTION_CHILD_SERIALIZED:
/* do nothing */
break;
@@ -1618,10 +1666,6 @@ H5HF__cache_dblock_verify_chksum(const void *_image, size_t len, void *_udata)
hdr = par_info->hdr;
HDassert(hdr);
- /* Reset callback context info */
- udata->decompressed = FALSE;
- udata->dblk = NULL;
-
/* Get out if data block is not checksummed */
if(!(hdr->checksum_dblocks))
HGOTO_DONE(TRUE);
@@ -1728,7 +1772,7 @@ H5HF__cache_dblock_deserialize(const void *_image, size_t len, void *_udata,
H5HF_dblock_cache_ud_t *udata = (H5HF_dblock_cache_ud_t *)_udata; /* User data for callback */
H5HF_parent_t *par_info; /* Pointer to parent information */
H5HF_direct_t *dblock = NULL; /* Direct block info */
- const uint8_t *image = _image;/* Pointer into raw data buffer */
+ const uint8_t *image = (const uint8_t *)_image;/* Pointer into raw data buffer */
void *read_buf = NULL; /* Pointer to buffer to decompress */
haddr_t heap_addr; /* Address of heap header in the file */
void * ret_value = NULL; /* Return value */
@@ -2028,7 +2072,7 @@ H5HF__cache_dblock_image_len(const void *_thing, size_t *image_len)
*-------------------------------------------------------------------------
*/
static herr_t
-H5HF__cache_dblock_pre_serialize(const H5F_t *f, hid_t dxpl_id, void *_thing,
+H5HF__cache_dblock_pre_serialize(H5F_t *f, hid_t dxpl_id, void *_thing,
haddr_t addr, size_t len, haddr_t *new_addr, size_t *new_len, unsigned *flags)
{
hbool_t at_tmp_addr; /* Flag to indicate direct block is */
@@ -2345,12 +2389,12 @@ H5HF__cache_dblock_pre_serialize(const H5F_t *f, hid_t dxpl_id, void *_thing,
/* finally, pass data back to the metadata cache as appropriate */
if(!H5F_addr_eq(addr, dblock_addr)) {
- dblock_flags |= H5C__SERIALIZE_MOVED_FLAG;
+ dblock_flags |= H5AC__SERIALIZE_MOVED_FLAG;
*new_addr = dblock_addr;
} /* end if */
if((hdr->filter_len > 0) && (len != write_size)) {
- dblock_flags |= H5C__SERIALIZE_RESIZED_FLAG;
+ dblock_flags |= H5AC__SERIALIZE_RESIZED_FLAG;
*new_len = write_size;
} /* end if */
@@ -2444,7 +2488,7 @@ H5HF__cache_dblock_serialize(const H5F_t *f, void *image, size_t len,
*-------------------------------------------------------------------------
*/
static herr_t
-H5HF__cache_dblock_notify(H5C_notify_action_t action, void *_thing)
+H5HF__cache_dblock_notify(H5AC_notify_action_t action, void *_thing)
{
H5HF_direct_t *dblock = (H5HF_direct_t *)_thing; /* Fractal heap direct block */
herr_t ret_value = SUCCEED; /* Return value */
@@ -2480,6 +2524,8 @@ H5HF__cache_dblock_notify(H5C_notify_action_t action, void *_thing)
case H5AC_NOTIFY_ACTION_ENTRY_CLEANED:
case H5AC_NOTIFY_ACTION_CHILD_DIRTIED:
case H5AC_NOTIFY_ACTION_CHILD_CLEANED:
+ case H5AC_NOTIFY_ACTION_CHILD_UNSERIALIZED:
+ case H5AC_NOTIFY_ACTION_CHILD_SERIALIZED:
/* do nothing */
break;
@@ -2556,6 +2602,54 @@ done:
* instance of H5HF_hdr_t are clean. Set *clean to
* TRUE if this is the case, and to FALSE otherwise.
*
+ * Update -- 8/24/15
+ *
+ * With the advent of the metadata cache image feature, it is
+ * possible for the pre-serialize and serialize calls to be
+ * invoked outside of a flush. While this serialization
+ * observes flush dependencies for the order of serialization,
+ * the entries are not written to disk, and hence dirty entries
+ * remain dirty.
+ *
+ * To address this, the sanity checks in this function were updated
+ * to treat entries whose images are up to date as clean if
+ * a cache serialization is in progress.
+ *
+ * Update -- 9/29/16
+ *
+ * The implementation of flush dependencies has been changed.
+ * Prior to this change, a flush dependency parent could be
+ * flushed if and only if all its flush dependency descendants
+ * were clean. In the new definition, a flush dependency
+ * parent can be flushed if all its immediate flush dependency
+ * children are clean, regardless of any other dirty
+ * descendants.
+ *
+ * Further, metadata cache entries are now allowed to have
+ * multiple flush dependency parents.
+ *
+ * This means that the fractal heap is no longer necessarily
+ * flushed from the bottom up.
+ *
+ * For example, it is now possible for a dirty fractal heap
+ * header to be flushed before a dirty dblock, as long as
+ * there is an intervening iblock, and the header has no
+ * dirty immediate flush dependency children.
+ *
+ * Also, I gather that under some circumstances, a dblock
+ * will be a direct flush dependency child both of the iblock
+ * that points to it, and of the fractal heap header.
+ *
+ * As a result of these changes, the functionality of these
+ * sanity checking routines has been modified significantly.
+ * Instead of scanning the fractal heap from a starting point
+ * down, and verifying that there were no dirty entries, the
+ * functions now scan downward from the starting point and
+ * verify that there are no dirty flush dependency children
+ * of the specified flush dependency parent. In passing,
+ * they also walk the data structure, and verify it.
+ *
+ *
* Return: Non-negative on success/Negative on failure
*
* Programmer: John Mainzer
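
The "treat as clean" rule from the 8/24/15 update above reduces to a small predicate; the names here are illustrative only, while the real code below tests H5AC_ES__IMAGE_IS_UP_TO_DATE and calls H5AC_get_serialization_in_progress():

    #include <stdbool.h>

    /* An entry counts as clean for these sanity checks if it is actually clean,
     * or if its on-disk image is up to date and a cache serialization (rather
     * than a flush) is what triggered the pre_serialize call. */
    static bool
    entry_effectively_clean(bool is_dirty, bool image_up_to_date,
        bool serialization_in_progress)
    {
        if(!is_dirty)
            return true;
        return image_up_to_date && serialization_in_progress;
    }
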
@@ -2565,9 +2659,10 @@ done:
*/
#ifndef NDEBUG
static herr_t
-H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, H5HF_hdr_t *hdr,
- hbool_t *clean)
+H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, hid_t dxpl_id,
+ H5HF_hdr_t *hdr, hbool_t *fd_clean, hbool_t *clean)
{
+ hbool_t fd_exists = FALSE; /* whether flush dependency exists. */
haddr_t hdr_addr; /* Address of header */
unsigned hdr_status = 0; /* Header cache entry status */
herr_t ret_value = SUCCEED; /* Return value */
@@ -2579,6 +2674,7 @@ H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, H5HF_hdr_t *hdr,
HDassert(hdr);
HDassert(hdr->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(hdr->cache_info.type == H5AC_FHEAP_HDR);
+ HDassert(fd_clean);
HDassert(clean);
hdr_addr = hdr->cache_info.addr;
HDassert(hdr_addr == hdr->heap_addr);
@@ -2643,15 +2739,165 @@ H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, H5HF_hdr_t *hdr,
root_iblock_in_cache = ( (root_iblock_status & H5AC_ES__IN_CACHE) != 0);
HDassert(root_iblock_in_cache || (root_iblock == NULL));
- if(!root_iblock_in_cache) /* we are done */
- *clean = TRUE;
- else if(root_iblock_status & H5AC_ES__IS_DIRTY)
- *clean = FALSE;
+ if(!root_iblock_in_cache) { /* we are done */
+ *clean = TRUE;
+ *fd_clean = TRUE;
+ } /* end if */
+ else if((root_iblock_status & H5AC_ES__IS_DIRTY) &&
+ (((root_iblock_status & H5AC_ES__IMAGE_IS_UP_TO_DATE) == 0) ||
+ (!H5AC_get_serialization_in_progress(f)))) {
+ *clean = FALSE;
+
+ /* verify that a flush dependency exists between the header and
+ * the root inode.
+ */
+ if(H5AC_flush_dependency_exists(f, hdr->heap_addr, root_iblock_addr, &fd_exists) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't check flush dependency")
+ HDassert(fd_exists);
+
+ *fd_clean = FALSE;
+ } /* end else-if */
+ else { /* must examine children */
+ hbool_t unprotect_root_iblock = FALSE;
+
+ /* At this point, the root iblock may be pinned, protected,
+ * both, or neither, and we may or may not have a pointer
+ * to root iblock in memory.
+ *
+ * Before we call H5HF__cache_verify_iblock_descendants_clean(),
+ * we must ensure that the root iblock is either pinned or
+ * protected or both, and that we have a pointer to it.
+ * Do this as follows:
+ */
+ if(root_iblock == NULL) { /* we don't have ptr to root iblock */
+ if(0 == (root_iblock_status & H5AC_ES__IS_PROTECTED)) {
+ /* just protect the root iblock -- this will give us
+ * the pointer we need to proceed, and ensure that
+ * it is locked into the metadata cache for the
+ * duration.
+ *
+ * Note that the udata is only used in the load callback.
+ * While the fractal heap makes heavy use of the udata
+ * in this case, since we know that the entry is in cache,
+ * we can pass NULL udata.
+ *
+ * The tag specified in the dxpl we received
+ * as a parameter (via dxpl_id) may not be correct.
+ * Grab the (hopefully) correct tag from the header,
+ * and load it into the dxpl via the H5_BEGIN_TAG and
+ * H5_END_TAG macros. Note that any error bracketed by
+ * these macros must be reported with HGOTO_ERROR_TAG.
+ */
+ H5_BEGIN_TAG(dxpl_id, hdr->heap_addr, FAIL)
+
+ if(NULL == (root_iblock = (H5HF_indirect_t *)H5AC_protect(f, dxpl_id, H5AC_FHEAP_IBLOCK, root_iblock_addr, NULL, H5AC__READ_ONLY_FLAG)))
+ HGOTO_ERROR_TAG(H5E_HEAP, H5E_CANTPROTECT, FAIL, "H5AC_protect() failed.")
+
+ H5_END_TAG(FAIL)
+
+ unprotect_root_iblock = TRUE;
+ } /* end if */
+ else {
+ /* the root iblock is protected, and we have no
+ * legitimate way of getting a pointer to it.
+ *
+ * We square this circle by using the
+ * H5AC_get_entry_ptr_from_addr() to get the needed
+ * pointer.
+ *
+ * WARNING: This call should be used only in debugging
+ * routines, and it should be avoided there when
+ * possible.
+ *
+ * Further, if we ever multi-thread the cache,
+ * this routine will have to be either discarded
+ * or heavily re-worked.
+ *
+ * Finally, keep in mind that the entry whose
+ * pointer is obtained in this fashion may not
+ * be in a stable state.
+ *
+ * Assuming that the flush dependency code is working
+ * as it should, the only reason for the root iblock to
+ * be unpinned is if none of its children are in cache.
+ * This unfortunately means that if it is protected and
+ * not pinned, the fractal heap is in the process of loading
+ * or inserting one of its children. The obvious
+ * implication is that there is a significant chance that
+ * the root iblock is in an unstable state.
+ *
+ * All this suggests that using
+ * H5AC_get_entry_ptr_from_addr() to obtain the pointer
+ * to the protected root iblock is questionable here.
+ * However, since this is test/debugging code, I expect
+ * that we will use this approach until it causes problems,
+ * or we think of a better way.
+ */
+ if(H5AC_get_entry_ptr_from_addr(f, root_iblock_addr, (void **)(&root_iblock)) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "H5AC_get_entry_ptr_from_addr() failed.")
+ HDassert(root_iblock);
+ } /* end else */
+ } /* end if */
+ else { /* root_iblock != NULL */
+ /* we have the pointer to the root iblock. Protect it
+ * if it is neither pinned nor protected -- otherwise we
+ * are ready to go.
+ */
+ H5HF_indirect_t * iblock = NULL;
+
+ if(((root_iblock_status & H5AC_ES__IS_PINNED) == 0) &&
+ ((root_iblock_status & H5AC_ES__IS_PROTECTED) == 0)) {
+ /* the root iblock is neither pinned nor protected -- hence
+ * we must protect it before we proceed
+ *
+ * Note that the udata is only used in the load callback.
+ * While the fractal heap makes heavy use of the udata,
+ * in this case, since we know that the entry is in cache,
+ * we can pass NULL udata.
+ *
+ * The tag specified in the dxpl we received
+ * as a parameter (via dxpl_id) may not be correct.
+ * Grab the (hopefully) correct tag from the header,
+ * and load it into the dxpl via the H5_BEGIN_TAG and
+ * H5_END_TAG macros. Note that any error bracketed by
+ * these macros must be reported with HGOTO_ERROR_TAG.
+ */
+ H5_BEGIN_TAG(dxpl_id, hdr->heap_addr, FAIL)
+
+ if(NULL == (iblock = (H5HF_indirect_t *)H5AC_protect(f, dxpl_id, H5AC_FHEAP_IBLOCK, root_iblock_addr, NULL, H5AC__READ_ONLY_FLAG)))
+ HGOTO_ERROR_TAG(H5E_HEAP, H5E_CANTPROTECT, FAIL, "H5AC_protect() failed.")
+
+ H5_END_TAG(FAIL)
+
+ unprotect_root_iblock = TRUE;
+ HDassert(iblock == root_iblock);
+ } /* end if */
+ } /* end else */
+
+ /* at this point, one way or another, the root iblock is locked
+ * in memory for the duration of the call. Do some sanity checks,
+ * and then call H5HF__cache_verify_iblock_descendants_clean().
+ */
+ HDassert(root_iblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(root_iblock->cache_info.type == H5AC_FHEAP_IBLOCK);
+
+ if(H5HF__cache_verify_iblock_descendants_clean(f, dxpl_id, hdr->heap_addr, root_iblock, &root_iblock_status, fd_clean, clean) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "can't verify root iblock & descendants clean.")
+
+ /* Unprotect the root indirect block if required */
+ if(unprotect_root_iblock) {
+ HDassert(root_iblock);
+ if(H5AC_unprotect(f, dxpl_id, H5AC_FHEAP_IBLOCK, root_iblock_addr, root_iblock, H5AC__NO_FLAGS_SET) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTUNPROTECT, FAIL, "H5AC_unprotect() failed.")
+ } /* end if */
+ } /* end else */
} /* end if */
else if((hdr->man_dtable.curr_root_rows == 0) &&
(HADDR_UNDEF != hdr->man_dtable.table_addr)) {
haddr_t root_dblock_addr;
unsigned root_dblock_status = 0;
+ hbool_t in_cache;
+ hbool_t type_ok;
/* this is scenario 2 -- we have a root dblock */
root_dblock_addr = hdr->man_dtable.table_addr;
@@ -2659,25 +2905,48 @@ H5HF__cache_verify_hdr_descendants_clean(H5F_t *f, H5HF_hdr_t *hdr,
HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't get root dblock status")
if(root_dblock_status & H5AC_ES__IN_CACHE) {
+ if(H5AC_verify_entry_type(f, root_dblock_addr, &H5AC_FHEAP_DBLOCK[0], &in_cache, &type_ok) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't check dblock type")
+ HDassert(in_cache);
+ if(!type_ok)
+ HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "root dblock addr doesn't refer to a dblock?!?")
+
/* If a root dblock is in cache, it must have a flush
- * dependency relationship with the header.
+ * dependency relationship with the header, and it
+ * may not be the parent in any flush dependency
+ * relationship.
+ *
+ * We don't test this fully, but we will verify that
+ * the root dblock is a child in a flush dependency
+ * relationship with the header.
*/
- if(0 == (root_dblock_status & H5AC_ES__IS_FLUSH_DEP_CHILD))
- HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "root dblock in cache and not a flush dep child.")
+ if(H5AC_flush_dependency_exists(f, hdr->heap_addr, root_dblock_addr, &fd_exists) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't check flush dependency")
+ if(!fd_exists)
+ HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "root dblock is not a flush dep child of header.")
+
if(0 != (root_dblock_status & H5AC_ES__IS_FLUSH_DEP_PARENT))
HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "root dblock in cache and is a flush dep parent.")
- if(root_dblock_status & H5AC_ES__IS_DIRTY)
- *clean = FALSE;
- } /* end if */
- else /* root dblock not in cache */
- *clean = TRUE;
+ *clean = !((root_dblock_status & H5AC_ES__IS_DIRTY) &&
+ (((root_dblock_status &
+ H5AC_ES__IMAGE_IS_UP_TO_DATE) == 0) ||
+ (!H5AC_get_serialization_in_progress(f))));
+
+ *fd_clean = *clean;
+ } /* end if */
+ else { /* root dblock not in cache */
+ *fd_clean = TRUE;
+ *clean = TRUE;
+ } /* end else */
} /* end else-if */
- else
- /* this is scenario 3 -- the fractal heap is empty, and we
- * have nothing to do.
- */
- *clean = TRUE;
+ else {
+ /* this is scenario 3 -- the fractal heap is empty, and we
+ * have nothing to do.
+ */
+ *fd_clean = TRUE;
+ *clean = TRUE;
+ } /* end else */
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -2712,6 +2981,40 @@ done:
* H5HF__cache_verify_descendant_iblocks_clean() are
* recursive co-routines.
*
+ * Update -- 9/29/16
+ *
+ * The implementation of flush dependencies has been changed.
+ * Prior to this change, a flush dependency parent could be
+ * flushed if and only if all its flush dependency descendants
+ * were clean. In the new definition, a flush dependency
+ * parent can be flushed if all its immediate flush dependency
+ * children are clean, regardless of any other dirty
+ * descendants.
+ *
+ * Further, metadata cache entries are now allowed to have
+ * multiple flush dependency parents.
+ *
+ * This means that the fractal heap is no longer necessarily
+ * flushed from the bottom up.
+ *
+ * For example, it is now possible for a dirty fractal heap
+ * header to be flushed before a dirty dblock, as long as
+ * there is an intervening iblock, and the header has no
+ * dirty immediate flush dependency children.
+ *
+ * Also, I gather that under some circumstances, a dblock
+ * will be a direct flush dependency child both of the iblock
+ * that points to it, and of the fractal heap header.
+ *
+ * As a result of these changes, the functionality of these
+ * sanity checking routines has been modified significantly.
+ * Instead of scanning the fractal heap from a starting point
+ * down, and verifying that there were no dirty entries, the
+ * functions now scan downward from the starting point and
+ * verify that there are no dirty flush dependency children
+ * of the specified flush dependency parent. In passing,
+ * they also walk the data structure, and verify it.
+ *
* Return: Non-negative on success/Negative on failure
*
* Programmer: John Mainzer
@@ -2721,8 +3024,9 @@ done:
*/
#ifndef NDEBUG
static herr_t
-H5HF__cache_verify_iblock_descendants_clean(H5F_t *f, H5HF_indirect_t *iblock,
- unsigned *iblock_status, hbool_t *clean)
+H5HF__cache_verify_iblock_descendants_clean(H5F_t *f, hid_t dxpl_id,
+ haddr_t fd_parent_addr, H5HF_indirect_t *iblock, unsigned *iblock_status,
+ hbool_t * fd_clean, hbool_t *clean)
{
hbool_t has_dblocks = FALSE;
hbool_t has_iblocks = FALSE;
@@ -2732,17 +3036,19 @@ H5HF__cache_verify_iblock_descendants_clean(H5F_t *f, H5HF_indirect_t *iblock,
/* Sanity checks */
HDassert(f);
+ HDassert(H5F_addr_defined(fd_parent_addr));
HDassert(iblock);
HDassert(iblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(iblock->cache_info.type == H5AC_FHEAP_IBLOCK);
HDassert(iblock_status);
- HDassert(clean);
- HDassert(*clean);
+ HDassert(fd_clean);
+ HDassert(*fd_clean);
+ HDassert(clean); /* note that *clean need not be TRUE */
- if((*clean) && H5HF__cache_verify_iblocks_dblocks_clean(f, iblock, clean, &has_dblocks) < 0)
+ if((*fd_clean) && H5HF__cache_verify_iblocks_dblocks_clean(f, fd_parent_addr, iblock, fd_clean, clean, &has_dblocks) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "can't verify dblocks clean.")
- if((*clean) && H5HF__cache_verify_descendant_iblocks_clean(f, iblock, clean, &has_iblocks) < 0)
+ if((*fd_clean) && H5HF__cache_verify_descendant_iblocks_clean(f, dxpl_id, fd_parent_addr, iblock, fd_clean, clean, &has_iblocks) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "can't verify iblocks clean.")
/* verify that flush dependency setup is plausible */
@@ -2779,6 +3085,53 @@ done:
* during the call. Caller must ensure that this is
* the case before the call.
*
+ * Update -- 8/24/15
+ *
+ * With the advent of the metadata cache image feature, it is
+ * possible for the pre-serialize and serialize calls to be
+ * invoked outside of a flush. While this serialization
+ * observes flush dependencies for the order of serialization,
+ * the entries are not written to disk, and hence dirty entries
+ * remain dirty.
+ *
+ * To address this, the sanity checks in this function were updated
+ * to treat entries whose images are up to date as clean if
+ * a cache serialization is in progress.
+ *
+ * Update -- 9/29/16
+ *
+ * The implementation of flush dependencies has been changed.
+ * Prior to this change, a flush dependency parent could be
+ * flushed if and only if all its flush dependency descendants
+ * were clean. In the new definition, a flush dependency
+ * parent can be flushed if all its immediate flush dependency
+ * children are clean, regardless of any other dirty
+ * descendants.
+ *
+ * Further, metadata cache entries are now allowed to have
+ * multiple flush dependency parents.
+ *
+ * This means that the fractal heap is no longer necessarily
+ * flushed from the bottom up.
+ *
+ * For example, it is now possible for a dirty fractal heap
+ * header to be flushed before a dirty dblock, as long as
+ * there is an intervening iblock, and the header has no
+ * dirty immediate flush dependency children.
+ *
+ * Also, I gather that under some circumstances, a dblock
+ * will be a direct flush dependency child both of the iblock
+ * that points to it, and of the fractal heap header.
+ *
+ * As a result of these changes, the functionality of these
+ * sanity checking routines has been modified significantly.
+ * Instead of scanning the fractal heap from a starting point
+ * down, and verifying that there were no dirty entries, the
+ * functions now scan downward from the starting point and
+ * verify that there are no dirty flush dependency children
+ * of the specified flush dependency parent. In passing,
+ * they also walk the data structure, and verify it.
+ *
* Return: Non-negative on success/Negative on failure
*
* Programmer: John Mainzer
@@ -2788,53 +3141,82 @@ done:
*/
#ifndef NDEBUG
static herr_t
-H5HF__cache_verify_iblocks_dblocks_clean(H5F_t *f, H5HF_indirect_t *iblock,
- hbool_t *clean, hbool_t *has_dblocks)
+H5HF__cache_verify_iblocks_dblocks_clean(H5F_t *f, haddr_t fd_parent_addr,
+ H5HF_indirect_t *iblock, hbool_t *fd_clean, hbool_t *clean,
+ hbool_t *has_dblocks)
{
unsigned num_direct_rows;
unsigned max_dblock_index;
unsigned i;
+ haddr_t iblock_addr;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
/* Sanity checks */
HDassert(f);
+ HDassert(H5F_addr_defined(fd_parent_addr));
HDassert(iblock);
HDassert(iblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(iblock->cache_info.type == H5AC_FHEAP_IBLOCK);
- HDassert(clean);
- HDassert(*clean);
+ HDassert(fd_clean);
+ HDassert(*fd_clean);
+ HDassert(clean); /* note that *clean need not be true */
HDassert(has_dblocks);
i = 0;
num_direct_rows = MIN(iblock->nrows, iblock->hdr->man_dtable.max_direct_rows);
HDassert(num_direct_rows <= iblock->nrows);
max_dblock_index = (num_direct_rows * iblock->hdr->man_dtable.cparam.width) - 1;
- while((*clean) && (i <= max_dblock_index)) {
+ iblock_addr = iblock->addr;
+ HDassert(H5F_addr_defined(iblock_addr));
+
+ while((*fd_clean) && (i <= max_dblock_index)) {
haddr_t dblock_addr;
dblock_addr = iblock->ents[i].addr;
if(H5F_addr_defined(dblock_addr)) {
- unsigned dblock_status = 0;
+ hbool_t in_cache;
+ hbool_t type_ok;
+
+ if(H5AC_verify_entry_type(f, dblock_addr, &H5AC_FHEAP_DBLOCK[0], &in_cache, &type_ok) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't check dblock type")
+
+ if(in_cache) { /* dblock is in cache */
+ hbool_t fd_exists;
+ unsigned dblock_status = 0;
+
+ if(!type_ok)
+ HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "dblock addr doesn't refer to a dblock?!?")
+
+ if(H5AC_get_entry_status(f, dblock_addr, &dblock_status) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't get dblock status")
+
+ HDassert(dblock_status & H5AC_ES__IN_CACHE);
- if(H5AC_get_entry_status(f, dblock_addr, &dblock_status) < 0)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't get dblock status")
- if(dblock_status & H5AC_ES__IN_CACHE) {
*has_dblocks = TRUE;
- if(dblock_status & H5AC_ES__IS_DIRTY)
+ if((dblock_status & H5AC_ES__IS_DIRTY) &&
+ (((dblock_status & H5AC_ES__IMAGE_IS_UP_TO_DATE) == 0) ||
+ (!H5AC_get_serialization_in_progress(f)))) {
*clean = FALSE;
+
+ if(H5AC_flush_dependency_exists(f, fd_parent_addr, dblock_addr, &fd_exists) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't check flush dependency")
+
+ if(fd_exists)
+ *fd_clean = FALSE;
+ } /* end if */
- /* If a child dblock is in cache, it must have a flush
- * dependency relationship with this iblock, and it
- * may not be the parent in any flush dependency
- * relationship.
+ /* If a child dblock is in cache, it must have a flush
+ * dependency relationship with this iblock. Test this
+ * here.
*/
- if(0 == (dblock_status & H5AC_ES__IS_FLUSH_DEP_CHILD))
- HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "dblock in cache and not a flush dep child.")
- if(0 != (dblock_status & H5AC_ES__IS_FLUSH_DEP_PARENT))
- HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "dblock in cache and is a flush dep parent.")
+ if(H5AC_flush_dependency_exists(f, iblock_addr, dblock_addr, &fd_exists) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't check flush dependency")
+
+ if(!fd_exists)
+ HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "dblock in cache and not a flush dep child of iblock.")
} /* end if */
} /* end if */
@@ -2867,6 +3249,54 @@ done:
* during the call. Caller must ensure that this is
* the case before the call.
*
+ * Update -- 8/24/15
+ *
+ * With the advent of the metadata cache image feature, it is
+ * possible for the pre-serialize and serialize calls to be
+ * invoked outside of a flush. While this serialization
+ * observes flush dependencies for the order of serialization,
+ * the entries are not written to disk, and hence dirty entries
+ * remain dirty.
+ *
+ * To address this, the sanity checks in this function were updated
+ * to treat entries whose images are up to date as clean if
+ * a cache serialization is in progress.
+ *
+ * Update -- 9/29/16
+ *
+ * The implementation of flush dependencies has been changed.
+ * Prior to this change, a flush dependency parent could be
+ * flushed if and only if all its flush dependency descendants
+ * were clean. In the new definition, a flush dependency
+ * parent can be flushed if all its immediate flush dependency
+ * children are clean, regardless of any other dirty
+ * descendants.
+ *
+ * Further, metadata cache entries are now allowed to have
+ * multiple flush dependency parents.
+ *
+ * This means that the fractal heap is no longer necessarily
+ * flushed from the bottom up.
+ *
+ * For example, it is now possible for a dirty fractal heap
+ * header to be flushed before a dirty dblock, as long as
+ * there is an intervening iblock, and the header has no
+ * dirty immediate flush dependency children.
+ *
+ * Also, I gather that under some circumstances, a dblock
+ * will be a direct flush dependency child both of the iblock
+ * that points to it, and of the fractal heap header.
+ *
+ * As a result of these changes, the functionality of these
+ * sanity checking routines has been modified significantly.
+ * Instead of scanning the fractal heap from a starting point
+ * down, and verifying that there were no dirty entries, the
+ * functions now scan downward from the starting point and
+ * verify that there are no dirty flush dependency children
+ * of the specified flush dependency parent. In passing,
+ * they also walk the data structure, and verify it.
+ *
+ *
* Return: Non-negative on success/Negative on failure
*
* Programmer: John Mainzer
@@ -2876,33 +3306,38 @@ done:
*/
#ifndef NDEBUG
static herr_t
-H5HF__cache_verify_descendant_iblocks_clean(H5F_t *f, H5HF_indirect_t *iblock,
+H5HF__cache_verify_descendant_iblocks_clean(H5F_t *f, hid_t dxpl_id,
+ haddr_t fd_parent_addr, H5HF_indirect_t *iblock, hbool_t *fd_clean,
hbool_t *clean, hbool_t *has_iblocks)
{
unsigned first_iblock_index;
unsigned last_iblock_index;
unsigned num_direct_rows;
unsigned i;
+ haddr_t iblock_addr;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
/* Sanity checks */
HDassert(f);
+ HDassert(H5F_addr_defined(fd_parent_addr));
HDassert(iblock);
HDassert(iblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
HDassert(iblock->cache_info.type == H5AC_FHEAP_IBLOCK);
- HDassert(clean);
- HDassert(*clean);
+ HDassert(fd_clean);
+ HDassert(*fd_clean);
+ HDassert(clean); /* note that *clean need not be true */
HDassert(has_iblocks);
num_direct_rows = MIN(iblock->nrows, iblock->hdr->man_dtable.max_direct_rows);
HDassert(num_direct_rows <= iblock->nrows);
+ iblock_addr = iblock->addr;
first_iblock_index = num_direct_rows * iblock->hdr->man_dtable.cparam.width;
last_iblock_index = (iblock->nrows * iblock->hdr->man_dtable.cparam.width) - 1;
i = first_iblock_index;
- while((*clean) && (i <= last_iblock_index)) {
+ while((*fd_clean) && (i <= last_iblock_index)) {
haddr_t child_iblock_addr = iblock->ents[i].addr;
if(H5F_addr_defined(child_iblock_addr)) {
@@ -2912,9 +3347,157 @@ H5HF__cache_verify_descendant_iblocks_clean(H5F_t *f, H5HF_indirect_t *iblock,
HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't get iblock status")
if(child_iblock_status & H5AC_ES__IN_CACHE) {
+ hbool_t fd_exists;
+
*has_iblocks = TRUE;
- if(child_iblock_status & H5AC_ES__IS_DIRTY)
- *clean = FALSE;
+
+ if((child_iblock_status & H5AC_ES__IS_DIRTY) &&
+ (((child_iblock_status & H5AC_ES__IMAGE_IS_UP_TO_DATE) == 0) ||
+ (!H5AC_get_serialization_in_progress(f)))) {
+
+ *clean = FALSE;
+
+ if(H5AC_flush_dependency_exists(f, fd_parent_addr, child_iblock_addr, &fd_exists) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't check flush dependency")
+
+ if(fd_exists)
+ *fd_clean = FALSE;
+ } /* end if */
+
+ /* if the child iblock is in cache and *fd_clean is TRUE,
+ * we must continue to explore down the fractal heap tree
+ * structure to verify that all descendant blocks that are
+ * flush dependency children of the entry at fd_parent_addr are
+ * either clean, or not in the metadata cache. We do this
+ * with a recursive call to
+ * H5HF__cache_verify_iblock_descendants_clean().
+ * However, we can't make this call unless the child iblock
+ * is somehow locked into the cache -- typically via either
+ * pinning or protecting.
+ *
+ * If the child iblock is pinned, we can look up its pointer
+ * on the current iblock's pinned child iblock list, and
+ * use that pointer in the recursive call.
+ *
+ * If the entry is unprotected and unpinned, we simply
+ * protect it.
+ *
+ * If, however, the child iblock is already protected,
+ * but not pinned, we have a bit of a problem, as we have
+ * no legitimate way of looking up its pointer in memory.
+ *
+ * To solve this problem, I have added a new metadata cache
+ * call to obtain the pointer.
+ *
+ * WARNING: This call should be used only in debugging
+ * routines, and it should be avoided there when
+ * possible.
+ *
+ * Further, if we ever multi-thread the cache,
+ * this routine will have to be either discarded
+ * or heavily re-worked.
+ *
+ * Finally, keep in mind that the entry whose
+ * pointer is obtained in this fashion may not
+ * be in a stable state.
+ *
+ * Assuming that the flush dependency code is working
+ * as it should, the only reason for the child entry to
+ * be unpinned is if none of its children are in cache.
+ * This unfortunately means that if it is protected and
+ * not pinned, the fractal heap is in the process of loading
+ * or inserting one of its children. The obvious implication
+ * is that there is a significant chance that the child
+ * iblock is in an unstable state.
+ *
+ * All this suggests that using the new call to obtain the
+ * pointer to the protected child iblock is questionable
+ * here. However, since this is test/debugging code, I
+ * expect that we will use this approach until it causes
+ * problems, or we think of a better way.
+ */
+ if(*fd_clean) {
+ H5HF_indirect_t *child_iblock = NULL;
+ hbool_t unprotect_child_iblock = FALSE;
+
+ if(0 == (child_iblock_status & H5AC_ES__IS_PINNED)) {
+ /* child iblock is not pinned */
+ if(0 == (child_iblock_status & H5AC_ES__IS_PROTECTED)) {
+ /* child iblock is unprotected, and unpinned */
+ /* protect it. Note that the udata is only */
+ /* used in the load callback. While the */
+ /* fractal heap makes heavy use of the udata */
+ /* in this case, since we know that the */
+ /* entry is in cache, we can pass NULL udata */
+ /* */
+ /* The tag specified in the dxpl */
+ /* we received as a parameter (via dxpl_id) */
+ /* may not be correct. */
+ /* */
+ /* Grab the (hopefully) correct tag from the */
+ /* parent iblock, and load it into the dxpl */
+ /* via the H5_BEGIN_TAG and H5_END_TAG */
+ /* macros. Note that any error bracketed by */
+ /* these macros must be reported with */
+ /* HGOTO_ERROR_TAG. */
+
+ H5_BEGIN_TAG(dxpl_id, iblock->hdr->heap_addr, FAIL)
+
+ if(NULL == (child_iblock = (H5HF_indirect_t *) H5AC_protect(f, dxpl_id, H5AC_FHEAP_IBLOCK, child_iblock_addr, NULL, H5AC__READ_ONLY_FLAG)))
+ HGOTO_ERROR_TAG(H5E_HEAP, H5E_CANTPROTECT, FAIL, "H5AC_protect() failed.")
+
+ H5_END_TAG(FAIL)
+
+ unprotect_child_iblock = TRUE;
+ } /* end if */
+ else {
+ /* child iblock is protected -- use */
+ /* H5AC_get_entry_ptr_from_addr() to get a */
+ /* pointer to the entry. This is very slimy -- */
+ /* come up with a better solution. */
+ if(H5AC_get_entry_ptr_from_addr(f, child_iblock_addr, (void **)(&child_iblock)) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "H5AC_get_entry_ptr_from_addr() failed.")
+ HDassert(child_iblock);
+ } /* end else */
+ } /* end if */
+ else {
+ /* child iblock is pinned -- look it up in the */
+ /* parent iblock's child_iblocks array. */
+ HDassert(iblock->child_iblocks);
+ child_iblock = iblock->child_iblocks[i - first_iblock_index];
+ } /* end else */
+
+ /* At this point, one way or another we should have
+ * a pointer to the child iblock. Verify that we
+ * have the correct one.
+ */
+ HDassert(child_iblock);
+ HDassert(child_iblock->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(child_iblock->cache_info.type == H5AC_FHEAP_IBLOCK);
+ HDassert(child_iblock->addr == child_iblock_addr);
+
+ /* now make the recursive call */
+ if(H5HF__cache_verify_iblock_descendants_clean(f, dxpl_id, fd_parent_addr, child_iblock, &child_iblock_status, fd_clean, clean) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "can't verify child iblock clean.")
+
+ /* if iblock_addr != fd_parent_addr, verify that a flush
+ * dependency relationship exists between iblock and
+ * the child iblock.
+ */
+ if(fd_parent_addr != iblock_addr) {
+ if(H5AC_flush_dependency_exists(f, iblock_addr, child_iblock_addr, &fd_exists) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTGET, FAIL, "can't check flush dependency")
+
+ if(!fd_exists)
+ HGOTO_ERROR(H5E_HEAP, H5E_SYSTEM, FAIL, "iblock is not a flush dep parent of child_iblock.")
+ } /* end if */
+
+ /* if we protected the child iblock, unprotect it now */
+ if(unprotect_child_iblock) {
+ if(H5AC_unprotect(f, dxpl_id, H5AC_FHEAP_IBLOCK, child_iblock_addr, child_iblock, H5AC__NO_FLAGS_SET) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTUNPROTECT, FAIL, "H5AC_unprotect() failed.")
+ } /* end if */
+ } /* end if */
} /* end if */
} /* end if */
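
The dirty-entry test introduced above recurs for the root iblock, the root dblock, child dblocks, and child iblocks. As a minimal sketch only -- not part of the patch, with a hypothetical helper name -- the predicate could be factored out using the H5AC calls already shown in this hunk:

/* Sketch: hypothetical helper, not in the patch.  For these sanity checks,
 * an entry counts as dirty only if it is dirty AND either its on-disk image
 * is stale or no cache serialization is in progress.
 */
static hbool_t
H5HF__entry_effectively_dirty(H5F_t *f, unsigned entry_status)
{
    return (hbool_t)((entry_status & H5AC_ES__IS_DIRTY) &&
                     (((entry_status & H5AC_ES__IMAGE_IS_UP_TO_DATE) == 0) ||
                      (!H5AC_get_serialization_in_progress(f))));
}
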
diff --git a/src/H5HFdbg.c b/src/H5HFdbg.c
index 40191e5..279de1e 100644
--- a/src/H5HFdbg.c
+++ b/src/H5HFdbg.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5HFdblock.c b/src/H5HFdblock.c
index 1272ab0..4496962 100644
--- a/src/H5HFdblock.c
+++ b/src/H5HFdblock.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -493,6 +491,10 @@ H5HF_man_dblock_protect(H5HF_hdr_t *hdr, hid_t dxpl_id, haddr_t dblock_addr,
udata.filter_mask = 0;
} /* end else */
+ /* Reset compression context info */
+ udata.decompressed = FALSE;
+ udata.dblk = NULL;
+
/* Protect the direct block */
if(NULL == (dblock = (H5HF_direct_t *)H5AC_protect(hdr->f, dxpl_id, H5AC_FHEAP_DBLOCK, dblock_addr, &udata, flags)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTPROTECT, NULL, "unable to protect fractal heap direct block")
diff --git a/src/H5HFdtable.c b/src/H5HFdtable.c
index 3ceb6f5..563e8c5 100644
--- a/src/H5HFdtable.c
+++ b/src/H5HFdtable.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5HFhdr.c b/src/H5HFhdr.c
index 42857c0..2f01ce6 100644
--- a/src/H5HFhdr.c
+++ b/src/H5HFhdr.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5HFhuge.c b/src/H5HFhuge.c
index 94433c4..b2a1e68 100644
--- a/src/H5HFhuge.c
+++ b/src/H5HFhuge.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5HFiblock.c b/src/H5HFiblock.c
index bb31002..11abce0 100644
--- a/src/H5HFiblock.c
+++ b/src/H5HFiblock.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5HFiter.c b/src/H5HFiter.c
index 55a8532..54246d2 100644
--- a/src/H5HFiter.c
+++ b/src/H5HFiter.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5HFman.c b/src/H5HFman.c
index cff4395..2b88b50 100644
--- a/src/H5HFman.c
+++ b/src/H5HFman.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5HFmodule.h b/src/H5HFmodule.h
index e3274ce..dd6576a 100644
--- a/src/H5HFmodule.h
+++ b/src/H5HFmodule.h
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5HFpkg.h b/src/H5HFpkg.h
index 6abae65..d03c1222 100644
--- a/src/H5HFpkg.h
+++ b/src/H5HFpkg.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -559,15 +557,6 @@ typedef struct H5HF_dblock_cache_ud_t {
/* Package Private Variables */
/*****************************/
-/* H5HF header inherits cache-like properties from H5AC */
-H5_DLLVAR const H5AC_class_t H5AC_FHEAP_HDR[1];
-
-/* H5HF indirect block inherits cache-like properties from H5AC */
-H5_DLLVAR const H5AC_class_t H5AC_FHEAP_IBLOCK[1];
-
-/* H5HF direct block inherits cache-like properties from H5AC */
-H5_DLLVAR const H5AC_class_t H5AC_FHEAP_DBLOCK[1];
-
/* The v2 B-tree class for tracking indirectly accessed 'huge' objects */
H5_DLLVAR const H5B2_class_t H5HF_HUGE_BT2_INDIR[1];
diff --git a/src/H5HFprivate.h b/src/H5HFprivate.h
index 441ad3e..8cf4b3c 100644
--- a/src/H5HFprivate.h
+++ b/src/H5HFprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5HFpublic.h b/src/H5HFpublic.h
index 78b367b..82cfc21 100644
--- a/src/H5HFpublic.h
+++ b/src/H5HFpublic.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5HFsection.c b/src/H5HFsection.c
index 37ff8f4..42c12ec 100644
--- a/src/H5HFsection.c
+++ b/src/H5HFsection.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -85,14 +83,14 @@ static herr_t H5HF_sect_single_full_dblock(H5HF_hdr_t *hdr, hid_t dxpl_id,
H5HF_free_section_t *sect);
/* 'single' section callbacks */
-static herr_t H5HF_sect_single_add(H5FS_section_info_t *sect, unsigned *flags,
+static herr_t H5HF_sect_single_add(H5FS_section_info_t **sect, unsigned *flags,
void *udata);
static H5FS_section_info_t *H5HF_sect_single_deserialize(const H5FS_section_class_t *cls,
hid_t dxpl_id, const uint8_t *buf, haddr_t sect_addr, hsize_t sect_size,
unsigned *des_flags);
static htri_t H5HF_sect_single_can_merge(const H5FS_section_info_t *sect1,
const H5FS_section_info_t *sect2, void *udata);
-static herr_t H5HF_sect_single_merge(H5FS_section_info_t *sect1,
+static herr_t H5HF_sect_single_merge(H5FS_section_info_t **sect1,
H5FS_section_info_t *sect2, void *udata);
static htri_t H5HF_sect_single_can_shrink(const H5FS_section_info_t *sect,
void *udata);
@@ -121,7 +119,7 @@ static H5FS_section_info_t *H5HF_sect_row_deserialize(const H5FS_section_class_t
unsigned *des_flags);
static htri_t H5HF_sect_row_can_merge(const H5FS_section_info_t *sect1,
const H5FS_section_info_t *sect2, void *udata);
-static herr_t H5HF_sect_row_merge(H5FS_section_info_t *sect1,
+static herr_t H5HF_sect_row_merge(H5FS_section_info_t **sect1,
H5FS_section_info_t *sect2, void *udata);
static htri_t H5HF_sect_row_can_shrink(const H5FS_section_info_t *sect,
void *udata);
@@ -811,7 +809,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5HF_sect_single_add(H5FS_section_info_t *_sect, unsigned *flags, void *_udata)
+H5HF_sect_single_add(H5FS_section_info_t **_sect, unsigned *flags, void *_udata)
{
herr_t ret_value = SUCCEED; /* Return value */
@@ -821,7 +819,7 @@ H5HF_sect_single_add(H5FS_section_info_t *_sect, unsigned *flags, void *_udata)
* have already been checked when it was first added
*/
if(!(*flags & H5FS_ADD_DESERIALIZING)) {
- H5HF_free_section_t *sect = (H5HF_free_section_t *)_sect; /* Fractal heap free section */
+ H5HF_free_section_t **sect = (H5HF_free_section_t **)_sect; /* Fractal heap free section */
H5HF_sect_add_ud_t *udata = (H5HF_sect_add_ud_t *)_udata; /* User callback data */
H5HF_hdr_t *hdr = udata->hdr; /* Fractal heap header */
hid_t dxpl_id = udata->dxpl_id; /* DXPL ID for operation */
@@ -832,14 +830,14 @@ H5HF_sect_single_add(H5FS_section_info_t *_sect, unsigned *flags, void *_udata)
/* Check if single section covers entire direct block it's in */
/* (converts to row section possibly) */
- if(H5HF_sect_single_full_dblock(hdr, dxpl_id, sect) < 0)
+ if(H5HF_sect_single_full_dblock(hdr, dxpl_id, (*sect)) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTCONVERT, FAIL, "can't check/convert single section")
/* Set the "returned space" flag if the single section was changed
* into a row section, so the "merging & shrinking" algorithm
* gets executed in the free space manager
*/
- if(sect->sect_info.type != H5HF_FSPACE_SECT_SINGLE)
+ if((*sect)->sect_info.type != H5HF_FSPACE_SECT_SINGLE)
*flags |= H5FS_ADD_RETURNED_SPACE;
} /* end if */
@@ -949,10 +947,10 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5HF_sect_single_merge(H5FS_section_info_t *_sect1, H5FS_section_info_t *_sect2,
+H5HF_sect_single_merge(H5FS_section_info_t **_sect1, H5FS_section_info_t *_sect2,
void *_udata)
{
- H5HF_free_section_t *sect1 = (H5HF_free_section_t *)_sect1; /* Fractal heap free section */
+ H5HF_free_section_t **sect1 = (H5HF_free_section_t **)_sect1; /* Fractal heap free section */
H5HF_free_section_t *sect2 = (H5HF_free_section_t *)_sect2; /* Fractal heap free section */
H5HF_sect_add_ud_t *udata = (H5HF_sect_add_ud_t *)_udata; /* User callback data */
H5HF_hdr_t *hdr = udata->hdr; /* Fractal heap header */
@@ -963,26 +961,26 @@ H5HF_sect_single_merge(H5FS_section_info_t *_sect1, H5FS_section_info_t *_sect2,
/* Check arguments. */
HDassert(sect1);
- HDassert(sect1->sect_info.type == H5HF_FSPACE_SECT_SINGLE);
+ HDassert((*sect1)->sect_info.type == H5HF_FSPACE_SECT_SINGLE);
HDassert(sect2);
HDassert(sect2->sect_info.type == H5HF_FSPACE_SECT_SINGLE);
- HDassert(H5F_addr_eq(sect1->sect_info.addr + sect1->sect_info.size, sect2->sect_info.addr));
+ HDassert(H5F_addr_eq((*sect1)->sect_info.addr + (*sect1)->sect_info.size, sect2->sect_info.addr));
/* Add second section's size to first section */
- sect1->sect_info.size += sect2->sect_info.size;
+ (*sect1)->sect_info.size += sect2->sect_info.size;
/* Get rid of second section */
if(H5HF_sect_single_free((H5FS_section_info_t *)sect2) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTRELEASE, FAIL, "can't free section node")
/* Check to see if we should revive first section */
- if(sect1->sect_info.state != H5FS_SECT_LIVE)
- if(H5HF_sect_single_revive(hdr, dxpl_id, sect1) < 0)
+ if((*sect1)->sect_info.state != H5FS_SECT_LIVE)
+ if(H5HF_sect_single_revive(hdr, dxpl_id, (*sect1)) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTINIT, FAIL, "can't revive single free section")
/* Check if single section covers entire direct block it's in */
/* (converts to row section possibly) */
- if(H5HF_sect_single_full_dblock(hdr, dxpl_id, sect1) < 0)
+ if(H5HF_sect_single_full_dblock(hdr, dxpl_id, (*sect1)) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTCONVERT, FAIL, "can't check/convert single section")
done:
@@ -1771,10 +1769,10 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5HF_sect_row_merge(H5FS_section_info_t *_sect1, H5FS_section_info_t *_sect2,
+H5HF_sect_row_merge(H5FS_section_info_t **_sect1, H5FS_section_info_t *_sect2,
void *_udata)
{
- H5HF_free_section_t *sect1 = (H5HF_free_section_t *)_sect1; /* Fractal heap free section */
+ H5HF_free_section_t **sect1 = (H5HF_free_section_t **)_sect1; /* Fractal heap free section */
H5HF_free_section_t *sect2 = (H5HF_free_section_t *)_sect2; /* Fractal heap free section */
H5HF_sect_add_ud_t *udata = (H5HF_sect_add_ud_t *)_udata; /* User callback data */
H5HF_hdr_t *hdr = udata->hdr; /* Fractal heap header */
@@ -1785,7 +1783,7 @@ H5HF_sect_row_merge(H5FS_section_info_t *_sect1, H5FS_section_info_t *_sect2,
/* Check arguments. */
HDassert(sect1);
- HDassert(sect1->sect_info.type == H5HF_FSPACE_SECT_FIRST_ROW);
+ HDassert((*sect1)->sect_info.type == H5HF_FSPACE_SECT_FIRST_ROW);
HDassert(sect2);
HDassert(sect2->sect_info.type == H5HF_FSPACE_SECT_FIRST_ROW);
@@ -1802,8 +1800,8 @@ H5HF_sect_row_merge(H5FS_section_info_t *_sect1, H5FS_section_info_t *_sect2,
} /* end if */
else {
/* Check to see if we should revive first section */
- if(sect1->sect_info.state != H5FS_SECT_LIVE)
- if(H5HF_sect_row_revive(hdr, dxpl_id, sect1) < 0)
+ if((*sect1)->sect_info.state != H5FS_SECT_LIVE)
+ if(H5HF_sect_row_revive(hdr, dxpl_id, (*sect1)) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTINIT, FAIL, "can't revive single free section")
/* Check to see if we should revive second section */
@@ -1812,7 +1810,7 @@ H5HF_sect_row_merge(H5FS_section_info_t *_sect1, H5FS_section_info_t *_sect2,
HGOTO_ERROR(H5E_HEAP, H5E_CANTINIT, FAIL, "can't revive single free section")
/* Merge rows' underlying indirect sections together */
- if(H5HF_sect_indirect_merge_row(hdr, dxpl_id, sect1, sect2) < 0)
+ if(H5HF_sect_indirect_merge_row(hdr, dxpl_id, (*sect1), sect2) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_CANTMERGE, FAIL, "can't merge underlying indirect sections")
} /* end else */
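
The change from H5FS_section_info_t * to H5FS_section_info_t ** in the 'add' and 'merge' callbacks above lets a callback replace the caller's section node, which is what happens when a single section that covers its whole direct block is converted into a row section. A standalone, hypothetical C illustration of that pointer-to-pointer pattern (not HDF5 code):

#include <stdio.h>
#include <stdlib.h>

typedef struct section { int kind; size_t size; } section;   /* 0 = "single", 1 = "row" */

static int
convert_if_covers_block(section **sect, size_t block_size)
{
    if((*sect)->size >= block_size) {
        section *row;

        if(NULL == (row = malloc(sizeof(section))))
            return -1;
        row->kind = 1;                /* converted: now a "row" section */
        row->size = (*sect)->size;
        free(*sect);
        *sect = row;                  /* the caller's pointer now refers to the new node */
    }
    return 0;
}

int
main(void)
{
    section *s;

    if(NULL == (s = malloc(sizeof(section))))
        return 1;
    s->kind = 0;                      /* starts life as a "single" section */
    s->size = 8192;
    if(0 == convert_if_covers_block(&s, 4096))
        printf("kind = %d, size = %zu\n", s->kind, s->size);
    free(s);
    return 0;
}
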
diff --git a/src/H5HFspace.c b/src/H5HFspace.c
index 20057e4..41954fc 100644
--- a/src/H5HFspace.c
+++ b/src/H5HFspace.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5HFstat.c b/src/H5HFstat.c
index 303b1f4..e38a9b1 100644
--- a/src/H5HFstat.c
+++ b/src/H5HFstat.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Quincey Koziol <koziol@ncsa.uiuc.edu>
diff --git a/src/H5HFtest.c b/src/H5HFtest.c
index 4b97194..1b1f688 100644
--- a/src/H5HFtest.c
+++ b/src/H5HFtest.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Quincey Koziol <koziol@ncsa.uiuc.edu>
diff --git a/src/H5HFtiny.c b/src/H5HFtiny.c
index 711ceb9..79462e9 100644
--- a/src/H5HFtiny.c
+++ b/src/H5HFtiny.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5HG.c b/src/H5HG.c
index e8eb2eb..893be80 100644
--- a/src/H5HG.c
+++ b/src/H5HG.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5HGcache.c b/src/H5HGcache.c
index 6bc9860..50b2669 100644
--- a/src/H5HGcache.c
+++ b/src/H5HGcache.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -72,6 +70,10 @@ static herr_t H5HG__cache_heap_serialize(const H5F_t *f, void *image,
size_t len, void *thing);
static herr_t H5HG__cache_heap_free_icr(void *thing);
+/* Prefix deserialization */
+static herr_t H5HG__hdr_deserialize(H5HG_heap_t *heap, const uint8_t *image,
+ const H5F_t *f);
+
/*********************/
/* Package Variables */
@@ -108,6 +110,52 @@ const H5AC_class_t H5AC_GHEAP[1] = {{
/*-------------------------------------------------------------------------
+ * Function: H5HG__hdr_deserialize()
+ *
+ * Purpose: Decode a global heap's header
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: Quincey Koziol
+ * December 15, 2016
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5HG__hdr_deserialize(H5HG_heap_t *heap, const uint8_t *image, const H5F_t *f)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity check */
+ HDassert(heap);
+ HDassert(image);
+ HDassert(f);
+
+ /* Magic number */
+ if(HDmemcmp(image, H5HG_MAGIC, (size_t)H5_SIZEOF_MAGIC))
+ HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, FAIL, "bad global heap collection signature")
+ image += H5_SIZEOF_MAGIC;
+
+ /* Version */
+ if(H5HG_VERSION != *image++)
+ HGOTO_ERROR(H5E_HEAP, H5E_VERSION, FAIL, "wrong version number in global heap")
+
+ /* Reserved */
+ image += 3;
+
+ /* Size */
+ H5F_DECODE_LENGTH(f, image, heap->size);
+ HDassert(heap->size >= H5HG_MINSIZE);
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5HG__hdr_deserialize() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5HG__cache_heap_get_initial_load_size()
*
* Purpose: Return the initial speculative read size to the metadata
@@ -154,41 +202,27 @@ H5HG__cache_heap_get_initial_load_size(void H5_ATTR_UNUSED *_udata, size_t *imag
*-------------------------------------------------------------------------
*/
static herr_t
-H5HG__cache_heap_get_final_load_size(const void *_image, size_t image_len,
- void *_udata, size_t *actual_len)
+H5HG__cache_heap_get_final_load_size(const void *image, size_t image_len,
+ void *udata, size_t *actual_len)
{
- const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
- H5F_t *f = (H5F_t *)_udata; /* File pointer -- obtained from user data */
- size_t heap_size = 0; /* Total size of collection */
- herr_t ret_value = SUCCEED; /* Return value */
+ H5HG_heap_t heap; /* Global heap */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
/* Sanity check */
HDassert(image);
- HDassert(f);
+ HDassert(udata);
HDassert(actual_len);
HDassert(*actual_len == image_len);
-
- /* Magic number */
- if(HDmemcmp(image, H5HG_MAGIC, (size_t)H5_SIZEOF_MAGIC))
- HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, FAIL, "bad global heap collection signature")
- image += H5_SIZEOF_MAGIC;
-
- /* Version */
- if(H5HG_VERSION != *image++)
- HGOTO_ERROR(H5E_HEAP, H5E_VERSION, FAIL, "wrong version number in global heap")
-
- /* Reserved */
- image += 3;
-
- /* Size */
- H5F_DECODE_LENGTH(f, image, heap_size);
- HDassert(heap_size >= H5HG_MINSIZE);
HDassert(image_len == H5HG_MINSIZE);
+ /* Deserialize the heap's header */
+ if(H5HG__hdr_deserialize(&heap, (const uint8_t *)image, (const H5F_t *)udata) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTDECODE, FAIL, "can't decode global heap prefix")
+
/* Set the final size for the cache image */
- *actual_len = heap_size;
+ *actual_len = heap.size;
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -236,29 +270,13 @@ H5HG__cache_heap_deserialize(const void *_image, size_t len, void *_udata,
if(NULL == (heap->chunk = H5FL_BLK_MALLOC(gheap_chunk, len)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
- /* copy the image buffer into the newly allocate chunk */
+ /* Copy the image buffer into the newly allocated chunk */
HDmemcpy(heap->chunk, _image, len);
- image = heap->chunk;
-
- /* Magic number */
- if(HDmemcmp(image, H5HG_MAGIC, (size_t)H5_SIZEOF_MAGIC))
- HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, NULL, "bad global heap collection signature")
- image += H5_SIZEOF_MAGIC;
+ /* Deserialize the heap's header */
+ if(H5HG__hdr_deserialize(heap, (const uint8_t *)heap->chunk, f) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTDECODE, NULL, "can't decode global heap header")
- /* Version */
- if(H5HG_VERSION != *image++)
- HGOTO_ERROR(H5E_HEAP, H5E_CANTLOAD, NULL, "wrong version number in global heap")
-
- /* Reserved */
- image += 3;
-
- /* Size */
- H5F_DECODE_LENGTH(f, image, heap->size);
- HDassert(heap->size >= H5HG_MINSIZE);
- HDassert((len == H5HG_MINSIZE) /* first try */ ||
- ((len == heap->size) && (len > H5HG_MINSIZE))); /* second try */
-
/* Decode each object */
image = heap->chunk + H5HG_SIZEOF_HDR(f);
nalloc = H5HG_NOBJS(f, heap->size);
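The H5HGcache.c hunks above pull the global-heap header decode (signature, version, reserved bytes, collection size) out of the metadata-cache callbacks into a single H5HG__hdr_deserialize() helper, so the speculative get_final_load_size() pass and the full deserialize() pass no longer duplicate it. The standalone C sketch below only illustrates that shape; GH_MAGIC, gh_header_t, decode_gh_header() and the fixed little-endian widths are invented for the example and are not HDF5 names or its actual on-disk encoding.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define GH_MAGIC     "GCOL"   /* hypothetical 4-byte heap signature */
#define GH_MAGIC_LEN 4
#define GH_VERSION   1
#define GH_MIN_SIZE  64       /* hypothetical minimum collection size */

typedef struct {
    uint64_t size;            /* total size of the heap collection */
} gh_header_t;

/* Shared header decoder: check signature and version, skip the reserved
 * bytes, then read the collection size (little-endian in this sketch). */
static int
decode_gh_header(const uint8_t *image, size_t len, gh_header_t *hdr)
{
    if (len < GH_MAGIC_LEN + 1 + 3 + 8)
        return -1;                                /* prefix too short */
    if (memcmp(image, GH_MAGIC, GH_MAGIC_LEN) != 0)
        return -1;                                /* bad signature */
    image += GH_MAGIC_LEN;
    if (*image++ != GH_VERSION)
        return -1;                                /* wrong version */
    image += 3;                                   /* reserved bytes */
    hdr->size = 0;
    for (int i = 0; i < 8; i++)
        hdr->size |= (uint64_t)image[i] << (8 * i);
    return (hdr->size >= GH_MIN_SIZE) ? 0 : -1;
}

int
main(void)
{
    uint8_t     buf[16] = "GCOL";   /* signature, remaining bytes zero-filled */
    gh_header_t hdr     = {0};
    int         rc;

    buf[4] = GH_VERSION;            /* version byte */
    buf[8] = GH_MIN_SIZE;           /* collection size, little-endian */

    /* A speculative first read of the prefix is enough to learn the real
     * collection size; a full deserialize pass can reuse the same decoder
     * on the complete buffer. */
    rc = decode_gh_header(buf, sizeof buf, &hdr);
    printf("rc=%d size=%llu\n", rc, (unsigned long long)hdr.size);
    return rc;
}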
diff --git a/src/H5HGdbg.c b/src/H5HGdbg.c
index 6013de5..6dd94f5 100644
--- a/src/H5HGdbg.c
+++ b/src/H5HGdbg.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Quincey Koziol <koziol@ncsa.uiuc.edu>
diff --git a/src/H5HGmodule.h b/src/H5HGmodule.h
index aa34f29..1c68206 100644
--- a/src/H5HGmodule.h
+++ b/src/H5HGmodule.h
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5HGpkg.h b/src/H5HGpkg.h
index e566ece..47760bf 100644
--- a/src/H5HGpkg.h
+++ b/src/H5HGpkg.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -40,9 +38,6 @@
/* Package Private Variables */
/*****************************/
-/* The cache subclass */
-H5_DLLVAR const H5AC_class_t H5AC_GHEAP[1];
-
/* Declare extern the free list to manage the H5HG_t struct */
H5FL_EXTERN(H5HG_heap_t);
diff --git a/src/H5HGprivate.h b/src/H5HGprivate.h
index 0c0aa69..19dcaa4 100644
--- a/src/H5HGprivate.h
+++ b/src/H5HGprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5HGpublic.h b/src/H5HGpublic.h
index 01cd60c..fcec593 100644
--- a/src/H5HGpublic.h
+++ b/src/H5HGpublic.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5HGquery.c b/src/H5HGquery.c
index d07edcc..35abc2e 100644
--- a/src/H5HGquery.c
+++ b/src/H5HGquery.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5HL.c b/src/H5HL.c
index 9af1119..fa577c3 100644
--- a/src/H5HL.c
+++ b/src/H5HL.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5HLcache.c b/src/H5HLcache.c
index c53292a..926f787 100644
--- a/src/H5HLcache.c
+++ b/src/H5HLcache.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -91,6 +89,10 @@ static herr_t H5HL__cache_datablock_serialize(const H5F_t *f, void *image,
static herr_t H5HL__cache_datablock_notify(H5C_notify_action_t action, void *_thing);
static herr_t H5HL__cache_datablock_free_icr(void *thing);
+/* Header deserialization */
+static herr_t H5HL__hdr_deserialize(H5HL_t *heap, const uint8_t *image,
+ H5HL_cache_prfx_ud_t *udata);
+
/* Free list de/serialization */
static herr_t H5HL__fl_deserialize(H5HL_t *heap);
static void H5HL__fl_serialize(const H5HL_t *heap);
@@ -147,6 +149,64 @@ const H5AC_class_t H5AC_LHEAP_DBLK[1] = {{
/*-------------------------------------------------------------------------
+ * Function: H5HL__hdr_deserialize()
+ *
+ * Purpose: Decode a local heap's header
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: Quincey Koziol
+ * December 15, 2016
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5HL__hdr_deserialize(H5HL_t *heap, const uint8_t *image,
+ H5HL_cache_prfx_ud_t *udata)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(heap);
+ HDassert(image);
+ HDassert(udata);
+
+ /* Check magic number */
+ if(HDmemcmp(image, H5HL_MAGIC, (size_t)H5_SIZEOF_MAGIC))
+ HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, FAIL, "bad local heap signature")
+ image += H5_SIZEOF_MAGIC;
+
+ /* Version */
+ if(H5HL_VERSION != *image++)
+ HGOTO_ERROR(H5E_HEAP, H5E_VERSION, FAIL, "wrong version number in local heap")
+
+ /* Reserved */
+ image += 3;
+
+ /* Store the prefix's address & length */
+ heap->prfx_addr = udata->prfx_addr;
+ heap->prfx_size = udata->sizeof_prfx;
+
+ /* Heap data size */
+ H5F_DECODE_LENGTH_LEN(image, heap->dblk_size, udata->sizeof_size);
+
+ /* Free list head */
+ H5F_DECODE_LENGTH_LEN(image, heap->free_block, udata->sizeof_size);
+ if(heap->free_block != H5HL_FREE_NULL && heap->free_block >= heap->dblk_size)
+ HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, FAIL, "bad heap free list")
+
+ /* Heap data address */
+ H5F_addr_decode_len(udata->sizeof_addr, &image, &(heap->dblk_addr));
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5HL__hdr_deserialize() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5HL__fl_deserialize
*
* Purpose: Deserialize the free list for a heap data block
@@ -309,7 +369,7 @@ H5HL__cache_prefix_get_final_load_size(const void *_image, size_t image_len,
const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
H5HL_cache_prfx_ud_t *udata = (H5HL_cache_prfx_ud_t *)_udata; /* User data for callback */
H5HL_t heap; /* Local heap */
- htri_t ret_value = SUCCEED; /* Return value */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -319,32 +379,9 @@ H5HL__cache_prefix_get_final_load_size(const void *_image, size_t image_len,
HDassert(actual_len);
HDassert(*actual_len == image_len);
- /* Check magic number */
- if(HDmemcmp(image, H5HL_MAGIC, (size_t)H5_SIZEOF_MAGIC))
- HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, FAIL, "bad local heap signature")
- image += H5_SIZEOF_MAGIC;
-
- /* Version */
- if(H5HL_VERSION != *image++)
- HGOTO_ERROR(H5E_HEAP, H5E_VERSION, FAIL, "wrong version number in local heap")
-
- /* Reserved */
- image += 3;
-
- /* Store the prefix's address & length */
- heap.prfx_addr = udata->prfx_addr; /* NEED */
- heap.prfx_size = udata->sizeof_prfx; /* NEED */
-
- /* Heap data size */
- H5F_DECODE_LENGTH_LEN(image, heap.dblk_size, udata->sizeof_size); /* NEED */
-
- /* Free list head */
- H5F_DECODE_LENGTH_LEN(image, heap.free_block, udata->sizeof_size);
- if(heap.free_block != H5HL_FREE_NULL && heap.free_block >= heap.dblk_size)
- HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, FAIL, "bad heap free list");
-
- /* Heap data address */
- H5F_addr_decode_len(udata->sizeof_addr, &image, &(heap.dblk_addr)); /* NEED */
+ /* Deserialize the heap's header */
+ if(H5HL__hdr_deserialize(&heap, (const uint8_t *)image, udata) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTDECODE, FAIL, "can't decode local heap header")
/* Set the final size for the cache image */
*actual_len = heap.prfx_size;
@@ -398,41 +435,18 @@ H5HL__cache_prefix_deserialize(const void *_image, size_t len, void *_udata,
HDassert(H5F_addr_defined(udata->prfx_addr));
HDassert(dirty);
- /* Check magic number */
- if(HDmemcmp(image, H5HL_MAGIC, (size_t)H5_SIZEOF_MAGIC))
- HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, NULL, "bad local heap signature")
- image += H5_SIZEOF_MAGIC;
-
- /* Version */
- if(H5HL_VERSION != *image++)
- HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, NULL, "wrong version number in local heap")
-
- /* Reserved */
- image += 3;
-
/* Allocate space in memory for the heap */
if(NULL == (heap = H5HL__new(udata->sizeof_size, udata->sizeof_addr, udata->sizeof_prfx)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTALLOC, NULL, "can't allocate local heap structure");
+ /* Deserialize the heap's header */
+ if(H5HL__hdr_deserialize(heap, (const uint8_t *)image, udata) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_CANTDECODE, NULL, "can't decode local heap header")
+
/* Allocate the heap prefix */
if(NULL == (prfx = H5HL__prfx_new(heap)))
HGOTO_ERROR(H5E_HEAP, H5E_CANTALLOC, NULL, "can't allocate local heap prefix");
- /* Store the prefix's address & length */
- heap->prfx_addr = udata->prfx_addr;
- heap->prfx_size = udata->sizeof_prfx;
-
- /* Heap data size */
- H5F_DECODE_LENGTH_LEN(image, heap->dblk_size, udata->sizeof_size);
-
- /* Free list head */
- H5F_DECODE_LENGTH_LEN(image, heap->free_block, udata->sizeof_size);
- if((heap->free_block != H5HL_FREE_NULL) && (heap->free_block >= heap->dblk_size))
- HGOTO_ERROR(H5E_HEAP, H5E_BADVALUE, NULL, "bad heap free list")
-
- /* Heap data address */
- H5F_addr_decode_len(udata->sizeof_addr, &image, &(heap->dblk_addr));
-
/* Check if heap block exists */
if(heap->dblk_size) {
/* Check if heap data block is contiguous with header */
@@ -888,6 +902,8 @@ H5HL__cache_datablock_notify(H5C_notify_action_t action, void *_thing)
case H5AC_NOTIFY_ACTION_ENTRY_CLEANED:
case H5AC_NOTIFY_ACTION_CHILD_DIRTIED:
case H5AC_NOTIFY_ACTION_CHILD_CLEANED:
+ case H5AC_NOTIFY_ACTION_CHILD_UNSERIALIZED:
+ case H5AC_NOTIFY_ACTION_CHILD_SERIALIZED:
/* do nothing */
break;
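The H5HLcache.c refactoring follows the same pattern as the global heap, with the extra wrinkle that the local-heap prefix fields are decoded with file-dependent widths (the udata's sizeof_size and sizeof_addr) and that the free-list head is validated against the data-block size. The sketch below models just that variable-width decode with made-up names (lh_udata_t, decode_len(), LH_FREE_NULL); it is not the HDF5 API or its real field layout.

#include <stdint.h>
#include <stdio.h>

#define LH_FREE_NULL UINT64_MAX   /* hypothetical "no free block" marker */

typedef struct {
    unsigned sizeof_size;         /* width of length fields (e.g. 8) */
    unsigned sizeof_addr;         /* width of address fields (e.g. 8) */
} lh_udata_t;

typedef struct {
    uint64_t dblk_size;           /* size of the heap data block */
    uint64_t free_block;          /* offset of the free-list head */
    uint64_t dblk_addr;           /* address of the heap data block */
} lh_header_t;

/* Decode a little-endian integer of configurable width and advance the
 * image pointer, in the spirit of the DECODE_LENGTH_LEN-style macros. */
static uint64_t
decode_len(const uint8_t **pp, unsigned width)
{
    uint64_t v = 0;
    for (unsigned i = 0; i < width; i++)
        v |= (uint64_t)(*pp)[i] << (8 * i);
    *pp += width;
    return v;
}

/* Header decode shared by the "final load size" and full deserialize
 * callbacks: data-block size, free-list head, data-block address. */
static int
decode_lh_header(const uint8_t *image, const lh_udata_t *udata, lh_header_t *hdr)
{
    hdr->dblk_size  = decode_len(&image, udata->sizeof_size);
    hdr->free_block = decode_len(&image, udata->sizeof_size);
    if (hdr->free_block != LH_FREE_NULL && hdr->free_block >= hdr->dblk_size)
        return -1;   /* corrupt free list: head points past the data block */
    hdr->dblk_addr  = decode_len(&image, udata->sizeof_addr);
    return 0;
}

int
main(void)
{
    /* 4-byte lengths, 4-byte addresses: dblk_size=32, free head=8, addr=0x100 */
    const uint8_t buf[12] = { 32,0,0,0,  8,0,0,0,  0,1,0,0 };
    lh_udata_t    udata   = { 4, 4 };
    lh_header_t   hdr;

    if (decode_lh_header(buf, &udata, &hdr) == 0)
        printf("dblk_size=%llu free=%llu addr=0x%llx\n",
               (unsigned long long)hdr.dblk_size,
               (unsigned long long)hdr.free_block,
               (unsigned long long)hdr.dblk_addr);
    return 0;
}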
diff --git a/src/H5HLdbg.c b/src/H5HLdbg.c
index fc8f5d3..3533a39 100644
--- a/src/H5HLdbg.c
+++ b/src/H5HLdbg.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Quincey Koziol <koziol@hdfgroup.org>
diff --git a/src/H5HLdblk.c b/src/H5HLdblk.c
index f90562b..684d7f9 100644
--- a/src/H5HLdblk.c
+++ b/src/H5HLdblk.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5HLint.c b/src/H5HLint.c
index 5b547cf..e625f3d 100644
--- a/src/H5HLint.c
+++ b/src/H5HLint.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5HLmodule.h b/src/H5HLmodule.h
index b38c077..b0fd750 100644
--- a/src/H5HLmodule.h
+++ b/src/H5HLmodule.h
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5HLpkg.h b/src/H5HLpkg.h
index 7075b2a..770b7c0 100644
--- a/src/H5HLpkg.h
+++ b/src/H5HLpkg.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -39,12 +37,6 @@
/* Package Private Variables */
/*****************************/
-/* The local heap prefix cache subclass */
-H5_DLLVAR const H5AC_class_t H5AC_LHEAP_PRFX[1];
-
-/* The local heap data block cache subclass */
-H5_DLLVAR const H5AC_class_t H5AC_LHEAP_DBLK[1];
-
/* Declare extern the free list to manage the H5HL_free_t struct */
H5FL_EXTERN(H5HL_free_t);
diff --git a/src/H5HLprfx.c b/src/H5HLprfx.c
index ed1c4db..41c254a 100644
--- a/src/H5HLprfx.c
+++ b/src/H5HLprfx.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5HLprivate.h b/src/H5HLprivate.h
index e2bf29c..054d396 100644
--- a/src/H5HLprivate.h
+++ b/src/H5HLprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5HLpublic.h b/src/H5HLpublic.h
index 6dc1828..143bb78 100644
--- a/src/H5HLpublic.h
+++ b/src/H5HLpublic.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5HP.c b/src/H5HP.c
index 78e9e5b..4ef5662 100644
--- a/src/H5HP.c
+++ b/src/H5HP.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5HPprivate.h b/src/H5HPprivate.h
index 8db5b21..041c2b9 100644
--- a/src/H5HPprivate.h
+++ b/src/H5HPprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5I.c b/src/H5I.c
index 0fe9782..ce4ecdc 100644
--- a/src/H5I.c
+++ b/src/H5I.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Imodule.h b/src/H5Imodule.h
index 19cc9a4..60bda5a 100644
--- a/src/H5Imodule.h
+++ b/src/H5Imodule.h
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Ipkg.h b/src/H5Ipkg.h
index 0a6ae8c..16d7d67 100644
--- a/src/H5Ipkg.h
+++ b/src/H5Ipkg.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Iprivate.h b/src/H5Iprivate.h
index c916f31..25cea4f 100644
--- a/src/H5Iprivate.h
+++ b/src/H5Iprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-----------------------------------------------------------------------------
diff --git a/src/H5Ipublic.h b/src/H5Ipublic.h
index 3bf3c66..896f82f 100644
--- a/src/H5Ipublic.h
+++ b/src/H5Ipublic.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Itest.c b/src/H5Itest.c
index 506ca87..e7bd2a7 100644
--- a/src/H5Itest.c
+++ b/src/H5Itest.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Quincey Koziol <koziol@hdfgroup.org>
diff --git a/src/H5L.c b/src/H5L.c
index b5c6240..469a86c 100644
--- a/src/H5L.c
+++ b/src/H5L.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/****************/
@@ -2351,9 +2349,11 @@ H5L_delete_cb(H5G_loc_t *grp_loc/*in*/, const char *name, const H5O_link_t *lnk,
if(name == NULL)
HGOTO_ERROR(H5E_SYM, H5E_NOTFOUND, FAIL, "name doesn't exist")
- /* Check for removing '.' */
+ /* Check for non-existent (NULL) link.
+ * Note that this can also occur when attempting to remove '.'
+ */
if(lnk == NULL)
- HGOTO_ERROR(H5E_SYM, H5E_CANTDELETE, FAIL, "can't delete self")
+ HGOTO_ERROR(H5E_SYM, H5E_CANTDELETE, FAIL, "callback link pointer is NULL (specified link may be '.' or not exist)")
/* Remove the link from the group */
if(H5G_obj_remove(grp_loc->oloc, grp_loc->path->full_path_r, name, udata->dxpl_id) < 0)
diff --git a/src/H5Lexternal.c b/src/H5Lexternal.c
index 7f59e50..b4a173f 100644
--- a/src/H5Lexternal.c
+++ b/src/H5Lexternal.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/****************/
diff --git a/src/H5Lmodule.h b/src/H5Lmodule.h
index e665c81..cba4de4 100644
--- a/src/H5Lmodule.h
+++ b/src/H5Lmodule.h
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Lpkg.h b/src/H5Lpkg.h
index bb8534b..39e3197 100644
--- a/src/H5Lpkg.h
+++ b/src/H5Lpkg.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Lprivate.h b/src/H5Lprivate.h
index 1c8690b..dd243a6 100644
--- a/src/H5Lprivate.h
+++ b/src/H5Lprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Lpublic.h b/src/H5Lpublic.h
index ff2322f..9d1643e 100644
--- a/src/H5Lpublic.h
+++ b/src/H5Lpublic.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5MF.c b/src/H5MF.c
index 024cf19..e54d809 100644
--- a/src/H5MF.c
+++ b/src/H5MF.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -29,6 +27,7 @@
/****************/
#define H5F_FRIEND /*suppress error about including H5Fpkg */
+#define H5FS_FRIEND /*suppress error about including H5FSpkg */
#include "H5MFmodule.h" /* This source code file is part of the H5MF module */
@@ -38,6 +37,7 @@
#include "H5private.h" /* Generic Functions */
#include "H5Eprivate.h" /* Error handling */
#include "H5Fpkg.h" /* File access */
+#include "H5FSpkg.h" /* File access */
#include "H5Iprivate.h" /* IDs */
#include "H5MFpkg.h" /* File memory management */
#include "H5VMprivate.h" /* Vectors and arrays */
@@ -50,11 +50,6 @@
#define H5MF_FSPACE_SHRINK 80 /* Percent of "normal" size to shrink serialized free space size */
#define H5MF_FSPACE_EXPAND 120 /* Percent of "normal" size to expand serialized free space size */
-/* Map an allocation request type to a free list */
-#define H5MF_ALLOC_TO_FS_TYPE(F, T) ((H5FD_MEM_DEFAULT == (F)->shared->fs_type_map[T]) \
- ? (T) : (F)->shared->fs_type_map[T])
-
-
/******************/
/* Local Typedefs */
/******************/
@@ -84,9 +79,23 @@ typedef struct {
/********************/
/* Allocator routines */
-static herr_t H5MF_alloc_create(H5F_t *f, hid_t dxpl_id, H5FD_mem_t type);
-static herr_t H5MF__alloc_close(H5F_t *f, hid_t dxpl_id, H5FD_mem_t type);
-static herr_t H5MF__close_delete(H5F_t *f, hid_t dxpl_id);
+static haddr_t H5MF__alloc_pagefs(H5F_t *f, H5FD_mem_t alloc_type, hid_t dxpl_id, hsize_t size);
+
+/* "File closing" routines */
+static herr_t H5MF__close_aggrfs(H5F_t *f, hid_t dxpl_id);
+static herr_t H5MF__close_pagefs(H5F_t *f, hid_t dxpl_id);
+static herr_t H5MF__close_shrink_eoa(H5F_t *f, hid_t dxpl_id);
+
+/* General routines */
+static herr_t H5MF__get_free_sects(H5F_t *f, hid_t dxpl_id, H5FS_t *fspace, H5MF_sect_iter_ud_t *sect_udata, size_t *nums);
+static hbool_t H5MF__fsm_type_is_self_referential(H5F_t *f, H5F_mem_page_t fsm_type);
+static hbool_t H5MF__fsm_is_self_referential(H5F_t *f, H5FS_t *fspace);
+
+/* Free-space type manager routines */
+static herr_t H5MF__create_fstype(H5F_t *f, hid_t dxpl_id, H5F_mem_page_t type);
+static herr_t H5MF__close_fstype(H5F_t *f, hid_t dxpl_id, H5F_mem_page_t type);
+static herr_t H5MF__delete_fstype(H5F_t *f, hid_t dxpl_id, H5F_mem_page_t type);
+static herr_t H5MF__close_delete_fstype(H5F_t *f, hid_t dxpl_id, H5F_mem_page_t type);
/*********************/
@@ -222,10 +231,54 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5MF_alloc_open
+ * Function: H5MF_alloc_to_fs_type
+ *
+ * Purpose: Map "alloc_type" to the free-space manager type
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi; Nov 2016
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+H5MF_alloc_to_fs_type(H5F_t *f, H5FD_mem_t alloc_type, hsize_t size, H5F_mem_page_t *fs_type)
+{
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ HDassert(f);
+ HDassert(fs_type);
+
+ if(H5F_PAGED_AGGR(f)) { /* paged aggregation */
+ if(size >= f->shared->fs_page_size) {
+ if(H5F_HAS_FEATURE(f, H5FD_FEAT_PAGED_AGGR)) { /* multi or split driver */
+ /* For non-contiguous address space, map to large size free-space manager for each alloc_type */
+ if(H5FD_MEM_DEFAULT == f->shared->fs_type_map[alloc_type])
+ *fs_type = (H5F_mem_page_t) (alloc_type + (H5FD_MEM_NTYPES - 1));
+ else
+ *fs_type = (H5F_mem_page_t) (f->shared->fs_type_map[alloc_type] + (H5FD_MEM_NTYPES - 1));
+ } /* end if */
+ else
+ /* For contiguous address space, map to generic large size free-space manager */
+ *fs_type = H5F_MEM_PAGE_GENERIC; /* H5F_MEM_PAGE_SUPER */
+ } /* end if */
+ else
+ *fs_type = (H5F_mem_page_t)H5MF_ALLOC_TO_FS_AGGR_TYPE(f, alloc_type);
+ } /* end if */
+ else /* non-paged aggregation */
+ *fs_type = (H5F_mem_page_t)H5MF_ALLOC_TO_FS_AGGR_TYPE(f, alloc_type);
+
+ FUNC_LEAVE_NOAPI_VOID
+} /* end H5MF_alloc_to_fs_type() */
+
+
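H5MF_alloc_to_fs_type() above encodes the core of the paged-aggregation layout policy: a request at or above the file-space page size is routed to a "large" free-space manager (one per allocation type when the driver keeps separate address spaces, a single generic one otherwise), while smaller requests, and everything when paged aggregation is off, keep the aggregator-style per-type mapping. A compact model of that decision tree, with invented enums (mem_type_t, page_type_t) and a simplified small-request branch, is sketched below; it does not reproduce the real H5F_mem_page_t layout or type map.

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Invented stand-ins for H5FD_mem_t / H5F_mem_page_t; only the shape of
 * the mapping is modeled, not the real enum values or the fs_type_map. */
typedef enum { MEM_SUPER, MEM_BTREE, MEM_DRAW, MEM_NTYPES } mem_type_t;
typedef enum {
    PAGE_SUPER, PAGE_BTREE, PAGE_DRAW,                     /* small managers */
    PAGE_LARGE_SUPER, PAGE_LARGE_BTREE, PAGE_LARGE_DRAW,   /* per-type large */
    PAGE_GENERIC                                           /* single large   */
} page_type_t;

typedef struct {
    bool   paged_aggr;       /* paged aggregation enabled?            */
    bool   per_type_pages;   /* driver keeps separate address spaces? */
    size_t page_size;        /* file-space page size                  */
} file_cfg_t;

/* Map an (allocation type, request size) pair to a free-space manager. */
static page_type_t
alloc_to_fs_type(const file_cfg_t *f, mem_type_t type, size_t size)
{
    if (f->paged_aggr && size >= f->page_size)
        /* Large request: per-type large manager for non-contiguous address
         * spaces (multi/split style), one generic manager otherwise. */
        return f->per_type_pages ? (page_type_t)(PAGE_LARGE_SUPER + type)
                                 : PAGE_GENERIC;

    /* Small request, or paged aggregation disabled: per-type manager. */
    return (page_type_t)type;
}

int
main(void)
{
    file_cfg_t f = { true, false, 4096 };

    printf("small raw-data alloc -> manager %d\n",
           alloc_to_fs_type(&f, MEM_DRAW, 100));    /* PAGE_DRAW    */
    printf("large raw-data alloc -> manager %d\n",
           alloc_to_fs_type(&f, MEM_DRAW, 8192));   /* PAGE_GENERIC */
    return 0;
}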
+/*-------------------------------------------------------------------------
+ * Function: H5MF_open_fstype
*
* Purpose: Open an existing free space manager of TYPE for file by
- * creating a free-space structure
+ * creating a free-space structure.
+ * Note that TYPE can be H5F_mem_page_t or H5FD_mem_t enum types.
*
* Return: Success: non-negative
* Failure: negative
@@ -237,33 +290,59 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5MF_alloc_open(H5F_t *f, hid_t dxpl_id, H5FD_mem_t type)
+H5MF_open_fstype(H5F_t *f, hid_t dxpl_id, H5F_mem_page_t type)
{
const H5FS_section_class_t *classes[] = { /* Free space section classes implemented for file */
- H5MF_FSPACE_SECT_CLS_SIMPLE};
- H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
- H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
- herr_t ret_value = SUCCEED; /* Return value */
+ H5MF_FSPACE_SECT_CLS_SIMPLE,
+ H5MF_FSPACE_SECT_CLS_SMALL,
+ H5MF_FSPACE_SECT_CLS_LARGE };
+ hsize_t alignment; /* Alignment to use */
+ hsize_t threshold; /* Threshold to use */
+ H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
+ H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
+ H5AC_ring_t fsm_ring = H5AC_RING_INV; /* ring of fsm */
+ hbool_t reset_ring = FALSE; /* Whether the ring was set */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT_TAG(dxpl_id, H5AC__FREESPACE_TAG, FAIL)
+ FUNC_ENTER_NOAPI_TAG(dxpl_id, H5AC__FREESPACE_TAG, FAIL)
/*
* Check arguments.
*/
HDassert(f);
+ if(H5F_PAGED_AGGR(f))
+ HDassert(type < H5F_MEM_PAGE_NTYPES);
+ else {
+ HDassert((H5FD_mem_t)type < H5FD_MEM_NTYPES);
+ HDassert((H5FD_mem_t)type != H5FD_MEM_NOLIST);
+ } /* end else */
HDassert(f->shared);
- HDassert(type != H5FD_MEM_NOLIST);
HDassert(H5F_addr_defined(f->shared->fs_addr[type]));
HDassert(f->shared->fs_state[type] == H5F_FS_STATE_CLOSED);
+ /* Set up the alignment and threshold to use depending on the manager type */
+ if(H5F_PAGED_AGGR(f)) {
+ alignment = (type == H5F_MEM_PAGE_GENERIC) ? f->shared->fs_page_size : (hsize_t)H5F_ALIGN_DEF;
+ threshold = H5F_ALIGN_THRHD_DEF;
+ } /* end if */
+ else {
+ alignment = f->shared->alignment;
+ threshold = f->shared->threshold;
+ } /* end else */
+
/* Set the ring type in the DXPL */
- if(H5AC_set_ring(dxpl_id, H5AC_RING_FSM, &dxpl, &orig_ring) < 0)
+ if(H5MF__fsm_type_is_self_referential(f, type))
+ fsm_ring = H5AC_RING_MDFSM;
+ else
+ fsm_ring = H5AC_RING_RDFSM;
+ if(H5AC_set_ring(dxpl_id, fsm_ring, &dxpl, &orig_ring) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value")
+ reset_ring = TRUE;
/* Open an existing free space structure for the file */
if(NULL == (f->shared->fs_man[type] = H5FS_open(f, dxpl_id, f->shared->fs_addr[type],
- NELMTS(classes), classes, f, f->shared->alignment, f->shared->threshold)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't initialize free space info")
+ NELMTS(classes), classes, f, alignment, threshold)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't initialize free space info")
/* Set the state for the free space manager to "open", if it is now */
if(f->shared->fs_man[type])
@@ -271,18 +350,20 @@ H5MF_alloc_open(H5F_t *f, hid_t dxpl_id, H5FD_mem_t type)
done:
/* Reset the ring in the DXPL */
- if(H5AC_reset_ring(dxpl, orig_ring) < 0)
- HDONE_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set property value")
+ if(reset_ring)
+ if(H5AC_reset_ring(dxpl, orig_ring) < 0)
+ HDONE_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set property value")
FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
-} /* end H5MF_alloc_open() */
+} /* end H5MF_open_fstype() */
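H5MF_open_fstype() above also shows the ring discipline used throughout the refactored allocator: before touching a free-space manager, the DXPL's metadata-cache ring is switched to H5AC_RING_MDFSM for self-referential managers and H5AC_RING_RDFSM for the rest, and the done: block restores the original ring only when it was actually changed (the reset_ring flag). A minimal standalone sketch of that set/work/reset-on-cleanup shape follows, with invented names (ring_t, set_ring(), open_fs_manager()); it does not reproduce the real H5AC interfaces.

#include <stdbool.h>
#include <stdio.h>

/* Hypothetical ring identifiers standing in for the metadata-cache rings. */
typedef enum { RING_INVALID, RING_RDFSM, RING_MDFSM } ring_t;

static ring_t current_ring = RING_INVALID;   /* stand-in for the DXPL ring */

static int
set_ring(ring_t new_ring, ring_t *orig_ring)
{
    *orig_ring   = current_ring;
    current_ring = new_ring;
    return 0;
}

static int
reset_ring(ring_t orig_ring)
{
    current_ring = orig_ring;
    return 0;
}

/* Open a free-space manager: pick the ring from whether the manager is
 * self-referential, do the work, and restore the ring on the way out,
 * but only if it was actually changed (mirrors the reset_ring flag). */
static int
open_fs_manager(bool self_referential)
{
    ring_t orig_ring = RING_INVALID;
    bool   ring_set  = false;
    int    ret       = 0;

    if (set_ring(self_referential ? RING_MDFSM : RING_RDFSM, &orig_ring) < 0) {
        ret = -1;
        goto done;
    }
    ring_set = true;

    printf("opening manager in ring %d\n", (int)current_ring);  /* placeholder work */

done:
    if (ring_set && reset_ring(orig_ring) < 0)
        ret = -1;
    return ret;
}

int
main(void)
{
    return open_fs_manager(false);   /* a non-self-referential manager */
}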
/*-------------------------------------------------------------------------
- * Function: H5MF_alloc_create
+ * Function: H5MF__create_fstype
*
* Purpose: Create free space manager of TYPE for the file by creating
- * a free-space structure
+ * a free-space structure
+ * Note that TYPE can be H5F_mem_page_t or H5FD_mem_t enum types.
*
* Return: Success: non-negative
* Failure: negative
@@ -294,21 +375,34 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5MF_alloc_create(H5F_t *f, hid_t dxpl_id, H5FD_mem_t type)
+H5MF__create_fstype(H5F_t *f, hid_t dxpl_id, H5F_mem_page_t type)
{
const H5FS_section_class_t *classes[] = { /* Free space section classes implemented for file */
- H5MF_FSPACE_SECT_CLS_SIMPLE};
- herr_t ret_value = SUCCEED; /* Return value */
- H5FS_create_t fs_create; /* Free space creation parameters */
+ H5MF_FSPACE_SECT_CLS_SIMPLE,
+ H5MF_FSPACE_SECT_CLS_SMALL,
+ H5MF_FSPACE_SECT_CLS_LARGE };
+ H5FS_create_t fs_create; /* Free space creation parameters */
+ hsize_t alignment; /* Alignment to use */
+ hsize_t threshold; /* Threshold to use */
+ H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
+ H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
+ H5AC_ring_t fsm_ring = H5AC_RING_INV; /* ring of fsm */
+ hbool_t reset_ring = FALSE; /* Whether the ring was set */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT_TAG(dxpl_id, H5AC__FREESPACE_TAG, FAIL)
+ FUNC_ENTER_STATIC_TAG(dxpl_id, H5AC__FREESPACE_TAG, FAIL)
/*
* Check arguments.
*/
HDassert(f);
+ if(H5F_PAGED_AGGR(f))
+ HDassert(type < H5F_MEM_PAGE_NTYPES);
+ else {
+ HDassert((H5FD_mem_t)type < H5FD_MEM_NTYPES);
+ HDassert((H5FD_mem_t)type != H5FD_MEM_NOLIST);
+ } /* end else */
HDassert(f->shared);
- HDassert(type != H5FD_MEM_NOLIST);
HDassert(!H5F_addr_defined(f->shared->fs_addr[type]));
HDassert(f->shared->fs_state[type] == H5F_FS_STATE_CLOSED);
@@ -319,24 +413,48 @@ H5MF_alloc_create(H5F_t *f, hid_t dxpl_id, H5FD_mem_t type)
fs_create.max_sect_addr = 1 + H5VM_log2_gen((uint64_t)f->shared->maxaddr);
fs_create.max_sect_size = f->shared->maxaddr;
- if(NULL == (f->shared->fs_man[type] = H5FS_create(f, dxpl_id, NULL,
- &fs_create, NELMTS(classes), classes, f, f->shared->alignment, f->shared->threshold)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't initialize free space info")
+ /* Set up alignment and threshold to use depending on TYPE */
+ if(H5F_PAGED_AGGR(f)) {
+ alignment = (type == H5F_MEM_PAGE_GENERIC) ? f->shared->fs_page_size : (hsize_t)H5F_ALIGN_DEF;
+ threshold = H5F_ALIGN_THRHD_DEF;
+ } /* end if */
+ else {
+ alignment = f->shared->alignment;
+ threshold = f->shared->threshold;
+ } /* end else */
+ /* Set the ring type in the DXPL */
+ if(H5MF__fsm_type_is_self_referential(f, type))
+ fsm_ring = H5AC_RING_MDFSM;
+ else
+ fsm_ring = H5AC_RING_RDFSM;
+ if(H5AC_set_ring(dxpl_id, fsm_ring, &dxpl, &orig_ring) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value")
+ reset_ring = TRUE;
+
+ if(NULL == (f->shared->fs_man[type] = H5FS_create(f, dxpl_id, NULL,
+ &fs_create, NELMTS(classes), classes, f, alignment, threshold)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't initialize free space info")
/* Set the state for the free space manager to "open", if it is now */
if(f->shared->fs_man[type])
f->shared->fs_state[type] = H5F_FS_STATE_OPEN;
done:
+ /* Reset the ring in the DXPL */
+ if(reset_ring)
+ if(H5AC_reset_ring(dxpl, orig_ring) < 0)
+ HDONE_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set property value")
+
FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
-} /* end H5MF_alloc_create() */
+} /* end H5MF__create_fstype() */
/*-------------------------------------------------------------------------
- * Function: H5MF_alloc_start
+ * Function: H5MF_start_fstype
*
- * Purpose: Open or create a free space manager of a given type
+ * Purpose: Open or create a free space manager of a given TYPE.
+ * Note that TYPE can be H5F_mem_page_t or H5FD_mem_t enum types.
*
* Return: Success: non-negative
* Failure: negative
@@ -348,40 +466,123 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5MF_alloc_start(H5F_t *f, hid_t dxpl_id, H5FD_mem_t type)
+H5MF_start_fstype(H5F_t *f, hid_t dxpl_id, H5F_mem_page_t type)
{
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT
+ FUNC_ENTER_NOAPI_TAG(dxpl_id, H5AC__FREESPACE_TAG, FAIL)
/*
* Check arguments.
*/
HDassert(f);
HDassert(f->shared);
- HDassert(type != H5FD_MEM_NOLIST);
+ if(H5F_PAGED_AGGR(f))
+ HDassert(type < H5F_MEM_PAGE_NTYPES);
+ else {
+ HDassert((H5FD_mem_t)type < H5FD_MEM_NTYPES);
+ HDassert((H5FD_mem_t)type != H5FD_MEM_NOLIST);
+ } /* end else */
/* Check if the free space manager exists already */
if(H5F_addr_defined(f->shared->fs_addr[type])) {
/* Open existing free space manager */
- if(H5MF_alloc_open(f, dxpl_id, type) < 0)
+ if(H5MF_open_fstype(f, dxpl_id, type) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTOPENOBJ, FAIL, "can't initialize file free space")
} /* end if */
else {
/* Create new free space manager */
- if(H5MF_alloc_create(f, dxpl_id, type) < 0)
+ if(H5MF__create_fstype(f, dxpl_id, type) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTCREATE, FAIL, "can't initialize file free space")
} /* end else */
done:
- FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5MF_alloc_start() */
+ FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
+} /* end H5MF_start_fstype() */
/*-------------------------------------------------------------------------
- * Function: H5MF__alloc_close
+ * Function: H5MF__delete_fstype
*
- * Purpose: Close an existing free space manager of TYPE for file
+ * Purpose: Delete the free-space manager as specified by TYPE.
+ * Note that TYPE can be H5F_mem_page_t or H5FD_mem_t enum types.
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi; April 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5MF__delete_fstype(H5F_t *f, hid_t dxpl_id, H5F_mem_page_t type)
+{
+ H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
+ H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
+ H5AC_ring_t fsm_ring = H5AC_RING_INV; /* ring of fsm */
+ hbool_t reset_ring = FALSE; /* Whether the ring was set */
+ haddr_t tmp_fs_addr; /* Temporary holder for free space manager address */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC_TAG(dxpl_id, H5AC__FREESPACE_TAG, FAIL)
+
+ /* check args */
+ HDassert(f);
+ if(H5F_PAGED_AGGR(f))
+ HDassert(type < H5F_MEM_PAGE_NTYPES);
+ else
+ HDassert((H5FD_mem_t)type < H5FD_MEM_NTYPES);
+ HDassert(H5F_addr_defined(f->shared->fs_addr[type]));
+
+ /* Put address into temporary variable and reset it */
+ /* (Avoids loopback in file space freeing routine) */
+ tmp_fs_addr = f->shared->fs_addr[type];
+ f->shared->fs_addr[type] = HADDR_UNDEF;
+
+ /* Shift to "deleting" state, to make certain we don't track any
+ * file space freed as a result of deleting the free space manager.
+ */
+ f->shared->fs_state[type] = H5F_FS_STATE_DELETING;
+
+ /* Set the ring type in the DXPL */
+ if(H5MF__fsm_type_is_self_referential(f, type))
+ fsm_ring = H5AC_RING_MDFSM;
+ else
+ fsm_ring = H5AC_RING_RDFSM;
+ if(H5AC_set_ring(dxpl_id, fsm_ring, &dxpl, &orig_ring) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value")
+ reset_ring = TRUE;
+
+#ifdef H5MF_ALLOC_DEBUG_MORE
+HDfprintf(stderr, "%s: Before deleting free space manager\n", FUNC);
+#endif /* H5MF_ALLOC_DEBUG_MORE */
+
+ /* Delete free space manager for this type */
+ if(H5FS_delete(f, dxpl_id, tmp_fs_addr) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTFREE, FAIL, "can't delete free space manager")
+
+ /* Shift [back] to closed state */
+ HDassert(f->shared->fs_state[type] == H5F_FS_STATE_DELETING);
+ f->shared->fs_state[type] = H5F_FS_STATE_CLOSED;
+
+ /* Sanity check that the free space manager for this type wasn't started up again */
+ HDassert(!H5F_addr_defined(f->shared->fs_addr[type]));
+
+done:
+ /* Reset the ring in the DXPL */
+ if(reset_ring)
+ if(H5AC_reset_ring(dxpl, orig_ring) < 0)
+ HDONE_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set property value")
+
+ FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
+} /* end H5MF__delete_fstype() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5MF__close_fstype
+ *
+ * Purpose: Close the free space manager of TYPE for the file.
+ * Note that TYPE can be H5F_mem_page_t or H5FD_mem_t enum types.
*
* Return: Success: non-negative
* Failure: negative
@@ -391,7 +592,7 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5MF__alloc_close(H5F_t *f, hid_t dxpl_id, H5FD_mem_t type)
+H5MF__close_fstype(H5F_t *f, hid_t dxpl_id, H5F_mem_page_t type)
{
herr_t ret_value = SUCCEED; /* Return value */
@@ -401,20 +602,178 @@ H5MF__alloc_close(H5F_t *f, hid_t dxpl_id, H5FD_mem_t type)
* Check arguments.
*/
HDassert(f);
+ if(H5F_PAGED_AGGR(f))
+ HDassert(type < H5F_MEM_PAGE_NTYPES);
+ else
+ HDassert((H5FD_mem_t)type < H5FD_MEM_NTYPES);
HDassert(f->shared);
- HDassert(type != H5FD_MEM_NOLIST);
HDassert(f->shared->fs_man[type]);
HDassert(f->shared->fs_state[type] != H5F_FS_STATE_CLOSED);
+#ifdef H5MF_ALLOC_DEBUG_MORE
+HDfprintf(stderr, "%s: Before closing free space manager\n", FUNC);
+#endif /* H5MF_ALLOC_DEBUG_MORE */
+
/* Close an existing free space structure for the file */
if(H5FS_close(f, dxpl_id, f->shared->fs_man[type]) < 0)
- HGOTO_ERROR(H5E_FSPACE, H5E_CANTRELEASE, FAIL, "can't release free space info")
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't release free space info")
f->shared->fs_man[type] = NULL;
f->shared->fs_state[type] = H5F_FS_STATE_CLOSED;
done:
FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
-} /* end H5MF__alloc_close() */
+} /* end H5MF__close_fstype() */
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5MF_add_sect
+ *
+ * Purpose: To add a section to the specified free-space manager.
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi; April 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5MF_add_sect(H5F_t *f, H5FD_mem_t alloc_type, hid_t dxpl_id, H5FS_t *fspace, H5MF_free_section_t *node)
+{
+ H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
+ H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
+ H5AC_ring_t fsm_ring = H5AC_RING_INV; /* ring of fsm */
+ H5MF_sect_ud_t udata; /* User data for callback */
+ H5F_mem_page_t fs_type; /* Free space type (mapped from allocation type) */
+
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_TAG(dxpl_id, H5AC__FREESPACE_TAG, FAIL)
+
+ HDassert(f);
+ HDassert(fspace);
+ HDassert(node);
+
+ H5MF_alloc_to_fs_type(f, alloc_type, node->sect_info.size, &fs_type);
+
+ /* Construct user data for callbacks */
+ udata.f = f;
+ udata.dxpl_id = dxpl_id;
+ udata.alloc_type = alloc_type;
+ udata.allow_sect_absorb = TRUE;
+ udata.allow_eoa_shrink_only = FALSE;
+
+ /* Set the ring type in the DXPL */
+ if(H5MF__fsm_is_self_referential(f, fspace))
+ fsm_ring = H5AC_RING_MDFSM;
+ else
+ fsm_ring = H5AC_RING_RDFSM;
+ if(H5AC_set_ring(dxpl_id, fsm_ring, &dxpl, &orig_ring) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value")
+
+#ifdef H5MF_ALLOC_DEBUG_MORE
+HDfprintf(stderr, "%s: adding node, node->sect_info.addr = %a, node->sect_info.size = %Hu\n", FUNC, node->sect_info.addr, node->sect_info.size);
+#endif /* H5MF_ALLOC_DEBUG_MORE */
+ /* Add the section */
+ if(H5FS_sect_add(f, dxpl_id, fspace, (H5FS_section_info_t *)node, H5FS_ADD_RETURNED_SPACE, &udata) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINSERT, FAIL, "can't re-add section to file free space")
+
+done:
+ /* Reset the ring in the DXPL */
+ if(H5AC_reset_ring(dxpl, orig_ring) < 0)
+ HDONE_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set property value")
+ FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
+} /* end H5MF_add_sect() */
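
The RDFSM/MDFSM ring selection seen in H5MF_add_sect() recurs throughout this patch; a minimal sketch of the idea, with illustrative names (select_fsm_ring, RING_RDFSM, RING_MDFSM) standing in for the real H5AC ring machinery:

    #include <stdio.h>
    #include <stdbool.h>

    typedef enum { RING_RDFSM = 1, RING_MDFSM = 2 } fsm_ring_t;

    /* Self-referential managers (those that track the space occupied by the
     * free-space metadata itself) live in the MDFSM ring; everything else
     * uses the RDFSM ring. */
    static fsm_ring_t
    select_fsm_ring(bool fsm_is_self_referential)
    {
        return fsm_is_self_referential ? RING_MDFSM : RING_RDFSM;
    }

    int main(void)
    {
        printf("%d %d\n", select_fsm_ring(false), select_fsm_ring(true)); /* 1 2 */
        return 0;
    }
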
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5MF_find_sect
+ *
+ * Purpose: To find a section from the specified free-space manager to fulfill the request.
+ * If found, re-add the left-over space back to the manager.
+ *
+ * Return: TRUE if a section is found to fulfill the request
+ * FALSE if not
+ *
+ * Programmer: Vailin Choi; April 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+htri_t
+H5MF_find_sect(H5F_t *f, H5FD_mem_t alloc_type, hid_t dxpl_id, hsize_t size, H5FS_t *fspace, haddr_t *addr)
+{
+ H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
+ H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
+ H5AC_ring_t fsm_ring = H5AC_RING_INV; /* ring of fsm */
+ H5MF_free_section_t *node; /* Free space section pointer */
+ hbool_t reset_ring = FALSE; /* Whether the ring was set */
+ htri_t ret_value = FAIL; /* Whether an existing free list node was found */
+
+ FUNC_ENTER_NOAPI_TAG(dxpl_id, H5AC__FREESPACE_TAG, FAIL)
+
+ HDassert(f);
+ HDassert(fspace);
+
+ /* Set the ring type in the DXPL */
+ if(H5MF__fsm_is_self_referential(f, fspace))
+ fsm_ring = H5AC_RING_MDFSM;
+ else
+ fsm_ring = H5AC_RING_RDFSM;
+ if(H5AC_set_ring(dxpl_id, fsm_ring, &dxpl, &orig_ring) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value")
+ reset_ring = TRUE;
+
+ /* Try to get a section from the free space manager */
+ if((ret_value = H5FS_sect_find(f, dxpl_id, fspace, size, (H5FS_section_info_t **)&node)) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "error locating free space in file")
+
+#ifdef H5MF_ALLOC_DEBUG_MORE
+HDfprintf(stderr, "%s: section found = %t\n", FUNC, ret_value);
+#endif /* H5MF_ALLOC_DEBUG_MORE */
+
+ /* Check for actually finding section */
+ if(ret_value) {
+ /* Sanity check */
+ HDassert(node);
+
+ /* Retrieve return value */
+ if(addr)
+ *addr = node->sect_info.addr;
+
+ /* Check for eliminating the section */
+ if(node->sect_info.size == size) {
+#ifdef H5MF_ALLOC_DEBUG_MORE
+HDfprintf(stderr, "%s: freeing node\n", FUNC);
+#endif /* H5MF_ALLOC_DEBUG_MORE */
+
+ /* Free section node */
+ if(H5MF_sect_free((H5FS_section_info_t *)node) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't free simple section node")
+ } /* end if */
+ else {
+ /* Adjust information for section */
+ node->sect_info.addr += size;
+ node->sect_info.size -= size;
+
+#ifdef H5MF_ALLOC_DEBUG_MORE
+HDfprintf(stderr, "%s: re-adding node, node->sect_info.size = %Hu\n", FUNC, node->sect_info.size);
+#endif /* H5MF_ALLOC_DEBUG_MORE */
+
+ /* Re-add the section to the free-space manager */
+ if(H5MF_add_sect(f, alloc_type, dxpl_id, fspace, node) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINSERT, FAIL, "can't re-add section to file free space")
+ } /* end else */
+ } /* end if */
+
+done:
+ /* Reset the ring in the DXPL */
+ if(reset_ring)
+ if(H5AC_reset_ring(dxpl, orig_ring) < 0)
+ HDONE_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set property value")
+
+ FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
+} /* end H5MF_find_sect() */
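
A minimal standalone sketch of the section-splitting step in H5MF_find_sect(): when the section located by H5FS_sect_find() is larger than the request, the caller receives its starting address and the shrunken remainder is re-added via H5MF_add_sect(). The type and function names below are illustrative only:

    #include <stdio.h>

    typedef struct { unsigned long long addr, size; } sect_t;

    /* Returns the allocated address; shrinks *s to the left-over piece
     * (a left-over size of 0 means the whole section was consumed). */
    static unsigned long long
    take_from_section(sect_t *s, unsigned long long request)
    {
        unsigned long long alloc_addr = s->addr;

        s->addr += request;
        s->size -= request;
        return alloc_addr;
    }

    int main(void)
    {
        sect_t s = { 4096, 3000 };      /* free section at 4096, 3000 bytes */
        unsigned long long a = take_from_section(&s, 1024);

        /* Caller gets 4096; the remaining section (5120, 1976) would be
         * re-added to the manager in the real code. */
        printf("alloc=%llu leftover addr=%llu size=%llu\n", a, s.addr, s.size);
        return 0;
    }
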
/*-------------------------------------------------------------------------
@@ -438,8 +797,10 @@ haddr_t
H5MF_alloc(H5F_t *f, H5FD_mem_t alloc_type, hid_t dxpl_id, hsize_t size)
{
H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
+ H5AC_ring_t fsm_ring = H5AC_RING_INV; /* free space manager ring */
H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
- H5FD_mem_t fs_type; /* Free space type (mapped from allocation type) */
+ H5F_mem_page_t fs_type; /* Free space type (mapped from allocation type) */
+ hbool_t reset_ring = FALSE; /* Whether the ring was set */
haddr_t ret_value = HADDR_UNDEF; /* Return value */
FUNC_ENTER_NOAPI_TAG(dxpl_id, H5AC__FREESPACE_TAG, HADDR_UNDEF)
@@ -453,89 +814,211 @@ HDfprintf(stderr, "%s: alloc_type = %u, size = %Hu\n", FUNC, (unsigned)alloc_typ
HDassert(f->shared->lf);
HDassert(size > 0);
- /* Get free space type from allocation type */
- fs_type = H5MF_ALLOC_TO_FS_TYPE(f, alloc_type);
+ if(f->shared->first_alloc_dealloc) {
+ HDassert(! H5AC_cache_image_pending(f));
+ if(H5MF_tidy_self_referential_fsm_hack(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTFREE, HADDR_UNDEF, "tidy of self referential fsm hack failed")
+ } /* end if */
+
+ H5MF_alloc_to_fs_type(f, alloc_type, size, &fs_type);
+
+#ifdef H5MF_ALLOC_DEBUG_MORE
+HDfprintf(stderr, "%s: Check 1.0\n", FUNC);
+#endif /* H5MF_ALLOC_DEBUG_MORE */
/* Set the ring type in the DXPL */
- if(H5AC_set_ring(dxpl_id, H5AC_RING_FSM, &dxpl, &orig_ring) < 0)
+ if(H5MF__fsm_type_is_self_referential(f, fs_type))
+ fsm_ring = H5AC_RING_MDFSM;
+ else
+ fsm_ring = H5AC_RING_RDFSM;
+ if(H5AC_set_ring(dxpl_id, fsm_ring, &dxpl, &orig_ring) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, HADDR_UNDEF, "unable to set ring value")
+ reset_ring = TRUE;
/* Check if we are using the free space manager for this file */
if(H5F_HAVE_FREE_SPACE_MANAGER(f)) {
+ /* We are about to change the contents of the free space manager --
+ * notify metadata cache that the associated fsm ring is
+ * unsettled
+ */
+ if(H5AC_unsettle_ring(f, fsm_ring) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_SYSTEM, HADDR_UNDEF, "attempt to notify cache that ring is unsettled failed")
+
/* Check if the free space manager for the file has been initialized */
- if(!f->shared->fs_man[fs_type] && H5F_addr_defined(f->shared->fs_addr[fs_type]))
- if(H5MF_alloc_open(f, dxpl_id, fs_type) < 0)
+ if(!f->shared->fs_man[fs_type] && H5F_addr_defined(f->shared->fs_addr[fs_type])) {
+ /* Open the free-space manager */
+ if(H5MF_open_fstype(f, dxpl_id, fs_type) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTOPENOBJ, HADDR_UNDEF, "can't initialize file free space")
+ HDassert(f->shared->fs_man[fs_type]);
+ } /* end if */
/* Search for large enough space in the free space manager */
- if(f->shared->fs_man[fs_type]) {
- H5MF_free_section_t *node; /* Free space section pointer */
- htri_t node_found = FALSE; /* Whether an existing free list node was found */
+ if(f->shared->fs_man[fs_type])
+ if(H5MF_find_sect(f, alloc_type, dxpl_id, size, f->shared->fs_man[fs_type], &ret_value) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, HADDR_UNDEF, "error locating a node")
+ } /* end if */
- /* Try to get a section from the free space manager */
- if((node_found = H5FS_sect_find(f, dxpl_id, f->shared->fs_man[fs_type], size, (H5FS_section_info_t **)&node)) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, HADDR_UNDEF, "error locating free space in file")
+ /* If no space is found from the free-space manager, continue further action */
+ if(!H5F_addr_defined(ret_value)) {
#ifdef H5MF_ALLOC_DEBUG_MORE
-HDfprintf(stderr, "%s: Check 1.5, node_found = %t\n", FUNC, node_found);
+HDfprintf(stderr, "%s: Check 2.0\n", FUNC);
+#endif /* H5MF_ALLOC_DEBUG_MORE */
+ if(f->shared->fs_strategy == H5F_FSPACE_STRATEGY_PAGE) {
+ HDassert(f->shared->fs_page_size >= H5F_FILE_SPACE_PAGE_SIZE_MIN);
+ if(HADDR_UNDEF == (ret_value = H5MF__alloc_pagefs(f, alloc_type, dxpl_id, size)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, HADDR_UNDEF, "allocation failed from paged aggregation")
+ } /* end if */
+ else { /* For non-paged aggregation, continue further action */
+ if(HADDR_UNDEF == (ret_value = H5MF_aggr_vfd_alloc(f, alloc_type, dxpl_id, size)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, HADDR_UNDEF, "allocation failed from aggr/vfd")
+ } /* end else */
+ } /* end if */
+ HDassert(H5F_addr_defined(ret_value));
+#ifdef H5MF_ALLOC_DEBUG_MORE
+HDfprintf(stderr, "%s: Check 3.0\n", FUNC);
#endif /* H5MF_ALLOC_DEBUG_MORE */
- /* Check for actually finding section */
- if(node_found) {
- /* Sanity check */
- HDassert(node);
+done:
+ /* Reset the ring in the DXPL */
+ if(reset_ring)
+ if(H5AC_reset_ring(dxpl, orig_ring) < 0)
+ HDONE_ERROR(H5E_RESOURCE, H5E_CANTSET, HADDR_UNDEF, "unable to set property value")
- /* Retrieve return value */
- ret_value = node->sect_info.addr;
+#ifdef H5MF_ALLOC_DEBUG
+HDfprintf(stderr, "%s: Leaving: ret_value = %a, size = %Hu\n", FUNC, ret_value, size);
+#endif /* H5MF_ALLOC_DEBUG */
+#ifdef H5MF_ALLOC_DEBUG_DUMP
+H5MF_sects_dump(f, dxpl_id, stderr);
+#endif /* H5MF_ALLOC_DEBUG_DUMP */
- /* Check for eliminating the section */
- if(node->sect_info.size == size) {
-#ifdef H5MF_ALLOC_DEBUG_MORE
-HDfprintf(stderr, "%s: Check 1.6, freeing node\n", FUNC);
-#endif /* H5MF_ALLOC_DEBUG_MORE */
- /* Free section node */
- if(H5MF_sect_simple_free((H5FS_section_info_t *)node) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, HADDR_UNDEF, "can't free simple section node")
- } /* end if */
- else {
- H5MF_sect_ud_t udata; /* User data for callback */
+ FUNC_LEAVE_NOAPI_TAG(ret_value, HADDR_UNDEF)
+} /* end H5MF_alloc() */
- /* Adjust information for section */
- node->sect_info.addr += size;
- node->sect_info.size -= size;
+
+/*-------------------------------------------------------------------------
+ * Function: H5MF__alloc_pagefs
+ *
+ * Purpose: Allocate space from either the large or small free-space manager.
+ * For "large" request:
+ * Allocate request from VFD
+ * Determine mis-aligned fragment and return the fragment to the
+ * appropriate manager
+ * For "small" request:
+ * Allocate a page from the large manager
+ * Determine whether space is available from a mis-aligned fragment
+ * being returned to the manager
+ * Return left-over space to the manager after fulfilling request
+ *
+ * Return: Success: The file address of new chunk.
+ * Failure: HADDR_UNDEF
+ *
+ * Programmer: Vailin Choi; Dec 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+static haddr_t
+H5MF__alloc_pagefs(H5F_t *f, H5FD_mem_t alloc_type, hid_t dxpl_id, hsize_t size)
+{
+ H5F_mem_page_t ptype; /* Free-space manager type */
+ H5MF_free_section_t *node = NULL; /* Free space section pointer */
+ haddr_t ret_value = HADDR_UNDEF; /* Return value */
- /* Construct user data for callbacks */
- udata.f = f;
- udata.dxpl_id = dxpl_id;
- udata.alloc_type = alloc_type;
- udata.allow_sect_absorb = TRUE;
- udata.allow_eoa_shrink_only = FALSE;
+ FUNC_ENTER_STATIC_TAG(dxpl_id, H5AC__FREESPACE_TAG, HADDR_UNDEF)
-#ifdef H5MF_ALLOC_DEBUG_MORE
-HDfprintf(stderr, "%s: Check 1.7, re-adding node, node->sect_info.size = %Hu\n", FUNC, node->sect_info.size);
-#endif /* H5MF_ALLOC_DEBUG_MORE */
- /* Re-insert section node into file's free space */
- if(H5FS_sect_add(f, dxpl_id, f->shared->fs_man[fs_type], (H5FS_section_info_t *)node, H5FS_ADD_RETURNED_SPACE, &udata) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINSERT, HADDR_UNDEF, "can't re-add section to file free space")
- } /* end else */
+#ifdef H5MF_ALLOC_DEBUG
+HDfprintf(stderr, "%s: alloc_type = %u, size = %Hu\n", FUNC, (unsigned)alloc_type, size);
+#endif /* H5MF_ALLOC_DEBUG */
+
+ H5MF_alloc_to_fs_type(f, alloc_type, size, &ptype);
+
+ switch(ptype) {
+ case H5F_MEM_PAGE_GENERIC:
+ case H5F_MEM_PAGE_LARGE_BTREE:
+ case H5F_MEM_PAGE_LARGE_DRAW:
+ case H5F_MEM_PAGE_LARGE_GHEAP:
+ case H5F_MEM_PAGE_LARGE_LHEAP:
+ case H5F_MEM_PAGE_LARGE_OHDR:
+ {
+ haddr_t eoa; /* EOA for the file */
+ hsize_t frag_size = 0; /* Fragment size */
+
+ /* Get the EOA for the file */
+ if(HADDR_UNDEF == (eoa = H5F_get_eoa(f, alloc_type)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, HADDR_UNDEF, "Unable to get eoa")
+ HDassert(!(eoa % f->shared->fs_page_size));
+
+ H5MF_EOA_MISALIGN(f, (eoa+size), f->shared->fs_page_size, frag_size);
+
+ /* Allocate from VFD */
+ if(HADDR_UNDEF == (ret_value = H5F_alloc(f, dxpl_id, alloc_type, size + frag_size, NULL, NULL)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, HADDR_UNDEF, "can't allocate file space")
- /* Leave now */
- HGOTO_DONE(ret_value)
+ /* If there is a mis-aligned fragment at EOA */
+ if(frag_size) {
+
+ /* Start up the free-space manager */
+ if(!(f->shared->fs_man[ptype]))
+ if(H5MF_start_fstype(f, dxpl_id, ptype) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, HADDR_UNDEF, "can't initialize file free space")
+
+ /* Create free space section for the fragment */
+ if(NULL == (node = H5MF_sect_new(H5MF_FSPACE_SECT_LARGE, ret_value + size, frag_size)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, HADDR_UNDEF, "can't initialize free space section")
+
+ /* Add the fragment to the large free-space manager */
+ if(H5MF_add_sect(f, alloc_type, dxpl_id, f->shared->fs_man[ptype], node) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINSERT, HADDR_UNDEF, "can't re-add section to file free space")
+
+ node = NULL;
} /* end if */
- } /* end if */
-#ifdef H5MF_ALLOC_DEBUG_MORE
-HDfprintf(stderr, "%s: Check 2.0\n", FUNC);
-#endif /* H5MF_ALLOC_DEBUG_MORE */
- } /* end if */
+ }
+ break;
+
+ case H5F_MEM_PAGE_META:
+ case H5F_MEM_PAGE_DRAW:
+ case H5F_MEM_PAGE_BTREE:
+ case H5F_MEM_PAGE_GHEAP:
+ case H5F_MEM_PAGE_LHEAP:
+ case H5F_MEM_PAGE_OHDR:
+ {
+ haddr_t new_page; /* The address for the new file size page */
+
+ /* Allocate one file space page */
+ if(HADDR_UNDEF == (new_page = H5MF_alloc(f, alloc_type, dxpl_id, f->shared->fs_page_size)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, HADDR_UNDEF, "can't allocate file space")
+
+ /* Start up the free-space manager */
+ if(!(f->shared->fs_man[ptype]))
+ if(H5MF_start_fstype(f, dxpl_id, ptype) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, HADDR_UNDEF, "can't initialize file free space")
+ HDassert(f->shared->fs_man[ptype]);
+
+ if(NULL == (node = H5MF_sect_new(H5MF_FSPACE_SECT_SMALL, (new_page + size), (f->shared->fs_page_size - size))))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, HADDR_UNDEF, "can't initialize free space section")
+
+ /* Add the remaining space in the page to the manager */
+ if(H5MF_add_sect(f, alloc_type, dxpl_id, f->shared->fs_man[ptype], node) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINSERT, HADDR_UNDEF, "can't re-add section to file free space")
- /* Allocate from the metadata aggregator (or the VFD) */
- if(HADDR_UNDEF == (ret_value = H5MF_aggr_vfd_alloc(f, alloc_type, dxpl_id, size)))
- HGOTO_ERROR(H5E_VFL, H5E_CANTALLOC, HADDR_UNDEF, "allocation failed from aggr/vfd")
+ node = NULL;
-done:
- /* Reset the ring in the DXPL */
- if(H5AC_reset_ring(dxpl, orig_ring) < 0)
- HDONE_ERROR(H5E_RESOURCE, H5E_CANTSET, HADDR_UNDEF, "unable to set property value")
+ /* Insert the new page into the Page Buffer list of new pages so
+ we don't read an empty page from disk */
+ if(f->shared->page_buf != NULL && H5PB_add_new_page(f, alloc_type, new_page) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINSERT, HADDR_UNDEF, "can't add new page to Page Buffer new page list")
+
+ ret_value = new_page;
+ }
+ break;
+
+ case H5F_MEM_PAGE_NTYPES:
+ case H5F_MEM_PAGE_DEFAULT:
+ default:
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, HADDR_UNDEF, "can't allocate file space: unrecognized type")
+ break;
+ } /* end switch */
+done:
#ifdef H5MF_ALLOC_DEBUG
HDfprintf(stderr, "%s: Leaving: ret_value = %a, size = %Hu\n", FUNC, ret_value, size);
#endif /* H5MF_ALLOC_DEBUG */
@@ -543,8 +1026,13 @@ HDfprintf(stderr, "%s: Leaving: ret_value = %a, size = %Hu\n", FUNC, ret_value,
H5MF_sects_dump(f, dxpl_id, stderr);
#endif /* H5MF_ALLOC_DEBUG_DUMP */
+ /* Release section node, if allocated and not added to section list or merged */
+ if(node)
+ if(H5MF_sect_free((H5FS_section_info_t *)node) < 0)
+ HDONE_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, HADDR_UNDEF, "can't free section node")
+
FUNC_LEAVE_NOAPI_TAG(ret_value, HADDR_UNDEF)
-} /* end H5MF_alloc() */
+} /* end H5MF__alloc_pagefs() */
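
For the large-request branch of H5MF__alloc_pagefs(), a minimal sketch of the fragment arithmetic that keeps the EOA on a page boundary; it mirrors the intent of H5MF_EOA_MISALIGN under the assumption of a zero base address and a 4096-byte page:

    #include <stdio.h>

    /* Bytes needed after (eoa + size) to reach the next page boundary */
    static unsigned long long
    eoa_misalign(unsigned long long eoa, unsigned long long size,
                 unsigned long long page)
    {
        unsigned long long rem = (eoa + size) % page;

        return rem ? (page - rem) : 0;
    }

    int main(void)
    {
        unsigned long long eoa = 8192, size = 6000, page = 4096;
        unsigned long long frag = eoa_misalign(eoa, size, page);

        /* The VFD allocation is size + frag; the frag-sized tail goes back to
         * the large free-space manager, and the new EOA stays page aligned. */
        printf("frag=%llu new_eoa=%llu\n", frag, eoa + size + frag); /* 2192 16384 */
        return 0;
    }
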
/*-------------------------------------------------------------------------
@@ -622,15 +1110,17 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5MF_xfree(const H5F_t *f, H5FD_mem_t alloc_type, hid_t dxpl_id, haddr_t addr,
+H5MF_xfree(H5F_t *f, H5FD_mem_t alloc_type, hid_t dxpl_id, haddr_t addr,
hsize_t size)
{
- H5F_io_info_t fio_info; /* I/O info for operation */
+ H5F_io_info2_t fio_info; /* I/O info for operation */
+ H5F_mem_page_t fs_type; /* Free space type (mapped from allocation type) */
H5MF_free_section_t *node = NULL; /* Free space section pointer */
- H5MF_sect_ud_t udata; /* User data for callback */
+ unsigned ctype; /* section class type */
H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
- H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
- H5FD_mem_t fs_type; /* Free space type (mapped from allocation type) */
+ H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
+ H5AC_ring_t fsm_ring = H5AC_RING_INV; /* Ring of fsm */
+ hbool_t reset_ring = FALSE; /* Whether the ring was set */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_TAG(dxpl_id, H5AC__FREESPACE_TAG, FAIL)
@@ -641,32 +1131,58 @@ HDfprintf(stderr, "%s: Entering - alloc_type = %u, addr = %a, size = %Hu\n", FUN
/* check arguments */
HDassert(f);
if(!H5F_addr_defined(addr) || 0 == size)
- HGOTO_DONE(SUCCEED);
+ HGOTO_DONE(SUCCEED)
HDassert(addr != 0); /* Can't deallocate the superblock :-) */
- /* Check for attempting to free space that's a 'temporary' file address */
- if(H5F_addr_le(f->shared->tmp_addr, addr))
- HGOTO_ERROR(H5E_RESOURCE, H5E_BADRANGE, FAIL, "attempting to free temporary file space")
+ if(f->shared->first_alloc_dealloc) {
+ HDassert(!H5AC_cache_image_pending(f));
+ if(H5MF_tidy_self_referential_fsm_hack(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTFREE, FAIL, "tidy of self referential fsm hack failed")
+ } /* end if */
+
+ H5MF_alloc_to_fs_type(f, alloc_type, size, &fs_type);
/* Set the ring type in the DXPL */
- if(H5AC_set_ring(dxpl_id, H5AC_RING_FSM, &dxpl, &orig_ring) < 0)
+ if(H5MF__fsm_type_is_self_referential(f, fs_type))
+ fsm_ring = H5AC_RING_MDFSM;
+ else
+ fsm_ring = H5AC_RING_RDFSM;
+ if(H5AC_set_ring(dxpl_id, fsm_ring, &dxpl, &orig_ring) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value")
+ reset_ring = TRUE;
+
+ /* we are about to change the contents of the free space manager --
+ * notify metadata cache that the associated fsm ring is
+ * unsettled
+ */
+ /* Only do so for strategies that use free-space managers */
+ if(H5F_HAVE_FREE_SPACE_MANAGER(f))
+ if(H5AC_unsettle_ring(f, fsm_ring) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_SYSTEM, FAIL, "attempt to notify cache that ring is unsettled failed")
+
+ /* Check for attempting to free space that's a 'temporary' file address */
+ if(H5F_addr_le(f->shared->tmp_addr, addr))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_BADRANGE, FAIL, "attempting to free temporary file space")
/* Set up I/O info for operation */
fio_info.f = f;
- if(NULL == (fio_info.dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ if(H5FD_MEM_DRAW == alloc_type) {
+ if(NULL == (fio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(H5AC_ind_read_dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ if(NULL == (fio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ } /* end if */
+ else {
+ if(NULL == (fio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ if(NULL == (fio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(H5AC_rawdata_dxpl_id)))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "can't get property list")
+ } /* end else */
/* Check if the space to free intersects with the file's metadata accumulator */
if(H5F__accum_free(&fio_info, alloc_type, addr, size) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTFREE, FAIL, "can't check free space intersection w/metadata accumulator")
- /* Get free space type from allocation type */
- fs_type = H5MF_ALLOC_TO_FS_TYPE(f, alloc_type);
-#ifdef H5MF_ALLOC_DEBUG_MORE
-HDfprintf(stderr, "%s: fs_type = %u\n", FUNC, (unsigned)fs_type);
-#endif /* H5MF_ALLOC_DEBUG_MORE */
-
/* Check if the free space manager for the file has been initialized */
if(!f->shared->fs_man[fs_type]) {
/* If there's no free space manager for objects of this type,
@@ -674,7 +1190,7 @@ HDfprintf(stderr, "%s: fs_type = %u\n", FUNC, (unsigned)fs_type);
* space is at the end of the file
*/
#ifdef H5MF_ALLOC_DEBUG_MORE
-HDfprintf(stderr, "%s: f->shared->fs_addr[%u] = %a\n", FUNC, (unsigned)fs_type, f->shared->fs_addr[fs_type]);
+HDfprintf(stderr, "%s: fs_addr = %a\n", FUNC, f->shared->fs_addr[fs_type]);
#endif /* H5MF_ALLOC_DEBUG_MORE */
if(!H5F_addr_defined(f->shared->fs_addr[fs_type])) {
htri_t status; /* "can absorb" status for section into */
@@ -684,7 +1200,7 @@ HDfprintf(stderr, "%s: Trying to avoid starting up free space manager\n", FUNC);
#endif /* H5MF_ALLOC_DEBUG_MORE */
/* Try to shrink the file or absorb the block into a block aggregator */
if((status = H5MF_try_shrink(f, alloc_type, dxpl_id, addr, size)) < 0)
- HGOTO_ERROR(H5E_FSPACE, H5E_CANTMERGE, FAIL, "can't check for absorbing block")
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTMERGE, FAIL, "can't check for absorbing block")
else if(status > 0)
/* Indicate success */
HGOTO_DONE(SUCCEED)
@@ -693,13 +1209,13 @@ HDfprintf(stderr, "%s: Trying to avoid starting up free space manager\n", FUNC);
HDfprintf(stderr, "%s: dropping addr = %a, size = %Hu, on the floor!\n", FUNC, addr, size);
#endif /* H5MF_ALLOC_DEBUG_MORE */
HGOTO_DONE(SUCCEED)
- }
+ } /* end else-if */
} /* end if */
/* If we are deleting the free space manager, leave now, to avoid
* [re-]starting it.
- * or if file space strategy type is not using a free space manager
- * (H5F_FILE_SPACE_AGGR_VFD or H5F_FILE_SPACE_VFD), drop free space
+ * or if file space strategy type is not using a free space manager
+ * (H5F_FSPACE_STRATEGY_AGGR or H5F_FSPACE_STRATEGY_NONE), drop free space
* section on the floor.
*
* Note: this drops the space to free on the floor...
@@ -717,38 +1233,42 @@ HDfprintf(stderr, "%s: dropping addr = %a, size = %Hu, on the floor!\n", FUNC, a
* space isn't at the end of the file, so start up (or create)
* the file space manager
*/
- if(H5MF_alloc_start(f, dxpl_id, fs_type) < 0)
+ if(H5MF_start_fstype(f, dxpl_id, fs_type) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't initialize file free space")
} /* end if */
- /* Create free space section for block */
- if(NULL == (node = H5MF_sect_simple_new(addr, size)))
+ /* Create the free-space section for the freed section */
+ ctype = H5MF_SECT_CLASS_TYPE(f, size);
+ if(NULL == (node = H5MF_sect_new(ctype, addr, size)))
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't initialize free space section")
- /* Construct user data for callbacks */
- udata.f = f;
- udata.dxpl_id = dxpl_id;
- udata.alloc_type = alloc_type;
- udata.allow_sect_absorb = TRUE;
- udata.allow_eoa_shrink_only = FALSE;
-
- /* If size of section freed is larger than threshold, add it to the free space manager */
+ /* If size of the freed section is larger than threshold, add it to the free space manager */
if(size >= f->shared->fs_threshold) {
HDassert(f->shared->fs_man[fs_type]);
#ifdef H5MF_ALLOC_DEBUG_MORE
HDfprintf(stderr, "%s: Before H5FS_sect_add()\n", FUNC);
#endif /* H5MF_ALLOC_DEBUG_MORE */
+
/* Add to the free space for the file */
- if(H5FS_sect_add(f, dxpl_id, f->shared->fs_man[fs_type], (H5FS_section_info_t *)node, H5FS_ADD_RETURNED_SPACE, &udata) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINSERT, FAIL, "can't add section to file free space")
- node = NULL;
+ if(H5MF_add_sect(f, alloc_type, dxpl_id, f->shared->fs_man[fs_type], node) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINSERT, FAIL, "can't add section to file free space")
+ node = NULL;
+
#ifdef H5MF_ALLOC_DEBUG_MORE
HDfprintf(stderr, "%s: After H5FS_sect_add()\n", FUNC);
#endif /* H5MF_ALLOC_DEBUG_MORE */
} /* end if */
else {
htri_t merged; /* Whether node was merged */
+ H5MF_sect_ud_t udata; /* User data for callback */
+
+ /* Construct user data for callbacks */
+ udata.f = f;
+ udata.dxpl_id = dxpl_id;
+ udata.alloc_type = alloc_type;
+ udata.allow_sect_absorb = TRUE;
+ udata.allow_eoa_shrink_only = FALSE;
/* Try to merge the section that is smaller than threshold */
if((merged = H5FS_sect_try_merge(f, dxpl_id, f->shared->fs_man[fs_type], (H5FS_section_info_t *)node, H5FS_ADD_RETURNED_SPACE, &udata)) < 0)
@@ -760,12 +1280,13 @@ HDfprintf(stderr, "%s: After H5FS_sect_add()\n", FUNC);
done:
/* Reset the ring in the DXPL */
- if(H5AC_reset_ring(dxpl, orig_ring) < 0)
- HDONE_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set property value")
+ if(reset_ring)
+ if(H5AC_reset_ring(dxpl, orig_ring) < 0)
+ HDONE_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set property value")
/* Release section node, if allocated and not added to section list or merged */
if(node)
- if(H5MF_sect_simple_free((H5FS_section_info_t *)node) < 0)
+ if(H5MF_sect_free((H5FS_section_info_t *)node) < 0)
HDONE_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't free simple section node")
#ifdef H5MF_ALLOC_DEBUG
@@ -782,6 +1303,14 @@ H5MF_sects_dump(f, dxpl_id, stderr);
* Function: H5MF_try_extend
*
* Purpose: Extend a block in the file if possible.
+ * For non-paged aggregation:
+ * --try to extend at EOA
+ * --try to extend into the aggregators
+ * --try to extend into a free-space section if adjoined
+ * For paged aggregation:
+ * --try to extend at EOA
+ * --try to extend into a free-space section if adjoined
+ * --try to extend into the page end threshold if a metadata block
*
* Return: Success: TRUE(1) - Block was extended
* FALSE(0) - Block could not be extended
@@ -796,11 +1325,16 @@ htri_t
H5MF_try_extend(H5F_t *f, hid_t dxpl_id, H5FD_mem_t alloc_type, haddr_t addr,
hsize_t size, hsize_t extra_requested)
{
- H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
- H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
- haddr_t end; /* End of block to extend */
- H5FD_mem_t map_type; /* Mapped type */
- htri_t ret_value = FAIL; /* Return value */
+ H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
+ H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
+ H5AC_ring_t fsm_ring = H5AC_RING_INV; /* ring of fsm */
+ haddr_t end; /* End of block to extend */
+ H5FD_mem_t map_type; /* Mapped type */
+ H5F_mem_page_t fs_type; /* free space type */
+ htri_t allow_extend = TRUE; /* Possible to extend the block */
+ hsize_t frag_size = 0; /* Size of mis-aligned fragment */
+ hbool_t reset_ring = FALSE; /* Whether the ring was set */
+ htri_t ret_value = FALSE; /* Return value */
FUNC_ENTER_NOAPI_TAG(dxpl_id, H5AC__FREESPACE_TAG, FAIL)
#ifdef H5MF_ALLOC_DEBUG
@@ -811,204 +1345,152 @@ HDfprintf(stderr, "%s: Entering: alloc_type = %u, addr = %a, size = %Hu, extra_r
HDassert(f);
HDassert(H5F_INTENT(f) & H5F_ACC_RDWR);
+ if(f->shared->first_alloc_dealloc) {
+ HDassert(! H5AC_cache_image_pending(f));
+ if(H5MF_tidy_self_referential_fsm_hack(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTFREE, FAIL, "tidy of self referential fsm hack failed")
+ } /* end if */
+
/* Set mapped type, treating global heap as raw data */
map_type = (alloc_type == H5FD_MEM_GHEAP) ? H5FD_MEM_DRAW : alloc_type;
/* Compute end of block to extend */
end = addr + size;
- /* Set the ring type in the DXPL */
- if(H5AC_set_ring(dxpl_id, H5AC_RING_FSM, &dxpl, &orig_ring) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value")
-
- /* Check if the block is exactly at the end of the file */
- if((ret_value = H5FD_try_extend(f->shared->lf, map_type, f, end, extra_requested)) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTEXTEND, FAIL, "error extending file")
- else if(ret_value == FALSE) {
- H5F_blk_aggr_t *aggr; /* Aggregator to use */
-
- /* Check for test block able to extend aggregation block */
- aggr = (map_type == H5FD_MEM_DRAW) ? &(f->shared->sdata_aggr) : &(f->shared->meta_aggr);
- if((ret_value = H5MF_aggr_try_extend(f, aggr, map_type, end, extra_requested)) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTEXTEND, FAIL, "error extending aggregation block")
- else if(ret_value == FALSE) {
- H5FD_mem_t fs_type; /* Free space type (mapped from allocation type) */
-
- /* Get free space type from allocation type */
- fs_type = H5MF_ALLOC_TO_FS_TYPE(f, alloc_type);
+ /* For paged aggregation:
+ * To extend a small block: the extension must not cross a page boundary
+ * To extend a large block at EOA: calculate the mis-aligned fragment in advance so the EOA will still end at a page boundary
+ */
+ if(H5F_PAGED_AGGR(f)) {
+ if(size < f->shared->fs_page_size) {
+ /* To extend a small block: cannot cross page boundary */
+ if((addr / f->shared->fs_page_size) != (((end + extra_requested) - 1) / f->shared->fs_page_size))
+ allow_extend = FALSE;
+ } /* end if */
+ else {
+ haddr_t eoa; /* EOA for the file */
- /* Check if the free space for the file has been initialized */
- if(!f->shared->fs_man[fs_type] && H5F_addr_defined(f->shared->fs_addr[fs_type]))
- if(H5MF_alloc_open(f, dxpl_id, fs_type) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't initialize file free space")
+ /* To extend a large block: calculate in advance the mis-aligned fragment so EOA will end at page boundary if extended */
+ if(HADDR_UNDEF == (eoa = H5F_get_eoa(f, alloc_type)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "Unable to get eoa")
+ HDassert(!(eoa % f->shared->fs_page_size));
- /* Check for test block able to block in free space manager */
- if(f->shared->fs_man[fs_type])
- if((ret_value = H5FS_sect_try_extend(f, dxpl_id, f->shared->fs_man[fs_type], addr, size, extra_requested)) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTEXTEND, FAIL, "error extending block in free space manager")
- } /* end if */
+ H5MF_EOA_MISALIGN(f, (eoa+extra_requested), f->shared->fs_page_size, frag_size);
+ } /* end else */
} /* end if */
-done:
- /* Reset the ring in the DXPL */
- if(H5AC_reset_ring(dxpl, orig_ring) < 0)
- HDONE_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set property value")
+ /* Get free space type from allocation type */
+ H5MF_alloc_to_fs_type(f, alloc_type, size, &fs_type);
-#ifdef H5MF_ALLOC_DEBUG
-HDfprintf(stderr, "%s: Leaving: ret_value = %t\n", FUNC, ret_value);
-#endif /* H5MF_ALLOC_DEBUG */
-#ifdef H5MF_ALLOC_DEBUG_DUMP
-H5MF_sects_dump(f, dxpl_id, stderr);
-#endif /* H5MF_ALLOC_DEBUG_DUMP */
+ /* Set the ring type in the DXPL */
+ if(H5MF__fsm_type_is_self_referential(f, fs_type))
+ fsm_ring = H5AC_RING_MDFSM;
+ else
+ fsm_ring = H5AC_RING_RDFSM;
+ if(H5AC_set_ring(dxpl_id, fsm_ring, &dxpl, &orig_ring) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value")
+ reset_ring = TRUE;
- FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
-} /* end H5MF_try_extend() */
+ if(allow_extend) {
+ /* Try extending the block at EOA */
+ if((ret_value = H5F_try_extend(f, dxpl_id, map_type, end, extra_requested + frag_size)) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTEXTEND, FAIL, "error extending file")
+#ifdef H5MF_ALLOC_DEBUG_MORE
+HDfprintf(stderr, "%s: extended = %t\n", FUNC, ret_value);
+#endif /* H5MF_ALLOC_DEBUG_MORE */
-
-/*-------------------------------------------------------------------------
- * Function: H5MF_get_freespace
- *
- * Purpose: Retrieve the amount of free space in a file.
- *
- * Return: Success: Amount of free space in file
- * Failure: Negative
- *
- * Programmer: Quincey Koziol
- * Monday, October 6, 2003
- *
- * Modifications:
- * Vailin Choi; July 2012
- * As the default free-list mapping is changed to H5FD_FLMAP_DICHOTOMY,
- * checks are added to account for the last section of each free-space manager
- * and the remaining space in the two aggregators are at EOF.
- *-------------------------------------------------------------------------
- */
-herr_t
-H5MF_get_freespace(H5F_t *f, hid_t dxpl_id, hsize_t *tot_space, hsize_t *meta_size)
-{
- H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
- H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
- haddr_t eoa; /* End of allocated space in the file */
- haddr_t ma_addr = HADDR_UNDEF; /* Base "metadata aggregator" address */
- hsize_t ma_size = 0; /* Size of "metadata aggregator" */
- haddr_t sda_addr = HADDR_UNDEF; /* Base "small data aggregator" address */
- hsize_t sda_size = 0; /* Size of "small data aggregator" */
- hsize_t tot_fs_size = 0; /* Amount of all free space managed */
- hsize_t tot_meta_size = 0; /* Amount of metadata for free space managers */
- H5FD_mem_t type; /* Memory type for iteration */
- hbool_t fs_started[H5FD_MEM_NTYPES]; /* Indicate whether the free-space manager has been started */
- hbool_t eoa_shrank; /* Whether an EOA shrink occurs */
- herr_t ret_value = SUCCEED; /* Return value */
+ /* If extending at EOA succeeds: */
+ /* for paged aggregation, put the fragment into the large-sized free-space manager */
+ if(ret_value == TRUE && H5F_PAGED_AGGR(f) && frag_size) {
+ H5MF_free_section_t *node = NULL; /* Free space section pointer */
- FUNC_ENTER_NOAPI_TAG(dxpl_id, H5AC__FREESPACE_TAG, FAIL)
+ /* Should be large-sized block */
+ HDassert(size >= f->shared->fs_page_size);
- /* check args */
- HDassert(f);
- HDassert(f->shared);
- HDassert(f->shared->lf);
+ /* Start up the free-space manager */
+ if(!(f->shared->fs_man[fs_type]))
+ if(H5MF_start_fstype(f, dxpl_id, fs_type) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't initialize file free space")
- /* Retrieve the 'eoa' for the file */
- if(HADDR_UNDEF == (eoa = H5F_get_eoa(f, H5FD_MEM_DEFAULT)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "driver get_eoa request failed")
+ /* Create free space section for the fragment */
+ if(NULL == (node = H5MF_sect_new(H5MF_FSPACE_SECT_LARGE, end + extra_requested, frag_size)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't initialize free space section")
- /* Set the ring type in the DXPL */
- if(H5AC_set_ring(dxpl_id, H5AC_RING_FSM, &dxpl, &orig_ring) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value")
+ /* Add the fragment to the large-sized free-space manager */
+ if(H5MF_add_sect(f, alloc_type, dxpl_id, f->shared->fs_man[fs_type], node) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINSERT, FAIL, "can't re-add section to file free space")
- /* Retrieve metadata aggregator info, if available */
- if(H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "can't query metadata aggregator stats")
+ node = NULL;
+ } /* end if */
- /* Retrieve 'small data' aggregator info, if available */
- if(H5MF_aggr_query(f, &(f->shared->sdata_aggr), &sda_addr, &sda_size) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "can't query small data aggregator stats")
+ /* For non-paged aggregation: try to extend into the aggregators */
+ if(ret_value == FALSE && (f->shared->fs_strategy == H5F_FSPACE_STRATEGY_FSM_AGGR ||
+ f->shared->fs_strategy == H5F_FSPACE_STRATEGY_AGGR) ) {
+ H5F_blk_aggr_t *aggr; /* Aggregator to use */
- /* Iterate over all the free space types that have managers and get each free list's space */
- for(type = H5FD_MEM_DEFAULT; type < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, type)) {
+ /* Check if the block is able to extend into aggregation block */
+ aggr = (map_type == H5FD_MEM_DRAW) ? &(f->shared->sdata_aggr) : &(f->shared->meta_aggr);
+ if((ret_value = H5MF_aggr_try_extend(f, dxpl_id, aggr, map_type, end, extra_requested)) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTEXTEND, FAIL, "error extending aggregation block")
- fs_started[type] = FALSE;
-
- /* Check if the free space for the file has been initialized */
- if(!f->shared->fs_man[type] && H5F_addr_defined(f->shared->fs_addr[type])) {
- if(H5MF_alloc_open(f, dxpl_id, type) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't initialize file free space")
- HDassert(f->shared->fs_man[type]);
- fs_started[type] = TRUE;
+#ifdef H5MF_ALLOC_DEBUG_MORE
+HDfprintf(stderr, "%s: H5MF_aggr_try_extend = %t\n", FUNC, ret_value);
+#endif /* H5MF_ALLOC_DEBUG_MORE */
} /* end if */
- /* Check if there's free space of this type */
- if(f->shared->fs_man[type]) {
- hsize_t type_fs_size = 0; /* Amount of free space managed for each type */
- hsize_t type_meta_size = 0; /* Amount of free space metadata for each type */
+ /* If no extension so far, try to extend into a free-space section */
+ if(ret_value == FALSE && ((f->shared->fs_strategy == H5F_FSPACE_STRATEGY_FSM_AGGR) ||
+ (H5F_PAGED_AGGR(f))) ) {
+ H5MF_sect_ud_t udata; /* User data */
- /* Retrieve free space size from free space manager */
- if(H5FS_sect_stats(f->shared->fs_man[type], &type_fs_size, NULL) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "can't query free space stats")
- if(H5FS_size(f, f->shared->fs_man[type], &type_meta_size) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "can't query free space metadata stats")
+ /* Construct user data for callbacks */
+ udata.f = f;
+ udata.dxpl_id = dxpl_id;
+ udata.alloc_type = alloc_type;
- /* Increment total free space for types */
- tot_fs_size += type_fs_size;
- tot_meta_size += type_meta_size;
- } /* end if */
- } /* end for */
-
- /* Iterate until no more EOA shrink occurs */
- do {
- eoa_shrank = FALSE;
+ /* Check if the free space for the file has been initialized */
+ if(!f->shared->fs_man[fs_type] && H5F_addr_defined(f->shared->fs_addr[fs_type]))
+ /* Open the free-space manager */
+ if(H5MF_open_fstype(f, dxpl_id, fs_type) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't initialize file free space")
- /* Check the last section of each free-space manager */
- for(type = H5FD_MEM_DEFAULT; type < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, type)) {
- haddr_t sect_addr = HADDR_UNDEF;
- hsize_t sect_size = 0;
-
- if(f->shared->fs_man[type]) {
- if(H5FS_sect_query_last_sect(f->shared->fs_man[type], &sect_addr, &sect_size) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "can't query last section on merge list")
-
- /* Deduct space from previous accumulation if the section is at EOA */
- if(H5F_addr_eq(sect_addr + sect_size, eoa)) {
- eoa = sect_addr;
- eoa_shrank = TRUE;
- tot_fs_size -= sect_size;
- } /* end if */
- } /* end if */
- } /* end for */
-
- /* Check the metadata and raw data aggregators */
- if(ma_size > 0 && H5F_addr_eq(ma_addr + ma_size, eoa)) {
- eoa = ma_addr;
- eoa_shrank = TRUE;
- ma_size = 0;
- } /* end if */
- if(sda_size > 0 && H5F_addr_eq(sda_addr + sda_size, eoa)) {
- eoa = sda_addr;
- eoa_shrank = TRUE;
- sda_size = 0;
- } /* end if */
- } while(eoa_shrank);
+ /* Try to extend the block into a free-space section */
+ if(f->shared->fs_man[fs_type]) {
+ if((ret_value = H5FS_sect_try_extend(f, dxpl_id, f->shared->fs_man[fs_type], addr, size, extra_requested, H5FS_ADD_RETURNED_SPACE, &udata)) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTEXTEND, FAIL, "error extending block in free space manager")
+#ifdef H5MF_ALLOC_DEBUG_MORE
+HDfprintf(stderr, "%s: Try to H5FS_sect_try_extend = %t\n", FUNC, ret_value);
+#endif /* H5MF_ALLOC_DEBUG_MORE */
+ } /* end if */
- /* Close the free-space managers if they were opened earlier in this routine */
- for(type = H5FD_MEM_DEFAULT; type < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, type)) {
- if(fs_started[type])
- if(H5MF__alloc_close(f, dxpl_id, type) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't close file free space")
- } /* end for */
+ /* For paged aggregation and a metadata block: try to extend into page end threshold */
+ if(ret_value == FALSE && H5F_PAGED_AGGR(f) && map_type != H5FD_MEM_DRAW) {
+ H5MF_EOA_MISALIGN(f, end, f->shared->fs_page_size, frag_size);
- /* Set the value(s) to return */
- /* (The metadata & small data aggregators count as free space now, since they aren't at EOA) */
- if(tot_space)
- *tot_space = tot_fs_size + ma_size + sda_size;
- if(meta_size)
- *meta_size = tot_meta_size;
+ if(frag_size <= H5F_PGEND_META_THRES(f) && extra_requested <= frag_size)
+ ret_value = TRUE;
+#ifdef H5MF_ALLOC_DEBUG_MORE
+HDfprintf(stderr, "%s: Try to extend into the page end threshold = %t\n", FUNC, ret_value);
+#endif /* H5MF_ALLOC_DEBUG_MORE */
+ } /* end if */
+ } /* end if */
+ } /* allow_extend */
done:
/* Reset the ring in the DXPL */
- if(H5AC_reset_ring(dxpl, orig_ring) < 0)
- HDONE_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set property value")
+ if(reset_ring)
+ if(H5AC_reset_ring(dxpl, orig_ring) < 0)
+ HDONE_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set property value")
+
+#ifdef H5MF_ALLOC_DEBUG
+HDfprintf(stderr, "%s: Leaving: ret_value = %t\n", FUNC, ret_value);
+#endif /* H5MF_ALLOC_DEBUG */
+#ifdef H5MF_ALLOC_DEBUG_DUMP
+H5MF_sects_dump(f, dxpl_id, stderr);
+#endif /* H5MF_ALLOC_DEBUG_DUMP */
FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
-} /* end H5MF_get_freespace() */
+} /* end H5MF_try_extend() */
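
A minimal sketch of the small-block check used in H5MF_try_extend() under paged aggregation: the extension is refused when the extended block would cross a page boundary (4096-byte page assumed; names are illustrative):

    #include <stdio.h>
    #include <stdbool.h>

    static bool
    extend_stays_in_page(unsigned long long addr, unsigned long long size,
                         unsigned long long extra, unsigned long long page)
    {
        unsigned long long last = addr + size + extra - 1;  /* last byte after extending */

        return (addr / page) == (last / page);
    }

    int main(void)
    {
        /* Block at 4200, 100 bytes: +500 stays in the page, +4000 would cross */
        printf("%d %d\n",
               extend_stays_in_page(4200, 100, 500, 4096),
               extend_stays_in_page(4200, 100, 4000, 4096));  /* 1 0 */
        return 0;
    }
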
/*-------------------------------------------------------------------------
@@ -1031,11 +1513,15 @@ H5MF_try_shrink(H5F_t *f, H5FD_mem_t alloc_type, hid_t dxpl_id, haddr_t addr,
{
H5MF_free_section_t *node = NULL; /* Free space section pointer */
H5MF_sect_ud_t udata; /* User data for callback */
+ H5FS_section_class_t *sect_cls; /* Section class */
H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
- H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
- htri_t ret_value = FAIL; /* Return value */
+ H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
+ H5AC_ring_t fsm_ring = H5AC_RING_INV; /* Ring of fsm */
+ H5F_mem_page_t fs_type; /* Free space type */
+ hbool_t reset_ring = FALSE; /* Whether the ring was set */
+ htri_t ret_value = FAIL; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_NOAPI_TAG(dxpl_id, H5AC__FREESPACE_TAG, FAIL)
#ifdef H5MF_ALLOC_DEBUG
HDfprintf(stderr, "%s: Entering - alloc_type = %u, addr = %a, size = %Hu\n", FUNC, (unsigned)alloc_type, addr, size);
#endif /* H5MF_ALLOC_DEBUG */
@@ -1047,12 +1533,24 @@ HDfprintf(stderr, "%s: Entering - alloc_type = %u, addr = %a, size = %Hu\n", FUN
HDassert(H5F_addr_defined(addr));
HDassert(size > 0);
+ /* Set up free-space section class information */
+ sect_cls = H5MF_SECT_CLS_TYPE(f, size);
+ HDassert(sect_cls);
+
+ /* Get free space type from allocation type */
+ H5MF_alloc_to_fs_type(f, alloc_type, size, &fs_type);
+
/* Set the ring type in the DXPL */
- if(H5AC_set_ring(dxpl_id, H5AC_RING_FSM, &dxpl, &orig_ring) < 0)
+ if(H5MF__fsm_type_is_self_referential(f, fs_type))
+ fsm_ring = H5AC_RING_MDFSM;
+ else
+ fsm_ring = H5AC_RING_RDFSM;
+ if(H5AC_set_ring(dxpl_id, fsm_ring, &dxpl, &orig_ring) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value")
+ reset_ring = TRUE;
- /* Create free space section for block */
- if(NULL == (node = H5MF_sect_simple_new(addr, size)))
+ /* Create free-space section for block */
+ if(NULL == (node = H5MF_sect_new(sect_cls->type, addr, size)))
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't initialize free space section")
/* Construct user data for callbacks */
@@ -1060,98 +1558,87 @@ HDfprintf(stderr, "%s: Entering - alloc_type = %u, addr = %a, size = %Hu\n", FUN
udata.dxpl_id = dxpl_id;
udata.alloc_type = alloc_type;
udata.allow_sect_absorb = FALSE; /* Force section to be absorbed into aggregator */
- udata.allow_eoa_shrink_only = FALSE;
-
- /* Call the "can shrink" callback for the section */
- if((ret_value = H5MF_sect_simple_can_shrink((const H5FS_section_info_t *)node, &udata)) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTMERGE, FAIL, "can't check if section can shrink container")
- else if(ret_value > 0) {
- /* Shrink or absorb the section */
- if(H5MF_sect_simple_shrink((H5FS_section_info_t **)&node, &udata) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSHRINK, FAIL, "can't shrink container")
+ udata.allow_eoa_shrink_only = FALSE;
+
+ /* Check if the block can shrink the container */
+ if(sect_cls->can_shrink) {
+ if((ret_value = (*sect_cls->can_shrink)((const H5FS_section_info_t *)node, &udata)) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTMERGE, FAIL, "can't check if section can shrink container")
+ if(ret_value > 0) {
+ HDassert(sect_cls->shrink);
+
+ if((*sect_cls->shrink)((H5FS_section_info_t **)&node, &udata) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSHRINK, FAIL, "can't shrink container")
+ } /* end if */
} /* end if */
done:
/* Reset the ring in the DXPL */
- if(H5AC_reset_ring(dxpl, orig_ring) < 0)
- HDONE_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set property value")
+ if(reset_ring)
+ if(H5AC_reset_ring(dxpl, orig_ring) < 0)
+ HDONE_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set property value")
/* Free section node allocated */
- if(node && H5MF_sect_simple_free((H5FS_section_info_t *)node) < 0)
+ if(node && H5MF_sect_free((H5FS_section_info_t *)node) < 0)
HDONE_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't free simple section node")
#ifdef H5MF_ALLOC_DEBUG
HDfprintf(stderr, "%s: Leaving, ret_value = %d\n", FUNC, ret_value);
#endif /* H5MF_ALLOC_DEBUG */
- FUNC_LEAVE_NOAPI(ret_value)
+ FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
} /* end H5MF_try_shrink() */
/*-------------------------------------------------------------------------
- * Function: H5MF_close_shrink_eoa
+ * Function: H5MF_close
*
- * Purpose: Shrink the EOA while closing
+ * Purpose: Close the free space tracker(s) for a file:
+ * paged or non-paged aggregation
*
* Return: SUCCEED/FAIL
*
- * Programmer: Quincey Koziol
- * Saturday, July 7, 2012
+ * Programmer: Vailin Choi; Dec 2012
*
*-------------------------------------------------------------------------
*/
-static herr_t
-H5MF_close_shrink_eoa(H5F_t *f, hid_t dxpl_id)
+herr_t
+H5MF_close(H5F_t *f, hid_t dxpl_id)
{
- H5FD_mem_t type; /* Memory type for iteration */
- hbool_t eoa_shrank; /* Whether an EOA shrink occurs */
- htri_t status; /* Status value */
- H5MF_sect_ud_t udata; /* User data for callback */
- herr_t ret_value = SUCCEED; /* Return value */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT_TAG(dxpl_id, H5AC__FREESPACE_TAG, FAIL)
+ FUNC_ENTER_NOAPI_TAG(dxpl_id, H5AC__FREESPACE_TAG, FAIL)
+#ifdef H5MF_ALLOC_DEBUG
+HDfprintf(stderr, "%s: Entering\n", FUNC);
+#endif /* H5MF_ALLOC_DEBUG */
/* check args */
HDassert(f);
HDassert(f->shared);
- /* Construct user data for callbacks */
- udata.f = f;
- udata.dxpl_id = dxpl_id;
- udata.allow_sect_absorb = FALSE;
- udata.allow_eoa_shrink_only = TRUE;
-
- /* Iterate until no more EOA shrinking occurs */
- do {
- eoa_shrank = FALSE;
-
- /* Check the last section of each free-space manager */
- for(type = H5FD_MEM_DEFAULT; type < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, type)) {
- if(f->shared->fs_man[type]) {
- udata.alloc_type = type;
- if((status = H5FS_sect_try_shrink_eoa(f, dxpl_id, f->shared->fs_man[type], &udata)) < 0)
- HGOTO_ERROR(H5E_FSPACE, H5E_CANTSHRINK, FAIL, "can't check for shrinking eoa")
- else if(status > 0)
- eoa_shrank = TRUE;
- } /* end if */
- } /* end for */
-
- /* check the two aggregators */
- if((status = H5MF_aggrs_try_shrink_eoa(f, dxpl_id)) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSHRINK, FAIL, "can't check for shrinking eoa")
- else if(status > 0)
- eoa_shrank = TRUE;
- } while(eoa_shrank);
+ if(H5F_PAGED_AGGR(f)) {
+ if((ret_value = H5MF__close_pagefs(f, dxpl_id)) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTFREE, FAIL, "can't close free-space managers for 'page' file space")
+ } /* end if */
+ else {
+ if((ret_value = H5MF__close_aggrfs(f, dxpl_id)) < 0)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTFREE, FAIL, "can't close free-space managers for 'aggr' file space")
+ } /* end else */
done:
+
+#ifdef H5MF_ALLOC_DEBUG
+HDfprintf(stderr, "%s: Leaving\n", FUNC);
+#endif /* H5MF_ALLOC_DEBUG */
FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
-} /* end H5MF_close_shrink_eoa() */
+} /* end H5MF_close() */
/*-------------------------------------------------------------------------
- * Function: H5MF__close_delete
+ * Function: H5MF__close_delete_fstype
*
- * Purpose: Common code for closing and deleting freespace managers from
- * the file.
+ * Purpose: Common code for closing and deleting the freespace manager
+ * of TYPE for file.
+ * Note that TYPE can be H5F_mem_page_t or H5FD_mem_t enum types.
*
* Return: SUCCEED/FAIL
*
@@ -1161,9 +1648,8 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5MF__close_delete(H5F_t *f, hid_t dxpl_id)
+H5MF__close_delete_fstype(H5F_t *f, hid_t dxpl_id, H5F_mem_page_t type)
{
- H5FD_mem_t type; /* Memory type for iteration */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC_TAG(dxpl_id, H5AC__FREESPACE_TAG, FAIL)
@@ -1174,56 +1660,28 @@ HDfprintf(stderr, "%s: Entering\n", FUNC);
/* check args */
HDassert(f);
HDassert(f->shared);
+ if(H5F_PAGED_AGGR(f))
+ HDassert(type < H5F_MEM_PAGE_NTYPES);
+ else
+ HDassert((H5FD_mem_t)type < H5FD_MEM_NTYPES);
- /* Iterate over all the free space types that have managers and get each free list's space */
- for(type = H5FD_MEM_DEFAULT; type < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, type)) {
#ifdef H5MF_ALLOC_DEBUG_MORE
HDfprintf(stderr, "%s: Check 1.0 - f->shared->fs_man[%u] = %p, f->shared->fs_addr[%u] = %a\n", FUNC, (unsigned)type, f->shared->fs_man[type], (unsigned)type, f->shared->fs_addr[type]);
#endif /* H5MF_ALLOC_DEBUG_MORE */
- /* If the free space manager for this type is open, close it */
- if(f->shared->fs_man[type]) {
-#ifdef H5MF_ALLOC_DEBUG_MORE
-HDfprintf(stderr, "%s: Before closing free space manager\n", FUNC);
-#endif /* H5MF_ALLOC_DEBUG_MORE */
- if(H5FS_close(f, dxpl_id, f->shared->fs_man[type]) < 0)
- HGOTO_ERROR(H5E_FSPACE, H5E_CANTRELEASE, FAIL, "can't release free space info")
- f->shared->fs_man[type] = NULL;
- f->shared->fs_state[type] = H5F_FS_STATE_CLOSED;
- } /* end if */
-#ifdef H5MF_ALLOC_DEBUG_MORE
-HDfprintf(stderr, "%s: Check 2.0 - f->shared->fs_man[%u] = %p, f->shared->fs_addr[%u] = %a\n", FUNC, (unsigned)type, f->shared->fs_man[type], (unsigned)type, f->shared->fs_addr[type]);
-#endif /* H5MF_ALLOC_DEBUG_MORE */
- /* If there is free space manager info for this type, delete it */
- if(H5F_addr_defined(f->shared->fs_addr[type])) {
- haddr_t tmp_fs_addr; /* Temporary holder for free space manager address */
-
- /* Put address into temporary variable and reset it */
- /* (Avoids loopback in file space freeing routine) */
- tmp_fs_addr = f->shared->fs_addr[type];
- f->shared->fs_addr[type] = HADDR_UNDEF;
-
- /* Shift to "deleting" state, to make certain we don't track any
- * file space freed as a result of deleting the free space manager.
- */
- f->shared->fs_state[type] = H5F_FS_STATE_DELETING;
+ /* If the free space manager for this type is open, close it */
+ if(f->shared->fs_man[type])
+ if(H5MF__close_fstype(f, dxpl_id, type) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't close the free space manager")
#ifdef H5MF_ALLOC_DEBUG_MORE
-HDfprintf(stderr, "%s: Before deleting free space manager\n", FUNC);
+HDfprintf(stderr, "%s: Check 2.0 - f->shared->fs_man[%u] = %p, f->shared->fs_addr[%u] = %a\n", FUNC, (unsigned)type, f->shared->fs_man[type], (unsigned)type, f->shared->fs_addr[type]);
#endif /* H5MF_ALLOC_DEBUG_MORE */
- /* Delete free space manager for this type */
- if(H5FS_delete(f, dxpl_id, tmp_fs_addr) < 0)
- HGOTO_ERROR(H5E_FSPACE, H5E_CANTFREE, FAIL, "can't delete free space manager")
-
- /* Shift [back] to closed state */
- HDassert(f->shared->fs_state[type] == H5F_FS_STATE_DELETING);
- f->shared->fs_state[type] = H5F_FS_STATE_CLOSED;
-
- /* Sanity check that the free space manager for this type wasn't started up again */
- HDassert(!H5F_addr_defined(f->shared->fs_addr[type]));
- } /* end if */
- } /* end for */
+ /* If there is free space manager info for this type, delete it */
+ if(H5F_addr_defined(f->shared->fs_addr[type]))
+ if(H5MF__delete_fstype(f, dxpl_id, type) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't delete the free space manager")
done:
#ifdef H5MF_ALLOC_DEBUG
@@ -1237,8 +1695,8 @@ HDfprintf(stderr, "%s: Leaving\n", FUNC);
* Function: H5MF_try_close
*
* Purpose: This is called by H5Fformat_convert() to close and delete
- * free-space managers when downgrading persistent free-space
- * to non-persistent.
+ * free-space managers when downgrading persistent free-space
+ * to non-persistent.
*
* Return: SUCCEED/FAIL
*
@@ -1251,10 +1709,13 @@ herr_t
H5MF_try_close(H5F_t *f, hid_t dxpl_id)
{
H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
+ H5AC_ring_t curr_ring = H5AC_RING_INV; /* Current ring value */
+ H5AC_ring_t needed_ring = H5AC_RING_INV; /* Ring value needed for this iteration */
H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
+ hbool_t reset_ring = FALSE; /* Whether the ring was set */
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_NOAPI_TAG(dxpl_id, H5AC__FREESPACE_TAG, FAIL)
#ifdef H5MF_ALLOC_DEBUG
HDfprintf(stderr, "%s: Entering\n", FUNC);
#endif /* H5MF_ALLOC_DEBUG */
@@ -1262,53 +1723,125 @@ HDfprintf(stderr, "%s: Entering\n", FUNC);
/* check args */
HDassert(f);
- /* Set the ring type in the DXPL */
- if(H5AC_set_ring(dxpl_id, H5AC_RING_FSM, &dxpl, &orig_ring) < 0)
+ /* If there have been no file space allocations / deallocations so
+ * far, we must call H5MF_tidy_self_referential_fsm_hack() to float
+ * all self referential FSMs and release file space allocated to
+ * them. Otherwise, the function will be called after the format
+ * conversion and will become very confused.
+ *
+ * The situation is further complicated if a cache image exists
+ * and had not yet been loaded into the metadata cache. In this
+ * case, call H5AC_force_cache_image_load() instead of
+ * H5MF_tidy_self_referential_fsm_hack(). H5AC_force_cache_image_load()
+ * will load the cache image, and then call
+ * H5MF_tidy_self_referential_fsm_hack() to discard the cache image
+ * block.
+ */
+ if(f->shared->first_alloc_dealloc) {
+ if(H5AC_cache_image_pending(f)) {
+ if(H5AC_force_cache_image_load(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTFREE, FAIL, "forced cache image load failed")
+ } /* end if */
+ else {
+ if(H5MF_tidy_self_referential_fsm_hack(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTFREE, FAIL, "tidy of self referential fsm hack failed")
+ } /* end else */
+ } /* end if */
+
+ /* Set the ring type in the DXPL. In most cases, we will
+ * need H5AC_RING_RDFSM, so initialy set the ring in
+ * the DXPL to that value. We will alter this later if
+ * needed.
+ */
+ if(H5AC_set_ring(dxpl_id, H5AC_RING_RDFSM, &dxpl, &orig_ring) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value")
+ reset_ring = TRUE;
+ curr_ring = H5AC_RING_RDFSM;
- /* Close and delete freespace managers from the file */
- if(H5MF__close_delete(f, dxpl_id) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to close delete free-space managers")
+ if(H5F_PAGED_AGGR(f)) {
+ H5F_mem_page_t ptype; /* Memory type for iteration */
+
+ /* Iterate over all the free space types that have managers and
+ * get each free list's space
+ */
+ for(ptype = H5F_MEM_PAGE_META; ptype < H5F_MEM_PAGE_NTYPES; H5_INC_ENUM(H5F_mem_page_t, ptype)) {
+ /* Test to see if we need to switch rings -- do so if required */
+ if(H5MF__fsm_type_is_self_referential(f, ptype))
+ needed_ring = H5AC_RING_MDFSM;
+ else
+ needed_ring = H5AC_RING_RDFSM;
+
+ if(needed_ring != curr_ring ) {
+ if(H5AC_set_ring(dxpl_id, needed_ring, &dxpl, &curr_ring) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value (1)")
+ curr_ring = needed_ring;
+ } /* end if */
+
+ if(H5MF__close_delete_fstype(f, dxpl_id, ptype) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't close the free space manager")
+ } /* end for */
+ } /* end if */
+ else {
+ H5FD_mem_t type; /* Memory type for iteration */
+
+ /* Iterate over all the free space types that have managers and
+ * get each free list's space
+ */
+ for(type = H5FD_MEM_DEFAULT; type < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, type)) {
+ /* test to see if we need to switch rings -- do so if required */
+ if(H5MF__fsm_type_is_self_referential(f, (H5F_mem_page_t)type))
+ needed_ring = H5AC_RING_MDFSM;
+ else
+ needed_ring = H5AC_RING_RDFSM;
+
+ if(needed_ring != curr_ring) {
+ if(H5AC_set_ring(dxpl_id, needed_ring, &dxpl, &curr_ring) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value")
+ curr_ring = needed_ring;
+ } /* end if */
+
+ if(H5MF__close_delete_fstype(f, dxpl_id, (H5F_mem_page_t)type) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't close the free space manager")
+ } /* end for */
+ } /* end else */
done:
/* Reset the ring in the DXPL */
- if(H5AC_reset_ring(dxpl, orig_ring) < 0)
- HDONE_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set property value")
+ if(reset_ring)
+ if(H5AC_reset_ring(dxpl, orig_ring) < 0)
+ HDONE_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set property value")
#ifdef H5MF_ALLOC_DEBUG
HDfprintf(stderr, "%s: Leaving\n", FUNC);
#endif /* H5MF_ALLOC_DEBUG */
- FUNC_LEAVE_NOAPI(ret_value)
+ FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
} /* H5MF_try_close() */
/*-------------------------------------------------------------------------
- * Function: H5MF_close
+ * Function: H5MF__close_aggrfs
*
- * Purpose: Close the free space tracker(s) for a file
+ * Purpose: Close the free space tracker(s) for a file: non-paged aggregation
*
- * Return: SUCCEED/FAIL
+ * Return: SUCCEED/FAIL
*
* Programmer: Quincey Koziol
* Tuesday, January 22, 2008
*
- * Modifications:
- * Vailin Choi; July 2012
- * As the default free-list mapping is changed to H5FD_FLMAP_DICHOTOMY,
- * modifications are needed to shrink EOA if the last section of each free-space manager
- * and the remaining space in the two aggregators are at EOA.
-
*-------------------------------------------------------------------------
*/
-herr_t
-H5MF_close(H5F_t *f, hid_t dxpl_id)
+static herr_t
+H5MF__close_aggrfs(H5F_t *f, hid_t dxpl_id)
{
H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
+ H5AC_ring_t curr_ring = H5AC_RING_INV; /* Current ring value */
+ H5AC_ring_t needed_ring = H5AC_RING_INV; /* Ring value needed for this iteration. */
H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
- H5FD_mem_t type; /* Memory type for iteration */
- herr_t ret_value = SUCCEED; /* Return value */
+ H5FD_mem_t type; /* Memory type for iteration */
+ hbool_t reset_ring = FALSE; /* Whether the ring was set */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_TAG(dxpl_id, H5AC__FREESPACE_TAG, FAIL)
+ FUNC_ENTER_STATIC_TAG(dxpl_id, H5AC__FREESPACE_TAG, FAIL)
#ifdef H5MF_ALLOC_DEBUG
HDfprintf(stderr, "%s: Entering\n", FUNC);
#endif /* H5MF_ALLOC_DEBUG */
@@ -1319,113 +1852,122 @@ HDfprintf(stderr, "%s: Entering\n", FUNC);
HDassert(f->shared->lf);
HDassert(f->shared->sblock);
- /* Set the ring type in the DXPL */
- if(H5AC_set_ring(dxpl_id, H5AC_RING_FSM, &dxpl, &orig_ring) < 0)
+ /* Set the ring type in the DXPL. In most cases, we will
+ * need H5AC_RING_RDFSM, so initially set the ring in
+ * the DXPL to that value. We will alter this later if
+ * needed.
+ */
+ if(H5AC_set_ring(dxpl_id, H5AC_RING_RDFSM, &dxpl, &orig_ring) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value")
+ reset_ring = TRUE;
+ curr_ring = H5AC_RING_RDFSM;
/* Free the space in aggregators */
- /* (for space not at EOF, it may be put into free space managers) */
+ /* (for space not at EOA, it may be put into free space managers) */
if(H5MF_free_aggrs(f, dxpl_id) < 0)
HGOTO_ERROR(H5E_FILE, H5E_CANTFREE, FAIL, "can't free aggregators")
/* Trying shrinking the EOA for the file */
- if(H5MF_close_shrink_eoa(f, dxpl_id) < 0)
+ if(H5MF__close_shrink_eoa(f, dxpl_id) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSHRINK, FAIL, "can't shrink eoa")
/* Making free-space managers persistent for superblock version >= 2 */
if(f->shared->sblock->super_vers >= HDF5_SUPERBLOCK_VERSION_2
- && f->shared->fs_strategy == H5F_FILE_SPACE_ALL_PERSIST) {
- H5O_fsinfo_t fsinfo; /* Free space manager info message */
- hbool_t update = FALSE; /* To update info for the message */
+ && f->shared->fs_persist) {
+ H5O_fsinfo_t fsinfo; /* File space info message */
+ haddr_t final_eoa; /* Final eoa -- for sanity check */
+ H5F_mem_page_t ptype; /* Memory type for iteration */
- /* Check to remove free-space manager info message from superblock extension */
- if(H5F_addr_defined(f->shared->sblock->ext_addr))
- if(H5F_super_ext_remove_msg(f, dxpl_id, H5O_FSINFO_ID) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "error in removing message from superblock extension")
+ /* superblock extension and free space manager message should
+ * exist at this point -- verify at least the former.
+ */
+ HDassert(H5F_addr_defined(f->shared->sblock->ext_addr));
- /* Free free-space manager header and/or section info header */
- for(type = H5FD_MEM_SUPER; type < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, type)) {
- H5FS_stat_t fs_stat; /* Information for free-space manager */
+ /* file space for all non-empty free space managers should be
+ * allocated at this point, and these free space managers should
+ * be written to file and thus their headers and section info
+ * entries in the metadata cache should be clean.
+ */
- /* Check for free space manager of this type */
- if(f->shared->fs_man[type]) {
- /* Switch to "about to be deleted" state */
- f->shared->fs_state[type] = H5F_FS_STATE_DELETING;
-
- /* Query the free space manager's information */
- if(H5FS_stat_info(f, f->shared->fs_man[type], &fs_stat) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "can't get free-space info")
-
- /* Check if the free space manager has space in the file */
- if(H5F_addr_defined(fs_stat.addr) || H5F_addr_defined(fs_stat.sect_addr)) {
- /* Delete the free space manager in the file */
- /* (will re-allocate later) */
- if(H5FS_free(f, f->shared->fs_man[type], dxpl_id) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't release free-space headers")
- f->shared->fs_addr[type] = HADDR_UNDEF;
- } /* end if */
- } /* end iif */
- fsinfo.fs_addr[type-1] = HADDR_UNDEF;
- } /* end for */
-
- fsinfo.strategy = f->shared->fs_strategy;
- fsinfo.threshold = f->shared->fs_threshold;
-
- /* Write free-space manager info message to superblock extension object header */
- /* Create the superblock extension object header in advance if needed */
- if(H5F_super_ext_write_msg(f, dxpl_id, H5O_FSINFO_ID, &fsinfo, TRUE) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_WRITEERROR, FAIL, "error in writing message to superblock extension")
-
- /* Re-allocate free-space manager header and/or section info header */
- for(type = H5FD_MEM_SUPER; type < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, type)) {
- H5FS_stat_t fs_stat; /* Information for free-space manager */
-
- /* Check for active free space manager of this type */
+ /* gather data for the free space manager superblock extension message.
+ *
+ * In passing, verify that all the free space managers are closed.
+ */
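+ /* (Note: fs_addr[] has no slot for the DEFAULT type at enum value 0,
+ * hence the "- 1" index offset in the loops below.)
+ */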
+ for(ptype = H5F_MEM_PAGE_META; ptype < H5F_MEM_PAGE_NTYPES; H5_INC_ENUM(H5F_mem_page_t, ptype))
+ fsinfo.fs_addr[ptype - 1] = HADDR_UNDEF;
+ for(type = H5FD_MEM_SUPER; type < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, type))
+ fsinfo.fs_addr[type-1] = f->shared->fs_addr[type];
+ fsinfo.strategy = f->shared->fs_strategy;
+ fsinfo.persist = f->shared->fs_persist;
+ fsinfo.threshold = f->shared->fs_threshold;
+ fsinfo.page_size = f->shared->fs_page_size;
+ fsinfo.pgend_meta_thres = f->shared->pgend_meta_thres;
+ fsinfo.eoa_pre_fsm_fsalloc = f->shared->eoa_pre_fsm_fsalloc;
+
+ /* Write the free space manager message -- message must already exist */
+ if(H5F_super_ext_write_msg(f, dxpl_id, H5O_FSINFO_ID, &fsinfo, FALSE, H5O_MSG_FLAG_MARK_IF_UNKNOWN) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_WRITEERROR, FAIL, "error in writing message to superblock extension")
+
+ /* Close the free space managers */
+ for(type = H5FD_MEM_SUPER; type < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, type)) {
if(f->shared->fs_man[type]) {
- /* Re-query free space manager info for this type */
- if(H5FS_stat_info(f, f->shared->fs_man[type], &fs_stat) < 0)
- HGOTO_ERROR(H5E_FSPACE, H5E_CANTRELEASE, FAIL, "can't get free-space info")
-
- /* Are there sections to persist? */
- if(fs_stat.serial_sect_count) {
- /* Allocate space for free-space manager header */
- if(H5FS_alloc_hdr(f, f->shared->fs_man[type], &f->shared->fs_addr[type], dxpl_id) < 0)
- HGOTO_ERROR(H5E_FSPACE, H5E_NOSPACE, FAIL, "can't allocated free-space header")
-
- /* Allocate space for free-space maanger section info header */
- if(H5FS_alloc_sect(f, f->shared->fs_man[type], dxpl_id) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "can't allocate free-space section info")
-
- HDassert(f->shared->fs_addr[type]);
- fsinfo.fs_addr[type-1] = f->shared->fs_addr[type];
- update = TRUE;
- } /* end if */
- } else if(H5F_addr_defined(f->shared->fs_addr[type])) {
- fsinfo.fs_addr[type-1] = f->shared->fs_addr[type];
- update = TRUE;
- } /* end else-if */
- } /* end for */
-
- /* Update the free space manager info message in superblock extension object header */
- if(update)
- if(H5F_super_ext_write_msg(f, dxpl_id, H5O_FSINFO_ID, &fsinfo, FALSE) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_WRITEERROR, FAIL, "error in writing message to superblock extension")
-
- /* Final close of free-space managers */
- for(type = H5FD_MEM_DEFAULT; type < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, type)) {
- if(f->shared->fs_man[type]) {
- if(H5FS_close(f, dxpl_id, f->shared->fs_man[type]) < 0)
- HGOTO_ERROR(H5E_FSPACE, H5E_CANTRELEASE, FAIL, "can't close free space manager")
- f->shared->fs_man[type] = NULL;
- f->shared->fs_state[type] = H5F_FS_STATE_CLOSED;
- } /* end if */
- f->shared->fs_addr[type] = HADDR_UNDEF;
- } /* end for */
+ /* test to see if we need to switch rings -- do
+ * so if required
+ */
+ if(H5MF__fsm_type_is_self_referential(f, (H5F_mem_page_t)type))
+ needed_ring = H5AC_RING_MDFSM;
+ else
+ needed_ring = H5AC_RING_RDFSM;
+
+ if(needed_ring != curr_ring) {
+ if(H5AC_set_ring(dxpl_id, needed_ring, &dxpl, &curr_ring) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value (1)")
+ curr_ring = needed_ring;
+ } /* end if */
+
+ HDassert(f->shared->fs_state[type] == H5F_FS_STATE_OPEN);
+
+ if(H5FS_close(f, dxpl_id, f->shared->fs_man[type]) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't close free space manager")
+ f->shared->fs_man[type] = NULL;
+ f->shared->fs_state[type] = H5F_FS_STATE_CLOSED;
+ } /* end if */
+ f->shared->fs_addr[type] = HADDR_UNDEF;
+ } /* end for */
+
+ /* verify that we haven't dirtied any metadata cache entries
+ * from the metadata free space manager ring out.
+ */
+ HDassert(H5AC_cache_is_clean(f, H5AC_RING_MDFSM));
+
+ /* verify that the aggregators are still shutdown. */
+ HDassert(f->shared->sdata_aggr.tot_size == 0);
+ HDassert(f->shared->sdata_aggr.addr == 0);
+ HDassert(f->shared->sdata_aggr.size == 0);
+
+ HDassert(f->shared->meta_aggr.tot_size == 0);
+ HDassert(f->shared->meta_aggr.addr == 0);
+ HDassert(f->shared->meta_aggr.size == 0);
+
+ /* Try shrinking the EOA for the file */
+ /* (in case any free space is now at the EOA) */
+ if(H5MF__close_shrink_eoa(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSHRINK, FAIL, "can't shrink eoa")
+
+ /* get the eoa, and verify that it has the expected value */
+ if(HADDR_UNDEF == (final_eoa = H5FD_get_eoa(f->shared->lf, H5FD_MEM_DEFAULT)) )
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to get file size")
+
+ /* f->shared->eoa_post_fsm_fsalloc is undefined if there has
+ * been no file space allocation or deallocation since file
+ * open.
+ */
+ HDassert((f->shared->first_alloc_dealloc) || (final_eoa == f->shared->eoa_post_fsm_fsalloc));
} /* end if */
else { /* super_vers can be 0, 1, 2 */
- /* Close and delete freespace managers from the file */
- if(H5MF__close_delete(f, dxpl_id) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't initialize file free space")
+ for(type = H5FD_MEM_DEFAULT; type < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, type))
+ if(H5MF__close_delete_fstype(f, dxpl_id, (H5F_mem_page_t)type) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't initialize file free space")
} /* end else */
/* Free the space in aggregators (again) */
@@ -1435,76 +1977,357 @@ HDfprintf(stderr, "%s: Entering\n", FUNC);
/* Trying shrinking the EOA for the file */
/* (in case any free space is now at the EOA) */
- if(H5MF_close_shrink_eoa(f, dxpl_id) < 0)
+ if(H5MF__close_shrink_eoa(f, dxpl_id) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSHRINK, FAIL, "can't shrink eoa")
done:
/* Reset the ring in the DXPL */
- if(H5AC_reset_ring(dxpl, orig_ring) < 0)
- HDONE_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set property value")
+ if(reset_ring)
+ if(H5AC_reset_ring(dxpl, orig_ring) < 0)
+ HDONE_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set property value")
#ifdef H5MF_ALLOC_DEBUG
HDfprintf(stderr, "%s: Leaving\n", FUNC);
#endif /* H5MF_ALLOC_DEBUG */
FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
-} /* end H5MF_close() */
+} /* end H5MF__close_aggrfs() */
/*-------------------------------------------------------------------------
- * Function: H5MF_sects_cb()
+ * Function: H5MF__close_pagefs
*
- * Purpose: Iterator callback for each free-space section
- * Retrieve address and size into user data
+ * Purpose: Close the free space tracker(s) for a file: paged aggregation
*
- * Return: Always succeed
+ * Return: SUCCEED/FAIL
*
- * Programmer: Vailin Choi
- * July 1st, 2009
+ * Programmer: Vailin Choi; Dec 2012
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5MF_sects_cb(H5FS_section_info_t *_sect, void *_udata)
+H5MF__close_pagefs(H5F_t *f, hid_t dxpl_id)
{
- H5MF_free_section_t *sect = (H5MF_free_section_t *)_sect;
- H5MF_sect_iter_ud_t *udata = (H5MF_sect_iter_ud_t *)_udata;
+ H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
+ H5AC_ring_t curr_ring = H5AC_RING_INV; /* current ring value */
+ H5AC_ring_t needed_ring = H5AC_RING_INV; /* ring value needed for this
+ * iteration.
+ */
+ H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
+ H5F_mem_page_t ptype; /* Memory type for iteration */
+ H5O_fsinfo_t fsinfo; /* File space info message */
+ hbool_t reset_ring = FALSE; /* Whether the ring was set */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT_NOERR
+ FUNC_ENTER_STATIC_TAG(dxpl_id, H5AC__FREESPACE_TAG, FAIL)
+#ifdef H5MF_ALLOC_DEBUG
+HDfprintf(stderr, "%s: Entering\n", FUNC);
+#endif /* H5MF_ALLOC_DEBUG */
- if(udata->sect_idx < udata->sect_count) {
- udata->sects[udata->sect_idx].addr = sect->sect_info.addr;
- udata->sects[udata->sect_idx].size = sect->sect_info.size;
- udata->sect_idx++;
+ /* check args */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(f->shared->lf);
+ HDassert(f->shared->sblock);
+ HDassert(f->shared->fs_page_size);
+ HDassert(f->shared->sblock->super_vers >= HDF5_SUPERBLOCK_VERSION_2);
+
+ /* Set the ring type in the DXPL. In most cases, we will
+ * need H5AC_RING_RDFSM, so initially set the ring in
+ * the DXPL to that value. We will alter this later if
+ * needed.
+ */
+ if(H5AC_set_ring(dxpl_id, H5AC_RING_RDFSM, &dxpl, &orig_ring) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value")
+ reset_ring = TRUE;
+ curr_ring = H5AC_RING_RDFSM;
+
+ /* Try shrinking the EOA for the file */
+ if(H5MF__close_shrink_eoa(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSHRINK, FAIL, "can't shrink eoa")
+
+ /* Set up file space info message */
+ fsinfo.strategy = f->shared->fs_strategy;
+ fsinfo.persist = f->shared->fs_persist;
+ fsinfo.threshold = f->shared->fs_threshold;
+ fsinfo.page_size = f->shared->fs_page_size;
+ fsinfo.pgend_meta_thres = f->shared->pgend_meta_thres;
+ fsinfo.eoa_pre_fsm_fsalloc = HADDR_UNDEF;
+ for(ptype = H5F_MEM_PAGE_META; ptype < H5F_MEM_PAGE_NTYPES; H5_INC_ENUM(H5F_mem_page_t, ptype))
+ fsinfo.fs_addr[ptype - 1] = HADDR_UNDEF;
+
+ if(f->shared->fs_persist) {
+ haddr_t final_eoa; /* final eoa -- for sanity check */
+
+ /* superblock extension and free space manager message should
+ * exist at this point -- verify at least the former.
+ */
+ HDassert(H5F_addr_defined(f->shared->sblock->ext_addr));
+
+ /* file space for all non-empty free space managers should be
+ * allocated at this point, and these free space managers should
+ * be written to file and thus their headers and section info
+ * entries in the metadata cache should be clean.
+ */
+
+ /* gather data for the free space manager superblock extension message.
+ * Only need addresses of FSMs and eoa prior to allocation of
+ * file space for the self referential free space managers. Other
+ * data was gathered above.
+ */
+ for(ptype = H5F_MEM_PAGE_META; ptype < H5F_MEM_PAGE_NTYPES; H5_INC_ENUM(H5F_mem_page_t, ptype))
+ fsinfo.fs_addr[ptype-1] = f->shared->fs_addr[ptype];
+ fsinfo.eoa_pre_fsm_fsalloc = f->shared->eoa_pre_fsm_fsalloc;
+
+ /* Write the free space manager message -- message must already exist */
+ if(H5F_super_ext_write_msg(f, dxpl_id, H5O_FSINFO_ID, &fsinfo, FALSE, H5O_MSG_FLAG_MARK_IF_UNKNOWN) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_WRITEERROR, FAIL, "error in writing message to superblock extension")
+
+ /* Close the free space managers */
+ /* use H5MF__close_fstype() for this? */
+ for(ptype = H5F_MEM_PAGE_META; ptype < H5F_MEM_PAGE_NTYPES; H5_INC_ENUM(H5F_mem_page_t, ptype)) {
+ if(f->shared->fs_man[ptype]) {
+ /* test to see if we need to switch rings -- do
+ * so if required
+ */
+ if(H5MF__fsm_type_is_self_referential(f, ptype))
+ needed_ring = H5AC_RING_MDFSM;
+ else
+ needed_ring = H5AC_RING_RDFSM;
+
+ if(needed_ring != curr_ring) {
+ if(H5AC_set_ring(dxpl_id, needed_ring, &dxpl, &curr_ring) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value (1)")
+ curr_ring = needed_ring;
+ } /* end if */
+
+ HDassert(f->shared->fs_state[ptype] == H5F_FS_STATE_OPEN);
+
+ if(H5FS_close(f, dxpl_id, f->shared->fs_man[ptype]) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't close free space manager")
+ f->shared->fs_man[ptype] = NULL;
+ f->shared->fs_state[ptype] = H5F_FS_STATE_CLOSED;
+ } /* end if */
+ f->shared->fs_addr[ptype] = HADDR_UNDEF;
+ } /* end for */
+
+ /* verify that we haven't dirtied any metadata cache entries
+ * from the metadata free space manager ring out.
+ */
+ HDassert(H5AC_cache_is_clean(f, H5AC_RING_MDFSM));
+
+ /* Try shrinking the EOA for the file */
+ /* (in case any free space is now at the EOA) */
+ if(H5MF__close_shrink_eoa(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSHRINK, FAIL, "can't shrink eoa")
+
+ /* get the eoa, and verify that it has the expected value */
+ if(HADDR_UNDEF == (final_eoa = H5FD_get_eoa(f->shared->lf, H5FD_MEM_DEFAULT)) )
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to get file size")
+
+ /* f->shared->eoa_post_fsm_fsalloc is undefined if there has
+ * been no file space allocation or deallocation since file
+ * open.
+ *
+ * If there is a cache image in the file at file open,
+ * f->shared->first_alloc_dealloc will always be FALSE unless
+ * the file is opened R/O, as otherwise, the image will have been
+ * read and discarded by this point.
+ *
+ * If a cache image was created on file close, the actual EOA
+ * should be in f->shared->eoa_post_mdci_fsalloc. Note that in
+ * this case, it is conceivable that f->shared->first_alloc_dealloc
+ * will still be TRUE, as the cache image is allocated directly from
+ * the file driver layer. However, as this possibility seems remote,
+ * it is ignored in the following assert.
+ */
+ HDassert((f->shared->first_alloc_dealloc) ||
+ (final_eoa == f->shared->eoa_post_fsm_fsalloc) ||
+ ((H5F_addr_defined(f->shared->eoa_post_mdci_fsalloc)) &&
+ (final_eoa == f->shared->eoa_post_mdci_fsalloc)));
} /* end if */
+ else {
+ /* Iterate over all the free space types that have managers
+ * and get each free list's space
+ */
+ for(ptype = H5F_MEM_PAGE_META; ptype < H5F_MEM_PAGE_NTYPES; H5_INC_ENUM(H5F_mem_page_t, ptype))
+ if(H5MF__close_delete_fstype(f, dxpl_id, ptype) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't close the free space manager")
+
+ /* Write file space info message to superblock extension object header */
+ /* Create the superblock extension object header in advance if needed */
+ if(H5F_super_ext_write_msg(f, dxpl_id, H5O_FSINFO_ID, &fsinfo, FALSE, H5O_MSG_FLAG_MARK_IF_UNKNOWN) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_WRITEERROR, FAIL, "error in writing message to superblock extension")
+ } /* end else */
- FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5MF_sects_cb() */
+ /* Try shrinking the EOA for the file */
+ /* (in case any free space is now at the EOA) */
+ if(H5MF__close_shrink_eoa(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSHRINK, FAIL, "can't shrink eoa")
+
+done:
+ /* Reset the ring in the DXPL */
+ if(reset_ring)
+ if(H5AC_reset_ring(dxpl, orig_ring) < 0)
+ HDONE_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set property value")
+
+#ifdef H5MF_ALLOC_DEBUG
+HDfprintf(stderr, "%s: Leaving\n", FUNC);
+#endif /* H5MF_ALLOC_DEBUG */
+ FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
+} /* end H5MF__close_pagefs() */
/*-------------------------------------------------------------------------
- * Function: H5MF_get_free_sections()
+ * Function: H5MF__close_shrink_eoa
*
- * Purpose: To iterate over one or all free-space managers for:
- * # of sections
- * section info as defined in H5F_sect_info_t
+ * Purpose: Shrink the EOA while closing
*
* Return: SUCCEED/FAIL
*
- * Programmer: Vailin Choi
- * July 1st, 2009
+ * Programmer: Quincey Koziol
+ * Saturday, July 7, 2012
*
*-------------------------------------------------------------------------
*/
-ssize_t
-H5MF_get_free_sections(H5F_t *f, hid_t dxpl_id, H5FD_mem_t type, size_t nsects, H5F_sect_info_t *sect_info)
+static herr_t
+H5MF__close_shrink_eoa(H5F_t *f, hid_t dxpl_id)
{
- size_t total_sects = 0; /* total number of sections */
- H5MF_sect_iter_ud_t sect_udata; /* User data for callback */
H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
+ H5AC_ring_t curr_ring = H5AC_RING_INV; /* Current ring value */
+ H5AC_ring_t needed_ring = H5AC_RING_INV; /* Ring value needed for this iteration. */
H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
- H5FD_mem_t start_type, end_type; /* Memory types to iterate over */
- H5FD_mem_t ty; /* Memory type for iteration */
- ssize_t ret_value = -1; /* Return value */
+ H5F_mem_t type;
+ H5F_mem_page_t ptype; /* Memory type for iteration */
+ hbool_t eoa_shrank; /* Whether an EOA shrink occurs */
+ htri_t status; /* Status value */
+ H5MF_sect_ud_t udata; /* User data for callback */
+ hbool_t reset_ring = FALSE; /* Whether the ring was set */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC_TAG(dxpl_id, H5AC__FREESPACE_TAG, FAIL)
+
+ /* check args */
+ HDassert(f);
+ HDassert(f->shared);
+
+ /* Construct user data for callbacks */
+ udata.f = f;
+ udata.dxpl_id = dxpl_id;
+ udata.allow_sect_absorb = FALSE;
+ udata.allow_eoa_shrink_only = TRUE;
+
+ /* Set the ring type in the DXPL */
+ if(H5AC_set_ring(dxpl_id, H5AC_RING_RDFSM, &dxpl, &orig_ring) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value(1)")
+ reset_ring = TRUE;
+ curr_ring = H5AC_RING_RDFSM;
+
+ /* Iterate until no more EOA shrinking occurs */
+ do {
+ eoa_shrank = FALSE;
+
+ if(H5F_PAGED_AGGR(f)) {
+ /* Check the last section of each free-space manager */
+ for(ptype = H5F_MEM_PAGE_META; ptype < H5F_MEM_PAGE_NTYPES; H5_INC_ENUM(H5F_mem_page_t, ptype)) {
+ if(f->shared->fs_man[ptype]) {
+ /* Test to see if we need to switch rings -- do so if required */
+ if(H5MF__fsm_type_is_self_referential(f, ptype))
+ needed_ring = H5AC_RING_MDFSM;
+ else
+ needed_ring = H5AC_RING_RDFSM;
+
+ if(needed_ring != curr_ring) {
+ if(H5AC_set_ring(dxpl_id, needed_ring, &dxpl, &curr_ring) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value (1)")
+ curr_ring = needed_ring;
+ } /* end if */
+
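+ /* Map the paged free-space manager type back into the H5FD_mem_t
+ * range for the shrink callback: the small types map directly, the
+ * large types are folded back onto the matching H5FD_mem_t via the
+ * modulo below.
+ */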
+ udata.alloc_type = (H5FD_mem_t)((H5FD_mem_t)ptype < H5FD_MEM_NTYPES ? ptype : ((ptype % H5FD_MEM_NTYPES) + 1));
+
+ if((status = H5FS_sect_try_shrink_eoa(f, dxpl_id, f->shared->fs_man[ptype], &udata)) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSHRINK, FAIL, "can't check for shrinking eoa")
+ else if(status > 0)
+ eoa_shrank = TRUE;
+ } /* end if */
+ } /* end for */
+ } /* end if */
+ else {
+ /* Check the last section of each free-space manager */
+ for(type = H5FD_MEM_DEFAULT; type < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, type)) {
+ if(f->shared->fs_man[type]) {
+ /* Test to see if we need to switch rings -- do so if required */
+ if(H5MF__fsm_type_is_self_referential(f, (H5F_mem_page_t)type))
+ needed_ring = H5AC_RING_MDFSM;
+ else
+ needed_ring = H5AC_RING_RDFSM;
+
+ if(needed_ring != curr_ring) {
+ if(H5AC_set_ring(dxpl_id, needed_ring, &dxpl, &curr_ring) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value (1)")
+ curr_ring = needed_ring;
+ } /* end if */
+
+ udata.alloc_type = type;
+
+ if((status = H5FS_sect_try_shrink_eoa(f, dxpl_id, f->shared->fs_man[type], &udata)) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSHRINK, FAIL, "can't check for shrinking eoa")
+ else if(status > 0)
+ eoa_shrank = TRUE;
+ } /* end if */
+ } /* end for */
+
+ /* check the two aggregators */
+ if((status = H5MF_aggrs_try_shrink_eoa(f, dxpl_id)) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSHRINK, FAIL, "can't check for shrinking eoa")
+ else if(status > 0)
+ eoa_shrank = TRUE;
+ } /* end else */
+ } while(eoa_shrank);
+
+done:
+ /* Reset the ring in the DXPL */
+ if(reset_ring)
+ if(H5AC_reset_ring(dxpl, orig_ring) < 0)
+ HDONE_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set property value")
+
+ FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
+} /* end H5MF__close_shrink_eoa() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5MF_get_freespace
+ *
+ * Purpose: Retrieve the amount of free space in the file
+ *
+ * Return: SUCCEED/FAIL
+ *
+ * Programmer: Quincey Koziol
+ * Monday, October 6, 2003
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5MF_get_freespace(H5F_t *f, hid_t dxpl_id, hsize_t *tot_space, hsize_t *meta_size)
+{
+ haddr_t ma_addr = HADDR_UNDEF; /* Base "metadata aggregator" address */
+ hsize_t ma_size = 0; /* Size of "metadata aggregator" */
+ haddr_t sda_addr = HADDR_UNDEF; /* Base "small data aggregator" address */
+ hsize_t sda_size = 0; /* Size of "small data aggregator" */
+ hsize_t tot_fs_size = 0; /* Amount of all free space managed */
+ hsize_t tot_meta_size = 0; /* Amount of metadata for free space managers */
+ H5FD_mem_t tt; /* Memory type for iteration */
+ H5F_mem_page_t type; /* Memory type for iteration */
+ H5F_mem_page_t start_type; /* Memory type for iteration */
+ H5F_mem_page_t end_type; /* Memory type for iteration */
+ htri_t fs_started[H5F_MEM_PAGE_NTYPES]; /* Indicate whether the free-space manager has been started */
+ haddr_t fs_eoa[H5FD_MEM_NTYPES]; /* EOA for each free-space manager */
+ H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
+ H5AC_ring_t curr_ring = H5AC_RING_INV; /* Current ring value */
+ H5AC_ring_t needed_ring = H5AC_RING_INV; /* Ring value needed for this iteration. */
+ H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
+ hbool_t reset_ring = FALSE; /* Whether the ring was set */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_TAG(dxpl_id, H5AC__FREESPACE_TAG, FAIL)
@@ -1513,14 +2336,185 @@ H5MF_get_free_sections(H5F_t *f, hid_t dxpl_id, H5FD_mem_t type, size_t nsects,
HDassert(f->shared);
HDassert(f->shared->lf);
+ /* Set the ring type in the DXPL. In most cases, we will
+ * need H5AC_RING_RDFSM, so initialy set the ring in
+ * the DXPL to that value. We will alter this later if
+ * needed.
+ */
+ if(H5AC_set_ring(dxpl_id, H5AC_RING_RDFSM, &dxpl, &orig_ring) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value")
+ reset_ring = TRUE;
+ curr_ring = H5AC_RING_RDFSM;
+
/* Determine start/end points for loop */
+ if(H5F_PAGED_AGGR(f)) {
+ start_type = H5F_MEM_PAGE_META;
+ end_type = H5F_MEM_PAGE_NTYPES;
+ } /* end if */
+ else {
+ start_type = (H5F_mem_page_t)H5FD_MEM_SUPER;
+ end_type = (H5F_mem_page_t)H5FD_MEM_NTYPES;
+ } /* end else */
+
+ for(tt = H5FD_MEM_SUPER; tt < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, tt))
+ if(HADDR_UNDEF == (fs_eoa[tt] = H5F_get_eoa(f, tt)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "driver get_eoa request failed")
+
+ if(!H5F_PAGED_AGGR(f)) {
+ /* Retrieve metadata aggregator info, if available */
+ if(H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "can't query metadata aggregator stats")
+
+ /* Retrieve 'small data' aggregator info, if available */
+ if(H5MF_aggr_query(f, &(f->shared->sdata_aggr), &sda_addr, &sda_size) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "can't query small data aggregator stats")
+ } /* end if */
+
+ /* Iterate over all the free space types that have managers and get each free list's space */
+ for(type = start_type; type < end_type; H5_INC_ENUM(H5F_mem_page_t, type)) {
+ fs_started[type] = FALSE;
+
+ /* Check if the free space for the file has been initialized */
+ if(!f->shared->fs_man[type] && H5F_addr_defined(f->shared->fs_addr[type])) {
+ if(H5MF_open_fstype(f, dxpl_id, type) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't initialize file free space")
+ HDassert(f->shared->fs_man[type]);
+ fs_started[type] = TRUE;
+ } /* end if */
+
+ /* test to see if we need to switch rings -- do
+ * so if required
+ */
+ if(H5MF__fsm_type_is_self_referential(f, (H5F_mem_page_t)type))
+ needed_ring = H5AC_RING_MDFSM;
+ else
+ needed_ring = H5AC_RING_RDFSM;
+
+ if(needed_ring != curr_ring) {
+ if(H5AC_set_ring(dxpl_id, needed_ring, &dxpl, &curr_ring) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value")
+ curr_ring = needed_ring;
+ } /* end if */
+
+ /* Check if there's free space of this type */
+ if(f->shared->fs_man[type]) {
+ hsize_t type_fs_size = 0; /* Amount of free space managed for each type */
+ hsize_t type_meta_size = 0; /* Amount of free space metadata for each type */
+
+ /* Retrieve free space size from free space manager */
+ if(H5FS_sect_stats(f->shared->fs_man[type], &type_fs_size, NULL) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "can't query free space stats")
+ if(H5FS_size(f, f->shared->fs_man[type], &type_meta_size) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "can't query free space metadata stats")
+
+ /* Increment total free space for types */
+ tot_fs_size += type_fs_size;
+ tot_meta_size += type_meta_size;
+ } /* end if */
+ } /* end for */
+
+ /* Close the free-space managers if they were opened earlier in this routine */
+ for(type = start_type; type < end_type; H5_INC_ENUM(H5F_mem_page_t, type)) {
+ /* Test to see if we need to switch rings -- do so if required */
+ if(H5MF__fsm_type_is_self_referential(f, (H5F_mem_page_t)type))
+ needed_ring = H5AC_RING_MDFSM;
+ else
+ needed_ring = H5AC_RING_RDFSM;
+
+ if(needed_ring != curr_ring) {
+ if(H5AC_set_ring(dxpl_id, needed_ring, &dxpl, &curr_ring) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value")
+ curr_ring = needed_ring;
+ } /* end if */
+
+ if(fs_started[type])
+ if(H5MF__close_fstype(f, dxpl_id, type) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't close file free space")
+ } /* end for */
+
+ /* Set the value(s) to return */
+ /* (The metadata & small data aggregators count as free space now, since they aren't at EOA) */
+ if(tot_space)
+ *tot_space = tot_fs_size + ma_size + sda_size;
+ if(meta_size)
+ *meta_size = tot_meta_size;
+
+done:
+ /* Reset the ring in the DXPL */
+ if(reset_ring)
+ if(H5AC_reset_ring(dxpl, orig_ring) < 0)
+ HDONE_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set property value")
+
+ FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
+} /* end H5MF_get_freespace() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5MF_get_free_sections()
+ *
+ * Purpose: To retrieve free-space section information for
+ * paged or non-paged aggregation
+ *
+ * Return: SUCCEED/FAIL
+ *
+ * Programmer: Vailin Choi; Dec 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+ssize_t
+H5MF_get_free_sections(H5F_t *f, hid_t dxpl_id, H5FD_mem_t type, size_t nsects, H5F_sect_info_t *sect_info)
+{
+ H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
+ H5AC_ring_t curr_ring = H5AC_RING_INV; /* Current ring value */
+ H5AC_ring_t needed_ring = H5AC_RING_INV; /* Ring value needed for this iteration. */
+ H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
+ size_t total_sects = 0; /* Total number of sections */
+ H5MF_sect_iter_ud_t sect_udata; /* User data for callback */
+ H5F_mem_page_t start_type, end_type; /* Memory types to iterate over */
+ H5F_mem_page_t ty; /* Memory type for iteration */
+ hbool_t reset_ring = FALSE; /* Whether the ring was set */
+ ssize_t ret_value = -1; /* Return value */
+
+ FUNC_ENTER_NOAPI_TAG(dxpl_id, H5AC__FREESPACE_TAG, -1)
+
+ /* check args */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(f->shared->lf);
+
+ /* H5MF_tidy_self_referential_fsm_hack() will fail if any self
+ * referential FSM is opened prior to the call to it. Thus call
+ * it here if necessary and if it hasn't been called already.
+ *
+ * The situation is further complicated if a cache image exists
+ * and had not yet been loaded into the metadata cache. In this
+ * case, call H5AC_force_cache_image_load() instead of
+ * H5MF_tidy_self_referential_fsm_hack(). H5AC_force_cache_image_load()
+ * will load the cache image, and then call
+ * H5MF_tidy_self_referential_fsm_hack() to discard the cache image
+ * block.
+ */
+ if(f->shared->first_alloc_dealloc) {
+ if(H5AC_cache_image_pending(f)) {
+ if(H5AC_force_cache_image_load(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTFREE, FAIL, "forced cache image load failed")
+ } /* end if */
+ else {
+ if(H5MF_tidy_self_referential_fsm_hack(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTFREE, FAIL, "tidy of self referential fsm hack failed")
+ } /* end else */
+ } /* end if */
+
if(type == H5FD_MEM_DEFAULT) {
- start_type = H5FD_MEM_SUPER;
- end_type = H5FD_MEM_NTYPES;
+ start_type = H5F_MEM_PAGE_SUPER;
+ end_type = H5F_MEM_PAGE_NTYPES;
} /* end if */
else {
- start_type = end_type = type;
- H5_INC_ENUM(H5FD_mem_t, end_type);
+ start_type = end_type = (H5F_mem_page_t)type;
+ if(H5F_PAGED_AGGR(f)) /* set to the corresponding LARGE free-space manager */
+ end_type = (H5F_mem_page_t)(end_type + H5FD_MEM_NTYPES);
+ else
+ H5_INC_ENUM(H5F_mem_page_t, end_type);
} /* end else */
/* Set up user data for section iteration */
@@ -1528,47 +2522,54 @@ H5MF_get_free_sections(H5F_t *f, hid_t dxpl_id, H5FD_mem_t type, size_t nsects,
sect_udata.sect_count = nsects;
sect_udata.sect_idx = 0;
- /* Set the ring type in the DXPL */
- if(H5AC_set_ring(dxpl_id, H5AC_RING_FSM, &dxpl, &orig_ring) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value")
+ /* Set the ring type in the DXPL. In most cases, we will
+ * need H5AC_RING_RDFSM, so initially set the ring in
+ * the DXPL to that value. We will alter this later if
+ * needed.
+ */
+ if(H5AC_set_ring(dxpl_id, H5AC_RING_RDFSM, &dxpl, &orig_ring) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value(0)")
+ reset_ring = TRUE;
+ curr_ring = H5AC_RING_RDFSM;
/* Iterate over memory types, retrieving the number of sections of each type */
- for(ty = start_type; ty < end_type; H5_INC_ENUM(H5FD_mem_t, ty)) {
- hbool_t fs_started = FALSE;
+ for(ty = start_type; ty < end_type; H5_INC_ENUM(H5F_mem_page_t, ty)) {
+ hbool_t fs_started = FALSE; /* Whether the free-space manager was opened here */
+ size_t nums = 0; /* The number of free-space sections */
+
+ /* Test to see if we need to switch rings -- do so if required */
+ if(H5MF__fsm_type_is_self_referential(f, ty))
+ needed_ring = H5AC_RING_MDFSM;
+ else
+ needed_ring = H5AC_RING_RDFSM;
+
+ if(needed_ring != curr_ring) {
+ if(H5AC_set_ring(dxpl_id, needed_ring, &dxpl, &curr_ring) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value (1)")
+ curr_ring = needed_ring;
+ } /* end if */
- /* Open free space manager of this type, if it isn't already */
if(!f->shared->fs_man[ty] && H5F_addr_defined(f->shared->fs_addr[ty])) {
- if(H5MF_alloc_open(f, dxpl_id, ty) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTOPENOBJ, FAIL, "can't initialize file free space")
+ if(H5MF_open_fstype(f, dxpl_id, ty) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't open the free space manager")
HDassert(f->shared->fs_man[ty]);
- fs_started = TRUE;
- } /* end if */
-
- /* Check if f there's free space sections of this type */
- if(f->shared->fs_man[ty]) {
- hsize_t hnums = 0; /* Total # of sections */
- size_t nums; /* Total # of sections, cast to a size_t */
-
- /* Query how many sections of this type */
- if(H5FS_sect_stats(f->shared->fs_man[ty], NULL, &hnums) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "can't query free space stats")
- H5_CHECKED_ASSIGN(nums, size_t, hnums, hsize_t);
-
- /* Increment total # of sections */
- total_sects += nums;
-
- /* Check if we should retrieve the section info */
- if(sect_info && nums > 0) {
- /* Iterate over all the free space sections of this type, adding them to the user's section info */
- if(H5FS_sect_iterate(f, dxpl_id, f->shared->fs_man[ty], H5MF_sects_cb, &sect_udata) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_BADITER, FAIL, "can't iterate over sections")
- } /* end if */
+ fs_started = TRUE;
} /* end if */
- /* Close the free space manager of this type, if we started it here */
+ /* Check if there are free space sections of this type */
+ if(f->shared->fs_man[ty])
+ if(H5MF__get_free_sects(f, dxpl_id, f->shared->fs_man[ty], &sect_udata, &nums) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't get section info for the free space manager")
+
+ /* Increment total # of sections */
+ total_sects += nums;
+
+ /* Close the free space manager of this type, if we started it here */
if(fs_started)
- if(H5MF__alloc_close(f, dxpl_id, ty) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTCLOSEOBJ, FAIL, "can't close file free space")
+ if(H5MF__close_fstype(f, dxpl_id, ty) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTCLOSEOBJ, FAIL, "can't close file free space")
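+ /* For a single requested type under paged aggregation, skip ahead so
+ * the next iteration lands on the corresponding large free-space
+ * manager (end_type was extended above to cover it).
+ */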
+ if((H5F_PAGED_AGGR(f)) && (type != H5FD_MEM_DEFAULT))
+ ty = (H5F_mem_page_t)(ty + H5FD_MEM_NTYPES - 2);
} /* end for */
/* Set return value */
@@ -1576,9 +2577,1343 @@ H5MF_get_free_sections(H5F_t *f, hid_t dxpl_id, H5FD_mem_t type, size_t nsects,
done:
/* Reset the ring in the DXPL */
- if(H5AC_reset_ring(dxpl, orig_ring) < 0)
- HDONE_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set property value")
+ if(reset_ring)
+ if(H5AC_reset_ring(dxpl, orig_ring) < 0)
+ HDONE_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set property value")
- FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
+ FUNC_LEAVE_NOAPI_TAG(ret_value, -1)
} /* H5MF_get_free_sections() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5MF_sects_cb()
+ *
+ * Purpose: Iterator callback for each free-space section
+ * Retrieve address and size into user data
+ *
+ * Return: Always succeed
+ *
+ * Programmer: Vailin Choi
+ * July 1st, 2009
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5MF_sects_cb(H5FS_section_info_t *_sect, void *_udata)
+{
+ H5MF_free_section_t *sect = (H5MF_free_section_t *)_sect;
+ H5MF_sect_iter_ud_t *udata = (H5MF_sect_iter_ud_t *)_udata;
+
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ if(udata->sect_idx < udata->sect_count) {
+ udata->sects[udata->sect_idx].addr = sect->sect_info.addr;
+ udata->sects[udata->sect_idx].size = sect->sect_info.size;
+ udata->sect_idx++;
+ } /* end if */
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5MF_sects_cb() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5MF__get_free_sects
+ *
+ * Purpose: Retrieve section information for the specified free-space manager.
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi; Dec 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5MF__get_free_sects(H5F_t *f, hid_t dxpl_id, H5FS_t *fspace, H5MF_sect_iter_ud_t *sect_udata, size_t *nums)
+{
+ hsize_t hnums = 0; /* # of sections */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* check args */
+ HDassert(f);
+ HDassert(sect_udata);
+ HDassert(nums);
+ HDassert(fspace);
+
+ /* Query how many sections of this type */
+ if(H5FS_sect_stats(fspace, NULL, &hnums) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "can't query free space stats")
+ H5_CHECKED_ASSIGN(*nums, size_t, hnums, hsize_t);
+
+ /* Check if we should retrieve the section info */
+ if(sect_udata->sects && *nums > 0)
+ /* Iterate over all the free space sections of this type, adding them to the user's section info */
+ if(H5FS_sect_iterate(f, dxpl_id, fspace, H5MF_sects_cb, sect_udata) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_BADITER, FAIL, "can't iterate over sections")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5MF__get_free_sects() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5MF_settle_raw_data_fsm()
+ *
+ * Purpose: Handle any tasks required before the metadata cache
+ * can serialize or flush the raw data free space manager
+ * and any metadata free space managers that reside in the
+ * raw data free space manager ring.
+ *
+ * Specifically, this means any metadata managers that DON'T
+ * handle space allocation for free space manager header or
+ * section info will reside in the raw data free space manager
+ * ring.
+ *
+ * In the absence of page allocation, there is at most one
+ * free space manager per memory type defined in H5F_mem_t.
+ * Of these, the one that allocates H5FD_MEM_DRAW will
+ * always reside in the raw data free space manager ring.
+ * If there is more than one metadata free space manager,
+ * all that don't handle H5FD_MEM_FSPACE_HDR or
+ * H5FD_MEM_FSPACE_SINFO (which map to H5FD_MEM_OHDR and
+ * H5FD_MEM_LHEAP respectively) will reside in the raw
+ * data free space manager ring as well.
+ *
+ * With page allocation, the situation is conceptually
+ * identical, but more complex in practice.
+ *
+ * In the worst case (multi file driver) page allocation
+ * can result in two free space managers for each memory
+ * type -- one for small (less than or equal to one page)
+ * allocations, and one for large (greater than one page)
+ * allocations.
+ *
+ * In the more common one file case, page allocation will
+ * result in a total of three free space managers -- one for
+ * small (<= one page) raw data allocations, one for small
+ * metadata allocations (i.e., all memory types other than
+ * H5FD_MEM_DRAW), and one for all large (> one page)
+ * allocations.
+ *
+ * Despite these complications, the solution is the same in
+ * the page allocation case -- free space managers (be they
+ * small data or large) are assigned to the raw data free
+ * space manager ring if they don't allocate file space for
+ * free space managers. Note that in the one file case, the
+ * large free space manager must be assigned to the metadata
+ * free space manager ring, as it both allocates pages for
+ * the metadata free space manager, and allocates space for
+ * large (> 1 page) metadata cache entries.
+ *
+ * At present, the task list for this routine is:
+ *
+ * 1) Reduce the EOA to the extent possible. To do this:
+ *
+ * a) Free both aggregators. Space not at EOA will be
+ * added to the appropriate free space manager.
+ *
+ * The raw data aggregator should not be restarted
+ * after this point. It is possible that the metadata
+ * aggregator will be.
+ *
+ * b) Free all file space currently allocated to free
+ * space managers.
+ *
+ * c) Delete the free space manager superblock
+ * extension message if allocated.
+ *
+ * This done, reduce the EOA by moving it to just before
+ * the last piece of free memory in the file.
+ *
+ * 2) Ensure that space is allocated for the free space
+ * manager superblock extension message. Must do this
+ * now, before reallocating file space for free space
+ * managers, as it is possible that this allocation may
+ * grab the last section in a FSM -- making it unnecessary
+ * to re-allocate file space for it.
+ *
+ * 3) Scan all free space managers not involved in allocating
+ * space for free space managers. For each such free space
+ * manager, test to see if it contains free space. If
+ * it does, allocate file space for its header and section
+ * data. If it contains no free space, leave it without
+ * allocated file space as there is no need to save it to
+ * file.
+ *
+ * Note that all free space managers in this class should
+ * see no further space allocations / deallocations as
+ * at this point, all raw data allocations should be
+ * finalized, as should all metadata allocations not
+ * involving free space managers.
+ *
+ * We will allocate space for free space managers involved
+ * in the allocation of file space for free space managers
+ * in H5MF_settle_meta_data_fsm()
+ *
+ * Return: SUCCEED/FAIL
+ *
+ * Programmer: John Mainzer
+ * 5/25/16
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5MF_settle_raw_data_fsm(H5F_t *f, hid_t dxpl_id, hbool_t *fsm_settled)
+{
+ int pass_count;
+ hsize_t alloc_size;
+ H5F_mem_t mem_type; /* Memory type for iteration */
+ H5F_mem_page_t fsm_type; /* FSM type for iteration */
+ H5O_fsinfo_t fsinfo; /* Free space manager info message */
+ H5FS_stat_t fs_stat; /* Information for free-space manager */
+ H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
+ H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
+ H5AC_ring_t curr_ring = H5AC_RING_INV; /* Current ring value */
+ H5AC_ring_t needed_ring = H5AC_RING_INV; /* Ring value needed for this iteration */
+ hbool_t reset_ring = FALSE; /* Whether the ring was set */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_TAG(dxpl_id, H5AC__FREESPACE_TAG, FAIL)
+
+ /* Check args */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(fsm_settled);
+
+ /* Only need to settle things if we are persisting the free space info
+ * and allocation/deallocation has occurred.
+ */
+ if(f->shared->fs_persist && !f->shared->first_alloc_dealloc) {
+ hbool_t fsm_opened[H5F_MEM_PAGE_NTYPES]; /* State of FSM */
+ hbool_t fsm_visited[H5F_MEM_PAGE_NTYPES]; /* State of FSM */
+
+ /* Sanity check */
+ HDassert(f->shared->sblock);
+
+ /* should only be called if file is opened R/W */
+ HDassert(H5F_INTENT(f) & H5F_ACC_RDWR);
+
+ /* shouldn't be called unless we have a superblock supporting the
+ * superblock extension.
+ */
+ HDassert(f->shared->sblock->super_vers >= HDF5_SUPERBLOCK_VERSION_2);
+
+ /* Initialize fsm_opened and fsm_visited */
+ HDmemset(fsm_opened, 0, sizeof(fsm_opened));
+ HDmemset(fsm_visited, 0, sizeof(fsm_visited));
+
+ /* 1) Reduce the EOA to the extent possible. */
+
+ /* a) Free the space in aggregators:
+ *
+ * (for space not at EOF, it may be put into free space managers)
+ *
+ * Do this now so that the raw data FSM (and any other FSM that isn't
+ * involved in space allocation for FSMs) will have no further activity.
+ *
+ * Note that while the raw data aggregator should not be restarted during
+ * the close process, this need not be the case for the metadata aggregator.
+ *
+ * Note also that the aggregators will not exist if page aggregation
+ * is enabled -- skip this if so.
+ */
+ /* Vailin -- is this correct? */
+ if(!H5F_PAGED_AGGR(f) && (H5MF_free_aggrs(f, dxpl_id) < 0))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTFREE, FAIL, "can't free aggregators")
+
+ /* Set the ring type in the DXPL. In most cases, we will
+ * need H5AC_RING_MDFSM first, so initially set the ring in
+ * the DXPL to that value. We will alter this later if
+ * needed.
+ */
+ if(H5AC_set_ring(dxpl_id, H5AC_RING_MDFSM, &dxpl, &orig_ring) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value(0)")
+ reset_ring = TRUE;
+ curr_ring = H5AC_RING_MDFSM;
+
+ /* b) Free the file space (if any) allocated to each free space manager.
+ *
+ * Do this to facilitate reduction of the size of the file to the
+ * extent possible. We will re-allocate space to free space managers
+ * that have free space to save after this reduction.
+ *
+ * In the case of the raw data free space manager, and any other free
+ * space manager that does not allocate space for free space managers,
+ * allocations should be complete at this point, as all raw data should
+ * have space allocated and be flushed to file by now. Thus we
+ * can examine such free space managers and only re-allocate space for
+ * them if they contain free space. Do this later in this function after
+ * the EOA has been reduced to the extent possible.
+ *
+ * For free space managers that allocate file space for free space
+ * managers (usually just a single metadata free space manager, but for
+ * now at least, free space managers for different types of metadata
+ * are possible), the matter is more ticklish due to the self-
+ * referential nature of the problem. These FSMs are dealt with in
+ * H5MF_settle_meta_data_fsm().
+ *
+ * Since paged allocation may be enabled, there may be up to two
+ * free space managers per memory type -- one for small and one for
+ * large allocation. Hence we must loop over the memory types twice
+ * setting the allocation size accordingly if paged allocation is
+ * enabled.
+ */
+ for(pass_count = 0; pass_count <= 1; pass_count++) {
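+ /* Pass 0 uses a 1-byte alloc_size so that H5MF_alloc_to_fs_type()
+ * selects the small free-space managers; pass 1 (paged allocation
+ * only) uses a size larger than a page so it selects the large ones.
+ */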
+ if(pass_count == 0)
+ alloc_size = 1;
+ else if ( H5F_PAGED_AGGR(f) )
+ alloc_size = f->shared->fs_page_size + 1;
+ else /* no need for a second pass */
+ break;
+
+ for(mem_type = H5FD_MEM_SUPER; mem_type < H5FD_MEM_NTYPES; H5_INC_ENUM(H5F_mem_t, mem_type)) {
+ H5MF_alloc_to_fs_type(f, mem_type, alloc_size, &fsm_type);
+
+ if(pass_count == 0) { /* this is the first pass */
+ HDassert(fsm_type > H5F_MEM_PAGE_DEFAULT);
+ HDassert(fsm_type < H5F_MEM_PAGE_LARGE_SUPER);
+ } /* end if */
+ else if(H5F_PAGED_AGGR(f)) { /* page alloc active */
+ HDassert(fsm_type >= H5F_MEM_PAGE_LARGE_SUPER);
+ HDassert(fsm_type < H5F_MEM_PAGE_NTYPES);
+ } /* end else-if */
+ else /* paged allocation disabled -- should be unreachable */
+ HDassert(FALSE);
+
+ if(!fsm_visited[fsm_type]) {
+ fsm_visited[fsm_type] = TRUE;
+
+ /* If there is no active FSM for this type, but such a FSM has
+ * space allocated in file, open it so that we can free its file
+ * space.
+ */
+ if(NULL == f->shared->fs_man[fsm_type]) {
+ if(H5F_addr_defined(f->shared->fs_addr[fsm_type])) {
+ /* Sanity check */
+ HDassert(fsm_opened[fsm_type] == FALSE);
+
+ if(H5MF_open_fstype(f, dxpl_id, fsm_type) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't initialize file free space manager")
+ fsm_opened[fsm_type] = TRUE;
+ } /* end if */
+ } /* end if */
+
+ if(f->shared->fs_man[fsm_type]) {
+ /* Test to see if we need to switch rings -- do so if required */
+ if(H5MF__fsm_type_is_self_referential(f, fsm_type))
+ needed_ring = H5AC_RING_MDFSM;
+ else
+ needed_ring = H5AC_RING_RDFSM;
+
+ if(needed_ring != curr_ring) {
+ if(H5AC_set_ring(dxpl_id, needed_ring, &dxpl, &curr_ring) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value")
+ curr_ring = needed_ring;
+ } /* end if */
+
+ /* Query free space manager info for this type */
+ if(H5FS_stat_info(f, f->shared->fs_man[fsm_type], &fs_stat) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't get free-space info")
+
+ /* Check if the free space manager has space in the file */
+ if(H5F_addr_defined(fs_stat.addr) || H5F_addr_defined(fs_stat.sect_addr)) {
+ /* Delete the free space manager in the file. Will
+ * reallocate later if the free space manager contains
+ * any free space.
+ */
+ if(H5FS_free(f, f->shared->fs_man[fsm_type], dxpl_id, TRUE) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't release free-space headers")
+ f->shared->fs_addr[fsm_type] = HADDR_UNDEF;
+ } /* end if */
+ } /* end if */
+
+ /* note that we are tracking opened FSMs -- we will close them
+ * at the end of the function.
+ */
+ } /* end if */
+ } /* end for */
+ } /* end for */
+
+
+ /* c) Delete the free space manager superblock extension message
+ * if allocated.
+ *
+ * Must do this since the routine that writes / creates superblock
+ * extension messages will choke if the target message is
+ * unexpectedly either absent or present.
+ *
+ * Update: This is probably unnecessary, as I gather that the
+ * file space manager info message is guaranteed to exist.
+ * Leave it in for now, but consider removing it.
+ */
+ if(H5F_addr_defined(f->shared->sblock->ext_addr))
+ if(H5F_super_ext_remove_msg(f, dxpl_id, H5O_FSINFO_ID) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "error in removing message from superblock extension")
+
+ /* As the final element in 1), shrink the EOA for the file */
+ if(H5MF__close_shrink_eoa(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSHRINK, FAIL, "can't shrink eoa")
+
+
+ /* 2) Ensure that space is allocated for the free space manager superblock
+ * extension message. Must do this now, before reallocating file space
+ * for free space managers, as it is possible that this allocation may
+ * grab the last section in a FSM -- making it unnecessary to
+ * re-allocate file space for it.
+ *
+ * Do this by writing a free space manager superblock extension message.
+ *
+ * Since no free space manager has file space allocated for it yet, this
+ * message must contain invalid FSM addresses, as we can't save addresses
+ * that are not yet known. This is OK -- we will write the correct
+ * values to the message at free space manager shutdown.
+ */
+ for(fsm_type = H5F_MEM_PAGE_SUPER; fsm_type < H5F_MEM_PAGE_NTYPES; H5_INC_ENUM(H5F_mem_page_t, fsm_type))
+ fsinfo.fs_addr[fsm_type - 1] = HADDR_UNDEF;
+ fsinfo.strategy = f->shared->fs_strategy;
+ fsinfo.persist = f->shared->fs_persist;
+ fsinfo.threshold = f->shared->fs_threshold;
+ fsinfo.page_size = f->shared->fs_page_size;
+ fsinfo.pgend_meta_thres = f->shared->pgend_meta_thres;
+ fsinfo.eoa_pre_fsm_fsalloc = HADDR_UNDEF;
+
+ if(H5F_super_ext_write_msg(f, dxpl_id, H5O_FSINFO_ID, &fsinfo, TRUE, H5O_MSG_FLAG_MARK_IF_UNKNOWN) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_WRITEERROR, FAIL, "error in writing fsinfo message to superblock extension")
+
+
+ /* 3) Scan all free space managers not involved in allocating
+ * space for free space managers. For each such free space
+ * manager, test to see if it contains free space. If
+ * it does, allocate file space for its header and section
+ * data. If it contains no free space, leave it without
+ * allocated file space as there is no need to save it to
+ * file.
+ *
+ * Note that all free space managers in this class should
+ * see no further space allocations / deallocations as
+ * at this point, all raw data allocations should be
+ * finalized, as should all metadata allocations not involving
+ * free space managers.
+ *
+ * We will allocate space for free space managers involved
+ * in the allocation of file space for free space managers
+ * in H5MF_settle_meta_data_fsm()
+ */
+
+ /* Reinitialize fsm_visited */
+ for(fsm_type = H5F_MEM_PAGE_SUPER; fsm_type < H5F_MEM_PAGE_NTYPES; H5_INC_ENUM(H5F_mem_page_t, fsm_type))
+ fsm_visited[fsm_type] = FALSE;
+
+ for(pass_count = 0; pass_count <= 1; pass_count++) {
+ if(pass_count == 0)
+ alloc_size = 1;
+ else if(H5F_PAGED_AGGR(f))
+ alloc_size = f->shared->fs_page_size + 1;
+ else /* no need for a second pass */
+ break;
+
+ for(mem_type = H5FD_MEM_SUPER; mem_type < H5FD_MEM_NTYPES; H5_INC_ENUM(H5F_mem_t, mem_type)) {
+ H5MF_alloc_to_fs_type(f, mem_type, alloc_size, &fsm_type);
+
+ if(pass_count == 0) { /* this is the first pass */
+ HDassert(fsm_type > H5F_MEM_PAGE_DEFAULT);
+ HDassert(fsm_type < H5F_MEM_PAGE_LARGE_SUPER);
+ } /* end if */
+ else if(H5F_PAGED_AGGR(f)) { /* page alloc active */
+ HDassert(fsm_type >= H5F_MEM_PAGE_LARGE_SUPER);
+ HDassert(fsm_type < H5F_MEM_PAGE_NTYPES);
+ } /* end else-if */
+ else /* paged allocation disabled -- should be unreachable */
+ HDassert(FALSE);
+
+ /* test to see if we need to switch rings -- do so if required */
+ if(H5MF__fsm_type_is_self_referential(f, fsm_type))
+ needed_ring = H5AC_RING_MDFSM;
+ else
+ needed_ring = H5AC_RING_RDFSM;
+
+ if(needed_ring != curr_ring) {
+ if(H5AC_set_ring(dxpl_id, needed_ring, &dxpl, &curr_ring)< 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value")
+ curr_ring = needed_ring;
+ } /* end if */
+
+ /* Since there can be a many-to-one mapping from memory types
+ * to free space managers, ensure that we don't visit any FSM
+ * more than once.
+ */
+ if(!fsm_visited[fsm_type]) {
+ fsm_visited[fsm_type] = TRUE;
+
+ if(f->shared->fs_man[fsm_type]) {
+ /* Only allocate file space if the target free space manager
+ * doesn't allocate file space for free space managers. Note
+ * that this is also the deciding factor as to whether a FSM
+ * is in the raw data FSM ring.
+ */
+ if(!H5MF__fsm_type_is_self_referential(f, fsm_type)) {
+ /* The current ring should be H5AC_RING_RDFSM */
+ HDassert(curr_ring == H5AC_RING_RDFSM);
+
+ /* Query free space manager info for this type */
+ if(H5FS_stat_info(f, f->shared->fs_man[fsm_type], &fs_stat) < 0 )
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "can't get free-space info")
+
+ /* If the free space manager contains section info,
+ * allocate space for the header and sinfo (note that
+ * space must not be allocated at present -- verify
+ * this with assertions).
+ */
+ if(fs_stat.serial_sect_count > 0) {
+ /* Sanity check */
+ HDassert(!H5F_addr_defined(fs_stat.addr));
+
+ /* Allocate FSM header */
+ if(H5FS_alloc_hdr(f, f->shared->fs_man[fsm_type], &f->shared->fs_addr[fsm_type], dxpl_id) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "can't allocated free-space header")
+
+ /* Allocate FSM section info */
+ HDassert(!H5F_addr_defined(fs_stat.sect_addr));
+ HDassert(fs_stat.alloc_sect_size == 0);
+ if(H5FS_alloc_sect(f, f->shared->fs_man[fsm_type], dxpl_id) < 0 )
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "can't allocate free-space section info")
+
+#ifndef NDEBUG
+ /* Re-Query free space manager info for this type */
+ if(H5FS_stat_info(f, f->shared->fs_man[fsm_type], &fs_stat) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't get free-space info")
+
+ HDassert(H5F_addr_defined(fs_stat.addr));
+ HDassert(H5F_addr_defined(fs_stat.sect_addr));
+ HDassert(fs_stat.serial_sect_count > 0);
+ HDassert(fs_stat.alloc_sect_size > 0);
+ HDassert(fs_stat.alloc_sect_size == fs_stat.sect_size);
+#endif /* NDEBUG */
+ } /* end if */
+ else {
+ HDassert(!H5F_addr_defined(fs_stat.addr));
+ HDassert(!H5F_addr_defined(fs_stat.sect_addr));
+ HDassert(fs_stat.serial_sect_count == 0);
+ HDassert(fs_stat.alloc_sect_size == 0);
+ } /* end else */
+ } /* end if */
+ } /* end if */
+
+ /* Close any opened FSMs */
+ if(fsm_opened[fsm_type]) {
+ if(H5MF__close_fstype(f, dxpl_id, fsm_type) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't close file free space manager")
+ fsm_opened[fsm_type] = FALSE;
+ } /* end if */
+ } /* end if */
+ } /* end for */
+ } /* end for */
+
+ /* verify that all opened FSMs were closed */
+ for(fsm_type = H5F_MEM_PAGE_SUPER; fsm_type < H5F_MEM_PAGE_NTYPES; H5_INC_ENUM(H5F_mem_page_t, fsm_type))
+ HDassert(!fsm_opened[fsm_type]);
+
+ /* Indicate that the FSM was settled successfully */
+ *fsm_settled = TRUE;
+ } /* end if */
+
+done:
+ /* Reset the ring in the DXPL */
+ if(reset_ring)
+ if(H5AC_reset_ring(dxpl, orig_ring) < 0)
+ HDONE_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set property value")
+
+ FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
+} /* H5MF_settle_raw_data_fsm() */
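As a rough sketch of how the two settle routines are meant to fit together at file close, the helper below simply drives them in the required order: raw data / non self referential FSMs first, then the self referential metadata FSMs. The wrapper name settle_fsms_before_flush is invented for illustration; the real call sites in the flush / close path carry additional cache bookkeeping that is omitted here.

/* Minimal illustrative sketch -- not part of this change. */
static herr_t
settle_fsms_before_flush(H5F_t *f, hid_t dxpl_id)
{
    hbool_t raw_settled  = FALSE;   /* TRUE iff raw data FSMs were settled */
    hbool_t meta_settled = FALSE;   /* TRUE iff metadata FSMs were settled */

    /* Step 1: free the aggregators, shrink the EOA, and re-allocate file
     * space for all FSMs that do not allocate space for FSMs.
     */
    if(H5MF_settle_raw_data_fsm(f, dxpl_id, &raw_settled) < 0)
        return FAIL;

    /* Step 2: allocate space for the self referential FSMs directly from
     * the VFD and record the pre / post allocation EOA values.
     */
    if(H5MF_settle_meta_data_fsm(f, dxpl_id, &meta_settled) < 0)
        return FAIL;

    /* Both flags are TRUE only if free space info is persisted and at
     * least one allocation / deallocation has occurred since file open.
     */
    return SUCCEED;
}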
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5MF_settle_meta_data_fsm()
+ *
+ * Purpose: If the free space manager is persistent, handle any tasks
+ * required before the metadata cache can serialize or flush
+ * the metadata free space manager(s) that handle file space
+ * allocation for free space managers.
+ *
+ * In most cases, there will be only one manager assigned
+ * to this role. However, since free space manager headers
+ * and section info blocks are (for reasons unknown) different
+ * classes of memory, it is possible that two free
+ * space managers will be involved.
+ *
+ * On entry to this function, the raw data settle routine
+ * (H5MF_settle_raw_data_fsm()) should have:
+ *
+ * 1) Freed the aggregators.
+ *
+ * 2) Freed all file space allocated to the free space managers.
+ *
+ * 3) Deleted the free space manager superblock extension message
+ *
+ * 4) Reduced the EOA to the extent possible.
+ *
+ * 5) Re-created the free space manager superblock extension
+ * message.
+ *
+ * 6) Reallocated file space for all non-empty free space
+ * managers NOT involved in allocation of space for free
+ * space managers.
+ *
+ * Note that these free space managers (if not empty) should
+ * have been written to file by this point, and that no
+ * further space allocations involving them should take
+ * place during file close.
+ *
+ * On entry to this routine, the free space manager(s) involved
+ * in allocation of file space for free space managers should
+ * still be floating (i.e. they should not have any file space
+ * allocated to them).
+ *
+ * Similarly, the raw data aggregator should not have been
+ * restarted. Note that it is probable that reallocation of
+ * space in 5) and 6) above will have re-started the metadata
+ * aggregator.
+ *
+ *
+ * In this routine, we proceed as follows:
+ *
+ * 1) Verify that the free space manager(s) involved in file
+ * space allocation for free space managers are still floating.
+ *
+ * 2) Free the aggregators.
+ *
+ * 3) Reduce the EOA to the extent possible, and make note
+ * of the resulting value. This value will be stored
+ * in the fsinfo superblock extension message and be used
+ * in the subsequent file open.
+ *
+ * 4) Re-allocate space for any free space manager(s) that:
+ *
+ * a) are involved in allocation of space for free space
+ * managers, and
+ *
+ * b) contain free space.
+ *
+ * It is possible that we could allocate space for one
+ * of these free space manager(s) only to have the allocation
+ * result in the free space manager being empty and thus
+ * obliging us to free the space again. Thus there is the
+ * potential for an infinite loop if we want to avoid saving
+ * empty free space managers.
+ *
+ * Similarly, it is possible that we could allocate space
+ * for a section info block, only to discover that this
+ * allocation has changed the size of the section info --
+ * forcing us to deallocate and start the loop over again.
+ *
+ * To avoid this, simply allocate file space for these
+ * FSM(s) directly from the VFD layer if allocation is
+ * indicated. This avoids the issue by bypassing the FSMs
+ * in this case.
+ *
+ * Note that this may increase the size of the file needlessly.
+ * A better solution would be to modify the FSM code to
+ * save empty FSMs to file, and to allow section info blocks
+ * to be oversized. However, given that the FSM code is
+ * also used by the fractal heaps, and that we are under
+ * severe time pressure at the moment, the above brute
+ * force solution is attractive.
+ *
+ * 5) Make note of the EOA -- used for sanity checking on
+ * FSM shutdown.
+ *
+ * Return: SUCCEED/FAIL
+ *
+ * Programmer: John Mainzer
+ * 5/25/16
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5MF_settle_meta_data_fsm(H5F_t *f, hid_t dxpl_id, hbool_t *fsm_settled)
+{
+ H5F_mem_page_t sm_fshdr_fs_type; /* small fs hdr fsm */
+ H5F_mem_page_t sm_fssinfo_fs_type; /* small fs sinfo fsm */
+ H5F_mem_page_t lg_fshdr_fs_type; /* large fs hdr fsm */
+ H5F_mem_page_t lg_fssinfo_fs_type; /* large fs sinfo fsm */
+ H5FS_t *sm_hdr_fspace = NULL; /* ptr to sm FSM hdr alloc FSM */
+ H5FS_t *sm_sinfo_fspace = NULL; /* ptr to sm FSM sinfo alloc FSM */
+ H5FS_t *lg_hdr_fspace = NULL; /* ptr to lg FSM hdr alloc FSM */
+ H5FS_t *lg_sinfo_fspace = NULL; /* ptr to lg FSM sinfo alloc FSM */
+ haddr_t eoa_pre_fsm_fsalloc; /* eoa pre file space allocation */
+ /* for self referential FSMs */
+ haddr_t eoa_post_fsm_fsalloc; /* eoa post file space allocation */
+ /* for self referential FSMs */
+ H5FS_stat_t fs_stat; /* Information for hdr FSM */
+ H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
+ H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
+ hbool_t reset_ring = FALSE; /* Whether we set the ring */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_TAG(dxpl_id, H5AC__FREESPACE_TAG, FAIL)
+
+ /* Check args */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(fsm_settled);
+
+ /* Only need to settle things if we are persisting the free space info
+ * and allocation/deallocation has occurred.
+ */
+ if(f->shared->fs_persist && !f->shared->first_alloc_dealloc) {
+ /* Sanity check */
+ HDassert(f->shared->lf);
+
+ /* should only be called if file is opened R/W */
+ HDassert(H5F_INTENT(f) & H5F_ACC_RDWR);
+
+ H5MF_alloc_to_fs_type(f, H5FD_MEM_FSPACE_HDR, (size_t)1, &sm_fshdr_fs_type);
+ H5MF_alloc_to_fs_type(f, H5FD_MEM_FSPACE_SINFO, (size_t)1, &sm_fssinfo_fs_type);
+
+ HDassert(sm_fshdr_fs_type > H5F_MEM_PAGE_DEFAULT);
+ HDassert(sm_fshdr_fs_type < H5F_MEM_PAGE_LARGE_SUPER);
+
+ HDassert(sm_fssinfo_fs_type > H5F_MEM_PAGE_DEFAULT);
+ HDassert(sm_fssinfo_fs_type < H5F_MEM_PAGE_LARGE_SUPER);
+
+ HDassert(!H5F_addr_defined(f->shared->fs_addr[sm_fshdr_fs_type]));
+ HDassert(!H5F_addr_defined(f->shared->fs_addr[sm_fssinfo_fs_type]));
+
+ /* Note that in most cases, sm_hdr_fspace will equal sm_sinfo_fspace. */
+ sm_hdr_fspace = f->shared->fs_man[sm_fshdr_fs_type];
+ sm_sinfo_fspace = f->shared->fs_man[sm_fssinfo_fs_type];
+
+ if(H5F_PAGED_AGGR(f)) {
+ H5MF_alloc_to_fs_type(f, H5FD_MEM_FSPACE_HDR, f->shared->fs_page_size + 1, &lg_fshdr_fs_type);
+ H5MF_alloc_to_fs_type(f, H5FD_MEM_FSPACE_SINFO, f->shared->fs_page_size + 1, &lg_fssinfo_fs_type);
+
+ HDassert(lg_fshdr_fs_type >= H5F_MEM_PAGE_LARGE_SUPER);
+ HDassert(lg_fshdr_fs_type < H5F_MEM_PAGE_NTYPES);
+
+ HDassert(lg_fssinfo_fs_type >= H5F_MEM_PAGE_LARGE_SUPER);
+ HDassert(lg_fssinfo_fs_type < H5F_MEM_PAGE_NTYPES);
+
+ HDassert(!H5F_addr_defined(f->shared->fs_addr[lg_fshdr_fs_type]));
+ HDassert(!H5F_addr_defined(f->shared->fs_addr[lg_fssinfo_fs_type]));
+
+ /* Note that in most cases, lg_hdr_fspace will equal lg_sinfo_fspace. */
+ lg_hdr_fspace = f->shared->fs_man[lg_fshdr_fs_type];
+ lg_sinfo_fspace = f->shared->fs_man[lg_fssinfo_fs_type];
+ } /* end if */
+
+ /* Set the ring in the dxpl appropriately for subsequent calls */
+ if(H5AC_set_ring(dxpl_id, H5AC_RING_MDFSM, &dxpl, &orig_ring) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value")
+ reset_ring = TRUE;
+
+#ifndef NDEBUG
+ /* Verify that sm_hdr_fspace is floating if it exists */
+ if(sm_hdr_fspace) {
+ /* Query free space manager info for this type */
+ if(H5FS_stat_info(f, sm_hdr_fspace, &fs_stat) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "can't get free-space info")
+
+ HDassert(!H5F_addr_defined(fs_stat.addr));
+ HDassert(!H5F_addr_defined(fs_stat.sect_addr));
+ HDassert(fs_stat.alloc_sect_size == 0);
+ } /* end if */
+
+ /* Verify that sm_sinfo_fspace is floating if it exists and is distinct */
+ if((sm_sinfo_fspace) && (sm_hdr_fspace != sm_sinfo_fspace)) {
+ /* Query free space manager info for this type */
+ if(H5FS_stat_info(f, sm_sinfo_fspace, &fs_stat) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "can't get free-space info")
+
+ HDassert(!H5F_addr_defined(fs_stat.addr));
+ HDassert(!H5F_addr_defined(fs_stat.sect_addr));
+ HDassert(fs_stat.alloc_sect_size == 0);
+ } /* end if */
+
+ if(H5F_PAGED_AGGR(f)) {
+ /* Verify that lg_hdr_fspace is floating if it exists */
+ if(lg_hdr_fspace) {
+ /* Query free space manager info for this type */
+ if(H5FS_stat_info(f, lg_hdr_fspace, &fs_stat) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "can't get free-space info (3)")
+
+ HDassert(!H5F_addr_defined(fs_stat.addr));
+ HDassert(!H5F_addr_defined(fs_stat.sect_addr));
+ HDassert(fs_stat.alloc_sect_size == 0);
+ } /* end if */
+
+ /* Verify that lg_sinfo_fspace is floating if it
+ * exists and is distinct
+ */
+ if((lg_sinfo_fspace) && (lg_hdr_fspace != lg_sinfo_fspace)) {
+ /* Query free space manager info for this type */
+ if(H5FS_stat_info(f, lg_sinfo_fspace, &fs_stat) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "can't get free-space info (4)")
+
+ HDassert(!H5F_addr_defined(fs_stat.addr));
+ HDassert(!H5F_addr_defined(fs_stat.sect_addr));
+ HDassert(fs_stat.alloc_sect_size == 0);
+ } /* end if */
+ } /* end if */
+#endif /* NDEBUG */
+
+ /* Free the space in the metadata aggregator. Do this via the
+ * H5MF_free_aggrs() call. Note that the raw data aggregator must
+ * have already been freed. Sanity checks for this?
+ *
+ * Note that the aggregators will not exist if paged aggregation
+ * is enabled -- don't attempt to free if this is the case.
+ */
+ /* Vailin -- is this correct? */
+ /* (for space not at EOF, it may be put into free space managers) */
+ if((!H5F_PAGED_AGGR(f)) && (H5MF_free_aggrs(f, dxpl_id) < 0))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTFREE, FAIL, "can't free aggregators")
+
+ /* Try shrinking the EOA for the file */
+ if(H5MF__close_shrink_eoa(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSHRINK, FAIL, "can't shrink eoa")
+
+ /* At this point, the EOA should be set to a value that contains
+ * the allocation for all user data, all non self referential FSMs,
+ * the superblock and all superblock extension messages.
+ *
+ * Make note of the current EOA. We will store this value in the
+ * free space manager superblock extension message. Since space for
+ * everything other than the self referential FSMs (and possibly the
+ * cache image) has been allocated at this point, this allows us to
+ * float the self referential FSMs on the first file space allocation /
+ * deallocation and then set the EOA to this value before we handle
+ * the allocation / deallocation. (If a cache image exists, the
+ * first allocation / deallocation will be the deallocation of space
+ * for the cache image).
+ *
+ * WARNING: This approach of settling the self referential free space
+ * managers and allocating space for them in the file will
+ * not work as currently implemented with the split and
+ * multi file drivers, as the self referential free space
+ * manager header and section info can be stored in up to
+ * two different files -- requiring that up to two EOA's
+ * be stored in the free space manager superblock
+ * extension message.
+ *
+ * As of this writing, we are solving this problem by
+ * simply not supporting persistent FSMs with the split
+ * and multi file drivers.
+ *
+ * Current plans are to do away with the multi file
+ * driver, so this should be a non-issue in this case.
+ *
+ * We should be able to support the split file driver
+ * without a file format change. However, the code to
+ * do so does not exist at present.
+ */
+ if(HADDR_UNDEF == (eoa_pre_fsm_fsalloc = H5FD_get_eoa(f->shared->lf, H5FD_MEM_DEFAULT)))
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to get EOA")
+
+
+ /* ******************* PROBLEM: ********************
+ *
+ * If the file has an alignment other than 1, and if
+ * the EOA is not a multiple of this alignment, allocating space
+ * for the section info via the VFD has the potential of generating
+ * a fragment that will be added to the free space manager. This
+ * of course undoes everything we have been doing here.
+ *
+ * Need a way around this. Obvious solution is to force the EOA to
+ * be a multiple of the alignment.
+ *
+ * Fortunately, alignment is typically 1, so this is a non-issue in
+ * most cases. In cases where the alignment is not 1, for now we
+ * have decided to drop the fragment on the floor.
+ *
+ * Eventually, we should fix this by modifying the on disk representations
+ * of free space managers to allow for empty space, so as to bypass the
+ * issues created by self-referential free space managers, and make
+ * this issue moot.
+ */
+ /* HDassert(f->shared->alignment == 1); */
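For a concrete example of the fragment in question: with an alignment of 4096 bytes and an EOA of 10000, the EOA lies 1808 bytes past the last aligned boundary, so a VFD allocation at the next aligned address leaves a 4096 - 1808 = 2288 byte fragment -- exactly the sort of fragment that is dropped on the floor for now.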
+
+
+ /* The free space manager(s) that handle space allocations for free
+ * space managers should be settled now, albeit without file space
+ * allocated to them. To avoid the possibility of changing the sizes
+ * of their section info blocks, allocate space for them now at the
+ * end of file via H5FD_alloc().
+ *
+ * In the past, this issue of allocating space without touching the
+ * free space managers has been dealt with by calling
+ * H5MF_aggr_vfd_alloc(), which in turn calls H5MF_aggr_alloc().
+ * This is problematic since (if I read the code correctly) it will
+ * re-constitute the metadata aggregator, which will add any leftover
+ * space to one of the free space managers when freed.
+ *
+ * This is a non-starter, since the entire objective is to settle the
+ * free space managers.
+ *
+ * Hence the decision to call H5FD_alloc() directly.
+ *
+ * As discussed in PROBLEM above, if f->shared->alignment is not 1,
+ * this has the possibility of generating a fragment of file space
+ * that would typically be inserted into one of the free space managers.
+ *
+ * This isn't good, but due to schedule pressure, we will just drop
+ * the fragment on the floor for now.
+ */
+ if(sm_hdr_fspace)
+ if(H5FS_vfd_alloc_hdr_and_section_info_if_needed(f, dxpl_id, sm_hdr_fspace, &(f->shared->fs_addr[sm_fshdr_fs_type])) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "can't vfd allocate sm hdr FSM file space")
+
+ if(sm_sinfo_fspace && (sm_sinfo_fspace != sm_hdr_fspace))
+ if(H5FS_vfd_alloc_hdr_and_section_info_if_needed(f, dxpl_id, sm_sinfo_fspace, &(f->shared->fs_addr[sm_fssinfo_fs_type])) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "can't vfd allocate sm sinfo FSM file space")
+
+ if(H5F_PAGED_AGGR(f)) {
+ if(lg_hdr_fspace)
+ if(H5FS_vfd_alloc_hdr_and_section_info_if_needed(f, dxpl_id, lg_hdr_fspace, &(f->shared->fs_addr[lg_fshdr_fs_type])) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "can't vfd allocate lg hdr FSM file space")
+
+ if(lg_sinfo_fspace && (lg_sinfo_fspace != lg_hdr_fspace))
+ if(H5FS_vfd_alloc_hdr_and_section_info_if_needed(f, dxpl_id, lg_sinfo_fspace, &(f->shared->fs_addr[lg_fssinfo_fs_type])) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, FAIL, "can't vfd allocate lg sinfo FSM file space")
+ } /* end if */
+
+ /* Get the eoa after allocation of file space for the self referential
+ * free space managers. Assuming no cache image, this should be the
+ * final EOA of the file.
+ */
+ if(HADDR_UNDEF == (eoa_post_fsm_fsalloc = H5FD_get_eoa(f->shared->lf, H5FD_MEM_DEFAULT)))
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to get file size")
+
+ /* All free space managers should have file space allocated for them
+ * now, and should see no further allocations / deallocations. Store
+ * the pre and post self referential FSM file space allocation EOAs
+ * for use when we actually write the free space manager superblock
+ * extension message.
+ */
+ f->shared->eoa_pre_fsm_fsalloc = eoa_pre_fsm_fsalloc;
+ f->shared->eoa_post_fsm_fsalloc = eoa_post_fsm_fsalloc;
+
+ /* Indicate that the FSM was settled successfully */
+ *fsm_settled = TRUE;
+ } /* end if */
+
+done:
+ if(reset_ring)
+ if(H5AC_reset_ring(dxpl, orig_ring) < 0)
+ HDONE_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set property value")
+
+ FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
+} /* H5MF_settle_meta_data_fsm() */
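To make the EOA bookkeeping above concrete, here is a small hypothetical check (not HDF5 code) stating the invariant that H5MF_settle_meta_data_fsm() leaves behind when it reports *fsm_settled as TRUE, assuming no cache image:

/* Hypothetical invariant check, for illustration only. */
static void
check_self_ref_fsm_eoa(const H5F_t *f)
{
    /* Both EOA values were recorded by the settle routine above */
    HDassert(H5F_addr_defined(f->shared->eoa_pre_fsm_fsalloc));
    HDassert(H5F_addr_defined(f->shared->eoa_post_fsm_fsalloc));

    /* The interval [eoa_pre_fsm_fsalloc, eoa_post_fsm_fsalloc) is exactly
     * the space handed to the self referential FSMs via H5FD_alloc(); it
     * is empty if those FSMs contained no free space to save.
     */
    HDassert(H5F_addr_le(f->shared->eoa_pre_fsm_fsalloc,
                         f->shared->eoa_post_fsm_fsalloc));
}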
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5MF__fsm_type_is_self_referential()
+ *
+ * Purpose: Return TRUE if the indicated free space manager allocates
+ * file space for free space managers. Return FALSE otherwise.
+ *
+ * Return: TRUE/FALSE
+ *
+ * Programmer: John Mainzer
+ * 12/6/16
+ *
+ *-------------------------------------------------------------------------
+ */
+hbool_t
+H5MF__fsm_type_is_self_referential(H5F_t *f, H5F_mem_page_t fsm_type)
+{
+ H5F_mem_page_t sm_fshdr_fsm;
+ H5F_mem_page_t sm_fssinfo_fsm;
+ H5F_mem_page_t lg_fshdr_fsm;
+ H5F_mem_page_t lg_fssinfo_fsm;
+ hbool_t result = FALSE;
+
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ /* Sanity check */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(fsm_type >= H5F_MEM_PAGE_DEFAULT);
+ HDassert(fsm_type < H5F_MEM_PAGE_NTYPES);
+
+ H5MF_alloc_to_fs_type(f, H5FD_MEM_FSPACE_HDR, (size_t)1, &sm_fshdr_fsm);
+ H5MF_alloc_to_fs_type(f, H5FD_MEM_FSPACE_SINFO, (size_t)1, &sm_fssinfo_fsm);
+
+ if(H5F_PAGED_AGGR(f)) {
+ H5MF_alloc_to_fs_type(f, H5FD_MEM_FSPACE_HDR, f->shared->fs_page_size + 1, &lg_fshdr_fsm);
+ H5MF_alloc_to_fs_type(f, H5FD_MEM_FSPACE_SINFO, f->shared->fs_page_size + 1, &lg_fssinfo_fsm);
+
+ result = (fsm_type == sm_fshdr_fsm) || (fsm_type == sm_fssinfo_fsm)
+ || (fsm_type == lg_fshdr_fsm) || (fsm_type == lg_fssinfo_fsm);
+ } /* end if */
+ else {
+ /* In principle, fsm_type should always be less than
+ * H5F_MEM_PAGE_LARGE_SUPER whenever paged aggregation
+ * is not enabled. However, since there is code that does
+ * not observe this principle, force the result to FALSE if
+ * fsm_type is greater than or equal to H5F_MEM_PAGE_LARGE_SUPER.
+ */
+ if(fsm_type >= H5F_MEM_PAGE_LARGE_SUPER)
+ result = FALSE;
+ else
+ result = (fsm_type == sm_fshdr_fsm) || (fsm_type == sm_fssinfo_fsm);
+ } /* end else */
+
+ FUNC_LEAVE_NOAPI(result)
+} /* H5MF__fsm_type_is_self_referential() */
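A typical use of this predicate, mirroring the settle routines above, is to select the metadata cache ring before touching an FSM:

H5AC_ring_t needed_ring;            /* Ring value needed for this FSM */

if(H5MF__fsm_type_is_self_referential(f, fsm_type))
    needed_ring = H5AC_RING_MDFSM;  /* FSMs that allocate space for FSMs */
else
    needed_ring = H5AC_RING_RDFSM;  /* all other FSMs, e.g. raw data */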
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5MF__fsm_is_self_referential()
+ *
+ * Purpose: Return TRUE if the indicated free space manager allocates
+ * file space for free space managers. Return FALSE otherwise.
+ *
+ * Return: TRUE/FALSE
+ *
+ * Programmer: John Mainzer
+ * 12/6/16
+ *
+ *-------------------------------------------------------------------------
+ */
+static hbool_t
+H5MF__fsm_is_self_referential(H5F_t *f, H5FS_t *fspace)
+{
+ H5F_mem_page_t sm_fshdr_fsm;
+ H5F_mem_page_t sm_fssinfo_fsm;
+ hbool_t result = FALSE;
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity check */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(fspace);
+
+ H5MF_alloc_to_fs_type(f, H5FD_MEM_FSPACE_HDR, (size_t)1, &sm_fshdr_fsm);
+ H5MF_alloc_to_fs_type(f, H5FD_MEM_FSPACE_SINFO, (size_t)1, &sm_fssinfo_fsm);
+
+ if(H5F_PAGED_AGGR(f)) {
+ H5F_mem_page_t lg_fshdr_fsm;
+ H5F_mem_page_t lg_fssinfo_fsm;
+
+ H5MF_alloc_to_fs_type(f, H5FD_MEM_FSPACE_HDR, f->shared->fs_page_size + 1, &lg_fshdr_fsm);
+ H5MF_alloc_to_fs_type(f, H5FD_MEM_FSPACE_SINFO, f->shared->fs_page_size + 1, &lg_fssinfo_fsm);
+
+ result = (fspace == f->shared->fs_man[sm_fshdr_fsm]) ||
+ (fspace == f->shared->fs_man[sm_fssinfo_fsm]) ||
+ (fspace == f->shared->fs_man[lg_fshdr_fsm]) ||
+ (fspace == f->shared->fs_man[lg_fssinfo_fsm]);
+ } /* end if */
+ else
+ result = (fspace == f->shared->fs_man[sm_fshdr_fsm]) ||
+ (fspace == f->shared->fs_man[sm_fssinfo_fsm]);
+
+ FUNC_LEAVE_NOAPI(result)
+} /* H5MF__fsm_is_self_referential() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5MF_tidy_self_referential_fsm_hack
+ *
+ * Purpose: As discussed in the comments of the settle routines above,
+ * the existence of self referential free space managers
+ * as currently implemented creates the possibility of
+ * infinite loops at file close.
+ *
+ * As a hack to avoid this, we have added code to settle
+ * self referential free space managers, and then allocate
+ * space for them directly from the file driver.
+ *
+ * To avoid dropping ever increasing amounts of file space
+ * on the floor with each subsequent file close/open cycle,
+ * we need to clean this up on file open. To that end,
+ * this function is called on the first file space allocation
+ * or deallocation after file open to float the self referential
+ * free space managers and reduce the EOA to the value it
+ * had before the direct allocation of space for the self
+ * referential free space managers.
+ *
+ * The function proceeds as follows:
+ *
+ * 1) Verify that f->shared->first_alloc_dealloc is TRUE,
+ * and then set it to FALSE.
+ *
+ * 2) Get the current EOA. Verify that it is greater than
+ * or equal to f->shared->eoa_pre_fsm_fsalloc. If the
+ * current eoa is equal to f->shared->eoa_pre_fsm_fsalloc,
+ * no self referential FSMs were stored, and we are done.
+ *
+ * NOTE: This will have to be reworked somewhat for
+ * cache image.
+ *
+ * 3) Load the self referential FSMs. In passing verify that
+ * the lowest address of a FSM header is equal to
+ * f->shared->eoa_pre_fsm_fsalloc.
+ *
+ * Note that we don't have to use any special I/O for
+ * this -- we can use the regular I/O methods even if
+ * paged aggregation and page buffering are enabled.
+ *
+ * 4) Float the FSMs. Ensure that the file space is NOT
+ * released.
+ *
+ * 5) Set EOA equal to f->shared->eoa_pre_fsm_fsalloc,
+ * and then set f->shared->eoa_pre_fsm_fsalloc to
+ * HADDR_UNDEF.
+ *
+ * If page buffering is enabled, verify that the new EOA is
+ * on a page boundary, and expunge any pages in the
+ * page buffer after the new EOA.
+ *
+ * Note that this function is also called from test code
+ * when it is necessary to start up a self referential
+ * free space manager prior to the first file space
+ * allocation / deallocation. Failure to do so will
+ * result in assertion failures in this function on
+ * the first file space allocation / deallocation.
+ *
+ * Return: SUCCEED/FAIL
+ *
+ * Programmer: John Mainzer
+ * 12/11/16
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5MF_tidy_self_referential_fsm_hack(H5F_t *f, hid_t dxpl_id)
+{
+ haddr_t eoa; /* EOA of file */
+ hsize_t tail_size = 0; /* Size of chunk to free */
+ H5P_genplist_t *dxpl = NULL; /* DXPL for setting ring */
+ H5AC_ring_t orig_ring = H5AC_RING_INV; /* Original ring value */
+ haddr_t first_srfsm_hdr = HADDR_UNDEF; /* Addr of first self referential */
+ /* fsm header in file */
+ H5FS_stat_t fs_stat; /* Information for hdr FSM */
+ H5F_mem_page_t sm_fshdr_fs_type; /* Small fs hdr fsm */
+ H5F_mem_page_t sm_fssinfo_fs_type; /* Small fs sinfo fsm */
+ H5F_mem_page_t lg_fshdr_fs_type; /* Large fs hdr fsm */
+ H5F_mem_page_t lg_fssinfo_fs_type; /* Large fs sinfo fsm */
+ hbool_t reset_ring = FALSE; /* Whether the ring was set */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_TAG(dxpl_id, H5AC__FREESPACE_TAG, FAIL)
+
+ /* check args */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(f->shared->fs_persist);
+ HDassert(f->shared->first_alloc_dealloc);
+
+ /* Set the ring type in the DXPL. Since we are only dealing with
+ * self referential FSMs, we will only need H5AC_RING_MDFSM.
+ */
+ if(H5AC_set_ring(dxpl_id, H5AC_RING_MDFSM, &dxpl, &orig_ring) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set ring value")
+ reset_ring = TRUE;
+
+ /* 1) Verify that f->shared->first_alloc_dealloc is TRUE,
+ * and then set it to FALSE.
+ */
+ HDassert(f->shared->first_alloc_dealloc);
+ f->shared->first_alloc_dealloc = FALSE;
+
+
+ /* 2) Get the current EOA. Verify that it is greater than
+ * or equal to f->shared->eoa_pre_fsm_fsalloc. If the
+ * current eoa is equal to f->shared->eoa_pre_fsm_fsalloc,
+ * no self referential FSMs were stored, and we are done.
+ *
+ * NOTE: This will have to be reworked somewhat for
+ * cache image.
+ */
+ if(HADDR_UNDEF == (eoa = H5FD_get_eoa(f->shared->lf, H5FD_MEM_DEFAULT)))
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to get EOA")
+ HDassert(H5F_addr_le(f->shared->eoa_pre_fsm_fsalloc, eoa));
+
+ if(H5F_addr_eq(f->shared->eoa_pre_fsm_fsalloc, eoa))
+ HGOTO_DONE(SUCCEED)
+
+
+ /* 3) Load the self referential FSMs. In passing verify that
+ * the lowest address of a FSM header is equal to
+ * f->shared->eoa_pre_fsm_fsalloc.
+ *
+ * Note that we don't have to use any special I/O for
+ * this -- we can use the regular I/O methods even if
+ * paged aggregation and page buffering are enabled.
+ */
+ H5MF_alloc_to_fs_type(f, H5FD_MEM_FSPACE_HDR, (size_t)1, &sm_fshdr_fs_type);
+ H5MF_alloc_to_fs_type(f, H5FD_MEM_FSPACE_SINFO, (size_t)1, &sm_fssinfo_fs_type);
+ HDassert(sm_fshdr_fs_type > H5F_MEM_PAGE_DEFAULT);
+ HDassert(sm_fshdr_fs_type < H5F_MEM_PAGE_LARGE_SUPER);
+
+ HDassert(sm_fssinfo_fs_type > H5F_MEM_PAGE_DEFAULT);
+ HDassert(sm_fssinfo_fs_type < H5F_MEM_PAGE_LARGE_SUPER);
+
+ HDassert(NULL == f->shared->fs_man[sm_fshdr_fs_type]);
+ HDassert(NULL == f->shared->fs_man[sm_fssinfo_fs_type]);
+
+ if(H5F_addr_defined(f->shared->fs_addr[sm_fshdr_fs_type])) {
+ first_srfsm_hdr = f->shared->fs_addr[sm_fshdr_fs_type];
+
+ /* open the FSM */
+ if(H5MF_open_fstype(f, dxpl_id, sm_fshdr_fs_type) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't initialize file free space manager")
+
+ HDassert(f->shared->fs_man[sm_fshdr_fs_type]);
+ } /* end if */
+
+ if((sm_fshdr_fs_type != sm_fssinfo_fs_type) &&
+ (H5F_addr_defined(f->shared->fs_addr[sm_fssinfo_fs_type]))) {
+
+ if(!H5F_addr_defined(first_srfsm_hdr) ||
+ (H5F_addr_defined(first_srfsm_hdr) &&
+ H5F_addr_lt(f->shared->fs_addr[sm_fssinfo_fs_type], first_srfsm_hdr)))
+ first_srfsm_hdr = f->shared->fs_addr[sm_fssinfo_fs_type];
+
+ HDassert(NULL == f->shared->fs_man[sm_fssinfo_fs_type]);
+
+ /* open the FSM */
+ if(H5MF_open_fstype(f, dxpl_id, sm_fssinfo_fs_type) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't initialize file free space manager")
+
+ HDassert(f->shared->fs_man[sm_fssinfo_fs_type]);
+ } /* end if */
+
+ if(H5F_PAGED_AGGR(f)) {
+ H5MF_alloc_to_fs_type(f, H5FD_MEM_FSPACE_HDR, f->shared->fs_page_size + 1, &lg_fshdr_fs_type);
+ H5MF_alloc_to_fs_type(f, H5FD_MEM_FSPACE_SINFO, f->shared->fs_page_size + 1, &lg_fssinfo_fs_type);
+
+ HDassert(lg_fshdr_fs_type >= H5F_MEM_PAGE_LARGE_SUPER);
+ HDassert(lg_fshdr_fs_type < H5F_MEM_PAGE_NTYPES);
+
+ HDassert(lg_fssinfo_fs_type >= H5F_MEM_PAGE_LARGE_SUPER);
+ HDassert(lg_fssinfo_fs_type < H5F_MEM_PAGE_NTYPES);
+
+ HDassert(NULL == f->shared->fs_man[lg_fshdr_fs_type]);
+ HDassert(NULL == f->shared->fs_man[lg_fssinfo_fs_type]);
+
+ if(H5F_addr_defined(f->shared->fs_addr[lg_fshdr_fs_type])) {
+ if(!H5F_addr_defined(first_srfsm_hdr) ||
+ (H5F_addr_defined(first_srfsm_hdr) &&
+ H5F_addr_lt(f->shared->fs_addr[lg_fshdr_fs_type], first_srfsm_hdr)))
+ first_srfsm_hdr = f->shared->fs_addr[lg_fshdr_fs_type];
+
+ HDassert(NULL == f->shared->fs_man[lg_fshdr_fs_type]);
+
+ /* open the FSM */
+ if(H5MF_open_fstype(f, dxpl_id, lg_fshdr_fs_type) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't initialize file free space manager")
+ HDassert(f->shared->fs_man[lg_fshdr_fs_type]);
+ } /* end if */
+
+ if(lg_fshdr_fs_type != lg_fssinfo_fs_type && H5F_addr_defined(f->shared->fs_addr[lg_fssinfo_fs_type])) {
+ if(!H5F_addr_defined(first_srfsm_hdr) ||
+ (H5F_addr_defined(first_srfsm_hdr) &&
+ H5F_addr_lt(f->shared->fs_addr[lg_fssinfo_fs_type], first_srfsm_hdr)))
+ first_srfsm_hdr = f->shared->fs_addr[lg_fssinfo_fs_type];
+
+ HDassert(NULL == f->shared->fs_man[lg_fssinfo_fs_type]);
+
+ /* open the FSM */
+ if(H5MF_open_fstype(f, dxpl_id, lg_fssinfo_fs_type) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't initialize file free space manager")
+ HDassert(f->shared->fs_man[lg_fssinfo_fs_type]);
+ } /* end if */
+ } /* end if */
+ HDassert(H5F_addr_eq(first_srfsm_hdr, f->shared->eoa_pre_fsm_fsalloc));
+
+ /* 4) Float the FSMs. Ensure that the file space is NOT released. */
+ if(f->shared->fs_man[sm_fshdr_fs_type]) {
+ /* Sanity check: Query free space manager info for this type */
+ if(H5FS_stat_info(f, f->shared->fs_man[sm_fshdr_fs_type], &fs_stat) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't get free-space info")
+
+ HDassert(H5F_addr_defined(fs_stat.addr));
+ HDassert(H5F_addr_defined(fs_stat.sect_addr));
+ if(H5FS_free(f, f->shared->fs_man[sm_fshdr_fs_type], dxpl_id, FALSE) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't release free-space headers")
+ f->shared->fs_addr[sm_fshdr_fs_type] = HADDR_UNDEF;
+ } /* end if */
+
+ if(sm_fshdr_fs_type != sm_fssinfo_fs_type && f->shared->fs_man[sm_fssinfo_fs_type]) {
+ /* Sanity check: Query free space manager info for this type */
+ if(H5FS_stat_info(f, f->shared->fs_man[sm_fssinfo_fs_type], &fs_stat) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't get free-space info")
+
+ HDassert(H5F_addr_defined(fs_stat.addr));
+ HDassert(H5F_addr_defined(fs_stat.sect_addr));
+ if(H5FS_free(f, f->shared->fs_man[sm_fssinfo_fs_type], dxpl_id, FALSE) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't release free-space headers")
+ f->shared->fs_addr[sm_fssinfo_fs_type] = HADDR_UNDEF;
+ } /* end if */
+
+ if(H5F_PAGED_AGGR(f)) {
+ if(f->shared->fs_man[lg_fshdr_fs_type]) {
+ /* Sanity check: Query free space manager info for this type */
+ if(H5FS_stat_info(f, f->shared->fs_man[lg_fshdr_fs_type], &fs_stat) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "can't get free-space info")
+
+ HDassert(H5F_addr_defined(fs_stat.addr));
+ HDassert(H5F_addr_defined(fs_stat.sect_addr));
+ if(H5FS_free(f, f->shared->fs_man[lg_fshdr_fs_type], dxpl_id, FALSE) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't float free-space headers")
+ f->shared->fs_addr[lg_fshdr_fs_type] = HADDR_UNDEF;
+ } /* end if */
+
+ if(lg_fshdr_fs_type != lg_fssinfo_fs_type && f->shared->fs_man[lg_fssinfo_fs_type]) {
+ /* Sanity check: Query free space manager info for this type */
+ if(H5FS_stat_info(f, f->shared->fs_man[lg_fssinfo_fs_type], &fs_stat) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "can't get free-space info")
+
+ HDassert(H5F_addr_defined(fs_stat.addr));
+ HDassert(H5F_addr_defined(fs_stat.sect_addr));
+ if(H5FS_free(f, f->shared->fs_man[lg_fssinfo_fs_type], dxpl_id, FALSE) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't float free-space headers")
+ f->shared->fs_addr[lg_fssinfo_fs_type] = HADDR_UNDEF;
+ } /* end if */
+ } /* end if */
+
+ /* 5) Set EOA equal to f->shared->eoa_pre_fsm_fsalloc,
+ * and then set f->shared->eoa_pre_fsm_fsalloc to
+ * HADDR_UNDEF.
+ *
+ * If page buffering is enabled, verify that the new EOA is
+ * on a page boundary, and expunge any pages in the
+ * page buffer after the new EOA.
+ */
+ if(!H5F_PAGED_AGGR(f)) {
+ /* Verify that the aggregators are still shutdown. */
+ HDassert(f->shared->sdata_aggr.tot_size == 0);
+ HDassert(f->shared->sdata_aggr.addr == 0);
+ HDassert(f->shared->sdata_aggr.size == 0);
+
+ HDassert(f->shared->meta_aggr.tot_size == 0);
+ HDassert(f->shared->meta_aggr.addr == 0);
+ HDassert(f->shared->meta_aggr.size == 0);
+ } /* end if */
+
+ tail_size = (hsize_t)(eoa - f->shared->eoa_pre_fsm_fsalloc);
+
+ /* Release file space allocated to self referential FSMs */
+ if(H5F_free(f, dxpl_id, H5FD_MEM_DEFAULT, f->shared->eoa_pre_fsm_fsalloc, tail_size) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTFREE, FAIL, "driver free request failed")
+ if(HADDR_UNDEF == (eoa = H5FD_get_eoa(f->shared->lf, H5FD_MEM_DEFAULT)))
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, FAIL, "unable to get EOA")
+ HDassert(H5F_addr_eq(f->shared->eoa_pre_fsm_fsalloc, eoa));
+
+ f->shared->eoa_pre_fsm_fsalloc = HADDR_UNDEF;
+
+ HDassert((!H5F_PAGED_AGGR(f)) || (0 == (eoa % f->shared->fs_page_size)));
+
+done:
+ /* Reset the ring in the DXPL */
+ if(reset_ring)
+ if(H5AC_reset_ring(dxpl, orig_ring) < 0)
+ HDONE_ERROR(H5E_RESOURCE, H5E_CANTSET, FAIL, "unable to set property value")
+
+ FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
+} /* H5MF_tidy_self_referential_fsm_hack() */
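For orientation, the sketch below shows the kind of guard a file space allocation or deallocation entry point might use to trigger this routine exactly once after file open. The helper name and the simplified condition are illustrative only; the actual H5MF allocation entry points carry additional state not shown here.

/* Illustrative guard only -- not the real H5MF_alloc() / H5MF_xfree() logic. */
static herr_t
tidy_if_first_alloc_dealloc(H5F_t *f, hid_t dxpl_id)
{
    /* Run the tidy hack once, on the first allocation or deallocation
     * after file open, and only when free space info is persisted.
     */
    if(f->shared->fs_persist && f->shared->first_alloc_dealloc)
        if(H5MF_tidy_self_referential_fsm_hack(f, dxpl_id) < 0)
            return FAIL;

    return SUCCEED;
}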
+
diff --git a/src/H5MFaggr.c b/src/H5MFaggr.c
index c45b473..7c05e0d 100644
--- a/src/H5MFaggr.c
+++ b/src/H5MFaggr.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -57,8 +55,10 @@
/********************/
/* Local Prototypes */
/********************/
-static herr_t H5MF_aggr_free(H5F_t *f, hid_t dxpl_id, H5FD_mem_t type,
+static herr_t H5MF__aggr_free(H5F_t *f, hid_t dxpl_id, H5FD_mem_t type,
H5F_blk_aggr_t *aggr);
+static haddr_t H5MF__aggr_alloc(H5F_t *f, hid_t dxpl_id, H5F_blk_aggr_t *aggr,
+ H5F_blk_aggr_t *other_aggr, H5FD_mem_t type, hsize_t size);
/*********************/
@@ -80,7 +80,7 @@ static herr_t H5MF_aggr_free(H5F_t *f, hid_t dxpl_id, H5FD_mem_t type,
/*-------------------------------------------------------------------------
* Function: H5MF_aggr_vfd_alloc
*
- * Purpose: Allocate SIZE bytes of file memory via H5MF_aggr_alloc()
+ * Purpose: Allocate SIZE bytes of file memory via H5MF__aggr_alloc()
* and return the relative address where that contiguous chunk
* of file memory exists.
* The TYPE argument describes the purpose for which the storage
@@ -100,9 +100,9 @@ H5MF_aggr_vfd_alloc(H5F_t *f, H5FD_mem_t alloc_type, hid_t dxpl_id, hsize_t size
haddr_t ret_value = HADDR_UNDEF; /* Return value */
FUNC_ENTER_NOAPI(HADDR_UNDEF)
-#ifdef H5MF_ALLOC_DEBUG
+#ifdef H5MF_AGGR_DEBUG
HDfprintf(stderr, "%s: alloc_type = %u, size = %Hu\n", FUNC, (unsigned)alloc_type, size);
-#endif /* H5MF_ALLOC_DEBUG */
+#endif /* H5MF_AGGR_DEBUG */
/* check arguments */
HDassert(f);
@@ -113,12 +113,12 @@ HDfprintf(stderr, "%s: alloc_type = %u, size = %Hu\n", FUNC, (unsigned)alloc_typ
/* Couldn't find anything from the free space manager, go allocate some */
if(alloc_type != H5FD_MEM_DRAW && alloc_type != H5FD_MEM_GHEAP) {
/* Handle metadata differently from "raw" data */
- if(HADDR_UNDEF == (ret_value = H5MF_aggr_alloc(f, dxpl_id, &(f->shared->meta_aggr), &(f->shared->sdata_aggr), alloc_type, size)))
+ if(HADDR_UNDEF == (ret_value = H5MF__aggr_alloc(f, dxpl_id, &(f->shared->meta_aggr), &(f->shared->sdata_aggr), alloc_type, size)))
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, HADDR_UNDEF, "can't allocate metadata")
} /* end if */
else {
/* Allocate "raw" data: H5FD_MEM_DRAW and H5FD_MEM_GHEAP */
- if(HADDR_UNDEF == (ret_value = H5MF_aggr_alloc(f, dxpl_id, &(f->shared->sdata_aggr), &(f->shared->meta_aggr), H5FD_MEM_DRAW, size)))
+ if(HADDR_UNDEF == (ret_value = H5MF__aggr_alloc(f, dxpl_id, &(f->shared->sdata_aggr), &(f->shared->meta_aggr), H5FD_MEM_DRAW, size)))
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, HADDR_UNDEF, "can't allocate raw data")
} /* end else */
@@ -126,19 +126,16 @@ HDfprintf(stderr, "%s: alloc_type = %u, size = %Hu\n", FUNC, (unsigned)alloc_typ
HDassert(H5F_addr_le((ret_value + size), f->shared->tmp_addr));
done:
-#ifdef H5MF_ALLOC_DEBUG
+#ifdef H5MF_AGGR_DEBUG
HDfprintf(stderr, "%s: Leaving: ret_value = %a, size = %Hu\n", FUNC, ret_value, size);
-#endif /* H5MF_ALLOC_DEBUG */
-#ifdef H5MF_ALLOC_DEBUG_DUMP
-H5MF_sects_dump(f, dxpl_id, stderr);
-#endif /* H5MF_ALLOC_DEBUG_DUMP */
+#endif /* H5MF_AGGR_DEBUG */
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5MF_aggr_vfd_alloc() */
/*-------------------------------------------------------------------------
- * Function: H5MF_aggr_alloc
+ * Function: H5MF__aggr_alloc
*
* Purpose: Try to allocate SIZE bytes of memory from an aggregator
* block if possible.
@@ -151,16 +148,16 @@ H5MF_sects_dump(f, dxpl_id, stderr);
*
*-------------------------------------------------------------------------
*/
-haddr_t
-H5MF_aggr_alloc(H5F_t *f, hid_t dxpl_id, H5F_blk_aggr_t *aggr,
+static haddr_t
+H5MF__aggr_alloc(H5F_t *f, hid_t dxpl_id, H5F_blk_aggr_t *aggr,
H5F_blk_aggr_t *other_aggr, H5FD_mem_t type, hsize_t size)
{
- haddr_t eoa_frag_addr = HADDR_UNDEF; /* Address of fragment at EOA */
- hsize_t eoa_frag_size = 0; /* Size of fragment at EOA */
- haddr_t eoa = HADDR_UNDEF; /* Initial EOA for the file */
+ haddr_t eoa_frag_addr = HADDR_UNDEF; /* Address of fragment at EOA */
+ hsize_t eoa_frag_size = 0; /* Size of fragment at EOA */
+ haddr_t eoa = HADDR_UNDEF; /* Initial EOA for the file */
haddr_t ret_value = HADDR_UNDEF; /* Return value */
- FUNC_ENTER_NOAPI(HADDR_UNDEF)
+ FUNC_ENTER_STATIC
#ifdef H5MF_AGGR_DEBUG
HDfprintf(stderr, "%s: type = %u, size = %Hu\n", FUNC, (unsigned)type, size);
#endif /* H5MF_AGGR_DEBUG */
@@ -180,11 +177,11 @@ HDfprintf(stderr, "%s: type = %u, size = %Hu\n", FUNC, (unsigned)type, size);
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, HADDR_UNDEF, "Unable to get eoa")
/*
- * If the aggregation feature is enabled for this file and strategy is not H5F_FILE_SPACE_VFD,
+ * If the aggregation feature is enabled for this file and strategy is not H5F_FILE_SPACE_NONE,
* allocate "generic" space and sub-allocate out of that, if possible.
- * Otherwise just allocate through H5FD_alloc().
+ * Otherwise just allocate through H5F_alloc().
*/
- if((f->shared->feature_flags & aggr->feature_flag) && f->shared->fs_strategy != H5F_FILE_SPACE_VFD) {
+ if((f->shared->feature_flags & aggr->feature_flag) && f->shared->fs_strategy != H5F_FSPACE_STRATEGY_NONE) {
haddr_t aggr_frag_addr = HADDR_UNDEF; /* Address of aggregrator fragment */
hsize_t aggr_frag_size = 0; /* Size of aggregator fragment */
hsize_t alignment; /* Alignment of this section */
@@ -196,12 +193,12 @@ HDfprintf(stderr, "%s: aggr = {%a, %Hu, %Hu}\n", FUNC, aggr->addr, aggr->tot_siz
#endif /* H5MF_AGGR_DEBUG */
/* Turn off alignment if allocation < threshold */
- alignment = f->shared->alignment;
- if(!((alignment > 1) && (size >= f->shared->threshold)))
+ alignment = H5F_ALIGNMENT(f);
+ if(!((alignment > 1) && (size >= H5F_THRESHOLD(f))))
alignment = 0; /* no alignment */
/* Generate fragment if aggregator is mis-aligned */
- if(alignment && aggr->addr > 0 && (aggr_mis_align = (aggr->addr + H5FD_get_base_addr(f->shared->lf)) % alignment)) {
+ if(alignment && H5F_addr_gt(aggr->addr, 0) && (aggr_mis_align = (aggr->addr + H5F_BASE_ADDR(f)) % alignment)) {
aggr_frag_addr = aggr->addr;
aggr_frag_size = alignment - aggr_mis_align;
} /* end if */
@@ -211,7 +208,7 @@ HDfprintf(stderr, "%s: aggr = {%a, %Hu, %Hu}\n", FUNC, aggr->addr, aggr->tot_siz
/* Check if the space requested is larger than the space left in the block */
if((size + aggr_frag_size) > aggr->size) {
- htri_t was_extended = FALSE; /* Whether the file was extended */
+ htri_t extended = FALSE; /* Whether the file was extended */
/* Check if the block asked for is too large for 'normal' aggregator block */
if(size >= aggr->alloc_size) {
@@ -221,31 +218,27 @@ HDfprintf(stderr, "%s: aggr = {%a, %Hu, %Hu}\n", FUNC, aggr->addr, aggr->tot_siz
if(H5F_addr_gt((aggr->addr + aggr->size + ext_size), f->shared->tmp_addr))
HGOTO_ERROR(H5E_RESOURCE, H5E_BADRANGE, HADDR_UNDEF, "'normal' file space allocation request will overlap into 'temporary' file space")
- if ((aggr->addr > 0) && (was_extended = H5FD_try_extend(f->shared->lf, alloc_type, f, aggr->addr + aggr->size, ext_size)) < 0)
+ if((aggr->addr > 0) && (extended = H5F_try_extend(f, dxpl_id, alloc_type, (aggr->addr + aggr->size), ext_size)) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, HADDR_UNDEF, "can't extending space")
- else if (was_extended) {
+ else if (extended) {
/* aggr->size is unchanged */
ret_value = aggr->addr + aggr_frag_size;
aggr->addr += ext_size;
aggr->tot_size += ext_size;
} else {
- /* Check for overlapping into file's temporary allocation space */
- if(H5F_addr_gt((eoa + size), f->shared->tmp_addr))
- HGOTO_ERROR(H5E_RESOURCE, H5E_BADRANGE, HADDR_UNDEF, "'normal' file space allocation request will overlap into 'temporary' file space")
-
/* Release "other" aggregator, if it exists, is at the end of the allocated space,
* has allocated more than one block and the unallocated space is greater than its
* allocation block size.
*/
- if ((other_aggr->size > 0) && (H5F_addr_eq((other_aggr->addr + other_aggr->size), eoa)) &&
- (other_aggr->tot_size > other_aggr->size) && ((other_aggr->tot_size - other_aggr->size) >= other_aggr->alloc_size)) {
- if(H5MF_aggr_free(f, dxpl_id, other_alloc_type, other_aggr) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTFREE, HADDR_UNDEF, "can't free aggregation block")
+ if((other_aggr->size > 0) && (H5F_addr_eq((other_aggr->addr + other_aggr->size), eoa)) &&
+ (other_aggr->tot_size > other_aggr->size) && ((other_aggr->tot_size - other_aggr->size) >= other_aggr->alloc_size)) {
+ if(H5MF__aggr_free(f, dxpl_id, other_alloc_type, other_aggr) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTFREE, HADDR_UNDEF, "can't free aggregation block")
} /* end if */
/* Allocate space from the VFD (i.e. at the end of the file) */
- if(HADDR_UNDEF == (ret_value = H5FD_alloc(f->shared->lf, dxpl_id, alloc_type, f, size, &eoa_frag_addr, &eoa_frag_size)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, HADDR_UNDEF, "can't allocate aggregation block")
+ if(HADDR_UNDEF == (ret_value = H5F_alloc(f, dxpl_id, alloc_type, size, &eoa_frag_addr, &eoa_frag_size)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, HADDR_UNDEF, "can't allocate file space")
} /* end else */
} /* end if */
else {
@@ -263,32 +256,29 @@ HDfprintf(stderr, "%s: Allocating block\n", FUNC);
if(H5F_addr_gt((aggr->addr + aggr->size + ext_size), f->shared->tmp_addr))
HGOTO_ERROR(H5E_RESOURCE, H5E_BADRANGE, HADDR_UNDEF, "'normal' file space allocation request will overlap into 'temporary' file space")
- if((aggr->addr > 0) && (was_extended = H5FD_try_extend(f->shared->lf, alloc_type, f, aggr->addr + aggr->size, ext_size)) < 0)
+ if((aggr->addr > 0) && (extended = H5F_try_extend(f, dxpl_id, alloc_type, (aggr->addr + aggr->size), ext_size)) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, HADDR_UNDEF, "can't extending space")
- else if (was_extended) {
+ else if(extended) {
aggr->addr += aggr_frag_size;
aggr->size += (ext_size - aggr_frag_size);
aggr->tot_size += ext_size;
- } else {
+ } /* end else-if */
+ else {
haddr_t new_space; /* Address of new space allocated */
- /* Check for overlapping into file's temporary allocation space */
- if(H5F_addr_gt((eoa + aggr->alloc_size), f->shared->tmp_addr))
- HGOTO_ERROR(H5E_RESOURCE, H5E_BADRANGE, HADDR_UNDEF, "'normal' file space allocation request will overlap into 'temporary' file space")
-
/* Release "other" aggregator, if it exists, is at the end of the allocated space,
* has allocated more than one block and the unallocated space is greater than its
* allocation block size.
*/
if((other_aggr->size > 0) && (H5F_addr_eq((other_aggr->addr + other_aggr->size), eoa)) &&
- (other_aggr->tot_size > other_aggr->size) && ((other_aggr->tot_size - other_aggr->size) >= other_aggr->alloc_size)) {
- if(H5MF_aggr_free(f, dxpl_id, other_alloc_type, other_aggr) < 0)
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTFREE, HADDR_UNDEF, "can't free aggregation block")
+ (other_aggr->tot_size > other_aggr->size) && ((other_aggr->tot_size - other_aggr->size) >= other_aggr->alloc_size)) {
+ if(H5MF__aggr_free(f, dxpl_id, other_alloc_type, other_aggr) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTFREE, HADDR_UNDEF, "can't free aggregation block")
} /* end if */
/* Allocate space from the VFD (i.e. at the end of the file) */
- if(HADDR_UNDEF == (new_space = H5FD_alloc(f->shared->lf, dxpl_id, alloc_type, f, aggr->alloc_size, &eoa_frag_addr, &eoa_frag_size)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, HADDR_UNDEF, "can't allocate aggregation block")
+ if(HADDR_UNDEF == (new_space = H5F_alloc(f, dxpl_id, alloc_type, aggr->alloc_size, &eoa_frag_addr, &eoa_frag_size)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, HADDR_UNDEF, "can't allocate file space")
/* Return the unused portion of the block to a free list */
if(aggr->size > 0)
@@ -314,7 +304,7 @@ HDfprintf(stderr, "%s: Allocating block\n", FUNC);
aggr->addr = new_space;
aggr->size = aggr->alloc_size;
aggr->tot_size = aggr->alloc_size;
- }
+ } /* end else */
} /* end else */
/* Allocate space out of the metadata block */
@@ -329,7 +319,7 @@ HDfprintf(stderr, "%s: Allocating block\n", FUNC);
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTFREE, HADDR_UNDEF, "can't free eoa fragment")
/* Freeing any possible fragment due to alignment in the block after extension */
- if(was_extended && aggr_frag_size)
+ if(extended && aggr_frag_size)
if(H5MF_xfree(f, alloc_type, dxpl_id, aggr_frag_addr, aggr_frag_size) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTFREE, HADDR_UNDEF, "can't free aggregation fragment")
} /* end if */
@@ -346,12 +336,8 @@ HDfprintf(stderr, "%s: Allocating block\n", FUNC);
} /* end else */
} /* end if */
else {
- /* Check for overlapping into file's temporary allocation space */
- if(H5F_addr_gt((eoa + size), f->shared->tmp_addr))
- HGOTO_ERROR(H5E_RESOURCE, H5E_BADRANGE, HADDR_UNDEF, "'normal' file space allocation request will overlap into 'temporary' file space")
-
/* Allocate data from the file */
- if(HADDR_UNDEF == (ret_value = H5FD_alloc(f->shared->lf, dxpl_id, type, f, size, &eoa_frag_addr, &eoa_frag_size)))
+ if(HADDR_UNDEF == (ret_value = H5F_alloc(f, dxpl_id, type, size, &eoa_frag_addr, &eoa_frag_size)))
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, HADDR_UNDEF, "can't allocate file space")
/* Check if fragment was generated */
@@ -365,15 +351,15 @@ HDfprintf(stderr, "%s: Allocating block\n", FUNC);
HDassert(H5F_addr_le((ret_value + size), f->shared->tmp_addr));
/* Post-condition sanity check */
- if(f->shared->alignment && size >= f->shared->threshold)
- HDassert(!((ret_value + H5FD_get_base_addr(f->shared->lf)) % f->shared->alignment));
+ if(H5F_ALIGNMENT(f) && size >= H5F_THRESHOLD(f))
+ HDassert(!((ret_value + H5FD_get_base_addr(f->shared->lf)) % H5F_ALIGNMENT(f)));
done:
#ifdef H5MF_AGGR_DEBUG
HDfprintf(stderr, "%s: ret_value = %a\n", FUNC, ret_value);
#endif /* H5MF_AGGR_DEBUG */
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5MF_aggr_alloc() */
+} /* end H5MF__aggr_alloc() */
/*-------------------------------------------------------------------------
@@ -402,8 +388,8 @@ HDfprintf(stderr, "%s: ret_value = %a\n", FUNC, ret_value);
*-------------------------------------------------------------------------
*/
htri_t
-H5MF_aggr_try_extend(H5F_t *f, H5F_blk_aggr_t *aggr, H5FD_mem_t type,
- haddr_t blk_end, hsize_t extra_requested)
+H5MF_aggr_try_extend(H5F_t *f, hid_t dxpl_id, H5F_blk_aggr_t *aggr,
+ H5FD_mem_t type, haddr_t blk_end, hsize_t extra_requested)
{
htri_t ret_value = FALSE; /* Return value */
@@ -436,8 +422,8 @@ H5MF_aggr_try_extend(H5F_t *f, H5F_blk_aggr_t *aggr, H5FD_mem_t type,
/* Indicate success */
HGOTO_DONE(TRUE);
- }
- /*
+ } /* end if */
+ /*
* If extra_requested is above percentage threshold:
* 1) "bubble" up the aggregator by aggr->alloc_size or extra_requested
* 2) extend the block into the aggregator
@@ -445,7 +431,7 @@ H5MF_aggr_try_extend(H5F_t *f, H5F_blk_aggr_t *aggr, H5FD_mem_t type,
else {
hsize_t extra = (extra_requested < aggr->alloc_size) ? aggr->alloc_size : extra_requested;
- if((ret_value = H5FD_try_extend(f->shared->lf, type, f, (aggr->addr + aggr->size), extra)) < 0)
+ if((ret_value = H5F_try_extend(f, dxpl_id, type, (aggr->addr + aggr->size), extra)) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTEXTEND, FAIL, "error extending file")
else if(ret_value == TRUE) {
/* Shift the aggregator block by the extra requested */
@@ -461,10 +447,11 @@ H5MF_aggr_try_extend(H5F_t *f, H5F_blk_aggr_t *aggr, H5FD_mem_t type,
*/
aggr->size += extra;
aggr->size -= extra_requested;
- } /* end if */
- } /* end if */
+ } /* end else-if */
+ } /* end else */
} /* end if */
- else { /* The aggreator is not at end of file */
+ else {
+ /* The aggregator is not at end of file */
/* Check if aggregator has enough internal space to satisfy the extension. */
if(aggr->size >= extra_requested) {
/* Extend block into aggregator */
@@ -825,7 +812,7 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5MF_aggr_free
+ * Function: H5MF__aggr_free
*
* Purpose: Free the aggregator's space in the file.
*
@@ -839,11 +826,11 @@ done:
*-------------------------------------------------------------------------
*/
static herr_t
-H5MF_aggr_free(H5F_t *f, hid_t dxpl_id, H5FD_mem_t type, H5F_blk_aggr_t *aggr)
+H5MF__aggr_free(H5F_t *f, hid_t dxpl_id, H5FD_mem_t type, H5F_blk_aggr_t *aggr)
{
herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI(FAIL)
+ FUNC_ENTER_STATIC
/* Sanity check */
HDassert(f);
@@ -856,7 +843,7 @@ H5MF_aggr_free(H5F_t *f, hid_t dxpl_id, H5FD_mem_t type, H5F_blk_aggr_t *aggr)
HDassert(f->shared->feature_flags & aggr->feature_flag);
/* Free the remaining space at EOA in the aggregator */
- if(H5FD_free(f->shared->lf, dxpl_id, type, f, aggr->addr, aggr->size) < 0)
+ if(H5F_free(f, dxpl_id, type, aggr->addr, aggr->size) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTFREE, FAIL, "can't free aggregation block")
/* Reset the aggregator */
@@ -866,7 +853,7 @@ H5MF_aggr_free(H5F_t *f, hid_t dxpl_id, H5FD_mem_t type, H5F_blk_aggr_t *aggr)
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5MF_aggr_free() */
+} /* H5MF__aggr_free() */
/*-------------------------------------------------------------------------
@@ -898,13 +885,13 @@ H5MF_aggrs_try_shrink_eoa(H5F_t *f, hid_t dxpl_id)
if((ma_status = H5MF_aggr_can_shrink_eoa(f, H5FD_MEM_DEFAULT, &(f->shared->meta_aggr))) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "can't query metadata aggregator stats")
if(ma_status > 0)
- if(H5MF_aggr_free(f, dxpl_id, H5FD_MEM_DEFAULT, &(f->shared->meta_aggr)) < 0)
+ if(H5MF__aggr_free(f, dxpl_id, H5FD_MEM_DEFAULT, &(f->shared->meta_aggr)) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSHRINK, FAIL, "can't check for shrinking eoa")
if((sda_status = H5MF_aggr_can_shrink_eoa(f, H5FD_MEM_DRAW, &(f->shared->sdata_aggr))) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "can't query small data aggregator stats")
if(sda_status > 0)
- if(H5MF_aggr_free(f, dxpl_id, H5FD_MEM_DRAW, &(f->shared->sdata_aggr)) < 0)
+ if(H5MF__aggr_free(f, dxpl_id, H5FD_MEM_DRAW, &(f->shared->sdata_aggr)) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTSHRINK, FAIL, "can't check for shrinking eoa")
ret_value = (ma_status || sda_status);
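
For readers following the aggregator hunks above, here is a minimal standalone sketch of the "bubble up" rule applied by H5MF_aggr_try_extend() when a block ending at the aggregator requests more space than the aggregator still holds. The struct and function names below are illustrative only (not part of the patch), and the file-extension call is omitted.

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for H5F_blk_aggr_t (illustrative names only) */
typedef struct {
    uint64_t addr;       /* start of the unused portion of the aggregator */
    uint64_t size;       /* unused bytes remaining in the aggregator */
    uint64_t tot_size;   /* total size of the aggregator block */
    uint64_t alloc_size; /* growth increment when the aggregator "bubbles up" */
} aggr_sketch_t;

/* When extra_requested exceeds what the aggregator can absorb, the file is
 * extended by max(alloc_size, extra_requested); the request is satisfied and
 * the aggregator keeps the remainder. */
static void aggr_bubble_up(aggr_sketch_t *aggr, uint64_t extra_requested)
{
    uint64_t extra = (extra_requested < aggr->alloc_size) ? aggr->alloc_size : extra_requested;

    aggr->addr     += extra_requested;   /* aggregator now starts past the extended block */
    aggr->tot_size += extra;
    aggr->size     += extra;             /* space gained from extending the file ... */
    aggr->size     -= extra_requested;   /* ... minus the space handed to the caller  */
}

int main(void)
{
    aggr_sketch_t aggr = {8192, 512, 2048, 2048};

    aggr_bubble_up(&aggr, 1024);         /* request is larger than the 512 bytes left */
    printf("addr=%llu size=%llu tot=%llu\n",
           (unsigned long long)aggr.addr, (unsigned long long)aggr.size,
           (unsigned long long)aggr.tot_size);   /* addr=9216 size=1536 tot=4096 */
    return 0;
}
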
diff --git a/src/H5MFdbg.c b/src/H5MFdbg.c
index 6d5d994..49cfbfc 100644
--- a/src/H5MFdbg.c
+++ b/src/H5MFdbg.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -116,7 +114,9 @@ H5MF_sects_debug_cb(H5FS_section_info_t *_sect, void *_udata)
/* Print generic section information */
HDfprintf(udata->stream, "%*s%-*s %s\n", udata->indent, "", udata->fwidth,
"Section type:",
- (sect->sect_info.type == H5MF_FSPACE_SECT_SIMPLE ? "simple" : "unknown"));
+ (sect->sect_info.type == H5MF_FSPACE_SECT_SIMPLE ? "simple" :
+ (sect->sect_info.type == H5MF_FSPACE_SECT_SMALL ? "small" :
+ (sect->sect_info.type == H5MF_FSPACE_SECT_LARGE ? "large" : "unknown"))));
HDfprintf(udata->stream, "%*s%-*s %a\n", udata->indent, "", udata->fwidth,
"Section address:",
sect->sect_info.addr);
@@ -171,7 +171,7 @@ H5MF_sects_debug(H5F_t *f, hid_t dxpl_id, haddr_t fs_addr, FILE *stream, int ind
for(type = H5FD_MEM_DEFAULT; type < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, type))
if(H5F_addr_eq(f->shared->fs_addr[type], fs_addr)) {
if(!f->shared->fs_man[type])
- if(H5MF_alloc_open(f, dxpl_id, type) < 0)
+ if(H5MF_open_fstype(f, dxpl_id, type) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTINIT, FAIL, "can't initialize file free space")
if(f->shared->fs_man[type]) {
@@ -192,7 +192,7 @@ H5MF_sects_debug(H5F_t *f, hid_t dxpl_id, haddr_t fs_addr, FILE *stream, int ind
HGOTO_ERROR(H5E_HEAP, H5E_CANTRELEASE, FAIL, "can't release free space info")
} /* end if */
break;
- }
+ } /* end if */
done:
FUNC_LEAVE_NOAPI_TAG(ret_value, FAIL)
@@ -217,11 +217,6 @@ herr_t
H5MF_sects_dump(H5F_t *f, hid_t dxpl_id, FILE *stream)
{
haddr_t eoa; /* End of allocated space in the file */
- haddr_t ma_addr = HADDR_UNDEF; /* Base "metadata aggregator" address */
- hsize_t ma_size = 0; /* Size of "metadata aggregator" */
- haddr_t sda_addr = HADDR_UNDEF; /* Base "small data aggregator" address */
- hsize_t sda_size = 0; /* Size of "small data aggregator" */
- H5FD_mem_t type; /* Memory type for iteration */
int indent = 0; /* Amount to indent */
int fwidth = 50; /* Field width */
herr_t ret_value = SUCCEED; /* Return value */
@@ -244,59 +239,90 @@ HDfprintf(stderr, "%s: Dumping file free space sections\n", FUNC);
HDfprintf(stderr, "%s: for type = H5FD_MEM_DEFAULT, eoa = %a\n", FUNC, eoa);
#endif /* H5MF_ALLOC_DEBUG */
- /* Retrieve metadata aggregator info, if available */
- H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
-#ifdef H5MF_ALLOC_DEBUG
-HDfprintf(stderr, "%s: ma_addr = %a, ma_size = %Hu, end of ma = %a\n", FUNC, ma_addr, ma_size, (haddr_t)((ma_addr + ma_size) - 1));
-#endif /* H5MF_ALLOC_DEBUG */
-
- /* Retrieve 'small data' aggregator info, if available */
- H5MF_aggr_query(f, &(f->shared->sdata_aggr), &sda_addr, &sda_size);
-#ifdef H5MF_ALLOC_DEBUG
-HDfprintf(stderr, "%s: sda_addr = %a, sda_size = %Hu, end of sda = %a\n", FUNC, sda_addr, sda_size, (haddr_t)((sda_addr + sda_size) - 1));
-#endif /* H5MF_ALLOC_DEBUG */
+ if(H5F_PAGED_AGGR(f)) { /* File space paging */
+ H5F_mem_page_t ptype; /* Memory type for iteration -- page fs */
- /* Iterate over all the free space types that have managers and dump each free list's space */
- for(type = H5FD_MEM_DEFAULT; type < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, type)) {
- /* Print header for type */
- HDfprintf(stream, "%*sFile Free Space Info for type = %u:\n", indent, "", (unsigned)type);
-
- /* Check for this type being mapped to another type */
- if(H5FD_MEM_DEFAULT == f->shared->fs_type_map[type] ||
- type == f->shared->fs_type_map[type]) {
- /* Retrieve the 'eoa' for this file memory type */
- if(HADDR_UNDEF == (eoa = H5F_get_eoa(f, type)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "driver get_eoa request failed")
- HDfprintf(stream, "%*s%-*s %a\n", indent + 3, "", MAX(0, fwidth - 3),
- "eoa:",
- eoa);
+ for(ptype = H5F_MEM_PAGE_META; ptype < H5F_MEM_PAGE_NTYPES; H5_INC_ENUM(H5F_mem_page_t, ptype)) {
+ /* Print header for type */
+ HDfprintf(stream, "%*sFile Free Space Info for type = %u:\n", indent, "", (unsigned)ptype);
/* Print header for sections */
HDfprintf(stream, "%*sSections:\n", indent + 3, "");
/* If there is a free space manager for this type, iterate over them */
- if(f->shared->fs_man[type]) {
+ if(f->shared->fs_man[ptype]) {
H5MF_debug_iter_ud_t udata; /* User data for callbacks */
/* Prepare user data for section iteration callback */
- udata.fspace = f->shared->fs_man[type];
+ udata.fspace = f->shared->fs_man[ptype];
udata.stream = stream;
udata.indent = indent + 6;
udata.fwidth = MAX(0, fwidth - 6);
/* Iterate over all the free space sections */
- if(H5FS_sect_iterate(f, dxpl_id, f->shared->fs_man[type], H5MF_sects_debug_cb, &udata) < 0)
+ if(H5FS_sect_iterate(f, dxpl_id, f->shared->fs_man[ptype], H5MF_sects_debug_cb, &udata) < 0)
HGOTO_ERROR(H5E_HEAP, H5E_BADITER, FAIL, "can't iterate over heap's free space")
} /* end if */
- else {
+ else
/* No sections of this type */
HDfprintf(stream, "%*s<none>\n", indent + 6, "");
- } /* end else */
- } /* end if */
- else {
- HDfprintf(stream, "%*sMapped to type = %u\n", indent, "", (unsigned)f->shared->fs_type_map[type]);
- } /* end else */
- } /* end for */
+ } /* end for */
+ } /* end if */
+ else { /* not file space paging */
+ H5FD_mem_t atype; /* Memory type for iteration -- aggr fs */
+ haddr_t ma_addr = HADDR_UNDEF; /* Base "metadata aggregator" address */
+ hsize_t ma_size = 0; /* Size of "metadata aggregator" */
+ haddr_t sda_addr = HADDR_UNDEF; /* Base "small data aggregator" address */
+ hsize_t sda_size = 0; /* Size of "small data aggregator" */
+
+ /* Retrieve metadata aggregator info, if available */
+ H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
+#ifdef H5MF_ALLOC_DEBUG
+HDfprintf(stderr, "%s: ma_addr = %a, ma_size = %Hu, end of ma = %a\n", FUNC, ma_addr, ma_size, (haddr_t)((ma_addr + ma_size) - 1));
+#endif /* H5MF_ALLOC_DEBUG */
+
+ /* Retrieve 'small data' aggregator info, if available */
+ H5MF_aggr_query(f, &(f->shared->sdata_aggr), &sda_addr, &sda_size);
+#ifdef H5MF_ALLOC_DEBUG
+HDfprintf(stderr, "%s: sda_addr = %a, sda_size = %Hu, end of sda = %a\n", FUNC, sda_addr, sda_size, (haddr_t)((sda_addr + sda_size) - 1));
+#endif /* H5MF_ALLOC_DEBUG */
+
+ /* Iterate over all the free space types that have managers and dump each free list's space */
+ for(atype = H5FD_MEM_DEFAULT; atype < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, atype)) {
+ /* Print header for type */
+ HDfprintf(stream, "%*sFile Free Space Info for type = %u:\n", indent, "", (unsigned)atype);
+
+ /* Check for this type being mapped to another type */
+ if(H5FD_MEM_DEFAULT == f->shared->fs_type_map[atype] || atype == f->shared->fs_type_map[atype]) {
+ /* Retrieve the 'eoa' for this file memory type */
+ if(HADDR_UNDEF == (eoa = H5F_get_eoa(f, atype)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "driver get_eoa request failed")
+ HDfprintf(stream, "%*s%-*s %a\n", indent + 3, "", MAX(0, fwidth - 3), "eoa:", eoa);
+
+ /* Print header for sections */
+ HDfprintf(stream, "%*sSections:\n", indent + 3, "");
+
+ /* If there is a free space manager for this type, iterate over them */
+ if(f->shared->fs_man[atype]) {
+ H5MF_debug_iter_ud_t udata; /* User data for callbacks */
+
+ /* Prepare user data for section iteration callback */
+ udata.fspace = f->shared->fs_man[atype];
+ udata.stream = stream;
+ udata.indent = indent + 6;
+ udata.fwidth = MAX(0, fwidth - 6);
+
+ /* Iterate over all the free space sections */
+ if(H5FS_sect_iterate(f, dxpl_id, f->shared->fs_man[atype], H5MF_sects_debug_cb, &udata) < 0)
+ HGOTO_ERROR(H5E_HEAP, H5E_BADITER, FAIL, "can't iterate over heap's free space")
+ } /* end if */
+ else /* No sections of this type */
+ HDfprintf(stream, "%*s<none>\n", indent + 6, "");
+ } /* end if */
+ else
+ HDfprintf(stream, "%*sMapped to type = %u\n", indent, "", (unsigned)f->shared->fs_type_map[atype]);
+ } /* end for */
+ } /* end else */
done:
HDfprintf(stderr, "%s: Done dumping file free space sections\n", FUNC);
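
The debug callback above (H5MF_sects_debug_cb) now distinguishes three section types. A small sketch of the same mapping written out as a helper; the helper name and the standalone type codes are made up for illustration, while the patch itself uses a nested ternary.

#include <stdio.h>

/* Section type codes as used by the free-space sections in this patch */
#define SECT_SIMPLE 0   /* non-paged aggregation */
#define SECT_SMALL  1   /* paged aggregation, smaller than a file-space page */
#define SECT_LARGE  2   /* paged aggregation, one page or more */

/* Same mapping as the nested ternary in H5MF_sects_debug_cb(), written out */
static const char *sect_type_name(unsigned type)
{
    switch(type) {
        case SECT_SIMPLE: return "simple";
        case SECT_SMALL:  return "small";
        case SECT_LARGE:  return "large";
        default:          return "unknown";
    }
}

int main(void)
{
    unsigned t;

    for(t = 0; t <= 3; t++)
        printf("Section type: %s\n", sect_type_name(t));
    return 0;
}
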
diff --git a/src/H5MFmodule.h b/src/H5MFmodule.h
index 6e5f8ad..53daabf 100644
--- a/src/H5MFmodule.h
+++ b/src/H5MFmodule.h
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5MFpkg.h b/src/H5MFpkg.h
index 1a62710..b95a6db 100644
--- a/src/H5MFpkg.h
+++ b/src/H5MFpkg.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -51,9 +49,38 @@
/* Define this to dump free space tracker contents after they've been modified */
/* #define H5MF_ALLOC_DEBUG_DUMP */
-/* Free space section types for file */
+/* Free-space section types for file */
/* (values stored in free space data structures in file) */
-#define H5MF_FSPACE_SECT_SIMPLE 0 /* Section is a range of actual bytes in file */
+#define H5MF_FSPACE_SECT_SIMPLE 0 /* For non-paged aggregation: section is a range of actual bytes in file */
+#define H5MF_FSPACE_SECT_SMALL 1 /* For paged aggregation: "small" meta/raw data section (size < fsp_size) */
+#define H5MF_FSPACE_SECT_LARGE 2 /* For paged aggregation: "large" section (size >= fsp_size) */
+
+/* For non-paged aggregation: map allocation request type to tracked free-space type */
+/* F -- pointer to H5F_t; T -- H5FD_mem_t */
+#define H5MF_ALLOC_TO_FS_AGGR_TYPE(F, T) \
+ ((H5FD_MEM_DEFAULT == (F)->shared->fs_type_map[T]) ? (T) : (F)->shared->fs_type_map[T])
+
+/* Get section class type based on size */
+#define H5MF_SECT_CLASS_TYPE(F, S) \
+ ((H5F_PAGED_AGGR(F)) ? \
+ ((S >= (F)->shared->fs_page_size) ? H5MF_FSPACE_SECT_LARGE : H5MF_FSPACE_SECT_SMALL) : H5MF_FSPACE_SECT_SIMPLE)
+
+/* Get section class (H5FS_section_class_t) based on size */
+#define H5MF_SECT_CLS_TYPE(F, S) \
+ ((H5F_PAGED_AGGR(F)) ? \
+ ((S >= (F)->shared->fs_page_size) ? \
+ H5MF_FSPACE_SECT_CLS_LARGE : H5MF_FSPACE_SECT_CLS_SMALL) : H5MF_FSPACE_SECT_CLS_SIMPLE)
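
A compact sketch of the selection rule encoded by the two macros above: paged-aggregation files classify a section as "large" or "small" by comparing its size against the file-space page size, while non-paged files always use the "simple" class. The names below are illustrative and the file is reduced to two parameters.

#include <stdint.h>

#define SECT_SIMPLE 0
#define SECT_SMALL  1
#define SECT_LARGE  2

/* Rough equivalent of H5MF_SECT_CLASS_TYPE(F, S) */
static unsigned sect_class_type(int paged_aggr, uint64_t fs_page_size, uint64_t size)
{
    if(!paged_aggr)
        return SECT_SIMPLE;
    return (size >= fs_page_size) ? SECT_LARGE : SECT_SMALL;
}

/* e.g. with a 4096-byte page: size 512 -> SECT_SMALL, size 8192 -> SECT_LARGE;
 * with paging disabled either size maps to SECT_SIMPLE */
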
+
+/* Calculate the mis-aligned fragment */
+#define H5MF_EOA_MISALIGN(F, E, A, FR) \
+{ \
+ hsize_t m; \
+ \
+ if(H5F_addr_gt((E), 0) && ((m) = ((E) + H5F_BASE_ADDR(F)) % (A))) \
+ (FR) = (A) - m; \
+ else \
+ (FR) = 0; \
+}
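
And a worked version of the H5MF_EOA_MISALIGN arithmetic just above, with concrete numbers; the function form is only for illustration.

#include <stdint.h>

/* Same computation as H5MF_EOA_MISALIGN(F, E, A, FR): how many bytes separate
 * address E (offset by the file's base address) from the next multiple of A. */
static uint64_t eoa_misalign(uint64_t base_addr, uint64_t eoa, uint64_t alignment)
{
    uint64_t m;

    if(eoa > 0 && (m = (eoa + base_addr) % alignment) != 0)
        return alignment - m;
    return 0;
}

/* e.g. base_addr = 0, eoa = 10000, alignment = 4096 (a page size):
 * 10000 % 4096 = 1808, so the fragment is 4096 - 1808 = 2288 and
 * 10000 + 2288 = 12288 lands exactly on a page boundary. */
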
/****************************/
@@ -129,6 +156,15 @@ typedef struct H5MF_sect_ud_t {
H5F_blk_aggr_t *aggr; /* Aggregator block to operate on */
} H5MF_sect_ud_t;
+/* Information about the current free-space manager to use */
+typedef struct H5MF_fs_t {
+ H5F_fs_state_t *fs_state;
+ haddr_t *fs_addr;
+ H5FS_t **fs_man;
+ hsize_t align_thres; /* Threshold for alignment */
+ hsize_t alignment; /* Alignment */
+} H5MF_fs_t;
+
/*****************************/
/* Package Private Variables */
@@ -136,6 +172,8 @@ typedef struct H5MF_sect_ud_t {
/* H5MF single section inherits serializable properties from H5FS_section_class_t */
H5_DLLVAR H5FS_section_class_t H5MF_FSPACE_SECT_CLS_SIMPLE[1];
+H5_DLLVAR H5FS_section_class_t H5MF_FSPACE_SECT_CLS_SMALL[1];
+H5_DLLVAR H5FS_section_class_t H5MF_FSPACE_SECT_CLS_LARGE[1];
/******************************/
@@ -143,23 +181,24 @@ H5_DLLVAR H5FS_section_class_t H5MF_FSPACE_SECT_CLS_SIMPLE[1];
/******************************/
/* Allocator routines */
-H5_DLL herr_t H5MF_alloc_open(H5F_t *f, hid_t dxpl_id, H5FD_mem_t type);
-H5_DLL herr_t H5MF_alloc_start(H5F_t *f, hid_t dxpl_id, H5FD_mem_t type);
+H5_DLL herr_t H5MF_open_fstype(H5F_t *f, hid_t dxpl_id, H5F_mem_page_t type);
+H5_DLL herr_t H5MF_start_fstype(H5F_t *f, hid_t dxpl_id, H5F_mem_page_t type);
+
+H5_DLL htri_t H5MF_find_sect(H5F_t *f, H5FD_mem_t alloc_type, hid_t dxpl_id, hsize_t size, H5FS_t *fspace, haddr_t *addr);
+H5_DLL herr_t H5MF_add_sect(H5F_t *f, H5FD_mem_t alloc_type, hid_t dxpl_id, H5FS_t *fspace, H5MF_free_section_t *node);
+
H5_DLL herr_t H5MF_sects_dump(H5F_t *f, hid_t dxpl_id, FILE *stream);
-/* 'simple' section routines */
-H5_DLL H5MF_free_section_t *H5MF_sect_simple_new(haddr_t sect_off,
+H5_DLL void H5MF_alloc_to_fs_type(H5F_t *f, H5FD_mem_t alloc_type, hsize_t size, H5F_mem_page_t *fs_type);
+
+/* 'simple/small/large' section routines */
+H5_DLL H5MF_free_section_t *H5MF_sect_new(unsigned ctype, haddr_t sect_off,
hsize_t sect_size);
-H5_DLL htri_t H5MF_sect_simple_can_shrink(const H5FS_section_info_t *_sect,
- void *udata);
-H5_DLL herr_t H5MF_sect_simple_shrink(H5FS_section_info_t **_sect,
- void *udata);
-H5_DLL herr_t H5MF_sect_simple_free(H5FS_section_info_t *sect);
+H5_DLL herr_t H5MF_sect_free(H5FS_section_info_t *sect);
+
/* Block aggregator routines */
-H5_DLL haddr_t H5MF_aggr_alloc(H5F_t *f, hid_t dxpl_id, H5F_blk_aggr_t *aggr,
- H5F_blk_aggr_t *other_aggr, H5FD_mem_t type, hsize_t size);
-H5_DLL htri_t H5MF_aggr_try_extend(H5F_t *f, H5F_blk_aggr_t *aggr,
+H5_DLL htri_t H5MF_aggr_try_extend(H5F_t *f, hid_t dxpl_id, H5F_blk_aggr_t *aggr,
H5FD_mem_t type, haddr_t abs_blk_end, hsize_t extra_requested);
H5_DLL htri_t H5MF_aggr_can_absorb(const H5F_t *f, const H5F_blk_aggr_t *aggr,
const H5MF_free_section_t *sect, H5MF_shrink_type_t *shrink);
diff --git a/src/H5MFprivate.h b/src/H5MFprivate.h
index 22ed308..bfeaa33 100644
--- a/src/H5MFprivate.h
+++ b/src/H5MFprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -51,16 +49,15 @@
/* File space manager routines */
H5_DLL herr_t H5MF_init_merge_flags(H5F_t *f);
-H5_DLL herr_t H5MF_get_freespace(H5F_t *f, hid_t dxpl_id, hsize_t *tot_space,
- hsize_t *meta_size);
+H5_DLL herr_t H5MF_get_freespace(H5F_t *f, hid_t dxpl_id, hsize_t *tot_space, hsize_t *meta_size);
H5_DLL herr_t H5MF_close(H5F_t *f, hid_t dxpl_id);
H5_DLL herr_t H5MF_try_close(H5F_t *f, hid_t dxpl_id);
/* File space allocation routines */
H5_DLL haddr_t H5MF_alloc(H5F_t *f, H5FD_mem_t type, hid_t dxpl_id, hsize_t size);
H5_DLL haddr_t H5MF_aggr_vfd_alloc(H5F_t *f, H5FD_mem_t type, hid_t dxpl_id, hsize_t size);
-H5_DLL herr_t H5MF_xfree(const H5F_t *f, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr,
- hsize_t size);
+H5_DLL herr_t H5MF_xfree(H5F_t *f, H5FD_mem_t type, hid_t dxpl_id, haddr_t addr,
+ hsize_t size);
H5_DLL herr_t H5MF_try_extend(H5F_t *f, hid_t dxpl_id, H5FD_mem_t type,
haddr_t addr, hsize_t size, hsize_t extra_requested);
H5_DLL htri_t H5MF_try_shrink(H5F_t *f, H5FD_mem_t alloc_type, hid_t dxpl_id,
@@ -75,6 +72,18 @@ H5_DLL haddr_t H5MF_alloc_tmp(H5F_t *f, hsize_t size);
H5_DLL herr_t H5MF_free_aggrs(H5F_t *f, hid_t dxpl_id);
H5_DLL htri_t H5MF_aggrs_try_shrink_eoa(H5F_t *f, hid_t dxpl_id);
+/* Free space manager settling routines */
+H5_DLL herr_t H5MF_settle_raw_data_fsm(H5F_t *f, hid_t dxpl_id, hbool_t *fsm_settled);
+H5_DLL herr_t H5MF_settle_meta_data_fsm(H5F_t *f, hid_t dxpl_id, hbool_t *fsm_settled);
+
+/* This function has to be declared in H5MFprivate.h as it is needed
+ * in our test code to allow us to manually start a self-referential
+ * free space manager prior to the first file space allocation /
+ * deallocation without causing assertion failures on the first
+ * file space allocation / deallocation.
+ */
+H5_DLL herr_t H5MF_tidy_self_referential_fsm_hack(H5F_t *f, hid_t dxpl_id);
+
/* Debugging routines */
#ifdef H5MF_DEBUGGING
H5_DLL herr_t H5MF_sects_debug(H5F_t *f, hid_t dxpl_id, haddr_t addr,
diff --git a/src/H5MFsection.c b/src/H5MFsection.c
index e5a0cf0..0dd26b3 100644
--- a/src/H5MFsection.c
+++ b/src/H5MFsection.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -57,18 +55,47 @@
/* Local Prototypes */
/********************/
-/* 'simple' section callbacks */
-static H5FS_section_info_t *H5MF_sect_simple_deserialize(const H5FS_section_class_t *cls,
+/* 'simple/small/large' section callbacks */
+static H5FS_section_info_t *H5MF_sect_deserialize(const H5FS_section_class_t *cls,
hid_t dxpl_id, const uint8_t *buf, haddr_t sect_addr, hsize_t sect_size,
unsigned *des_flags);
+static herr_t H5MF_sect_valid(const H5FS_section_class_t *cls,
+ const H5FS_section_info_t *sect, hid_t dxpl_id);
+static H5FS_section_info_t *H5MF_sect_split(H5FS_section_info_t *sect,
+ hsize_t frag_size);
+
+
+/* 'simple' section callbacks */
static htri_t H5MF_sect_simple_can_merge(const H5FS_section_info_t *sect1,
const H5FS_section_info_t *sect2, void *udata);
-static herr_t H5MF_sect_simple_merge(H5FS_section_info_t *sect1,
+static herr_t H5MF_sect_simple_merge(H5FS_section_info_t **sect1,
H5FS_section_info_t *sect2, void *udata);
-static herr_t H5MF_sect_simple_valid(const H5FS_section_class_t *cls,
- const H5FS_section_info_t *sect, hid_t dxpl_id);
-static H5FS_section_info_t *H5MF_sect_simple_split(H5FS_section_info_t *sect,
- hsize_t frag_size);
+static htri_t H5MF_sect_simple_can_shrink(const H5FS_section_info_t *_sect,
+ void *udata);
+static herr_t H5MF_sect_simple_shrink(H5FS_section_info_t **_sect,
+ void *udata);
+
+
+/* 'small' section callbacks */
+static herr_t H5MF_sect_small_add(H5FS_section_info_t **_sect, unsigned *flags, void *_udata);
+static htri_t H5MF_sect_small_can_merge(const H5FS_section_info_t *sect1,
+ const H5FS_section_info_t *sect2, void *udata);
+static herr_t H5MF_sect_small_merge(H5FS_section_info_t **sect1,
+ H5FS_section_info_t *sect2, void *udata);
+static htri_t H5MF_sect_small_can_shrink(const H5FS_section_info_t *_sect,
+ void *udata);
+static herr_t H5MF_sect_small_shrink(H5FS_section_info_t **_sect,
+ void *udata);
+
+/* 'large' section callbacks */
+static htri_t H5MF_sect_large_can_merge(const H5FS_section_info_t *sect1,
+ const H5FS_section_info_t *sect2, void *udata);
+static herr_t H5MF_sect_large_merge(H5FS_section_info_t **sect1,
+ H5FS_section_info_t *sect2, void *udata);
+static htri_t H5MF_sect_large_can_shrink(const H5FS_section_info_t *_sect,
+ void *udata);
+static herr_t H5MF_sect_large_shrink(H5FS_section_info_t **_sect,
+ void *udata);
/*********************/
/* Package Variables */
@@ -89,17 +116,68 @@ H5FS_section_class_t H5MF_FSPACE_SECT_CLS_SIMPLE[1] = {{
/* Object methods */
NULL, /* Add section */
NULL, /* Serialize section */
- H5MF_sect_simple_deserialize, /* Deserialize section */
+ H5MF_sect_deserialize, /* Deserialize section */
H5MF_sect_simple_can_merge, /* Can sections merge? */
H5MF_sect_simple_merge, /* Merge sections */
H5MF_sect_simple_can_shrink, /* Can section shrink container?*/
H5MF_sect_simple_shrink, /* Shrink container w/section */
- H5MF_sect_simple_free, /* Free section */
- H5MF_sect_simple_valid, /* Check validity of section */
- H5MF_sect_simple_split, /* Split section node for alignment */
+ H5MF_sect_free, /* Free section */
+ H5MF_sect_valid, /* Check validity of section */
+ H5MF_sect_split, /* Split section node for alignment */
NULL, /* Dump debugging for section */
}};
+/* Class info for "small" free space sections */
+H5FS_section_class_t H5MF_FSPACE_SECT_CLS_SMALL[1] = {{
+ /* Class variables */
+ H5MF_FSPACE_SECT_SMALL, /* Section type */
+ 0, /* Extra serialized size */
+ H5FS_CLS_MERGE_SYM | H5FS_CLS_ADJUST_OK, /* Class flags */
+ NULL, /* Class private info */
+
+ /* Class methods */
+ NULL, /* Initialize section class */
+ NULL, /* Terminate section class */
+
+ /* Object methods */
+ H5MF_sect_small_add, /* Add section */
+ NULL, /* Serialize section */
+ H5MF_sect_deserialize, /* Deserialize section */
+ H5MF_sect_small_can_merge, /* Can sections merge? */
+ H5MF_sect_small_merge, /* Merge sections */
+ H5MF_sect_small_can_shrink, /* Can section shrink container?*/
+ H5MF_sect_small_shrink, /* Shrink container w/section */
+ H5MF_sect_free, /* Free section */
+ H5MF_sect_valid, /* Check validity of section */
+ H5MF_sect_split, /* Split section node for alignment */
+ NULL, /* Dump debugging for section */
+}};
+
+/* Class info for "large" free space sections */
+H5FS_section_class_t H5MF_FSPACE_SECT_CLS_LARGE[1] = {{
+ /* Class variables */
+ H5MF_FSPACE_SECT_LARGE, /* Section type */
+ 0, /* Extra serialized size */
+ H5FS_CLS_MERGE_SYM | H5FS_CLS_ADJUST_OK, /* Class flags */
+ NULL, /* Class private info */
+
+ /* Class methods */
+ NULL, /* Initialize section class */
+ NULL, /* Terminate section class */
+
+ /* Object methods */
+ NULL, /* Add section */
+ NULL, /* Serialize section */
+ H5MF_sect_deserialize, /* Deserialize section */
+ H5MF_sect_large_can_merge, /* Can sections merge? */
+ H5MF_sect_large_merge, /* Merge sections */
+ H5MF_sect_large_can_shrink, /* Can section shrink container?*/
+ H5MF_sect_large_shrink, /* Shrink container w/section */
+ H5MF_sect_free, /* Free section */
+ H5MF_sect_valid, /* Check validity of section */
+ H5MF_sect_split, /* Split section node for alignment */
+ NULL, /* Dump debugging for section */
+}};
/*****************************/
/* Library Private Variables */
@@ -113,12 +191,15 @@ H5FS_section_class_t H5MF_FSPACE_SECT_CLS_SIMPLE[1] = {{
/* Declare a free list to manage the H5MF_free_section_t struct */
H5FL_DEFINE(H5MF_free_section_t);
+/*
+ * "simple/small/large" section callbacks
+ */
/*-------------------------------------------------------------------------
- * Function: H5MF_sect_simple_new
+ * Function: H5MF_sect_new
*
- * Purpose: Create a new 'simple' section and return it to the caller
+ * Purpose: Create a new section of "ctype" and return it to the caller
*
* Return: Pointer to new section on success/NULL on failure
*
@@ -129,9 +210,9 @@ H5FL_DEFINE(H5MF_free_section_t);
*-------------------------------------------------------------------------
*/
H5MF_free_section_t *
-H5MF_sect_simple_new(haddr_t sect_off, hsize_t sect_size)
+H5MF_sect_new(unsigned ctype, haddr_t sect_off, hsize_t sect_size)
{
- H5MF_free_section_t *sect = NULL; /* 'Simple' free space section to add */
+ H5MF_free_section_t *sect; /* New free space section to add */
H5MF_free_section_t *ret_value = NULL; /* Return value */
FUNC_ENTER_NOAPI_NOINIT
@@ -148,7 +229,7 @@ H5MF_sect_simple_new(haddr_t sect_off, hsize_t sect_size)
sect->sect_info.size = sect_size;
/* Set the section's class & state */
- sect->sect_info.type = H5MF_FSPACE_SECT_SIMPLE;
+ sect->sect_info.type = ctype;
sect->sect_info.state = H5FS_SECT_LIVE;
/* Set return value */
@@ -156,13 +237,43 @@ H5MF_sect_simple_new(haddr_t sect_off, hsize_t sect_size)
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* end H5MF_sect_simple_new() */
+} /* end H5MF_sect_new() */
/*-------------------------------------------------------------------------
- * Function: H5MF_sect_simple_deserialize
+ * Function: H5MF_sect_free
*
- * Purpose: Deserialize a buffer into a "live" single section
+ * Purpose: Free a 'simple/small/large' section node
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Tuesday, January 8, 2008
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5MF_sect_free(H5FS_section_info_t *_sect)
+{
+ H5MF_free_section_t *sect = (H5MF_free_section_t *)_sect; /* File free section */
+
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ /* Check arguments. */
+ HDassert(sect);
+
+ /* Release the section */
+ sect = H5FL_FREE(H5MF_free_section_t, sect);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5MF_sect_free() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5MF_sect_deserialize
+ *
+ * Purpose: Deserialize a buffer into a "live" section
*
* Return: Success: non-negative
* Failure: negative
@@ -173,7 +284,7 @@ done:
*-------------------------------------------------------------------------
*/
static H5FS_section_info_t *
-H5MF_sect_simple_deserialize(const H5FS_section_class_t H5_ATTR_UNUSED *cls,
+H5MF_sect_deserialize(const H5FS_section_class_t *cls,
hid_t H5_ATTR_UNUSED dxpl_id, const uint8_t H5_ATTR_UNUSED *buf, haddr_t sect_addr,
hsize_t sect_size, unsigned H5_ATTR_UNUSED *des_flags)
{
@@ -183,11 +294,12 @@ H5MF_sect_simple_deserialize(const H5FS_section_class_t H5_ATTR_UNUSED *cls,
FUNC_ENTER_NOAPI_NOINIT
/* Check arguments. */
+ HDassert(cls);
HDassert(H5F_addr_defined(sect_addr));
HDassert(sect_size);
/* Create free space section for block */
- if(NULL == (sect = H5MF_sect_simple_new(sect_addr, sect_size)))
+ if(NULL == (sect = H5MF_sect_new(cls->type, sect_addr, sect_size)))
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, NULL, "can't initialize free space section")
/* Set return value */
@@ -195,10 +307,80 @@ H5MF_sect_simple_deserialize(const H5FS_section_class_t H5_ATTR_UNUSED *cls,
done:
FUNC_LEAVE_NOAPI(ret_value)
-} /* H5MF_sect_simple_deserialize() */
+} /* H5MF_sect_deserialize() */
/*-------------------------------------------------------------------------
+ * Function: H5MF_sect_valid
+ *
+ * Purpose: Check the validity of a section
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Quincey Koziol
+ * Tuesday, January 8, 2008
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5MF_sect_valid(const H5FS_section_class_t H5_ATTR_UNUSED *cls,
+ const H5FS_section_info_t
+#ifdef NDEBUG
+ H5_ATTR_UNUSED
+#endif /* NDEBUG */
+ *_sect, hid_t H5_ATTR_UNUSED dxpl_id)
+{
+#ifndef NDEBUG
+ const H5MF_free_section_t *sect = (const H5MF_free_section_t *)_sect; /* File free section */
+#endif /* NDEBUG */
+
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ /* Check arguments. */
+ HDassert(sect);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5MF_sect_valid() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5MF_sect_split
+ *
+ * Purpose: Split SECT into 2 sections: fragment for alignment & the aligned section
+ * SECT's addr and size are updated to point to the aligned section
+ *
+ * Return: Success: the fragment for aligning sect
+ * Failure: null
+ *
+ * Programmer: Vailin Choi, July 29, 2008
+ *
+ *-------------------------------------------------------------------------
+ */
+static H5FS_section_info_t *
+H5MF_sect_split(H5FS_section_info_t *sect, hsize_t frag_size)
+{
+ H5MF_free_section_t *ret_value; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+ /* Allocate space for new section */
+ if(NULL == (ret_value = H5MF_sect_new(sect->type, sect->addr, frag_size)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, NULL, "can't initialize free space section")
+
+ /* Set new section's info */
+ sect->addr += frag_size;
+ sect->size -= frag_size;
+
+done:
+ FUNC_LEAVE_NOAPI((H5FS_section_info_t *)ret_value)
+} /* end H5MF_sect_split() */
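
A tiny worked example of the split just defined: the fragment keeps the section's original start, and the section is advanced past it. The struct and values are arbitrary, for illustration only.

#include <stdint.h>
#include <assert.h>

typedef struct { uint64_t addr, size; } sect_sketch_t;  /* illustrative only */

/* Mirrors H5MF_sect_split(): carve frag_size bytes off the front of *sect */
static sect_sketch_t sect_split(sect_sketch_t *sect, uint64_t frag_size)
{
    sect_sketch_t frag = { sect->addr, frag_size };

    sect->addr += frag_size;
    sect->size -= frag_size;
    return frag;
}

int main(void)
{
    sect_sketch_t s = {1000, 96};
    sect_sketch_t frag = sect_split(&s, 24);

    assert(frag.addr == 1000 && frag.size == 24);   /* fragment for alignment */
    assert(s.addr == 1024 && s.size == 72);         /* aligned remainder      */
    return 0;
}
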
+
+/*
+ * "simple" section callbacks
+ */
+
+/*-------------------------------------------------------------------------
* Function: H5MF_sect_simple_can_merge
*
* Purpose: Can two sections of this type merge?
@@ -252,10 +434,10 @@ H5MF_sect_simple_can_merge(const H5FS_section_info_t *_sect1,
*-------------------------------------------------------------------------
*/
static herr_t
-H5MF_sect_simple_merge(H5FS_section_info_t *_sect1, H5FS_section_info_t *_sect2,
+H5MF_sect_simple_merge(H5FS_section_info_t **_sect1, H5FS_section_info_t *_sect2,
void H5_ATTR_UNUSED *_udata)
{
- H5MF_free_section_t *sect1 = (H5MF_free_section_t *)_sect1; /* File free section */
+ H5MF_free_section_t **sect1 = (H5MF_free_section_t **)_sect1; /* File free section */
H5MF_free_section_t *sect2 = (H5MF_free_section_t *)_sect2; /* File free section */
herr_t ret_value = SUCCEED; /* Return value */
@@ -263,16 +445,16 @@ H5MF_sect_simple_merge(H5FS_section_info_t *_sect1, H5FS_section_info_t *_sect2,
/* Check arguments. */
HDassert(sect1);
- HDassert(sect1->sect_info.type == H5MF_FSPACE_SECT_SIMPLE);
+ HDassert((*sect1)->sect_info.type == H5MF_FSPACE_SECT_SIMPLE);
HDassert(sect2);
HDassert(sect2->sect_info.type == H5MF_FSPACE_SECT_SIMPLE);
- HDassert(H5F_addr_eq(sect1->sect_info.addr + sect1->sect_info.size, sect2->sect_info.addr));
+ HDassert(H5F_addr_eq((*sect1)->sect_info.addr + (*sect1)->sect_info.size, sect2->sect_info.addr));
/* Add second section's size to first section */
- sect1->sect_info.size += sect2->sect_info.size;
+ (*sect1)->sect_info.size += sect2->sect_info.size;
/* Get rid of second section */
- if(H5MF_sect_simple_free((H5FS_section_info_t *)sect2) < 0)
+ if(H5MF_sect_free((H5FS_section_info_t *)sect2) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't free section node")
done:
@@ -293,7 +475,7 @@ done:
*
*-------------------------------------------------------------------------
*/
-htri_t
+static htri_t
H5MF_sect_simple_can_shrink(const H5FS_section_info_t *_sect, void *_udata)
{
const H5MF_free_section_t *sect = (const H5MF_free_section_t *)_sect; /* File free section */
@@ -392,7 +574,7 @@ done:
*
*-------------------------------------------------------------------------
*/
-herr_t
+static herr_t
H5MF_sect_simple_shrink(H5FS_section_info_t **_sect, void *_udata)
{
H5MF_free_section_t **sect = (H5MF_free_section_t **)_sect; /* File free section */
@@ -411,8 +593,8 @@ H5MF_sect_simple_shrink(H5FS_section_info_t **_sect, void *_udata)
/* Sanity check */
HDassert(H5F_INTENT(udata->f) & H5F_ACC_RDWR);
- /* Release section's space at EOA with file driver */
- if(H5FD_free(udata->f->shared->lf, udata->dxpl_id, udata->alloc_type, udata->f, (*sect)->sect_info.addr, (*sect)->sect_info.size) < 0)
+ /* Release section's space at EOA */
+ if(H5F_free(udata->f, udata->dxpl_id, udata->alloc_type, (*sect)->sect_info.addr, (*sect)->sect_info.size) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTFREE, FAIL, "driver free request failed")
} /* end if */
else {
@@ -427,7 +609,7 @@ H5MF_sect_simple_shrink(H5FS_section_info_t **_sect, void *_udata)
/* Check for freeing section */
if(udata->shrink != H5MF_SHRINK_SECT_ABSORB_AGGR) {
/* Free section */
- if(H5MF_sect_simple_free((H5FS_section_info_t *)*sect) < 0)
+ if(H5MF_sect_free((H5FS_section_info_t *)*sect) < 0)
HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't free simple section node")
/* Mark section as freed, for free space manager */
@@ -438,100 +620,474 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* H5MF_sect_simple_shrink() */
+/*
+ * "small" section callbacks
+ */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5MF_sect_small_add
+ *
+ * Purpose: Perform actions on a small "meta" section before adding it to the free space manager:
+ * 1) Drop the section if it is at page end and its size <= page end threshold
+ * 2) Adjust section size to include page end threshold if
+ * (section size + threshold) is at page end
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi; Dec 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5MF_sect_small_add(H5FS_section_info_t **_sect, unsigned *flags, void *_udata)
+{
+ H5MF_free_section_t **sect = (H5MF_free_section_t **)_sect; /* File free section */
+ H5MF_sect_ud_t *udata = (H5MF_sect_ud_t *)_udata; /* User data for callback */
+ haddr_t sect_end;
+ hsize_t rem, prem;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+#ifdef H5MF_ALLOC_DEBUG_MORE
+HDfprintf(stderr, "%s: Entering, section {%a, %Hu}\n", FUNC, (*sect)->sect_info.addr, (*sect)->sect_info.size);
+#endif /* H5MF_ALLOC_DEBUG_MORE */
+
+ /* Do not adjust sections for raw data or global heap data */
+ if(udata->alloc_type == H5FD_MEM_DRAW || udata->alloc_type == H5FD_MEM_GHEAP)
+ HGOTO_DONE(ret_value);
+
+ sect_end = (*sect)->sect_info.addr + (*sect)->sect_info.size;
+ rem = sect_end % udata->f->shared->fs_page_size;
+ prem = udata->f->shared->fs_page_size - rem;
+
+ /* Drop the section if it is at page end and its size is <= pgend threshold */
+ if(!rem && (*sect)->sect_info.size <= H5F_PGEND_META_THRES(udata->f) && (*flags & H5FS_ADD_RETURNED_SPACE)) {
+ if(H5MF_sect_free((H5FS_section_info_t *)(*sect)) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't free section node")
+ *sect = NULL;
+ *flags &= (unsigned)~H5FS_ADD_RETURNED_SPACE;
+ *flags |= H5FS_PAGE_END_NO_ADD;
+#ifdef H5MF_ALLOC_DEBUG_MORE
+HDfprintf(stderr, "%s: section is dropped\n", FUNC);
+#endif /* H5MF_ALLOC_DEBUG_MORE */
+ } /* end if */
+ /* Adjust the section if it is not at page end but its size + pgend threshold is at page end */
+ else
+ if(prem <= H5F_PGEND_META_THRES(udata->f)) {
+ (*sect)->sect_info.size += prem;
+#ifdef H5MF_ALLOC_DEBUG_MORE
+HDfprintf(stderr, "%s: section is adjusted {%a, %Hu}\n", FUNC, (*sect)->sect_info.addr, (*sect)->sect_info.size);
+#endif /* H5MF_ALLOC_DEBUG_MORE */
+ } /* end if */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5MF_sect_small_add() */
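
To make the two page-end rules in H5MF_sect_small_add() concrete, here is a standalone sketch with a 4096-byte page and an 8-byte page-end threshold; both values are examples, not defaults taken from the library, and the raw-data/global-heap exemption and flag handling of the real callback are ignored.

#include <stdint.h>
#include <stdio.h>

/* Decide what the small-section "add" rule would do with a metadata section.
 * Returns 0 = keep as-is, 1 = drop the section, 2 = extend it to the page end. */
static int small_add_action(uint64_t addr, uint64_t *size,
                            uint64_t page_size, uint64_t pgend_thres)
{
    uint64_t sect_end = addr + *size;
    uint64_t rem  = sect_end % page_size;   /* distance past the last page boundary */
    uint64_t prem = page_size - rem;        /* distance to the next page boundary   */

    if(rem == 0 && *size <= pgend_thres)
        return 1;                           /* ends exactly at a page end and is tiny: drop */
    if(prem <= pgend_thres) {
        *size += prem;                      /* close enough to the page end: absorb the gap */
        return 2;
    }
    return 0;
}

int main(void)
{
    uint64_t size1 = 6, size2 = 30;

    printf("%d\n", small_add_action(4090, &size1, 4096, 8));  /* 1: ends at 4096, size 6 <= 8 */
    printf("%d\n", small_add_action(4060, &size2, 4096, 8));  /* 2: 6 bytes short of 4096 ... */
    printf("%llu\n", (unsigned long long)size2);              /* ... grows to 36              */
    return 0;
}
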
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5MF_sect_small_can_shrink
+ *
+ * Purpose: Can this section shrink the container?
+ *
+ * Note: A small section is allowed to shrink only at closing.
+ *
+ * Return: Success: non-negative (TRUE/FALSE)
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi; Dec 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+static htri_t
+H5MF_sect_small_can_shrink(const H5FS_section_info_t *_sect, void *_udata)
+{
+ const H5MF_free_section_t *sect = (const H5MF_free_section_t *)_sect; /* File free section */
+ H5MF_sect_ud_t *udata = (H5MF_sect_ud_t *)_udata; /* User data for callback */
+ haddr_t eoa; /* End of address space in the file */
+ haddr_t end; /* End of section to extend */
+ htri_t ret_value = FALSE; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+ /* Check arguments. */
+ HDassert(sect);
+ HDassert(udata);
+ HDassert(udata->f);
+
+ /* Retrieve the end of the file's address space */
+ if(HADDR_UNDEF == (eoa = H5FD_get_eoa(udata->f->shared->lf, udata->alloc_type)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "driver get_eoa request failed")
+
+ /* Compute address of end of section to check */
+ end = sect->sect_info.addr + sect->sect_info.size;
+
+ /* Check if the section is exactly at the end of the allocated space in the file */
+ if(H5F_addr_eq(end, eoa) && sect->sect_info.size == udata->f->shared->fs_page_size) {
+ udata->shrink = H5MF_SHRINK_EOA;
+
+#ifdef H5MF_ALLOC_DEBUG_MORE
+HDfprintf(stderr, "%s: section {%a, %Hu}, shrinks file, eoa = %a\n", FUNC, sect->sect_info.addr, sect->sect_info.size, eoa);
+#endif /* H5MF_ALLOC_DEBUG_MORE */
+
+ /* Indicate shrinking can occur */
+ HGOTO_DONE(TRUE)
+ } /* end if */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5MF_sect_small_can_shrink() */
+
/*-------------------------------------------------------------------------
- * Function: H5MF_sect_simple_free
+ * Function: H5MF_sect_small_shrink
*
- * Purpose: Free a 'single' section node
+ * Purpose: Shrink container with section
*
* Return: Success: non-negative
- * Failure: negative
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi; Dec 2012
*
- * Programmer: Quincey Koziol
- * Tuesday, January 8, 2008
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5MF_sect_small_shrink(H5FS_section_info_t **_sect, void *_udata)
+{
+ H5MF_free_section_t **sect = (H5MF_free_section_t **)_sect; /* File free section */
+ H5MF_sect_ud_t *udata = (H5MF_sect_ud_t *)_udata; /* User data for callback */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+ /* Check arguments. */
+ HDassert(sect);
+ HDassert((*sect)->sect_info.type == H5MF_FSPACE_SECT_SMALL);
+ HDassert(udata);
+ HDassert(udata->f);
+ HDassert(udata->shrink == H5MF_SHRINK_EOA);
+ HDassert(H5F_INTENT(udata->f) & H5F_ACC_RDWR);
+
+ /* Release section's space at EOA */
+ if(H5F_free(udata->f, udata->dxpl_id, udata->alloc_type, (*sect)->sect_info.addr, (*sect)->sect_info.size) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTFREE, FAIL, "driver free request failed")
+
+ /* Free section */
+ if(H5MF_sect_free((H5FS_section_info_t *)*sect) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't free simple section node")
+
+ /* Mark section as freed, for free space manager */
+ *sect = NULL;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5MF_sect_small_shrink() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5MF_sect_small_can_merge
+ *
+ * Purpose: Can two sections of this type merge?
+ *
+ * Note: Second section must be "after" first section
+ * The "merged" section cannot cross a page boundary.
+ *
+ * Return: Success: non-negative (TRUE/FALSE)
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi; Dec 2012
*
*-------------------------------------------------------------------------
*/
-herr_t
-H5MF_sect_simple_free(H5FS_section_info_t *_sect)
+static htri_t
+H5MF_sect_small_can_merge(const H5FS_section_info_t *_sect1,
+ const H5FS_section_info_t *_sect2, void *_udata)
{
- H5MF_free_section_t *sect = (H5MF_free_section_t *)_sect; /* File free section */
+ const H5MF_free_section_t *sect1 = (const H5MF_free_section_t *)_sect1; /* File free section */
+ const H5MF_free_section_t *sect2 = (const H5MF_free_section_t *)_sect2; /* File free section */
+ H5MF_sect_ud_t *udata = (H5MF_sect_ud_t *)_udata; /* User data for callback */
+ htri_t ret_value = FALSE; /* Return value */
FUNC_ENTER_NOAPI_NOINIT_NOERR
/* Check arguments. */
- HDassert(sect);
+ HDassert(sect1);
+ HDassert(sect2);
+ HDassert(sect1->sect_info.type == sect2->sect_info.type); /* Checks "MERGE_SYM" flag */
+ HDassert(H5F_addr_lt(sect1->sect_info.addr, sect2->sect_info.addr));
- /* Release the section */
- sect = H5FL_FREE(H5MF_free_section_t, sect);
+ /* Check if second section adjoins first section */
+ ret_value = H5F_addr_eq(sect1->sect_info.addr + sect1->sect_info.size, sect2->sect_info.addr);
+ if(ret_value > 0)
+ /* If they are on different pages, they cannot be merged */
+ if((sect1->sect_info.addr / udata->f->shared->fs_page_size) != (((sect2->sect_info.addr + sect2->sect_info.size - 1) / udata->f->shared->fs_page_size)))
+ ret_value = FALSE;
- FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5MF_sect_simple_free() */
+#ifdef H5MF_ALLOC_DEBUG_MORE
+HDfprintf(stderr, "%s: Leaving: ret_value = %t\n", FUNC, ret_value);
+#endif /* H5MF_ALLOC_DEBUG_MORE */
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5MF_sect_small_can_merge() */
/*-------------------------------------------------------------------------
- * Function: H5MF_sect_simple_valid
+ * Function: H5MF_sect_small_merge
*
- * Purpose: Check the validity of a section
+ * Purpose: Merge two sections of this type
+ *
+ * Note: Second section always merges into first node.
+ * If the size of the "merged" section is equal to file space page size,
+ * free the section.
*
* Return: Success: non-negative
* Failure: negative
*
- * Programmer: Quincey Koziol
- * Tuesday, January 8, 2008
+ * Programmer: Vailin Choi; Dec 2012
*
*-------------------------------------------------------------------------
*/
static herr_t
-H5MF_sect_simple_valid(const H5FS_section_class_t H5_ATTR_UNUSED *cls,
- const H5FS_section_info_t
-#ifdef NDEBUG
- H5_ATTR_UNUSED
-#endif /* NDEBUG */
- *_sect, hid_t H5_ATTR_UNUSED dxpl_id)
+H5MF_sect_small_merge(H5FS_section_info_t **_sect1, H5FS_section_info_t *_sect2,
+ void *_udata)
{
-#ifndef NDEBUG
- const H5MF_free_section_t *sect = (const H5MF_free_section_t *)_sect; /* File free section */
-#endif /* NDEBUG */
+ H5MF_free_section_t **sect1 = (H5MF_free_section_t **)_sect1; /* File free section */
+ H5MF_free_section_t *sect2 = (H5MF_free_section_t *)_sect2; /* File free section */
+ H5MF_sect_ud_t *udata = (H5MF_sect_ud_t *)_udata; /* User data for callback */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+ /* Check arguments. */
+ HDassert(sect1);
+ HDassert((*sect1)->sect_info.type == H5MF_FSPACE_SECT_SMALL);
+ HDassert(sect2);
+ HDassert(sect2->sect_info.type == H5MF_FSPACE_SECT_SMALL);
+ HDassert(H5F_addr_eq((*sect1)->sect_info.addr + (*sect1)->sect_info.size, sect2->sect_info.addr));
+
+ /* Add second section's size to first section */
+ (*sect1)->sect_info.size += sect2->sect_info.size;
+
+ if((*sect1)->sect_info.size == udata->f->shared->fs_page_size) {
+ if(H5MF_xfree(udata->f, udata->alloc_type, udata->dxpl_id, (*sect1)->sect_info.addr, (*sect1)->sect_info.size) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTFREE, FAIL, "can't free merged section")
+
+ /* Need to free possible metadata page in the PB cache */
+ /* This is in response to the data corruption bug from fheap.c with page buffering + page strategy */
+ /* Note: Large metadata page bypasses the PB cache */
+ /* Note: Update of raw data page (large or small sized) is handled by the PB cache */
+ if(udata->f->shared->page_buf != NULL && udata->alloc_type != H5FD_MEM_DRAW)
+ if(H5PB_remove_entry(udata->f, (*sect1)->sect_info.addr) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTFREE, FAIL, "can't free merged section")
+
+ if(H5MF_sect_free((H5FS_section_info_t *)(*sect1)) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't free section node")
+ *sect1 = NULL;
+ } /* end if */
+
+ /* Get rid of second section */
+ if(H5MF_sect_free((H5FS_section_info_t *)sect2) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't free section node")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5MF_sect_small_merge() */
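
The page-boundary guard in H5MF_sect_small_can_merge() above can be hard to read inline; a sketch of the same test follows, assuming a 4096-byte page size and plain integer addresses (illustrative only).

#include <stdint.h>

/* Two adjoining "small" sections may merge only if the merged range stays
 * within one file-space page (compare the page of sect1's start with the
 * page of sect2's last byte). */
static int small_can_merge(uint64_t addr1, uint64_t size1,
                           uint64_t addr2, uint64_t size2, uint64_t page_size)
{
    if(addr1 + size1 != addr2)              /* must be adjacent */
        return 0;
    return (addr1 / page_size) == ((addr2 + size2 - 1) / page_size);
}

/* e.g. {4000, 64} + {4064, 16}: both end on page 0  -> may merge;
 *      {4096, 64} + {4160, 16}: both on page 1      -> may merge;
 *      {4000, 96} + {4096, 16}: pages 0 and 1       -> must not merge. */
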
+
+/*
+ * "Large" section callbacks
+ */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5MF_sect_large_can_merge (same as H5MF_sect_simple_can_merge)
+ *
+ * Purpose: Can two sections of this type merge?
+ *
+ * Note: Second section must be "after" first section
+ *
+ * Return: Success: non-negative (TRUE/FALSE)
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi; Dec 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+static htri_t
+H5MF_sect_large_can_merge(const H5FS_section_info_t *_sect1,
+ const H5FS_section_info_t *_sect2, void H5_ATTR_UNUSED *_udata)
+{
+ const H5MF_free_section_t *sect1 = (const H5MF_free_section_t *)_sect1; /* File free section */
+ const H5MF_free_section_t *sect2 = (const H5MF_free_section_t *)_sect2; /* File free section */
+ htri_t ret_value = FALSE; /* Return value */
FUNC_ENTER_NOAPI_NOINIT_NOERR
/* Check arguments. */
+ HDassert(sect1);
+ HDassert(sect2);
+ HDassert(sect1->sect_info.type == sect2->sect_info.type); /* Checks "MERGE_SYM" flag */
+ HDassert(H5F_addr_lt(sect1->sect_info.addr, sect2->sect_info.addr));
+
+ ret_value = H5F_addr_eq(sect1->sect_info.addr + sect1->sect_info.size, sect2->sect_info.addr);
+
+#ifdef H5MF_ALLOC_DEBUG_MORE
+HDfprintf(stderr, "%s: Leaving: ret_value = %t\n", FUNC, ret_value);
+#endif /* H5MF_ALLOC_DEBUG_MORE */
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5MF_sect_large_can_merge() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5MF_sect_large_merge (same as H5MF_sect_simple_merge)
+ *
+ * Purpose: Merge two sections of this type
+ *
+ * Note: Second section always merges into first node
+ *
+ * Return: Success: non-negative
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi; Dec 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5MF_sect_large_merge(H5FS_section_info_t **_sect1, H5FS_section_info_t *_sect2,
+ void H5_ATTR_UNUSED *_udata)
+{
+ H5MF_free_section_t **sect1 = (H5MF_free_section_t **)_sect1; /* File free section */
+ H5MF_free_section_t *sect2 = (H5MF_free_section_t *)_sect2; /* File free section */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+ /* Check arguments. */
+ HDassert(sect1);
+ HDassert((*sect1)->sect_info.type == H5MF_FSPACE_SECT_LARGE);
+ HDassert(sect2);
+ HDassert(sect2->sect_info.type == H5MF_FSPACE_SECT_LARGE);
+ HDassert(H5F_addr_eq((*sect1)->sect_info.addr + (*sect1)->sect_info.size, sect2->sect_info.addr));
+
+ /* Add second section's size to first section */
+ (*sect1)->sect_info.size += sect2->sect_info.size;
+
+ /* Get rid of second section */
+ if(H5MF_sect_free((H5FS_section_info_t *)sect2) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't free section node")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5MF_sect_large_merge() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5MF_sect_large_can_shrink
+ *
+ * Purpose: Can this section shrink the container?
+ *
+ * Return: Success: non-negative (TRUE/FALSE)
+ * Failure: negative
+ *
+ * Programmer: Vailin Choi; Dec 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+static htri_t
+H5MF_sect_large_can_shrink(const H5FS_section_info_t *_sect, void *_udata)
+{
+ const H5MF_free_section_t *sect = (const H5MF_free_section_t *)_sect; /* File free section */
+ H5MF_sect_ud_t *udata = (H5MF_sect_ud_t *)_udata; /* User data for callback */
+ haddr_t eoa; /* End of address space in the file */
+ haddr_t end; /* End of section to extend */
+ htri_t ret_value = FALSE; /* Return value */
+
+ FUNC_ENTER_NOAPI_NOINIT
+
+ /* Check arguments. */
HDassert(sect);
+ HDassert(sect->sect_info.type == H5MF_FSPACE_SECT_LARGE);
+ HDassert(udata);
+ HDassert(udata->f);
- FUNC_LEAVE_NOAPI(SUCCEED)
-} /* H5MF_sect_simple_valid() */
+ /* Retrieve the end of the file's address space */
+ if(HADDR_UNDEF == (eoa = H5FD_get_eoa(udata->f->shared->lf, udata->alloc_type)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTGET, FAIL, "driver get_eoa request failed")
+
+ /* Compute address of end of section to check */
+ end = sect->sect_info.addr + sect->sect_info.size;
+
+ /* Check if the section is exactly at the end of the allocated space in the file */
+ if(H5F_addr_eq(end, eoa) && sect->sect_info.size >= udata->f->shared->fs_page_size) {
+ /* Set the shrinking type */
+ udata->shrink = H5MF_SHRINK_EOA;
+#ifdef H5MF_ALLOC_DEBUG_MORE
+HDfprintf(stderr, "%s: section {%a, %Hu}, shrinks file, eoa = %a\n", FUNC, sect->sect_info.addr, sect->sect_info.size, eoa);
+#endif /* H5MF_ALLOC_DEBUG_MORE */
+ /* Indicate shrinking can occur */
+ HGOTO_DONE(TRUE)
+ } /* end if */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5MF_sect_large_can_shrink() */
+
/*-------------------------------------------------------------------------
- * Function: H5MF_sect_simple_split
+ * Function: H5MF_sect_large_shrink
*
- * Purpose: Split SECT into 2 sections: fragment for alignment & the aligned section
- * SECT's addr and size are updated to point to the aligned section
+ * Purpose: Shrink a large-sized section
*
- * Return: Success: the fragment for aligning sect
- * Failure: null
+ * Return: Success: non-negative
+ * Failure: negative
*
- * Programmer: Vailin Choi, July 29, 2008
+ * Programmer: Vailin Choi; Dec 2012
*
*-------------------------------------------------------------------------
*/
-static H5FS_section_info_t *
-H5MF_sect_simple_split(H5FS_section_info_t *sect, hsize_t frag_size)
+static herr_t
+H5MF_sect_large_shrink(H5FS_section_info_t **_sect, void *_udata)
{
- H5MF_free_section_t *ret_value = NULL; /* Return value */
+ H5MF_free_section_t **sect = (H5MF_free_section_t **)_sect; /* File free section */
+ H5MF_sect_ud_t *udata = (H5MF_sect_ud_t *)_udata; /* User data for callback */
+ hsize_t frag_size = 0; /* Fragment size */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT
- /* Allocate space for new section */
- if(NULL == (ret_value = H5MF_sect_simple_new(sect->addr, frag_size)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_CANTALLOC, NULL, "can't initialize free space section")
+ /* Check arguments. */
+ HDassert(sect);
+ HDassert((*sect)->sect_info.type == H5MF_FSPACE_SECT_LARGE);
+ HDassert(udata);
+ HDassert(udata->f);
+ HDassert(udata->shrink == H5MF_SHRINK_EOA);
+ HDassert(H5F_INTENT(udata->f) & H5F_ACC_RDWR);
+ HDassert(H5F_PAGED_AGGR(udata->f));
- /* Set new section's info */
- sect->addr += frag_size;
- sect->size -= frag_size;
+ /* Calculate possible mis-aligned fragment */
+ H5MF_EOA_MISALIGN(udata->f, (*sect)->sect_info.addr, udata->f->shared->fs_page_size, frag_size);
+
+ /* Free full pages from EOA */
+ /* Retain partial page in the free-space manager so as to keep EOA at page boundary */
+ if(H5F_free(udata->f, udata->dxpl_id, udata->alloc_type, (*sect)->sect_info.addr+frag_size, (*sect)->sect_info.size-frag_size) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTFREE, FAIL, "driver free request failed")
+
+ if(frag_size) /* Adjust section size for the partial page */
+ (*sect)->sect_info.size = frag_size;
+ else {
+ /* Free section */
+ if(H5MF_sect_free((H5FS_section_info_t *)*sect) < 0)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_CANTRELEASE, FAIL, "can't free simple section node")
+
+ /* Mark section as freed, for free space manager */
+ *sect = NULL;
+ } /* end else */
done:
- FUNC_LEAVE_NOAPI((H5FS_section_info_t *)ret_value)
-} /* end H5MF_sect_simple_split() */
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5MF_sect_large_shrink() */
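
Finally, a worked sketch of what H5MF_sect_large_shrink() does with a section that reaches EOA but does not start on a page boundary, assuming a zero file base address and a 4096-byte page size (both assumptions; names are illustrative).

#include <stdint.h>
#include <stdio.h>

/* Given a "large" section [addr, addr+size) ending at EOA, free only the
 * whole pages at the tail and keep the leading partial page in the section,
 * so that EOA stays on a page boundary afterwards. */
static void large_shrink(uint64_t addr, uint64_t size, uint64_t page_size,
                         uint64_t *free_addr, uint64_t *free_size, uint64_t *kept_size)
{
    uint64_t m    = addr % page_size;                     /* offset into the current page */
    uint64_t frag = (addr > 0 && m) ? page_size - m : 0;  /* bytes up to the next page    */

    *free_addr = addr + frag;          /* first whole page to release              */
    *free_size = size - frag;          /* full pages released from EOA             */
    *kept_size = frag;                 /* partial page retained in the FSM (0 if   */
                                       /* the section already starts on a page)    */
}

int main(void)
{
    uint64_t fa, fs, ks;

    large_shrink(10000, 10480, 4096, &fa, &fs, &ks);
    printf("free [%llu, +%llu), keep %llu\n",
           (unsigned long long)fa, (unsigned long long)fs, (unsigned long long)ks);
    /* free [12288, +8192), keep 2288 -> new EOA is 12288, a page boundary */
    return 0;
}
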
diff --git a/src/H5MM.c b/src/H5MM.c
index daef7b1..ee3b28f 100644
--- a/src/H5MM.c
+++ b/src/H5MM.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5MMprivate.h b/src/H5MMprivate.h
index 14bd28d..0524601 100644
--- a/src/H5MMprivate.h
+++ b/src/H5MMprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5MMpublic.h b/src/H5MMpublic.h
index bfcb807..4e54c33 100644
--- a/src/H5MMpublic.h
+++ b/src/H5MMpublic.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5MP.c b/src/H5MP.c
index 50fc598..8c9b411 100644
--- a/src/H5MP.c
+++ b/src/H5MP.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5MPmodule.h b/src/H5MPmodule.h
index ca6c405..27f7706 100644
--- a/src/H5MPmodule.h
+++ b/src/H5MPmodule.h
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5MPpkg.h b/src/H5MPpkg.h
index e724146..29a25fa 100644
--- a/src/H5MPpkg.h
+++ b/src/H5MPpkg.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5MPprivate.h b/src/H5MPprivate.h
index 3fa312c..009cb50 100644
--- a/src/H5MPprivate.h
+++ b/src/H5MPprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5MPtest.c b/src/H5MPtest.c
index 3f218db..b3f2e24 100644
--- a/src/H5MPtest.c
+++ b/src/H5MPtest.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Quincey Koziol <koziol@ncsa.uiuc.edu>
diff --git a/src/H5O.c b/src/H5O.c
index 0a4e4eb..152e6cc 100644
--- a/src/H5O.c
+++ b/src/H5O.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -129,12 +127,13 @@ const H5O_msg_class_t *const H5O_msg_class_g[] = {
H5O_MSG_DRVINFO, /*0x0014 Driver info settings */
H5O_MSG_AINFO, /*0x0015 Attribute information */
H5O_MSG_REFCOUNT, /*0x0016 Object's ref. count */
- H5O_MSG_FSINFO, /*0x0017 Free-space manager info message */
- H5O_MSG_UNKNOWN, /*0x0018 Placeholder for unknown message */
+ H5O_MSG_FSINFO, /*0x0017 Free-space manager info */
+ H5O_MSG_MDCI, /*0x0018 Metadata cache image */
+ H5O_MSG_UNKNOWN, /*0x0019 Placeholder for unknown message */
#ifdef H5O_ENABLE_BOGUS
- H5O_MSG_BOGUS_INVALID, /*0x0019 "Bogus invalid" (for testing) */
+ H5O_MSG_BOGUS_INVALID, /*0x001A "Bogus invalid" (for testing) */
#else /* H5O_ENABLE_BOGUS */
- NULL, /*0x0019 "Bogus invalid" (for testing) */
+ NULL, /*0x001A "Bogus invalid" (for testing) */
#endif /* H5O_ENABLE_BOGUS */
};
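Editorial note: the hunk above renumbers the trailing message IDs to make room for the new metadata cache image entry. A sketch of the resulting numbering, with the IDs taken from the table and the constant names invented purely for illustration:

    /* Illustrative constants only -- not the HDF5 macros. */
    enum example_msg_ids {
        EX_REFCOUNT_ID      = 0x0016,  /* Object's ref. count             */
        EX_FSINFO_ID        = 0x0017,  /* Free-space manager info         */
        EX_MDCI_ID          = 0x0018,  /* Metadata cache image (new)      */
        EX_UNKNOWN_ID       = 0x0019,  /* Placeholder for unknown message */
        EX_BOGUS_INVALID_ID = 0x001A   /* "Bogus invalid" (testing only)  */
    };
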
diff --git a/src/H5Oainfo.c b/src/H5Oainfo.c
index 44c6611..d8298a4 100644
--- a/src/H5Oainfo.c
+++ b/src/H5Oainfo.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Oalloc.c b/src/H5Oalloc.c
index 4f98cfa..3512d3e 100644
--- a/src/H5Oalloc.c
+++ b/src/H5Oalloc.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -2023,6 +2021,7 @@ H5O_merge_null(H5F_t *f, hid_t dxpl_id, H5O_t *oh)
/* Second message has been merged, delete it */
if(merged_msg) {
H5O_chunk_proxy_t *curr_chk_proxy; /* Chunk that message is in */
+ htri_t result;
/* Release any information/memory for second message */
H5O_msg_free_mesg(curr_msg2);
@@ -2050,6 +2049,13 @@ H5O_merge_null(H5F_t *f, hid_t dxpl_id, H5O_t *oh)
/* (Don't bother reducing size of message array for now -QAK) */
oh->nmesgs--;
+ /* The merged null message might span the entire chunk: scan for an empty chunk to remove */
+ if((result = H5O_remove_empty_chunks(f, dxpl_id, oh)) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTPACK, FAIL, "can't remove empty chunk")
+ else if(result > 0)
+ /* Get out of loop */
+ break;
+
/* If the merged message is too large, shrink the chunk */
if(curr_msg->raw_size >= H5O_MESG_MAX_SIZE)
if(H5O_alloc_shrink_chunk(f, dxpl_id, oh, curr_msg->chunkno) < 0)
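Editorial note: the `result` check in the hunk above relies on HDF5's three-state htri_t convention (negative = error, zero = false, positive = true). A minimal sketch of that convention, with a hypothetical stub standing in for H5O_remove_empty_chunks():

    #include <stdio.h>

    typedef int htri_example_t;   /* stand-in for HDF5's htri_t */

    /* Hypothetical stub: <0 error, 0 nothing removed, >0 an empty chunk removed */
    static htri_example_t
    remove_empty_chunks_stub(int have_empty_chunk)
    {
        return have_empty_chunk ? 1 : 0;
    }

    int
    main(void)
    {
        htri_example_t result = remove_empty_chunks_stub(1);

        if(result < 0)
            printf("error: would take the HGOTO_ERROR path\n");
        else if(result > 0)
            printf("an empty chunk was removed: leave the merge loop\n");
        else
            printf("nothing removed: keep scanning messages\n");
        return 0;
    }
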
diff --git a/src/H5Oattr.c b/src/H5Oattr.c
index 064c4cb..cb802ea 100644
--- a/src/H5Oattr.c
+++ b/src/H5Oattr.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#define H5A_FRIEND /*suppress error about including H5Apkg */
diff --git a/src/H5Oattribute.c b/src/H5Oattribute.c
index 71f54eb..2223564 100644
--- a/src/H5Oattribute.c
+++ b/src/H5Oattribute.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Obogus.c b/src/H5Obogus.c
index d1085c8..a3531ed 100644
--- a/src/H5Obogus.c
+++ b/src/H5Obogus.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Obtreek.c b/src/H5Obtreek.c
index ac6fe37..4fd0577 100644
--- a/src/H5Obtreek.c
+++ b/src/H5Obtreek.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
diff --git a/src/H5Ocache.c b/src/H5Ocache.c
index d398e41..8f4c155 100644
--- a/src/H5Ocache.c
+++ b/src/H5Ocache.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -84,6 +82,10 @@ static herr_t H5O__cache_chk_serialize(const H5F_t *f, void *image, size_t len,
static herr_t H5O__cache_chk_notify(H5AC_notify_action_t action, void *_thing);
static herr_t H5O__cache_chk_free_icr(void *thing);
+/* Prefix routines */
+static herr_t H5O__prefix_deserialize(const uint8_t *image,
+ H5O_cache_ud_t *udata);
+
/* Chunk routines */
static herr_t H5O__chunk_deserialize(H5O_t *oh, haddr_t addr, size_t len,
const uint8_t *image, H5O_common_cache_ud_t *udata, hbool_t *dirty);
@@ -198,13 +200,11 @@ H5O__cache_get_initial_load_size(void H5_ATTR_UNUSED *_udata, size_t *image_len)
*-------------------------------------------------------------------------
*/
static herr_t
-H5O__cache_get_final_load_size(const void *_image, size_t image_len,
+H5O__cache_get_final_load_size(const void *image, size_t image_len,
void *_udata, size_t *actual_len)
{
- const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
H5O_cache_ud_t *udata = (H5O_cache_ud_t *)_udata; /* User data for callback */
- H5O_t *oh = NULL; /* Object header read in */
- htri_t ret_value = SUCCEED; /* Return value */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
@@ -214,136 +214,17 @@ H5O__cache_get_final_load_size(const void *_image, size_t image_len,
HDassert(actual_len);
HDassert(*actual_len == image_len);
- /* Allocate space for the new object header data structure */
- if(NULL == (oh = H5FL_CALLOC(H5O_t)))
- HGOTO_ERROR(H5E_OHDR, H5E_CANTALLOC, FAIL, "memory allocation failed")
-
- /* File-specific, non-stored information */
- oh->sizeof_size = H5F_SIZEOF_SIZE(udata->common.f);
- oh->sizeof_addr = H5F_SIZEOF_ADDR(udata->common.f);
-
- /* Check for presence of magic number */
- /* (indicates version 2 or later) */
- if(!HDmemcmp(image, H5O_HDR_MAGIC, (size_t)H5_SIZEOF_MAGIC)) {
- /* Magic number */
- image += H5_SIZEOF_MAGIC;
-
- /* Version */
- oh->version = *image++;
- if(H5O_VERSION_2 != oh->version)
- HGOTO_ERROR(H5E_OHDR, H5E_VERSION, FAIL, "bad object header version number")
-
- /* Flags */
- oh->flags = *image++;
- if(oh->flags & ~H5O_HDR_ALL_FLAGS)
- HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "unknown object header status flag(s)")
-
- /* Number of links to object (unless overridden by refcount message) */
- oh->nlink = 1;
-
- /* Time fields */
- if(oh->flags & H5O_HDR_STORE_TIMES) {
- uint32_t tmp; /* Temporary value */
-
- UINT32DECODE(image, tmp);
- oh->atime = (time_t)tmp;
- UINT32DECODE(image, tmp);
- oh->mtime = (time_t)tmp;
- UINT32DECODE(image, tmp);
- oh->ctime = (time_t)tmp;
- UINT32DECODE(image, tmp);
- oh->btime = (time_t)tmp;
- } /* end if */
- else
- oh->atime = oh->mtime = oh->ctime = oh->btime = 0;
-
- /* Attribute fields */
- if(oh->flags & H5O_HDR_ATTR_STORE_PHASE_CHANGE) {
- UINT16DECODE(image, oh->max_compact);
- UINT16DECODE(image, oh->min_dense);
- if(oh->max_compact < oh->min_dense)
- HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "bad object header attribute phase change values")
- } /* end if */
- else {
- oh->max_compact = H5O_CRT_ATTR_MAX_COMPACT_DEF;
- oh->min_dense = H5O_CRT_ATTR_MIN_DENSE_DEF;
- } /* end else */
-
- /* First chunk size */
- switch(oh->flags & H5O_HDR_CHUNK0_SIZE) {
- case 0: /* 1 byte size */
- udata->chunk0_size = *image++;
- break;
-
- case 1: /* 2 byte size */
- UINT16DECODE(image, udata->chunk0_size);
- break;
-
- case 2: /* 4 byte size */
- UINT32DECODE(image, udata->chunk0_size);
- break;
-
- case 3: /* 8 byte size */
- UINT64DECODE(image, udata->chunk0_size);
- break;
-
- default:
- HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "bad size for chunk 0")
- } /* end switch */
- if(udata->chunk0_size > 0 && udata->chunk0_size < H5O_SIZEOF_MSGHDR_OH(oh))
- HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "bad object header chunk size")
- } /* end if */
- else {
- /* Version */
- oh->version = *image++;
- if(H5O_VERSION_1 != oh->version)
- HGOTO_ERROR(H5E_OHDR, H5E_VERSION, FAIL, "bad object header version number")
-
- /* Flags */
- oh->flags = H5O_CRT_OHDR_FLAGS_DEF;
-
- /* Reserved */
- image++;
-
- /* Number of messages */
- UINT16DECODE(image, udata->v1_pfx_nmesgs);
-
- /* Link count */
- UINT32DECODE(image, oh->nlink);
-
- /* Reset unused time fields */
- oh->atime = oh->mtime = oh->ctime = oh->btime = 0;
-
- /* Reset unused attribute fields */
- oh->max_compact = 0;
- oh->min_dense = 0;
-
- /* First chunk size */
- UINT32DECODE(image, udata->chunk0_size);
- if((udata->v1_pfx_nmesgs > 0 && udata->chunk0_size < H5O_SIZEOF_MSGHDR_OH(oh)) ||
- (udata->v1_pfx_nmesgs == 0 && udata->chunk0_size > 0))
- HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "bad object header chunk size")
-
- /* Reserved, in version 1 (for 8-byte alignment padding) */
- image += 4;
- } /* end else */
+ /* Deserialize the object header prefix */
+ if(H5O__prefix_deserialize((const uint8_t *)image, udata) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTDECODE, FAIL, "can't deserialize object header prefix")
- /* Determine object header prefix length */
- HDassert((size_t)((const uint8_t *)image - (const uint8_t *)_image) == (size_t)(H5O_SIZEOF_HDR(oh) - H5O_SIZEOF_CHKSUM_OH(oh)));
+ /* Sanity check */
+ HDassert(udata->oh);
/* Set the final size for the cache image */
- *actual_len = udata->chunk0_size + (size_t)H5O_SIZEOF_HDR(oh);
-
- /* Save the object header for later use in 'deserialize' callback */
- udata->oh = oh;
- oh = NULL;
+ *actual_len = udata->chunk0_size + (size_t)H5O_SIZEOF_HDR(udata->oh);
done:
- /* Release the [possibly partially initialized] object header on errors */
- if(ret_value < 0 && oh)
- if(H5O__free(oh) < 0)
- HDONE_ERROR(H5E_OHDR, H5E_CANTRELEASE, FAIL, "unable to destroy object header data")
-
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5O__cache_get_final_load_size() */
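Editorial note: after this refactor the callback only decodes the prefix (via H5O__prefix_deserialize()) and then widens the speculative read to cover the whole first chunk. A small arithmetic sketch, with both numbers assumed for illustration rather than taken from H5Opkg.h:

    #include <stdio.h>
    #include <stddef.h>

    int
    main(void)
    {
        size_t prefix_len  = 12;    /* stand-in for H5O_SIZEOF_HDR(udata->oh)       */
        size_t chunk0_size = 256;   /* as decoded from the prefix in the hunk above */

        /* The metadata cache re-reads the object header image at this length */
        size_t actual_len = chunk0_size + prefix_len;

        printf("actual_len = %zu bytes\n", actual_len);
        return 0;
    }
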
@@ -413,12 +294,11 @@ H5O__cache_verify_chksum(const void *_image, size_t len, void *_udata)
*-------------------------------------------------------------------------
*/
static void *
-H5O__cache_deserialize(const void *_image, size_t len, void *_udata,
+H5O__cache_deserialize(const void *image, size_t len, void *_udata,
hbool_t *dirty)
{
- H5O_t *oh; /* Object header read in */
+ H5O_t *oh = NULL; /* Object header read in */
H5O_cache_ud_t *udata = (H5O_cache_ud_t *)_udata; /* User data for callback */
- const uint8_t *image = (const uint8_t *)_image; /* Pointer into buffer to decode */
void * ret_value = NULL; /* Return value */
FUNC_ENTER_STATIC
@@ -427,11 +307,24 @@ H5O__cache_deserialize(const void *_image, size_t len, void *_udata,
HDassert(image);
HDassert(len > 0);
HDassert(udata);
- HDassert(udata->oh);
HDassert(udata->common.f);
HDassert(udata->common.cont_msg_info);
HDassert(dirty);
+ /* Check for partially deserialized object header */
+ /* (Object header prefix will be deserialized if the object header came
+ * through the 'get_final_load_size' callback and not deserialized if
+ * the object header is coming from a cache image - QAK, 2016/12/14)
+ */
+ if(NULL == udata->oh) {
+ /* Deserialize the object header prefix */
+ if(H5O__prefix_deserialize((const uint8_t *)image, udata) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTDECODE, NULL, "can't deserialize object header prefix")
+
+ /* Sanity check */
+ HDassert(udata->oh);
+ } /* end if */
+
/* Retrieve partially deserialized object header from user data */
oh = udata->oh;
@@ -448,7 +341,7 @@ H5O__cache_deserialize(const void *_image, size_t len, void *_udata,
oh->proxy = NULL;
/* Parse the first chunk */
- if(H5O__chunk_deserialize(oh, udata->common.addr, udata->chunk0_size, (const uint8_t *)_image, &(udata->common), dirty) < 0)
+ if(H5O__chunk_deserialize(oh, udata->common.addr, udata->chunk0_size, (const uint8_t *)image, &(udata->common), dirty) < 0)
HGOTO_ERROR(H5E_OHDR, H5E_CANTINIT, NULL, "can't deserialize first object header chunk")
/* Note that we've loaded the object header from the file */
@@ -705,6 +598,8 @@ H5O__cache_notify(H5AC_notify_action_t action, void *_thing)
case H5AC_NOTIFY_ACTION_CHILD_DIRTIED:
case H5AC_NOTIFY_ACTION_CHILD_CLEANED:
+ case H5AC_NOTIFY_ACTION_CHILD_UNSERIALIZED:
+ case H5AC_NOTIFY_ACTION_CHILD_SERIALIZED:
/* do nothing */
break;
@@ -717,11 +612,7 @@ H5O__cache_notify(H5AC_notify_action_t action, void *_thing)
break;
default:
-#ifdef NDEBUG
HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "unknown action from metadata cache")
-#else /* NDEBUG */
- HDassert(0 && "Unknown action?!?");
-#endif /* NDEBUG */
} /* end switch */
done:
@@ -1098,6 +989,8 @@ H5O__cache_chk_notify(H5AC_notify_action_t action, void *_thing)
case H5AC_NOTIFY_ACTION_CHILD_DIRTIED:
case H5AC_NOTIFY_ACTION_CHILD_CLEANED:
+ case H5AC_NOTIFY_ACTION_CHILD_UNSERIALIZED:
+ case H5AC_NOTIFY_ACTION_CHILD_SERIALIZED:
/* do nothing */
break;
@@ -1228,6 +1121,163 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5O__prefix_deserialize()
+ *
+ * Purpose: Deserialize an object header prefix
+ *
+ * Return: Success: SUCCEED
+ * Failure: FAIL
+ *
+ * Programmer: Quincey Koziol
+ * December 14, 2016
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5O__prefix_deserialize(const uint8_t *_image, H5O_cache_ud_t *udata)
+{
+ const uint8_t *image = (const uint8_t *)_image; /* Pointer into raw data buffer */
+ H5O_t *oh = NULL; /* Object header read in */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Check arguments */
+ HDassert(image);
+ HDassert(udata);
+
+ /* Allocate space for the new object header data structure */
+ if(NULL == (oh = H5FL_CALLOC(H5O_t)))
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTALLOC, FAIL, "memory allocation failed")
+
+ /* File-specific, non-stored information */
+ oh->sizeof_size = H5F_SIZEOF_SIZE(udata->common.f);
+ oh->sizeof_addr = H5F_SIZEOF_ADDR(udata->common.f);
+
+ /* Check for presence of magic number */
+ /* (indicates version 2 or later) */
+ if(!HDmemcmp(image, H5O_HDR_MAGIC, (size_t)H5_SIZEOF_MAGIC)) {
+ /* Magic number */
+ image += H5_SIZEOF_MAGIC;
+
+ /* Version */
+ oh->version = *image++;
+ if(H5O_VERSION_2 != oh->version)
+ HGOTO_ERROR(H5E_OHDR, H5E_VERSION, FAIL, "bad object header version number")
+
+ /* Flags */
+ oh->flags = *image++;
+ if(oh->flags & ~H5O_HDR_ALL_FLAGS)
+ HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "unknown object header status flag(s)")
+
+ /* Number of links to object (unless overridden by refcount message) */
+ oh->nlink = 1;
+
+ /* Time fields */
+ if(oh->flags & H5O_HDR_STORE_TIMES) {
+ uint32_t tmp; /* Temporary value */
+
+ UINT32DECODE(image, tmp);
+ oh->atime = (time_t)tmp;
+ UINT32DECODE(image, tmp);
+ oh->mtime = (time_t)tmp;
+ UINT32DECODE(image, tmp);
+ oh->ctime = (time_t)tmp;
+ UINT32DECODE(image, tmp);
+ oh->btime = (time_t)tmp;
+ } /* end if */
+ else
+ oh->atime = oh->mtime = oh->ctime = oh->btime = 0;
+
+ /* Attribute fields */
+ if(oh->flags & H5O_HDR_ATTR_STORE_PHASE_CHANGE) {
+ UINT16DECODE(image, oh->max_compact);
+ UINT16DECODE(image, oh->min_dense);
+ if(oh->max_compact < oh->min_dense)
+ HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "bad object header attribute phase change values")
+ } /* end if */
+ else {
+ oh->max_compact = H5O_CRT_ATTR_MAX_COMPACT_DEF;
+ oh->min_dense = H5O_CRT_ATTR_MIN_DENSE_DEF;
+ } /* end else */
+
+ /* First chunk size */
+ switch(oh->flags & H5O_HDR_CHUNK0_SIZE) {
+ case 0: /* 1 byte size */
+ udata->chunk0_size = *image++;
+ break;
+
+ case 1: /* 2 byte size */
+ UINT16DECODE(image, udata->chunk0_size);
+ break;
+
+ case 2: /* 4 byte size */
+ UINT32DECODE(image, udata->chunk0_size);
+ break;
+
+ case 3: /* 8 byte size */
+ UINT64DECODE(image, udata->chunk0_size);
+ break;
+
+ default:
+ HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "bad size for chunk 0")
+ } /* end switch */
+ if(udata->chunk0_size > 0 && udata->chunk0_size < H5O_SIZEOF_MSGHDR_OH(oh))
+ HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "bad object header chunk size")
+ } /* end if */
+ else {
+ /* Version */
+ oh->version = *image++;
+ if(H5O_VERSION_1 != oh->version)
+ HGOTO_ERROR(H5E_OHDR, H5E_VERSION, FAIL, "bad object header version number")
+
+ /* Flags */
+ oh->flags = H5O_CRT_OHDR_FLAGS_DEF;
+
+ /* Reserved */
+ image++;
+
+ /* Number of messages */
+ UINT16DECODE(image, udata->v1_pfx_nmesgs);
+
+ /* Link count */
+ UINT32DECODE(image, oh->nlink);
+
+ /* Reset unused time fields */
+ oh->atime = oh->mtime = oh->ctime = oh->btime = 0;
+
+ /* Reset unused attribute fields */
+ oh->max_compact = 0;
+ oh->min_dense = 0;
+
+ /* First chunk size */
+ UINT32DECODE(image, udata->chunk0_size);
+ if((udata->v1_pfx_nmesgs > 0 && udata->chunk0_size < H5O_SIZEOF_MSGHDR_OH(oh)) ||
+ (udata->v1_pfx_nmesgs == 0 && udata->chunk0_size > 0))
+ HGOTO_ERROR(H5E_OHDR, H5E_BADVALUE, FAIL, "bad object header chunk size")
+
+ /* Reserved, in version 1 (for 8-byte alignment padding) */
+ image += 4;
+ } /* end else */
+
+ /* Verify object header prefix length */
+ HDassert((size_t)(image - _image) == (size_t)(H5O_SIZEOF_HDR(oh) - H5O_SIZEOF_CHKSUM_OH(oh)));
+
+ /* Save the object header for later use in 'deserialize' callback */
+ udata->oh = oh;
+ oh = NULL;
+
+done:
+ /* Release the [possibly partially initialized] object header on errors */
+ if(ret_value < 0 && oh)
+ if(H5O__free(oh) < 0)
+ HDONE_ERROR(H5E_OHDR, H5E_CANTRELEASE, FAIL, "unable to destroy object header data")
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5O__prefix_deserialize() */
+
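Editorial note: the switch on (oh->flags & H5O_HDR_CHUNK0_SIZE) above selects how wide the stored "first chunk size" field is. A standalone sketch of that mapping; the flag values 0-3 and the 1/2/4/8-byte widths come from the switch itself, while the function and variable names are invented:

    #include <stdio.h>
    #include <stddef.h>

    static size_t
    chunk0_field_width(unsigned flag_bits)   /* oh->flags & H5O_HDR_CHUNK0_SIZE, 0..3 per the switch above */
    {
        switch(flag_bits) {
            case 0:  return 1;   /* 1-byte size field */
            case 1:  return 2;   /* 2-byte size field */
            case 2:  return 4;   /* 4-byte size field */
            case 3:  return 8;   /* 8-byte size field */
            default: return 0;   /* "bad size for chunk 0" in the real code */
        }
    }

    int
    main(void)
    {
        unsigned f;

        for(f = 0; f < 4; f++)
            printf("flag value %u -> %zu-byte chunk-0 size field\n",
                   f, chunk0_field_width(f));
        return 0;
    }
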
+
+/*-------------------------------------------------------------------------
* Function: H5O__chunk_deserialize
*
* Purpose: Deserialize a chunk for an object header
diff --git a/src/H5Ocache_image.c b/src/H5Ocache_image.c
new file mode 100644
index 0000000..29b2503
--- /dev/null
+++ b/src/H5Ocache_image.c
@@ -0,0 +1,377 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: H5Ocache_image.c
+ * June 21, 2015
+ * John Mainzer
+ *
+ * Purpose: A message indicating that a metadata cache image block
+ * of the indicated length exists at the specified offset
+ * in the HDF5 file.
+ *
+ * The mdci_msg only appears in the superblock extension.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#include "H5Omodule.h" /* This source code file is part of the H5O module */
+
+
+#include "H5private.h" /* Generic Functions */
+#include "H5Eprivate.h" /* Error handling */
+#include "H5Fprivate.h" /* Files */
+#include "H5FLprivate.h" /* Free Lists */
+#include "H5Opkg.h" /* Object headers */
+#include "H5MFprivate.h" /* File space management */
+
+/* Callbacks for message class */
+static void *H5O__mdci_decode(H5F_t *f, hid_t dxpl_id, H5O_t *open_oh,
+ unsigned mesg_flags, unsigned *ioflags, const uint8_t *p);
+static herr_t H5O__mdci_encode(H5F_t *f, hbool_t disable_shared,
+ uint8_t *p, const void *_mesg);
+static void *H5O__mdci_copy(const void *_mesg, void *_dest);
+static size_t H5O__mdci_size(const H5F_t *f, hbool_t disable_shared,
+ const void *_mesg);
+static herr_t H5O__mdci_free(void *mesg);
+static herr_t H5O__mdci_delete(H5F_t *f, hid_t dxpl_id, H5O_t *open_oh,
+ void *_mesg);
+static herr_t H5O__mdci_debug(H5F_t *f, hid_t dxpl_id, const void *_mesg,
+ FILE *stream, int indent, int fwidth);
+
+/* This message derives from H5O message class */
+const H5O_msg_class_t H5O_MSG_MDCI[1] = {{
+ H5O_MDCI_MSG_ID, /* message id number */
+ "mdci", /* message name for debugging */
+ sizeof(H5O_mdci_t), /* native message size */
+ 0, /* messages are sharable? */
+ H5O__mdci_decode, /* decode message */
+ H5O__mdci_encode, /* encode message */
+ H5O__mdci_copy, /* copy method */
+ H5O__mdci_size, /* size of mdc image message */
+ NULL, /* reset method */
+ H5O__mdci_free, /* free method */
+ H5O__mdci_delete, /* file delete method */
+ NULL, /* link method */
+ NULL, /* set share method */
+ NULL, /* can share method */
+ NULL, /* pre copy native value to file */
+ NULL, /* copy native value to file */
+ NULL, /* post copy native value to file */
+ NULL, /* get creation index */
+ NULL, /* set creation index */
+ H5O__mdci_debug /* debugging */
+}};
+
+/* Only one version of the metadata cache image message at present */
+#define H5O_MDCI_VERSION_0 0
+
+/* Declare the free list for H5O_mdci_t's */
+H5FL_DEFINE(H5O_mdci_t);
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5O__mdci_decode
+ *
+ * Purpose: Decode a metadata cache image message and return a
+ * pointer to a newly allocated H5O_mdci_t struct.
+ *
+ * Return: Success: Ptr to new message in native struct.
+ * Failure: NULL
+ *
+ * Programmer: John Mainzer
+ * 6/22/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static void *
+H5O__mdci_decode(H5F_t *f, hid_t H5_ATTR_UNUSED dxpl_id,
+ H5O_t H5_ATTR_UNUSED *open_oh, unsigned H5_ATTR_UNUSED mesg_flags,
+ unsigned H5_ATTR_UNUSED *ioflags, const uint8_t *p)
+{
+ H5O_mdci_t *mesg; /* Native message */
+ void *ret_value = NULL; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity check */
+ HDassert(f);
+ HDassert(p);
+
+ /* Version of message */
+ if(*p++ != H5O_MDCI_VERSION_0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "bad version number for message")
+
+ /* Allocate space for message */
+ if(NULL == (mesg = (H5O_mdci_t *)H5FL_MALLOC(H5O_mdci_t)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for metadata cache image message")
+
+ /* Decode */
+ H5F_addr_decode(f, &p, &(mesg->addr));
+ H5F_DECODE_LENGTH(f, p, mesg->size);
+
+ /* Set return value */
+ ret_value = (void *)mesg;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5O__mdci_decode() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5O__mdci_encode
+ *
+ * Purpose: Encode metadata cache image message
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 6/22/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5O__mdci_encode(H5F_t *f, hbool_t H5_ATTR_UNUSED disable_shared,
+ uint8_t *p, const void *_mesg)
+{
+ const H5O_mdci_t *mesg = (const H5O_mdci_t *)_mesg;
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity check */
+ HDassert(f);
+ HDassert(p);
+ HDassert(mesg);
+
+ /* encode */
+ *p++ = H5O_MDCI_VERSION_0;
+ H5F_addr_encode(f, &p, mesg->addr);
+ H5F_ENCODE_LENGTH(f, p, mesg->size);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5O__mdci_encode() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5O__mdci_copy
+ *
+ * Purpose: Copies a message from _MESG to _DEST, allocating _DEST if
+ * necessary.
+ *
+ * Return: Success: Ptr to _DEST
+ * Failure: NULL
+ *
+ * Programmer: John Mainzer
+ * 6/22/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static void *
+H5O__mdci_copy(const void *_mesg, void *_dest)
+{
+ const H5O_mdci_t *mesg = (const H5O_mdci_t *)_mesg;
+ H5O_mdci_t *dest = (H5O_mdci_t *) _dest;
+ void *ret_value = NULL; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* check args */
+ HDassert(mesg);
+ if(!dest && NULL == (dest = H5FL_MALLOC(H5O_mdci_t)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
+
+ /* copy */
+ *dest = *mesg;
+
+ /* Set return value */
+ ret_value = dest;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5O__mdci_copy() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5O__mdci_size
+ *
+ * Purpose: Returns the size of the raw message in bytes not counting
+ * the message type or size fields, but only the data fields.
+ * This function doesn't take into account alignment.
+ *
+ * Return: Success: Message data size in bytes without alignment.
+ *
+ * Failure: zero
+ *
+ * Programmer: John Mainzer
+ * 6/22/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static size_t
+H5O__mdci_size(const H5F_t *f, hbool_t H5_ATTR_UNUSED disable_shared,
+ const void H5_ATTR_UNUSED *_mesg)
+{
+ size_t ret_value = 0; /* Return value */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Set return value */
+ ret_value = (size_t)( 1 + /* Version number */
+ H5F_SIZEOF_ADDR(f) + /* addr of metadata cache */
+ /* image block */
+ H5F_SIZEOF_SIZE(f) ); /* length of metadata cache */
+ /* image block */
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5O__mdci_size() */
+
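Editorial note: a worked instance of the size computation above, assuming the common case of 8-byte file addresses and 8-byte lengths (the real values come from H5F_SIZEOF_ADDR()/H5F_SIZEOF_SIZE() and may differ):

    #include <stdio.h>

    int
    main(void)
    {
        unsigned sizeof_addr = 8;   /* assumed H5F_SIZEOF_ADDR(f) */
        unsigned sizeof_size = 8;   /* assumed H5F_SIZEOF_SIZE(f) */

        /* version byte + cache image block address + cache image block length */
        printf("raw mdci message size = %u bytes\n", 1 + sizeof_addr + sizeof_size);
        return 0;
    }
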
+
+/*-------------------------------------------------------------------------
+ * Function: H5O__mdci_free
+ *
+ * Purpose: Free the message
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 6/22/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5O__mdci_free(void *mesg)
+{
+ FUNC_ENTER_STATIC_NOERR
+
+ HDassert(mesg);
+
+ mesg = H5FL_FREE(H5O_mdci_t, mesg);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5O__mdci_free() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5O__mdci_delete
+ *
+ * Purpose: Free file space referenced by message
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Quincey Koziol
+ * Wednesday, March 19, 2003
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5O__mdci_delete(H5F_t *f, hid_t dxpl_id, H5O_t *open_oh, void *_mesg)
+{
+ H5O_mdci_t *mesg = (H5O_mdci_t *)_mesg;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* check args */
+ HDassert(f);
+ HDassert(open_oh);
+ HDassert(mesg);
+
+ /* Free file space for cache image */
+ if(H5F_addr_defined(mesg->addr)) {
+ /* The space for the cache image block was allocated directly
+ * from the VFD layer at the end of file. As this was the
+ * last file space allocation before shutdown, the cache image
+ * should still be the last item in the file.
+ *
+ * If the hack to work around the self referential free space
+ * manager issue is in use, file space for the non-empty self
+ * referential free space managers was also allocated from VFD
+ * layer at the end of file. Since these allocations directly
+ * preceded the cache image allocation, they should be directly
+ * adjacent to the cache image block at the end of file.
+ *
+ * In this case, just call H5MF_tidy_self_referential_fsm_hack().
+ *
+ * That routine will float the self referential free space
+ * managers, and reduce the EOA to its value just prior to the
+ * allocation of space for them. Since the cache image appears
+ * just after the self referential free space managers, this
+ * will release the file space for the cache image as well.
+ *
+ * Note that in this case, there must not have been any file
+ * space allocations / deallocations prior to the free of the
+ * cache image. Verify this to the extent possible.
+ *
+ * If the hack to work around the persistent self referential
+ * free space manager issue is NOT in use, just call H5MF_xfree()
+ * to release the cache image. In principle, we should be able
+ * to just reduce the EOA to the base address of the cache
+ * image block, as there shouldn't be any file space allocation
+ * before the first metadata cache access. However, given
+ * time constraints, I don't want to go there now.
+ */
+ if(H5F_FIRST_ALLOC_DEALLOC(f)) {
+ HDassert(HADDR_UNDEF != H5F_EOA_PRE_FSM_FSALLOC(f));
+ HDassert(H5F_addr_ge(mesg->addr, H5F_EOA_PRE_FSM_FSALLOC(f)));
+ if(H5MF_tidy_self_referential_fsm_hack(f, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTFREE, FAIL, "tidy of self referential fsm hack failed")
+ } /* end if */
+ else {
+ if(H5MF_xfree(f, H5FD_MEM_SUPER, dxpl_id, mesg->addr, mesg->size) < 0)
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTFREE, FAIL, "unable to free file space for cache image block")
+ } /* end else */
+ } /* end if */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5O__mdci_delete() */
+
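Editorial note: the long comment in H5O__mdci_delete() hinges on the cache image block being the last thing allocated before shutdown, so releasing it amounts to moving the end-of-allocated-space back to the block's base address. A toy illustration of that invariant; the numbers and names are made up:

    #include <assert.h>
    #include <stdint.h>
    #include <stdio.h>

    int
    main(void)
    {
        uint64_t eoa      = 10000;   /* end of allocated space in the file */
        uint64_t img_addr = 9000;    /* cache image block address          */
        uint64_t img_size = 1000;    /* cache image block size             */

        /* The block sits flush against the EOA ... */
        assert(img_addr + img_size == eoa);

        /* ... so freeing it is equivalent to lowering the EOA to its base. */
        eoa = img_addr;
        printf("new eoa = %llu\n", (unsigned long long)eoa);
        return 0;
    }
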
+
+/*-------------------------------------------------------------------------
+ * Function: H5O__mdci_debug
+ *
+ * Purpose: Prints debugging info.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: John Mainzer
+ * 6/22/15
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5O__mdci_debug(H5F_t H5_ATTR_UNUSED *f, hid_t H5_ATTR_UNUSED dxpl_id,
+ const void *_mesg, FILE * stream, int indent, int fwidth)
+{
+ const H5O_mdci_t *mdci = (const H5O_mdci_t *) _mesg;
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* check args */
+ HDassert(f);
+ HDassert(mdci);
+ HDassert(stream);
+ HDassert(indent >= 0);
+ HDassert(fwidth >= 0);
+
+ HDfprintf(stream, "%*s%-*s %a\n", indent, "", fwidth,
+ "Metadata Cache Image Block address:", mdci->addr);
+
+ HDfprintf(stream, "%*s%-*s %Hu\n", indent, "", fwidth,
+ "Metadata Cache Image Block size in bytes:", mdci->size);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5O__mdci_debug() */
+
diff --git a/src/H5Ochunk.c b/src/H5Ochunk.c
index 50be171..dbc894c 100644
--- a/src/H5Ochunk.c
+++ b/src/H5Ochunk.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Ocont.c b/src/H5Ocont.c
index 63002c5..b002a32 100644
--- a/src/H5Ocont.c
+++ b/src/H5Ocont.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Ocopy.c b/src/H5Ocopy.c
index ddf375c..597af63 100644
--- a/src/H5Ocopy.c
+++ b/src/H5Ocopy.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Odbg.c b/src/H5Odbg.c
index 827f40c..483c5fd 100644
--- a/src/H5Odbg.c
+++ b/src/H5Odbg.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Odrvinfo.c b/src/H5Odrvinfo.c
index 2fdf494..b9dea26 100644
--- a/src/H5Odrvinfo.c
+++ b/src/H5Odrvinfo.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
diff --git a/src/H5Odtype.c b/src/H5Odtype.c
index 799f475..a1c24b6 100644
--- a/src/H5Odtype.c
+++ b/src/H5Odtype.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "H5Omodule.h" /* This source code file is part of the H5O module */
@@ -1446,9 +1444,10 @@ H5O_dtype_set_share(void *_mesg/*in,out*/, const H5O_shared_t *sh)
dt->shared->state = H5T_STATE_NAMED;
/* Set up the object location for the datatype also */
+ if(H5O_loc_reset(&(dt->oloc)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to reset location")
dt->oloc.file = sh->file;
- dt->oloc.addr = sh->u.loc.oh_addr;;
- dt->oloc.holding_file = FALSE;
+ dt->oloc.addr = sh->u.loc.oh_addr;
} /* end if */
done:
@@ -1611,18 +1610,22 @@ H5O_dtype_shared_post_copy_upd(const H5O_loc_t H5_ATTR_UNUSED *src_oloc,
hid_t H5_ATTR_UNUSED dxpl_id, H5O_copy_t H5_ATTR_UNUSED *cpy_info)
{
H5T_t *dt_dst = (H5T_t *)mesg_dst; /* Destination datatype */
+ herr_t ret_value = SUCCEED; /* Return value */
- FUNC_ENTER_NOAPI_NOINIT_NOERR
+ FUNC_ENTER_NOAPI_NOINIT
if(dt_dst->sh_loc.type == H5O_SHARE_TYPE_COMMITTED) {
HDassert(H5T_committed(dt_dst));
+ if(H5O_loc_reset(&(dt_dst->oloc)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to reset location")
dt_dst->oloc.file = dt_dst->sh_loc.file;
dt_dst->oloc.addr = dt_dst->sh_loc.u.loc.oh_addr;
} /* end if */
else
HDassert(!H5T_committed(dt_dst));
- FUNC_LEAVE_NOAPI(SUCCEED)
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
} /* end H5O_dtype_shared_post_copy_upd */
@@ -1712,62 +1715,64 @@ H5O_dtype_debug(H5F_t *f, hid_t dxpl_id, const void *mesg, FILE *stream,
case H5T_NO_CLASS:
case H5T_NCLASSES:
default:
- sprintf(buf, "H5T_CLASS_%d", (int)(dt->shared->type));
+ HDsprintf(buf, "H5T_CLASS_%d", (int)(dt->shared->type));
s = buf;
break;
} /* end switch */
- fprintf(stream, "%*s%-*s %s\n", indent, "", fwidth,
+ HDfprintf(stream, "%*s%-*s %s\n", indent, "", fwidth,
"Type class:",
s);
- fprintf(stream, "%*s%-*s %lu byte%s\n", indent, "", fwidth,
+ HDfprintf(stream, "%*s%-*s %lu byte%s\n", indent, "", fwidth,
"Size:",
(unsigned long)(dt->shared->size), 1 == dt->shared->size ? "" : "s");
- fprintf(stream, "%*s%-*s %u\n", indent, "", fwidth,
+ HDfprintf(stream, "%*s%-*s %u\n", indent, "", fwidth,
"Version:", dt->shared->version);
if (H5T_COMPOUND == dt->shared->type) {
- fprintf(stream, "%*s%-*s %u\n", indent, "", fwidth,
- "Number of members:",
- dt->shared->u.compnd.nmembs);
- for(i = 0; i < dt->shared->u.compnd.nmembs; i++) {
- sprintf(buf, "Member %u:", i);
- fprintf(stream, "%*s%-*s %s\n", indent, "", fwidth,
- buf,
- dt->shared->u.compnd.memb[i].name);
- fprintf(stream, "%*s%-*s %lu\n", indent+3, "", MAX(0, fwidth-3),
- "Byte offset:",
- (unsigned long)(dt->shared->u.compnd.memb[i].offset));
- H5O_dtype_debug(f, dxpl_id, dt->shared->u.compnd.memb[i].type, stream,
- indent + 3, MAX(0, fwidth - 3));
- }
- } else if(H5T_ENUM == dt->shared->type) {
- fprintf(stream, "%*s%s\n", indent, "", "Base type:");
- H5O_dtype_debug(f, dxpl_id, dt->shared->parent, stream, indent+3, MAX(0, fwidth-3));
- fprintf(stream, "%*s%-*s %u\n", indent, "", fwidth,
- "Number of members:",
- dt->shared->u.enumer.nmembs);
- for(i = 0; i < dt->shared->u.enumer.nmembs; i++) {
- sprintf(buf, "Member %u:", i);
- fprintf(stream, "%*s%-*s %s\n", indent, "", fwidth,
- buf,
- dt->shared->u.enumer.name[i]);
- fprintf(stream, "%*s%-*s 0x", indent, "", fwidth,
- "Raw bytes of value:");
- for(k = 0; k < dt->shared->parent->shared->size; k++)
- fprintf(stream, "%02x",
- dt->shared->u.enumer.value[i*dt->shared->parent->shared->size + k]);
- fprintf(stream, "\n");
- } /* end for */
-
- } else if(H5T_OPAQUE == dt->shared->type) {
- fprintf(stream, "%*s%-*s \"%s\"\n", indent, "", fwidth,
- "Tag:", dt->shared->u.opaque.tag);
- } else if(H5T_REFERENCE == dt->shared->type) {
- fprintf(stream, "%*s%-*s\n", indent, "", fwidth,
- "Fix dumping reference types!");
- } else if(H5T_STRING == dt->shared->type) {
+ HDfprintf(stream, "%*s%-*s %u\n", indent, "", fwidth,
+ "Number of members:",
+ dt->shared->u.compnd.nmembs);
+ for(i = 0; i < dt->shared->u.compnd.nmembs; i++) {
+ HDsprintf(buf, "Member %u:", i);
+ HDfprintf(stream, "%*s%-*s %s\n", indent, "", fwidth,
+ buf,
+ dt->shared->u.compnd.memb[i].name);
+ HDfprintf(stream, "%*s%-*s %lu\n", indent+3, "", MAX(0, fwidth-3),
+ "Byte offset:",
+ (unsigned long)(dt->shared->u.compnd.memb[i].offset));
+ H5O_dtype_debug(f, dxpl_id, dt->shared->u.compnd.memb[i].type, stream,
+ indent + 3, MAX(0, fwidth - 3));
+ } /* end for */
+ } /* end if */
+ else if(H5T_ENUM == dt->shared->type) {
+ HDfprintf(stream, "%*s%s\n", indent, "", "Base type:");
+ H5O_dtype_debug(f, dxpl_id, dt->shared->parent, stream, indent+3, MAX(0, fwidth-3));
+ HDfprintf(stream, "%*s%-*s %u\n", indent, "", fwidth,
+ "Number of members:",
+ dt->shared->u.enumer.nmembs);
+ for(i = 0; i < dt->shared->u.enumer.nmembs; i++) {
+ HDsprintf(buf, "Member %u:", i);
+ HDfprintf(stream, "%*s%-*s %s\n", indent, "", fwidth,
+ buf,
+ dt->shared->u.enumer.name[i]);
+ HDfprintf(stream, "%*s%-*s 0x", indent, "", fwidth,
+ "Raw bytes of value:");
+ for(k = 0; k < dt->shared->parent->shared->size; k++)
+ HDfprintf(stream, "%02x", dt->shared->u.enumer.value[i*dt->shared->parent->shared->size + k]);
+ HDfprintf(stream, "\n");
+ } /* end for */
+ } /* end else if */
+ else if(H5T_OPAQUE == dt->shared->type) {
+ HDfprintf(stream, "%*s%-*s \"%s\"\n", indent, "", fwidth,
+ "Tag:", dt->shared->u.opaque.tag);
+ } /* end else if */
+ else if(H5T_REFERENCE == dt->shared->type) {
+ HDfprintf(stream, "%*s%-*s\n", indent, "", fwidth,
+ "Fix dumping reference types!");
+ } /* end else if */
+ else if(H5T_STRING == dt->shared->type) {
switch(dt->shared->u.atomic.u.s.cset) {
case H5T_CSET_ASCII:
s = "ASCII";
@@ -1791,17 +1796,17 @@ H5O_dtype_debug(H5F_t *f, hid_t dxpl_id, const void *mesg, FILE *stream,
case H5T_CSET_RESERVED_13:
case H5T_CSET_RESERVED_14:
case H5T_CSET_RESERVED_15:
- sprintf(buf, "H5T_CSET_RESERVED_%d", (int)(dt->shared->u.atomic.u.s.cset));
+ HDsprintf(buf, "H5T_CSET_RESERVED_%d", (int)(dt->shared->u.atomic.u.s.cset));
s = buf;
break;
case H5T_CSET_ERROR:
default:
- sprintf(buf, "Unknown character set: %d", (int)(dt->shared->u.atomic.u.s.cset));
+ HDsprintf(buf, "Unknown character set: %d", (int)(dt->shared->u.atomic.u.s.cset));
s = buf;
break;
} /* end switch */
- fprintf(stream, "%*s%-*s %s\n", indent, "", fwidth,
+ HDfprintf(stream, "%*s%-*s %s\n", indent, "", fwidth,
"Character Set:",
s);
@@ -1831,20 +1836,21 @@ H5O_dtype_debug(H5F_t *f, hid_t dxpl_id, const void *mesg, FILE *stream,
case H5T_STR_RESERVED_13:
case H5T_STR_RESERVED_14:
case H5T_STR_RESERVED_15:
- sprintf(buf, "H5T_STR_RESERVED_%d", (int)(dt->shared->u.atomic.u.s.pad));
+ HDsprintf(buf, "H5T_STR_RESERVED_%d", (int)(dt->shared->u.atomic.u.s.pad));
s = buf;
break;
case H5T_STR_ERROR:
default:
- sprintf(buf, "Unknown string padding: %d", (int)(dt->shared->u.atomic.u.s.pad));
+ HDsprintf(buf, "Unknown string padding: %d", (int)(dt->shared->u.atomic.u.s.pad));
s = buf;
break;
} /* end switch */
- fprintf(stream, "%*s%-*s %s\n", indent, "", fwidth,
+ HDfprintf(stream, "%*s%-*s %s\n", indent, "", fwidth,
"String Padding:",
s);
- } else if(H5T_VLEN == dt->shared->type) {
+ } /* end else if */
+ else if(H5T_VLEN == dt->shared->type) {
switch(dt->shared->u.vlen.type) {
case H5T_VLEN_SEQUENCE:
s = "sequence";
@@ -1857,11 +1863,11 @@ H5O_dtype_debug(H5F_t *f, hid_t dxpl_id, const void *mesg, FILE *stream,
case H5T_VLEN_BADTYPE:
case H5T_VLEN_MAXTYPE:
default:
- sprintf(buf, "H5T_VLEN_%d", dt->shared->u.vlen.type);
+ HDsprintf(buf, "H5T_VLEN_%d", dt->shared->u.vlen.type);
s = buf;
break;
} /* end switch */
- fprintf(stream, "%*s%-*s %s\n", indent, "", fwidth,
+ HDfprintf(stream, "%*s%-*s %s\n", indent, "", fwidth,
"Vlen type:", s);
switch(dt->shared->u.vlen.loc) {
@@ -1876,11 +1882,11 @@ H5O_dtype_debug(H5F_t *f, hid_t dxpl_id, const void *mesg, FILE *stream,
case H5T_LOC_BADLOC:
case H5T_LOC_MAXLOC:
default:
- sprintf(buf, "H5T_LOC_%d", (int)dt->shared->u.vlen.loc);
+ HDsprintf(buf, "H5T_LOC_%d", (int)dt->shared->u.vlen.loc);
s = buf;
break;
} /* end switch */
- fprintf(stream, "%*s%-*s %s\n", indent, "", fwidth,
+ HDfprintf(stream, "%*s%-*s %s\n", indent, "", fwidth,
"Location:", s);
/* Extra information for VL-strings */
@@ -1908,17 +1914,17 @@ H5O_dtype_debug(H5F_t *f, hid_t dxpl_id, const void *mesg, FILE *stream,
case H5T_CSET_RESERVED_13:
case H5T_CSET_RESERVED_14:
case H5T_CSET_RESERVED_15:
- sprintf(buf, "H5T_CSET_RESERVED_%d", (int)(dt->shared->u.vlen.cset));
+ HDsprintf(buf, "H5T_CSET_RESERVED_%d", (int)(dt->shared->u.vlen.cset));
s = buf;
break;
case H5T_CSET_ERROR:
default:
- sprintf(buf, "Unknown character set: %d", (int)(dt->shared->u.vlen.cset));
+ HDsprintf(buf, "Unknown character set: %d", (int)(dt->shared->u.vlen.cset));
s = buf;
break;
} /* end switch */
- fprintf(stream, "%*s%-*s %s\n", indent, "", fwidth,
+ HDfprintf(stream, "%*s%-*s %s\n", indent, "", fwidth,
"Character Set:",
s);
@@ -1948,32 +1954,34 @@ H5O_dtype_debug(H5F_t *f, hid_t dxpl_id, const void *mesg, FILE *stream,
case H5T_STR_RESERVED_13:
case H5T_STR_RESERVED_14:
case H5T_STR_RESERVED_15:
- sprintf(buf, "H5T_STR_RESERVED_%d", (int)(dt->shared->u.vlen.pad));
+ HDsprintf(buf, "H5T_STR_RESERVED_%d", (int)(dt->shared->u.vlen.pad));
s = buf;
break;
case H5T_STR_ERROR:
default:
- sprintf(buf, "Unknown string padding: %d", (int)(dt->shared->u.vlen.pad));
+ HDsprintf(buf, "Unknown string padding: %d", (int)(dt->shared->u.vlen.pad));
s = buf;
break;
} /* end switch */
- fprintf(stream, "%*s%-*s %s\n", indent, "", fwidth,
+ HDfprintf(stream, "%*s%-*s %s\n", indent, "", fwidth,
"String Padding:",
s);
} /* end if */
- } else if(H5T_ARRAY == dt->shared->type) {
- fprintf(stream, "%*s%-*s %u\n", indent, "", fwidth,
- "Rank:",
- dt->shared->u.array.ndims);
- fprintf(stream, "%*s%-*s {", indent, "", fwidth, "Dim Size:");
+ } /* end else if */
+ else if(H5T_ARRAY == dt->shared->type) {
+ HDfprintf(stream, "%*s%-*s %u\n", indent, "", fwidth,
+ "Rank:",
+ dt->shared->u.array.ndims);
+ HDfprintf(stream, "%*s%-*s {", indent, "", fwidth, "Dim Size:");
for(i = 0; i < dt->shared->u.array.ndims; i++)
- fprintf(stream, "%s%u", (i ? ", " : ""), (unsigned)dt->shared->u.array.dim[i]);
- fprintf(stream, "}\n");
- fprintf(stream, "%*s%s\n", indent, "", "Base type:");
- H5O_dtype_debug(f, dxpl_id, dt->shared->parent, stream, indent + 3, MAX(0, fwidth - 3));
- } else {
- switch (dt->shared->u.atomic.order) {
+ HDfprintf(stream, "%s%u", (i ? ", " : ""), (unsigned)dt->shared->u.array.dim[i]);
+ HDfprintf(stream, "}\n");
+ HDfprintf(stream, "%*s%s\n", indent, "", "Base type:");
+ H5O_dtype_debug(f, dxpl_id, dt->shared->parent, stream, indent + 3, MAX(0, fwidth - 3));
+ } /* end else if */
+ else {
+ switch (dt->shared->u.atomic.order) {
case H5T_ORDER_LE:
s = "little endian";
break;
@@ -1996,25 +2004,25 @@ H5O_dtype_debug(H5F_t *f, hid_t dxpl_id, const void *mesg, FILE *stream,
case H5T_ORDER_ERROR:
default:
- sprintf(buf, "H5T_ORDER_%d", dt->shared->u.atomic.order);
+ HDsprintf(buf, "H5T_ORDER_%d", dt->shared->u.atomic.order);
s = buf;
break;
- } /* end switch */
- fprintf(stream, "%*s%-*s %s\n", indent, "", fwidth,
- "Byte order:",
- s);
-
- fprintf(stream, "%*s%-*s %lu bit%s\n", indent, "", fwidth,
- "Precision:",
- (unsigned long)(dt->shared->u.atomic.prec),
- 1==dt->shared->u.atomic.prec?"":"s");
-
- fprintf(stream, "%*s%-*s %lu bit%s\n", indent, "", fwidth,
- "Offset:",
- (unsigned long)(dt->shared->u.atomic.offset),
- 1==dt->shared->u.atomic.offset?"":"s");
-
- switch (dt->shared->u.atomic.lsb_pad) {
+ } /* end switch */
+ HDfprintf(stream, "%*s%-*s %s\n", indent, "", fwidth,
+ "Byte order:",
+ s);
+
+ HDfprintf(stream, "%*s%-*s %lu bit%s\n", indent, "", fwidth,
+ "Precision:",
+ (unsigned long)(dt->shared->u.atomic.prec),
+ 1==dt->shared->u.atomic.prec?"":"s");
+
+ HDfprintf(stream, "%*s%-*s %lu bit%s\n", indent, "", fwidth,
+ "Offset:",
+ (unsigned long)(dt->shared->u.atomic.offset),
+ 1==dt->shared->u.atomic.offset?"":"s");
+
+ switch (dt->shared->u.atomic.lsb_pad) {
case H5T_PAD_ZERO:
s = "zero";
break;
@@ -2032,11 +2040,11 @@ H5O_dtype_debug(H5F_t *f, hid_t dxpl_id, const void *mesg, FILE *stream,
default:
s = "pad?";
break;
- } /* end switch */
- fprintf(stream, "%*s%-*s %s\n", indent, "", fwidth,
- "Low pad type:", s);
+ } /* end switch */
+ HDfprintf(stream, "%*s%-*s %s\n", indent, "", fwidth,
+ "Low pad type:", s);
- switch (dt->shared->u.atomic.msb_pad) {
+ switch (dt->shared->u.atomic.msb_pad) {
case H5T_PAD_ZERO:
s = "zero";
break;
@@ -2054,12 +2062,12 @@ H5O_dtype_debug(H5F_t *f, hid_t dxpl_id, const void *mesg, FILE *stream,
default:
s = "pad?";
break;
- } /* end switch */
- fprintf(stream, "%*s%-*s %s\n", indent, "", fwidth,
- "High pad type:", s);
+ } /* end switch */
+ HDfprintf(stream, "%*s%-*s %s\n", indent, "", fwidth,
+ "High pad type:", s);
- if (H5T_FLOAT == dt->shared->type) {
- switch (dt->shared->u.atomic.u.f.pad) {
+ if (H5T_FLOAT == dt->shared->type) {
+ switch (dt->shared->u.atomic.u.f.pad) {
case H5T_PAD_ZERO:
s = "zero";
break;
@@ -2076,16 +2084,16 @@ H5O_dtype_debug(H5F_t *f, hid_t dxpl_id, const void *mesg, FILE *stream,
case H5T_NPAD:
default:
if (dt->shared->u.atomic.u.f.pad < 0)
- sprintf(buf, "H5T_PAD_%d", -(dt->shared->u.atomic.u.f.pad));
+ HDsprintf(buf, "H5T_PAD_%d", -(dt->shared->u.atomic.u.f.pad));
else
- sprintf(buf, "bit-%d", dt->shared->u.atomic.u.f.pad);
+ HDsprintf(buf, "bit-%d", dt->shared->u.atomic.u.f.pad);
s = buf;
break;
- } /* end switch */
- fprintf(stream, "%*s%-*s %s\n", indent, "", fwidth,
- "Internal pad type:", s);
+ } /* end switch */
+ HDfprintf(stream, "%*s%-*s %s\n", indent, "", fwidth,
+ "Internal pad type:", s);
- switch (dt->shared->u.atomic.u.f.norm) {
+ switch (dt->shared->u.atomic.u.f.norm) {
case H5T_NORM_IMPLIED:
s = "implied";
break;
@@ -2100,38 +2108,39 @@ H5O_dtype_debug(H5F_t *f, hid_t dxpl_id, const void *mesg, FILE *stream,
case H5T_NORM_ERROR:
default:
- sprintf(buf, "H5T_NORM_%d", (int) (dt->shared->u.atomic.u.f.norm));
+ HDsprintf(buf, "H5T_NORM_%d", (int) (dt->shared->u.atomic.u.f.norm));
s = buf;
- } /* end switch */
- fprintf(stream, "%*s%-*s %s\n", indent, "", fwidth,
- "Normalization:", s);
+ } /* end switch */
+ HDfprintf(stream, "%*s%-*s %s\n", indent, "", fwidth,
+ "Normalization:", s);
- fprintf(stream, "%*s%-*s %lu\n", indent, "", fwidth,
- "Sign bit location:",
- (unsigned long) (dt->shared->u.atomic.u.f.sign));
+ HDfprintf(stream, "%*s%-*s %lu\n", indent, "", fwidth,
+ "Sign bit location:",
+ (unsigned long) (dt->shared->u.atomic.u.f.sign));
- fprintf(stream, "%*s%-*s %lu\n", indent, "", fwidth,
- "Exponent location:",
- (unsigned long) (dt->shared->u.atomic.u.f.epos));
+ HDfprintf(stream, "%*s%-*s %lu\n", indent, "", fwidth,
+ "Exponent location:",
+ (unsigned long) (dt->shared->u.atomic.u.f.epos));
- fprintf(stream, "%*s%-*s 0x%08lx\n", indent, "", fwidth,
- "Exponent bias:",
- (unsigned long) (dt->shared->u.atomic.u.f.ebias));
+ HDfprintf(stream, "%*s%-*s 0x%08lx\n", indent, "", fwidth,
+ "Exponent bias:",
+ (unsigned long) (dt->shared->u.atomic.u.f.ebias));
- fprintf(stream, "%*s%-*s %lu\n", indent, "", fwidth,
- "Exponent size:",
- (unsigned long) (dt->shared->u.atomic.u.f.esize));
+ HDfprintf(stream, "%*s%-*s %lu\n", indent, "", fwidth,
+ "Exponent size:",
+ (unsigned long) (dt->shared->u.atomic.u.f.esize));
- fprintf(stream, "%*s%-*s %lu\n", indent, "", fwidth,
- "Mantissa location:",
- (unsigned long) (dt->shared->u.atomic.u.f.mpos));
+ HDfprintf(stream, "%*s%-*s %lu\n", indent, "", fwidth,
+ "Mantissa location:",
+ (unsigned long) (dt->shared->u.atomic.u.f.mpos));
- fprintf(stream, "%*s%-*s %lu\n", indent, "", fwidth,
- "Mantissa size:",
- (unsigned long) (dt->shared->u.atomic.u.f.msize));
+ HDfprintf(stream, "%*s%-*s %lu\n", indent, "", fwidth,
+ "Mantissa size:",
+ (unsigned long) (dt->shared->u.atomic.u.f.msize));
- } else if (H5T_INTEGER == dt->shared->type) {
- switch (dt->shared->u.atomic.u.i.sign) {
+ } /* end if */
+ else if (H5T_INTEGER == dt->shared->type) {
+ switch (dt->shared->u.atomic.u.i.sign) {
case H5T_SGN_NONE:
s = "none";
break;
@@ -2143,14 +2152,14 @@ H5O_dtype_debug(H5F_t *f, hid_t dxpl_id, const void *mesg, FILE *stream,
case H5T_SGN_ERROR:
case H5T_NSGN:
default:
- sprintf(buf, "H5T_SGN_%d", (int) (dt->shared->u.atomic.u.i.sign));
+ HDsprintf(buf, "H5T_SGN_%d", (int) (dt->shared->u.atomic.u.i.sign));
s = buf;
break;
- } /* end switch */
- fprintf(stream, "%*s%-*s %s\n", indent, "", fwidth,
- "Sign scheme:", s);
- }
- }
+ } /* end switch */
+ HDfprintf(stream, "%*s%-*s %s\n", indent, "", fwidth,
+ "Sign scheme:", s);
+ } /* end else if */
+ } /* end else */
FUNC_LEAVE_NOAPI(SUCCEED)
} /* end H5O_dtype_debug() */
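The hunk above is purely mechanical: every bare sprintf/fprintf call is rerouted through HDF5's HD-prefixed wrappers, and the if/else chain gains explicit end-of-block comments. A minimal sketch of the wrapper convention being relied on, with assumptions noted in the comments:

    /* Illustrative sketch of the wrapper idea, not the library's actual
     * definitions: call sites never name the C library directly, and one
     * private header decides what each HD-prefixed name expands to on the
     * current platform. */
    #include <stdio.h>

    #ifndef HDsprintf
    #define HDsprintf   sprintf   /* plausible one-to-one mapping */
    #endif
    /* HDfprintf, by contrast, is assumed to be a dedicated replacement
     * function so it can accept extra conversions such as %a and %Hu,
     * which appear later in this patch. */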
diff --git a/src/H5Oefl.c b/src/H5Oefl.c
index 149c8b2..0456b00 100644
--- a/src/H5Oefl.c
+++ b/src/H5Oefl.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Ofill.c b/src/H5Ofill.c
index 745d027..5419762 100644
--- a/src/H5Ofill.c
+++ b/src/H5Ofill.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Robb Matzke <matzke@llnl.gov>
diff --git a/src/H5Oflush.c b/src/H5Oflush.c
index e399c6c..2d93221 100644
--- a/src/H5Oflush.c
+++ b/src/H5Oflush.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Ofsinfo.c b/src/H5Ofsinfo.c
index 1712857..89dc8ee 100644
--- a/src/H5Ofsinfo.c
+++ b/src/H5Ofsinfo.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -19,16 +17,17 @@
* Feb 2009
* Vailin Choi
*
- * Purpose: Free space manager info message.
+ * Purpose: File space info message.
*
*-------------------------------------------------------------------------
*/
-
+#define H5F_FRIEND /*suppress error about including H5Fpkg */
#include "H5Omodule.h" /* This source code file is part of the H5O module */
#include "H5private.h" /* Generic Functions */
#include "H5Eprivate.h" /* Error handling */
+#include "H5Fpkg.h" /* File access */
#include "H5FLprivate.h" /* Free lists */
#include "H5Opkg.h" /* Object headers */
@@ -66,7 +65,8 @@ const H5O_msg_class_t H5O_MSG_FSINFO[1] = {{
}};
/* Current version of free-space manager info information */
-#define H5O_FSINFO_VERSION 0
+#define H5O_FSINFO_VERSION_0 0
+#define H5O_FSINFO_VERSION_1 1
/* Declare a free list to manage the H5O_fsinfo_t struct */
H5FL_DEFINE_STATIC(H5O_fsinfo_t);
@@ -85,12 +85,13 @@ H5FL_DEFINE_STATIC(H5O_fsinfo_t);
*-------------------------------------------------------------------------
*/
static void *
-H5O_fsinfo_decode(H5F_t *f, hid_t H5_ATTR_UNUSED dxpl_id, H5O_t H5_ATTR_UNUSED *open_oh,
+H5O_fsinfo_decode(H5F_t *f, hid_t dxpl_id, H5O_t H5_ATTR_UNUSED *open_oh,
unsigned H5_ATTR_UNUSED mesg_flags, unsigned H5_ATTR_UNUSED *ioflags, const uint8_t *p)
{
- H5O_fsinfo_t *fsinfo = NULL; /* free-space manager info */
- H5FD_mem_t type; /* Memory type for iteration */
- void *ret_value = NULL; /* Return value */
+ H5O_fsinfo_t *fsinfo = NULL; /* File space info message */
+ H5F_mem_page_t ptype; /* Memory type for iteration */
+ unsigned vers; /* message version */
+ void *ret_value = NULL; /* Return value */
FUNC_ENTER_NOAPI_NOINIT
@@ -98,26 +99,83 @@ H5O_fsinfo_decode(H5F_t *f, hid_t H5_ATTR_UNUSED dxpl_id, H5O_t H5_ATTR_UNUSED *
HDassert(f);
HDassert(p);
- /* Version of message */
- if(*p++ != H5O_FSINFO_VERSION)
- HGOTO_ERROR(H5E_OHDR, H5E_CANTLOAD, NULL, "bad version number for message")
-
/* Allocate space for message */
if(NULL == (fsinfo = H5FL_CALLOC(H5O_fsinfo_t)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
- fsinfo->strategy = (H5F_file_space_type_t)*p++; /* file space strategy */
- H5F_DECODE_LENGTH(f, p, fsinfo->threshold); /* free space section size threshold */
+ for(ptype = H5F_MEM_PAGE_SUPER; ptype < H5F_MEM_PAGE_NTYPES; H5_INC_ENUM(H5F_mem_page_t, ptype))
+ fsinfo->fs_addr[ptype - 1] = HADDR_UNDEF;
- /* Addresses of free space managers: only exist for H5F_FILE_SPACE_ALL_PERSIST */
- if(fsinfo->strategy == H5F_FILE_SPACE_ALL_PERSIST) {
- for(type = H5FD_MEM_SUPER; type < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, type))
- H5F_addr_decode(f, &p, &(fsinfo->fs_addr[type-1]));
- } /* end if */
- else {
- for(type = H5FD_MEM_SUPER; type < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, type))
- fsinfo->fs_addr[type-1] = HADDR_UNDEF;
- } /* end else */
+ /* Version of message */
+ vers = *p++;
+
+ if(vers == H5O_FSINFO_VERSION_0) {
+ H5F_file_space_type_t strategy; /* Strategy */
+ hsize_t threshold; /* Threshold */
+ H5FD_mem_t type; /* Memory type for iteration */
+
+ fsinfo->persist = H5F_FREE_SPACE_PERSIST_DEF;
+ fsinfo->threshold = H5F_FREE_SPACE_THRESHOLD_DEF;
+ fsinfo->page_size = H5F_FILE_SPACE_PAGE_SIZE_DEF;
+ fsinfo->pgend_meta_thres = H5F_FILE_SPACE_PGEND_META_THRES;
+ fsinfo->eoa_pre_fsm_fsalloc = HADDR_UNDEF;
+
+ strategy = (H5F_file_space_type_t)*p++; /* File space strategy */
+ H5F_DECODE_LENGTH(f, p, threshold); /* Free-space section threshold */
+
+ /* Map version 0 (deprecated) to version 1 message */
+ switch(strategy) {
+
+ case H5F_FILE_SPACE_ALL_PERSIST:
+ fsinfo->strategy = H5F_FSPACE_STRATEGY_FSM_AGGR;
+ fsinfo->persist = TRUE;
+ fsinfo->threshold = threshold;
+ if(HADDR_UNDEF == (fsinfo->eoa_pre_fsm_fsalloc = H5F_get_eoa(f, H5FD_MEM_DEFAULT)) )
+ HGOTO_ERROR(H5E_FILE, H5E_CANTGET, NULL, "unable to get file size")
+ for(type = H5FD_MEM_SUPER; type < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, type))
+ H5F_addr_decode(f, &p, &(fsinfo->fs_addr[type-1]));
+ break;
+
+ case H5F_FILE_SPACE_ALL:
+ fsinfo->strategy = H5F_FSPACE_STRATEGY_FSM_AGGR;
+ fsinfo->threshold = threshold;
+ break;
+
+ case H5F_FILE_SPACE_AGGR_VFD:
+ fsinfo->strategy = H5F_FSPACE_STRATEGY_AGGR;
+ break;
+
+ case H5F_FILE_SPACE_VFD:
+ fsinfo->strategy = H5F_FSPACE_STRATEGY_NONE;
+ break;
+
+ case H5F_FILE_SPACE_NTYPES:
+ case H5F_FILE_SPACE_DEFAULT:
+ default:
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, "invalid file space strategy")
+ } /* end switch */
+
+ fsinfo->mapped = TRUE;
+
+ } else {
+ HDassert(vers == H5O_FSINFO_VERSION_1);
+
+ fsinfo->strategy = (H5F_fspace_strategy_t)*p++; /* File space strategy */
+ fsinfo->persist = *p++; /* Free-space persist or not */
+ H5F_DECODE_LENGTH(f, p, fsinfo->threshold); /* Free-space section threshold */
+
+ H5F_DECODE_LENGTH(f, p, fsinfo->page_size); /* File space page size */
+    UINT16DECODE(p, fsinfo->pgend_meta_thres);      /* Page end metadata threshold */
+ H5F_addr_decode(f, &p, &(fsinfo->eoa_pre_fsm_fsalloc)); /* EOA before free-space header and section info */
+
+ /* Decode addresses of free space managers, if persisting */
+ if(fsinfo->persist) {
+ for(ptype = H5F_MEM_PAGE_SUPER; ptype < H5F_MEM_PAGE_NTYPES; H5_INC_ENUM(H5F_mem_page_t, ptype))
+ H5F_addr_decode(f, &p, &(fsinfo->fs_addr[ptype - 1]));
+ } /* end if */
+
+ fsinfo->mapped = FALSE;
+ }
/* Set return value */
ret_value = fsinfo;
@@ -145,7 +203,7 @@ static herr_t
H5O_fsinfo_encode(H5F_t *f, hbool_t H5_ATTR_UNUSED disable_shared, uint8_t *p, const void *_mesg)
{
const H5O_fsinfo_t *fsinfo = (const H5O_fsinfo_t *)_mesg;
- H5FD_mem_t type; /* Memory type for iteration */
+ H5F_mem_page_t ptype; /* Memory type for iteration */
FUNC_ENTER_NOAPI_NOINIT_NOERR
@@ -154,14 +212,20 @@ H5O_fsinfo_encode(H5F_t *f, hbool_t H5_ATTR_UNUSED disable_shared, uint8_t *p, c
HDassert(p);
HDassert(fsinfo);
- *p++ = H5O_FSINFO_VERSION; /* message version */
- *p++ = fsinfo->strategy; /* file space strategy */
- H5F_ENCODE_LENGTH(f, p, fsinfo->threshold); /* free-space section size threshold */
+ *p++ = H5O_FSINFO_VERSION_1; /* message version */
+ *p++ = fsinfo->strategy; /* File space strategy */
+ *p++ = (unsigned char)fsinfo->persist; /* Free-space persist or not */
+ H5F_ENCODE_LENGTH(f, p, fsinfo->threshold); /* Free-space section size threshold */
+
+ H5F_ENCODE_LENGTH(f, p, fsinfo->page_size); /* File space page size */
+ UINT16ENCODE(p, fsinfo->pgend_meta_thres); /* Page end metadata threshold */
+ H5F_addr_encode(f, &p, fsinfo->eoa_pre_fsm_fsalloc); /* EOA before free-space header and section info */
- /* Addresses of free space managers: only exist for H5F_FILE_SPACE_ALL_PERSIST */
- if(fsinfo->strategy == H5F_FILE_SPACE_ALL_PERSIST) {
- for(type = H5FD_MEM_SUPER; type < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, type))
- H5F_addr_encode(f, &p, fsinfo->fs_addr[type-1]);
+ /* Store addresses of free-space managers, if persisting */
+ if(fsinfo->persist) {
+ /* Addresses of free-space managers */
+ for(ptype = H5F_MEM_PAGE_SUPER; ptype < H5F_MEM_PAGE_NTYPES; H5_INC_ENUM(H5F_mem_page_t, ptype))
+ H5F_addr_encode(f, &p, fsinfo->fs_addr[ptype - 1]);
} /* end if */
FUNC_LEAVE_NOAPI(SUCCEED)
@@ -224,19 +288,19 @@ static size_t
H5O_fsinfo_size(const H5F_t *f, hbool_t H5_ATTR_UNUSED disable_shared, const void *_mesg)
{
const H5O_fsinfo_t *fsinfo = (const H5O_fsinfo_t *)_mesg;
- size_t fs_addr_size = 0;
size_t ret_value = 0; /* Return value */
FUNC_ENTER_NOAPI_NOINIT_NOERR
-
- /* Addresses of free-space managers exist only for H5F_FILE_SPACE_ALL_PERSIST type */
- if(H5F_FILE_SPACE_ALL_PERSIST == fsinfo->strategy)
- fs_addr_size = (H5FD_MEM_NTYPES - 1) * (size_t)H5F_SIZEOF_ADDR(f);
-
- ret_value = 2 /* Version & strategy */
- + (size_t)H5F_SIZEOF_SIZE(f) /* Threshold */
- + fs_addr_size; /* Addresses of free-space managers */
+ ret_value = 3 /* Version, strategy & persist */
+ + (size_t)H5F_SIZEOF_SIZE(f) /* Free-space section threshold */
+ + (size_t)H5F_SIZEOF_SIZE(f) /* File space page size */
+ + 2 /* Page end meta threshold */
+ + (size_t)H5F_SIZEOF_ADDR(f);
+
+ /* Free-space manager addresses */
+ if(fsinfo->persist)
+ ret_value += (H5F_MEM_PAGE_NTYPES - 1) * (size_t)H5F_SIZEOF_ADDR(f);
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5O_fsinfo_size() */
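For concreteness, here is a worked sketch (not part of the patch) of the byte count H5O_fsinfo_size() now reports, assuming 8-byte file lengths and addresses, i.e. H5F_SIZEOF_SIZE(f) == H5F_SIZEOF_ADDR(f) == 8, and the 13 manager addresses implied by H5F_MEM_PAGE_NTYPES - 1 elsewhere in this patch:

    #include <stddef.h>

    /* Hypothetical worked example; the numbers are illustrative, not normative. */
    static size_t
    example_fsinfo_size(int persist)
    {
        size_t ret = 3      /* version + strategy + persist (1 byte each)  */
                   + 8      /* free-space section threshold                */
                   + 8      /* file space page size                        */
                   + 2      /* page end metadata threshold (16-bit)        */
                   + 8;     /* eoa_pre_fsm_fsalloc address                 */

        if(persist)
            ret += 13 * 8;  /* addresses of the free-space managers        */

        return ret;         /* 29 bytes, or 133 when persisting            */
    }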
@@ -282,7 +346,7 @@ H5O_fsinfo_debug(H5F_t H5_ATTR_UNUSED *f, hid_t H5_ATTR_UNUSED dxpl_id, const vo
int indent, int fwidth)
{
const H5O_fsinfo_t *fsinfo = (const H5O_fsinfo_t *) _mesg;
- H5FD_mem_t type; /* Memory type for iteration */
+ H5F_mem_page_t ptype; /* Free-space types for iteration */
FUNC_ENTER_NOAPI_NOINIT_NOERR
@@ -293,16 +357,48 @@ H5O_fsinfo_debug(H5F_t H5_ATTR_UNUSED *f, hid_t H5_ATTR_UNUSED dxpl_id, const vo
HDassert(indent >= 0);
HDassert(fwidth >= 0);
- HDfprintf(stream, "%*s%-*s %u\n", indent, "", fwidth,
- "File space strategy:", fsinfo->strategy);
+ HDfprintf(stream, "%*s%-*s ", indent, "", fwidth, "File space strategy:");
+ switch(fsinfo->strategy) {
+ case H5F_FSPACE_STRATEGY_FSM_AGGR:
+ HDfprintf(stream, "%s\n", "H5F_FSPACE_STRATEGY_FSM_AGGR");
+ break;
+
+ case H5F_FSPACE_STRATEGY_PAGE:
+ HDfprintf(stream, "%s\n", "H5F_FSPACE_STRATEGY_PAGE");
+ break;
+
+ case H5F_FSPACE_STRATEGY_AGGR:
+ HDfprintf(stream, "%s\n", "H5F_FSPACE_STRATEGY_AGGR");
+ break;
+
+ case H5F_FSPACE_STRATEGY_NONE:
+ HDfprintf(stream, "%s\n", "H5F_FSPACE_STRATEGY_NONE");
+ break;
+
+ case H5F_FSPACE_STRATEGY_NTYPES:
+ default:
+ HDfprintf(stream, "%s\n", "unknown");
+ } /* end switch */
+
+ HDfprintf(stream, "%*s%-*s %t\n", indent, "", fwidth,
+ "Free-space persist:", fsinfo->persist);
HDfprintf(stream, "%*s%-*s %Hu\n", indent, "", fwidth,
- "Free space section threshold:", fsinfo->threshold);
+ "Free-space section threshold:", fsinfo->threshold);
+
+ HDfprintf(stream, "%*s%-*s %Hu\n", indent, "", fwidth,
+ "File space page size:", fsinfo->page_size);
+
+ HDfprintf(stream, "%*s%-*s %u\n", indent, "", fwidth,
+ "Page end metadata threshold:", fsinfo->pgend_meta_thres);
+
+ HDfprintf(stream, "%*s%-*s %a\n", indent, "", fwidth,
+ "eoa_pre_fsm_fsalloc:", fsinfo->eoa_pre_fsm_fsalloc);
- if(fsinfo->strategy == H5F_FILE_SPACE_ALL_PERSIST) {
- for(type = H5FD_MEM_SUPER; type < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, type))
- HDfprintf(stream, "%*s%-*s %a\n", indent, "", fwidth,
- "Free space manager address:", fsinfo->fs_addr[type-1]);
+ if(fsinfo->persist) {
+ for(ptype = H5F_MEM_PAGE_SUPER; ptype < H5F_MEM_PAGE_NTYPES; H5_INC_ENUM(H5F_mem_page_t, ptype))
+ HDfprintf(stream, "%*s%-*s %a\n", indent, "", fwidth,
+ "Free space manager address:", fsinfo->fs_addr[ptype-1]);
} /* end if */
FUNC_LEAVE_NOAPI(SUCCEED)
diff --git a/src/H5Oginfo.c b/src/H5Oginfo.c
index 9cd0dc1..468e07a 100644
--- a/src/H5Oginfo.c
+++ b/src/H5Oginfo.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Olayout.c b/src/H5Olayout.c
index b7a2584..838a80f 100644
--- a/src/H5Olayout.c
+++ b/src/H5Olayout.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Robb Matzke <matzke@llnl.gov>
diff --git a/src/H5Olinfo.c b/src/H5Olinfo.c
index 62e63d4..cac4ed1 100644
--- a/src/H5Olinfo.c
+++ b/src/H5Olinfo.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Olink.c b/src/H5Olink.c
index fd4ee88..77872ad 100644
--- a/src/H5Olink.c
+++ b/src/H5Olink.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Omessage.c b/src/H5Omessage.c
index ee21e49..158701b 100644
--- a/src/H5Omessage.c
+++ b/src/H5Omessage.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -1323,10 +1321,9 @@ done:
* object header, the header will be condensed after each
* message removal)
*/
- if(oh_modified & H5O_MODIFY_CONDENSE) {
+ if(oh_modified & H5O_MODIFY_CONDENSE)
if(H5O_condense_header(f, oh, dxpl_id) < 0)
HDONE_ERROR(H5E_OHDR, H5E_CANTPACK, FAIL, "can't pack object header")
- }
/* Mark object header as changed */
if(H5O_touch_oh(f, dxpl_id, oh, FALSE) < 0)
@@ -2254,3 +2251,55 @@ done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5O_flush_msgs() */
+
+/*-------------------------------------------------------------------------
+ * Function: H5O_msg_get_flags
+ *
+ * Purpose: Queries a message's message flags in the object header
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin; Jan 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5O_msg_get_flags(const H5O_loc_t *loc, unsigned type_id, hid_t dxpl_id, uint8_t *flags)
+{
+ H5O_t *oh = NULL; /* Object header to use */
+ const H5O_msg_class_t *type; /* Actual H5O class type for the ID */
+ H5O_mesg_t *idx_msg; /* Pointer to message to modify */
+ unsigned idx; /* Index of message to modify */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* check args */
+ HDassert(loc);
+ HDassert(loc->file);
+ HDassert(H5F_addr_defined(loc->addr));
+ HDassert(type_id < NELMTS(H5O_msg_class_g));
+ type = H5O_msg_class_g[type_id]; /* map the type ID to the actual type object */
+ HDassert(type);
+
+ /* Get the object header */
+ if(NULL == (oh = H5O_protect(loc, dxpl_id, H5AC__READ_ONLY_FLAG, FALSE)))
+ HGOTO_ERROR(H5E_OHDR, H5E_CANTPROTECT, FAIL, "unable to protect object header")
+
+ /* Locate message of correct type */
+ for(idx = 0, idx_msg = &oh->mesg[0]; idx < oh->nmesgs; idx++, idx_msg++)
+ if(type == idx_msg->type)
+ break;
+
+ if(idx == oh->nmesgs)
+ HGOTO_ERROR(H5E_OHDR, H5E_NOTFOUND, FAIL, "message type not found")
+
+ /* Set return value */
+ *flags = idx_msg->flags;
+
+done:
+ if(oh && H5O_unprotect(loc, dxpl_id, oh, H5AC__NO_FLAGS_SET) < 0)
+ HDONE_ERROR(H5E_OHDR, H5E_CANTUNPROTECT, FAIL, "unable to release object header")
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5O_msg_get_flags() */
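A hedged sketch of how a caller inside the library might use the new routine; the object location and dxpl come from the caller's context, and the message and flag names are the ones declared in this patch:

    /* Sketch only: `oloc` and `dxpl_id` are assumed to be supplied by the
     * surrounding routine; H5O_FSINFO_ID and H5O_MSG_FLAG_CONSTANT are
     * declared in H5Oprivate.h. */
    uint8_t msg_flags = 0;

    if(H5O_msg_get_flags(oloc, H5O_FSINFO_ID, dxpl_id, &msg_flags) < 0)
        HGOTO_ERROR(H5E_OHDR, H5E_CANTGET, FAIL, "unable to get message flags")

    if(msg_flags & H5O_MSG_FLAG_CONSTANT) {
        /* the message was stored with the 'constant' header flag set */
    }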
diff --git a/src/H5Omodule.h b/src/H5Omodule.h
index a8f1301..df3ab56 100644
--- a/src/H5Omodule.h
+++ b/src/H5Omodule.h
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Omtime.c b/src/H5Omtime.c
index c61fa66..7e7baea 100644
--- a/src/H5Omtime.c
+++ b/src/H5Omtime.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Robb Matzke <matzke@llnl.gov>
diff --git a/src/H5Oname.c b/src/H5Oname.c
index 6c4f76f..6292883 100644
--- a/src/H5Oname.c
+++ b/src/H5Oname.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Onull.c b/src/H5Onull.c
index 258f695..5697455 100644
--- a/src/H5Onull.c
+++ b/src/H5Onull.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Opkg.h b/src/H5Opkg.h
index ef49535..b0c67d1 100644
--- a/src/H5Opkg.h
+++ b/src/H5Opkg.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#if !(defined H5O_FRIEND || defined H5O_MODULE)
@@ -31,7 +29,7 @@
#define H5O_NMESGS 8 /*initial number of messages */
#define H5O_NCHUNKS 2 /*initial number of chunks */
#define H5O_MIN_SIZE 22 /* Min. obj header data size (must be big enough for a message prefix and a continuation message) */
-#define H5O_MSG_TYPES 26 /* # of types of messages */
+#define H5O_MSG_TYPES 27 /* # of types of messages */
#define H5O_MAX_CRT_ORDER_IDX 65535 /* Max. creation order index value */
/* Versions of object header structure */
@@ -419,12 +417,6 @@ typedef struct H5O_chk_cache_ud_t {
} H5O_chk_cache_ud_t;
-/* H5O object header inherits cache-like properties from H5AC */
-H5_DLLVAR const H5AC_class_t H5AC_OHDR[1];
-
-/* H5O object header chunk inherits cache-like properties from H5AC */
-H5_DLLVAR const H5AC_class_t H5AC_OHDR_CHK[1];
-
/* Header message ID to class mapping */
H5_DLLVAR const H5O_msg_class_t *const H5O_msg_class_g[H5O_MSG_TYPES];
@@ -544,7 +536,10 @@ H5_DLLVAR const H5O_msg_class_t H5O_MSG_REFCOUNT[1];
/* Free-space Manager Info message. (0x0017) */
H5_DLLVAR const H5O_msg_class_t H5O_MSG_FSINFO[1];
-/* Placeholder for unknown message. (0x0018) */
+/* Metadata Cache Image message. (0x0018) */
+H5_DLLVAR const H5O_msg_class_t H5O_MSG_MDCI[1];
+
+/* Placeholder for unknown message. (0x0019) */
H5_DLLVAR const H5O_msg_class_t H5O_MSG_UNKNOWN[1];
diff --git a/src/H5Opline.c b/src/H5Opline.c
index 95a82b5..2e52dbb 100644
--- a/src/H5Opline.c
+++ b/src/H5Opline.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Oprivate.h b/src/H5Oprivate.h
index e430b1f..0f798b2 100644
--- a/src/H5Oprivate.h
+++ b/src/H5Oprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -69,6 +67,7 @@ typedef struct H5O_t H5O_t;
#define H5O_FIRST (-2) /* Operate on first message of type */
/* Flags needed when encoding messages */
+#define H5O_MSG_NO_FLAGS_SET 0x00u
#define H5O_MSG_FLAG_CONSTANT 0x01u
#define H5O_MSG_FLAG_SHARED 0x02u
#define H5O_MSG_FLAG_DONTSHARE 0x04u
@@ -99,7 +98,7 @@ typedef struct H5O_t H5O_t;
#define H5O_BOGUS_MSG_FLAGS_NAME "bogus msg flags" /* Flags for 'bogus' message */
#define H5O_BOGUS_MSG_FLAGS_SIZE sizeof(uint8_t)
-/* bogus ID can be either (a) H5O_BOGUS_VALID_ID 0x0009 or (b) H5O_BOGUS_INVALID_ID 0x0019 */
+/* bogus ID can be either (a) H5O_BOGUS_VALID_ID or (b) H5O_BOGUS_INVALID_ID */
#define H5O_BOGUS_MSG_ID_NAME "bogus msg id" /* ID for 'bogus' message */
#define H5O_BOGUS_MSG_ID_SIZE sizeof(unsigned)
@@ -203,10 +202,19 @@ typedef struct H5O_copy_t {
#define H5O_DRVINFO_ID 0x0014 /* Driver info message. */
#define H5O_AINFO_ID 0x0015 /* Attribute info message. */
#define H5O_REFCOUNT_ID 0x0016 /* Reference count message. */
-#define H5O_FSINFO_ID 0x0017 /* Free-space manager info message. */
-#define H5O_UNKNOWN_ID 0x0018 /* Placeholder message ID for unknown message. */
+#define H5O_FSINFO_ID 0x0017 /* File space info message. */
+#define H5O_MDCI_MSG_ID 0x0018 /* Metadata Cache Image Message */
+#define H5O_UNKNOWN_ID 0x0019 /* Placeholder message ID for unknown message. */
/* (this should never exist in a file) */
-#define H5O_BOGUS_INVALID_ID 0x0019 /* "Bogus invalid" Message. */
+/*
+ * Note: Must increment H5O_MSG_TYPES in H5Opkg.h and update H5O_msg_class_g
+ * in H5O.c when creating a new message type. Also bump the value of
+ * H5O_BOGUS_INVALID_ID, below, to be one greater than the value of
+ * H5O_UNKNOWN_ID.
+ *
+ * (this should never exist in a file)
+ */
+#define H5O_BOGUS_INVALID_ID 0x001A /* "Bogus invalid" Message. */
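To make the note above concrete, here is a hedged sketch of the coordinated edits a hypothetical future message (call it H5O_EXAMPLE_ID) would need, mirroring what this patch did for the metadata cache image message; the new names and values are illustrative only:

    /* H5Oprivate.h -- take the slot before the placeholder and shift both
     * sentinel IDs up by one (hypothetical values): */
    #define H5O_EXAMPLE_ID        0x0019  /* new message type                    */
    #define H5O_UNKNOWN_ID        0x001A  /* placeholder, never stored in a file */
    #define H5O_BOGUS_INVALID_ID  0x001B  /* one greater than H5O_UNKNOWN_ID     */

    /* H5Opkg.h -- bump the message-type count: */
    #define H5O_MSG_TYPES 28              /* was 27 after this patch             */

    /* H5O.c -- append the new class to H5O_msg_class_g[], keeping
     * H5O_MSG_UNKNOWN last (sketch): ..., H5O_MSG_EXAMPLE, H5O_MSG_UNKNOWN */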
/* Shared object message types.
* Shared objects can be committed, in which case the shared message contains
@@ -574,6 +582,7 @@ typedef struct H5O_layout_chunk_earray_t {
unsigned unlim_dim; /* Rank of unlimited dimension for dataset */
uint32_t swizzled_dim[H5O_LAYOUT_NDIMS]; /* swizzled chunk dimensions */
hsize_t swizzled_down_chunks[H5O_LAYOUT_NDIMS]; /* swizzled "down" size of number of chunks in each dimension */
+ hsize_t swizzled_max_down_chunks[H5O_LAYOUT_NDIMS]; /* swizzled max "down" size of number of chunks in each dimension */
} H5O_layout_chunk_earray_t;
typedef struct H5O_layout_chunk_bt2_t {
@@ -775,17 +784,34 @@ typedef uint32_t H5O_refcount_t; /* Contains # of links to object, if >1
typedef unsigned H5O_unknown_t; /* Original message type ID */
/*
- * Free space manager info Message.
+ * File space info Message.
* Contains file space management info and
* addresses of free space managers for file memory
* (Data structure in memory)
*/
typedef struct H5O_fsinfo_t {
- H5F_file_space_type_t strategy; /* File space strategy */
- hsize_t threshold; /* Free space section threshold */
- haddr_t fs_addr[H5FD_MEM_NTYPES-1]; /* Addresses of free space managers */
+ H5F_fspace_strategy_t strategy; /* File space strategy */
+ hbool_t persist; /* Persisting free-space or not */
+ hsize_t threshold; /* Free-space section threshold */
+ hsize_t page_size; /* For paged aggregation: file space page size */
+ size_t pgend_meta_thres; /* For paged aggregation: page end metadata threshold */
+ haddr_t eoa_pre_fsm_fsalloc; /* For paged aggregation: the eoa before free-space headers & sinfo */
+ haddr_t fs_addr[H5F_MEM_PAGE_NTYPES - 1]; /* 13 addresses of free-space managers */
+ /* For non-paged aggregation: only 6 addresses are used */
+ hbool_t mapped; /* Not stored */
+ /* Indicate the message is mapped from version 0 to version 1 */
} H5O_fsinfo_t;
+/*
+ * Metadata Cache Image Message.
+ * Contains base address and length of the metadata cache image.
+ * (Data structure in memory)
+ */
+typedef struct H5O_mdci_t {
+ haddr_t addr; /* address of MDC image block */
+ hsize_t size; /* size of MDC image block */
+} H5O_mdci_t;
+
/* Typedef for "application" iteration operations */
typedef herr_t (*H5O_operator_t)(const void *mesg/*in*/, unsigned idx,
void *operator_data/*in,out*/);
@@ -900,6 +926,7 @@ H5_DLL void* H5O_msg_decode(H5F_t *f, hid_t dxpl_id, H5O_t *open_oh,
unsigned type_id, const unsigned char *buf);
H5_DLL herr_t H5O_msg_delete(H5F_t *f, hid_t dxpl_id, H5O_t *open_oh,
unsigned type_id, void *mesg);
+H5_DLL herr_t H5O_msg_get_flags(const H5O_loc_t *loc, unsigned type_id, hid_t dxpl_id, uint8_t *flags);
/* Object metadata flush/refresh routines */
H5_DLL herr_t H5O_flush_common(H5O_loc_t *oloc, hid_t obj_id, hid_t dxpl_id);
diff --git a/src/H5Opublic.h b/src/H5Opublic.h
index dec7b5b..8d6dda4 100644
--- a/src/H5Opublic.h
+++ b/src/H5Opublic.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Orefcount.c b/src/H5Orefcount.c
index ff7dfee..af68417 100644
--- a/src/H5Orefcount.c
+++ b/src/H5Orefcount.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Osdspace.c b/src/H5Osdspace.c
index 28021de..3fe5652 100644
--- a/src/H5Osdspace.c
+++ b/src/H5Osdspace.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "H5Omodule.h" /* This source code file is part of the H5O module */
diff --git a/src/H5Oshared.c b/src/H5Oshared.c
index 25baa88..db2d0cc 100644
--- a/src/H5Oshared.c
+++ b/src/H5Oshared.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Oshared.h b/src/H5Oshared.h
index e8d620a..2465e65 100644
--- a/src/H5Oshared.h
+++ b/src/H5Oshared.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Oshmesg.c b/src/H5Oshmesg.c
index a506ce2..1cbfb05 100644
--- a/src/H5Oshmesg.c
+++ b/src/H5Oshmesg.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
diff --git a/src/H5Ostab.c b/src/H5Ostab.c
index bb39e58..5c840a6 100644
--- a/src/H5Ostab.c
+++ b/src/H5Ostab.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Otest.c b/src/H5Otest.c
index 8f8980a..f0deade 100644
--- a/src/H5Otest.c
+++ b/src/H5Otest.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Quincey Koziol <koziol@hdfgroup.org>
diff --git a/src/H5Ounknown.c b/src/H5Ounknown.c
index 546e839..1b3a997 100644
--- a/src/H5Ounknown.c
+++ b/src/H5Ounknown.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5P.c b/src/H5P.c
index 254c3a9..49bea0a 100644
--- a/src/H5P.c
+++ b/src/H5P.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Quincey Koziol <koziol@ncsa.uiuc.edu>
diff --git a/src/H5PB.c b/src/H5PB.c
new file mode 100644
index 0000000..52576f8
--- /dev/null
+++ b/src/H5PB.c
@@ -0,0 +1,1537 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: H5PB.c
+ *
+ * Purpose: Page Buffer routines.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/****************/
+/* Module Setup */
+/****************/
+
+#define H5F_FRIEND /*suppress error about including H5Fpkg */
+#include "H5PBmodule.h" /* This source code file is part of the H5PB module */
+
+
+/***********/
+/* Headers */
+/***********/
+#include "H5private.h" /* Generic Functions */
+#include "H5Eprivate.h" /* Error handling */
+#include "H5Fpkg.h" /* Files */
+#include "H5FDprivate.h" /* File drivers */
+#include "H5Iprivate.h" /* IDs */
+#include "H5PBpkg.h" /* File access */
+#include "H5SLprivate.h" /* Skip List */
+
+
+/****************/
+/* Local Macros */
+/****************/
+#define H5PB__PREPEND(page_ptr, head_ptr, tail_ptr, len) { \
+ if((head_ptr) == NULL) { \
+ (head_ptr) = (page_ptr); \
+ (tail_ptr) = (page_ptr); \
+ } /* end if */ \
+ else { \
+ (head_ptr)->prev = (page_ptr); \
+ (page_ptr)->next = (head_ptr); \
+ (head_ptr) = (page_ptr); \
+ } /* end else */ \
+ (len)++; \
+} /* H5PB__PREPEND() */
+
+#define H5PB__REMOVE(page_ptr, head_ptr, tail_ptr, len) { \
+ if((head_ptr) == (page_ptr)) { \
+ (head_ptr) = (page_ptr)->next; \
+ if((head_ptr) != NULL) \
+ (head_ptr)->prev = NULL; \
+ } /* end if */ \
+ else \
+ (page_ptr)->prev->next = (page_ptr)->next; \
+ if((tail_ptr) == (page_ptr)) { \
+ (tail_ptr) = (page_ptr)->prev; \
+ if((tail_ptr) != NULL) \
+ (tail_ptr)->next = NULL; \
+ } /* end if */ \
+ else \
+ (page_ptr)->next->prev = (page_ptr)->prev; \
+ page_ptr->next = NULL; \
+ page_ptr->prev = NULL; \
+ (len)--; \
+}
+
+#define H5PB__INSERT_LRU(page_buf, page_ptr) { \
+ HDassert(page_buf); \
+ HDassert(page_ptr); \
+ /* insert the entry at the head of the list. */ \
+ H5PB__PREPEND((page_ptr), (page_buf)->LRU_head_ptr, \
+ (page_buf)->LRU_tail_ptr, (page_buf)->LRU_list_len) \
+}
+
+#define H5PB__REMOVE_LRU(page_buf, page_ptr) { \
+ HDassert(page_buf); \
+ HDassert(page_ptr); \
+ /* remove the entry from the list. */ \
+ H5PB__REMOVE((page_ptr), (page_buf)->LRU_head_ptr, \
+ (page_buf)->LRU_tail_ptr, (page_buf)->LRU_list_len) \
+}
+
+#define H5PB__MOVE_TO_TOP_LRU(page_buf, page_ptr) { \
+ HDassert(page_buf); \
+ HDassert(page_ptr); \
+ /* Remove entry and insert at the head of the list. */ \
+ H5PB__REMOVE((page_ptr), (page_buf)->LRU_head_ptr, \
+ (page_buf)->LRU_tail_ptr, (page_buf)->LRU_list_len) \
+ H5PB__PREPEND((page_ptr), (page_buf)->LRU_head_ptr, \
+ (page_buf)->LRU_tail_ptr, (page_buf)->LRU_list_len) \
+}
+
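+/* The macros above maintain the page buffer's LRU as an intrusive doubly
+ * linked list.  The sketch below restates the H5PB__PREPEND()/H5PB__REMOVE()
+ * logic on a hypothetical stand-alone node type; it is illustrative only and
+ * is kept out of the build.
+ */
+#if 0
+typedef struct demo_node_t {
+    struct demo_node_t *next;
+    struct demo_node_t *prev;
+} demo_node_t;
+
+/* Equivalent of H5PB__PREPEND(): push 'n' onto the head of the list */
+static void
+demo_prepend(demo_node_t *n, demo_node_t **head, demo_node_t **tail, size_t *len)
+{
+    if(NULL == *head)
+        *head = *tail = n;      /* empty list: 'n' becomes both head and tail */
+    else {
+        (*head)->prev = n;      /* old head points back to 'n' */
+        n->next = *head;        /* 'n' points forward to the old head */
+        *head = n;              /* 'n' is the new head */
+    }
+    (*len)++;
+}
+#endif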
+
+/******************/
+/* Local Typedefs */
+/******************/
+
+/* Iteration context for destroying page buffer */
+typedef struct {
+ H5PB_t *page_buf;
+ hbool_t actual_slist;
+} H5PB_ud1_t;
+
+
+/********************/
+/* Package Typedefs */
+/********************/
+
+
+/********************/
+/* Local Prototypes */
+/********************/
+static herr_t H5PB__insert_entry(H5PB_t *page_buf, H5PB_entry_t *page_entry);
+static htri_t H5PB__make_space(const H5F_io_info2_t *fio_info, H5PB_t *page_buf, H5FD_mem_t inserted_type);
+static herr_t H5PB__write_entry(const H5F_io_info2_t *fio_info, H5PB_entry_t *page_entry);
+
+
+/*********************/
+/* Package Variables */
+/*********************/
+
+/* Package initialization variable */
+hbool_t H5_PKG_INIT_VAR = FALSE;
+
+
+/*****************************/
+/* Library Private Variables */
+/*****************************/
+
+
+/*******************/
+/* Local Variables */
+/*******************/
+/* Declare a free list to manage the H5PB_t struct */
+H5FL_DEFINE_STATIC(H5PB_t);
+
+/* Declare a free list to manage the H5PB_entry_t struct */
+H5FL_DEFINE_STATIC(H5PB_entry_t);
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5PB_reset_stats
+ *
+ * Purpose: This function was created without documentation.
+ * What follows is my best understanding of Mohamad's intent.
+ *
+ * Reset statistics collected for the page buffer layer.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Mohamad Chaarawi
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5PB_reset_stats(H5PB_t *page_buf)
+{
+ FUNC_ENTER_NOAPI_NOERR
+
+ /* Sanity checks */
+ HDassert(page_buf);
+
+ page_buf->accesses[0] = 0;
+ page_buf->accesses[1] = 0;
+ page_buf->hits[0] = 0;
+ page_buf->hits[1] = 0;
+ page_buf->misses[0] = 0;
+ page_buf->misses[1] = 0;
+ page_buf->evictions[0] = 0;
+ page_buf->evictions[1] = 0;
+ page_buf->bypasses[0] = 0;
+ page_buf->bypasses[1] = 0;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5PB_reset_stats() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5PB_get_stats
+ *
+ * Purpose: This function was created without documentation.
+ * What follows is my best understanding of Mohamad's intent.
+ *
+ * Retrieve statistics collected about page accesses for the page buffer layer.
+ * --accesses: the number of metadata and raw data accesses to the page buffer layer
+ * --hits: the number of metadata and raw data hits in the page buffer layer
+ * --misses: the number of metadata and raw data misses in the page buffer layer
+ * --evictions: the number of metadata and raw data evictions from the page buffer layer
+ * --bypasses: the number of metadata and raw data accesses that bypass the page buffer layer
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Mohamad Chaarawi
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5PB_get_stats(const H5PB_t *page_buf, unsigned accesses[2], unsigned hits[2],
+ unsigned misses[2], unsigned evictions[2], unsigned bypasses[2])
+{
+ FUNC_ENTER_NOAPI_NOERR
+
+ /* Sanity checks */
+ HDassert(page_buf);
+
+ accesses[0] = page_buf->accesses[0];
+ accesses[1] = page_buf->accesses[1];
+ hits[0] = page_buf->hits[0];
+ hits[1] = page_buf->hits[1];
+ misses[0] = page_buf->misses[0];
+ misses[1] = page_buf->misses[1];
+ evictions[0] = page_buf->evictions[0];
+ evictions[1] = page_buf->evictions[1];
+ bypasses[0] = page_buf->bypasses[0];
+ bypasses[1] = page_buf->bypasses[1];
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5PB_get_stats */
+
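+/* A minimal usage sketch for H5PB_get_stats() above: index 0 of each array
+ * counts metadata pages and index 1 counts raw data pages, and the hit rate
+ * below mirrors the formula used by H5PB_print_stats().  The file pointer
+ * 'f' is a hypothetical caller-side variable; the block is illustrative only
+ * and is kept out of the build.
+ */
+#if 0
+{
+    unsigned accesses[2], hits[2], misses[2], evictions[2], bypasses[2];
+    double meta_hit_rate = 0.0;
+
+    if(H5PB_get_stats(f->shared->page_buf, accesses, hits, misses, evictions, bypasses) >= 0)
+        /* e.g. accesses[0] = 120, bypasses[0] = 20, hits[0] = 75
+         *      => hit rate = 75 / (120 - 20) * 100 = 75%
+         */
+        meta_hit_rate = (double)hits[0] / (double)(accesses[0] - bypasses[0]) * 100.0;
+}
+#endif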
+
+/*-------------------------------------------------------------------------
+ * Function: H5PB_print_stats()
+ *
+ * Purpose: This function was created without documentation.
+ * What follows is my best understanding of Mohamad's intent.
+ *
+ * Print out statistics collected for the page buffer layer.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Mohamad Chaarawi
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5PB_print_stats(const H5PB_t *page_buf)
+{
+ FUNC_ENTER_NOAPI_NOINIT_NOERR
+
+ HDassert(page_buf);
+
+ printf("PAGE BUFFER STATISTICS:\n");
+
+ printf("******* METADATA\n");
+ printf("\t Total Accesses: %u\n", page_buf->accesses[0]);
+ printf("\t Hits: %u\n", page_buf->hits[0]);
+ printf("\t Misses: %u\n", page_buf->misses[0]);
+ printf("\t Evictions: %u\n", page_buf->evictions[0]);
+ printf("\t Bypasses: %u\n", page_buf->bypasses[0]);
+ printf("\t Hit Rate = %f%%\n", ((double)page_buf->hits[0]/(page_buf->accesses[0] - page_buf->bypasses[0]))*100);
+ printf("*****************\n\n");
+
+ printf("******* RAWDATA\n");
+ printf("\t Total Accesses: %u\n", page_buf->accesses[1]);
+ printf("\t Hits: %u\n", page_buf->hits[1]);
+ printf("\t Misses: %u\n", page_buf->misses[1]);
+ printf("\t Evictions: %u\n", page_buf->evictions[1]);
+ printf("\t Bypasses: %u\n", page_buf->bypasses[1]);
+ printf("\t Hit Rate = %f%%\n", ((double)page_buf->hits[1]/(page_buf->accesses[1]-page_buf->bypasses[0]))*100);
+ printf("*****************\n\n");
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5PB_print_stats */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5PB_create
+ *
+ * Purpose: Create and setup the PB on the file.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Mohamad Chaarawi
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5PB_create(H5F_t *f, size_t size, unsigned page_buf_min_meta_perc, unsigned page_buf_min_raw_perc)
+{
+ H5PB_t *page_buf = NULL;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(f);
+ HDassert(f->shared);
+
+ /* Check args */
+ if(f->shared->fs_strategy != H5F_FSPACE_STRATEGY_PAGE)
+ HGOTO_ERROR(H5E_FILE, H5E_CANTINIT, FAIL, "Enabling Page Buffering requires PAGE file space strategy")
+ /* round down the size if it is larger than the page size */
+ else if(size > f->shared->fs_page_size) {
+ hsize_t temp_size;
+
+ temp_size = (size / f->shared->fs_page_size) * f->shared->fs_page_size;
+ H5_CHECKED_ASSIGN(size, size_t, temp_size, hsize_t);
+ } /* end if */
+ else if(0 != size % f->shared->fs_page_size)
+        HGOTO_ERROR(H5E_PAGEBUF, H5E_CANTINIT, FAIL, "Page Buffer size must be >= the page size")
+
+ /* Allocate the new page buffering structure */
+ if(NULL == (page_buf = H5FL_CALLOC(H5PB_t)))
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_NOSPACE, FAIL, "memory allocation failed")
+
+ page_buf->max_size = size;
+ H5_CHECKED_ASSIGN(page_buf->page_size, size_t, f->shared->fs_page_size, hsize_t);
+ page_buf->min_meta_perc = page_buf_min_meta_perc;
+ page_buf->min_raw_perc = page_buf_min_raw_perc;
+
+ /* Calculate the minimum page count for metadata and raw data
+ * based on the fractions provided
+ */
+ page_buf->min_meta_count = (unsigned)((size * page_buf_min_meta_perc) / (f->shared->fs_page_size * 100));
+ page_buf->min_raw_count = (unsigned)((size * page_buf_min_raw_perc) / (f->shared->fs_page_size * 100));
+
+ if(NULL == (page_buf->slist_ptr = H5SL_create(H5SL_TYPE_HADDR, NULL)))
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_CANTCREATE, FAIL, "can't create skip list")
+ if(NULL == (page_buf->mf_slist_ptr = H5SL_create(H5SL_TYPE_HADDR, NULL)))
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_CANTCREATE, FAIL, "can't create skip list")
+
+ if(NULL == (page_buf->page_fac = H5FL_fac_init(page_buf->page_size)))
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_CANTINIT, FAIL, "can't create page factory")
+
+ f->shared->page_buf = page_buf;
+
+done:
+ if(ret_value < 0) {
+ if(page_buf != NULL) {
+ if(page_buf->slist_ptr != NULL)
+ H5SL_close(page_buf->slist_ptr);
+ if(page_buf->mf_slist_ptr != NULL)
+ H5SL_close(page_buf->mf_slist_ptr);
+ if(page_buf->page_fac != NULL)
+ H5FL_fac_term(page_buf->page_fac);
+ page_buf = H5FL_FREE(H5PB_t, page_buf);
+ } /* end if */
+ } /* end if */
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5PB_create */
+
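+/* A minimal sketch of how an application might arrange for H5PB_create()
+ * above to run at file-create time, assuming the public property-list
+ * routines that accompany this feature (H5Pset_file_space_strategy,
+ * H5Pset_file_space_page_size and H5Pset_page_buffer_size).  The file name
+ * and sizes are illustrative only; the block is kept out of the build.
+ */
+#if 0
+{
+    hid_t fcpl = H5Pcreate(H5P_FILE_CREATE);
+    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
+    hid_t file;
+
+    /* The PAGE file space strategy is required (see the check above) */
+    H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, FALSE, (hsize_t)1);
+    H5Pset_file_space_page_size(fcpl, (hsize_t)4096);
+
+    /* 64 KiB page buffer; keep at least 50% metadata and 25% raw data pages */
+    H5Pset_page_buffer_size(fapl, (size_t)(64 * 1024), 50, 25);
+
+    file = H5Fcreate("example.h5", H5F_ACC_TRUNC, fcpl, fapl);
+}
+#endif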
+
+/*-------------------------------------------------------------------------
+ * Function: H5PB__flush_cb
+ *
+ * Purpose: Callback to flush PB skiplist entries.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Mohamad Chaarawi
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5PB__flush_cb(void *item, void H5_ATTR_UNUSED *key, void *_op_data)
+{
+ H5PB_entry_t *page_entry = (H5PB_entry_t *)item; /* Pointer to page entry node */
+ const H5F_io_info2_t *fio_info = (const H5F_io_info2_t *)_op_data;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(page_entry);
+ HDassert(fio_info);
+
+ /* Flush the page if it's dirty */
+ if(page_entry->is_dirty)
+ if(H5PB__write_entry(fio_info, page_entry) < 0)
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_WRITEERROR, FAIL, "file write failed")
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5PB__flush_cb() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5PB_flush
+ *
+ * Purpose: Flush/Free all the PB entries to the file.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Mohamad Chaarawi
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5PB_flush(const H5F_io_info2_t *fio_info)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity check */
+ HDassert(fio_info);
+ HDassert(fio_info->f);
+ HDassert(fio_info->meta_dxpl);
+ HDassert(fio_info->raw_dxpl);
+
+ /* Flush all the entries in the PB skiplist, if we have write access on the file */
+ if(fio_info->f->shared->page_buf && (H5F_ACC_RDWR & H5F_INTENT(fio_info->f))) {
+ H5PB_t *page_buf = fio_info->f->shared->page_buf;
+
+ /* Iterate over all entries in page buffer skip list */
+ if(H5SL_iterate(page_buf->slist_ptr, H5PB__flush_cb, (void *)fio_info))
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_BADITER, FAIL, "can't flush page buffer skip list")
+ } /* end if */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5PB_flush */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5PB__dest_cb
+ *
+ * Purpose: Callback to free PB skiplist entries.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Mohamad Chaarawi
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5PB__dest_cb(void *item, void H5_ATTR_UNUSED *key, void *_op_data)
+{
+ H5PB_entry_t *page_entry = (H5PB_entry_t *)item; /* Pointer to page entry node */
+ H5PB_ud1_t *op_data = (H5PB_ud1_t *)_op_data;
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity checking */
+ HDassert(page_entry);
+ HDassert(op_data);
+ HDassert(op_data->page_buf);
+
+ /* Remove entry from LRU list */
+ if(op_data->actual_slist) {
+ H5PB__REMOVE_LRU(op_data->page_buf, page_entry)
+ page_entry->page_buf_ptr = H5FL_FAC_FREE(op_data->page_buf->page_fac, page_entry->page_buf_ptr);
+ } /* end if */
+
+ /* Free page entry */
+ page_entry = H5FL_FREE(H5PB_entry_t, page_entry);
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5PB__dest_cb() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5PB_dest
+ *
+ * Purpose: Flush and destroy the PB on the file if it exists.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Mohamad Chaarawi
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5PB_dest(const H5F_io_info2_t *fio_info)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+ H5F_t *f; /* file pointer */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(fio_info);
+ f = fio_info->f;
+ HDassert(f);
+
+ /* flush and destroy the page buffer, if it exists */
+ if(f->shared->page_buf) {
+ H5PB_t *page_buf = f->shared->page_buf;
+ H5PB_ud1_t op_data; /* Iteration context */
+
+ if(H5PB_flush(fio_info)<0)
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_CANTFLUSH, FAIL, "can't flush page buffer")
+
+ /* Set up context info */
+ op_data.page_buf = page_buf;
+
+ /* Destroy the skip list containing all the entries in the PB */
+ op_data.actual_slist = TRUE;
+ if(H5SL_destroy(page_buf->slist_ptr, H5PB__dest_cb, &op_data))
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_CANTCLOSEOBJ, FAIL, "can't destroy page buffer skip list")
+
+ /* Destroy the skip list containing the new entries */
+ op_data.actual_slist = FALSE;
+ if(H5SL_destroy(page_buf->mf_slist_ptr, H5PB__dest_cb, &op_data))
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_CANTCLOSEOBJ, FAIL, "can't destroy page buffer skip list")
+
+ /* Destroy the page factory */
+ if(H5FL_fac_term(page_buf->page_fac) < 0)
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_CANTRELEASE, FAIL, "can't destroy page buffer page factory")
+
+#ifdef QAK
+H5PB_print_stats(page_buf);
+#endif /* QAK */
+
+ f->shared->page_buf = H5FL_FREE(H5PB_t, page_buf);
+ } /* end if */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5PB_dest */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5PB_add_new_page
+ *
+ * Purpose: Add a new page to the new page skip list. This is called
+ * from the MF layer when a new page is allocated to
+ * indicate to the page buffer layer that a read of the page
+ * from the file is not necessary since it's an empty page.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Mohamad Chaarawi
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5PB_add_new_page(H5F_t *f, H5FD_mem_t type, haddr_t page_addr)
+{
+ H5PB_t *page_buf = f->shared->page_buf;
+ H5PB_entry_t *page_entry = NULL; /* pointer to the corresponding page entry */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(page_buf);
+
+ /* If there is an existing page, this means that at some point the
+ * file free space manager freed and re-allocated a page at the same
+ * address. No need to do anything here then...
+ */
+ /* MSC - to be safe, might want to dig in the MF layer and remove
+ * the page when it is freed from this list if it still exists and
+ * remove this check
+ */
+ if(NULL == H5SL_search(page_buf->mf_slist_ptr, &(page_addr))) {
+ /* Create the new PB entry */
+ if(NULL == (page_entry = H5FL_CALLOC(H5PB_entry_t)))
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_NOSPACE, FAIL, "memory allocation failed")
+
+ /* Initialize page fields */
+ page_entry->addr = page_addr;
+ page_entry->type = (H5F_mem_page_t)type;
+ page_entry->is_dirty = FALSE;
+
+ /* Insert entry in skip list */
+ if(H5SL_insert(page_buf->mf_slist_ptr, page_entry, &(page_entry->addr)) < 0)
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_BADVALUE, FAIL, "Can't insert entry in skip list")
+ } /* end if */
+
+done:
+ if(ret_value < 0)
+ if(page_entry)
+ page_entry = H5FL_FREE(H5PB_entry_t, page_entry);
+
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5PB_add_new_page */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5PB_update_entry
+ *
+ * Purpose: In PHDF5, entries that are written by other processes and just
+ * marked clean by this process have to have their corresponding
+ * pages updated if they exist in the page buffer.
+ *              This routine checks for and updates such pages.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Mohamad Chaarawi
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5PB_update_entry(H5PB_t *page_buf, haddr_t addr, size_t size, const void *buf)
+{
+ H5PB_entry_t *page_entry; /* Pointer to the corresponding page entry */
+ haddr_t page_addr;
+
+ FUNC_ENTER_NOAPI_NOERR
+
+ /* Sanity checks */
+ HDassert(page_buf);
+ HDassert(size <= page_buf->page_size);
+ HDassert(buf);
+
+ /* calculate the aligned address of the first page */
+ page_addr = (addr / page_buf->page_size) * page_buf->page_size;
+
+ /* search for the page and update if found */
+ page_entry = (H5PB_entry_t *)H5SL_search(page_buf->slist_ptr, (void *)(&page_addr));
+ if(page_entry) {
+ haddr_t offset;
+
+ HDassert(addr + size <= page_addr + page_buf->page_size);
+ offset = addr - page_addr;
+ HDmemcpy((uint8_t *)page_entry->page_buf_ptr + offset, buf, size);
+
+ /* move to top of LRU list */
+ H5PB__MOVE_TO_TOP_LRU(page_buf, page_entry)
+ } /* end if */
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* H5PB_update_entry */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5PB_remove_entry
+ *
+ * Purpose: Remove possible metadata entry with ADDR from the PB cache.
+ * This is in response to the data corruption bug from fheap.c
+ * with page buffering + page strategy.
+ *              Note: Large metadata pages bypass the PB cache.
+ *              Note: Updates of raw data pages (large or small) are handled by the PB cache.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; Feb 2017
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5PB_remove_entry(const H5F_t *f, haddr_t addr)
+{
+ H5PB_t *page_buf = f->shared->page_buf;
+ H5PB_entry_t *page_entry = NULL; /* pointer to the page entry being searched */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(page_buf);
+
+ /* Search for address in the skip list */
+ page_entry = (H5PB_entry_t *)H5SL_search(page_buf->slist_ptr, (void *)(&addr));
+
+ /* If found, remove the entry from the PB cache */
+ if(page_entry) {
+ HDassert(page_entry->type != H5F_MEM_PAGE_DRAW);
+ if(NULL == H5SL_remove(page_buf->slist_ptr, &(page_entry->addr)))
+ HGOTO_ERROR(H5E_CACHE, H5E_BADVALUE, FAIL, "Page Entry is not in skip list")
+
+ /* Remove from LRU list */
+ H5PB__REMOVE_LRU(page_buf, page_entry)
+ HDassert(H5SL_count(page_buf->slist_ptr) == page_buf->LRU_list_len);
+
+ page_buf->meta_count--;
+
+ page_entry->page_buf_ptr = H5FL_FAC_FREE(page_buf->page_fac, page_entry->page_buf_ptr);
+ page_entry = H5FL_FREE(H5PB_entry_t, page_entry);
+ } /* end if */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* H5PB_remove_entry */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5PB_read
+ *
+ * Purpose: Reads in the data from the page containing it if it exists
+ * in the PB cache; otherwise reads in the page through the VFD.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Mohamad Chaarawi
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5PB_read(const H5F_io_info2_t *fio_info, H5FD_mem_t type, haddr_t addr,
+ size_t size, void *buf/*out*/)
+{
+ H5PB_t *page_buf; /* Page buffering info for this file */
+ H5PB_entry_t *page_entry; /* Pointer to the corresponding page entry */
+ H5FD_io_info_t fdio_info; /* File driver I/O info */
+ haddr_t first_page_addr, last_page_addr; /* Addresses of the first and last pages covered by I/O */
+ haddr_t offset;
+ haddr_t search_addr; /* Address of current page */
+ hsize_t num_touched_pages; /* Number of pages accessed */
+ size_t access_size;
+ hbool_t bypass_pb = FALSE; /* Whether to bypass page buffering */
+ hsize_t i; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(fio_info);
+
+ /* Get pointer to page buffer info for this file */
+ page_buf = fio_info->f->shared->page_buf;
+
+#ifdef H5_HAVE_PARALLEL
+ if(H5F_HAS_FEATURE(fio_info->f, H5FD_FEAT_HAS_MPI)) {
+#if 1
+ bypass_pb = TRUE;
+#else
+ /* MSC - why this stopped working ? */
+ int mpi_size;
+
+ if((mpi_size = H5F_mpi_get_size(fio_info->f)) < 0)
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_CANTGET, FAIL, "can't retrieve MPI communicator size")
+ if(1 != mpi_size)
+ bypass_pb = TRUE;
+#endif
+ } /* end if */
+#endif
+
+ /* If page buffering is disabled, or the I/O size is larger than that of a
+ * single page, or if this is a parallel raw data access, bypass page
+ * buffering.
+ */
+ if(NULL == page_buf || size >= page_buf->page_size ||
+ (bypass_pb && H5FD_MEM_DRAW == type)) {
+ if(H5F__accum_read(fio_info, type, addr, size, buf) < 0)
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_READERROR, FAIL, "read through metadata accumulator failed")
+
+ /* Update statistics */
+ if(page_buf) {
+ if(type == H5FD_MEM_DRAW || type == H5FD_MEM_GHEAP)
+ page_buf->bypasses[1] ++;
+ else
+ page_buf->bypasses[0] ++;
+ } /* end if */
+
+ /* If page buffering is disabled, or if this is a large metadata access,
+ * or if this is parallel raw data access, we are done here
+ */
+ if(NULL == page_buf || (size >= page_buf->page_size && H5FD_MEM_DRAW != type) ||
+ (bypass_pb && H5FD_MEM_DRAW == type))
+ HGOTO_DONE(SUCCEED)
+ } /* end if */
+
+ /* Update statistics */
+ if(page_buf) {
+ if(type == H5FD_MEM_DRAW || type == H5FD_MEM_GHEAP)
+ page_buf->accesses[1]++;
+ else
+ page_buf->accesses[0]++;
+ } /* end if */
+
+ /* Calculate the aligned address of the first page */
+ first_page_addr = (addr / page_buf->page_size) * page_buf->page_size;
+
+    /* For raw data, calculate the aligned address of the last page and
+ * the number of pages accessed if more than 1 page is accessed
+ */
+ if(H5FD_MEM_DRAW == type) {
+ last_page_addr = ((addr + size - 1) / page_buf->page_size) * page_buf->page_size;
+
+ /* How many pages does this write span */
+ num_touched_pages = (last_page_addr / page_buf->page_size + 1) -
+ (first_page_addr / page_buf->page_size);
+ if(first_page_addr == last_page_addr) {
+ HDassert(1 == num_touched_pages);
+ last_page_addr = HADDR_UNDEF;
+ } /* end if */
+ } /* end if */
+ /* Otherwise set last page addr to HADDR_UNDEF */
+ else {
+ num_touched_pages = 1;
+ last_page_addr = HADDR_UNDEF;
+ } /* end else */
+
+ /* Translate to file driver I/O info object */
+ fdio_info.file = fio_info->f->shared->lf;
+ fdio_info.meta_dxpl = fio_info->meta_dxpl;
+ fdio_info.raw_dxpl = fio_info->raw_dxpl;
+
+    /* Copy raw data from dirty pages into the read buffer if the read
+     * request spans pages in the page buffer */
+ if(H5FD_MEM_DRAW == type && size >= page_buf->page_size) {
+ H5SL_node_t *node;
+
+        /* For each touched page, check if it exists in the page buffer and
+         * is dirty.  If it does, update the read buffer with the page's
+         * contents so that it holds up-to-date data after the big read from
+         * the file.
+         */
+ node = H5SL_find(page_buf->slist_ptr, (void *)(&first_page_addr));
+ for(i = 0; i < num_touched_pages; i++) {
+ search_addr = i*page_buf->page_size + first_page_addr;
+
+ /* if we still haven't located a starting page, search again */
+ if(!node && i!=0)
+ node = H5SL_find(page_buf->slist_ptr, (void *)(&search_addr));
+
+ /* if the current page is in the Page Buffer, do the updates */
+ if(node) {
+ page_entry = (H5PB_entry_t *)H5SL_item(node);
+
+ HDassert(page_entry);
+
+ /* If the current page address falls out of the access
+ block, then there are no more pages to go over */
+ if(page_entry->addr >= addr + size)
+ break;
+
+ HDassert(page_entry->addr == search_addr);
+
+ if(page_entry->is_dirty) {
+ /* special handling for the first page if it is not a full page access */
+ if(i == 0 && first_page_addr != addr) {
+ offset = addr - first_page_addr;
+ HDassert(page_buf->page_size > offset);
+
+ HDmemcpy(buf, (uint8_t *)page_entry->page_buf_ptr + offset,
+ page_buf->page_size - (size_t)offset);
+
+ /* move to top of LRU list */
+ H5PB__MOVE_TO_TOP_LRU(page_buf, page_entry)
+ } /* end if */
+ /* special handling for the last page if it is not a full page access */
+ else if(num_touched_pages > 1 && i == num_touched_pages-1 && search_addr < addr+size) {
+ offset = (num_touched_pages-2)*page_buf->page_size +
+ (page_buf->page_size - (addr - first_page_addr));
+
+ HDmemcpy((uint8_t *)buf + offset, page_entry->page_buf_ptr,
+ (size_t)((addr + size) - last_page_addr));
+
+ /* move to top of LRU list */
+ H5PB__MOVE_TO_TOP_LRU(page_buf, page_entry)
+ } /* end else-if */
+ /* copy the entire fully accessed pages */
+ else {
+ offset = i*page_buf->page_size;
+
+ HDmemcpy((uint8_t *)buf+(i*page_buf->page_size) , page_entry->page_buf_ptr,
+ page_buf->page_size);
+ } /* end else */
+ } /* end if */
+ node = H5SL_next(node);
+ } /* end if */
+ } /* end for */
+ } /* end if */
+ else {
+ /* A raw data access could span 1 or 2 PB entries at this point so
+ we need to handle that */
+ HDassert(1 == num_touched_pages || 2 == num_touched_pages);
+ for(i = 0 ; i < num_touched_pages; i++) {
+ haddr_t buf_offset;
+
+ /* Calculate the aligned address of the page to search for it in the skip list */
+ search_addr = (0==i ? first_page_addr : last_page_addr);
+
+ /* Calculate the access size if the access spans more than 1 page */
+ if(1 == num_touched_pages)
+ access_size = size;
+ else
+ access_size = (0 == i ? (size_t)((first_page_addr + page_buf->page_size) - addr) : (size - access_size));
+
+ /* Lookup the page in the skip list */
+ page_entry = (H5PB_entry_t *)H5SL_search(page_buf->slist_ptr, (void *)(&search_addr));
+
+ /* if found */
+ if(page_entry) {
+ offset = (0 == i ? addr - page_entry->addr : 0);
+ buf_offset = (0 == i ? 0 : size - access_size);
+
+ /* copy the requested data from the page into the input buffer */
+ HDmemcpy((uint8_t *)buf + buf_offset, (uint8_t *)page_entry->page_buf_ptr + offset, access_size);
+
+ /* Update LRU */
+ H5PB__MOVE_TO_TOP_LRU(page_buf, page_entry)
+
+ /* Update statistics */
+ if(type == H5FD_MEM_DRAW || type == H5FD_MEM_GHEAP)
+ page_buf->hits[1]++;
+ else
+ page_buf->hits[0]++;
+ } /* end if */
+ /* if not found */
+ else {
+ void *new_page_buf = NULL;
+ size_t page_size = page_buf->page_size;
+ haddr_t eoa;
+
+ /* make space for new entry */
+ if((H5SL_count(page_buf->slist_ptr) * page_buf->page_size) >= page_buf->max_size) {
+ htri_t can_make_space;
+
+ /* check if we can make space in page buffer */
+ if((can_make_space = H5PB__make_space(fio_info, page_buf, type)) < 0)
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_NOSPACE, FAIL, "make space in Page buffer Failed")
+
+ /* if make_space returns 0, then we can't use the page
+ buffer for this I/O and we need to bypass */
+ if(0 == can_make_space) {
+ /* make space can't return FALSE on second touched page since the first is of the same type */
+ HDassert(0 == i);
+
+ /* read entire block from VFD and return */
+ if(H5FD_read(&fdio_info, type, addr, size, buf) < 0)
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_READERROR, FAIL, "driver read request failed")
+
+ /* Break out of loop */
+ break;
+ } /* end if */
+ } /* end if */
+
+ /* Read page from VFD */
+ if(NULL == (new_page_buf = H5FL_FAC_MALLOC(page_buf->page_fac)))
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_CANTALLOC, FAIL, "memory allocation failed for page buffer entry")
+
+ /* Read page through the VFD layer, but make sure we don't read past the EOA. */
+
+ /* Retrieve the 'eoa' for the file */
+ if(HADDR_UNDEF == (eoa = H5F_get_eoa(fio_info->f, type)))
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_CANTGET, FAIL, "driver get_eoa request failed")
+
+ /* If the entire page falls outside the EOA, then fail */
+ if(search_addr > eoa)
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_BADVALUE, FAIL, "reading an entire page that is outside the file EOA")
+
+ /* Adjust the read size to not go beyond the EOA */
+ if(search_addr + page_size > eoa)
+ page_size = (size_t)(eoa - search_addr);
+
+ /* Read page from VFD */
+ if(H5FD_read(&fdio_info, type, search_addr, page_size, new_page_buf) < 0)
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_READERROR, FAIL, "driver read request failed")
+
+ /* Copy the requested data from the page into the input buffer */
+ offset = (0 == i ? addr - search_addr : 0);
+ buf_offset = (0 == i ? 0 : size - access_size);
+ HDmemcpy((uint8_t *)buf + buf_offset, (uint8_t *)new_page_buf + offset, access_size);
+
+ /* Create the new PB entry */
+ if(NULL == (page_entry = H5FL_CALLOC(H5PB_entry_t)))
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_NOSPACE, FAIL, "memory allocation failed")
+
+ page_entry->page_buf_ptr = new_page_buf;
+ page_entry->addr = search_addr;
+ page_entry->type = (H5F_mem_page_t)type;
+ page_entry->is_dirty = FALSE;
+
+ /* Insert page into PB */
+ if(H5PB__insert_entry(page_buf, page_entry) < 0)
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_CANTSET, FAIL, "error inserting new page in page buffer")
+
+ /* Update statistics */
+ if(type == H5FD_MEM_DRAW || type == H5FD_MEM_GHEAP)
+ page_buf->misses[1]++;
+ else
+ page_buf->misses[0]++;
+ } /* end else */
+ } /* end for */
+ } /* end else */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5PB_read() */
+
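+/* Worked example of the page-alignment arithmetic used by H5PB_read() above,
+ * assuming a hypothetical 4096-byte page size; illustrative only and kept
+ * out of the build.
+ */
+#if 0
+{
+    const haddr_t page_size = 4096;
+    haddr_t addr = 5000;        /* start of the raw data access  */
+    size_t  size = 6000;        /* length of the access          */
+
+    haddr_t first_page = (addr / page_size) * page_size;                         /* = 4096 */
+    haddr_t last_page  = ((addr + (haddr_t)size - 1) / page_size) * page_size;   /* = 8192 */
+    hsize_t n_pages    = (last_page / page_size + 1) - (first_page / page_size); /* = 2    */
+}
+#endif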
+
+/*-------------------------------------------------------------------------
+ * Function: H5PB_write
+ *
+ * Purpose: Write data into the Page Buffer. If the page exists in the
+ * cache, update it; otherwise read it from disk, update it, and
+ * insert into cache.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Mohamad Chaarawi
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5PB_write(const H5F_io_info2_t *fio_info, H5FD_mem_t type, haddr_t addr,
+ size_t size, const void *buf)
+{
+ H5PB_t *page_buf; /* Page buffering info for this file */
+ H5PB_entry_t *page_entry; /* Pointer to the corresponding page entry */
+ H5FD_io_info_t fdio_info; /* File driver I/O info */
+ haddr_t first_page_addr, last_page_addr; /* Addresses of the first and last pages covered by I/O */
+ haddr_t offset;
+ haddr_t search_addr; /* Address of current page */
+ hsize_t num_touched_pages; /* Number of pages accessed */
+ size_t access_size;
+ hbool_t bypass_pb = FALSE; /* Whether to bypass page buffering */
+ hsize_t i; /* Local index variable */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_NOAPI(FAIL)
+
+ /* Sanity checks */
+ HDassert(fio_info);
+ HDassert(fio_info->f);
+
+ /* Get pointer to page buffer info for this file */
+ page_buf = fio_info->f->shared->page_buf;
+
+#ifdef H5_HAVE_PARALLEL
+ if(H5F_HAS_FEATURE(fio_info->f, H5FD_FEAT_HAS_MPI)) {
+#if 1
+ bypass_pb = TRUE;
+#else
+ /* MSC - why this stopped working ? */
+ int mpi_size;
+
+ if((mpi_size = H5F_mpi_get_size(fio_info->f)) < 0)
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_CANTGET, FAIL, "can't retrieve MPI communicator size")
+ if(1 != mpi_size)
+ bypass_pb = TRUE;
+#endif
+ } /* end if */
+#endif
+
+ /* If page buffering is disabled, or the I/O size is larger than that of a
+ * single page, or if this is a parallel raw data access, bypass page
+ * buffering.
+ */
+ if(NULL == page_buf || size >= page_buf->page_size || bypass_pb) {
+ if(H5F__accum_write(fio_info, type, addr, size, buf) < 0)
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_WRITEERROR, FAIL, "write through metadata accumulator failed")
+
+ /* Update statistics */
+ if(page_buf) {
+ if(type == H5FD_MEM_DRAW || type == H5FD_MEM_GHEAP)
+ page_buf->bypasses[1]++;
+ else
+ page_buf->bypasses[0]++;
+ } /* end if */
+
+ /* If page buffering is disabled, or if this is a large metadata access,
+ * or if this is a parallel raw data access, we are done here
+ */
+ if(NULL == page_buf || (size >= page_buf->page_size && H5FD_MEM_DRAW != type) ||
+ (bypass_pb && H5FD_MEM_DRAW == type))
+ HGOTO_DONE(SUCCEED)
+
+#ifdef H5_HAVE_PARALLEL
+ if(bypass_pb) {
+            if(H5PB_update_entry(page_buf, addr, size, buf) < 0)
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_CANTUPDATE, FAIL, "failed to update PB with metadata cache")
+ HGOTO_DONE(SUCCEED)
+ } /* end if */
+#endif
+ } /* end if */
+
+ /* Update statistics */
+ if(page_buf) {
+ if(type == H5FD_MEM_DRAW || type == H5FD_MEM_GHEAP)
+ page_buf->accesses[1]++;
+ else
+ page_buf->accesses[0]++;
+ } /* end if */
+
+ /* Calculate the aligned address of the first page */
+ first_page_addr = (addr / page_buf->page_size) * page_buf->page_size;
+
+ /* For raw data calculate the aligned address of the last page and
+ * the number of pages accessed if more than 1 page is accessed
+ */
+ if(H5FD_MEM_DRAW == type) {
+ last_page_addr = (addr + size - 1) / page_buf->page_size * page_buf->page_size;
+
+ /* how many pages does this write span */
+ num_touched_pages = (last_page_addr/page_buf->page_size + 1) -
+ (first_page_addr / page_buf->page_size);
+ if(first_page_addr == last_page_addr) {
+ HDassert(1 == num_touched_pages);
+ last_page_addr = HADDR_UNDEF;
+ } /* end if */
+ } /* end if */
+ /* Otherwise set last page addr to HADDR_UNDEF */
+ else {
+ num_touched_pages = 1;
+ last_page_addr = HADDR_UNDEF;
+ } /* end else */
+
+ /* Translate to file driver I/O info object */
+ fdio_info.file = fio_info->f->shared->lf;
+ fdio_info.meta_dxpl = fio_info->meta_dxpl;
+ fdio_info.raw_dxpl = fio_info->raw_dxpl;
+
+ /* Check if existing pages for raw data need to be updated since raw data access is not atomic */
+ if(H5FD_MEM_DRAW == type && size >= page_buf->page_size) {
+ /* For each touched page, check if it exists in the page buffer, and
+ * update it with the data in the buffer to keep it up to date
+ */
+ for(i = 0; i < num_touched_pages; i++) {
+ search_addr = i * page_buf->page_size + first_page_addr;
+
+ /* Special handling for the first page if it is not a full page update */
+ if(i == 0 && first_page_addr != addr) {
+ /* Lookup the page in the skip list */
+ page_entry = (H5PB_entry_t *)H5SL_search(page_buf->slist_ptr, (void *)(&search_addr));
+ if(page_entry) {
+ offset = addr - first_page_addr;
+ HDassert(page_buf->page_size > offset);
+
+ /* Update page's data */
+ HDmemcpy((uint8_t *)page_entry->page_buf_ptr + offset, buf, page_buf->page_size - (size_t)offset);
+
+ /* Mark page dirty and push to top of LRU */
+ page_entry->is_dirty = TRUE;
+ H5PB__MOVE_TO_TOP_LRU(page_buf, page_entry)
+ } /* end if */
+ } /* end if */
+ /* Special handling for the last page if it is not a full page update */
+ else if(num_touched_pages > 1 && i == (num_touched_pages - 1) &&
+ (search_addr + page_buf->page_size) != (addr + size)) {
+ HDassert(search_addr+page_buf->page_size > addr+size);
+
+ /* Lookup the page in the skip list */
+ page_entry = (H5PB_entry_t *)H5SL_search(page_buf->slist_ptr, (void *)(&search_addr));
+ if(page_entry) {
+ offset = (num_touched_pages - 2) * page_buf->page_size +
+ (page_buf->page_size - (addr - first_page_addr));
+
+ /* Update page's data */
+ HDmemcpy(page_entry->page_buf_ptr, (const uint8_t *)buf + offset,
+ (size_t)((addr + size) - last_page_addr));
+
+ /* Mark page dirty and push to top of LRU */
+ page_entry->is_dirty = TRUE;
+ H5PB__MOVE_TO_TOP_LRU(page_buf, page_entry)
+ } /* end if */
+ } /* end else-if */
+ /* Discard all fully written pages from the page buffer */
+ else {
+ page_entry = (H5PB_entry_t *)H5SL_remove(page_buf->slist_ptr, (void *)(&search_addr));
+ if(page_entry) {
+ /* Remove from LRU list */
+ H5PB__REMOVE_LRU(page_buf, page_entry)
+
+ /* Decrement page count of appropriate type */
+ if(H5F_MEM_PAGE_DRAW == page_entry->type || H5F_MEM_PAGE_GHEAP == page_entry->type)
+ page_buf->raw_count--;
+ else
+ page_buf->meta_count--;
+
+ /* Free page info */
+ page_entry->page_buf_ptr = H5FL_FAC_FREE(page_buf->page_fac, page_entry->page_buf_ptr);
+ page_entry = H5FL_FREE(H5PB_entry_t, page_entry);
+ } /* end if */
+ } /* end else */
+ } /* end for */
+ } /* end if */
+ else {
+ /* An access could span 1 or 2 PBs at this point so we need to handle that */
+ HDassert(1 == num_touched_pages || 2 == num_touched_pages);
+ for(i = 0; i < num_touched_pages; i++) {
+ haddr_t buf_offset;
+
+ /* Calculate the aligned address of the page to search for it in the skip list */
+ search_addr = (0 == i ? first_page_addr : last_page_addr);
+
+ /* Calculate the access size if the access spans more than 1 page */
+ if(1 == num_touched_pages)
+ access_size = size;
+ else
+ access_size = (0 == i ? (size_t)(first_page_addr + page_buf->page_size - addr) : (size - access_size));
+
+ /* Lookup the page in the skip list */
+ page_entry = (H5PB_entry_t *)H5SL_search(page_buf->slist_ptr, (void *)(&search_addr));
+
+ /* If found */
+ if(page_entry) {
+ offset = (0 == i ? addr - page_entry->addr : 0);
+ buf_offset = (0 == i ? 0 : size - access_size);
+
+ /* Copy the requested data from the input buffer into the page */
+ HDmemcpy((uint8_t *)page_entry->page_buf_ptr + offset, (const uint8_t *)buf + buf_offset, access_size);
+
+ /* Mark page dirty and push to top of LRU */
+ page_entry->is_dirty = TRUE;
+ H5PB__MOVE_TO_TOP_LRU(page_buf, page_entry)
+
+ /* Update statistics */
+ if(type == H5FD_MEM_DRAW || type == H5FD_MEM_GHEAP)
+ page_buf->hits[1]++;
+ else
+ page_buf->hits[0]++;
+ } /* end if */
+ /* If not found */
+ else {
+ void *new_page_buf;
+ size_t page_size = page_buf->page_size;
+
+ /* Make space for new entry */
+ if((H5SL_count(page_buf->slist_ptr) * page_buf->page_size) >= page_buf->max_size) {
+ htri_t can_make_space;
+
+ /* Check if we can make space in page buffer */
+ if((can_make_space = H5PB__make_space(fio_info, page_buf, type)) < 0)
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_NOSPACE, FAIL, "make space in Page buffer Failed")
+
+ /* If make_space returns 0, then we can't use the page
+ * buffer for this I/O and we need to bypass
+ */
+ if(0 == can_make_space) {
+ HDassert(0 == i);
+
+ /* Write to VFD and return */
+ if(H5FD_write(&fdio_info, type, addr, size, buf) < 0)
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_WRITEERROR, FAIL, "driver write request failed")
+
+ /* Break out of loop */
+ break;
+ } /* end if */
+ } /* end if */
+
+ /* Don't bother searching if there is no write access */
+ if(H5F_ACC_RDWR & H5F_INTENT(fio_info->f))
+ /* Lookup & remove the page from the new skip list page if
+ * it exists to see if this is a new page from the MF layer
+ */
+ page_entry = (H5PB_entry_t *)H5SL_remove(page_buf->mf_slist_ptr, (void *)(&search_addr));
+
+ /* Calculate offset into the buffer of the page and the user buffer */
+ offset = (0 == i ? addr - search_addr : 0);
+ buf_offset = (0 == i ? 0 : size - access_size);
+
+                    /* If found, then just update the buffer pointer to the newly allocated buffer */
+ if(page_entry) {
+ /* Allocate space for the page buffer */
+ if(NULL == (new_page_buf = H5FL_FAC_MALLOC(page_buf->page_fac)))
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_CANTALLOC, FAIL, "memory allocation failed for page buffer entry")
+ HDmemset(new_page_buf, 0, (size_t)offset);
+ HDmemset((uint8_t *)new_page_buf + offset + access_size, 0, page_size - ((size_t)offset + access_size));
+
+ page_entry->page_buf_ptr = new_page_buf;
+
+ /* Update statistics */
+ if(type == H5FD_MEM_DRAW || type == H5FD_MEM_GHEAP)
+ page_buf->hits[1]++;
+ else
+ page_buf->hits[0]++;
+ } /* end if */
+ /* Otherwise read page through the VFD layer, but make sure we don't read past the EOA. */
+ else {
+ haddr_t eoa, eof = HADDR_UNDEF;
+
+ /* Allocate space for the page buffer */
+ if(NULL == (new_page_buf = H5FL_FAC_CALLOC(page_buf->page_fac)))
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_CANTALLOC, FAIL, "memory allocation failed for page buffer entry")
+
+ /* Create the new loaded PB entry */
+ if(NULL == (page_entry = H5FL_CALLOC(H5PB_entry_t)))
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_CANTALLOC, FAIL, "memory allocation failed")
+
+ page_entry->page_buf_ptr = new_page_buf;
+ page_entry->addr = search_addr;
+ page_entry->type = (H5F_mem_page_t)type;
+
+ /* Retrieve the 'eoa' for the file */
+ if(HADDR_UNDEF == (eoa = H5F_get_eoa(fio_info->f, type)))
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_CANTGET, FAIL, "driver get_eoa request failed")
+
+ /* If the entire page falls outside the EOA, then fail */
+ if(search_addr > eoa)
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_BADVALUE, FAIL, "writing to a page that is outside the file EOA")
+
+ /* Retrieve the 'eof' for the file - The MPI-VFD EOF
+ * returned will most likely be HADDR_UNDEF, so skip
+ * that check.
+ */
+ if(!H5F_HAS_FEATURE(fio_info->f, H5FD_FEAT_HAS_MPI))
+ if(HADDR_UNDEF == (eof = H5FD_get_eof(fio_info->f->shared->lf, H5FD_MEM_DEFAULT)))
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_CANTGET, FAIL, "driver get_eof request failed")
+
+ /* Adjust the read size to not go beyond the EOA */
+ if(search_addr + page_size > eoa)
+ page_size = (size_t)(eoa - search_addr);
+
+ if(search_addr < eof) {
+ if(H5FD_read(&fdio_info, type, search_addr, page_size, new_page_buf) < 0)
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_READERROR, FAIL, "driver read request failed")
+
+ /* Update statistics */
+ if(type == H5FD_MEM_DRAW || type == H5FD_MEM_GHEAP)
+ page_buf->misses[1]++;
+ else
+ page_buf->misses[0]++;
+ } /* end if */
+ } /* end else */
+
+ /* Copy the requested data from the page into the input buffer */
+ HDmemcpy((uint8_t *)new_page_buf + offset, (const uint8_t *)buf+buf_offset, access_size);
+
+ /* Page is dirty now */
+ page_entry->is_dirty = TRUE;
+
+ /* Insert page into PB, evicting other pages as necessary */
+ if(H5PB__insert_entry(page_buf, page_entry) < 0)
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_CANTSET, FAIL, "error inserting new page in page buffer")
+ } /* end else */
+ } /* end for */
+ } /* end else */
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5PB_write() */
+
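+/* A minimal sketch of the "write hit" path in H5PB_write() above: a sub-page
+ * write is merged into the cached copy of the page at its in-page offset and
+ * the page is marked dirty so that H5PB__write_entry() flushes it later.  The
+ * names mirror the locals used above, but the block is illustrative only and
+ * is kept out of the build.
+ */
+#if 0
+{
+    haddr_t page_addr = (addr / page_buf->page_size) * page_buf->page_size;
+    size_t  in_page   = (size_t)(addr - page_addr);   /* offset inside the page */
+
+    HDmemcpy((uint8_t *)page_entry->page_buf_ptr + in_page, buf, size);
+    page_entry->is_dirty = TRUE;
+    H5PB__MOVE_TO_TOP_LRU(page_buf, page_entry)
+}
+#endif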
+
+/*-------------------------------------------------------------------------
+ * Function: H5PB__insert_entry()
+ *
+ * Purpose: ???
+ *
+ * This function was created without documentation.
+ * What follows is my best understanding of Mohamad's intent.
+ *
+ * Insert the supplied page into the page buffer, both the
+ * skip list and the LRU.
+ *
+ * As best I can tell, this function imposes no limit on the
+ * number of entries in the page buffer beyond an assertion
+ *              failure if the page count exceeds the limit.
+ *
+ * JRM -- 12/22/16
+ *
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Mohamad Chaarawi
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5PB__insert_entry(H5PB_t *page_buf, H5PB_entry_t *page_entry)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Insert entry in skip list */
+ if(H5SL_insert(page_buf->slist_ptr, page_entry, &(page_entry->addr)) < 0)
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_CANTINSERT, FAIL, "can't insert entry in skip list")
+ HDassert(H5SL_count(page_buf->slist_ptr) * page_buf->page_size <= page_buf->max_size);
+
+ /* Increment appropriate page count */
+ if(H5F_MEM_PAGE_DRAW == page_entry->type || H5F_MEM_PAGE_GHEAP == page_entry->type)
+ page_buf->raw_count++;
+ else
+ page_buf->meta_count++;
+
+ /* Insert entry in LRU */
+ H5PB__INSERT_LRU(page_buf, page_entry)
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5PB__insert_entry() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5PB__make_space()
+ *
+ * Purpose: ???
+ *
+ * This function was created without documentation.
+ * What follows is my best understanding of Mohamad's intent.
+ *
+ * If necessary and if possible, evict a page from the page
+ * buffer to make space for the supplied page. Depending on
+ * the page buffer configuration and contents, and the page
+ *              supplied, this may or may not be possible.
+ *
+ * JRM -- 12/22/16
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Mohamad Chaarawi
+ *
+ *-------------------------------------------------------------------------
+ */
+static htri_t
+H5PB__make_space(const H5F_io_info2_t *fio_info, H5PB_t *page_buf,
+ H5FD_mem_t inserted_type)
+{
+ H5PB_entry_t *page_entry; /* Pointer to page eviction candidate */
+ htri_t ret_value = TRUE; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity check */
+ HDassert(fio_info);
+ HDassert(page_buf);
+
+ /* Get oldest entry */
+ page_entry = page_buf->LRU_tail_ptr;
+
+ if(H5FD_MEM_DRAW == inserted_type) {
+        /* If the threshold is 100% metadata and the page buffer is full of
+         * metadata, then we can't make space for raw data */
+ if(0 == page_buf->raw_count && page_buf->min_meta_count == page_buf->meta_count) {
+ HDassert(page_buf->meta_count * page_buf->page_size == page_buf->max_size);
+ HGOTO_DONE(FALSE)
+ } /* end if */
+
+ /* check the metadata threshold before evicting metadata items */
+ while(1) {
+ if(page_entry->prev && H5F_MEM_PAGE_META == page_entry->type &&
+ page_buf->min_meta_count >= page_buf->meta_count)
+ page_entry = page_entry->prev;
+ else
+ break;
+ } /* end while */
+ } /* end if */
+ else {
+        /* If the threshold is 100% raw data and the page buffer is full of
+         * raw data, then we can't make space for metadata */
+ if(0 == page_buf->meta_count && page_buf->min_raw_count == page_buf->raw_count) {
+ HDassert(page_buf->raw_count * page_buf->page_size == page_buf->max_size);
+ HGOTO_DONE(FALSE)
+ } /* end if */
+
+ /* check the raw data threshold before evicting raw data items */
+ while(1) {
+ if(page_entry->prev && (H5F_MEM_PAGE_DRAW == page_entry->type || H5F_MEM_PAGE_GHEAP == page_entry->type) &&
+ page_buf->min_raw_count >= page_buf->raw_count)
+ page_entry = page_entry->prev;
+ else
+ break;
+ } /* end while */
+ } /* end else */
+
+ /* Remove from page index */
+ if(NULL == H5SL_remove(page_buf->slist_ptr, &(page_entry->addr)))
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_BADVALUE, FAIL, "Tail Page Entry is not in skip list")
+
+ /* Remove entry from LRU list */
+ H5PB__REMOVE_LRU(page_buf, page_entry)
+ HDassert(H5SL_count(page_buf->slist_ptr) == page_buf->LRU_list_len);
+
+ /* Decrement appropriate page type counter */
+ if(H5F_MEM_PAGE_DRAW == page_entry->type || H5F_MEM_PAGE_GHEAP == page_entry->type)
+ page_buf->raw_count--;
+ else
+ page_buf->meta_count--;
+
+ /* Flush page if dirty */
+ if(page_entry->is_dirty)
+ if(H5PB__write_entry(fio_info, page_entry) < 0)
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_WRITEERROR, FAIL, "file write failed")
+
+ /* Update statistics */
+ if(page_entry->type == H5F_MEM_PAGE_DRAW || H5F_MEM_PAGE_GHEAP == page_entry->type)
+ page_buf->evictions[1]++;
+ else
+ page_buf->evictions[0]++;
+
+ /* Release page */
+ page_entry->page_buf_ptr = H5FL_FAC_FREE(page_buf->page_fac, page_entry->page_buf_ptr);
+ page_entry = H5FL_FREE(H5PB_entry_t, page_entry);
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5PB__make_space() */
+
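+/* Simplified restatement of the eviction scan in H5PB__make_space() above:
+ * start at the LRU tail (the coldest page) and walk toward the head, skipping
+ * pages whose type is still at or below its minimum-count threshold; the
+ * first unprotected page becomes the eviction victim.  Illustrative only and
+ * kept out of the build.
+ */
+#if 0
+{
+    H5PB_entry_t *victim = page_buf->LRU_tail_ptr;
+
+    /* Raw data is being inserted, so metadata pages under their floor are protected */
+    while(victim->prev && H5F_MEM_PAGE_META == victim->type &&
+            page_buf->meta_count <= page_buf->min_meta_count)
+        victim = victim->prev;
+}
+#endif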
+
+/*-------------------------------------------------------------------------
+ * Function: H5PB__write_entry()
+ *
+ * Purpose: ???
+ *
+ * This function was created without documentation.
+ * What follows is my best understanding of Mohamad's intent.
+ *
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Mohamad Chaarawi
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5PB__write_entry(const H5F_io_info2_t *fio_info, H5PB_entry_t *page_entry)
+{
+ haddr_t eoa; /* Current EOA for the file */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity check */
+ HDassert(fio_info);
+ HDassert(fio_info->f);
+ HDassert(page_entry);
+
+ /* Retrieve the 'eoa' for the file */
+ if(HADDR_UNDEF == (eoa = H5F_get_eoa(fio_info->f, page_entry->type)))
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_CANTGET, FAIL, "driver get_eoa request failed")
+
+ /* If the starting address of the page is larger than
+ * the EOA, then the entire page is discarded without writing.
+ */
+ if(page_entry->addr <= eoa) {
+ H5FD_io_info_t fdio_info; /* File driver I/O info */
+ size_t page_size = fio_info->f->shared->page_buf->page_size;
+
+ /* Adjust the page length if it exceeds the EOA */
+ if((page_entry->addr + page_size) > eoa)
+ page_size = (size_t)(eoa - page_entry->addr);
+
+ /* Translate to file driver I/O info object */
+ fdio_info.file = fio_info->f->shared->lf;
+ fdio_info.meta_dxpl = fio_info->meta_dxpl;
+ fdio_info.raw_dxpl = fio_info->raw_dxpl;
+
+ if(H5FD_write(&fdio_info, page_entry->type, page_entry->addr, page_size, page_entry->page_buf_ptr) < 0)
+ HGOTO_ERROR(H5E_PAGEBUF, H5E_WRITEERROR, FAIL, "file write failed")
+ } /* end if */
+
+ page_entry->is_dirty = FALSE;
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5PB__write_entry() */
+
diff --git a/src/H5PBmodule.h b/src/H5PBmodule.h
new file mode 100644
index 0000000..c8aabb6
--- /dev/null
+++ b/src/H5PBmodule.h
@@ -0,0 +1,32 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Programmer: Quincey Koziol <koziol@hdfgroup.org>
+ * Saturday, September 12, 2015
+ *
+ * Purpose: This file contains declarations which define macros for the
+ * H5PB package. Including this header means that the source file
+ * is part of the H5PB package.
+ */
+#ifndef _H5PBmodule_H
+#define _H5PBmodule_H
+
+/* Define the proper control macros for the generic FUNC_ENTER/LEAVE and error
+ * reporting macros.
+ */
+#define H5PB_MODULE
+#define H5_MY_PKG H5PB
+#define H5_MY_PKG_ERR H5E_RESOURCE
+#define H5_MY_PKG_INIT NO
+
+#endif /* _H5PBmodule_H */
diff --git a/src/H5PBpkg.h b/src/H5PBpkg.h
new file mode 100644
index 0000000..6b9168b
--- /dev/null
+++ b/src/H5PBpkg.h
@@ -0,0 +1,58 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#if !(defined H5PB_FRIEND || defined H5PB_MODULE)
+#error "Do not include this file outside the H5PB package!"
+#endif
+
+#ifndef _H5PBpkg_H
+#define _H5PBpkg_H
+
+/* Get package's private header */
+#include "H5PBprivate.h"
+
+/* Other private headers needed by this file */
+
+/**************************/
+/* Package Private Macros */
+/**************************/
+
+
+/****************************/
+/* Package Private Typedefs */
+/****************************/
+
+typedef struct H5PB_entry_t {
+ void *page_buf_ptr; /* Pointer to the buffer containing the data */
+ haddr_t addr; /* Address of the page in the file */
+    H5F_mem_page_t type; /* Type of the page entry (H5F_MEM_PAGE_DRAW/META) */
+ hbool_t is_dirty; /* Flag indicating whether the page has dirty data or not */
+
+ /* Fields supporting replacement policies */
+ struct H5PB_entry_t *next; /* next pointer in the LRU list */
+ struct H5PB_entry_t *prev; /* previous pointer in the LRU list */
+} H5PB_entry_t;
+
+
+/*****************************/
+/* Package Private Variables */
+/*****************************/
+
+
+/******************************/
+/* Package Private Prototypes */
+/******************************/
+
+
+#endif /* _H5PBpkg_H */
+
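The next/prev fields in H5PB_entry_t make each page an intrusive node in a doubly-linked LRU list. A minimal, self-contained sketch of the "move to head on access" operation such a list supports (illustrative only; the library's own list handling may differ):

typedef struct entry_sketch {
    struct entry_sketch *next;
    struct entry_sketch *prev;
} entry_sketch;

/* Unlink an entry and reinsert it at the head (most recently used position). */
static void
lru_touch(entry_sketch **head, entry_sketch **tail, entry_sketch *e)
{
    /* Unlink from its current position */
    if(e->prev) e->prev->next = e->next; else *head = e->next;
    if(e->next) e->next->prev = e->prev; else *tail = e->prev;

    /* Push onto the head of the list */
    e->prev = NULL;
    e->next = *head;
    if(*head) (*head)->prev = e;
    *head = e;
    if(NULL == *tail) *tail = e;
}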
diff --git a/src/H5PBprivate.h b/src/H5PBprivate.h
new file mode 100644
index 0000000..e6f2874
--- /dev/null
+++ b/src/H5PBprivate.h
@@ -0,0 +1,106 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: H5PBprivate.h
+ * June 2014
+ * Mohamad Chaarawi
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#ifndef _H5PBprivate_H
+#define _H5PBprivate_H
+
+/* Include package's public header */
+#ifdef NOT_YET
+#include "H5PBpublic.h"
+#endif /* NOT_YET */
+
+/* Private headers needed by this header */
+#include "H5private.h" /* Generic Functions */
+#include "H5Fprivate.h" /* File access */
+#include "H5FLprivate.h" /* Free Lists */
+#include "H5SLprivate.h" /* Skip List */
+
+
+/**************************/
+/* Library Private Macros */
+/**************************/
+
+
+/****************************/
+/* Library Private Typedefs */
+/****************************/
+
+/* Forward declaration for a page buffer entry */
+struct H5PB_entry_t;
+
+/* Typedef for the main structure for the page buffer */
+typedef struct H5PB_t {
+ size_t max_size; /* The total page buffer size */
+ size_t page_size; /* Size of a single page */
+    unsigned min_meta_perc;  /* Minimum percentage of metadata entries required before evicting metadata entries */
+    unsigned min_raw_perc;   /* Minimum percentage of raw data entries required before evicting raw data entries */
+ unsigned meta_count; /* Number of entries for metadata */
+ unsigned raw_count; /* Number of entries for raw data */
+ unsigned min_meta_count; /* Minimum # of entries for metadata */
+ unsigned min_raw_count; /* Minimum # of entries for raw data */
+
+ H5SL_t *slist_ptr; /* Skip list with all the active page entries */
+ H5SL_t *mf_slist_ptr; /* Skip list containing newly allocated page entries inserted from the MF layer */
+
+ size_t LRU_list_len; /* Number of entries in the LRU (identical to slist_ptr count) */
+ struct H5PB_entry_t *LRU_head_ptr; /* Head pointer of the LRU */
+ struct H5PB_entry_t *LRU_tail_ptr; /* Tail pointer of the LRU */
+
+ H5FL_fac_head_t *page_fac; /* Factory for allocating pages */
+
+ /* Statistics */
+ unsigned accesses[2];
+ unsigned hits[2];
+ unsigned misses[2];
+ unsigned evictions[2];
+ unsigned bypasses[2];
+} H5PB_t;
+
+/*****************************/
+/* Library-private Variables */
+/*****************************/
+
+
+/***************************************/
+/* Library-private Function Prototypes */
+/***************************************/
+
+/* General routines */
+H5_DLL herr_t H5PB_create(H5F_t *file, size_t page_buffer_size, unsigned page_buf_min_meta_perc, unsigned page_buf_min_raw_perc);
+H5_DLL herr_t H5PB_flush(const H5F_io_info2_t *fio_info);
+H5_DLL herr_t H5PB_dest(const H5F_io_info2_t *fio_info);
+H5_DLL herr_t H5PB_add_new_page(H5F_t *f, H5FD_mem_t type, haddr_t page_addr);
+H5_DLL herr_t H5PB_update_entry(H5PB_t *page_buf, haddr_t addr, size_t size, const void *buf);
+H5_DLL herr_t H5PB_remove_entry(const H5F_t *f, haddr_t addr);
+H5_DLL herr_t H5PB_read(const H5F_io_info2_t *fio_info, H5FD_mem_t type,
+ haddr_t addr, size_t size, void *buf/*out*/);
+H5_DLL herr_t H5PB_write(const H5F_io_info2_t *f, H5FD_mem_t type, haddr_t addr,
+ size_t size, const void *buf);
+
+/* Statistics routines */
+H5_DLL herr_t H5PB_reset_stats(H5PB_t *page_buf);
+H5_DLL herr_t H5PB_get_stats(const H5PB_t *page_buf, unsigned accesses[2],
+ unsigned hits[2], unsigned misses[2], unsigned evictions[2], unsigned bypasses[2]);
+H5_DLL herr_t H5PB_print_stats(const H5PB_t *page_buf);
+
+#endif /* !_H5PBprivate_H */
+
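One way min_meta_count and min_raw_count could be derived from the corresponding percentages; the rounding below is an assumption for illustration, and only the field names come from H5PB_t:

#include <stddef.h>

/* Derive a minimum entry count from the total buffer size, the page size,
 * and a minimum percentage in the range 0-100 (page_size assumed non-zero). */
static unsigned
min_entry_count(size_t max_size, size_t page_size, unsigned min_perc)
{
    size_t total_pages = max_size / page_size;

    return (unsigned)((total_pages * min_perc) / 100);
}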
diff --git a/src/H5PL.c b/src/H5PL.c
index 1f7bb2a..65d6c91 100644
--- a/src/H5PL.c
+++ b/src/H5PL.c
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic document set and is *
- * linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have access *
- * to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/****************/
@@ -25,15 +23,33 @@
#include "H5private.h" /* Generic Functions */
#include "H5Eprivate.h" /* Error handling */
#include "H5MMprivate.h" /* Memory management */
-#include "H5PLprivate.h" /* Plugin */
+#include "H5PLpkg.h" /* Plugin */
#include "H5Zprivate.h" /* Filter pipeline */
/****************/
/* Local Macros */
/****************/
-
-#define H5PL_MAX_PATH_NUM 16
+#ifdef H5_HAVE_WIN32_API
+#define H5PL_EXPAND_ENV_VAR { \
+ long bufCharCount; \
+ char *tempbuf; \
+ if(NULL == (tempbuf = (char *)H5MM_malloc(H5PL_EXPAND_BUFFER_SIZE))) \
+ HGOTO_ERROR(H5E_PLUGIN, H5E_CANTALLOC, FAIL, "can't allocate memory for expanded path") \
+ if((bufCharCount = ExpandEnvironmentStringsA(dl_path, tempbuf, H5PL_EXPAND_BUFFER_SIZE)) > H5PL_EXPAND_BUFFER_SIZE) { \
+ tempbuf = (char *)H5MM_xfree(tempbuf); \
+ HGOTO_ERROR(H5E_PLUGIN, H5E_NOSPACE, FAIL, "expanded path is too long") \
+ } \
+ if(bufCharCount == 0) { \
+ tempbuf = (char *)H5MM_xfree(tempbuf); \
+ HGOTO_ERROR(H5E_PLUGIN, H5E_CANTGET, FAIL, "failed to expand path") \
+ } \
+ dl_path = (char *)H5MM_xfree(dl_path); \
+ dl_path = tempbuf; \
+ }
+#else
+#define H5PL_EXPAND_ENV_VAR
+#endif /* H5_HAVE_WIN32_API */
/****************************/
/* Macros for supporting
@@ -182,7 +198,7 @@ H5PL__init_package(void)
* to tell the library to load plugin libraries without search.
*/
if(NULL != (preload_path = HDgetenv("HDF5_PLUGIN_PRELOAD")))
- /* Special symbal "::" means no plugin during data reading. */
+ /* Special symbol "::" means no plugin during data reading. */
if(!HDstrcmp(preload_path, H5PL_NO_PLUGIN))
H5PL_plugin_g = 0;
@@ -390,6 +406,258 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5PLappend
+ *
+ * Purpose: Insert a plugin path at the end of the list.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5PLappend(const char *plugin_path)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+ char *dl_path = NULL;
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE1("e", "*s", plugin_path);
+ if(H5PL_num_paths_g == H5PL_MAX_PATH_NUM)
+ HGOTO_ERROR(H5E_PLUGIN, H5E_NOSPACE, FAIL, "too many directories in path for table")
+ if(NULL == plugin_path)
+ HGOTO_ERROR(H5E_PLUGIN, H5E_CANTALLOC, FAIL, "no path provided")
+ if(NULL == (dl_path = H5MM_strdup(plugin_path)))
+ HGOTO_ERROR(H5E_PLUGIN, H5E_CANTALLOC, FAIL, "can't allocate memory for path")
+
+ H5PL_EXPAND_ENV_VAR
+
+ H5PL_path_table_g[H5PL_num_paths_g] = dl_path;
+ H5PL_num_paths_g++;
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* end H5PLappend() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5PLprepend
+ *
+ * Purpose: Insert a plugin path at the beginning of the list.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5PLprepend(const char *plugin_path)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+ char *dl_path = NULL;
+ unsigned int plindex;
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE1("e", "*s", plugin_path);
+ if(H5PL_num_paths_g == H5PL_MAX_PATH_NUM)
+ HGOTO_ERROR(H5E_PLUGIN, H5E_NOSPACE, FAIL, "too many directories in path for table")
+ if(NULL == plugin_path)
+ HGOTO_ERROR(H5E_PLUGIN, H5E_CANTALLOC, FAIL, "no path provided")
+ if(NULL == (dl_path = H5MM_strdup(plugin_path)))
+ HGOTO_ERROR(H5E_PLUGIN, H5E_CANTALLOC, FAIL, "can't allocate memory for path")
+
+ H5PL_EXPAND_ENV_VAR
+
+ for (plindex = (unsigned int)H5PL_num_paths_g; plindex > 0; plindex--)
+ H5PL_path_table_g[plindex] = H5PL_path_table_g[plindex - 1];
+ H5PL_path_table_g[0] = dl_path;
+ H5PL_num_paths_g++;
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* end H5PLprepend() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5PLreplace
+ *
+ * Purpose: Replace the path at the specified index.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5PLreplace(const char *plugin_path, unsigned int index)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+ char *dl_path = NULL;
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE2("e", "*sIu", plugin_path, index);
+ if(NULL == plugin_path)
+ HGOTO_ERROR(H5E_PLUGIN, H5E_CANTALLOC, FAIL, "no path provided")
+ if(index >= H5PL_MAX_PATH_NUM)
+        HGOTO_ERROR(H5E_PLUGIN, H5E_NOSPACE, FAIL, "path index is out of bounds for the table")
+ if(NULL == (dl_path = H5MM_strdup(plugin_path)))
+ HGOTO_ERROR(H5E_PLUGIN, H5E_CANTALLOC, FAIL, "can't allocate memory for path")
+
+ H5PL_EXPAND_ENV_VAR
+
+ if(H5PL_path_table_g[index])
+ H5PL_path_table_g[index] = (char *)H5MM_xfree(H5PL_path_table_g[index]);
+ H5PL_path_table_g[index] = dl_path;
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* end H5PLreplace() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5PLinsert
+ *
+ * Purpose: Insert a plugin path at the specified index, shifting the paths at and after that index up by one.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5PLinsert(const char *plugin_path, unsigned int index)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+ char *dl_path = NULL;
+ unsigned int plindex;
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE2("e", "*sIu", plugin_path, index);
+ if(H5PL_num_paths_g == H5PL_MAX_PATH_NUM)
+ HGOTO_ERROR(H5E_PLUGIN, H5E_NOSPACE, FAIL, "too many directories in path for table")
+ if(NULL == plugin_path)
+ HGOTO_ERROR(H5E_PLUGIN, H5E_CANTALLOC, FAIL, "no path provided")
+ if(index >= H5PL_MAX_PATH_NUM)
+        HGOTO_ERROR(H5E_PLUGIN, H5E_NOSPACE, FAIL, "path index is out of bounds for the table")
+ if(NULL == (dl_path = H5MM_strdup(plugin_path)))
+ HGOTO_ERROR(H5E_PLUGIN, H5E_CANTALLOC, FAIL, "can't allocate memory for path")
+
+ H5PL_EXPAND_ENV_VAR
+
+ for(plindex = (unsigned int)H5PL_num_paths_g; plindex > index; plindex--)
+ H5PL_path_table_g[plindex] = H5PL_path_table_g[plindex - 1];
+ H5PL_path_table_g[index] = dl_path;
+ H5PL_num_paths_g++;
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* end H5PLinsert() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5PLremove
+ *
+ * Purpose: Remove the plugin path at the specified index and compact the list.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5PLremove(unsigned int index)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+ unsigned int plindex;
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE1("e", "Iu", index);
+ if(H5PL_num_paths_g == 0)
+ HGOTO_ERROR(H5E_PLUGIN, H5E_NOSPACE, FAIL, "no directories in table")
+ if(index >= H5PL_MAX_PATH_NUM)
+        HGOTO_ERROR(H5E_PLUGIN, H5E_NOSPACE, FAIL, "path index is out of bounds for the table")
+ if(NULL == H5PL_path_table_g[index])
+ HGOTO_ERROR(H5E_PLUGIN, H5E_CANTALLOC, FAIL, "no directory path at index")
+ H5PL_path_table_g[index] = (char *)H5MM_xfree(H5PL_path_table_g[index]);
+
+ H5PL_num_paths_g--;
+ for(plindex = index; plindex < (unsigned int)H5PL_num_paths_g; plindex++)
+ H5PL_path_table_g[plindex] = H5PL_path_table_g[plindex + 1];
+ H5PL_path_table_g[H5PL_num_paths_g] = NULL;
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* end H5PLremove() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5PLget
+ *
+ * Purpose: Query the plugin path at the specified index.
+ *
+ * Return: Success: The length of the path.
+ *
+ * If `pathname' is non-NULL then write up to `size' bytes into that
+ * buffer and always return the length of the pathname.
+ * Otherwise `size' is ignored and the function does not store the pathname,
+ * just returning the number of characters required to store the pathname.
+ * If an error occurs then the buffer pointed to by `pathname' (NULL or non-NULL)
+ * is unchanged and the function returns a negative value.
+ * If a zero is returned for the name's length, then there is no pathname
+ * associated with the index.
+ *
+ *-------------------------------------------------------------------------
+ */
+ssize_t
+H5PLget(unsigned int index, char *pathname/*out*/, size_t size)
+{
+ ssize_t ret_value = 0; /* Return value */
+ size_t len = 0; /* Length of pathname */
+ char *dl_path = NULL;
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE3("Zs", "Iuxz", index, pathname, size);
+ if(H5PL_num_paths_g == 0)
+ HGOTO_ERROR(H5E_PLUGIN, H5E_NOSPACE, FAIL, "no directories in table")
+ if(index >= H5PL_MAX_PATH_NUM)
+        HGOTO_ERROR(H5E_PLUGIN, H5E_NOSPACE, FAIL, "path index is out of bounds for the table")
+ if(NULL == (dl_path = H5PL_path_table_g[index]))
+ HGOTO_ERROR(H5E_PLUGIN, H5E_CANTALLOC, FAIL, "no directory path at index")
+ len = HDstrlen(dl_path);
+ if(pathname) {
+ HDstrncpy(pathname, dl_path, MIN((size_t)(len + 1), size));
+ if((size_t)len >= size)
+ pathname[size - 1] = '\0';
+ } /* end if */
+
+ /* Set return value */
+ ret_value = (ssize_t)len;
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* end H5PLget() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5PLsize
+ *
+ * Purpose: Query the number of plugin paths in the current list.
+ *
+ * Return: Non-negative on success/Negative on failure.
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5PLsize(unsigned int *listsize)
+{
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE1("e", "*Iu", listsize);
+
+ *listsize = (unsigned int)H5PL_num_paths_g;
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* end H5PLsize() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5PL__init_path_table
*
* Purpose: Initialize the path table.
@@ -422,25 +690,7 @@ H5PL__init_path_table(void)
if(NULL == dl_path)
HGOTO_ERROR(H5E_PLUGIN, H5E_CANTALLOC, FAIL, "can't allocate memory for path")
-#ifdef H5_HAVE_WIN32_API
- else { /* Expand windows env var*/
- long bufCharCount;
- char *tempbuf;
- if(NULL == (tempbuf = (char *)H5MM_malloc(H5PL_EXPAND_BUFFER_SIZE)))
- HGOTO_ERROR(H5E_PLUGIN, H5E_CANTALLOC, FAIL, "can't allocate memory for expanded path")
- if((bufCharCount = ExpandEnvironmentStrings(dl_path, tempbuf, H5PL_EXPAND_BUFFER_SIZE)) > H5PL_EXPAND_BUFFER_SIZE) {
- tempbuf = (char *)H5MM_xfree(tempbuf);
- HGOTO_ERROR(H5E_PLUGIN, H5E_NOSPACE, FAIL, "expanded path is too long")
- }
- if(bufCharCount == 0) {
- tempbuf = (char *)H5MM_xfree(tempbuf);
- HGOTO_ERROR(H5E_PLUGIN, H5E_CANTGET, FAIL, "failed to expand path")
- }
- dl_path = (char *)H5MM_xfree(dl_path);
- dl_path = H5MM_strdup(tempbuf);
- tempbuf = (char *)H5MM_xfree(tempbuf);
- }
-#endif /* H5_HAVE_WIN32_API */
+ H5PL_EXPAND_ENV_VAR
/* Put paths in the path table. They are separated by ":" */
dir = HDstrtok(dl_path, H5PL_PATH_SEPARATOR);
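Taken together, the routines added above give applications a runtime-editable plugin search path table. A hedged usage sketch against the public declarations added to H5PLpublic.h (error handling trimmed; the directory names are placeholders):

#include "hdf5.h"
#include <stdio.h>

static void
adjust_plugin_paths(void)
{
    unsigned n = 0;
    char     buf[256];

    H5PLappend("/opt/hdf5/plugins");           /* add a directory at the end of the table */
    H5PLprepend("/usr/local/hdf5/plugins");    /* search this directory first */
    H5PLsize(&n);                              /* number of entries currently in the table */

    if(n > 0 && H5PLget(0, buf, sizeof(buf)) > 0)
        printf("first plugin directory: %s\n", buf);

    H5PLremove(0);                             /* drop an entry and compact the table */
}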
diff --git a/src/H5PLextern.h b/src/H5PLextern.h
index 3264435..7547ad7 100644
--- a/src/H5PLextern.h
+++ b/src/H5PLextern.h
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic document set and is *
- * linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have access *
- * to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5PLmodule.h b/src/H5PLmodule.h
index 0339737..b441aed 100644
--- a/src/H5PLmodule.h
+++ b/src/H5PLmodule.h
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5PLpkg.h b/src/H5PLpkg.h
new file mode 100644
index 0000000..e356893
--- /dev/null
+++ b/src/H5PLpkg.h
@@ -0,0 +1,48 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#if !(defined H5PL_FRIEND || defined H5PL_MODULE)
+#error "Do not include this file outside the H5PL package!"
+#endif
+
+#ifndef _H5PLpkg_H
+#define _H5PLpkg_H
+
+/* Include private header file */
+#include "H5PLprivate.h" /* Filter functions */
+
+/* Other private headers needed by this file */
+
+/**************************/
+/* Package Private Macros */
+/**************************/
+
+#define H5PL_MAX_PATH_NUM 16
+
+
+/****************************/
+/* Package Private Typedefs */
+/****************************/
+
+
+/*****************************/
+/* Package Private Variables */
+/*****************************/
+
+
+/******************************/
+/* Package Private Prototypes */
+/******************************/
+
+#endif /* _H5PLpkg_H */
+
diff --git a/src/H5PLprivate.h b/src/H5PLprivate.h
index 77e115b..0ab8f8c 100644
--- a/src/H5PLprivate.h
+++ b/src/H5PLprivate.h
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic document set and is *
- * linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have access *
- * to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Raymond Lu <songyulu@hdfgroup.org>
diff --git a/src/H5PLpublic.h b/src/H5PLpublic.h
index 0528945..9ce1fca 100644
--- a/src/H5PLpublic.h
+++ b/src/H5PLpublic.h
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic document set and is *
- * linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have access *
- * to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Raymond Lu <songyulu@hdfgroup.org>
@@ -43,7 +41,14 @@ extern "C" {
/* plugin state */
H5_DLL herr_t H5PLset_loading_state(unsigned int plugin_type);
-H5_DLL herr_t H5PLget_loading_state(unsigned int* plugin_type/*out*/);
+H5_DLL herr_t H5PLget_loading_state(unsigned int *plugin_type/*out*/);
+H5_DLL herr_t H5PLappend(const char *plugin_path);
+H5_DLL herr_t H5PLprepend(const char *plugin_path);
+H5_DLL herr_t H5PLreplace(const char *plugin_path, unsigned int index);
+H5_DLL herr_t H5PLinsert(const char *plugin_path, unsigned int index);
+H5_DLL herr_t H5PLremove(unsigned int index);
+H5_DLL ssize_t H5PLget(unsigned int index, char *pathname/*out*/, size_t size);
+H5_DLL herr_t H5PLsize(unsigned int *listsize/*out*/);
#ifdef __cplusplus
}
diff --git a/src/H5Pacpl.c b/src/H5Pacpl.c
index bfed332..4368dd6 100644
--- a/src/H5Pacpl.c
+++ b/src/H5Pacpl.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Pdapl.c b/src/H5Pdapl.c
index aa58dc4..3b0a8c5 100644
--- a/src/H5Pdapl.c
+++ b/src/H5Pdapl.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Pdcpl.c b/src/H5Pdcpl.c
index 1237bfc..3b4c159 100644
--- a/src/H5Pdcpl.c
+++ b/src/H5Pdcpl.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Pdeprec.c b/src/H5Pdeprec.c
index ecf2bea..7f96333 100644
--- a/src/H5Pdeprec.c
+++ b/src/H5Pdeprec.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -488,5 +486,135 @@ done:
FUNC_LEAVE_API(ret_value)
} /* end H5Pget_version() */
+/*-------------------------------------------------------------------------
+ * Function: H5Pset_file_space
+ *
+ * Purpose: Deprecated; maps to H5Pset_file_space_strategy().
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; Jan 2017
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Pset_file_space(hid_t plist_id, H5F_file_space_type_t strategy, hsize_t threshold)
+{
+
+ H5F_fspace_strategy_t new_strategy; /* File space strategy type */
+ hbool_t new_persist = H5F_FREE_SPACE_PERSIST_DEF; /* Persisting free-space or not */
+ hsize_t new_threshold = H5F_FREE_SPACE_THRESHOLD_DEF; /* Free-space section threshold */
+ H5F_file_space_type_t in_strategy = strategy; /* Input strategy */
+ hsize_t in_threshold = threshold; /* Input threshold */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE3("e", "iFfh", plist_id, strategy, threshold);
+
+ if((unsigned)in_strategy >= H5F_FILE_SPACE_NTYPES)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid strategy")
+ /*
+ * For 1.10.0 H5Pset_file_space:
+ * If strategy is zero, the property is not changed;
+ * the existing strategy is retained.
+ * If threshold is zero, the property is not changed;
+ * the existing threshold is retained.
+ */
+ if(!in_strategy)
+ H5Pget_file_space(plist_id, &in_strategy, NULL);
+ if(!in_threshold)
+ H5Pget_file_space(plist_id, NULL, &in_threshold);
+
+ switch(in_strategy) {
+ case H5F_FILE_SPACE_ALL_PERSIST:
+ new_strategy = H5F_FSPACE_STRATEGY_FSM_AGGR;
+ new_persist = TRUE;
+ new_threshold = in_threshold;
+ break;
+
+ case H5F_FILE_SPACE_ALL:
+ new_strategy = H5F_FSPACE_STRATEGY_FSM_AGGR;
+ new_threshold = in_threshold;
+ break;
+
+ case H5F_FILE_SPACE_AGGR_VFD:
+ new_strategy = H5F_FSPACE_STRATEGY_AGGR;
+ break;
+
+ case H5F_FILE_SPACE_VFD:
+ new_strategy = H5F_FSPACE_STRATEGY_NONE;
+ break;
+
+ case H5F_FILE_SPACE_NTYPES:
+ case H5F_FILE_SPACE_DEFAULT:
+ default:
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid file space strategy")
+ }
+
+ if(H5Pset_file_space_strategy(plist_id, new_strategy, new_persist, new_threshold) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set file space strategy")
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* H5Pset_file_space() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5Pget_file_space
+ *
+ * Purpose: Deprecated; maps to H5Pget_file_space_strategy().
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; Jan 2017
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Pget_file_space(hid_t plist_id, H5F_file_space_type_t *strategy, hsize_t *threshold)
+{
+ H5F_fspace_strategy_t new_strategy; /* File space strategy type */
+ hbool_t new_persist; /* Persisting free-space or not */
+ hsize_t new_threshold; /* Free-space section threshold */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE3("e", "i*Ff*h", plist_id, strategy, threshold);
+
+ /* Get current file space info */
+ if(H5Pget_file_space_strategy(plist_id, &new_strategy, &new_persist, &new_threshold) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get file space strategy")
+
+ /* Get value(s) */
+ if(strategy) {
+ switch(new_strategy) {
+
+ case H5F_FSPACE_STRATEGY_FSM_AGGR:
+ if(new_persist)
+ *strategy = H5F_FILE_SPACE_ALL_PERSIST;
+ else
+ *strategy = H5F_FILE_SPACE_ALL;
+ break;
+
+ case H5F_FSPACE_STRATEGY_AGGR:
+ *strategy = H5F_FILE_SPACE_AGGR_VFD;
+ break;
+
+ case H5F_FSPACE_STRATEGY_NONE:
+ *strategy = H5F_FILE_SPACE_VFD;
+ break;
+
+ case H5F_FSPACE_STRATEGY_PAGE:
+ case H5F_FSPACE_STRATEGY_NTYPES:
+ default:
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid file space strategy")
+ }
+ }
+
+ if(threshold)
+ *threshold = new_threshold;
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* H5Pget_file_space() */
#endif /* H5_NO_DEPRECATED_SYMBOLS */
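For code migrating off the deprecated call, the switch in H5Pset_file_space() above can be applied by hand. A hedged example of the new-API equivalent of H5Pset_file_space(fcpl_id, H5F_FILE_SPACE_ALL_PERSIST, 1), using only names that appear in the mapping above:

#include "hdf5.h"

/* Old: H5Pset_file_space(fcpl_id, H5F_FILE_SPACE_ALL_PERSIST, 1);
 * New: FSM + aggregator strategy, persistent free space, threshold of 1. */
static herr_t
set_persistent_fsm(hid_t fcpl_id)
{
    return H5Pset_file_space_strategy(fcpl_id, H5F_FSPACE_STRATEGY_FSM_AGGR,
                                      (hbool_t)1, (hsize_t)1);
}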
diff --git a/src/H5Pdxpl.c b/src/H5Pdxpl.c
index c094c20..3c53c15 100644
--- a/src/H5Pdxpl.c
+++ b/src/H5Pdxpl.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -171,9 +169,16 @@
#define H5D_XFER_DIRECT_CHUNK_WRITE_OFFSET_DEF NULL
#define H5D_XFER_DIRECT_CHUNK_WRITE_DATASIZE_SIZE sizeof(uint32_t)
#define H5D_XFER_DIRECT_CHUNK_WRITE_DATASIZE_DEF 0
+/* Definitions for properties of direct chunk read */
+#define H5D_XFER_DIRECT_CHUNK_READ_FLAG_SIZE sizeof(hbool_t)
+#define H5D_XFER_DIRECT_CHUNK_READ_FLAG_DEF FALSE
+#define H5D_XFER_DIRECT_CHUNK_READ_FILTERS_SIZE sizeof(uint32_t)
+#define H5D_XFER_DIRECT_CHUNK_READ_FILTERS_DEF 0
+#define H5D_XFER_DIRECT_CHUNK_READ_OFFSET_SIZE sizeof(hsize_t *)
+#define H5D_XFER_DIRECT_CHUNK_READ_OFFSET_DEF NULL
/* Ring type - private property */
#define H5AC_XFER_RING_SIZE sizeof(unsigned)
-#define H5AC_XFER_RING_DEF H5AC_RING_US
+#define H5AC_XFER_RING_DEF H5AC_RING_USER
#define H5AC_XFER_RING_ENC H5P__encode_unsigned
#define H5AC_XFER_RING_DEC H5P__decode_unsigned
#ifdef H5_DEBUG_BUILD
@@ -295,6 +300,9 @@ static const hbool_t H5D_def_direct_chunk_flag_g = H5D_XFER_DIRECT_CHUNK_WRITE_F
static const uint32_t H5D_def_direct_chunk_filters_g = H5D_XFER_DIRECT_CHUNK_WRITE_FILTERS_DEF; /* Default value for the filters of direct chunk write */
static const hsize_t *H5D_def_direct_chunk_offset_g = H5D_XFER_DIRECT_CHUNK_WRITE_OFFSET_DEF; /* Default value for the offset of direct chunk write */
static const uint32_t H5D_def_direct_chunk_datasize_g = H5D_XFER_DIRECT_CHUNK_WRITE_DATASIZE_DEF; /* Default value for the datasize of direct chunk write */
+static const hbool_t direct_chunk_read_flag = H5D_XFER_DIRECT_CHUNK_READ_FLAG_DEF; /* Default value for the flag of direct chunk read */
+static const hsize_t *direct_chunk_read_offset = H5D_XFER_DIRECT_CHUNK_READ_OFFSET_DEF; /* Default value for the offset of direct chunk read */
+static const uint32_t direct_chunk_read_filters = H5D_XFER_DIRECT_CHUNK_READ_FILTERS_DEF; /* Default value for the filters of direct chunk read */
static const H5AC_ring_t H5D_ring_g = H5AC_XFER_RING_DEF; /* Default value for the cache entry ring type */
#ifdef H5_DEBUG_BUILD
static const H5FD_dxpl_type_t H5D_dxpl_type_g = H5FD_NOIO_DXPL; /* Default value for the dxpl type */
@@ -499,6 +507,24 @@ H5P__dxfr_reg_prop(H5P_genclass_t *pclass)
NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class")
+ /* Register the property of flag for direct chunk read */
+ /* (Note: this property should not have an encode/decode callback) */
+ if(H5P_register_real(pclass, H5D_XFER_DIRECT_CHUNK_READ_FLAG_NAME, H5D_XFER_DIRECT_CHUNK_READ_FLAG_SIZE, &direct_chunk_read_flag,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class")
+
+ /* Register the property of filter for direct chunk read */
+ /* (Note: this property should not have an encode/decode callback) */
+ if(H5P_register_real(pclass, H5D_XFER_DIRECT_CHUNK_READ_FILTERS_NAME, H5D_XFER_DIRECT_CHUNK_READ_FILTERS_SIZE, &direct_chunk_read_filters,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class")
+
+ /* Register the property of offset for direct chunk read */
+ /* (Note: this property should not have an encode/decode callback) */
+ if(H5P_register_real(pclass, H5D_XFER_DIRECT_CHUNK_READ_OFFSET_NAME, H5D_XFER_DIRECT_CHUNK_READ_OFFSET_SIZE, &direct_chunk_read_offset,
+ NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class")
+
/* Register the ring property (private) */
if(H5P_register_real(pclass, H5AC_RING_NAME, H5AC_XFER_RING_SIZE, &H5D_ring_g,
NULL, NULL, NULL, H5AC_XFER_RING_ENC, H5AC_XFER_RING_DEC,
diff --git a/src/H5Pencdec.c b/src/H5Pencdec.c
index 1bcd19c..73c48d7 100644
--- a/src/H5Pencdec.c
+++ b/src/H5Pencdec.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Quincey Koziol <koziol@hdfgroup.org>
diff --git a/src/H5Pfapl.c b/src/H5Pfapl.c
index 91a0e03..1b0a4b9 100644
--- a/src/H5Pfapl.c
+++ b/src/H5Pfapl.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -80,18 +78,18 @@
#define H5F_ACS_PREEMPT_READ_CHUNKS_DEC H5P__decode_double
/* Definition for threshold for alignment */
#define H5F_ACS_ALIGN_THRHD_SIZE sizeof(hsize_t)
-#define H5F_ACS_ALIGN_THRHD_DEF 1
+#define H5F_ACS_ALIGN_THRHD_DEF H5F_ALIGN_THRHD_DEF
#define H5F_ACS_ALIGN_THRHD_ENC H5P__encode_hsize_t
#define H5F_ACS_ALIGN_THRHD_DEC H5P__decode_hsize_t
/* Definition for alignment */
#define H5F_ACS_ALIGN_SIZE sizeof(hsize_t)
-#define H5F_ACS_ALIGN_DEF 1
+#define H5F_ACS_ALIGN_DEF H5F_ALIGN_DEF
#define H5F_ACS_ALIGN_ENC H5P__encode_hsize_t
#define H5F_ACS_ALIGN_DEC H5P__decode_hsize_t
/* Definition for minimum metadata allocation block size (when
aggregating metadata allocations. */
#define H5F_ACS_META_BLOCK_SIZE_SIZE sizeof(hsize_t)
-#define H5F_ACS_META_BLOCK_SIZE_DEF 2048
+#define H5F_ACS_META_BLOCK_SIZE_DEF H5F_META_BLOCK_SIZE_DEF
#define H5F_ACS_META_BLOCK_SIZE_ENC H5P__encode_hsize_t
#define H5F_ACS_META_BLOCK_SIZE_DEC H5P__decode_hsize_t
/* Definition for maximum sieve buffer size (when data sieving
@@ -103,7 +101,7 @@
/* Definition for minimum "small data" allocation block size (when
aggregating "small" raw data allocations. */
#define H5F_ACS_SDATA_BLOCK_SIZE_SIZE sizeof(hsize_t)
-#define H5F_ACS_SDATA_BLOCK_SIZE_DEF 2048
+#define H5F_ACS_SDATA_BLOCK_SIZE_DEF H5F_SDATA_BLOCK_SIZE_DEF
#define H5F_ACS_SDATA_BLOCK_SIZE_ENC H5P__encode_hsize_t
#define H5F_ACS_SDATA_BLOCK_SIZE_DEC H5P__decode_hsize_t
/* Definition for garbage-collect references */
@@ -225,6 +223,28 @@
#define H5F_ACS_COLL_MD_WRITE_FLAG_ENC H5P__encode_hbool_t
#define H5F_ACS_COLL_MD_WRITE_FLAG_DEC H5P__decode_hbool_t
#endif /* H5_HAVE_PARALLEL */
+/* Definitions for the initial metadata cache image configuration */
+#define H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_SIZE sizeof(H5AC_cache_image_config_t)
+#define H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_DEF H5AC__DEFAULT_CACHE_IMAGE_CONFIG
+#define H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_ENC H5P__facc_cache_image_config_enc
+#define H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_DEC H5P__facc_cache_image_config_dec
+#define H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_CMP H5P__facc_cache_image_config_cmp
+/* Definition for the total size of the page buffer (bytes) */
+#define H5F_ACS_PAGE_BUFFER_SIZE_SIZE sizeof(size_t)
+#define H5F_ACS_PAGE_BUFFER_SIZE_DEF 0
+#define H5F_ACS_PAGE_BUFFER_SIZE_ENC H5P__encode_size_t
+#define H5F_ACS_PAGE_BUFFER_SIZE_DEC H5P__decode_size_t
+/* Definition for the minimum metadata percentage of the page buffer */
+#define H5F_ACS_PAGE_BUFFER_MIN_META_PERC_SIZE sizeof(unsigned)
+#define H5F_ACS_PAGE_BUFFER_MIN_META_PERC_DEF 0
+#define H5F_ACS_PAGE_BUFFER_MIN_META_PERC_ENC H5P__encode_unsigned
+#define H5F_ACS_PAGE_BUFFER_MIN_META_PERC_DEC H5P__decode_unsigned
+/* Definition for the minimum raw data percentage of the page buffer */
+#define H5F_ACS_PAGE_BUFFER_MIN_RAW_PERC_SIZE sizeof(unsigned)
+#define H5F_ACS_PAGE_BUFFER_MIN_RAW_PERC_DEF 0
+#define H5F_ACS_PAGE_BUFFER_MIN_RAW_PERC_ENC H5P__encode_unsigned
+#define H5F_ACS_PAGE_BUFFER_MIN_RAW_PERC_DEC H5P__decode_unsigned
+
/******************/
/* Local Typedefs */
@@ -279,6 +299,11 @@ static herr_t H5P_facc_mdc_log_location_copy(const char *name, size_t size, void
static int H5P_facc_mdc_log_location_cmp(const void *value1, const void *value2, size_t size);
static herr_t H5P_facc_mdc_log_location_close(const char *name, size_t size, void *value);
+/* Metadata cache image property callbacks */
+static int H5P__facc_cache_image_config_cmp(const void *_config1, const void *_config2, size_t H5_ATTR_UNUSED size);
+static herr_t H5P__facc_cache_image_config_enc(const void *value, void **_pp, size_t *size);
+static herr_t H5P__facc_cache_image_config_dec(const void **_pp, void *_value);
+
/*********************/
/* Package Variables */
@@ -346,6 +371,10 @@ static const hbool_t H5F_def_evict_on_close_flag_g = H5F_ACS_EVICT_ON_CLOSE_FLAG
static const H5P_coll_md_read_flag_t H5F_def_coll_md_read_flag_g = H5F_ACS_COLL_MD_READ_FLAG_DEF; /* Default setting for the collective metedata read flag */
static const hbool_t H5F_def_coll_md_write_flag_g = H5F_ACS_COLL_MD_WRITE_FLAG_DEF; /* Default setting for the collective metedata write flag */
#endif /* H5_HAVE_PARALLEL */
+static const H5AC_cache_image_config_t H5F_def_mdc_initCacheImageCfg_g = H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_DEF; /* Default metadata cache image settings */
+static const size_t H5F_def_page_buf_size_g = H5F_ACS_PAGE_BUFFER_SIZE_DEF; /* Default page buffer size */
+static const unsigned H5F_def_page_buf_min_meta_perc_g = H5F_ACS_PAGE_BUFFER_MIN_META_PERC_DEF; /* Default page buffer minimum metadata percentage */
+static const unsigned H5F_def_page_buf_min_raw_perc_g = H5F_ACS_PAGE_BUFFER_MIN_RAW_PERC_DEF; /* Default page buffer minimum raw data percentage */
/*-------------------------------------------------------------------------
@@ -555,6 +584,28 @@ H5P__facc_reg_prop(H5P_genclass_t *pclass)
HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class")
#endif /* H5_HAVE_PARALLEL */
+ /* Register the initial metadata cache image configuration */
+ if(H5P_register_real(pclass, H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_NAME, H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_SIZE, &H5F_def_mdc_initCacheImageCfg_g,
+ NULL, NULL, NULL, H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_ENC, H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_DEC,
+ NULL, NULL, H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_CMP, NULL) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class")
+
+    /* Register the page buffer size */
+ if(H5P_register_real(pclass, H5F_ACS_PAGE_BUFFER_SIZE_NAME, H5F_ACS_PAGE_BUFFER_SIZE_SIZE, &H5F_def_page_buf_size_g,
+ NULL, NULL, NULL, H5F_ACS_PAGE_BUFFER_SIZE_ENC, H5F_ACS_PAGE_BUFFER_SIZE_DEC,
+ NULL, NULL, NULL, NULL) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class")
+    /* Register the page buffer minimum metadata percentage */
+ if(H5P_register_real(pclass, H5F_ACS_PAGE_BUFFER_MIN_META_PERC_NAME, H5F_ACS_PAGE_BUFFER_MIN_META_PERC_SIZE, &H5F_def_page_buf_min_meta_perc_g,
+ NULL, NULL, NULL, H5F_ACS_PAGE_BUFFER_MIN_META_PERC_ENC, H5F_ACS_PAGE_BUFFER_MIN_META_PERC_DEC,
+ NULL, NULL, NULL, NULL) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class")
+    /* Register the page buffer minimum raw data percentage */
+ if(H5P_register_real(pclass, H5F_ACS_PAGE_BUFFER_MIN_RAW_PERC_NAME, H5F_ACS_PAGE_BUFFER_MIN_RAW_PERC_SIZE, &H5F_def_page_buf_min_raw_perc_g,
+ NULL, NULL, NULL, H5F_ACS_PAGE_BUFFER_MIN_RAW_PERC_ENC, H5F_ACS_PAGE_BUFFER_MIN_RAW_PERC_DEC,
+ NULL, NULL, NULL, NULL) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class")
+
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5P__facc_reg_prop() */
@@ -1543,6 +1594,101 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5Pset_mdc_image_config
+ *
+ * Purpose: Set the initial metadata cache image configuration in the
+ * target FAPL.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: J. Mainzer
+ * Thursday, June 25, 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Pset_mdc_image_config(hid_t plist_id, H5AC_cache_image_config_t *config_ptr)
+{
+ H5P_genplist_t *plist; /* Property list pointer */
+ herr_t ret_value = SUCCEED; /* return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE2("e", "i*x", plist_id, config_ptr);
+
+ /* Get the plist structure */
+ if(NULL == (plist = H5P_object_verify(plist_id,H5P_FILE_ACCESS)))
+ HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID")
+
+ /* validate the new configuration */
+ if(H5AC_validate_cache_image_config(config_ptr) < 0)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid metadata cache image configuration")
+
+ /* set the modified metadata cache image config */
+
+ /* If we ever support multiple versions of H5AC_cache_image_config_t, we
+ * will have to test the version and do translation here.
+ */
+
+ if(H5P_set(plist, H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_NAME, config_ptr) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set metadata cache image initial config")
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* H5Pset_mdc_image_config() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5Pget_mdc_image_config
+ *
+ * Purpose: Retrieve the metadata cache initial image configuration
+ * from the target FAPL.
+ *
+ * Observe that the function will fail if config_ptr is
+ * NULL, or if config_ptr->version specifies an unknown
+ * version of H5AC_cache_image_config_t.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: J. Mainzer
+ * Friday, June 26, 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Pget_mdc_image_config(hid_t plist_id, H5AC_cache_image_config_t *config_ptr)
+{
+ H5P_genplist_t *plist; /* Property list pointer */
+ herr_t ret_value = SUCCEED; /* return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE2("e", "i*x", plist_id, config_ptr);
+
+ /* Get the plist structure */
+ if(NULL == (plist = H5P_object_verify(plist_id,H5P_FILE_ACCESS)))
+ HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID")
+
+ /* validate the config_ptr */
+ if(config_ptr == NULL)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "NULL config_ptr on entry.")
+
+ if(config_ptr->version != H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Unknown image config version.")
+
+    /* If we ever support multiple versions of H5AC_cache_image_config_t, we
+     * will have to get the canonical version here, and then translate
+ * to the version of the structure supplied.
+ */
+
+    /* Get the current initial metadata cache image configuration */
+ if(H5P_get(plist, H5F_ACS_META_CACHE_INIT_IMAGE_CONFIG_NAME, config_ptr) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET,FAIL, "can't get metadata cache initial image config")
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* H5Pget_mdc_image_config() */
+
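A hedged usage sketch for the two new FAPL calls above; the config field names are taken from the structure handled by the compare/encode callbacks later in this patch, while the -1 ageout value meaning "no ageout" is an assumption:

#include "hdf5.h"

static herr_t
enable_cache_image(hid_t fapl_id)
{
    H5AC_cache_image_config_t config;

    /* The get call requires the version field to be set on entry */
    config.version = H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION;
    if(H5Pget_mdc_image_config(fapl_id, &config) < 0)
        return -1;

    config.generate_image     = 1;      /* request a cache image on file close */
    config.save_resize_status = 0;
    config.entry_ageout       = -1;     /* assumption: -1 == no entry ageout */

    return H5Pset_mdc_image_config(fapl_id, &config);
}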
+
+/*-------------------------------------------------------------------------
* Function: H5Pset_mdc_config
*
* Purpose: Set the initial metadata cache resize configuration in the
@@ -2754,6 +2900,147 @@ done:
/*-------------------------------------------------------------------------
+ * Function: H5P__facc_cache_image_config_cmp
+ *
+ * Purpose: Compare two cache image configurations.
+ *
+ * Return: positive if VALUE1 is greater than VALUE2, negative if VALUE2 is
+ * greater than VALUE1 and zero if VALUE1 and VALUE2 are equal.
+ *
+ * Programmer: John Mainzer
+ * June 26, 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+H5P__facc_cache_image_config_cmp(const void *_config1, const void *_config2, size_t H5_ATTR_UNUSED size)
+{
+ const H5AC_cache_image_config_t *config1 = (const H5AC_cache_image_config_t *)_config1; /* Create local aliases for values */
+ const H5AC_cache_image_config_t *config2 = (const H5AC_cache_image_config_t *)_config2; /* Create local aliases for values */
+ int ret_value = 0; /* Return value */
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Check for a property being set */
+ if(config1 == NULL && config2 != NULL) HGOTO_DONE(-1);
+ if(config1 != NULL && config2 == NULL) HGOTO_DONE(1);
+
+ if(config1->version < config2->version) HGOTO_DONE(-1);
+ if(config1->version > config2->version) HGOTO_DONE(1);
+
+ if(config1->generate_image < config2->generate_image) HGOTO_DONE(-1);
+ if(config1->generate_image > config2->generate_image) HGOTO_DONE(1);
+
+ if(config1->save_resize_status < config2->save_resize_status) HGOTO_DONE(-1);
+ if(config1->save_resize_status > config2->save_resize_status) HGOTO_DONE(1);
+
+ if(config1->entry_ageout < config2->entry_ageout) HGOTO_DONE(-1);
+ if(config1->entry_ageout > config2->entry_ageout) HGOTO_DONE(1);
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5P__facc_cache_image_config_cmp() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5P__facc_cache_image_config_enc
+ *
+ * Purpose: Callback routine which is called whenever the default
+ * cache image config property in the file creation
+ * cache image config property in the file access
+ *
+ * Return: Success: Non-negative
+ * Failure: Negative
+ *
+ * Programmer: John Mainzer
+ * June 26, 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5P__facc_cache_image_config_enc(const void *value, void **_pp, size_t *size)
+{
+ const H5AC_cache_image_config_t *config = (const H5AC_cache_image_config_t *)value; /* Create local aliases for value */
+ uint8_t **pp = (uint8_t **)_pp;
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity check */
+ HDassert(value);
+
+ if(NULL != *pp) {
+ /* Encode type sizes (as a safety check) */
+ *(*pp)++ = (uint8_t)sizeof(unsigned);
+
+ INT32ENCODE(*pp, (int32_t)config->version);
+
+ H5_ENCODE_UNSIGNED(*pp, config->generate_image);
+
+ H5_ENCODE_UNSIGNED(*pp, config->save_resize_status);
+
+ INT32ENCODE(*pp, (int32_t)config->entry_ageout);
+ } /* end if */
+
+ /* Compute encoded size of fixed-size values */
+ *size += (1 + (2 * sizeof(unsigned)) + (2 * sizeof(int32_t)));
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5P__facc_cache_image_config_enc() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5P__facc_cache_image_config_dec
+ *
+ * Purpose: Callback routine which is called whenever the default
+ * cache image config property in the file access property
+ * list is decoded.
+ *
+ * Return: Success: Non-negative
+ * Failure: Negative
+ *
+ * Programmer: John Mainzer
+ * June 26, 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5P__facc_cache_image_config_dec(const void **_pp, void *_value)
+{
+ H5AC_cache_image_config_t *config = (H5AC_cache_image_config_t *)_value;
+ const uint8_t **pp = (const uint8_t **)_pp;
+ unsigned enc_size;
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_STATIC
+
+ /* Sanity checks */
+ HDassert(pp);
+ HDassert(*pp);
+ HDassert(config);
+ HDcompile_assert(sizeof(size_t) <= sizeof(uint64_t));
+
+ /* Set property to default value */
+ HDmemcpy(config, &H5F_def_mdc_initCacheImageCfg_g, sizeof(H5AC_cache_image_config_t));
+
+ /* Decode type sizes */
+ enc_size = *(*pp)++;
+ if(enc_size != sizeof(unsigned))
+ HGOTO_ERROR(H5E_PLIST, H5E_BADVALUE, FAIL, "unsigned value can't be decoded")
+
+ INT32DECODE(*pp, config->version);
+
+ H5_DECODE_UNSIGNED(*pp, config->generate_image);
+
+ H5_DECODE_UNSIGNED(*pp, config->save_resize_status);
+
+ INT32DECODE(*pp, config->entry_ageout);
+
+done:
+ FUNC_LEAVE_NOAPI(ret_value)
+} /* end H5P__facc_cache_image_config_dec() */
+
+
+/*-------------------------------------------------------------------------
* Function: H5P__facc_file_image_info_set
*
* Purpose: Copies a file image property when it's set for a property list
@@ -4108,9 +4395,13 @@ H5Pset_evict_on_close(hid_t fapl_id, hbool_t evict_on_close)
if(NULL == (plist = (H5P_genplist_t *)H5I_object(fapl_id)))
HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID")
- /* Set values */
+#ifndef H5_HAVE_PARALLEL
+ /* Set value */
if(H5P_set(plist, H5F_ACS_EVICT_ON_CLOSE_FLAG_NAME, &evict_on_close) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set evict on close property")
+#else
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "evict on close is currently not supported in parallel HDF5")
+#endif /* H5_HAVE_PARALLEL */
done:
FUNC_LEAVE_API(ret_value)
@@ -4426,3 +4717,93 @@ done:
} /* end H5Pget_coll_metadata_write() */
#endif /* H5_HAVE_PARALLEL */
+
+/*-------------------------------------------------------------------------
+ * Function: H5Pset_page_buffer_size
+ *
+ * Purpose: Set the maximum page buffering size. The value must be a
+ * multiple of the file's page allocation size, and paged allocation
+ * must be enabled on the file; otherwise file create/open will fail.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Mohamad Chaarawi
+ * June 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Pset_page_buffer_size(hid_t plist_id, size_t buf_size, unsigned min_meta_perc, unsigned min_raw_perc)
+{
+ H5P_genplist_t *plist; /* Property list pointer */
+ herr_t ret_value = SUCCEED; /* return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE4("e", "izIuIu", plist_id, buf_size, min_meta_perc, min_raw_perc);
+
+ /* Get the plist structure */
+ if(NULL == (plist = H5P_object_verify(plist_id, H5P_FILE_ACCESS)))
+ HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID")
+
+ if(min_meta_perc > 100)
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Minimum metadata percentage must be between 0 and 100 inclusive")
+ if(min_raw_perc > 100)
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Minimum raw data percentage must be between 0 and 100 inclusive")
+
+ if(min_meta_perc + min_raw_perc > 100)
+        HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "Sum of the minimum metadata and raw data percentages can't exceed 100");
+
+ /* Set size */
+ if(H5P_set(plist, H5F_ACS_PAGE_BUFFER_SIZE_NAME, &buf_size) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET,FAIL, "can't set page buffer size")
+ if(H5P_set(plist, H5F_ACS_PAGE_BUFFER_MIN_META_PERC_NAME, &min_meta_perc) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET,FAIL, "can't set percentage of min metadata entries")
+ if(H5P_set(plist, H5F_ACS_PAGE_BUFFER_MIN_RAW_PERC_NAME, &min_raw_perc) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET,FAIL, "can't set percentage of min rawdata entries")
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* end H5Pset_page_buffer_size() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5Pget_page_buffer_size
+ *
+ * Purpose: Retrieves the maximum page buffer size and the minimum metadata and raw data percentages.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Mohamad Chaarawi
+ * June 2015
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Pget_page_buffer_size(hid_t plist_id, size_t *buf_size, unsigned *min_meta_perc, unsigned *min_raw_perc)
+{
+ H5P_genplist_t *plist; /* Property list pointer */
+ herr_t ret_value = SUCCEED; /* return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE4("e", "i*z*Iu*Iu", plist_id, buf_size, min_meta_perc, min_raw_perc);
+
+ /* Get the plist structure */
+ if(NULL == (plist = H5P_object_verify(plist_id, H5P_FILE_ACCESS)))
+ HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID")
+
+ /* Get value(s) */
+
+ if(buf_size)
+ if(H5P_get(plist, H5F_ACS_PAGE_BUFFER_SIZE_NAME, buf_size) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get page buffer size")
+ if(min_meta_perc)
+ if(H5P_get(plist, H5F_ACS_PAGE_BUFFER_MIN_META_PERC_NAME, min_meta_perc) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get page buffer minimum metadata percent")
+ if(min_raw_perc)
+ if(H5P_get(plist, H5F_ACS_PAGE_BUFFER_MIN_RAW_PERC_NAME, min_raw_perc) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get page buffer minimum raw data percent")
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* end H5Pget_page_buffer_size() */
+
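A hedged usage sketch for the two FAPL routines added above; the helper name, buffer size, and percentages are illustrative only, and the file must also be created with the paged file-space strategy (see the H5Pfcpl.c changes further down) for page buffering to take effect:

#include "hdf5.h"

/* Illustrative helper (not part of this patch) */
static int configure_page_buffer(hid_t fapl)
{
    size_t   buf_size = 0;
    unsigned min_meta = 0, min_raw = 0;

    /* 1 MiB page buffer; keep at least 20% for metadata and 30% for raw data */
    if(H5Pset_page_buffer_size(fapl, (size_t)(1024 * 1024), 20, 30) < 0)
        return -1;

    /* Any output pointer may be NULL if that value is not needed */
    if(H5Pget_page_buffer_size(fapl, &buf_size, &min_meta, &min_raw) < 0)
        return -1;

    return (buf_size == 1024 * 1024 && min_meta == 20 && min_raw == 30) ? 0 : -1;
}

The buffer size is only validated against the file's page allocation size at file create/open time, so a mismatch surfaces there rather than in H5Pset_page_buffer_size() itself.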
diff --git a/src/H5Pfcpl.c b/src/H5Pfcpl.c
index d451982..5383aae 100644
--- a/src/H5Pfcpl.c
+++ b/src/H5Pfcpl.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -97,14 +95,23 @@
#define H5F_CRT_SHMSG_BTREE_MIN_ENC H5P__encode_unsigned
#define H5F_CRT_SHMSG_BTREE_MIN_DEC H5P__decode_unsigned
/* Definitions for file space handling strategy */
-#define H5F_CRT_FILE_SPACE_STRATEGY_SIZE sizeof(unsigned)
-#define H5F_CRT_FILE_SPACE_STRATEGY_DEF H5F_FILE_SPACE_STRATEGY_DEF
-#define H5F_CRT_FILE_SPACE_STRATEGY_ENC H5P__encode_unsigned
-#define H5F_CRT_FILE_SPACE_STRATEGY_DEC H5P__decode_unsigned
-#define H5F_CRT_FREE_SPACE_THRESHOLD_SIZE sizeof(hsize_t)
-#define H5F_CRT_FREE_SPACE_THRESHOLD_DEF H5F_FREE_SPACE_THRESHOLD_DEF
-#define H5F_CRT_FREE_SPACE_THRESHOLD_ENC H5P__encode_hsize_t
-#define H5F_CRT_FREE_SPACE_THRESHOLD_DEC H5P__decode_hsize_t
+#define H5F_CRT_FILE_SPACE_STRATEGY_SIZE sizeof(H5F_fspace_strategy_t)
+#define H5F_CRT_FILE_SPACE_STRATEGY_DEF H5F_FILE_SPACE_STRATEGY_DEF
+#define H5F_CRT_FILE_SPACE_STRATEGY_ENC H5P__fcrt_fspace_strategy_enc
+#define H5F_CRT_FILE_SPACE_STRATEGY_DEC H5P__fcrt_fspace_strategy_dec
+#define H5F_CRT_FREE_SPACE_PERSIST_SIZE sizeof(hbool_t)
+#define H5F_CRT_FREE_SPACE_PERSIST_DEF H5F_FREE_SPACE_PERSIST_DEF
+#define H5F_CRT_FREE_SPACE_PERSIST_ENC H5P__encode_hbool_t
+#define H5F_CRT_FREE_SPACE_PERSIST_DEC H5P__decode_hbool_t
+#define H5F_CRT_FREE_SPACE_THRESHOLD_SIZE sizeof(hsize_t)
+#define H5F_CRT_FREE_SPACE_THRESHOLD_DEF H5F_FREE_SPACE_THRESHOLD_DEF
+#define H5F_CRT_FREE_SPACE_THRESHOLD_ENC H5P__encode_hsize_t
+#define H5F_CRT_FREE_SPACE_THRESHOLD_DEC H5P__decode_hsize_t
+/* Definitions for file space page size in support of level-2 page caching */
+#define H5F_CRT_FILE_SPACE_PAGE_SIZE_SIZE sizeof(hsize_t)
+#define H5F_CRT_FILE_SPACE_PAGE_SIZE_DEF H5F_FILE_SPACE_PAGE_SIZE_DEF
+#define H5F_CRT_FILE_SPACE_PAGE_SIZE_ENC H5P__encode_hsize_t
+#define H5F_CRT_FILE_SPACE_PAGE_SIZE_DEC H5P__decode_hsize_t
/******************/
@@ -131,6 +138,8 @@ static herr_t H5P__fcrt_shmsg_index_types_enc(const void *value, void **_pp, siz
static herr_t H5P__fcrt_shmsg_index_types_dec(const void **_pp, void *value);
static herr_t H5P__fcrt_shmsg_index_minsize_enc(const void *value, void **_pp, size_t *size);
static herr_t H5P__fcrt_shmsg_index_minsize_dec(const void **_pp, void *value);
+static herr_t H5P__fcrt_fspace_strategy_enc(const void *value, void **_pp, size_t *size);
+static herr_t H5P__fcrt_fspace_strategy_dec(const void **_pp, void *_value);
/*********************/
@@ -178,8 +187,10 @@ static const unsigned H5F_def_sohm_index_flags_g[H5O_SHMESG_MAX_NINDEXES] = H
static const unsigned H5F_def_sohm_index_minsizes_g[H5O_SHMESG_MAX_NINDEXES] = H5F_CRT_SHMSG_INDEX_MINSIZE_DEF;
static const unsigned H5F_def_sohm_list_max_g = H5F_CRT_SHMSG_LIST_MAX_DEF;
static const unsigned H5F_def_sohm_btree_min_g = H5F_CRT_SHMSG_BTREE_MIN_DEF;
-static const unsigned H5F_def_file_space_strategy_g = H5F_CRT_FILE_SPACE_STRATEGY_DEF;
+static const H5F_fspace_strategy_t H5F_def_file_space_strategy_g = H5F_CRT_FILE_SPACE_STRATEGY_DEF;
+static const hbool_t H5F_def_free_space_persist_g = H5F_CRT_FREE_SPACE_PERSIST_DEF;
static const hsize_t H5F_def_free_space_threshold_g = H5F_CRT_FREE_SPACE_THRESHOLD_DEF;
+static const hsize_t H5F_def_file_space_page_size_g = H5F_CRT_FILE_SPACE_PAGE_SIZE_DEF;
@@ -267,12 +278,24 @@ H5P_fcrt_reg_prop(H5P_genclass_t *pclass)
NULL, NULL, NULL, NULL) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class")
+ /* Register the free-space persist flag */
+ if(H5P_register_real(pclass, H5F_CRT_FREE_SPACE_PERSIST_NAME, H5F_CRT_FREE_SPACE_PERSIST_SIZE, &H5F_def_free_space_persist_g,
+ NULL, NULL, NULL, H5F_CRT_FREE_SPACE_PERSIST_ENC, H5F_CRT_FREE_SPACE_PERSIST_DEC,
+ NULL, NULL, NULL, NULL) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class")
+
/* Register the free space section threshold */
if(H5P_register_real(pclass, H5F_CRT_FREE_SPACE_THRESHOLD_NAME, H5F_CRT_FREE_SPACE_THRESHOLD_SIZE, &H5F_def_free_space_threshold_g,
NULL, NULL, NULL, H5F_CRT_FREE_SPACE_THRESHOLD_ENC, H5F_CRT_FREE_SPACE_THRESHOLD_DEC,
NULL, NULL, NULL, NULL) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class")
+ /* Register the file space page size */
+ if(H5P_register_real(pclass, H5F_CRT_FILE_SPACE_PAGE_SIZE_NAME, H5F_CRT_FILE_SPACE_PAGE_SIZE_SIZE, &H5F_def_file_space_page_size_g,
+ NULL, NULL, NULL, H5F_CRT_FILE_SPACE_PAGE_SIZE_ENC, H5F_CRT_FILE_SPACE_PAGE_SIZE_DEC,
+ NULL, NULL, NULL, NULL) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTINSERT, FAIL, "can't insert property into class")
+
done:
FUNC_LEAVE_NOAPI(ret_value)
} /* end H5P_fcrt_reg_prop() */
@@ -1246,15 +1269,13 @@ H5Pget_shared_mesg_phase_change(hid_t plist_id, unsigned *max_list, unsigned *mi
if(NULL == (plist = H5P_object_verify(plist_id,H5P_FILE_CREATE)))
HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID");
- /* Get value */
- if (max_list) {
+ /* Get value(s) */
+ if(max_list)
if(H5P_get(plist, H5F_CRT_SHMSG_LIST_MAX_NAME, max_list) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get list maximum");
- }
- if (min_btree) {
+ if(min_btree)
if(H5P_get(plist, H5F_CRT_SHMSG_BTREE_MIN_NAME, min_btree) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get SOHM information");
- }
done:
FUNC_LEAVE_API(ret_value)
@@ -1262,15 +1283,12 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5Pset_file_space
+ * Function: H5Pset_file_space_strategy
*
- * Purpose: Sets the strategy that the library employs in managing file space.
- * If strategy is zero, the property is not changed; the existing
- * strategy is retained.
- * Sets the threshold value that the file's free space
- * manager(s) will use to track free space sections.
- * If threshold is zero, the property is not changed; the existing
- * threshold is retained.
+ * Purpose: Sets the "strategy" that the library employs in managing file space.
+ * Sets the "persist" value indicating whether free-space is persisted in the file.
+ * Sets the "threshold" value that the free-space manager(s) will use to track free-space sections.
+ * "persist" and "threshold" are ignored for strategies that do not use free-space managers.
*
* Return: Non-negative on success/Negative on failure
*
@@ -1279,15 +1297,16 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5Pset_file_space(hid_t plist_id, H5F_file_space_type_t strategy, hsize_t threshold)
+H5Pset_file_space_strategy(hid_t plist_id, H5F_fspace_strategy_t strategy, hbool_t persist, hsize_t threshold)
{
H5P_genplist_t *plist; /* Property list pointer */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_API(FAIL)
- H5TRACE3("e", "iFfh", plist_id, strategy, threshold);
+ H5TRACE4("e", "iFfbh", plist_id, strategy, persist, threshold);
- if((unsigned)strategy >= H5F_FILE_SPACE_NTYPES)
+ /* Check arguments */
+ if(strategy >= H5F_FSPACE_STRATEGY_NTYPES)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid strategy")
/* Get the plist structure */
@@ -1295,24 +1314,28 @@ H5Pset_file_space(hid_t plist_id, H5F_file_space_type_t strategy, hsize_t thresh
HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID")
/* Set value(s), if non-zero */
- if(strategy)
- if(H5P_set(plist, H5F_CRT_FILE_SPACE_STRATEGY_NAME, &strategy) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't set file space strategy")
- if(threshold)
- if(H5P_set(plist, H5F_CRT_FREE_SPACE_THRESHOLD_NAME, &threshold) < 0)
- HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't set free-space threshold")
+ if(H5P_set(plist, H5F_CRT_FILE_SPACE_STRATEGY_NAME, &strategy) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set file space strategy")
+
+ /* Ignore persist and threshold settings for strategies that do not use FSM */
+ if(strategy == H5F_FSPACE_STRATEGY_FSM_AGGR || strategy == H5F_FSPACE_STRATEGY_PAGE) {
+ if(H5P_set(plist, H5F_CRT_FREE_SPACE_PERSIST_NAME, &persist) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set free-space persisting status")
+
+ if(H5P_set(plist, H5F_CRT_FREE_SPACE_THRESHOLD_NAME, &threshold) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set free-space threshold")
+ } /* end if */
done:
FUNC_LEAVE_API(ret_value)
-} /* H5Pset_file_space() */
+} /* H5Pset_file_space_strategy() */
/*-------------------------------------------------------------------------
- * Function: H5Pget_file_space
+ * Function: H5Pget_file_space_strategy
*
- * Purpose: Retrieves the strategy that the library uses in managing file space.
- * Retrieves the threshold value that the file's free space
- * managers use to track free space sections.
+ * Purpose: Retrieves the strategy, persist, and threshold that the library
+ * uses in managing file space.
*
* Return: Non-negative on success/Negative on failure
*
@@ -1321,13 +1344,13 @@ done:
*-------------------------------------------------------------------------
*/
herr_t
-H5Pget_file_space(hid_t plist_id, H5F_file_space_type_t *strategy, hsize_t *threshold)
+H5Pget_file_space_strategy(hid_t plist_id, H5F_fspace_strategy_t *strategy, hbool_t *persist, hsize_t *threshold)
{
H5P_genplist_t *plist; /* Property list pointer */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_API(FAIL)
- H5TRACE3("e", "i*Ff*h", plist_id, strategy, threshold);
+ H5TRACE4("e", "i*Ff*b*h", plist_id, strategy, persist, threshold);
/* Get the plist structure */
if(NULL == (plist = H5P_object_verify(plist_id,H5P_FILE_CREATE)))
@@ -1337,11 +1360,158 @@ H5Pget_file_space(hid_t plist_id, H5F_file_space_type_t *strategy, hsize_t *thre
if(strategy)
if(H5P_get(plist, H5F_CRT_FILE_SPACE_STRATEGY_NAME, strategy) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get file space strategy")
+ if(persist)
+ if(H5P_get(plist, H5F_CRT_FREE_SPACE_PERSIST_NAME, persist) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get free-space persisting status")
if(threshold)
if(H5P_get(plist, H5F_CRT_FREE_SPACE_THRESHOLD_NAME, threshold) < 0)
HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get free-space threshold")
done:
FUNC_LEAVE_API(ret_value)
-} /* H5Pget_file_space() */
+} /* H5Pget_file_space_strategy() */
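
A short caller-side sketch of the renamed set/get pair above, using a hypothetical helper; the strategy, persist flag, and threshold values are illustrative:

#include "hdf5.h"

/* Illustrative helper: create an FCPL that uses paged aggregation */
static hid_t make_paged_fcpl(void)
{
    hid_t                 fcpl = H5Pcreate(H5P_FILE_CREATE);
    H5F_fspace_strategy_t strategy;
    hbool_t               persist = 0;
    hsize_t               threshold = 0;

    if(fcpl < 0)
        return -1;

    /* Paged aggregation, persist free space, track sections of 1 byte or more */
    if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, (hbool_t)1, (hsize_t)1) < 0 ||
            H5Pget_file_space_strategy(fcpl, &strategy, &persist, &threshold) < 0) {
        H5Pclose(fcpl);
        return -1;
    }

    return fcpl;
}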
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5P__fcrt_fspace_strategy_enc
+ *
+ * Purpose: Callback routine which is called whenever the free-space
+ * strategy property in the file creation property list
+ * is encoded.
+ *
+ * Return: Success: Non-negative
+ * Failure: Negative
+ *
+ * Programmer: Quincey Koziol
+ * Friday, December 27, 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5P__fcrt_fspace_strategy_enc(const void *value, void **_pp, size_t *size)
+{
+ const H5F_fspace_strategy_t *strategy = (const H5F_fspace_strategy_t *)value; /* Create local alias for values */
+ uint8_t **pp = (uint8_t **)_pp;
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity check */
+ HDassert(strategy);
+ HDassert(size);
+
+ if(NULL != *pp)
+ /* Encode free-space strategy */
+ *(*pp)++ = (uint8_t)*strategy;
+
+ /* Size of free-space strategy */
+ (*size)++;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5P__fcrt_fspace_strategy_enc() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5P__fcrt_fspace_strategy_dec
+ *
+ * Purpose: Callback routine which is called whenever the free-space
+ * strategy property in the file creation property list
+ * is decoded.
+ *
+ * Return: Success: Non-negative
+ * Failure: Negative
+ *
+ * Programmer: Quincey Koziol
+ * Friday, December 27, 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+H5P__fcrt_fspace_strategy_dec(const void **_pp, void *_value)
+{
+ H5F_fspace_strategy_t *strategy = (H5F_fspace_strategy_t *)_value; /* Free-space strategy */
+ const uint8_t **pp = (const uint8_t **)_pp;
+
+ FUNC_ENTER_STATIC_NOERR
+
+ /* Sanity checks */
+ HDassert(pp);
+ HDassert(*pp);
+ HDassert(strategy);
+
+ /* Decode free-space strategy */
+ *strategy = (H5F_fspace_strategy_t)*(*pp)++;
+
+ FUNC_LEAVE_NOAPI(SUCCEED)
+} /* end H5P__fcrt_fspace_strategy_dec() */
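
The two callbacks above follow the usual property encode/decode convention: a sizing pass with a NULL buffer only accumulates the encoded length, a second pass writes one byte per strategy value, and the decoder reads that byte back. A standalone sketch of the same pattern (plain C with made-up names, not the private H5P types) might look like:

#include <stdint.h>
#include <stddef.h>

typedef enum { SKETCH_FSM_AGGR, SKETCH_PAGE, SKETCH_AGGR, SKETCH_NONE } sketch_strategy_t;

/* Encode: when *pp is NULL, only the encoded size is accumulated (sizing pass) */
static void sketch_strategy_enc(sketch_strategy_t s, uint8_t **pp, size_t *size)
{
    if(*pp != NULL)
        *(*pp)++ = (uint8_t)s;      /* one byte per strategy value */
    (*size)++;
}

/* Decode: read the single byte back and advance the buffer pointer */
static sketch_strategy_t sketch_strategy_dec(const uint8_t **pp)
{
    return (sketch_strategy_t)*(*pp)++;
}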
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5Pset_file_space_page_size
+ *
+ * Purpose: Sets the file space page size for paged aggregation.
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; August 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Pset_file_space_page_size(hid_t plist_id, hsize_t fsp_size)
+{
+ H5P_genplist_t *plist; /* Property list pointer */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE2("e", "ih", plist_id, fsp_size);
+
+ /* Get the plist structure */
+ if(NULL == (plist = H5P_object_verify(plist_id, H5P_FILE_CREATE)))
+ HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID")
+
+ if(fsp_size < H5F_FILE_SPACE_PAGE_SIZE_MIN)
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "cannot set file space page size to less than 512")
+
+ /* Set the value */
+ if(H5P_set(plist, H5F_CRT_FILE_SPACE_PAGE_SIZE_NAME, &fsp_size) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTSET, FAIL, "can't set file space page size")
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* H5Pset_file_space_page_size() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: H5Pget_file_space_page_size
+ *
+ * Purpose: Retrieves the file space page size used for aggregating small
+ * metadata and raw data, returned in the parameter "fsp_size".
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Vailin Choi; August 2012
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+H5Pget_file_space_page_size(hid_t plist_id, hsize_t *fsp_size)
+{
+ H5P_genplist_t *plist; /* Property list pointer */
+ herr_t ret_value = SUCCEED; /* Return value */
+
+ FUNC_ENTER_API(FAIL)
+ H5TRACE2("e", "i*h", plist_id, fsp_size);
+
+ /* Get the plist structure */
+ if(NULL == (plist = H5P_object_verify(plist_id, H5P_FILE_CREATE)))
+ HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID")
+
+ /* Get value */
+ if(fsp_size)
+ if(H5P_get(plist, H5F_CRT_FILE_SPACE_PAGE_SIZE_NAME, fsp_size) < 0)
+ HGOTO_ERROR(H5E_PLIST, H5E_CANTGET, FAIL, "can't get file space page size")
+
+done:
+ FUNC_LEAVE_API(ret_value)
+} /* H5Pget_file_space_page_size() */
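
A hedged usage sketch for the page-size pair above; the helper name and the 4 KiB value are illustrative, and the size must not fall below the 512-byte minimum enforced in H5Pset_file_space_page_size():

#include "hdf5.h"

/* Illustrative helper (not part of this patch) */
static int set_and_verify_page_size(hid_t fcpl)
{
    hsize_t fsp_size = 0;

    /* 4 KiB pages; must be >= H5F_FILE_SPACE_PAGE_SIZE_MIN (512 bytes) */
    if(H5Pset_file_space_page_size(fcpl, (hsize_t)4096) < 0)
        return -1;

    if(H5Pget_file_space_page_size(fcpl, &fsp_size) < 0)
        return -1;

    return (fsp_size == 4096) ? 0 : -1;
}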
diff --git a/src/H5Pfmpl.c b/src/H5Pfmpl.c
index de9fa3b..e858a79 100644
--- a/src/H5Pfmpl.c
+++ b/src/H5Pfmpl.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Pgcpl.c b/src/H5Pgcpl.c
index f028f7a..6f1fab1 100644
--- a/src/H5Pgcpl.c
+++ b/src/H5Pgcpl.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Pint.c b/src/H5Pint.c
index 8e9a9bc..fe17a19 100644
--- a/src/H5Pint.c
+++ b/src/H5Pint.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Quincey Koziol <koziol@ncsa.uiuc.edu>
diff --git a/src/H5Plapl.c b/src/H5Plapl.c
index a0ec7f1..18b81ac 100644
--- a/src/H5Plapl.c
+++ b/src/H5Plapl.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Plcpl.c b/src/H5Plcpl.c
index f6e7793..6508a82 100644
--- a/src/H5Plcpl.c
+++ b/src/H5Plcpl.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Pmodule.h b/src/H5Pmodule.h
index ddf7c0f..d5c471a 100644
--- a/src/H5Pmodule.h
+++ b/src/H5Pmodule.h
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Pocpl.c b/src/H5Pocpl.c
index 27044d5..0393f7f 100644
--- a/src/H5Pocpl.c
+++ b/src/H5Pocpl.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Pocpypl.c b/src/H5Pocpypl.c
index faa2a04..47bba05 100644
--- a/src/H5Pocpypl.c
+++ b/src/H5Pocpypl.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Ppkg.h b/src/H5Ppkg.h
index 7d29f3d..13463ae 100644
--- a/src/H5Ppkg.h
+++ b/src/H5Ppkg.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Pprivate.h b/src/H5Pprivate.h
index 29fb919..a468464 100644
--- a/src/H5Pprivate.h
+++ b/src/H5Pprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Ppublic.h b/src/H5Ppublic.h
index c736d7b..55b3877 100644
--- a/src/H5Ppublic.h
+++ b/src/H5Ppublic.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -300,8 +298,10 @@ H5_DLL herr_t H5Pset_shared_mesg_index(hid_t plist_id, unsigned index_num, unsig
H5_DLL herr_t H5Pget_shared_mesg_index(hid_t plist_id, unsigned index_num, unsigned *mesg_type_flags, unsigned *min_mesg_size);
H5_DLL herr_t H5Pset_shared_mesg_phase_change(hid_t plist_id, unsigned max_list, unsigned min_btree);
H5_DLL herr_t H5Pget_shared_mesg_phase_change(hid_t plist_id, unsigned *max_list, unsigned *min_btree);
-H5_DLL herr_t H5Pset_file_space(hid_t plist_id, H5F_file_space_type_t strategy, hsize_t threshold);
-H5_DLL herr_t H5Pget_file_space(hid_t plist_id, H5F_file_space_type_t *strategy, hsize_t *threshold);
+H5_DLL herr_t H5Pset_file_space_strategy(hid_t plist_id, H5F_fspace_strategy_t strategy, hbool_t persist, hsize_t threshold);
+H5_DLL herr_t H5Pget_file_space_strategy(hid_t plist_id, H5F_fspace_strategy_t *strategy, hbool_t *persist, hsize_t *threshold);
+H5_DLL herr_t H5Pset_file_space_page_size(hid_t plist_id, hsize_t fsp_size);
+H5_DLL herr_t H5Pget_file_space_page_size(hid_t plist_id, hsize_t *fsp_size);
/* File access property list (FAPL) routines */
H5_DLL herr_t H5Pset_alignment(hid_t fapl_id, hsize_t threshold,
@@ -365,6 +365,10 @@ H5_DLL herr_t H5Pget_all_coll_metadata_ops(hid_t plist_id, hbool_t *is_collectiv
H5_DLL herr_t H5Pset_coll_metadata_write(hid_t plist_id, hbool_t is_collective);
H5_DLL herr_t H5Pget_coll_metadata_write(hid_t plist_id, hbool_t *is_collective);
#endif /* H5_HAVE_PARALLEL */
+H5_DLL herr_t H5Pset_mdc_image_config(hid_t plist_id, H5AC_cache_image_config_t *config_ptr);
+H5_DLL herr_t H5Pget_mdc_image_config(hid_t plist_id, H5AC_cache_image_config_t *config_ptr /*out*/);
+H5_DLL herr_t H5Pset_page_buffer_size(hid_t plist_id, size_t buf_size, unsigned min_meta_perc, unsigned min_raw_perc);
+H5_DLL herr_t H5Pget_page_buffer_size(hid_t plist_id, size_t *buf_size, unsigned *min_meta_perc, unsigned *min_raw_perc);
/* Dataset creation property list (DCPL) routines */
H5_DLL herr_t H5Pset_layout(hid_t plist_id, H5D_layout_t layout);
@@ -532,6 +536,8 @@ H5_DLL herr_t H5Pget_filter_by_id1(hid_t plist_id, H5Z_filter_t id,
H5_DLL herr_t H5Pget_version(hid_t plist_id, unsigned *boot/*out*/,
unsigned *freelist/*out*/, unsigned *stab/*out*/,
unsigned *shhdr/*out*/);
+H5_DLL herr_t H5Pset_file_space(hid_t plist_id, H5F_file_space_type_t strategy, hsize_t threshold);
+H5_DLL herr_t H5Pget_file_space(hid_t plist_id, H5F_file_space_type_t *strategy, hsize_t *threshold);
#endif /* H5_NO_DEPRECATED_SYMBOLS */
#ifdef __cplusplus
diff --git a/src/H5Pstrcpl.c b/src/H5Pstrcpl.c
index 5a09cd7..fb91356 100644
--- a/src/H5Pstrcpl.c
+++ b/src/H5Pstrcpl.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Ptest.c b/src/H5Ptest.c
index f6cc97e..475a164 100644
--- a/src/H5Ptest.c
+++ b/src/H5Ptest.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Quincey Koziol <koziol@ncsa.uiuc.edu>
diff --git a/src/H5R.c b/src/H5R.c
index 43b0a91..73c1d55 100644
--- a/src/H5R.c
+++ b/src/H5R.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/****************/
diff --git a/src/H5RS.c b/src/H5RS.c
index 5bbdabd..0a3fff0 100644
--- a/src/H5RS.c
+++ b/src/H5RS.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5RSprivate.h b/src/H5RSprivate.h
index 757e0e4..f69624a 100644
--- a/src/H5RSprivate.h
+++ b/src/H5RSprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Rdeprec.c b/src/H5Rdeprec.c
index 9461327..109bbb4 100644
--- a/src/H5Rdeprec.c
+++ b/src/H5Rdeprec.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Rmodule.h b/src/H5Rmodule.h
index 6799483..2eaf050 100644
--- a/src/H5Rmodule.h
+++ b/src/H5Rmodule.h
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Rpkg.h b/src/H5Rpkg.h
index 8ed8d65..6d5036b 100644
--- a/src/H5Rpkg.h
+++ b/src/H5Rpkg.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Rprivate.h b/src/H5Rprivate.h
index 60df636..7efa225 100644
--- a/src/H5Rprivate.h
+++ b/src/H5Rprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Rpublic.h b/src/H5Rpublic.h
index e990661..446b7cd 100644
--- a/src/H5Rpublic.h
+++ b/src/H5Rpublic.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5S.c b/src/H5S.c
index 2fab71a..9ac40a7 100644
--- a/src/H5S.c
+++ b/src/H5S.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/****************/
@@ -1546,7 +1544,7 @@ H5Sencode(hid_t obj_id, void *buf, size_t *nalloc)
H5TRACE3("e", "i*x*z", obj_id, buf, nalloc);
/* Check argument and retrieve object */
- if (NULL==(dspace=(H5S_t *)H5I_object_verify(obj_id, H5I_DATASPACE)))
+ if (NULL == (dspace = (H5S_t *)H5I_object_verify(obj_id, H5I_DATASPACE)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a dataspace")
if(H5S_encode(dspace, (unsigned char **)&buf, nalloc)<0)
diff --git a/src/H5SL.c b/src/H5SL.c
index f9d7654..c0934ca 100644
--- a/src/H5SL.c
+++ b/src/H5SL.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5SLmodule.h b/src/H5SLmodule.h
index d5bfbc2..34f08a1 100644
--- a/src/H5SLmodule.h
+++ b/src/H5SLmodule.h
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5SLprivate.h b/src/H5SLprivate.h
index 856099b..1393a25 100644
--- a/src/H5SLprivate.h
+++ b/src/H5SLprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5SM.c b/src/H5SM.c
index e557c0e..d5ede7e 100644
--- a/src/H5SM.c
+++ b/src/H5SM.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/****************/
@@ -144,7 +142,7 @@ H5SM_init(H5F_t *f, H5P_genplist_t * fc_plist, const H5O_loc_t *ext_loc, hid_t d
HDassert(!H5F_addr_defined(H5F_SOHM_ADDR(f)));
/* Set the ring type in the DXPL */
- if(H5AC_set_ring(dxpl_id, H5AC_RING_US, &dxpl, &orig_ring) < 0)
+ if(H5AC_set_ring(dxpl_id, H5AC_RING_USER, &dxpl, &orig_ring) < 0)
HGOTO_ERROR(H5E_SOHM, H5E_CANTSET, FAIL, "unable to set ring value")
/* Initialize master table */
@@ -2021,7 +2019,7 @@ H5SM_get_info(const H5O_loc_t *ext_loc, H5P_genplist_t *fc_plist, hid_t dxpl_id)
cache_udata.f = f;
/* Set the ring type in the DXPL */
- if(H5AC_set_ring(dxpl_id, H5AC_RING_US, &dxpl, &orig_ring) < 0)
+ if(H5AC_set_ring(dxpl_id, H5AC_RING_USER, &dxpl, &orig_ring) < 0)
HGOTO_ERROR(H5E_SOHM, H5E_CANTSET, FAIL, "unable to set ring value")
/* Read the rest of the SOHM table information from the cache */
diff --git a/src/H5SMbtree2.c b/src/H5SMbtree2.c
index 0110c1e..f0c4963 100644
--- a/src/H5SMbtree2.c
+++ b/src/H5SMbtree2.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/****************/
diff --git a/src/H5SMcache.c b/src/H5SMcache.c
index 455dd1a..f0b469a 100644
--- a/src/H5SMcache.c
+++ b/src/H5SMcache.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5SMmessage.c b/src/H5SMmessage.c
index 92b6a75..7df9f8f 100644
--- a/src/H5SMmessage.c
+++ b/src/H5SMmessage.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/****************/
diff --git a/src/H5SMmodule.h b/src/H5SMmodule.h
index b6991b6..656c7dd 100644
--- a/src/H5SMmodule.h
+++ b/src/H5SMmodule.h
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5SMpkg.h b/src/H5SMpkg.h
index 3b13e23..6dea7ae 100644
--- a/src/H5SMpkg.h
+++ b/src/H5SMpkg.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -256,8 +254,6 @@ H5FL_ARR_EXTERN(H5SM_index_header_t);
H5FL_EXTERN(H5SM_list_t);
H5FL_ARR_EXTERN(H5SM_sohm_t);
-H5_DLLVAR const H5AC_class_t H5AC_SOHM_TABLE[1];
-H5_DLLVAR const H5AC_class_t H5AC_SOHM_LIST[1];
H5_DLLVAR const H5B2_class_t H5SM_INDEX[1];
/****************************/
diff --git a/src/H5SMprivate.h b/src/H5SMprivate.h
index 57afacf..8f9f533 100644
--- a/src/H5SMprivate.h
+++ b/src/H5SMprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5SMtest.c b/src/H5SMtest.c
index 798203d..6a4b63a 100644
--- a/src/H5SMtest.c
+++ b/src/H5SMtest.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/****************/
diff --git a/src/H5ST.c b/src/H5ST.c
index 09c4600..dd5b63c 100644
--- a/src/H5ST.c
+++ b/src/H5ST.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* TERNARY SEARCH TREE ALGS
diff --git a/src/H5STprivate.h b/src/H5STprivate.h
index 9b49b07..07e9afe 100644
--- a/src/H5STprivate.h
+++ b/src/H5STprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Sall.c b/src/H5Sall.c
index fb6b45f..710727b 100644
--- a/src/H5Sall.c
+++ b/src/H5Sall.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Sdbg.c b/src/H5Sdbg.c
index b69604c..c9103f7 100644
--- a/src/H5Sdbg.c
+++ b/src/H5Sdbg.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Shyper.c b/src/H5Shyper.c
index 5231c6e..9263cd8 100644
--- a/src/H5Shyper.c
+++ b/src/H5Shyper.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -7573,7 +7571,7 @@ H5Sselect_hyperslab(hid_t space_id, H5S_seloper_t op, const hsize_t start[],
H5TRACE6("e", "iSs*h*h*h*h", space_id, op, start, stride, count, block);
/* Check args */
- if (NULL == (space=H5I_object_verify(space_id, H5I_DATASPACE)))
+ if (NULL == (space = (H5S_t *)H5I_object_verify(space_id, H5I_DATASPACE)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data space")
if (H5S_SCALAR==H5S_GET_EXTENT_TYPE(space))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "hyperslab doesn't support H5S_SCALAR space")
@@ -7642,7 +7640,7 @@ H5Scombine_hyperslab(hid_t space_id, H5S_seloper_t op, const hsize_t start[],
H5TRACE6("i", "iSs*h*h*h*h", space_id, op, start, stride, count, block);
/* Check args */
- if (NULL == (space=H5I_object_verify(space_id, H5I_DATASPACE)))
+ if (NULL == (space = (H5S_t *)H5I_object_verify(space_id, H5I_DATASPACE)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data space")
if(start==NULL || count==NULL)
HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "hyperslab not specified")
@@ -7770,9 +7768,9 @@ H5Scombine_select(hid_t space1_id, H5S_seloper_t op, hid_t space2_id)
H5TRACE3("i", "iSsi", space1_id, op, space2_id);
/* Check args */
- if (NULL == (space1=H5I_object_verify(space1_id, H5I_DATASPACE)))
+ if (NULL == (space1 = (H5S_t *)H5I_object_verify(space1_id, H5I_DATASPACE)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data space")
- if (NULL == (space2=H5I_object_verify(space2_id, H5I_DATASPACE)))
+ if (NULL == (space2 = (H5S_t *)H5I_object_verify(space2_id, H5I_DATASPACE)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data space")
if(!(op>H5S_SELECT_NOOP && op<H5S_SELECT_INVALID))
HGOTO_ERROR(H5E_ARGS, H5E_UNSUPPORTED, FAIL, "invalid selection operation")
@@ -7898,9 +7896,9 @@ H5Sselect_select(hid_t space1_id, H5S_seloper_t op, hid_t space2_id)
H5TRACE3("e", "iSsi", space1_id, op, space2_id);
/* Check args */
- if (NULL == (space1=H5I_object_verify(space1_id, H5I_DATASPACE)))
+ if (NULL == (space1 = (H5S_t *)H5I_object_verify(space1_id, H5I_DATASPACE)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data space")
- if (NULL == (space2=H5I_object_verify(space2_id, H5I_DATASPACE)))
+ if (NULL == (space2 = (H5S_t *)H5I_object_verify(space2_id, H5I_DATASPACE)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data space")
if(!(op>H5S_SELECT_NOOP && op<H5S_SELECT_INVALID))
HGOTO_ERROR(H5E_ARGS, H5E_UNSUPPORTED, FAIL, "invalid selection operation")
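The H5Shyper.c hunks above add explicit (H5S_t *) casts to the pointer returned by H5I_object_verify(), which returns void *. C converts void * to any object pointer implicitly, but C++ (and C builds that promote such conversions to errors) does not, so the cast lets the call sites compile cleanly either way. A minimal sketch of the same pattern, using invented names (lookup_object, my_space_t) rather than the real H5I internals:

#include <stdio.h>
#include <stdlib.h>

typedef struct { int rank; } my_space_t;            /* stand-in for H5S_t */

/* Stand-in for H5I_object_verify(): hands back the stored object as void *. */
static void *lookup_object(void *registry)
{
    return registry;                                 /* no real ID table here */
}

int main(void)
{
    my_space_t  stored = { 3 };
    my_space_t *space  = NULL;

    /* The explicit cast mirrors the change above; without it a C++ compiler
     * rejects the implicit void * to my_space_t * conversion. */
    if (NULL == (space = (my_space_t *)lookup_object(&stored)))
        return EXIT_FAILURE;

    printf("rank = %d\n", space->rank);
    return EXIT_SUCCESS;
}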
diff --git a/src/H5Smodule.h b/src/H5Smodule.h
index d4d94f2..962f0a2 100644
--- a/src/H5Smodule.h
+++ b/src/H5Smodule.h
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Smpio.c b/src/H5Smpio.c
index 1f97bc8..c24c455 100644
--- a/src/H5Smpio.c
+++ b/src/H5Smpio.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Snone.c b/src/H5Snone.c
index 3492325..104b0bb 100644
--- a/src/H5Snone.c
+++ b/src/H5Snone.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Spkg.h b/src/H5Spkg.h
index e57650a..315af29 100644
--- a/src/H5Spkg.h
+++ b/src/H5Spkg.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Spoint.c b/src/H5Spoint.c
index 1531bac..251a063 100644
--- a/src/H5Spoint.c
+++ b/src/H5Spoint.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Sprivate.h b/src/H5Sprivate.h
index d00b5be..60e0630 100644
--- a/src/H5Sprivate.h
+++ b/src/H5Sprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Spublic.h b/src/H5Spublic.h
index 721c4bf..5ed6249 100644
--- a/src/H5Spublic.h
+++ b/src/H5Spublic.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Sselect.c b/src/H5Sselect.c
index 2968bed..c34e1cc 100644
--- a/src/H5Sselect.c
+++ b/src/H5Sselect.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer:  Quincey Koziol <koziol@ncsa.uiuc.edu>
diff --git a/src/H5Stest.c b/src/H5Stest.c
index e1f4b61..a7bee2b 100644
--- a/src/H5Stest.c
+++ b/src/H5Stest.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Quincey Koziol <koziol@ncsa.uiuc.edu>
diff --git a/src/H5T.c b/src/H5T.c
index 0fb8b91..a525cd5 100644
--- a/src/H5T.c
+++ b/src/H5T.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -24,24 +22,24 @@
/* Module Setup */
/****************/
-#include "H5Tmodule.h" /* This source code file is part of the H5T module */
+#include "H5Tmodule.h" /* This source code file is part of the H5T module */
/***********/
/* Headers */
/***********/
-#include "H5private.h" /* Generic Functions */
-#include "H5ACprivate.h" /* Metadata cache */
-#include "H5Dprivate.h" /* Datasets */
-#include "H5Eprivate.h" /* Error handling */
-#include "H5Fprivate.h" /* Files */
-#include "H5FLprivate.h" /* Free Lists */
-#include "H5FOprivate.h" /* File objects */
-#include "H5Gprivate.h" /* Groups */
-#include "H5Iprivate.h" /* IDs */
-#include "H5MMprivate.h" /* Memory management */
-#include "H5Pprivate.h" /* Property lists */
-#include "H5Tpkg.h" /* Datatypes */
+#include "H5private.h" /* Generic Functions */
+#include "H5ACprivate.h" /* Metadata cache */
+#include "H5Dprivate.h" /* Datasets */
+#include "H5Eprivate.h" /* Error handling */
+#include "H5Fprivate.h" /* Files */
+#include "H5FLprivate.h" /* Free Lists */
+#include "H5FOprivate.h" /* File objects */
+#include "H5Gprivate.h" /* Groups */
+#include "H5Iprivate.h" /* IDs */
+#include "H5MMprivate.h" /* Memory management */
+#include "H5Pprivate.h" /* Property lists */
+#include "H5Tpkg.h" /* Datatypes */
/* Check for header needed for SGI floating-point code */
#ifdef H5_HAVE_SYS_FPU_H
@@ -65,209 +63,222 @@
*/
/* Define the code template for bitfields for the "GUTS" in the H5T_INIT_TYPE macro */
-#define H5T_INIT_TYPE_BITFIELD_CORE { \
- dt->shared->type = H5T_BITFIELD; \
+#define H5T_INIT_TYPE_BITFIELD_CORE { \
+ dt->shared->type = H5T_BITFIELD; \
+}
+
+#define H5T_INIT_TYPE_BITFIELD_COMMON(ENDIANNESS) { \
+ H5T_INIT_TYPE_NUM_COMMON(ENDIANNESS) \
+ H5T_INIT_TYPE_BITFIELD_CORE; \
+}
+
+#define H5T_INIT_TYPE_BITFIELDLE_CORE { \
+ H5T_INIT_TYPE_BITFIELD_COMMON(H5T_ORDER_LE) \
+}
+
+#define H5T_INIT_TYPE_BITFIELDBE_CORE { \
+ H5T_INIT_TYPE_BITFIELD_COMMON(H5T_ORDER_BE) \
}
/* Define the code template for times for the "GUTS" in the H5T_INIT_TYPE macro */
-#define H5T_INIT_TYPE_TIME_CORE { \
- dt->shared->type = H5T_TIME; \
+#define H5T_INIT_TYPE_TIME_CORE { \
+ dt->shared->type = H5T_TIME; \
}
/* Define the code template for types which reset the offset for the "GUTS" in the H5T_INIT_TYPE macro */
-#define H5T_INIT_TYPE_OFFSET_CORE { \
- dt->shared->u.atomic.offset = 0; \
+#define H5T_INIT_TYPE_OFFSET_CORE { \
+ dt->shared->u.atomic.offset = 0; \
}
/* Define common code for all numeric types (floating-point & int, signed & unsigned) */
-#define H5T_INIT_TYPE_NUM_COMMON(ENDIANNESS) { \
- dt->shared->u.atomic.order = ENDIANNESS; \
- dt->shared->u.atomic.offset = 0; \
- dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO; \
- dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO; \
+#define H5T_INIT_TYPE_NUM_COMMON(ENDIANNESS) { \
+ dt->shared->u.atomic.order = ENDIANNESS; \
+ dt->shared->u.atomic.offset = 0; \
+ dt->shared->u.atomic.lsb_pad = H5T_PAD_ZERO; \
+ dt->shared->u.atomic.msb_pad = H5T_PAD_ZERO; \
}
/* Define the code templates for standard floats for the "GUTS" in the H5T_INIT_TYPE macro */
-#define H5T_INIT_TYPE_FLOAT_COMMON(ENDIANNESS) { \
- H5T_INIT_TYPE_NUM_COMMON(ENDIANNESS) \
- dt->shared->u.atomic.u.f.sign = 31; \
- dt->shared->u.atomic.u.f.epos = 23; \
- dt->shared->u.atomic.u.f.esize = 8; \
- dt->shared->u.atomic.u.f.ebias = 0x7f; \
- dt->shared->u.atomic.u.f.mpos = 0; \
- dt->shared->u.atomic.u.f.msize = 23; \
- dt->shared->u.atomic.u.f.norm = H5T_NORM_IMPLIED; \
- dt->shared->u.atomic.u.f.pad = H5T_PAD_ZERO; \
+#define H5T_INIT_TYPE_FLOAT_COMMON(ENDIANNESS) { \
+ H5T_INIT_TYPE_NUM_COMMON(ENDIANNESS) \
+ dt->shared->u.atomic.u.f.sign = 31; \
+ dt->shared->u.atomic.u.f.epos = 23; \
+ dt->shared->u.atomic.u.f.esize = 8; \
+ dt->shared->u.atomic.u.f.ebias = 0x7f; \
+ dt->shared->u.atomic.u.f.mpos = 0; \
+ dt->shared->u.atomic.u.f.msize = 23; \
+ dt->shared->u.atomic.u.f.norm = H5T_NORM_IMPLIED; \
+ dt->shared->u.atomic.u.f.pad = H5T_PAD_ZERO; \
}
-#define H5T_INIT_TYPE_FLOATLE_CORE { \
- H5T_INIT_TYPE_FLOAT_COMMON(H5T_ORDER_LE) \
+#define H5T_INIT_TYPE_FLOATLE_CORE { \
+ H5T_INIT_TYPE_FLOAT_COMMON(H5T_ORDER_LE) \
}
-#define H5T_INIT_TYPE_FLOATBE_CORE { \
- H5T_INIT_TYPE_FLOAT_COMMON(H5T_ORDER_BE) \
+#define H5T_INIT_TYPE_FLOATBE_CORE { \
+ H5T_INIT_TYPE_FLOAT_COMMON(H5T_ORDER_BE) \
}
/* Define the code templates for standard doubles for the "GUTS" in the H5T_INIT_TYPE macro */
-#define H5T_INIT_TYPE_DOUBLE_COMMON(ENDIANNESS) { \
- H5T_INIT_TYPE_NUM_COMMON(ENDIANNESS) \
- dt->shared->u.atomic.u.f.sign = 63; \
- dt->shared->u.atomic.u.f.epos = 52; \
- dt->shared->u.atomic.u.f.esize = 11; \
- dt->shared->u.atomic.u.f.ebias = 0x03ff; \
- dt->shared->u.atomic.u.f.mpos = 0; \
- dt->shared->u.atomic.u.f.msize = 52; \
- dt->shared->u.atomic.u.f.norm = H5T_NORM_IMPLIED; \
- dt->shared->u.atomic.u.f.pad = H5T_PAD_ZERO; \
+#define H5T_INIT_TYPE_DOUBLE_COMMON(ENDIANNESS) { \
+ H5T_INIT_TYPE_NUM_COMMON(ENDIANNESS) \
+ dt->shared->u.atomic.u.f.sign = 63; \
+ dt->shared->u.atomic.u.f.epos = 52; \
+ dt->shared->u.atomic.u.f.esize = 11; \
+ dt->shared->u.atomic.u.f.ebias = 0x03ff; \
+ dt->shared->u.atomic.u.f.mpos = 0; \
+ dt->shared->u.atomic.u.f.msize = 52; \
+ dt->shared->u.atomic.u.f.norm = H5T_NORM_IMPLIED; \
+ dt->shared->u.atomic.u.f.pad = H5T_PAD_ZERO; \
}
-#define H5T_INIT_TYPE_DOUBLELE_CORE { \
- H5T_INIT_TYPE_DOUBLE_COMMON(H5T_ORDER_LE) \
+#define H5T_INIT_TYPE_DOUBLELE_CORE { \
+ H5T_INIT_TYPE_DOUBLE_COMMON(H5T_ORDER_LE) \
}
-#define H5T_INIT_TYPE_DOUBLEBE_CORE { \
- H5T_INIT_TYPE_DOUBLE_COMMON(H5T_ORDER_BE) \
+#define H5T_INIT_TYPE_DOUBLEBE_CORE { \
+ H5T_INIT_TYPE_DOUBLE_COMMON(H5T_ORDER_BE) \
}
/* Define the code templates for VAX float for the "GUTS" in the H5T_INIT_TYPE macro */
-#define H5T_INIT_TYPE_FLOATVAX_CORE { \
- H5T_INIT_TYPE_NUM_COMMON(H5T_ORDER_VAX) \
- dt->shared->u.atomic.u.f.sign = 31; \
- dt->shared->u.atomic.u.f.epos = 23; \
- dt->shared->u.atomic.u.f.esize = 8; \
- dt->shared->u.atomic.u.f.ebias = 0x81; \
- dt->shared->u.atomic.u.f.mpos = 0; \
- dt->shared->u.atomic.u.f.msize = 23; \
- dt->shared->u.atomic.u.f.norm = H5T_NORM_IMPLIED; \
- dt->shared->u.atomic.u.f.pad = H5T_PAD_ZERO; \
- dt->shared->version = H5O_DTYPE_VERSION_3; \
+#define H5T_INIT_TYPE_FLOATVAX_CORE { \
+ H5T_INIT_TYPE_NUM_COMMON(H5T_ORDER_VAX) \
+ dt->shared->u.atomic.u.f.sign = 31; \
+ dt->shared->u.atomic.u.f.epos = 23; \
+ dt->shared->u.atomic.u.f.esize = 8; \
+ dt->shared->u.atomic.u.f.ebias = 0x81; \
+ dt->shared->u.atomic.u.f.mpos = 0; \
+ dt->shared->u.atomic.u.f.msize = 23; \
+ dt->shared->u.atomic.u.f.norm = H5T_NORM_IMPLIED; \
+ dt->shared->u.atomic.u.f.pad = H5T_PAD_ZERO; \
+ dt->shared->version = H5O_DTYPE_VERSION_3; \
}
/* Define the code templates for VAX double for the "GUTS" in the H5T_INIT_TYPE macro */
-#define H5T_INIT_TYPE_DOUBLEVAX_CORE { \
- H5T_INIT_TYPE_NUM_COMMON(H5T_ORDER_VAX) \
- dt->shared->u.atomic.u.f.sign = 63; \
- dt->shared->u.atomic.u.f.epos = 52; \
- dt->shared->u.atomic.u.f.esize = 11; \
- dt->shared->u.atomic.u.f.ebias = 0x0401; \
- dt->shared->u.atomic.u.f.mpos = 0; \
- dt->shared->u.atomic.u.f.msize = 52; \
- dt->shared->u.atomic.u.f.norm = H5T_NORM_IMPLIED; \
- dt->shared->u.atomic.u.f.pad = H5T_PAD_ZERO; \
- dt->shared->version = H5O_DTYPE_VERSION_3; \
+#define H5T_INIT_TYPE_DOUBLEVAX_CORE { \
+ H5T_INIT_TYPE_NUM_COMMON(H5T_ORDER_VAX) \
+ dt->shared->u.atomic.u.f.sign = 63; \
+ dt->shared->u.atomic.u.f.epos = 52; \
+ dt->shared->u.atomic.u.f.esize = 11; \
+ dt->shared->u.atomic.u.f.ebias = 0x0401; \
+ dt->shared->u.atomic.u.f.mpos = 0; \
+ dt->shared->u.atomic.u.f.msize = 52; \
+ dt->shared->u.atomic.u.f.norm = H5T_NORM_IMPLIED; \
+ dt->shared->u.atomic.u.f.pad = H5T_PAD_ZERO; \
+ dt->shared->version = H5O_DTYPE_VERSION_3; \
}
/* Define the code templates for standard signed integers for the "GUTS" in the H5T_INIT_TYPE macro */
-#define H5T_INIT_TYPE_SINT_COMMON(ENDIANNESS) { \
- H5T_INIT_TYPE_NUM_COMMON(ENDIANNESS) \
- dt->shared->u.atomic.u.i.sign = H5T_SGN_2; \
+#define H5T_INIT_TYPE_SINT_COMMON(ENDIANNESS) { \
+ H5T_INIT_TYPE_NUM_COMMON(ENDIANNESS) \
+ dt->shared->u.atomic.u.i.sign = H5T_SGN_2; \
}
-#define H5T_INIT_TYPE_SINTLE_CORE { \
- H5T_INIT_TYPE_SINT_COMMON(H5T_ORDER_LE) \
+#define H5T_INIT_TYPE_SINTLE_CORE { \
+ H5T_INIT_TYPE_SINT_COMMON(H5T_ORDER_LE) \
}
-#define H5T_INIT_TYPE_SINTBE_CORE { \
- H5T_INIT_TYPE_SINT_COMMON(H5T_ORDER_BE) \
+#define H5T_INIT_TYPE_SINTBE_CORE { \
+ H5T_INIT_TYPE_SINT_COMMON(H5T_ORDER_BE) \
}
/* Define the code templates for standard unsigned integers for the "GUTS" in the H5T_INIT_TYPE macro */
-#define H5T_INIT_TYPE_UINT_COMMON(ENDIANNESS) { \
- H5T_INIT_TYPE_NUM_COMMON(ENDIANNESS) \
- dt->shared->u.atomic.u.i.sign = H5T_SGN_NONE; \
+#define H5T_INIT_TYPE_UINT_COMMON(ENDIANNESS) { \
+ H5T_INIT_TYPE_NUM_COMMON(ENDIANNESS) \
+ dt->shared->u.atomic.u.i.sign = H5T_SGN_NONE; \
}
-#define H5T_INIT_TYPE_UINTLE_CORE { \
- H5T_INIT_TYPE_UINT_COMMON(H5T_ORDER_LE) \
+#define H5T_INIT_TYPE_UINTLE_CORE { \
+ H5T_INIT_TYPE_UINT_COMMON(H5T_ORDER_LE) \
}
-#define H5T_INIT_TYPE_UINTBE_CORE { \
- H5T_INIT_TYPE_UINT_COMMON(H5T_ORDER_BE) \
+#define H5T_INIT_TYPE_UINTBE_CORE { \
+ H5T_INIT_TYPE_UINT_COMMON(H5T_ORDER_BE) \
}
/* Define a macro for common code for all newly allocate datatypes */
-#define H5T_INIT_TYPE_ALLOC_COMMON(TYPE) { \
- dt->sh_loc.type = H5O_SHARE_TYPE_UNSHARED; \
- dt->shared->type = TYPE; \
+#define H5T_INIT_TYPE_ALLOC_COMMON(TYPE) { \
+ dt->sh_loc.type = H5O_SHARE_TYPE_UNSHARED; \
+ dt->shared->type = TYPE; \
}
/* Define the code templates for opaque for the "GUTS" in the H5T_INIT_TYPE macro */
-#define H5T_INIT_TYPE_OPAQ_CORE { \
- H5T_INIT_TYPE_ALLOC_COMMON(H5T_OPAQUE) \
- dt->shared->u.opaque.tag = H5MM_xstrdup(""); \
+#define H5T_INIT_TYPE_OPAQ_CORE { \
+ H5T_INIT_TYPE_ALLOC_COMMON(H5T_OPAQUE) \
+ dt->shared->u.opaque.tag = H5MM_xstrdup(""); \
}
/* Define the code templates for strings for the "GUTS" in the H5T_INIT_TYPE macro */
-#define H5T_INIT_TYPE_STRING_COMMON { \
- H5T_INIT_TYPE_ALLOC_COMMON(H5T_STRING) \
- H5T_INIT_TYPE_NUM_COMMON(H5T_ORDER_NONE) \
- dt->shared->u.atomic.u.s.cset = H5F_DEFAULT_CSET; \
+#define H5T_INIT_TYPE_STRING_COMMON { \
+ H5T_INIT_TYPE_ALLOC_COMMON(H5T_STRING) \
+ H5T_INIT_TYPE_NUM_COMMON(H5T_ORDER_NONE) \
+ dt->shared->u.atomic.u.s.cset = H5F_DEFAULT_CSET; \
}
-#define H5T_INIT_TYPE_CSTRING_CORE { \
- H5T_INIT_TYPE_STRING_COMMON \
- dt->shared->u.atomic.u.s.pad = H5T_STR_NULLTERM; \
+#define H5T_INIT_TYPE_CSTRING_CORE { \
+ H5T_INIT_TYPE_STRING_COMMON \
+ dt->shared->u.atomic.u.s.pad = H5T_STR_NULLTERM; \
}
-#define H5T_INIT_TYPE_FORSTRING_CORE { \
- H5T_INIT_TYPE_STRING_COMMON \
- dt->shared->u.atomic.u.s.pad = H5T_STR_SPACEPAD; \
+#define H5T_INIT_TYPE_FORSTRING_CORE { \
+ H5T_INIT_TYPE_STRING_COMMON \
+ dt->shared->u.atomic.u.s.pad = H5T_STR_SPACEPAD; \
}
/* Define the code templates for references for the "GUTS" in the H5T_INIT_TYPE macro */
-#define H5T_INIT_TYPE_REF_COMMON { \
- H5T_INIT_TYPE_ALLOC_COMMON(H5T_REFERENCE) \
- H5T_INIT_TYPE_NUM_COMMON(H5T_ORDER_NONE) \
+#define H5T_INIT_TYPE_REF_COMMON { \
+ H5T_INIT_TYPE_ALLOC_COMMON(H5T_REFERENCE) \
+ H5T_INIT_TYPE_NUM_COMMON(H5T_ORDER_NONE) \
}
-#define H5T_INIT_TYPE_OBJREF_CORE { \
- H5T_INIT_TYPE_REF_COMMON \
- dt->shared->force_conv = TRUE; \
- dt->shared->u.atomic.u.r.rtype = H5R_OBJECT; \
- dt->shared->u.atomic.u.r.loc = H5T_LOC_MEMORY; \
+#define H5T_INIT_TYPE_OBJREF_CORE { \
+ H5T_INIT_TYPE_REF_COMMON \
+ dt->shared->force_conv = TRUE; \
+ dt->shared->u.atomic.u.r.rtype = H5R_OBJECT; \
+ dt->shared->u.atomic.u.r.loc = H5T_LOC_MEMORY; \
}
-#define H5T_INIT_TYPE_REGREF_CORE { \
- H5T_INIT_TYPE_REF_COMMON \
- dt->shared->u.atomic.u.r.rtype = H5R_DATASET_REGION; \
+#define H5T_INIT_TYPE_REGREF_CORE { \
+ H5T_INIT_TYPE_REF_COMMON \
+ dt->shared->u.atomic.u.r.rtype = H5R_DATASET_REGION; \
}
/* Define the code templates for the "SIZE_TMPL" in the H5T_INIT_TYPE macro */
-#define H5T_INIT_TYPE_SET_SIZE(SIZE) { \
- dt->shared->size = SIZE; \
- dt->shared->u.atomic.prec = 8 * SIZE; \
+#define H5T_INIT_TYPE_SET_SIZE(SIZE) { \
+ dt->shared->size = SIZE; \
+ dt->shared->u.atomic.prec = 8 * SIZE; \
}
-#define H5T_INIT_TYPE_NOSET_SIZE(SIZE) { \
+#define H5T_INIT_TYPE_NOSET_SIZE(SIZE) { \
}
/* Define the code templates for the "CRT_TMPL" in the H5T_INIT_TYPE macro */
-#define H5T_INIT_TYPE_COPY_CREATE(BASE) { \
- /* Base off of existing datatype */ \
- if(NULL == (dt = H5T_copy(BASE, H5T_COPY_TRANSIENT))) \
+#define H5T_INIT_TYPE_COPY_CREATE(BASE) { \
+ /* Base off of existing datatype */ \
+ if(NULL == (dt = H5T_copy(BASE, H5T_COPY_TRANSIENT))) \
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTCOPY, FAIL, "duplicating base type failed") \
}
-#define H5T_INIT_TYPE_ALLOC_CREATE(BASE) { \
- /* Allocate new datatype info */ \
- if(NULL == (dt = H5T__alloc())) \
+#define H5T_INIT_TYPE_ALLOC_CREATE(BASE) { \
+ /* Allocate new datatype info */ \
+ if(NULL == (dt = H5T__alloc())) \
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTALLOC, FAIL, "memory allocation failed") \
}
-#define H5T_INIT_TYPE(GUTS,GLOBAL,CRT_TMPL,BASE,SIZE_TMPL,SIZE) { \
- /* Get new datatype struct */ \
- H5_GLUE3(H5T_INIT_TYPE_,CRT_TMPL,_CREATE)(BASE) \
- \
- /* Adjust information for all types */ \
- dt->shared->state = H5T_STATE_IMMUTABLE; \
- H5_GLUE3(H5T_INIT_TYPE_,SIZE_TMPL,_SIZE)(SIZE) \
- \
- /* Adjust information for this type */ \
- H5_GLUE3(H5T_INIT_TYPE_, GUTS, _CORE) \
- \
- /* Atomize result */ \
- if((GLOBAL = H5I_register(H5I_DATATYPE, dt, FALSE)) < 0) \
+#define H5T_INIT_TYPE(GUTS,GLOBAL,CRT_TMPL,BASE,SIZE_TMPL,SIZE) { \
+ /* Get new datatype struct */ \
+ H5_GLUE3(H5T_INIT_TYPE_,CRT_TMPL,_CREATE)(BASE) \
+ \
+ /* Adjust information for all types */ \
+ dt->shared->state = H5T_STATE_IMMUTABLE; \
+ H5_GLUE3(H5T_INIT_TYPE_,SIZE_TMPL,_SIZE)(SIZE) \
+ \
+ /* Adjust information for this type */ \
+ H5_GLUE3(H5T_INIT_TYPE_, GUTS, _CORE) \
+ \
+ /* Atomize result */ \
+ if((GLOBAL = H5I_register(H5I_DATATYPE, dt, FALSE)) < 0) \
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTREGISTER, FAIL, "unable to register datatype atom") \
}
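The re-aligned macros above all feed the H5T_INIT_TYPE(GUTS, ...) template, which selects a per-type initializer by pasting tokens: H5_GLUE3(H5T_INIT_TYPE_, GUTS, _CORE) expands to names such as H5T_INIT_TYPE_BITFIELDLE_CORE, including the new BITFIELDLE/BITFIELDBE variants introduced in this hunk. A simplified, self-contained illustration of that token-pasting dispatch, with invented names (GLUE3, DEMO_INIT_*) standing in for the HDF5 macros:

#include <stdio.h>

#define GLUE3(a, b, c)       a##b##c                 /* paste three tokens into one name */
#define DEMO_INIT_INT_CORE   { kind = "int"; }
#define DEMO_INIT_FLOAT_CORE { kind = "float"; }
#define DEMO_INIT(GUTS)      GLUE3(DEMO_INIT_, GUTS, _CORE)

int main(void)
{
    const char *kind = "unset";

    DEMO_INIT(INT)                                   /* expands to DEMO_INIT_INT_CORE */
    printf("selected core: %s\n", kind);

    DEMO_INIT(FLOAT)                                 /* expands to DEMO_INIT_FLOAT_CORE */
    printf("selected core: %s\n", kind);
    return 0;
}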
@@ -281,9 +292,9 @@
/* Local Prototypes */
/********************/
static herr_t H5T_unregister(H5T_pers_t pers, const char *name, H5T_t *src,
- H5T_t *dst, H5T_conv_t func, hid_t dxpl_id);
+ H5T_t *dst, H5T_conv_t func, hid_t dxpl_id);
static herr_t H5T_register(H5T_pers_t pers, const char *name, H5T_t *src,
- H5T_t *dst, H5T_conv_t func, hid_t dxpl_id, hbool_t api_call);
+ H5T_t *dst, H5T_conv_t func, hid_t dxpl_id, hbool_t api_call);
static htri_t H5T_compiler_conv(H5T_t *src, H5T_t *dst);
static herr_t H5T_set_size(H5T_t *dt, size_t size);
@@ -310,103 +321,103 @@ hbool_t H5_PKG_INIT_VAR = FALSE;
* If more of these are added, the new ones must be added to the list of
* types to reset in H5T_term_package().
*/
-hid_t H5T_IEEE_F32BE_g = FAIL;
-hid_t H5T_IEEE_F32LE_g = FAIL;
-hid_t H5T_IEEE_F64BE_g = FAIL;
-hid_t H5T_IEEE_F64LE_g = FAIL;
-
-hid_t H5T_VAX_F32_g = FAIL;
-hid_t H5T_VAX_F64_g = FAIL;
-
-hid_t H5T_STD_I8BE_g = FAIL;
-hid_t H5T_STD_I8LE_g = FAIL;
-hid_t H5T_STD_I16BE_g = FAIL;
-hid_t H5T_STD_I16LE_g = FAIL;
-hid_t H5T_STD_I32BE_g = FAIL;
-hid_t H5T_STD_I32LE_g = FAIL;
-hid_t H5T_STD_I64BE_g = FAIL;
-hid_t H5T_STD_I64LE_g = FAIL;
-hid_t H5T_STD_U8BE_g = FAIL;
-hid_t H5T_STD_U8LE_g = FAIL;
-hid_t H5T_STD_U16BE_g = FAIL;
-hid_t H5T_STD_U16LE_g = FAIL;
-hid_t H5T_STD_U32BE_g = FAIL;
-hid_t H5T_STD_U32LE_g = FAIL;
-hid_t H5T_STD_U64BE_g = FAIL;
-hid_t H5T_STD_U64LE_g = FAIL;
-hid_t H5T_STD_B8BE_g = FAIL;
-hid_t H5T_STD_B8LE_g = FAIL;
-hid_t H5T_STD_B16BE_g = FAIL;
-hid_t H5T_STD_B16LE_g = FAIL;
-hid_t H5T_STD_B32BE_g = FAIL;
-hid_t H5T_STD_B32LE_g = FAIL;
-hid_t H5T_STD_B64BE_g = FAIL;
-hid_t H5T_STD_B64LE_g = FAIL;
-hid_t H5T_STD_REF_OBJ_g = FAIL;
-hid_t H5T_STD_REF_DSETREG_g = FAIL;
-
-hid_t H5T_UNIX_D32BE_g = FAIL;
-hid_t H5T_UNIX_D32LE_g = FAIL;
-hid_t H5T_UNIX_D64BE_g = FAIL;
-hid_t H5T_UNIX_D64LE_g = FAIL;
-
-hid_t H5T_C_S1_g = FAIL;
-
-hid_t H5T_FORTRAN_S1_g = FAIL;
-
-hid_t H5T_NATIVE_SCHAR_g = FAIL;
-hid_t H5T_NATIVE_UCHAR_g = FAIL;
-hid_t H5T_NATIVE_SHORT_g = FAIL;
-hid_t H5T_NATIVE_USHORT_g = FAIL;
-hid_t H5T_NATIVE_INT_g = FAIL;
-hid_t H5T_NATIVE_UINT_g = FAIL;
-hid_t H5T_NATIVE_LONG_g = FAIL;
-hid_t H5T_NATIVE_ULONG_g = FAIL;
-hid_t H5T_NATIVE_LLONG_g = FAIL;
-hid_t H5T_NATIVE_ULLONG_g = FAIL;
-hid_t H5T_NATIVE_FLOAT_g = FAIL;
-hid_t H5T_NATIVE_DOUBLE_g = FAIL;
+hid_t H5T_IEEE_F32BE_g = FAIL;
+hid_t H5T_IEEE_F32LE_g = FAIL;
+hid_t H5T_IEEE_F64BE_g = FAIL;
+hid_t H5T_IEEE_F64LE_g = FAIL;
+
+hid_t H5T_VAX_F32_g = FAIL;
+hid_t H5T_VAX_F64_g = FAIL;
+
+hid_t H5T_STD_I8BE_g = FAIL;
+hid_t H5T_STD_I8LE_g = FAIL;
+hid_t H5T_STD_I16BE_g = FAIL;
+hid_t H5T_STD_I16LE_g = FAIL;
+hid_t H5T_STD_I32BE_g = FAIL;
+hid_t H5T_STD_I32LE_g = FAIL;
+hid_t H5T_STD_I64BE_g = FAIL;
+hid_t H5T_STD_I64LE_g = FAIL;
+hid_t H5T_STD_U8BE_g = FAIL;
+hid_t H5T_STD_U8LE_g = FAIL;
+hid_t H5T_STD_U16BE_g = FAIL;
+hid_t H5T_STD_U16LE_g = FAIL;
+hid_t H5T_STD_U32BE_g = FAIL;
+hid_t H5T_STD_U32LE_g = FAIL;
+hid_t H5T_STD_U64BE_g = FAIL;
+hid_t H5T_STD_U64LE_g = FAIL;
+hid_t H5T_STD_B8BE_g = FAIL;
+hid_t H5T_STD_B8LE_g = FAIL;
+hid_t H5T_STD_B16BE_g = FAIL;
+hid_t H5T_STD_B16LE_g = FAIL;
+hid_t H5T_STD_B32BE_g = FAIL;
+hid_t H5T_STD_B32LE_g = FAIL;
+hid_t H5T_STD_B64BE_g = FAIL;
+hid_t H5T_STD_B64LE_g = FAIL;
+hid_t H5T_STD_REF_OBJ_g = FAIL;
+hid_t H5T_STD_REF_DSETREG_g = FAIL;
+
+hid_t H5T_UNIX_D32BE_g = FAIL;
+hid_t H5T_UNIX_D32LE_g = FAIL;
+hid_t H5T_UNIX_D64BE_g = FAIL;
+hid_t H5T_UNIX_D64LE_g = FAIL;
+
+hid_t H5T_C_S1_g = FAIL;
+
+hid_t H5T_FORTRAN_S1_g = FAIL;
+
+hid_t H5T_NATIVE_SCHAR_g = FAIL;
+hid_t H5T_NATIVE_UCHAR_g = FAIL;
+hid_t H5T_NATIVE_SHORT_g = FAIL;
+hid_t H5T_NATIVE_USHORT_g = FAIL;
+hid_t H5T_NATIVE_INT_g = FAIL;
+hid_t H5T_NATIVE_UINT_g = FAIL;
+hid_t H5T_NATIVE_LONG_g = FAIL;
+hid_t H5T_NATIVE_ULONG_g = FAIL;
+hid_t H5T_NATIVE_LLONG_g = FAIL;
+hid_t H5T_NATIVE_ULLONG_g = FAIL;
+hid_t H5T_NATIVE_FLOAT_g = FAIL;
+hid_t H5T_NATIVE_DOUBLE_g = FAIL;
#if H5_SIZEOF_LONG_DOUBLE !=0
-hid_t H5T_NATIVE_LDOUBLE_g = FAIL;
+hid_t H5T_NATIVE_LDOUBLE_g = FAIL;
#endif
-hid_t H5T_NATIVE_B8_g = FAIL;
-hid_t H5T_NATIVE_B16_g = FAIL;
-hid_t H5T_NATIVE_B32_g = FAIL;
-hid_t H5T_NATIVE_B64_g = FAIL;
-hid_t H5T_NATIVE_OPAQUE_g = FAIL;
-hid_t H5T_NATIVE_HADDR_g = FAIL;
-hid_t H5T_NATIVE_HSIZE_g = FAIL;
-hid_t H5T_NATIVE_HSSIZE_g = FAIL;
-hid_t H5T_NATIVE_HERR_g = FAIL;
-hid_t H5T_NATIVE_HBOOL_g = FAIL;
-
-hid_t H5T_NATIVE_INT8_g = FAIL;
-hid_t H5T_NATIVE_UINT8_g = FAIL;
-hid_t H5T_NATIVE_INT_LEAST8_g = FAIL;
-hid_t H5T_NATIVE_UINT_LEAST8_g = FAIL;
-hid_t H5T_NATIVE_INT_FAST8_g = FAIL;
-hid_t H5T_NATIVE_UINT_FAST8_g = FAIL;
-
-hid_t H5T_NATIVE_INT16_g = FAIL;
-hid_t H5T_NATIVE_UINT16_g = FAIL;
-hid_t H5T_NATIVE_INT_LEAST16_g = FAIL;
-hid_t H5T_NATIVE_UINT_LEAST16_g = FAIL;
-hid_t H5T_NATIVE_INT_FAST16_g = FAIL;
-hid_t H5T_NATIVE_UINT_FAST16_g = FAIL;
-
-hid_t H5T_NATIVE_INT32_g = FAIL;
-hid_t H5T_NATIVE_UINT32_g = FAIL;
-hid_t H5T_NATIVE_INT_LEAST32_g = FAIL;
-hid_t H5T_NATIVE_UINT_LEAST32_g = FAIL;
-hid_t H5T_NATIVE_INT_FAST32_g = FAIL;
-hid_t H5T_NATIVE_UINT_FAST32_g = FAIL;
-
-hid_t H5T_NATIVE_INT64_g = FAIL;
-hid_t H5T_NATIVE_UINT64_g = FAIL;
-hid_t H5T_NATIVE_INT_LEAST64_g = FAIL;
-hid_t H5T_NATIVE_UINT_LEAST64_g = FAIL;
-hid_t H5T_NATIVE_INT_FAST64_g = FAIL;
-hid_t H5T_NATIVE_UINT_FAST64_g = FAIL;
+hid_t H5T_NATIVE_B8_g = FAIL;
+hid_t H5T_NATIVE_B16_g = FAIL;
+hid_t H5T_NATIVE_B32_g = FAIL;
+hid_t H5T_NATIVE_B64_g = FAIL;
+hid_t H5T_NATIVE_OPAQUE_g = FAIL;
+hid_t H5T_NATIVE_HADDR_g = FAIL;
+hid_t H5T_NATIVE_HSIZE_g = FAIL;
+hid_t H5T_NATIVE_HSSIZE_g = FAIL;
+hid_t H5T_NATIVE_HERR_g = FAIL;
+hid_t H5T_NATIVE_HBOOL_g = FAIL;
+
+hid_t H5T_NATIVE_INT8_g = FAIL;
+hid_t H5T_NATIVE_UINT8_g = FAIL;
+hid_t H5T_NATIVE_INT_LEAST8_g = FAIL;
+hid_t H5T_NATIVE_UINT_LEAST8_g = FAIL;
+hid_t H5T_NATIVE_INT_FAST8_g = FAIL;
+hid_t H5T_NATIVE_UINT_FAST8_g = FAIL;
+
+hid_t H5T_NATIVE_INT16_g = FAIL;
+hid_t H5T_NATIVE_UINT16_g = FAIL;
+hid_t H5T_NATIVE_INT_LEAST16_g = FAIL;
+hid_t H5T_NATIVE_UINT_LEAST16_g = FAIL;
+hid_t H5T_NATIVE_INT_FAST16_g = FAIL;
+hid_t H5T_NATIVE_UINT_FAST16_g = FAIL;
+
+hid_t H5T_NATIVE_INT32_g = FAIL;
+hid_t H5T_NATIVE_UINT32_g = FAIL;
+hid_t H5T_NATIVE_INT_LEAST32_g = FAIL;
+hid_t H5T_NATIVE_UINT_LEAST32_g = FAIL;
+hid_t H5T_NATIVE_INT_FAST32_g = FAIL;
+hid_t H5T_NATIVE_UINT_FAST32_g = FAIL;
+
+hid_t H5T_NATIVE_INT64_g = FAIL;
+hid_t H5T_NATIVE_UINT64_g = FAIL;
+hid_t H5T_NATIVE_INT_LEAST64_g = FAIL;
+hid_t H5T_NATIVE_UINT_LEAST64_g = FAIL;
+hid_t H5T_NATIVE_INT_FAST64_g = FAIL;
+hid_t H5T_NATIVE_UINT_FAST64_g = FAIL;
/*
* Alignment constraints for native types. These are initialized at run time
@@ -414,45 +425,45 @@ hid_t H5T_NATIVE_UINT_FAST64_g = FAIL;
* datatype or C structures, which are different from the alignments for memory
* address below this group of variables.
*/
-size_t H5T_NATIVE_SCHAR_COMP_ALIGN_g = 0;
-size_t H5T_NATIVE_UCHAR_COMP_ALIGN_g = 0;
-size_t H5T_NATIVE_SHORT_COMP_ALIGN_g = 0;
-size_t H5T_NATIVE_USHORT_COMP_ALIGN_g = 0;
-size_t H5T_NATIVE_INT_COMP_ALIGN_g = 0;
-size_t H5T_NATIVE_UINT_COMP_ALIGN_g = 0;
-size_t H5T_NATIVE_LONG_COMP_ALIGN_g = 0;
-size_t H5T_NATIVE_ULONG_COMP_ALIGN_g = 0;
-size_t H5T_NATIVE_LLONG_COMP_ALIGN_g = 0;
-size_t H5T_NATIVE_ULLONG_COMP_ALIGN_g = 0;
-size_t H5T_NATIVE_FLOAT_COMP_ALIGN_g = 0;
-size_t H5T_NATIVE_DOUBLE_COMP_ALIGN_g = 0;
+size_t H5T_NATIVE_SCHAR_COMP_ALIGN_g = 0;
+size_t H5T_NATIVE_UCHAR_COMP_ALIGN_g = 0;
+size_t H5T_NATIVE_SHORT_COMP_ALIGN_g = 0;
+size_t H5T_NATIVE_USHORT_COMP_ALIGN_g = 0;
+size_t H5T_NATIVE_INT_COMP_ALIGN_g = 0;
+size_t H5T_NATIVE_UINT_COMP_ALIGN_g = 0;
+size_t H5T_NATIVE_LONG_COMP_ALIGN_g = 0;
+size_t H5T_NATIVE_ULONG_COMP_ALIGN_g = 0;
+size_t H5T_NATIVE_LLONG_COMP_ALIGN_g = 0;
+size_t H5T_NATIVE_ULLONG_COMP_ALIGN_g = 0;
+size_t H5T_NATIVE_FLOAT_COMP_ALIGN_g = 0;
+size_t H5T_NATIVE_DOUBLE_COMP_ALIGN_g = 0;
#if H5_SIZEOF_LONG_DOUBLE !=0
-size_t H5T_NATIVE_LDOUBLE_COMP_ALIGN_g = 0;
+size_t H5T_NATIVE_LDOUBLE_COMP_ALIGN_g = 0;
#endif
-size_t H5T_POINTER_COMP_ALIGN_g = 0;
-size_t H5T_HVL_COMP_ALIGN_g = 0;
-size_t H5T_HOBJREF_COMP_ALIGN_g = 0;
-size_t H5T_HDSETREGREF_COMP_ALIGN_g = 0;
+size_t H5T_POINTER_COMP_ALIGN_g = 0;
+size_t H5T_HVL_COMP_ALIGN_g = 0;
+size_t H5T_HOBJREF_COMP_ALIGN_g = 0;
+size_t H5T_HDSETREGREF_COMP_ALIGN_g = 0;
/*
* Alignment constraints for native types. These are initialized at run time
* in H5Tinit.c
*/
-size_t H5T_NATIVE_SCHAR_ALIGN_g = 0;
-size_t H5T_NATIVE_UCHAR_ALIGN_g = 0;
-size_t H5T_NATIVE_SHORT_ALIGN_g = 0;
-size_t H5T_NATIVE_USHORT_ALIGN_g = 0;
-size_t H5T_NATIVE_INT_ALIGN_g = 0;
-size_t H5T_NATIVE_UINT_ALIGN_g = 0;
-size_t H5T_NATIVE_LONG_ALIGN_g = 0;
-size_t H5T_NATIVE_ULONG_ALIGN_g = 0;
-size_t H5T_NATIVE_LLONG_ALIGN_g = 0;
-size_t H5T_NATIVE_ULLONG_ALIGN_g = 0;
-size_t H5T_NATIVE_FLOAT_ALIGN_g = 0;
-size_t H5T_NATIVE_DOUBLE_ALIGN_g = 0;
+size_t H5T_NATIVE_SCHAR_ALIGN_g = 0;
+size_t H5T_NATIVE_UCHAR_ALIGN_g = 0;
+size_t H5T_NATIVE_SHORT_ALIGN_g = 0;
+size_t H5T_NATIVE_USHORT_ALIGN_g = 0;
+size_t H5T_NATIVE_INT_ALIGN_g = 0;
+size_t H5T_NATIVE_UINT_ALIGN_g = 0;
+size_t H5T_NATIVE_LONG_ALIGN_g = 0;
+size_t H5T_NATIVE_ULONG_ALIGN_g = 0;
+size_t H5T_NATIVE_LLONG_ALIGN_g = 0;
+size_t H5T_NATIVE_ULLONG_ALIGN_g = 0;
+size_t H5T_NATIVE_FLOAT_ALIGN_g = 0;
+size_t H5T_NATIVE_DOUBLE_ALIGN_g = 0;
#if H5_SIZEOF_LONG_DOUBLE !=0
-size_t H5T_NATIVE_LDOUBLE_ALIGN_g = 0;
+size_t H5T_NATIVE_LDOUBLE_ALIGN_g = 0;
#endif
/*
@@ -460,40 +471,40 @@ size_t H5T_NATIVE_LDOUBLE_ALIGN_g = 0;
* H5Tinit.c if the types are provided by the system. Otherwise we set their
* values to 0 here (no alignment calculated).
*/
-size_t H5T_NATIVE_INT8_ALIGN_g = 0;
-size_t H5T_NATIVE_UINT8_ALIGN_g = 0;
-size_t H5T_NATIVE_INT_LEAST8_ALIGN_g = 0;
-size_t H5T_NATIVE_UINT_LEAST8_ALIGN_g = 0;
-size_t H5T_NATIVE_INT_FAST8_ALIGN_g = 0;
-size_t H5T_NATIVE_UINT_FAST8_ALIGN_g = 0;
-
-size_t H5T_NATIVE_INT16_ALIGN_g = 0;
-size_t H5T_NATIVE_UINT16_ALIGN_g = 0;
-size_t H5T_NATIVE_INT_LEAST16_ALIGN_g = 0;
-size_t H5T_NATIVE_UINT_LEAST16_ALIGN_g = 0;
-size_t H5T_NATIVE_INT_FAST16_ALIGN_g = 0;
-size_t H5T_NATIVE_UINT_FAST16_ALIGN_g = 0;
-
-size_t H5T_NATIVE_INT32_ALIGN_g = 0;
-size_t H5T_NATIVE_UINT32_ALIGN_g = 0;
-size_t H5T_NATIVE_INT_LEAST32_ALIGN_g = 0;
-size_t H5T_NATIVE_UINT_LEAST32_ALIGN_g = 0;
-size_t H5T_NATIVE_INT_FAST32_ALIGN_g = 0;
-size_t H5T_NATIVE_UINT_FAST32_ALIGN_g = 0;
-
-size_t H5T_NATIVE_INT64_ALIGN_g = 0;
-size_t H5T_NATIVE_UINT64_ALIGN_g = 0;
-size_t H5T_NATIVE_INT_LEAST64_ALIGN_g = 0;
-size_t H5T_NATIVE_UINT_LEAST64_ALIGN_g = 0;
-size_t H5T_NATIVE_INT_FAST64_ALIGN_g = 0;
-size_t H5T_NATIVE_UINT_FAST64_ALIGN_g = 0;
+size_t H5T_NATIVE_INT8_ALIGN_g = 0;
+size_t H5T_NATIVE_UINT8_ALIGN_g = 0;
+size_t H5T_NATIVE_INT_LEAST8_ALIGN_g = 0;
+size_t H5T_NATIVE_UINT_LEAST8_ALIGN_g = 0;
+size_t H5T_NATIVE_INT_FAST8_ALIGN_g = 0;
+size_t H5T_NATIVE_UINT_FAST8_ALIGN_g = 0;
+
+size_t H5T_NATIVE_INT16_ALIGN_g = 0;
+size_t H5T_NATIVE_UINT16_ALIGN_g = 0;
+size_t H5T_NATIVE_INT_LEAST16_ALIGN_g = 0;
+size_t H5T_NATIVE_UINT_LEAST16_ALIGN_g = 0;
+size_t H5T_NATIVE_INT_FAST16_ALIGN_g = 0;
+size_t H5T_NATIVE_UINT_FAST16_ALIGN_g = 0;
+
+size_t H5T_NATIVE_INT32_ALIGN_g = 0;
+size_t H5T_NATIVE_UINT32_ALIGN_g = 0;
+size_t H5T_NATIVE_INT_LEAST32_ALIGN_g = 0;
+size_t H5T_NATIVE_UINT_LEAST32_ALIGN_g = 0;
+size_t H5T_NATIVE_INT_FAST32_ALIGN_g = 0;
+size_t H5T_NATIVE_UINT_FAST32_ALIGN_g = 0;
+
+size_t H5T_NATIVE_INT64_ALIGN_g = 0;
+size_t H5T_NATIVE_UINT64_ALIGN_g = 0;
+size_t H5T_NATIVE_INT_LEAST64_ALIGN_g = 0;
+size_t H5T_NATIVE_UINT_LEAST64_ALIGN_g = 0;
+size_t H5T_NATIVE_INT_FAST64_ALIGN_g = 0;
+size_t H5T_NATIVE_UINT_FAST64_ALIGN_g = 0;
/* Useful floating-point values for conversion routines */
/* (+/- Inf for all floating-point types) */
-float H5T_NATIVE_FLOAT_POS_INF_g = 0.0f;
-float H5T_NATIVE_FLOAT_NEG_INF_g = 0.0f;
-double H5T_NATIVE_DOUBLE_POS_INF_g = (double)0.0f;
-double H5T_NATIVE_DOUBLE_NEG_INF_g = (double)0.0f;
+float H5T_NATIVE_FLOAT_POS_INF_g = 0.0f;
+float H5T_NATIVE_FLOAT_NEG_INF_g = 0.0f;
+double H5T_NATIVE_DOUBLE_POS_INF_g = (double)0.0f;
+double H5T_NATIVE_DOUBLE_NEG_INF_g = (double)0.0f;
/* Declare the free list for H5T_t's and H5T_shared_t's */
H5FL_DEFINE(H5T_t);
@@ -509,12 +520,12 @@ H5FL_DEFINE(H5T_shared_t);
* which is used as the key by which the `entries' array is sorted.
*/
static struct {
- int npaths; /*number of paths defined */
- size_t apaths; /*number of paths allocated */
- H5T_path_t **path; /*sorted array of path pointers */
- int nsoft; /*number of soft conversions defined */
- size_t asoft; /*number of soft conversions allocated */
- H5T_soft_t *soft; /*unsorted array of soft conversions */
+ int npaths; /*number of paths defined */
+ size_t apaths; /*number of paths allocated */
+ H5T_path_t **path; /*sorted array of path pointers */
+ int nsoft; /*number of soft conversions defined */
+ size_t asoft; /*number of soft conversions allocated */
+ H5T_soft_t *soft; /*unsorted array of soft conversions */
} H5T_g;
/* Declare the free list for H5T_path_t's */
@@ -522,10 +533,10 @@ H5FL_DEFINE_STATIC(H5T_path_t);
/* Datatype ID class */
static const H5I_class_t H5I_DATATYPE_CLS[1] = {{
- H5I_DATATYPE, /* ID class value */
- 0, /* Class flags */
- 8, /* # of reserved IDs for class */
- (H5I_free_t)H5T_close /* Callback routine for closing objects of this class */
+ H5I_DATATYPE, /* ID class value */
+ 0, /* Class flags */
+ 8, /* # of reserved IDs for class */
+ (H5I_free_t)H5T_close /* Callback routine for closing objects of this class */
}};
/* Flag indicating "top" of interface has been initialized */
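The H5I_DATATYPE_CLS hunk above is whitespace-only, but it shows the ID-class pattern the H5T code relies on: each ID class registers a free callback ((H5I_free_t)H5T_close here) that the ID layer invokes when an ID of that class is released. A stripped-down sketch of that callback-per-class idea, with invented names (id_class_t, release_object) rather than the real H5I API:

#include <stdio.h>

typedef int (*free_fn_t)(void *obj);                 /* stand-in for H5I_free_t */

typedef struct {
    const char *name;                                /* class name, for printing */
    free_fn_t   free_cb;                             /* called when an ID closes */
} id_class_t;

static int close_datatype(void *obj)
{
    printf("closing datatype object at %p\n", obj);
    return 0;
}

/* One class definition, analogous to the H5I_DATATYPE_CLS table above. */
static const id_class_t datatype_cls = { "datatype", close_datatype };

/* Release an object through its class's callback. */
static int release_object(const id_class_t *cls, void *obj)
{
    return cls->free_cb ? cls->free_cb(obj) : 0;
}

int main(void)
{
    int dummy = 42;                                  /* stands in for an H5T_t */
    return release_object(&datatype_cls, &dummy);
}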
@@ -534,14 +545,14 @@ static hbool_t H5T_top_package_initialize_s = FALSE;
/*-------------------------------------------------------------------------
- * Function: H5T_init
+ * Function: H5T_init
*
- * Purpose: Initialize the interface from some other package.
+ * Purpose: Initialize the interface from some other package.
*
- * Return: Success: non-negative
- * Failure: negative
+ * Return: Success: non-negative
+ * Failure: negative
*
- * Programmer: Robb Matzke
+ * Programmer: Robb Matzke
* Wednesday, December 16, 1998
*
*-------------------------------------------------------------------------
@@ -560,15 +571,15 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5T__init_inf
+ * Function: H5T__init_inf
*
- * Purpose: Initialize the +/- Infinity floating-point values for type
+ * Purpose: Initialize the +/- Infinity floating-point values for type
* conversion.
*
- * Return: Success: non-negative
- * Failure: negative
+ * Return: Success: non-negative
+ * Failure: negative
*
- * Programmer: Quincey Koziol
+ * Programmer: Quincey Koziol
* Saturday, November 22, 2003
*
*-------------------------------------------------------------------------
@@ -576,12 +587,12 @@ done:
static herr_t
H5T__init_inf(void)
{
- H5T_t *dst_p; /* Datatype type operate on */
- H5T_atomic_t *dst; /* Datatype's atomic info */
- uint8_t *d; /* Pointer to value to set */
- size_t half_size; /* Half the type size */
- size_t u; /* Local index value */
- herr_t ret_value = SUCCEED; /* Return value */
+ H5T_t *dst_p; /* Datatype type operate on */
+ H5T_atomic_t *dst; /* Datatype's atomic info */
+ uint8_t *d; /* Pointer to value to set */
+ size_t half_size; /* Half the type size */
+ size_t u; /* Local index value */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_STATIC
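H5T__init_inf(), whose locals are merely re-aligned above, fills in the +/- infinity bit patterns used by the conversion routines. As background, a self-contained sketch of how an IEEE-754 single-precision infinity is built by hand (sign bit as requested, exponent field all ones, mantissa zero); this is only an illustration, not the HDF5 routine:

#include <stdio.h>
#include <stdint.h>
#include <string.h>

static float make_inf(int negative)
{
    /* 0x7F800000: exponent field all ones, mantissa zero -> infinity. */
    uint32_t bits = (negative ? 0x80000000u : 0u) | 0x7F800000u;
    float    f;

    memcpy(&f, &bits, sizeof f);                     /* reinterpret the bit pattern */
    return f;
}

int main(void)
{
    printf("+inf = %f\n", make_inf(0));
    printf("-inf = %f\n", make_inf(1));
    return 0;
}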
@@ -709,26 +720,26 @@ H5T__init_package(void)
H5T_t *std_u32be=NULL; /* Datatype structure for unsigned 32-bit big-endian integer */
H5T_t *std_u64le=NULL; /* Datatype structure for unsigned 64-bit little-endian integer */
H5T_t *std_u64be=NULL; /* Datatype structure for unsigned 64-bit big-endian integer */
- H5T_t *dt = NULL;
- H5T_t *fixedpt=NULL; /* Datatype structure for native int */
- H5T_t *floatpt=NULL; /* Datatype structure for native float */
- H5T_t *string=NULL; /* Datatype structure for C string */
- H5T_t *bitfield=NULL; /* Datatype structure for bitfield */
- H5T_t *compound=NULL; /* Datatype structure for compound objects */
- H5T_t *enum_type=NULL; /* Datatype structure for enum objects */
- H5T_t *vlen=NULL; /* Datatype structure for vlen objects */
- H5T_t *array=NULL; /* Datatype structure for array objects */
- H5T_t *objref=NULL; /* Datatype structure for object reference objects */
+ H5T_t *dt = NULL;
+ H5T_t *fixedpt=NULL; /* Datatype structure for native int */
+ H5T_t *floatpt=NULL; /* Datatype structure for native float */
+ H5T_t *string=NULL; /* Datatype structure for C string */
+ H5T_t *bitfield=NULL; /* Datatype structure for bitfield */
+ H5T_t *compound=NULL; /* Datatype structure for compound objects */
+ H5T_t *enum_type=NULL; /* Datatype structure for enum objects */
+ H5T_t *vlen=NULL; /* Datatype structure for vlen objects */
+ H5T_t *array=NULL; /* Datatype structure for array objects */
+ H5T_t *objref=NULL; /* Datatype structure for object reference objects */
hsize_t dim[1]={1}; /* Dimension info for array datatype */
- herr_t status;
+ herr_t status;
unsigned copied_dtype=1; /* Flag to indicate whether datatype was copied or allocated (for error cleanup) */
- herr_t ret_value = SUCCEED; /* Return value */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT
/* Initialize the atom group for the file IDs */
if(H5I_register_type(H5I_DATATYPE_CLS) < 0)
- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to initialize interface")
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to initialize interface")
/* Make certain there aren't too many classes of datatypes defined */
/* Only 16 (numbered 0-15) are supported in the current file format */
@@ -739,7 +750,7 @@ H5T__init_package(void)
* the library configuration by H5detect.
*/
if(H5T__init_native() < 0)
- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to initialize interface")
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to initialize interface")
/* Get the atomic datatype structures needed by the initialization code below */
if(NULL == (native_schar = (H5T_t *)H5I_object(H5T_NATIVE_SCHAR_g)))
@@ -898,29 +909,29 @@ H5T__init_package(void)
*/
/* little-endian (order is irrelevant) 8-bit bitfield */
- H5T_INIT_TYPE(BITFIELD, H5T_STD_B8LE_g, COPY, std_u8le, NOSET, -)
+ H5T_INIT_TYPE(BITFIELDLE, H5T_STD_B8LE_g, COPY, std_u8le, NOSET, -)
bitfield=dt; /* Keep type for later */
/* big-endian (order is irrelevant) 8-bit bitfield */
- H5T_INIT_TYPE(BITFIELD, H5T_STD_B8BE_g, COPY, std_u8be, NOSET, -)
+ H5T_INIT_TYPE(BITFIELDBE, H5T_STD_B8BE_g, COPY, std_u8be, NOSET, -)
/* Little-endian 16-bit bitfield */
- H5T_INIT_TYPE(BITFIELD, H5T_STD_B16LE_g, COPY, std_u16le, NOSET, -)
+ H5T_INIT_TYPE(BITFIELDLE, H5T_STD_B16LE_g, COPY, std_u16le, NOSET, -)
/* Big-endian 16-bit bitfield */
- H5T_INIT_TYPE(BITFIELD, H5T_STD_B16BE_g, COPY, std_u16be, NOSET, -)
+ H5T_INIT_TYPE(BITFIELDBE, H5T_STD_B16BE_g, COPY, std_u16be, NOSET, -)
/* Little-endian 32-bit bitfield */
- H5T_INIT_TYPE(BITFIELD, H5T_STD_B32LE_g, COPY, std_u32le, NOSET, -)
+ H5T_INIT_TYPE(BITFIELDLE, H5T_STD_B32LE_g, COPY, std_u32le, NOSET, -)
/* Big-endian 32-bit bitfield */
- H5T_INIT_TYPE(BITFIELD, H5T_STD_B32BE_g, COPY, std_u32be, NOSET, -)
+ H5T_INIT_TYPE(BITFIELDBE, H5T_STD_B32BE_g, COPY, std_u32be, NOSET, -)
/* Little-endian 64-bit bitfield */
- H5T_INIT_TYPE(BITFIELD, H5T_STD_B64LE_g, COPY, std_u64le, NOSET, -)
+ H5T_INIT_TYPE(BITFIELDLE, H5T_STD_B64LE_g, COPY, std_u64le, NOSET, -)
/* Big-endian 64-bit bitfield */
- H5T_INIT_TYPE(BITFIELD, H5T_STD_B64BE_g, COPY, std_u64be, NOSET, -)
+ H5T_INIT_TYPE(BITFIELDBE, H5T_STD_B64BE_g, COPY, std_u64be, NOSET, -)
/*------------------------------------------------------------
* The Unix architecture for dates and times.
@@ -984,13 +995,13 @@ H5T__init_package(void)
fixedpt = native_int;
floatpt = native_float;
if (NULL == (compound = H5T__create(H5T_COMPOUND, (size_t)1)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype")
if (NULL == (enum_type = H5T__create(H5T_ENUM, (size_t)1)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype")
if (NULL == (vlen = H5T__vlen_create(native_int)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype")
if (NULL == (array = H5T__array_create(native_int, 1, dim)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype")
status = 0;
status |= H5T_register(H5T_PERS_SOFT, "i_i", fixedpt, fixedpt, H5T__conv_i_i, H5AC_noio_dxpl_id, FALSE);
@@ -1249,7 +1260,7 @@ H5T__init_package(void)
status |= H5T__init_inf();
if(status < 0)
- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to register conversion function(s)")
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to register conversion function(s)")
/* Register datatype creation property class properties here. See similar
* code in H5D__init_package(), etc. for example.
@@ -1297,15 +1308,15 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5T__unlock_cb
+ * Function: H5T__unlock_cb
*
- * Purpose: Clear the immutable flag for a datatype. This function is
- * called when the library is closing in order to unlock all
- * registered datatypes and thus make them free-able.
+ * Purpose: Clear the immutable flag for a datatype. This function is
+ * called when the library is closing in order to unlock all
+ * registered datatypes and thus make them free-able.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Non-negative on success/Negative on failure
*
- * Programmer: Robb Matzke
+ * Programmer: Robb Matzke
* Monday, April 27, 1998
*
*-------------------------------------------------------------------------
@@ -1313,16 +1324,15 @@ done:
static int
H5T__unlock_cb(void *_dt, hid_t H5_ATTR_UNUSED id, void *_udata)
{
- H5T_t *dt = (H5T_t *)_dt;
- int *n = (int *)_udata;
+ H5T_t *dt = (H5T_t *)_dt;
+ int *n = (int *)_udata;
FUNC_ENTER_STATIC_NOERR
HDassert(dt && dt->shared);
if(H5T_STATE_IMMUTABLE==dt->shared->state) {
- dt->shared->state = H5T_STATE_RDONLY;
-
+ dt->shared->state = H5T_STATE_RDONLY;
(*n)++;
} /* end if */
@@ -1331,15 +1341,15 @@ H5T__unlock_cb(void *_dt, hid_t H5_ATTR_UNUSED id, void *_udata)
/*-------------------------------------------------------------------------
- * Function: H5T_top_term_package
+ * Function: H5T_top_term_package
*
- * Purpose: Close the "top" of the interface, releasing IDs, etc.
+ * Purpose: Close the "top" of the interface, releasing IDs, etc.
*
- * Return: Success: Positive if any action might have caused a
- * change in some other interface; zero otherwise.
- * Failure: Negative
+ * Return: Success: Positive if any action might have caused a
+ * change in some other interface; zero otherwise.
+ * Failure: Negative
*
- * Programmer: Quincey Koziol
+ * Programmer: Quincey Koziol
* Thursday, September 10, 2015
*
*-------------------------------------------------------------------------
@@ -1347,13 +1357,13 @@ H5T__unlock_cb(void *_dt, hid_t H5_ATTR_UNUSED id, void *_udata)
int
H5T_top_term_package(void)
{
- int n = 0;
+ int n = 0;
FUNC_ENTER_NOAPI_NOINIT_NOERR
if(H5T_top_package_initialize_s) {
- /* Unregister all conversion functions */
- if(H5T_g.path) {
+ /* Unregister all conversion functions */
+ if(H5T_g.path) {
int i, nprint = 0;
for(i = 0; i < H5T_g.npaths; i++) {
@@ -1369,9 +1379,9 @@ H5T_top_term_package(void)
#ifdef H5T_DEBUG
if (H5DEBUG(T)) {
fprintf(H5DEBUG(T), "H5T: conversion function "
- "0x%08lx failed to free private data for "
- "%s (ignored)\n",
- (unsigned long)(path->func), path->name);
+ "0x%08lx failed to free private data for "
+ "%s (ignored)\n",
+ (unsigned long)(path->func), path->name);
} /* end if */
#endif
H5E_clear_stack(NULL); /*ignore the error*/
@@ -1397,118 +1407,118 @@ H5T_top_term_package(void)
n++;
} /* end if */
- /* Unlock all datatypes, then free them */
- /* note that we are ignoring the return value from H5I_iterate() */
+ /* Unlock all datatypes, then free them */
+ /* note that we are ignoring the return value from H5I_iterate() */
/* Also note that we are incrementing 'n' in the callback */
- H5I_iterate(H5I_DATATYPE, H5T__unlock_cb, &n, FALSE);
+ H5I_iterate(H5I_DATATYPE, H5T__unlock_cb, &n, FALSE);
/* Release all datatype IDs */
- if(H5I_nmembers(H5I_DATATYPE) > 0) {
- (void)H5I_clear_type(H5I_DATATYPE, FALSE, FALSE);
+ if(H5I_nmembers(H5I_DATATYPE) > 0) {
+ (void)H5I_clear_type(H5I_DATATYPE, FALSE, FALSE);
n++; /*H5I*/
- } /* end if */
+ } /* end if */
/* Reset all the datatype IDs */
if(H5T_IEEE_F32BE_g > 0) {
- H5T_IEEE_F32BE_g = FAIL;
- H5T_IEEE_F32LE_g = FAIL;
- H5T_IEEE_F64BE_g = FAIL;
- H5T_IEEE_F64LE_g = FAIL;
-
- H5T_STD_I8BE_g = FAIL;
- H5T_STD_I8LE_g = FAIL;
- H5T_STD_I16BE_g = FAIL;
- H5T_STD_I16LE_g = FAIL;
- H5T_STD_I32BE_g = FAIL;
- H5T_STD_I32LE_g = FAIL;
- H5T_STD_I64BE_g = FAIL;
- H5T_STD_I64LE_g = FAIL;
- H5T_STD_U8BE_g = FAIL;
- H5T_STD_U8LE_g = FAIL;
- H5T_STD_U16BE_g = FAIL;
- H5T_STD_U16LE_g = FAIL;
- H5T_STD_U32BE_g = FAIL;
- H5T_STD_U32LE_g = FAIL;
- H5T_STD_U64BE_g = FAIL;
- H5T_STD_U64LE_g = FAIL;
- H5T_STD_B8BE_g = FAIL;
- H5T_STD_B8LE_g = FAIL;
- H5T_STD_B16BE_g = FAIL;
- H5T_STD_B16LE_g = FAIL;
- H5T_STD_B32BE_g = FAIL;
- H5T_STD_B32LE_g = FAIL;
- H5T_STD_B64BE_g = FAIL;
- H5T_STD_B64LE_g = FAIL;
- H5T_STD_REF_OBJ_g = FAIL;
- H5T_STD_REF_DSETREG_g = FAIL;
-
- H5T_UNIX_D32BE_g = FAIL;
- H5T_UNIX_D32LE_g = FAIL;
- H5T_UNIX_D64BE_g = FAIL;
- H5T_UNIX_D64LE_g = FAIL;
-
- H5T_C_S1_g = FAIL;
-
- H5T_FORTRAN_S1_g = FAIL;
-
- H5T_NATIVE_SCHAR_g = FAIL;
- H5T_NATIVE_UCHAR_g = FAIL;
- H5T_NATIVE_SHORT_g = FAIL;
- H5T_NATIVE_USHORT_g = FAIL;
- H5T_NATIVE_INT_g = FAIL;
- H5T_NATIVE_UINT_g = FAIL;
- H5T_NATIVE_LONG_g = FAIL;
- H5T_NATIVE_ULONG_g = FAIL;
- H5T_NATIVE_LLONG_g = FAIL;
- H5T_NATIVE_ULLONG_g = FAIL;
- H5T_NATIVE_FLOAT_g = FAIL;
- H5T_NATIVE_DOUBLE_g = FAIL;
+ H5T_IEEE_F32BE_g = FAIL;
+ H5T_IEEE_F32LE_g = FAIL;
+ H5T_IEEE_F64BE_g = FAIL;
+ H5T_IEEE_F64LE_g = FAIL;
+
+ H5T_STD_I8BE_g = FAIL;
+ H5T_STD_I8LE_g = FAIL;
+ H5T_STD_I16BE_g = FAIL;
+ H5T_STD_I16LE_g = FAIL;
+ H5T_STD_I32BE_g = FAIL;
+ H5T_STD_I32LE_g = FAIL;
+ H5T_STD_I64BE_g = FAIL;
+ H5T_STD_I64LE_g = FAIL;
+ H5T_STD_U8BE_g = FAIL;
+ H5T_STD_U8LE_g = FAIL;
+ H5T_STD_U16BE_g = FAIL;
+ H5T_STD_U16LE_g = FAIL;
+ H5T_STD_U32BE_g = FAIL;
+ H5T_STD_U32LE_g = FAIL;
+ H5T_STD_U64BE_g = FAIL;
+ H5T_STD_U64LE_g = FAIL;
+ H5T_STD_B8BE_g = FAIL;
+ H5T_STD_B8LE_g = FAIL;
+ H5T_STD_B16BE_g = FAIL;
+ H5T_STD_B16LE_g = FAIL;
+ H5T_STD_B32BE_g = FAIL;
+ H5T_STD_B32LE_g = FAIL;
+ H5T_STD_B64BE_g = FAIL;
+ H5T_STD_B64LE_g = FAIL;
+ H5T_STD_REF_OBJ_g = FAIL;
+ H5T_STD_REF_DSETREG_g = FAIL;
+
+ H5T_UNIX_D32BE_g = FAIL;
+ H5T_UNIX_D32LE_g = FAIL;
+ H5T_UNIX_D64BE_g = FAIL;
+ H5T_UNIX_D64LE_g = FAIL;
+
+ H5T_C_S1_g = FAIL;
+
+ H5T_FORTRAN_S1_g = FAIL;
+
+ H5T_NATIVE_SCHAR_g = FAIL;
+ H5T_NATIVE_UCHAR_g = FAIL;
+ H5T_NATIVE_SHORT_g = FAIL;
+ H5T_NATIVE_USHORT_g = FAIL;
+ H5T_NATIVE_INT_g = FAIL;
+ H5T_NATIVE_UINT_g = FAIL;
+ H5T_NATIVE_LONG_g = FAIL;
+ H5T_NATIVE_ULONG_g = FAIL;
+ H5T_NATIVE_LLONG_g = FAIL;
+ H5T_NATIVE_ULLONG_g = FAIL;
+ H5T_NATIVE_FLOAT_g = FAIL;
+ H5T_NATIVE_DOUBLE_g = FAIL;
#if H5_SIZEOF_LONG_DOUBLE !=0
- H5T_NATIVE_LDOUBLE_g = FAIL;
+ H5T_NATIVE_LDOUBLE_g = FAIL;
#endif
- H5T_NATIVE_B8_g = FAIL;
- H5T_NATIVE_B16_g = FAIL;
- H5T_NATIVE_B32_g = FAIL;
- H5T_NATIVE_B64_g = FAIL;
- H5T_NATIVE_OPAQUE_g = FAIL;
- H5T_NATIVE_HADDR_g = FAIL;
- H5T_NATIVE_HSIZE_g = FAIL;
- H5T_NATIVE_HSSIZE_g = FAIL;
- H5T_NATIVE_HERR_g = FAIL;
- H5T_NATIVE_HBOOL_g = FAIL;
-
- H5T_NATIVE_INT8_g = FAIL;
- H5T_NATIVE_UINT8_g = FAIL;
- H5T_NATIVE_INT_LEAST8_g = FAIL;
- H5T_NATIVE_UINT_LEAST8_g = FAIL;
- H5T_NATIVE_INT_FAST8_g = FAIL;
- H5T_NATIVE_UINT_FAST8_g = FAIL;
-
- H5T_NATIVE_INT16_g = FAIL;
- H5T_NATIVE_UINT16_g = FAIL;
- H5T_NATIVE_INT_LEAST16_g = FAIL;
- H5T_NATIVE_UINT_LEAST16_g = FAIL;
- H5T_NATIVE_INT_FAST16_g = FAIL;
- H5T_NATIVE_UINT_FAST16_g = FAIL;
-
- H5T_NATIVE_INT32_g = FAIL;
- H5T_NATIVE_UINT32_g = FAIL;
- H5T_NATIVE_INT_LEAST32_g = FAIL;
- H5T_NATIVE_UINT_LEAST32_g = FAIL;
- H5T_NATIVE_INT_FAST32_g = FAIL;
- H5T_NATIVE_UINT_FAST32_g = FAIL;
-
- H5T_NATIVE_INT64_g = FAIL;
- H5T_NATIVE_UINT64_g = FAIL;
- H5T_NATIVE_INT_LEAST64_g = FAIL;
- H5T_NATIVE_UINT_LEAST64_g = FAIL;
- H5T_NATIVE_INT_FAST64_g = FAIL;
- H5T_NATIVE_UINT_FAST64_g = FAIL;
+ H5T_NATIVE_B8_g = FAIL;
+ H5T_NATIVE_B16_g = FAIL;
+ H5T_NATIVE_B32_g = FAIL;
+ H5T_NATIVE_B64_g = FAIL;
+ H5T_NATIVE_OPAQUE_g = FAIL;
+ H5T_NATIVE_HADDR_g = FAIL;
+ H5T_NATIVE_HSIZE_g = FAIL;
+ H5T_NATIVE_HSSIZE_g = FAIL;
+ H5T_NATIVE_HERR_g = FAIL;
+ H5T_NATIVE_HBOOL_g = FAIL;
+
+ H5T_NATIVE_INT8_g = FAIL;
+ H5T_NATIVE_UINT8_g = FAIL;
+ H5T_NATIVE_INT_LEAST8_g = FAIL;
+ H5T_NATIVE_UINT_LEAST8_g = FAIL;
+ H5T_NATIVE_INT_FAST8_g = FAIL;
+ H5T_NATIVE_UINT_FAST8_g = FAIL;
+
+ H5T_NATIVE_INT16_g = FAIL;
+ H5T_NATIVE_UINT16_g = FAIL;
+ H5T_NATIVE_INT_LEAST16_g = FAIL;
+ H5T_NATIVE_UINT_LEAST16_g = FAIL;
+ H5T_NATIVE_INT_FAST16_g = FAIL;
+ H5T_NATIVE_UINT_FAST16_g = FAIL;
+
+ H5T_NATIVE_INT32_g = FAIL;
+ H5T_NATIVE_UINT32_g = FAIL;
+ H5T_NATIVE_INT_LEAST32_g = FAIL;
+ H5T_NATIVE_UINT_LEAST32_g = FAIL;
+ H5T_NATIVE_INT_FAST32_g = FAIL;
+ H5T_NATIVE_UINT_FAST32_g = FAIL;
+
+ H5T_NATIVE_INT64_g = FAIL;
+ H5T_NATIVE_UINT64_g = FAIL;
+ H5T_NATIVE_INT_LEAST64_g = FAIL;
+ H5T_NATIVE_UINT_LEAST64_g = FAIL;
+ H5T_NATIVE_INT_FAST64_g = FAIL;
+ H5T_NATIVE_UINT_FAST64_g = FAIL;
n++;
} /* end if */
- /* Mark "top" of interface as closed */
+ /* Mark "top" of interface as closed */
if(0 == n)
H5T_top_package_initialize_s = FALSE;
} /* end if */
@@ -1518,19 +1528,19 @@ H5T_top_term_package(void)
/*-------------------------------------------------------------------------
- * Function: H5T_term_package
+ * Function: H5T_term_package
*
- * Purpose: Close this interface.
+ * Purpose: Close this interface.
*
- * Note: Finishes shutting down the interface, after
- * H5T_top_term_package() is called
+ * Note: Finishes shutting down the interface, after
+ * H5T_top_term_package() is called
*
- * Return: Success: Positive if any action might have caused a
- * change in some other interface; zero
- * otherwise.
- * Failure: Negative
+ * Return: Success: Positive if any action might have caused a
+ * change in some other interface; zero
+ * otherwise.
+ * Failure: Negative
*
- * Programmer: Robb Matzke
+ * Programmer: Robb Matzke
* Friday, November 20, 1998
*
*-------------------------------------------------------------------------
@@ -1538,7 +1548,7 @@ H5T_top_term_package(void)
int
H5T_term_package(void)
{
- int n = 0;
+ int n = 0;
FUNC_ENTER_NOAPI_NOINIT_NOERR
@@ -1548,9 +1558,9 @@ H5T_term_package(void)
HDassert(FALSE == H5T_top_package_initialize_s);
/* Destroy the datatype object id group */
- n += (H5I_dec_type_ref(H5I_DATATYPE) > 0);
+ n += (H5I_dec_type_ref(H5I_DATATYPE) > 0);
- /* Mark interface as closed */
+ /* Mark interface as closed */
if(0 == n)
H5_PKG_INIT_VAR = FALSE;
} /* end if */
@@ -1560,47 +1570,44 @@ H5T_term_package(void)
/*-------------------------------------------------------------------------
- * Function: H5Tcreate
+ * Function: H5Tcreate
*
- * Purpose: Create a new type and initialize it to reasonable values.
- * The type is a member of type class TYPE and is SIZE bytes.
+ * Purpose: Create a new type and initialize it to reasonable values.
+ * The type is a member of type class TYPE and is SIZE bytes.
*
- * Return: Success: A new type identifier.
+ * Return: Success: A new type identifier.
*
- * Failure: Negative
+ * Failure: Negative
*
* Errors:
- * ARGS BADVALUE Invalid size.
- * DATATYPE CANTINIT Can't create type.
- * DATATYPE CANTREGISTER Can't register datatype atom.
- *
- * Programmer: Robb Matzke
- * Friday, December 5, 1997
- *
- * Modifications:
+ * ARGS BADVALUE Invalid size.
+ * DATATYPE CANTINIT Can't create type.
+ * DATATYPE CANTREGISTER Can't register datatype atom.
*
+ * Programmer: Robb Matzke
+ * Friday, December 5, 1997
*-------------------------------------------------------------------------
*/
hid_t
H5Tcreate(H5T_class_t type, size_t size)
{
- H5T_t *dt = NULL; /* New datatype constructed */
- hid_t ret_value; /* Return value */
+ H5T_t *dt = NULL; /* New datatype constructed */
+ hid_t ret_value; /* Return value */
FUNC_ENTER_API(FAIL)
H5TRACE2("i", "Ttz", type, size);
/* check args. We support string (fixed-size or variable-length) now. */
if(size <= 0 && size != H5T_VARIABLE)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "size must be positive")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "size must be positive")
/* create the type */
if(NULL == (dt = H5T__create(type, size)))
- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to create type")
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to create type")
/* Get an ID for the datatype */
if((ret_value = H5I_register(H5I_DATATYPE, dt, TRUE)) < 0)
- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTREGISTER, FAIL, "unable to register datatype ID")
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTREGISTER, FAIL, "unable to register datatype ID")
done:
FUNC_LEAVE_API(ret_value)
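
For context, a typical application-side use of H5Tcreate() is the sketch below: a compound datatype built to match a C struct. The struct and helper names are illustrative only and are not part of this patch.

    #include "hdf5.h"
    #include <stddef.h>                      /* offsetof */

    typedef struct {
        int    id;
        double value;
    } record_t;                              /* hypothetical application record */

    /* Create a compound datatype laid out like record_t; the caller closes the ID. */
    static hid_t
    make_record_type(void)
    {
        hid_t tid = H5Tcreate(H5T_COMPOUND, sizeof(record_t));
        if (tid < 0)
            return -1;
        if (H5Tinsert(tid, "id",    offsetof(record_t, id),    H5T_NATIVE_INT)    < 0 ||
            H5Tinsert(tid, "value", offsetof(record_t, value), H5T_NATIVE_DOUBLE) < 0) {
            H5Tclose(tid);
            return -1;
        }
        return tid;
    }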
@@ -1608,35 +1615,35 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5Tcopy
+ * Function: H5Tcopy
*
- * Purpose: Copies a datatype. The resulting datatype is not locked.
- * The datatype should be closed when no longer needed by
- * calling H5Tclose().
+ * Purpose: Copies a datatype. The resulting datatype is not locked.
+ * The datatype should be closed when no longer needed by
+ * calling H5Tclose().
*
- * Return: Success: The ID of a new datatype.
+ * Return: Success: The ID of a new datatype.
*
- * Failure: Negative
+ * Failure: Negative
*
- * Programmer: Robb Matzke
- * Tuesday, December 9, 1997
+ * Programmer: Robb Matzke
+ * Tuesday, December 9, 1997
*
* Modifications:
*
- * Robb Matzke, 4 Jun 1998
- * The returned type is always transient and unlocked. If the TYPE_ID
- * argument is a dataset instead of a datatype then this function
- * returns a transient, modifiable datatype which is a copy of the
- * dataset's datatype.
+ * Robb Matzke, 4 Jun 1998
+ * The returned type is always transient and unlocked. If the TYPE_ID
+ * argument is a dataset instead of a datatype then this function
+ * returns a transient, modifiable datatype which is a copy of the
+ * dataset's datatype.
*
*-------------------------------------------------------------------------
*/
hid_t
H5Tcopy(hid_t type_id)
{
- H5T_t *dt; /* Pointer to the datatype to copy */
- H5T_t *new_dt = NULL;
- hid_t ret_value; /* Return value */
+ H5T_t *dt; /* Pointer to the datatype to copy */
+ H5T_t *new_dt = NULL;
+ hid_t ret_value; /* Return value */
FUNC_ENTER_API(FAIL)
H5TRACE1("i", "i", type_id);
@@ -1650,7 +1657,7 @@ H5Tcopy(hid_t type_id)
case H5I_DATASET:
{
- H5D_t *dset; /* Dataset for datatype */
+ H5D_t *dset; /* Dataset for datatype */
/* The argument is a dataset handle */
if(NULL == (dset = (H5D_t *)H5I_object(type_id)))
@@ -1680,11 +1687,11 @@ H5Tcopy(hid_t type_id)
/* Copy datatype */
if(NULL == (new_dt = H5T_copy(dt, H5T_COPY_TRANSIENT)))
- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to copy");
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to copy");
/* Atomize result */
if((ret_value = H5I_register(H5I_DATATYPE, new_dt, TRUE)) < 0)
- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTREGISTER, FAIL, "unable to register datatype atom")
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTREGISTER, FAIL, "unable to register datatype atom")
done:
if(ret_value < 0)
@@ -1696,23 +1703,20 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5Tclose
- *
- * Purpose: Frees a datatype and all associated memory.
+ * Function: H5Tclose
*
- * Return: Non-negative on success/Negative on failure
+ * Purpose: Frees a datatype and all associated memory.
*
- * Programmer: Robb Matzke
- * Tuesday, December 9, 1997
- *
- * Modifications:
+ * Return: Non-negative on success/Negative on failure
*
+ * Programmer: Robb Matzke
+ * Tuesday, December 9, 1997
*-------------------------------------------------------------------------
*/
herr_t
H5Tclose(hid_t type_id)
{
- H5T_t *dt; /* Pointer to datatype to close */
+ H5T_t *dt; /* Pointer to datatype to close */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_API(FAIL)
@@ -1720,13 +1724,13 @@ H5Tclose(hid_t type_id)
/* Check args */
if(NULL == (dt = (H5T_t *)H5I_object_verify(type_id, H5I_DATATYPE)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype")
if(H5T_STATE_IMMUTABLE == dt->shared->state)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "immutable datatype")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "immutable datatype")
/* When the reference count reaches zero the resources are freed */
if(H5I_dec_app_ref(type_id) < 0)
- HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "problem freeing id")
+ HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "problem freeing id")
done:
FUNC_LEAVE_API(ret_value)
@@ -1734,34 +1738,34 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5Tequal
+ * Function: H5Tequal
*
- * Purpose: Determines if two datatypes are equal.
+ * Purpose: Determines if two datatypes are equal.
*
- * Return: Success: TRUE if equal, FALSE if unequal
+ * Return: Success: TRUE if equal, FALSE if unequal
*
- * Failure: Negative
+ * Failure: Negative
*
- * Programmer: Robb Matzke
- * Wednesday, December 10, 1997
+ * Programmer: Robb Matzke
+ * Wednesday, December 10, 1997
*
*-------------------------------------------------------------------------
*/
htri_t
H5Tequal(hid_t type1_id, hid_t type2_id)
{
- const H5T_t *dt1; /* Pointer to first datatype */
- const H5T_t *dt2; /* Pointer to second datatype */
- htri_t ret_value; /* Return value */
+ const H5T_t *dt1; /* Pointer to first datatype */
+ const H5T_t *dt2; /* Pointer to second datatype */
+ htri_t ret_value; /* Return value */
FUNC_ENTER_API(FAIL)
H5TRACE2("t", "ii", type1_id, type2_id);
/* check args */
if(NULL == (dt1 = (H5T_t *)H5I_object_verify(type1_id, H5I_DATATYPE)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype")
if(NULL == (dt2 = (H5T_t *)H5I_object_verify(type2_id, H5I_DATATYPE)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype")
ret_value = (0 == H5T_cmp(dt1, dt2, FALSE)) ? TRUE : FALSE;
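
A minimal caller-side check built on H5Tequal(), assuming hdf5.h is included; dtype_id is a hypothetical open datatype ID:

    /* Returns 1 if dtype_id matches the native int type, 0 if not, -1 on error. */
    static int
    is_native_int(hid_t dtype_id)
    {
        htri_t same = H5Tequal(dtype_id, H5T_NATIVE_INT);
        if (same < 0)
            return -1;                        /* the comparison itself failed */
        return (same > 0) ? 1 : 0;            /* TRUE / FALSE */
    }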
@@ -1771,33 +1775,33 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5Tlock
+ * Function: H5Tlock
*
- * Purpose: Locks a type, making it read only and non-destructable. This
- * is normally done by the library for predefined datatypes so
- * the application doesn't inadvertently change or delete a
- * predefined type.
+ * Purpose: Locks a type, making it read only and non-destructible. This
+ * is normally done by the library for predefined datatypes so
+ * the application doesn't inadvertently change or delete a
+ * predefined type.
*
- * Once a datatype is locked it can never be unlocked unless
- * the entire library is closed.
+ * Once a datatype is locked it can never be unlocked unless
+ * the entire library is closed.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Non-negative on success/Negative on failure
*
- * Programmer: Robb Matzke
- * Friday, January 9, 1998
+ * Programmer: Robb Matzke
+ * Friday, January 9, 1998
*
* Modifications:
*
- * Robb Matzke, 1 Jun 1998
- * It is illegal to lock a named datatype since we must allow named
- * types to be closed (to release file resources) but locking a type
- * prevents that.
+ * Robb Matzke, 1 Jun 1998
+ * It is illegal to lock a named datatype since we must allow named
+ * types to be closed (to release file resources) but locking a type
+ * prevents that.
*-------------------------------------------------------------------------
*/
herr_t
H5Tlock(hid_t type_id)
{
- H5T_t *dt; /* Datatype to operate on */
+ H5T_t *dt; /* Datatype to operate on */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_API(FAIL)
@@ -1805,12 +1809,12 @@ H5Tlock(hid_t type_id)
/* Check args */
if(NULL == (dt = (H5T_t *)H5I_object_verify(type_id,H5I_DATATYPE)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype")
if(H5T_STATE_NAMED==dt->shared->state || H5T_STATE_OPEN==dt->shared->state)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "unable to lock named datatype")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "unable to lock named datatype")
if(H5T_lock(dt, TRUE) < 0)
- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to lock transient datatype")
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to lock transient datatype")
done:
FUNC_LEAVE_API(ret_value)
@@ -1818,24 +1822,23 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5Tget_class
+ * Function: H5Tget_class
*
- * Purpose: Returns the datatype class identifier for datatype TYPE_ID.
+ * Purpose: Returns the datatype class identifier for datatype TYPE_ID.
*
- * Return: Success: One of the non-negative datatype class
- * constants.
+ * Return: Success: One of the non-negative datatype class constants.
*
- * Failure: H5T_NO_CLASS (Negative)
+ * Failure: H5T_NO_CLASS (Negative)
*
- * Programmer: Robb Matzke
- * Monday, December 8, 1997
+ * Programmer: Robb Matzke
+ * Monday, December 8, 1997
*
*-------------------------------------------------------------------------
*/
H5T_class_t
H5Tget_class(hid_t type_id)
{
- H5T_t *dt; /* Pointer to datatype */
+ H5T_t *dt; /* Pointer to datatype */
H5T_class_t ret_value; /* Return value */
FUNC_ENTER_API(H5T_NO_CLASS)
@@ -1843,7 +1846,7 @@ H5Tget_class(hid_t type_id)
/* Check args */
if(NULL == (dt = (H5T_t *)H5I_object_verify(type_id, H5I_DATATYPE)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5T_NO_CLASS, "not a datatype")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, H5T_NO_CLASS, "not a datatype")
/* Set return value */
ret_value = H5T_get_class(dt, FALSE);
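
Typical caller-side dispatch on the class constant returned by H5Tget_class() might look like this sketch (dtype_id is a hypothetical open datatype ID):

    static void
    describe_type(hid_t dtype_id)
    {
        switch (H5Tget_class(dtype_id)) {
            case H5T_INTEGER:  /* integer data */                               break;
            case H5T_FLOAT:    /* floating-point data */                        break;
            case H5T_STRING:   /* string; VL strings also report H5T_STRING */  break;
            case H5T_COMPOUND: /* struct-like data */                           break;
            case H5T_NO_CLASS: /* H5Tget_class() failed */                      break;
            default:           /* enum, array, vlen, reference, opaque, ... */  break;
        }
    }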
@@ -1854,17 +1857,16 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5T_get_class
+ * Function: H5T_get_class
*
- * Purpose: Returns the data type class identifier for a datatype ptr.
+ * Purpose: Returns the data type class identifier for a datatype ptr.
*
- * Return: Success: One of the non-negative data type class
- * constants.
+ * Return: Success: One of the non-negative data type class constants.
*
- * Failure: H5T_NO_CLASS (Negative)
+ * Failure: H5T_NO_CLASS (Negative)
*
- * Programmer: Robb Matzke
- * Monday, December 8, 1997
+ * Programmer: Robb Matzke
+ * Monday, December 8, 1997
*
* Modifications:
* Broke out from H5Tget_class - QAK - 6/4/99
@@ -1883,7 +1885,8 @@ H5T_get_class(const H5T_t *dt, htri_t internal)
/* Externally, a VL string is a string; internally, a VL string is a VL. */
if(internal) {
ret_value=dt->shared->type;
- } else {
+ }
+ else {
if(H5T_IS_VL_STRING(dt->shared))
ret_value=H5T_STRING;
else
@@ -1896,24 +1899,21 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5Tdetect_class
- *
- * Purpose: Check whether a datatype contains (or is) a certain type of
- * datatype.
+ * Function: H5Tdetect_class
*
- * Return: TRUE (1) or FALSE (0) on success/Negative on failure
+ * Purpose: Check whether a datatype contains (or is) a certain type of
+ * datatype.
*
- * Programmer: Quincey Koziol
- * Wednesday, November 29, 2000
- *
- * Modifications:
+ * Return: TRUE (1) or FALSE (0) on success/Negative on failure
*
+ * Programmer: Quincey Koziol
+ * Wednesday, November 29, 2000
*-------------------------------------------------------------------------
*/
htri_t
H5Tdetect_class(hid_t type, H5T_class_t cls)
{
- H5T_t *dt; /* Datatype to query */
+ H5T_t *dt; /* Datatype to query */
htri_t ret_value; /* Return value */
FUNC_ENTER_API(FAIL)
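
A common use of H5Tdetect_class() is deciding whether a read buffer will contain library-allocated variable-length data somewhere inside the type; a sketch (hdf5.h assumed, names illustrative):

    /* Decide how to post-process a read buffer for the given datatype. */
    static int
    plan_cleanup(hid_t dtype_id)
    {
        htri_t has_vlen = H5Tdetect_class(dtype_id, H5T_VLEN);  /* looks inside compounds, arrays, ... */
        if (has_vlen < 0)
            return -1;                        /* query failed */
        if (has_vlen > 0) {
            /* VL data present somewhere in the type: reclaim it after reading */
        }
        return 0;
    }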
@@ -1935,15 +1935,15 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5T_detect_class
+ * Function: H5T_detect_class
*
- * Purpose: Check whether a datatype contains (or is) a certain type of
- * datatype.
+ * Purpose: Check whether a datatype contains (or is) a certain type of
+ * datatype.
*
- * Return: TRUE (1) or FALSE (0) on success/Negative on failure
+ * Return: TRUE (1) or FALSE (0) on success/Negative on failure
*
- * Programmer: Quincey Koziol
- * Wednesday, November 29, 2000
+ * Programmer: Quincey Koziol
+ * Wednesday, November 29, 2000
*
* Modifications:
* Raymond Lu
@@ -1959,7 +1959,7 @@ done:
htri_t
H5T_detect_class(const H5T_t *dt, H5T_class_t cls, hbool_t from_api)
{
- unsigned i;
+ unsigned i;
htri_t ret_value = FALSE; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -2020,21 +2020,21 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5Tis_variable_str
+ * Function: H5Tis_variable_str
*
- * Purpose: Check whether a datatype is a variable-length string
+ * Purpose: Check whether a datatype is a variable-length string
*
- * Return: TRUE (1) or FALSE (0) on success/Negative on failure
+ * Return: TRUE (1) or FALSE (0) on success/Negative on failure
*
- * Programmer: Raymond Lu
- * November 4, 2002
+ * Programmer: Raymond Lu
+ * November 4, 2002
*
*-------------------------------------------------------------------------
*/
htri_t
H5Tis_variable_str(hid_t dtype_id)
{
- H5T_t *dt; /* Datatype to query */
+ H5T_t *dt; /* Datatype to query */
htri_t ret_value; /* Return value */
FUNC_ENTER_API(FAIL)
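
Caller-side sketch (hdf5.h assumed; str_type is a hypothetical string datatype ID): the answer decides whether read elements are library-allocated char pointers or fixed-width character arrays.

    /* Report how string elements of str_type are stored in memory. */
    static int
    string_storage(hid_t str_type)
    {
        htri_t is_vl = H5Tis_variable_str(str_type);
        if (is_vl < 0)
            return -1;                        /* query failed */
        if (is_vl > 0)
            return 1;                         /* variable-length: elements are char * */
        return 0;                             /* fixed-length: H5Tget_size(str_type) bytes each */
    }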
@@ -2054,14 +2054,14 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5T_is_variable_str
+ * Function: H5T_is_variable_str
*
- * Purpose: Check whether a datatype is a variable-length string
+ * Purpose: Check whether a datatype is a variable-length string
*
- * Return: TRUE (1) or FALSE (0) on success/Negative on failure
+ * Return: TRUE (1) or FALSE (0) on success/Negative on failure
*
- * Programmer: Quincey Koziol
- * October 17, 2007
+ * Programmer: Quincey Koziol
+ * October 17, 2007
*
*-------------------------------------------------------------------------
*/
@@ -2075,33 +2075,33 @@ H5T_is_variable_str(const H5T_t *dt)
/*-------------------------------------------------------------------------
- * Function: H5Tget_size
+ * Function: H5Tget_size
*
- * Purpose: Determines the total size of a datatype in bytes.
+ * Purpose: Determines the total size of a datatype in bytes.
*
- * Return: Success: Size of the datatype in bytes. The size of
- * datatype is the size of an instance of that
- * datatype.
+ * Return: Success: Size of the datatype in bytes. The size of
+ * a datatype is the size of an instance of that
+ * datatype.
*
- * Failure: 0 (valid datatypes are never zero size)
+ * Failure: 0 (valid datatypes are never zero size)
*
- * Programmer: Robb Matzke
- * Monday, December 8, 1997
+ * Programmer: Robb Matzke
+ * Monday, December 8, 1997
*
*-------------------------------------------------------------------------
*/
size_t
H5Tget_size(hid_t type_id)
{
- H5T_t *dt; /* Datatype to query */
- size_t ret_value; /* Return value */
+ H5T_t *dt; /* Datatype to query */
+ size_t ret_value; /* Return value */
FUNC_ENTER_API(0)
H5TRACE1("z", "i", type_id);
/* Check args */
if(NULL == (dt = (H5T_t *)H5I_object_verify(type_id, H5I_DATATYPE)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, 0, "not a datatype")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, 0, "not a datatype")
/* size */
ret_value = H5T_GET_SIZE(dt);
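
A minimal helper built on H5Tget_size(), assuming <stdlib.h> and hdf5.h are included; note that the zero return is the error case:

    /* Allocate room for nelmts elements of the given datatype; NULL on failure. */
    static void *
    alloc_for_type(hid_t dtype_id, size_t nelmts)
    {
        size_t sz = H5Tget_size(dtype_id);    /* 0 means the query failed */
        return (0 == sz) ? NULL : malloc(nelmts * sz);
    }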
@@ -2112,33 +2112,33 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5Tset_size
+ * Function: H5Tset_size
*
- * Purpose: Sets the total size in bytes for a datatype (this operation
- * is not permitted on reference datatypes). If the size is
- * decreased so that the significant bits of the datatype
- * extend beyond the edge of the new size, then the `offset'
- * property is decreased toward zero. If the `offset' becomes
- * zero and the significant bits of the datatype still hang
- * over the edge of the new size, then the number of significant
- * bits is decreased.
+ * Purpose: Sets the total size in bytes for a datatype (this operation
+ * is not permitted on reference datatypes). If the size is
+ * decreased so that the significant bits of the datatype
+ * extend beyond the edge of the new size, then the `offset'
+ * property is decreased toward zero. If the `offset' becomes
+ * zero and the significant bits of the datatype still hang
+ * over the edge of the new size, then the number of significant
+ * bits is decreased.
*
- * Adjusting the size of an H5T_STRING automatically sets the
- * precision to 8*size.
+ * Adjusting the size of an H5T_STRING automatically sets the
+ * precision to 8*size.
*
- * All datatypes have a positive size.
+ * All datatypes have a positive size.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Non-negative on success/Negative on failure
*
- * Programmer: Robb Matzke
- * Wednesday, January 7, 1998
+ * Programmer: Robb Matzke
+ * Wednesday, January 7, 1998
*
*-------------------------------------------------------------------------
*/
herr_t
H5Tset_size(hid_t type_id, size_t size)
{
- H5T_t *dt; /* Datatype to modify */
+ H5T_t *dt; /* Datatype to modify */
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_API(FAIL)
@@ -2146,21 +2146,21 @@ H5Tset_size(hid_t type_id, size_t size)
/* Check args */
if(NULL == (dt = (H5T_t *)H5I_object_verify(type_id, H5I_DATATYPE)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype")
if(H5T_STATE_TRANSIENT!=dt->shared->state)
- HGOTO_ERROR(H5E_ARGS, H5E_CANTINIT, FAIL, "datatype is read-only")
+ HGOTO_ERROR(H5E_ARGS, H5E_CANTINIT, FAIL, "datatype is read-only")
if(size <= 0 && size != H5T_VARIABLE)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "size must be positive")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "size must be positive")
if(size == H5T_VARIABLE && !H5T_IS_STRING(dt->shared))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "only strings may be variable length")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "only strings may be variable length")
if(H5T_ENUM == dt->shared->type && dt->shared->u.enumer.nmembs > 0)
- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "operation not allowed after members are defined")
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "operation not allowed after members are defined")
if(H5T_REFERENCE == dt->shared->type)
- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "operation not defined for this datatype")
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "operation not defined for this datatype")
/* Modify the datatype */
if(H5T_set_size(dt, size) < 0)
- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to set size for datatype")
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to set size for datatype")
done:
FUNC_LEAVE_API(ret_value)
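
The H5T_VARIABLE case mentioned above applies only to string types; a typical construction (a sketch, not part of the patch; hdf5.h assumed):

    /* Build a variable-length C string type; only string types accept H5T_VARIABLE. */
    static hid_t
    make_vl_string(void)
    {
        hid_t vls = H5Tcopy(H5T_C_S1);        /* start from the fixed 1-byte C string type */
        if (vls >= 0 && H5Tset_size(vls, H5T_VARIABLE) < 0) {
            H5Tclose(vls);
            vls = -1;
        }
        return vls;                           /* the caller closes it */
    }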
@@ -2168,38 +2168,35 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5Tget_super
+ * Function: H5Tget_super
*
- * Purpose: Returns the type from which TYPE is derived. In the case of
- * an enumeration type the return value is an integer type.
+ * Purpose: Returns the type from which TYPE is derived. In the case of
+ * an enumeration type the return value is an integer type.
*
- * Return: Success: Type ID for base datatype.
+ * Return: Success: Type ID for base datatype.
*
- * Failure: negative
+ * Failure: negative
*
- * Programmer: Robb Matzke
+ * Programmer: Robb Matzke
* Wednesday, December 23, 1998
- *
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
hid_t
H5Tget_super(hid_t type)
{
- H5T_t *dt; /* Datatype to query */
- H5T_t *super = NULL; /* Supertype */
- hid_t ret_value; /* Return value */
+ H5T_t *dt; /* Datatype to query */
+ H5T_t *super = NULL; /* Supertype */
+ hid_t ret_value; /* Return value */
FUNC_ENTER_API(FAIL)
H5TRACE1("i", "i", type);
if(NULL == (dt = (H5T_t *)H5I_object_verify(type,H5I_DATATYPE)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype")
if(NULL == (super = H5T_get_super(dt)))
- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "not a datatype")
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "not a datatype")
if((ret_value = H5I_register(H5I_DATATYPE, super, TRUE)) < 0)
- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTREGISTER, FAIL, "unable to register parent datatype")
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTREGISTER, FAIL, "unable to register parent datatype")
done:
if(ret_value < 0)
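
Caller-side sketch for H5Tget_super() (hdf5.h assumed; enum_id is a hypothetical enumeration datatype ID); the returned base type is a separate ID that must be closed:

    /* Check whether an enum type is derived from the native int type. */
    static htri_t
    enum_based_on_int(hid_t enum_id)
    {
        htri_t same = -1;
        hid_t  base = H5Tget_super(enum_id);  /* for an enum this is its underlying integer type */
        if (base >= 0) {
            same = H5Tequal(base, H5T_NATIVE_INT);
            H5Tclose(base);                   /* the returned ID is new and must be closed */
        }
        return same;
    }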
@@ -2211,36 +2208,33 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5T_get_super
+ * Function: H5T_get_super
*
- * Purpose: Private function for H5Tget_super. Returns the type from
- * which TYPE is derived. In the case of an enumeration type
- * the return value is an integer type.
+ * Purpose: Private function for H5Tget_super. Returns the type from
+ * which TYPE is derived. In the case of an enumeration type
+ * the return value is an integer type.
*
- * Return: Success: Data type for base data type.
+ * Return: Success: Data type for base data type.
*
- * Failure: NULL
+ * Failure: NULL
*
- * Programmer: Raymond Lu
+ * Programmer: Raymond Lu
* October 9, 2002
- *
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
H5T_t *
H5T_get_super(const H5T_t *dt)
{
- H5T_t *ret_value=NULL;
+ H5T_t *ret_value=NULL;
FUNC_ENTER_NOAPI(NULL)
HDassert(dt);
if (!dt->shared->parent)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, "not a derived data type");
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, "not a derived data type");
if (NULL==(ret_value=H5T_copy(dt->shared->parent, H5T_COPY_ALL)))
- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, NULL, "unable to copy parent data type");
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, NULL, "unable to copy parent data type");
done:
FUNC_LEAVE_NOAPI(ret_value)
@@ -2248,37 +2242,34 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5T_register
- *
- * Purpose: Register a hard or soft conversion function for a data type
- * conversion path. The path is specified by the source and
- * destination data types SRC_ID and DST_ID (for soft functions
- * only the class of these types is important). If FUNC is a
- * hard function then it replaces any previous path; if it's a
- * soft function then it replaces all existing paths to which it
- * applies and is used for any new path to which it applies as
- * long as that path doesn't have a hard function.
- *
- * Return: Non-negative on success/Negative on failure
+ * Function: H5T_register
*
- * Programmer: Robb Matzke
- * Friday, January 9, 1998
+ * Purpose: Register a hard or soft conversion function for a data type
+ * conversion path. The path is specified by the source and
+ * destination data types SRC_ID and DST_ID (for soft functions
+ * only the class of these types is important). If FUNC is a
+ * hard function then it replaces any previous path; if it's a
+ * soft function then it replaces all existing paths to which it
+ * applies and is used for any new path to which it applies as
+ * long as that path doesn't have a hard function.
*
- * Modifications:
+ * Return: Non-negative on success/Negative on failure
*
+ * Programmer: Robb Matzke
+ * Friday, January 9, 1998
*-------------------------------------------------------------------------
*/
static herr_t
H5T_register(H5T_pers_t pers, const char *name, H5T_t *src, H5T_t *dst,
- H5T_conv_t func, hid_t dxpl_id, hbool_t api_call)
+ H5T_conv_t func, hid_t dxpl_id, hbool_t api_call)
{
- hid_t tmp_sid=-1, tmp_did=-1;/*temporary data type IDs */
- H5T_path_t *old_path=NULL; /*existing conversion path */
- H5T_path_t *new_path=NULL; /*new conversion path */
- H5T_cdata_t cdata; /*temporary conversion data */
- int nprint=0; /*number of paths shut down */
- int i; /*counter */
- herr_t ret_value=SUCCEED; /*return value */
+ hid_t tmp_sid = -1, tmp_did = -1; /*temporary data type IDs */
+ H5T_path_t *old_path = NULL; /*existing conversion path */
+ H5T_path_t *new_path = NULL; /*new conversion path */
+ H5T_cdata_t cdata; /*temporary conversion data */
+ int nprint=0; /*number of paths shut down */
+ int i; /*counter */
+ herr_t ret_value = SUCCEED; /*return value */
FUNC_ENTER_NOAPI_NOINIT
@@ -2306,7 +2297,7 @@ H5T_register(H5T_pers_t pers, const char *name, H5T_t *src, H5T_t *dst,
for(i = 0; i < H5T_g.npaths; i++)
if(new_path != H5T_g.path[i])
H5T_g.path[i]->cdata.recalc = TRUE;
- } /* end if */
+ } /* end if */
} /* end if */
else {
/* Add function to end of soft list */
@@ -2377,11 +2368,11 @@ H5T_register(H5T_pers_t pers, const char *name, H5T_t *src, H5T_t *dst,
if ((old_path->func)(tmp_sid, tmp_did, &(old_path->cdata),
(size_t)0, (size_t)0, (size_t)0, NULL, NULL, dxpl_id)<0) {
#ifdef H5T_DEBUG
- if (H5DEBUG(T)) {
- fprintf (H5DEBUG(T), "H5T: conversion function 0x%08lx "
- "failed to free private data for %s (ignored)\n",
- (unsigned long)(old_path->func), old_path->name);
- }
+ if (H5DEBUG(T)) {
+ fprintf (H5DEBUG(T), "H5T: conversion function 0x%08lx "
+ "failed to free private data for %s (ignored)\n",
+ (unsigned long)(old_path->func), old_path->name);
+ }
#endif
} /* end if */
H5T_close(old_path->src);
@@ -2394,23 +2385,23 @@ H5T_register(H5T_pers_t pers, const char *name, H5T_t *src, H5T_t *dst,
tmp_sid = tmp_did = -1;
/* We don't care about any failures during the freeing process */
- H5E_clear_stack(NULL);
+ H5E_clear_stack(NULL);
} /* end for */
} /* end else */
done:
if(ret_value < 0) {
- if(new_path) {
- if(new_path->src)
- H5T_close(new_path->src);
- if(new_path->dst)
- H5T_close(new_path->dst);
- new_path = H5FL_FREE(H5T_path_t, new_path);
- } /* end if */
- if(tmp_sid >= 0)
- H5I_dec_ref(tmp_sid);
- if(tmp_did >= 0)
- H5I_dec_ref(tmp_did);
+ if(new_path) {
+ if(new_path->src)
+ H5T_close(new_path->src);
+ if(new_path->dst)
+ H5T_close(new_path->dst);
+ new_path = H5FL_FREE(H5T_path_t, new_path);
+ } /* end if */
+ if(tmp_sid >= 0)
+ H5I_dec_ref(tmp_sid);
+ if(tmp_did >= 0)
+ H5I_dec_ref(tmp_did);
} /* end if */
FUNC_LEAVE_NOAPI(ret_value)
@@ -2418,50 +2409,50 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5Tregister
+ * Function: H5Tregister
*
- * Purpose: Register a hard or soft conversion function for a data type
- * conversion path. The path is specified by the source and
- * destination data types SRC_ID and DST_ID (for soft functions
- * only the class of these types is important). If FUNC is a
- * hard function then it replaces any previous path; if it's a
- * soft function then it replaces all existing paths to which it
- * applies and is used for any new path to which it applies as
- * long as that path doesn't have a hard function.
+ * Purpose: Register a hard or soft conversion function for a data type
+ * conversion path. The path is specified by the source and
+ * destination data types SRC_ID and DST_ID (for soft functions
+ * only the class of these types is important). If FUNC is a
+ * hard function then it replaces any previous path; if it's a
+ * soft function then it replaces all existing paths to which it
+ * applies and is used for any new path to which it applies as
+ * long as that path doesn't have a hard function.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Non-negative on success/Negative on failure
*
- * Programmer: Robb Matzke
- * Friday, January 9, 1998
+ * Programmer: Robb Matzke
+ * Friday, January 9, 1998
*
*-------------------------------------------------------------------------
*/
herr_t
H5Tregister(H5T_pers_t pers, const char *name, hid_t src_id, hid_t dst_id,
- H5T_conv_t func)
+ H5T_conv_t func)
{
- H5T_t *src; /*source data type descriptor */
- H5T_t *dst; /*destination data type desc */
- herr_t ret_value = SUCCEED; /*return value */
+ H5T_t *src; /*source data type descriptor */
+ H5T_t *dst; /*destination data type desc */
+ herr_t ret_value = SUCCEED; /*return value */
FUNC_ENTER_API(FAIL)
H5TRACE5("e", "Te*siix", pers, name, src_id, dst_id, func);
/* Check args */
if(H5T_PERS_HARD != pers && H5T_PERS_SOFT != pers)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid function persistence")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "invalid function persistence")
if(!name || !*name)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "conversion must have a name for debugging")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "conversion must have a name for debugging")
if(NULL == (src = (H5T_t *)H5I_object_verify(src_id,H5I_DATATYPE)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data type")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data type")
if(NULL == (dst = (H5T_t *)H5I_object_verify(dst_id,H5I_DATATYPE)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data type")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data type")
if(!func)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no conversion function specified")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "no conversion function specified")
/* Go register the function */
if(H5T_register(pers, name, src, dst, func, H5AC_noio_dxpl_id, TRUE) < 0)
- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't register conversion function")
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "can't register conversion function")
done:
FUNC_LEAVE_API(ret_value)
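
For context, an application-defined soft conversion function passed to H5Tregister() has roughly the shape sketched below; the names and the empty case bodies are illustrative, and the real work happens under H5T_CONV_CONV (hdf5.h assumed):

    /* Skeleton of an application conversion callback matching H5T_conv_t. */
    static herr_t
    my_conv(hid_t src_id, hid_t dst_id, H5T_cdata_t *cdata, size_t nelmts,
            size_t buf_stride, size_t bkg_stride, void *buf, void *bkg, hid_t dxpl_id)
    {
        switch (cdata->command) {
            case H5T_CONV_INIT: /* check that src_id/dst_id are handled; set up cdata->priv */ break;
            case H5T_CONV_CONV: /* convert nelmts values in buf, in place */                   break;
            case H5T_CONV_FREE: /* release cdata->priv */                                      break;
            default:            return -1;
        }
        return 0;
    }

    /* Soft registration: applies to every INTEGER -> INTEGER path without a hard function. */
    static herr_t
    register_my_conv(void)
    {
        return H5Tregister(H5T_PERS_SOFT, "my_i_i", H5T_NATIVE_INT, H5T_NATIVE_INT, my_conv);
    }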
@@ -2469,18 +2460,18 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5T_unregister
+ * Function: H5T_unregister
*
- * Purpose: Removes conversion paths that match the specified criteria.
- * All arguments are optional. Missing arguments are wild cards.
- * The special no-op path cannot be removed.
+ * Purpose: Removes conversion paths that match the specified criteria.
+ * All arguments are optional. Missing arguments are wild cards.
+ * The special no-op path cannot be removed.
*
- * Return: Succeess: non-negative
+ * Return: Success: non-negative
*
- * Failure: negative
+ * Failure: negative
*
- * Programmer: Robb Matzke
- * Tuesday, January 13, 1998
+ * Programmer: Robb Matzke
+ * Tuesday, January 13, 1998
*
* Modifications:
* Adapted to non-API function - QAK, 11/17/99
@@ -2489,12 +2480,12 @@ done:
*/
static herr_t
H5T_unregister(H5T_pers_t pers, const char *name, H5T_t *src, H5T_t *dst,
- H5T_conv_t func, hid_t dxpl_id)
+ H5T_conv_t func, hid_t dxpl_id)
{
- H5T_path_t *path = NULL; /*conversion path */
- H5T_soft_t *soft = NULL; /*soft conversion information */
- int nprint = 0; /*number of paths shut down */
- int i; /*counter */
+ H5T_path_t *path = NULL; /*conversion path */
+ H5T_soft_t *soft = NULL; /*soft conversion information */
+ int nprint = 0; /*number of paths shut down */
+ int i; /*counter */
FUNC_ENTER_NOAPI_NOINIT_NOERR
@@ -2568,27 +2559,27 @@ H5T_unregister(H5T_pers_t pers, const char *name, H5T_t *src, H5T_t *dst,
/*-------------------------------------------------------------------------
- * Function: H5Tunregister
+ * Function: H5Tunregister
*
- * Purpose: Removes conversion paths that match the specified criteria.
- * All arguments are optional. Missing arguments are wild cards.
- * The special no-op path cannot be removed.
+ * Purpose: Removes conversion paths that match the specified criteria.
+ * All arguments are optional. Missing arguments are wild cards.
+ * The special no-op path cannot be removed.
*
- * Return: Succeess: non-negative
+ * Return: Success: non-negative
*
- * Failure: negative
+ * Failure: negative
*
- * Programmer: Robb Matzke
- * Tuesday, January 13, 1998
+ * Programmer: Robb Matzke
+ * Tuesday, January 13, 1998
*
*-------------------------------------------------------------------------
*/
herr_t
H5Tunregister(H5T_pers_t pers, const char *name, hid_t src_id, hid_t dst_id,
- H5T_conv_t func)
+ H5T_conv_t func)
{
- H5T_t *src = NULL, *dst = NULL; /* Datatype descriptors */
- herr_t ret_value = SUCCEED; /* Return value */
+ H5T_t *src = NULL, *dst = NULL; /* Datatype descriptors */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_API(FAIL)
H5TRACE5("e", "Te*siix", pers, name, src_id, dst_id, func);
@@ -2608,29 +2599,29 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5Tfind
+ * Function: H5Tfind
*
- * Purpose: Finds a conversion function that can handle a conversion from
- * type SRC_ID to type DST_ID. The PCDATA argument is a pointer
- * to a pointer to type conversion data which was created and
- * initialized by the type conversion function of this path
- * when the conversion function was installed on the path.
+ * Purpose: Finds a conversion function that can handle a conversion from
+ * type SRC_ID to type DST_ID. The PCDATA argument is a pointer
+ * to a pointer to type conversion data which was created and
+ * initialized by the type conversion function of this path
+ * when the conversion function was installed on the path.
*
- * Return: Success: A pointer to a suitable conversion function.
+ * Return: Success: A pointer to a suitable conversion function.
*
- * Failure: NULL
+ * Failure: NULL
*
- * Programmer: Robb Matzke
- * Tuesday, January 13, 1998
+ * Programmer: Robb Matzke
+ * Tuesday, January 13, 1998
*
*-------------------------------------------------------------------------
*/
H5T_conv_t
H5Tfind(hid_t src_id, hid_t dst_id, H5T_cdata_t **pcdata)
{
- H5T_t *src, *dst;
- H5T_path_t *path;
- H5T_conv_t ret_value; /* Return value */
+ H5T_t *src, *dst;
+ H5T_path_t *path;
+ H5T_conv_t ret_value; /* Return value */
FUNC_ENTER_API(NULL)
H5TRACE3("x", "ii**x", src_id, dst_id, pcdata);
@@ -2638,13 +2629,13 @@ H5Tfind(hid_t src_id, hid_t dst_id, H5T_cdata_t **pcdata)
/* Check args */
if(NULL == (src = (H5T_t *)H5I_object_verify(src_id, H5I_DATATYPE)) ||
NULL == (dst = (H5T_t *)H5I_object_verify(dst_id, H5I_DATATYPE)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not a data type")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, NULL, "not a data type")
if(!pcdata)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, "no address to receive cdata pointer")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, NULL, "no address to receive cdata pointer")
/* Find it */
if(NULL == (path = H5T_path_find(src, dst, NULL, NULL, H5AC_noio_dxpl_id, FALSE)))
- HGOTO_ERROR(H5E_DATATYPE, H5E_NOTFOUND, NULL, "conversion function not found")
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOTFOUND, NULL, "conversion function not found")
if(pcdata)
*pcdata = &(path->cdata);
@@ -2658,27 +2649,27 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5Tcompiler_conv
+ * Function: H5Tcompiler_conv
*
- * Purpose: Finds out whether the library's conversion function from
- * type src_id to type dst_id is a compiler (hard) conversion.
- * A hard conversion uses compiler's casting; a soft conversion
- * uses the library's own conversion function.
+ * Purpose: Finds out whether the library's conversion function from
+ * type src_id to type dst_id is a compiler (hard) conversion.
+ * A hard conversion uses the compiler's casting; a soft conversion
+ * uses the library's own conversion function.
*
- * Return: TRUE: hard conversion.
- * FALSE: soft conversion.
- * FAIL: failed.
+ * Return: TRUE: hard conversion.
+ * FALSE: soft conversion.
+ * FAIL: failed.
*
- * Programmer: Raymond Lu
- * Friday, Sept 2, 2005
+ * Programmer: Raymond Lu
+ * Friday, Sept 2, 2005
*
*-------------------------------------------------------------------------
*/
htri_t
H5Tcompiler_conv(hid_t src_id, hid_t dst_id)
{
- H5T_t *src, *dst;
- htri_t ret_value; /* Return value */
+ H5T_t *src, *dst;
+ htri_t ret_value; /* Return value */
FUNC_ENTER_API(FAIL)
H5TRACE2("t", "ii", src_id, dst_id);
@@ -2686,11 +2677,11 @@ H5Tcompiler_conv(hid_t src_id, hid_t dst_id)
/* Check args */
if(NULL == (src = (H5T_t *)H5I_object_verify(src_id, H5I_DATATYPE)) ||
NULL == (dst = (H5T_t *)H5I_object_verify(dst_id, H5I_DATATYPE)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data type")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data type")
/* Find it */
if((ret_value = H5T_compiler_conv(src, dst)) < 0)
- HGOTO_ERROR(H5E_DATATYPE, H5E_NOTFOUND, FAIL, "conversion function not found")
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOTFOUND, FAIL, "conversion function not found")
done:
FUNC_LEAVE_API(ret_value)
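
Caller-side sketch (hdf5.h assumed): query whether a particular path is hard (compiler cast) or soft (library function).

    /* Is the int -> double path a hard or a soft conversion? */
    static void
    report_conv_kind(void)
    {
        htri_t hard = H5Tcompiler_conv(H5T_NATIVE_INT, H5T_NATIVE_DOUBLE);
        if (hard > 0) {
            /* hard: the library uses compiler casting for this path */
        }
        else if (hard == 0) {
            /* soft: a library conversion function handles this path */
        }
        else {
            /* the conversion path could not be found */
        }
    }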
@@ -2698,35 +2689,35 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5Tconvert
- *
- * Purpose: Convert NELMTS elements from type SRC_ID to type DST_ID. The
- * source elements are packed in BUF and on return the
- * destination will be packed in BUF. That is, the conversion
- * is performed in place. The optional background buffer is an
- * array of NELMTS values of destination type which are merged
- * with the converted values to fill in cracks (for instance,
- * BACKGROUND might be an array of structs with the `a' and `b'
- * fields already initialized and the conversion of BUF supplies
- * the `c' and `d' field values). The PLIST_ID a dataset transfer
- * property list which is passed to the conversion functions. (It's
- * currently only used to pass along the VL datatype custom allocation
- * information -QAK 7/1/99)
- *
- * Return: Non-negative on success/Negative on failure
- *
- * Programmer: Robb Matzke
+ * Function: H5Tconvert
+ *
+ * Purpose: Convert NELMTS elements from type SRC_ID to type DST_ID. The
+ * source elements are packed in BUF and on return the
+ * destination will be packed in BUF. That is, the conversion
+ * is performed in place. The optional background buffer is an
+ * array of NELMTS values of destination type which are merged
+ * with the converted values to fill in cracks (for instance,
+ * BACKGROUND might be an array of structs with the `a' and `b'
+ * fields already initialized and the conversion of BUF supplies
+ * the `c' and `d' field values). The PLIST_ID is a dataset transfer
+ * property list which is passed to the conversion functions. (It's
+ * currently only used to pass along the VL datatype custom allocation
+ * information -QAK 7/1/99)
+ *
+ * Return: Non-negative on success/Negative on failure
+ *
+ * Programmer: Robb Matzke
* Wednesday, June 10, 1998
*
*-------------------------------------------------------------------------
*/
herr_t
H5Tconvert(hid_t src_id, hid_t dst_id, size_t nelmts, void *buf,
- void *background, hid_t dxpl_id)
+ void *background, hid_t dxpl_id)
{
- H5T_path_t *tpath; /*type conversion info */
- H5T_t *src, *dst; /*unatomized types */
- herr_t ret_value = SUCCEED; /* Return value */
+ H5T_path_t *tpath; /* type conversion info */
+ H5T_t *src, *dst; /* unatomized types */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_API(FAIL)
H5TRACE6("e", "iiz*x*xi", src_id, dst_id, nelmts, buf, background, dxpl_id);
@@ -2734,7 +2725,7 @@ H5Tconvert(hid_t src_id, hid_t dst_id, size_t nelmts, void *buf,
/* Check args */
if(NULL == (src = (H5T_t *)H5I_object_verify(src_id, H5I_DATATYPE)) ||
NULL == (dst = (H5T_t *)H5I_object_verify(dst_id, H5I_DATATYPE)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data type")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a data type")
if(H5P_DEFAULT == dxpl_id)
dxpl_id = H5P_DATASET_XFER_DEFAULT;
else
@@ -2743,7 +2734,7 @@ H5Tconvert(hid_t src_id, hid_t dst_id, size_t nelmts, void *buf,
/* Find the conversion function */
if(NULL == (tpath = H5T_path_find(src, dst, NULL, NULL, dxpl_id, FALSE)))
- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to convert between src and dst data types")
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to convert between src and dst data types")
if(H5T_convert(tpath, src_id, dst_id, nelmts, (size_t)0, (size_t)0, buf, background, dxpl_id) < 0)
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "data type conversion failed")
@@ -2754,17 +2745,16 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5Tencode
+ * Function: H5Tencode
*
- * Purpose: Given an datatype ID, converts the object description into
- * binary in a buffer.
+ * Purpose: Given a datatype ID, converts the object description into
+ * binary in a buffer.
*
- * Return: Success: non-negative
+ * Return: Success: non-negative
*
- * Failure: negative
+ * Failure: negative
*
- * Programmer: Raymond Lu
- * slu@ncsa.uiuc.edu
+ * Programmer: Raymond Lu
* July 14, 2004
*
*-------------------------------------------------------------------------
@@ -2780,13 +2770,13 @@ H5Tencode(hid_t obj_id, void *buf, size_t *nalloc)
/* Check argument and retrieve object */
if(NULL == (dtype = (H5T_t *)H5I_object_verify(obj_id, H5I_DATATYPE)))
- HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype")
if(nalloc == NULL)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "NULL pointer for buffer size")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "NULL pointer for buffer size")
/* Go encode the datatype */
if(H5T_encode(dtype, (unsigned char *)buf, nalloc) < 0)
- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTENCODE, FAIL, "can't encode datatype")
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTENCODE, FAIL, "can't encode datatype")
done:
FUNC_LEAVE_API(ret_value)
@@ -2794,24 +2784,22 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5Tdecode
+ * Function: H5Tdecode
*
- * Purpose: Decode a binary object description and return a new object
- * handle.
+ * Purpose: Decode a binary object description and return a new object
+ * handle.
*
- * Return: Success: datatype ID(non-negative)
+ * Return: Success: datatype ID(non-negative)
*
- * Failure: negative
+ * Failure: negative
*
- * Programmer: Raymond Lu
- * slu@ncsa.uiuc.edu
+ * Programmer: Raymond Lu
* July 14, 2004
*
* Modification:Raymond Lu
- * songyulu@hdfgroup.org
* 17 February 2011
* I changed the value for the APP_REF parameter of H5I_register
- * from FALSE to TRUE.
+ * from FALSE to TRUE.
*-------------------------------------------------------------------------
*/
hid_t
@@ -2825,15 +2813,15 @@ H5Tdecode(const void *buf)
/* Check args */
if(buf == NULL)
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "empty buffer")
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "empty buffer")
/* Create datatype by decoding buffer */
if(NULL == (dt = H5T_decode((const unsigned char *)buf)))
- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTDECODE, FAIL, "can't decode object")
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTDECODE, FAIL, "can't decode object")
/* Register the type and return the ID */
if((ret_value = H5I_register(H5I_DATATYPE, dt, TRUE)) < 0)
- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTREGISTER, FAIL, "unable to register data type")
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTREGISTER, FAIL, "unable to register data type")
done:
FUNC_LEAVE_API(ret_value)
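
The usual calling pattern for the H5Tencode()/H5Tdecode() pair is size-query-then-encode; a sketch assuming <stdlib.h> and hdf5.h are included (the helper name is illustrative):

    /* Clone a datatype by encoding it to a heap buffer and decoding it back. */
    static hid_t
    clone_via_encoding(hid_t dtype_id)
    {
        size_t         nalloc = 0;
        unsigned char *buf    = NULL;
        hid_t          out    = -1;

        if (H5Tencode(dtype_id, NULL, &nalloc) < 0)   /* first call: query the required size */
            return -1;
        if (NULL == (buf = (unsigned char *)malloc(nalloc)))
            return -1;
        if (H5Tencode(dtype_id, buf, &nalloc) >= 0)   /* second call: fill the buffer */
            out = H5Tdecode(buf);                     /* independent ID; close it when done */
        free(buf);
        return out;
    }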
@@ -2846,17 +2834,16 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5T_encode
+ * Function: H5T_encode
*
- * Purpose: Private function for H5Tencode. Converts an object
- * description into binary in a buffer.
+ * Purpose: Private function for H5Tencode. Converts an object
+ * description into binary in a buffer.
*
- * Return: Success: non-negative
+ * Return: Success: non-negative
*
- * Failure: negative
+ * Failure: negative
*
- * Programmer: Raymond Lu
- * slu@ncsa.uiuc.edu
+ * Programmer: Raymond Lu
* July 14, 2004
*
*-------------------------------------------------------------------------
@@ -2872,11 +2859,11 @@ H5T_encode(H5T_t *obj, unsigned char *buf, size_t *nalloc)
/* Allocate "fake" file structure */
if(NULL == (f = H5F_fake_alloc((uint8_t)0)))
- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTALLOC, FAIL, "can't allocate fake file struct")
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTALLOC, FAIL, "can't allocate fake file struct")
/* Find out the size of buffer needed */
if((buf_size = H5O_msg_raw_size(f, H5O_DTYPE_ID, TRUE, obj)) == 0)
- HGOTO_ERROR(H5E_DATATYPE, H5E_BADSIZE, FAIL, "can't find datatype size")
+ HGOTO_ERROR(H5E_DATATYPE, H5E_BADSIZE, FAIL, "can't find datatype size")
/* Don't encode if buffer size isn't big enough or buffer is empty */
if(!buf || *nalloc < (buf_size + 1 + 1))
@@ -2903,17 +2890,16 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5T_decode
+ * Function: H5T_decode
*
- * Purpose: Private function for H5Tdecode. Reconstructs a binary
- * description of datatype and returns a new object handle.
+ * Purpose: Private function for H5Tdecode. Reconstructs a binary
+ * description of datatype and returns a new object handle.
*
- * Return: Success: datatype ID(non-negative)
+ * Return: Success: datatype ID(non-negative)
*
- * Failure: negative
+ * Failure: negative
*
- * Programmer: Raymond Lu
- * slu@ncsa.uiuc.edu
+ * Programmer: Raymond Lu
* July 14, 2004
*
*-------------------------------------------------------------------------
@@ -2928,19 +2914,19 @@ H5T_decode(const unsigned char *buf)
/* Allocate "fake" file structure */
if(NULL == (f = H5F_fake_alloc((uint8_t)0)))
- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTALLOC, NULL, "can't allocate fake file struct")
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTALLOC, NULL, "can't allocate fake file struct")
/* Decode the type of the information */
if(*buf++ != H5O_DTYPE_ID)
- HGOTO_ERROR(H5E_DATATYPE, H5E_BADMESG, NULL, "not an encoded datatype")
+ HGOTO_ERROR(H5E_DATATYPE, H5E_BADMESG, NULL, "not an encoded datatype")
/* Decode the version of the datatype information */
if(*buf++ != H5T_ENCODE_VERSION)
- HGOTO_ERROR(H5E_DATATYPE, H5E_VERSION, NULL, "unknown version of encoded datatype")
+ HGOTO_ERROR(H5E_DATATYPE, H5E_VERSION, NULL, "unknown version of encoded datatype")
/* Decode the serialized datatype message */
if(NULL == (ret_value = (H5T_t *)H5O_msg_decode(f, H5AC_noio_dxpl_id, NULL, H5O_DTYPE_ID, buf)))
- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTDECODE, NULL, "can't decode object")
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTDECODE, NULL, "can't decode object")
/* Mark datatype as being in memory now */
if(H5T_set_loc(ret_value, NULL, H5T_LOC_MEMORY) < 0)
@@ -2956,18 +2942,18 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5T__create
+ * Function: H5T__create
*
- * Purpose: Creates a new data type and initializes it to reasonable
- * values. The new data type is SIZE bytes and an instance of
- * the class TYPE.
+ * Purpose: Creates a new data type and initializes it to reasonable
+ * values. The new data type is SIZE bytes and an instance of
+ * the class TYPE.
*
- * Return: Success: Pointer to the new type.
+ * Return: Success: Pointer to the new type.
*
- * Failure: NULL
+ * Failure: NULL
*
- * Programmer: Robb Matzke
- * Friday, December 5, 1997
+ * Programmer: Robb Matzke
+ * Friday, December 5, 1997
*
* Modifications:
* Raymond Lu
@@ -2991,16 +2977,16 @@ H5T__create(H5T_class_t type, size_t size)
{
H5T_t *origin_dt = NULL;
- if(NULL == (origin_dt = (H5T_t *)H5I_object(H5T_C_S1)))
- HGOTO_ERROR(H5E_DATATYPE, H5E_BADTYPE, NULL, "can't get structure for string type")
+ if(NULL == (origin_dt = (H5T_t *)H5I_object(H5T_C_S1)))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_BADTYPE, NULL, "can't get structure for string type")
- /* Copy the default string datatype */
- if(NULL == (dt = H5T_copy(origin_dt, H5T_COPY_TRANSIENT)))
- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, NULL, "unable to copy");
+ /* Copy the default string datatype */
+ if(NULL == (dt = H5T_copy(origin_dt, H5T_COPY_TRANSIENT)))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, NULL, "unable to copy");
- /* Modify the datatype */
- if(H5T_set_size(dt, size) < 0)
- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, NULL, "unable to set size for string type")
+ /* Modify the datatype */
+ if(H5T_set_size(dt, size) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, NULL, "unable to set size for string type")
}
break;
@@ -3085,35 +3071,35 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5T_copy
+ * Function: H5T_copy
*
- * Purpose: Copies datatype OLD_DT. The resulting data type is not
- * locked and is a transient type.
+ * Purpose: Copies datatype OLD_DT. The resulting data type is not
+ * locked and is a transient type.
*
- * Return: Success: Pointer to a new copy of the OLD_DT argument.
+ * Return: Success: Pointer to a new copy of the OLD_DT argument.
*
- * Failure: NULL
+ * Failure: NULL
*
- * Programmer: Robb Matzke
- * Thursday, December 4, 1997
+ * Programmer: Robb Matzke
+ * Thursday, December 4, 1997
*
* Modifications:
*
- * Robb Matzke, 4 Jun 1998
- * Added the METHOD argument. If it's H5T_COPY_TRANSIENT then the
- * result will be an unlocked transient type. Otherwise if it's
- * H5T_COPY_ALL then the result is a named type if the original is a
- * named type, but the result is not opened. Finally, if it's
- * H5T_COPY_REOPEN and the original type is a named type then the result
- * is a named type and the type object header is opened again. The
- * H5T_COPY_REOPEN method is used when returning a named type to the
- * application.
+ * Robb Matzke, 4 Jun 1998
+ * Added the METHOD argument. If it's H5T_COPY_TRANSIENT then the
+ * result will be an unlocked transient type. Otherwise if it's
+ * H5T_COPY_ALL then the result is a named type if the original is a
+ * named type, but the result is not opened. Finally, if it's
+ * H5T_COPY_REOPEN and the original type is a named type then the result
+ * is a named type and the type object header is opened again. The
+ * H5T_COPY_REOPEN method is used when returning a named type to the
+ * application.
*
- * Robb Matzke, 22 Dec 1998
- * Now able to copy enumeration data types.
+ * Robb Matzke, 22 Dec 1998
+ * Now able to copy enumeration data types.
*
* Robb Matzke, 20 May 1999
- * Now able to copy opaque types.
+ * Now able to copy opaque types.
*
* Pedro Vicente, <pvn@ncsa.uiuc.edu> 21 Sep 2002
* Added a deep copy of the symbol table entry
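/*
 * Usage sketch (editor's illustration, not patch content): H5T_copy() is the
 * engine behind the public H5Tcopy().  Copying a predefined (immutable) type
 * yields a transient type that can then be modified; a minimal sketch:
 */
#include "hdf5.h"

static hid_t
make_wider_int(void)
{
    hid_t t = H5Tcopy(H5T_NATIVE_INT);    /* transient copy, no longer immutable */

    if(t < 0)
        return -1;
    if(H5Tset_size(t, 8) < 0) {           /* the copy may be resized; the original cannot */
        H5Tclose(t);
        return -1;
    }
    return t;
}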
@@ -3123,11 +3109,11 @@ done:
H5T_t *
H5T_copy(H5T_t *old_dt, H5T_copy_t method)
{
- H5T_t *new_dt = NULL, *tmp = NULL;
+ H5T_t *new_dt = NULL, *tmp = NULL;
H5T_shared_t *reopened_fo = NULL;
- unsigned i;
- char *s;
- H5T_t *ret_value = NULL; /* Return value */
+ unsigned i;
+ char *s;
+ H5T_t *ret_value = NULL; /* Return value */
FUNC_ENTER_NOAPI(NULL)
@@ -3209,7 +3195,8 @@ H5T_copy(H5T_t *old_dt, H5T_copy_t method)
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINC, NULL, "can't increment object count")
} /* end else */
new_dt->shared->state = H5T_STATE_OPEN;
- } else if(H5T_STATE_IMMUTABLE == old_dt->shared->state) {
+ }
+ else if(H5T_STATE_IMMUTABLE == old_dt->shared->state) {
new_dt->shared->state = H5T_STATE_RDONLY;
}
break;
@@ -3227,71 +3214,71 @@ H5T_copy(H5T_t *old_dt, H5T_copy_t method)
switch(new_dt->shared->type) {
case H5T_COMPOUND:
{
- ssize_t accum_change = 0; /* Amount of change in the offset of the fields */
-
- /*
- * Copy all member fields to new type, then overwrite the
- * name and type fields of each new member with copied values.
- * That is, H5T_copy() is a deep copy.
- */
- /* Only malloc if space has been allocated for members - NAF */
- if(new_dt->shared->u.compnd.nalloc > 0) {
- new_dt->shared->u.compnd.memb = (H5T_cmemb_t *)H5MM_malloc(new_dt->shared->u.compnd.nalloc *
- sizeof(H5T_cmemb_t));
- if (NULL==new_dt->shared->u.compnd.memb)
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed");
-
- HDmemcpy(new_dt->shared->u.compnd.memb, old_dt->shared->u.compnd.memb,
- new_dt->shared->u.compnd.nmembs * sizeof(H5T_cmemb_t));
- } /* end if */
-
- for(i = 0; i < new_dt->shared->u.compnd.nmembs; i++) {
- unsigned j;
- int old_match;
-
- s = new_dt->shared->u.compnd.memb[i].name;
- new_dt->shared->u.compnd.memb[i].name = H5MM_xstrdup(s);
- tmp = H5T_copy (old_dt->shared->u.compnd.memb[i].type, method);
- new_dt->shared->u.compnd.memb[i].type = tmp;
- HDassert(tmp != NULL);
-
- /* Range check against compound member's offset */
- if ((accum_change < 0) && ((ssize_t) new_dt->shared->u.compnd.memb[i].offset < accum_change))
- HGOTO_ERROR(H5E_DATATYPE, H5E_BADVALUE, NULL, "invalid field size in datatype");
-
- /* Apply the accumulated size change to the offset of the field */
- new_dt->shared->u.compnd.memb[i].offset += (size_t) accum_change;
+ ssize_t accum_change = 0; /* Amount of change in the offset of the fields */
- if(old_dt->shared->u.compnd.sorted != H5T_SORT_VALUE) {
- for(old_match = -1, j = 0; j < old_dt->shared->u.compnd.nmembs; j++) {
- if(!HDstrcmp(new_dt->shared->u.compnd.memb[i].name, old_dt->shared->u.compnd.memb[j].name)) {
- old_match = (int) j;
- break;
- } /* end if */
- } /* end for */
-
- /* check if we couldn't find a match */
- if(old_match < 0)
- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTCOPY, NULL, "fields in datatype corrupted");
+ /*
+ * Copy all member fields to new type, then overwrite the
+ * name and type fields of each new member with copied values.
+ * That is, H5T_copy() is a deep copy.
+ */
+ /* Only malloc if space has been allocated for members - NAF */
+ if(new_dt->shared->u.compnd.nalloc > 0) {
+ new_dt->shared->u.compnd.memb =
+ (H5T_cmemb_t *)H5MM_malloc(new_dt->shared->u.compnd.nalloc * sizeof(H5T_cmemb_t));
+ if (NULL == new_dt->shared->u.compnd.memb)
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed");
+
+ HDmemcpy(new_dt->shared->u.compnd.memb, old_dt->shared->u.compnd.memb,
+ new_dt->shared->u.compnd.nmembs * sizeof(H5T_cmemb_t));
} /* end if */
- else
- old_match = (int) i;
- /* If the field changed size, add that change to the accumulated size change */
- if(new_dt->shared->u.compnd.memb[i].type->shared->size != old_dt->shared->u.compnd.memb[old_match].type->shared->size) {
- /* Adjust the size of the member */
- new_dt->shared->u.compnd.memb[i].size = (old_dt->shared->u.compnd.memb[old_match].size*tmp->shared->size)/old_dt->shared->u.compnd.memb[old_match].type->shared->size;
+ for(i = 0; i < new_dt->shared->u.compnd.nmembs; i++) {
+ unsigned j;
+ int old_match;
+
+ s = new_dt->shared->u.compnd.memb[i].name;
+ new_dt->shared->u.compnd.memb[i].name = H5MM_xstrdup(s);
+ tmp = H5T_copy (old_dt->shared->u.compnd.memb[i].type, method);
+ new_dt->shared->u.compnd.memb[i].type = tmp;
+ HDassert(tmp != NULL);
+
+ /* Range check against compound member's offset */
+ if ((accum_change < 0) && ((ssize_t) new_dt->shared->u.compnd.memb[i].offset < accum_change))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_BADVALUE, NULL, "invalid field size in datatype");
+
+ /* Apply the accumulated size change to the offset of the field */
+ new_dt->shared->u.compnd.memb[i].offset += (size_t) accum_change;
+
+ if(old_dt->shared->u.compnd.sorted != H5T_SORT_VALUE) {
+ for(old_match = -1, j = 0; j < old_dt->shared->u.compnd.nmembs; j++) {
+ if(!HDstrcmp(new_dt->shared->u.compnd.memb[i].name, old_dt->shared->u.compnd.memb[j].name)) {
+ old_match = (int) j;
+ break;
+ } /* end if */
+ } /* end for */
+
+ /* check if we couldn't find a match */
+ if(old_match < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTCOPY, NULL, "fields in datatype corrupted");
+ } /* end if */
+ else
+ old_match = (int) i;
+
+ /* If the field changed size, add that change to the accumulated size change */
+ if(new_dt->shared->u.compnd.memb[i].type->shared->size != old_dt->shared->u.compnd.memb[old_match].type->shared->size) {
+ /* Adjust the size of the member */
+ new_dt->shared->u.compnd.memb[i].size = (old_dt->shared->u.compnd.memb[old_match].size*tmp->shared->size)/old_dt->shared->u.compnd.memb[old_match].type->shared->size;
- accum_change += (ssize_t) (new_dt->shared->u.compnd.memb[i].type->shared->size - old_dt->shared->u.compnd.memb[old_match].type->shared->size);
- } /* end if */
- } /* end for */
+ accum_change += (ssize_t) (new_dt->shared->u.compnd.memb[i].type->shared->size - old_dt->shared->u.compnd.memb[old_match].type->shared->size);
+ } /* end if */
+ } /* end for */
- /* Range check against datatype size */
- if ((accum_change < 0) && ((ssize_t) new_dt->shared->size < accum_change))
- HGOTO_ERROR(H5E_DATATYPE, H5E_BADVALUE, NULL, "invalid field size in datatype");
+ /* Range check against datatype size */
+ if ((accum_change < 0) && ((ssize_t) new_dt->shared->size < accum_change))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_BADVALUE, NULL, "invalid field size in datatype");
- /* Apply the accumulated size change to the size of the compound struct */
- new_dt->shared->size += (size_t) accum_change;
+ /* Apply the accumulated size change to the size of the compound struct */
+ new_dt->shared->size += (size_t) accum_change;
}
break;
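/*
 * Usage sketch (editor's illustration, not patch content): the deep copy
 * implemented above means a copied compound type owns its own member names
 * and member types.  A minimal sketch; `pair_t` is a made-up example struct:
 */
#include "hdf5.h"

typedef struct { int id; double val; } pair_t;

static hid_t
copy_compound(void)
{
    hid_t src = H5Tcreate(H5T_COMPOUND, sizeof(pair_t));
    hid_t cpy = -1;

    if(src < 0)
        return -1;
    if(H5Tinsert(src, "id",  HOFFSET(pair_t, id),  H5T_NATIVE_INT)    >= 0 &&
       H5Tinsert(src, "val", HOFFSET(pair_t, val), H5T_NATIVE_DOUBLE) >= 0)
        cpy = H5Tcopy(src);               /* deep copy: member names/types duplicated */
    H5Tclose(src);                        /* closing the original leaves the copy intact */
    return cpy;
}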
@@ -3301,14 +3288,14 @@ H5T_copy(H5T_t *old_dt, H5T_copy_t method)
* of each new member with copied values. That is, H5T_copy() is a
* deep copy.
*/
- new_dt->shared->u.enumer.name = (char **)H5MM_malloc(new_dt->shared->u.enumer.nalloc *
- sizeof(char*));
- new_dt->shared->u.enumer.value = (uint8_t *)H5MM_malloc(new_dt->shared->u.enumer.nalloc *
- new_dt->shared->size);
+ new_dt->shared->u.enumer.name =
+ (char **)H5MM_malloc(new_dt->shared->u.enumer.nalloc * sizeof(char*));
+ new_dt->shared->u.enumer.value =
+ (uint8_t *)H5MM_malloc(new_dt->shared->u.enumer.nalloc * new_dt->shared->size);
if(NULL == new_dt->shared->u.enumer.value)
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed");
HDmemcpy(new_dt->shared->u.enumer.value, old_dt->shared->u.enumer.value,
- new_dt->shared->u.enumer.nmembs * new_dt->shared->size);
+ new_dt->shared->u.enumer.nmembs * new_dt->shared->size);
for(i = 0; i < new_dt->shared->u.enumer.nmembs; i++) {
s = old_dt->shared->u.enumer.name[i];
new_dt->shared->u.enumer.name[i] = H5MM_xstrdup(s);
@@ -3392,22 +3379,19 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5T_lock
+ * Function: H5T_lock
*
- * Purpose: Lock a transient data type making it read-only. If IMMUTABLE
- * is set then the type cannot be closed except when the library
- * itself closes.
+ * Purpose: Lock a transient data type making it read-only. If IMMUTABLE
+ * is set then the type cannot be closed except when the library
+ * itself closes.
*
- * This function is a no-op if the type is not transient or if
- * the type is already read-only or immutable.
+ * This function is a no-op if the type is not transient or if
+ * the type is already read-only or immutable.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Non-negative on success/Negative on failure
*
- * Programmer: Robb Matzke
+ * Programmer: Robb Matzke
* Thursday, June 4, 1998
- *
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -3441,14 +3425,14 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5T__alloc
+ * Function: H5T__alloc
*
- * Purpose: Allocates a new H5T_t structure, initializing it correctly.
+ * Purpose: Allocates a new H5T_t structure, initializing it correctly.
*
- * Return: Pointer to new H5T_t on success/NULL on failure
+ * Return: Pointer to new H5T_t on success/NULL on failure
*
- * Programmer: Quincey Koziol
- * Monday, August 29, 2005
+ * Programmer: Quincey Koziol
+ * Monday, August 29, 2005
*
*-------------------------------------------------------------------------
*/
@@ -3488,23 +3472,23 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5T__free
+ * Function: H5T__free
*
- * Purpose: Frees all memory associated with a datatype, but does not
- * free the H5T_t or H5T_shared_t structures (which should
- * be done in H5T_close).
+ * Purpose: Frees all memory associated with a datatype, but does not
+ * free the H5T_t or H5T_shared_t structures (which should
+ * be done in H5T_close).
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Non-negative on success/Negative on failure
*
- * Programmer: Quincey Koziol
- * Monday, January 6, 2003
+ * Programmer: Quincey Koziol
+ * Monday, January 6, 2003
*
*-------------------------------------------------------------------------
*/
herr_t
H5T__free(H5T_t *dt)
{
- unsigned i;
+ unsigned i;
herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_PACKAGE
@@ -3537,7 +3521,7 @@ H5T__free(H5T_t *dt)
* Don't free locked datatypes.
*/
if(H5T_STATE_IMMUTABLE==dt->shared->state)
- HGOTO_ERROR(H5E_DATATYPE, H5E_CLOSEERROR, FAIL, "unable to close immutable datatype")
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CLOSEERROR, FAIL, "unable to close immutable datatype")
/* Close the datatype */
switch(dt->shared->type) {
@@ -3580,7 +3564,7 @@ H5T__free(H5T_t *dt)
/* Close the parent */
HDassert(dt->shared->parent != dt);
if(dt->shared->parent && H5T_close(dt->shared->parent) < 0)
- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTCLOSEOBJ, FAIL, "unable to close parent data type")
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTCLOSEOBJ, FAIL, "unable to close parent data type")
dt->shared->parent = NULL;
done:
@@ -3589,22 +3573,22 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5T_close
+ * Function: H5T_close
*
- * Purpose: Frees a data type and all associated memory. If the data
- * type is locked then nothing happens.
+ * Purpose: Frees a data type and all associated memory. If the data
+ * type is locked then nothing happens.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Non-negative on success/Negative on failure
*
- * Programmer: Robb Matzke
- * Monday, December 8, 1997
+ * Programmer: Robb Matzke
+ * Monday, December 8, 1997
*
*-------------------------------------------------------------------------
*/
herr_t
H5T_close(H5T_t *dt)
{
- herr_t ret_value = SUCCEED; /* Return value */
+ herr_t ret_value = SUCCEED; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -3614,16 +3598,16 @@ H5T_close(H5T_t *dt)
dt->shared->fo_count--;
if(dt->shared->state != H5T_STATE_OPEN || dt->shared->fo_count == 0) {
- /* Uncork cache entries with object address tag for named datatype only */
- if(dt->shared->state == H5T_STATE_OPEN && dt->shared->fo_count == 0) {
- hbool_t corked; /* Whether the named datatype is corked or not */
-
- if(H5AC_cork(dt->oloc.file, dt->oloc.addr, H5AC__GET_CORKED, &corked) < 0)
- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTGET, FAIL, "unable to retrieve an object's cork status")
- if(corked)
- if(H5AC_cork(dt->oloc.file, dt->oloc.addr, H5AC__UNCORK, NULL) < 0)
- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTUNCORK, FAIL, "unable to uncork an object")
- } /* end if */
+ /* Uncork cache entries with object address tag for named datatype only */
+ if(dt->shared->state == H5T_STATE_OPEN && dt->shared->fo_count == 0) {
+ hbool_t corked; /* Whether the named datatype is corked or not */
+
+ if(H5AC_cork(dt->oloc.file, dt->oloc.addr, H5AC__GET_CORKED, &corked) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTGET, FAIL, "unable to retrieve an object's cork status")
+ if(corked)
+ if(H5AC_cork(dt->oloc.file, dt->oloc.addr, H5AC__UNCORK, NULL) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTUNCORK, FAIL, "unable to uncork an object")
+ } /* end if */
if(H5T__free(dt) < 0)
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTFREE, FAIL, "unable to free datatype");
@@ -3668,40 +3652,40 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5T_set_size
+ * Function: H5T_set_size
*
- * Purpose: Sets the total size in bytes for a data type (this operation
- * is not permitted on reference data types). If the size is
- * decreased so that the significant bits of the data type
- * extend beyond the edge of the new size, then the `offset'
- * property is decreased toward zero. If the `offset' becomes
- * zero and the significant bits of the data type still hang
- * over the edge of the new size, then the number of significant
- * bits is decreased.
+ * Purpose: Sets the total size in bytes for a data type (this operation
+ * is not permitted on reference data types). If the size is
+ * decreased so that the significant bits of the data type
+ * extend beyond the edge of the new size, then the `offset'
+ * property is decreased toward zero. If the `offset' becomes
+ * zero and the significant bits of the data type still hang
+ * over the edge of the new size, then the number of significant
+ * bits is decreased.
*
- * Adjusting the size of an H5T_STRING automatically sets the
- * precision to 8*size.
+ * Adjusting the size of an H5T_STRING automatically sets the
+ * precision to 8*size.
*
- * All data types have a positive size.
+ * All data types have a positive size.
*
- * Return: Success: non-negative
+ * Return: Success: non-negative
*
- * Failure: nagative
+ * Failure: negative
*
- * Programmer: Robb Matzke
+ * Programmer: Robb Matzke
* Tuesday, December 22, 1998
*
* Modifications:
- * Robb Matzke, 22 Dec 1998
- * Also works with derived data types.
+ * Robb Matzke, 22 Dec 1998
+ * Also works with derived data types.
*
*-------------------------------------------------------------------------
*/
static herr_t
H5T_set_size(H5T_t *dt, size_t size)
{
- size_t prec, offset;
- herr_t ret_value=SUCCEED; /* Return value */
+ size_t prec, offset;
+ herr_t ret_value=SUCCEED; /* Return value */
FUNC_ENTER_NOAPI_NOINIT
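/*
 * Usage sketch (editor's illustration, not patch content): the behaviour
 * described above is reached from the application through H5Tset_size().
 * A minimal sketch of the two common string cases:
 */
#include "hdf5.h"

static herr_t
string_sizes(void)
{
    herr_t ret   = -1;
    hid_t  fixed = H5Tcopy(H5T_C_S1);
    hid_t  vlen  = H5Tcopy(H5T_C_S1);

    if(fixed >= 0 && vlen >= 0 &&
       H5Tset_size(fixed, 32) >= 0 &&             /* fixed-length: precision becomes 8*size */
       H5Tset_size(vlen, H5T_VARIABLE) >= 0)      /* H5T_VARIABLE: converts to a VL string */
        ret = 0;
    if(fixed >= 0) H5Tclose(fixed);
    if(vlen >= 0)  H5Tclose(vlen);
    return ret;
}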
@@ -3720,7 +3704,8 @@ H5T_set_size(H5T_t *dt, size_t size)
dt->shared->size = dt->shared->parent->shared->size * dt->shared->u.array.nelem;
else if(dt->shared->type!=H5T_VLEN)
dt->shared->size = dt->shared->parent->shared->size;
- } else {
+ }
+ else {
if (H5T_IS_ATOMIC(dt->shared)) {
offset = dt->shared->u.atomic.offset;
prec = dt->shared->u.atomic.prec;
@@ -3733,7 +3718,8 @@ H5T_set_size(H5T_t *dt, size_t size)
offset = 8 * size - prec;
if (prec > 8*size)
prec = 8 * size;
- } else {
+ }
+ else {
prec = offset = 0;
}
@@ -3747,28 +3733,28 @@ H5T_set_size(H5T_t *dt, size_t size)
case H5T_COMPOUND:
/* If decreasing size, check the last member isn't being cut. */
- if(size<dt->shared->size) {
+ if(size < dt->shared->size) {
int num_membs = 0;
- unsigned i, max_index=0;
- size_t memb_offset, max_offset=0;
+ unsigned i, max_index = 0;
+ size_t memb_offset, max_offset = 0;
size_t max_size;
- if((num_membs = H5T_get_nmembers(dt))<0)
+ if((num_membs = H5T_get_nmembers(dt)) < 0)
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "unable to get number of members");
if(num_membs) {
- for(i=0; i<(unsigned)num_membs; i++) {
- memb_offset = H5T_get_member_offset(dt, i);
- if(memb_offset > max_offset) {
- max_offset = memb_offset;
- max_index = i;
- }
- }
-
- max_size = H5T__get_member_size(dt, max_index);
-
- if(size<(max_offset+max_size))
- HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "size shrinking will cut off last member ");
+ for(i = 0; i < (unsigned)num_membs; i++) {
+ memb_offset = H5T_get_member_offset(dt, i);
+ if(memb_offset > max_offset) {
+ max_offset = memb_offset;
+ max_index = i;
+ }
+ }
+
+ max_size = H5T__get_member_size(dt, max_index);
+
+ if(size < (max_offset + max_size))
+ HGOTO_ERROR(H5E_ARGS, H5E_BADVALUE, FAIL, "size shrinking will cut off last member ");
}
/* Compound must not have been packed previously */
@@ -3781,10 +3767,10 @@ H5T_set_size(H5T_t *dt, size_t size)
case H5T_STRING:
/* Convert string to variable-length datatype */
- if(size==H5T_VARIABLE) {
- H5T_t *base = NULL; /* base data type */
- H5T_cset_t tmp_cset; /* Temp. cset info */
- H5T_str_t tmp_strpad; /* Temp. strpad info */
+ if(size == H5T_VARIABLE) {
+ H5T_t *base = NULL; /* base data type */
+ H5T_cset_t tmp_cset; /* Temp. cset info */
+ H5T_str_t tmp_strpad; /* Temp. strpad info */
/* Get a copy of unsigned char type as the base/parent type */
if(NULL == (base = (H5T_t *)H5I_object(H5T_NATIVE_UCHAR)))
@@ -3796,14 +3782,14 @@ H5T_set_size(H5T_t *dt, size_t size)
/*
* Force conversions (i.e. memory to memory conversions
- * should duplicate data, not point to the same VL strings)
+ * should duplicate data, not point to the same VL strings)
*/
dt->shared->force_conv = TRUE;
- /* Before we mess with the info in the union, extract the
- * values we need */
- tmp_cset=dt->shared->u.atomic.u.s.cset;
- tmp_strpad=dt->shared->u.atomic.u.s.pad;
+ /* Before we mess with the info in the union, extract the
+ * values we need */
+ tmp_cset = dt->shared->u.atomic.u.s.cset;
+ tmp_strpad = dt->shared->u.atomic.u.s.pad;
/* This is a string, not a sequence */
dt->shared->u.vlen.type = H5T_VLEN_STRING;
@@ -3816,7 +3802,8 @@ H5T_set_size(H5T_t *dt, size_t size)
if (H5T_set_loc(dt, NULL, H5T_LOC_MEMORY)<0)
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "invalid datatype location");
- } else {
+ }
+ else {
prec = 8 * size;
offset = 0;
} /* end else */
@@ -3850,7 +3837,7 @@ H5T_set_size(H5T_t *dt, size_t size)
}
/* Commit (if we didn't convert this type to a VL string) */
- if(dt->shared->type!=H5T_VLEN) {
+ if(dt->shared->type != H5T_VLEN) {
dt->shared->size = size;
if (H5T_IS_ATOMIC(dt->shared)) {
dt->shared->u.atomic.offset = offset;
@@ -3869,21 +3856,18 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5T_get_size
- *
- * Purpose: Determines the total size of a data type in bytes.
+ * Function: H5T_get_size
*
- * Return: Success: Size of the data type in bytes. The size of
- * the data type is the size of an instance of
- * that data type.
+ * Purpose: Determines the total size of a data type in bytes.
*
- * Failure: 0 (valid data types are never zero size)
+ * Return: Success: Size of the data type in bytes. The size of
+ * the data type is the size of an instance of
+ * that data type.
*
- * Programmer: Robb Matzke
- * Tuesday, December 9, 1997
- *
- * Modifications:
+ * Failure: 0 (valid data types are never zero size)
*
+ * Programmer: Robb Matzke
+ * Tuesday, December 9, 1997
*-------------------------------------------------------------------------
*/
size_t
@@ -3900,30 +3884,30 @@ H5T_get_size(const H5T_t *dt)
/*-------------------------------------------------------------------------
- * Function: H5T_cmp
+ * Function: H5T_cmp
*
- * Purpose: Compares two data types.
+ * Purpose: Compares two data types.
*
- * Return: Success: 0 if DT1 and DT2 are equal.
- * <0 if DT1 is less than DT2.
- * >0 if DT1 is greater than DT2.
+ * Return: Success: 0 if DT1 and DT2 are equal.
+ * <0 if DT1 is less than DT2.
+ * >0 if DT1 is greater than DT2.
*
- * Failure: 0, never fails
+ * Failure: 0, never fails
*
- * Programmer: Robb Matzke
- * Wednesday, December 10, 1997
+ * Programmer: Robb Matzke
+ * Wednesday, December 10, 1997
*
*-------------------------------------------------------------------------
*/
int
H5T_cmp(const H5T_t *dt1, const H5T_t *dt2, hbool_t superset)
{
- unsigned *idx1 = NULL, *idx2 = NULL;
- size_t base_size;
- hbool_t swapped;
- unsigned u;
- int tmp;
- int ret_value = 0;
+ unsigned *idx1 = NULL, *idx2 = NULL;
+ size_t base_size;
+ hbool_t swapped;
+ unsigned u;
+ int tmp;
+ int ret_value = 0;
FUNC_ENTER_NOAPI(0)
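/*
 * Usage sketch (editor's illustration, not patch content): H5T_cmp() provides
 * the ordering the public H5Tequal() check is built on.  Equality is
 * structural, so which standard type matches the native one depends on the
 * platform's byte order:
 */
#include <stdio.h>
#include "hdf5.h"

static void
compare_int_types(void)
{
    htri_t le = H5Tequal(H5T_NATIVE_INT, H5T_STD_I32LE);
    htri_t be = H5Tequal(H5T_NATIVE_INT, H5T_STD_I32BE);

    /* htri_t: positive = true, zero = false, negative = error */
    printf("native int == I32LE: %d, == I32BE: %d\n", (int)le, (int)be);
}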
@@ -3951,10 +3935,10 @@ H5T_cmp(const H5T_t *dt1, const H5T_t *dt2, hbool_t superset)
if(!dt1->shared->parent && dt2->shared->parent)
HGOTO_DONE(1);
if(dt1->shared->parent) {
- tmp = H5T_cmp(dt1->shared->parent, dt2->shared->parent, superset);
- if(tmp < 0)
+ tmp = H5T_cmp(dt1->shared->parent, dt2->shared->parent, superset);
+ if(tmp < 0)
HGOTO_DONE(-1);
- if(tmp > 0)
+ if(tmp > 0)
HGOTO_DONE(1);
} /* end if */
@@ -4005,7 +3989,7 @@ H5T_cmp(const H5T_t *dt1, const H5T_t *dt2, hbool_t superset)
#ifdef H5T_DEBUG
/* I don't quite trust the code above yet :-) --RPM */
- for (u=0; u<dt1->shared->u.compnd.nmembs-1; u++) {
+ for(u=0; u<dt1->shared->u.compnd.nmembs-1; u++) {
HDassert(HDstrcmp(dt1->shared->u.compnd.memb[idx1[u]].name,
dt1->shared->u.compnd.memb[idx1[u + 1]].name));
HDassert(HDstrcmp(dt2->shared->u.compnd.memb[idx2[u]].name,
@@ -4014,24 +3998,24 @@ H5T_cmp(const H5T_t *dt1, const H5T_t *dt2, hbool_t superset)
#endif
/* Compare the members */
- for (u=0; u<dt1->shared->u.compnd.nmembs; u++) {
+ for(u=0; u<dt1->shared->u.compnd.nmembs; u++) {
tmp = HDstrcmp(dt1->shared->u.compnd.memb[idx1[u]].name,
dt2->shared->u.compnd.memb[idx2[u]].name);
- if (tmp < 0)
+ if(tmp < 0)
HGOTO_DONE(-1);
- if (tmp > 0)
+ if(tmp > 0)
HGOTO_DONE(1);
- if (dt1->shared->u.compnd.memb[idx1[u]].offset < dt2->shared->u.compnd.memb[idx2[u]].offset) HGOTO_DONE(-1);
- if (dt1->shared->u.compnd.memb[idx1[u]].offset > dt2->shared->u.compnd.memb[idx2[u]].offset) HGOTO_DONE(1);
+ if(dt1->shared->u.compnd.memb[idx1[u]].offset < dt2->shared->u.compnd.memb[idx2[u]].offset) HGOTO_DONE(-1);
+ if(dt1->shared->u.compnd.memb[idx1[u]].offset > dt2->shared->u.compnd.memb[idx2[u]].offset) HGOTO_DONE(1);
- if (dt1->shared->u.compnd.memb[idx1[u]].size < dt2->shared->u.compnd.memb[idx2[u]].size) HGOTO_DONE(-1);
- if (dt1->shared->u.compnd.memb[idx1[u]].size > dt2->shared->u.compnd.memb[idx2[u]].size) HGOTO_DONE(1);
+ if(dt1->shared->u.compnd.memb[idx1[u]].size < dt2->shared->u.compnd.memb[idx2[u]].size) HGOTO_DONE(-1);
+ if(dt1->shared->u.compnd.memb[idx1[u]].size > dt2->shared->u.compnd.memb[idx2[u]].size) HGOTO_DONE(1);
tmp = H5T_cmp(dt1->shared->u.compnd.memb[idx1[u]].type,
dt2->shared->u.compnd.memb[idx2[u]].type, superset);
- if (tmp < 0) HGOTO_DONE(-1);
- if (tmp > 0) HGOTO_DONE(1);
+ if(tmp < 0) HGOTO_DONE(-1);
+ if(tmp > 0) HGOTO_DONE(1);
}
break;
@@ -4044,13 +4028,13 @@ H5T_cmp(const H5T_t *dt1, const H5T_t *dt2, hbool_t superset)
* more members than dt1
*/
if(superset) {
- if (dt1->shared->u.enumer.nmembs > dt2->shared->u.enumer.nmembs)
+ if(dt1->shared->u.enumer.nmembs > dt2->shared->u.enumer.nmembs)
HGOTO_DONE(1);
} /* end if */
else {
- if (dt1->shared->u.enumer.nmembs < dt2->shared->u.enumer.nmembs)
+ if(dt1->shared->u.enumer.nmembs < dt2->shared->u.enumer.nmembs)
HGOTO_DONE(-1);
- if (dt1->shared->u.enumer.nmembs > dt2->shared->u.enumer.nmembs)
+ if(dt1->shared->u.enumer.nmembs > dt2->shared->u.enumer.nmembs)
HGOTO_DONE(1);
} /* end else */
@@ -4058,15 +4042,15 @@ H5T_cmp(const H5T_t *dt1, const H5T_t *dt2, hbool_t superset)
if(NULL == (idx1 = (unsigned *)H5MM_malloc(dt1->shared->u.enumer.nmembs * sizeof(unsigned))) ||
NULL == (idx2 = (unsigned *)H5MM_malloc(dt2->shared->u.enumer.nmembs * sizeof(unsigned))))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, 0, "memory allocation failed");
- for (u=0; u<dt1->shared->u.enumer.nmembs; u++)
+ for(u=0; u<dt1->shared->u.enumer.nmembs; u++)
idx1[u] = u;
if(dt1->shared->u.enumer.nmembs > 1) {
int i;
for (i = (int) dt1->shared->u.enumer.nmembs - 1, swapped = TRUE; swapped && i >= 0; --i) {
int j;
- for (j = 0, swapped = FALSE; j < i; j++)
- if (HDstrcmp(dt1->shared->u.enumer.name[idx1[j]],
+ for(j = 0, swapped = FALSE; j < i; j++)
+ if(HDstrcmp(dt1->shared->u.enumer.name[idx1[j]],
dt1->shared->u.enumer.name[idx1[j+1]]) > 0) {
unsigned tmp_idx = idx1[j];
idx1[j] = idx1[j+1];
@@ -4075,16 +4059,16 @@ H5T_cmp(const H5T_t *dt1, const H5T_t *dt2, hbool_t superset)
}
}
}
- for (u=0; u<dt2->shared->u.enumer.nmembs; u++)
+ for(u=0; u<dt2->shared->u.enumer.nmembs; u++)
idx2[u] = u;
if(dt2->shared->u.enumer.nmembs > 1) {
int i;
- for (i = (int) dt2->shared->u.enumer.nmembs - 1, swapped = TRUE; swapped && i >= 0; --i) {
+ for(i = (int) dt2->shared->u.enumer.nmembs - 1, swapped = TRUE; swapped && i >= 0; --i) {
int j;
- for (j = 0, swapped = FALSE; j < i; j++)
- if (HDstrcmp(dt2->shared->u.enumer.name[idx2[j]],
+ for(j = 0, swapped = FALSE; j < i; j++)
+ if(HDstrcmp(dt2->shared->u.enumer.name[idx2[j]],
dt2->shared->u.enumer.name[idx2[j+1]]) > 0) {
unsigned tmp_idx = idx2[j];
idx2[j] = idx2[j+1];
@@ -4096,7 +4080,7 @@ H5T_cmp(const H5T_t *dt1, const H5T_t *dt2, hbool_t superset)
#ifdef H5T_DEBUG
/* I don't quite trust the code above yet :-) --RPM */
- for (u=0; u<dt1->shared->u.enumer.nmembs-1; u++) {
+ for(u=0; u<dt1->shared->u.enumer.nmembs-1; u++) {
HDassert(HDstrcmp(dt1->shared->u.enumer.name[idx1[u]],
dt1->shared->u.enumer.name[idx1[u+1]]));
HDassert(HDstrcmp(dt2->shared->u.enumer.name[idx2[u]],
@@ -4106,12 +4090,12 @@ H5T_cmp(const H5T_t *dt1, const H5T_t *dt2, hbool_t superset)
/* Compare the members */
base_size = dt1->shared->parent->shared->size;
- for (u=0; u<dt1->shared->u.enumer.nmembs; u++) {
+ for(u=0; u<dt1->shared->u.enumer.nmembs; u++) {
unsigned idx = 0;
if(superset) {
unsigned lt = 0, rt; /* Final, left & right key indices */
- int cmp = 1; /* Key comparison value */
+ int cmp = 1; /* Key comparison value */
/* If a superset is allowed, dt2 may have more members
* than dt1, so binary search for matching member name in
@@ -4119,18 +4103,18 @@ H5T_cmp(const H5T_t *dt1, const H5T_t *dt2, hbool_t superset)
*/
rt = dt2->shared->u.enumer.nmembs;
- while (lt < rt && cmp) {
+ while(lt < rt && cmp) {
idx = (lt + rt) / 2;
/* compare */
- if ((cmp = HDstrcmp(dt1->shared->u.enumer.name[idx1[u]],
+ if((cmp = HDstrcmp(dt1->shared->u.enumer.name[idx1[u]],
dt2->shared->u.enumer.name[idx2[idx]] ) ) < 0)
rt = idx;
else
lt = idx+1;
}
/* Leave, if we couldn't find match */
- if (cmp)
+ if(cmp)
HGOTO_DONE(-1);
} /* end if */
else {
@@ -4149,8 +4133,8 @@ H5T_cmp(const H5T_t *dt1, const H5T_t *dt2, hbool_t superset)
tmp = HDmemcmp(dt1->shared->u.enumer.value+idx1[u]*base_size,
dt2->shared->u.enumer.value+idx2[idx]*base_size,
base_size);
- if (tmp<0) HGOTO_DONE(-1);
- if (tmp>0) HGOTO_DONE(1);
+ if(tmp<0) HGOTO_DONE(-1);
+ if(tmp>0) HGOTO_DONE(1);
}
break;
@@ -4161,29 +4145,32 @@ H5T_cmp(const H5T_t *dt1, const H5T_t *dt2, hbool_t superset)
HDassert(dt2->shared->u.vlen.loc>=H5T_LOC_BADLOC && dt2->shared->u.vlen.loc<H5T_LOC_MAXLOC);
/* Arbitrarily sort sequence VL datatypes before string VL datatypes */
- if (dt1->shared->u.vlen.type==H5T_VLEN_SEQUENCE &&
+ if(dt1->shared->u.vlen.type==H5T_VLEN_SEQUENCE &&
dt2->shared->u.vlen.type==H5T_VLEN_STRING) {
HGOTO_DONE(-1);
- } else if (dt1->shared->u.vlen.type==H5T_VLEN_STRING &&
+ }
+ else if(dt1->shared->u.vlen.type==H5T_VLEN_STRING &&
dt2->shared->u.vlen.type==H5T_VLEN_SEQUENCE) {
HGOTO_DONE(1);
}
/* Arbitrarily sort VL datatypes in memory before disk */
- if (dt1->shared->u.vlen.loc==H5T_LOC_MEMORY &&
+ if(dt1->shared->u.vlen.loc==H5T_LOC_MEMORY &&
dt2->shared->u.vlen.loc==H5T_LOC_DISK) {
HGOTO_DONE(-1);
- } else if (dt1->shared->u.vlen.loc==H5T_LOC_DISK &&
+ }
+ else if(dt1->shared->u.vlen.loc==H5T_LOC_DISK &&
dt2->shared->u.vlen.loc==H5T_LOC_MEMORY) {
HGOTO_DONE(1);
- } else if (dt1->shared->u.vlen.loc==H5T_LOC_BADLOC &&
+ }
+ else if(dt1->shared->u.vlen.loc==H5T_LOC_BADLOC &&
dt2->shared->u.vlen.loc!=H5T_LOC_BADLOC) {
HGOTO_DONE(1);
}
/* Don't allow VL types in different files to compare as equal */
- if (dt1->shared->u.vlen.f < dt2->shared->u.vlen.f)
+ if(dt1->shared->u.vlen.f < dt2->shared->u.vlen.f)
HGOTO_DONE(-1);
- if (dt1->shared->u.vlen.f > dt2->shared->u.vlen.f)
+ if(dt1->shared->u.vlen.f > dt2->shared->u.vlen.f)
HGOTO_DONE(1);
break;
@@ -4193,22 +4180,22 @@ H5T_cmp(const H5T_t *dt1, const H5T_t *dt2, hbool_t superset)
break;
case H5T_ARRAY:
- if (dt1->shared->u.array.ndims < dt2->shared->u.array.ndims)
+ if(dt1->shared->u.array.ndims < dt2->shared->u.array.ndims)
HGOTO_DONE(-1);
- if (dt1->shared->u.array.ndims > dt2->shared->u.array.ndims)
+ if(dt1->shared->u.array.ndims > dt2->shared->u.array.ndims)
HGOTO_DONE(1);
- for (u=0; u<dt1->shared->u.array.ndims; u++) {
- if (dt1->shared->u.array.dim[u] < dt2->shared->u.array.dim[u])
+ for(u=0; u<dt1->shared->u.array.ndims; u++) {
+ if(dt1->shared->u.array.dim[u] < dt2->shared->u.array.dim[u])
HGOTO_DONE(-1);
- if (dt1->shared->u.array.dim[u] > dt2->shared->u.array.dim[u])
+ if(dt1->shared->u.array.dim[u] > dt2->shared->u.array.dim[u])
HGOTO_DONE(1);
}
tmp = H5T_cmp(dt1->shared->parent, dt2->shared->parent, superset);
- if (tmp < 0)
+ if(tmp < 0)
HGOTO_DONE(-1);
- if (tmp > 0)
+ if(tmp > 0)
HGOTO_DONE(1);
break;
@@ -4224,62 +4211,62 @@ H5T_cmp(const H5T_t *dt1, const H5T_t *dt2, hbool_t superset)
/*
* Atomic datatypes...
*/
- if (dt1->shared->u.atomic.order < dt2->shared->u.atomic.order) HGOTO_DONE(-1);
- if (dt1->shared->u.atomic.order > dt2->shared->u.atomic.order) HGOTO_DONE(1);
+ if(dt1->shared->u.atomic.order < dt2->shared->u.atomic.order) HGOTO_DONE(-1);
+ if(dt1->shared->u.atomic.order > dt2->shared->u.atomic.order) HGOTO_DONE(1);
- if (dt1->shared->u.atomic.prec < dt2->shared->u.atomic.prec) HGOTO_DONE(-1);
- if (dt1->shared->u.atomic.prec > dt2->shared->u.atomic.prec) HGOTO_DONE(1);
+ if(dt1->shared->u.atomic.prec < dt2->shared->u.atomic.prec) HGOTO_DONE(-1);
+ if(dt1->shared->u.atomic.prec > dt2->shared->u.atomic.prec) HGOTO_DONE(1);
- if (dt1->shared->u.atomic.offset < dt2->shared->u.atomic.offset) HGOTO_DONE(-1);
- if (dt1->shared->u.atomic.offset > dt2->shared->u.atomic.offset) HGOTO_DONE(1);
+ if(dt1->shared->u.atomic.offset < dt2->shared->u.atomic.offset) HGOTO_DONE(-1);
+ if(dt1->shared->u.atomic.offset > dt2->shared->u.atomic.offset) HGOTO_DONE(1);
- if (dt1->shared->u.atomic.lsb_pad < dt2->shared->u.atomic.lsb_pad) HGOTO_DONE(-1);
- if (dt1->shared->u.atomic.lsb_pad > dt2->shared->u.atomic.lsb_pad) HGOTO_DONE(1);
+ if(dt1->shared->u.atomic.lsb_pad < dt2->shared->u.atomic.lsb_pad) HGOTO_DONE(-1);
+ if(dt1->shared->u.atomic.lsb_pad > dt2->shared->u.atomic.lsb_pad) HGOTO_DONE(1);
- if (dt1->shared->u.atomic.msb_pad < dt2->shared->u.atomic.msb_pad) HGOTO_DONE(-1);
- if (dt1->shared->u.atomic.msb_pad > dt2->shared->u.atomic.msb_pad) HGOTO_DONE(1);
+ if(dt1->shared->u.atomic.msb_pad < dt2->shared->u.atomic.msb_pad) HGOTO_DONE(-1);
+ if(dt1->shared->u.atomic.msb_pad > dt2->shared->u.atomic.msb_pad) HGOTO_DONE(1);
switch (dt1->shared->type) {
case H5T_INTEGER:
- if (dt1->shared->u.atomic.u.i.sign < dt2->shared->u.atomic.u.i.sign)
+ if(dt1->shared->u.atomic.u.i.sign < dt2->shared->u.atomic.u.i.sign)
HGOTO_DONE(-1);
- if (dt1->shared->u.atomic.u.i.sign > dt2->shared->u.atomic.u.i.sign)
+ if(dt1->shared->u.atomic.u.i.sign > dt2->shared->u.atomic.u.i.sign)
HGOTO_DONE(1);
break;
case H5T_FLOAT:
- if (dt1->shared->u.atomic.u.f.sign < dt2->shared->u.atomic.u.f.sign)
+ if(dt1->shared->u.atomic.u.f.sign < dt2->shared->u.atomic.u.f.sign)
HGOTO_DONE(-1);
- if (dt1->shared->u.atomic.u.f.sign > dt2->shared->u.atomic.u.f.sign)
+ if(dt1->shared->u.atomic.u.f.sign > dt2->shared->u.atomic.u.f.sign)
HGOTO_DONE(1);
- if (dt1->shared->u.atomic.u.f.epos < dt2->shared->u.atomic.u.f.epos)
+ if(dt1->shared->u.atomic.u.f.epos < dt2->shared->u.atomic.u.f.epos)
HGOTO_DONE(-1);
- if (dt1->shared->u.atomic.u.f.epos > dt2->shared->u.atomic.u.f.epos)
+ if(dt1->shared->u.atomic.u.f.epos > dt2->shared->u.atomic.u.f.epos)
HGOTO_DONE(1);
- if (dt1->shared->u.atomic.u.f.esize < dt2->shared->u.atomic.u.f.esize) HGOTO_DONE(-1);
- if (dt1->shared->u.atomic.u.f.esize > dt2->shared->u.atomic.u.f.esize) HGOTO_DONE(1);
+ if(dt1->shared->u.atomic.u.f.esize < dt2->shared->u.atomic.u.f.esize) HGOTO_DONE(-1);
+ if(dt1->shared->u.atomic.u.f.esize > dt2->shared->u.atomic.u.f.esize) HGOTO_DONE(1);
- if (dt1->shared->u.atomic.u.f.ebias < dt2->shared->u.atomic.u.f.ebias) HGOTO_DONE(-1);
- if (dt1->shared->u.atomic.u.f.ebias > dt2->shared->u.atomic.u.f.ebias) HGOTO_DONE(1);
+ if(dt1->shared->u.atomic.u.f.ebias < dt2->shared->u.atomic.u.f.ebias) HGOTO_DONE(-1);
+ if(dt1->shared->u.atomic.u.f.ebias > dt2->shared->u.atomic.u.f.ebias) HGOTO_DONE(1);
- if (dt1->shared->u.atomic.u.f.mpos < dt2->shared->u.atomic.u.f.mpos)
+ if(dt1->shared->u.atomic.u.f.mpos < dt2->shared->u.atomic.u.f.mpos)
HGOTO_DONE(-1);
- if (dt1->shared->u.atomic.u.f.mpos > dt2->shared->u.atomic.u.f.mpos)
+ if(dt1->shared->u.atomic.u.f.mpos > dt2->shared->u.atomic.u.f.mpos)
HGOTO_DONE(1);
- if (dt1->shared->u.atomic.u.f.msize < dt2->shared->u.atomic.u.f.msize) HGOTO_DONE(-1);
- if (dt1->shared->u.atomic.u.f.msize > dt2->shared->u.atomic.u.f.msize) HGOTO_DONE(1);
+ if(dt1->shared->u.atomic.u.f.msize < dt2->shared->u.atomic.u.f.msize) HGOTO_DONE(-1);
+ if(dt1->shared->u.atomic.u.f.msize > dt2->shared->u.atomic.u.f.msize) HGOTO_DONE(1);
- if (dt1->shared->u.atomic.u.f.norm < dt2->shared->u.atomic.u.f.norm)
+ if(dt1->shared->u.atomic.u.f.norm < dt2->shared->u.atomic.u.f.norm)
HGOTO_DONE(-1);
- if (dt1->shared->u.atomic.u.f.norm > dt2->shared->u.atomic.u.f.norm)
+ if(dt1->shared->u.atomic.u.f.norm > dt2->shared->u.atomic.u.f.norm)
HGOTO_DONE(1);
- if (dt1->shared->u.atomic.u.f.pad < dt2->shared->u.atomic.u.f.pad)
+ if(dt1->shared->u.atomic.u.f.pad < dt2->shared->u.atomic.u.f.pad)
HGOTO_DONE(-1);
- if (dt1->shared->u.atomic.u.f.pad > dt2->shared->u.atomic.u.f.pad)
+ if(dt1->shared->u.atomic.u.f.pad > dt2->shared->u.atomic.u.f.pad)
HGOTO_DONE(1);
break;
@@ -4289,14 +4276,14 @@ H5T_cmp(const H5T_t *dt1, const H5T_t *dt2, hbool_t superset)
break;
case H5T_STRING:
- if (dt1->shared->u.atomic.u.s.cset < dt2->shared->u.atomic.u.s.cset)
+ if(dt1->shared->u.atomic.u.s.cset < dt2->shared->u.atomic.u.s.cset)
HGOTO_DONE(-1);
- if (dt1->shared->u.atomic.u.s.cset > dt2->shared->u.atomic.u.s.cset)
+ if(dt1->shared->u.atomic.u.s.cset > dt2->shared->u.atomic.u.s.cset)
HGOTO_DONE(1);
- if (dt1->shared->u.atomic.u.s.pad < dt2->shared->u.atomic.u.s.pad)
+ if(dt1->shared->u.atomic.u.s.pad < dt2->shared->u.atomic.u.s.pad)
HGOTO_DONE(-1);
- if (dt1->shared->u.atomic.u.s.pad > dt2->shared->u.atomic.u.s.pad)
+ if(dt1->shared->u.atomic.u.s.pad > dt2->shared->u.atomic.u.s.pad)
HGOTO_DONE(1);
break;
@@ -4306,16 +4293,16 @@ H5T_cmp(const H5T_t *dt1, const H5T_t *dt2, hbool_t superset)
break;
case H5T_REFERENCE:
- if (dt1->shared->u.atomic.u.r.rtype < dt2->shared->u.atomic.u.r.rtype)
+ if(dt1->shared->u.atomic.u.r.rtype < dt2->shared->u.atomic.u.r.rtype)
HGOTO_DONE(-1);
- if (dt1->shared->u.atomic.u.r.rtype > dt2->shared->u.atomic.u.r.rtype)
+ if(dt1->shared->u.atomic.u.r.rtype > dt2->shared->u.atomic.u.r.rtype)
HGOTO_DONE(1);
switch(dt1->shared->u.atomic.u.r.rtype) {
case H5R_OBJECT:
- if (dt1->shared->u.atomic.u.r.loc < dt2->shared->u.atomic.u.r.loc)
+ if(dt1->shared->u.atomic.u.r.loc < dt2->shared->u.atomic.u.r.loc)
HGOTO_DONE(-1);
- if (dt1->shared->u.atomic.u.r.loc > dt2->shared->u.atomic.u.r.loc)
+ if(dt1->shared->u.atomic.u.r.loc > dt2->shared->u.atomic.u.r.loc)
HGOTO_DONE(1);
break;
@@ -4359,29 +4346,29 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5T_path_find
+ * Function: H5T_path_find
*
- * Purpose: Finds the path which converts type SRC_ID to type DST_ID,
- * creating a new path if necessary. If FUNC is non-zero then
- * it is set as the hard conversion function for that path
- * regardless of whether the path previously existed. Changing
- * the conversion function of a path causes statistics to be
- * reset to zero after printing them. The NAME is used only
- * when creating a new path and is just for debugging.
+ * Purpose: Finds the path which converts type SRC_ID to type DST_ID,
+ * creating a new path if necessary. If FUNC is non-zero then
+ * it is set as the hard conversion function for that path
+ * regardless of whether the path previously existed. Changing
+ * the conversion function of a path causes statistics to be
+ * reset to zero after printing them. The NAME is used only
+ * when creating a new path and is just for debugging.
*
- * If SRC and DST are both null pointers then the special no-op
- * conversion path is used. This path is always stored as the
- * first path in the path table.
+ * If SRC and DST are both null pointers then the special no-op
+ * conversion path is used. This path is always stored as the
+ * first path in the path table.
*
- * Return: Success: Pointer to the path, valid until the path
- * database is modified.
+ * Return: Success: Pointer to the path, valid until the path
+ * database is modified.
*
- * Failure: NULL if the path does not exist and no
- * function can be found to apply to the new
- * path.
+ * Failure: NULL if the path does not exist and no
+ * function can be found to apply to the new
+ * path.
*
- * Programmer: Robb Matzke
- * Tuesday, January 13, 1998
+ * Programmer: Robb Matzke
+ * Tuesday, January 13, 1998
*
* Modifications:
* Added a parameter IS_API to indicate whether to an API
@@ -4399,16 +4386,16 @@ H5T_path_t *
H5T_path_find(const H5T_t *src, const H5T_t *dst, const char *name,
H5T_conv_t func, hid_t dxpl_id, hbool_t is_api)
{
- int lt, rt; /*left and right edges */
- int md; /*middle */
- int cmp; /*comparison result */
- int old_npaths; /* Previous number of paths in table */
- H5T_path_t *table = NULL; /*path existing in the table */
- H5T_path_t *path = NULL; /*new path */
- hid_t src_id = -1, dst_id = -1; /*src and dst type identifiers */
- int i; /*counter */
- int nprint = 0; /*lines of output printed */
- H5T_path_t *ret_value = NULL; /* Return value */
+ int lt, rt; /* left and right edges */
+ int md; /* middle */
+ int cmp; /* comparison result */
+ int old_npaths; /* Previous number of paths in table */
+ H5T_path_t *table = NULL; /* path existing in the table */
+ H5T_path_t *path = NULL; /* new path */
+ hid_t src_id = -1, dst_id = -1; /* src and dst type identifiers */
+ int i; /* counter */
+ int nprint = 0; /* lines of output printed */
+ H5T_path_t *ret_value = NULL; /* Return value */
FUNC_ENTER_NOAPI(NULL)
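/*
 * Usage sketch (editor's illustration, not patch content): conversion paths
 * get their hard/soft functions from the application side through
 * H5Tregister().  A skeletal soft-conversion callback with no real conversion
 * work shown; `my_int_conv` is a made-up name:
 */
#include "hdf5.h"

static herr_t
my_int_conv(hid_t src_id, hid_t dst_id, H5T_cdata_t *cdata, size_t nelmts,
            size_t buf_stride, size_t bkg_stride, void *buf, void *bkg,
            hid_t dxpl_id)
{
    switch(cdata->command) {
        case H5T_CONV_INIT:               /* path being built: verify src/dst here */
        case H5T_CONV_FREE:               /* path being torn down: release private data */
            return 0;
        case H5T_CONV_CONV:               /* convert nelmts values in buf, in place */
            return 0;
        default:
            return -1;
    }
}

/* Registered (for example) with:
 *   H5Tregister(H5T_PERS_SOFT, "my_int_conv", H5T_NATIVE_INT, H5T_NATIVE_LONG, my_int_conv);
 */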
@@ -4422,23 +4409,23 @@ H5T_path_find(const H5T_t *src, const H5T_t *dst, const char *name,
* Make sure the first entry in the table is the no-op conversion path.
*/
if(0 == H5T_g.npaths) {
- if(NULL == (H5T_g.path = (H5T_path_t **)H5MM_malloc(128 * sizeof(H5T_path_t *))))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for type conversion path table")
- H5T_g.apaths = 128;
- if(NULL == (H5T_g.path[0] = H5FL_CALLOC(H5T_path_t)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for no-op conversion path")
+ if(NULL == (H5T_g.path = (H5T_path_t **)H5MM_malloc(128 * sizeof(H5T_path_t *))))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for type conversion path table")
+ H5T_g.apaths = 128;
+ if(NULL == (H5T_g.path[0] = H5FL_CALLOC(H5T_path_t)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for no-op conversion path")
HDsnprintf(H5T_g.path[0]->name, sizeof(H5T_g.path[0]->name), "no-op");
- H5T_g.path[0]->func = H5T__conv_noop;
- H5T_g.path[0]->cdata.command = H5T_CONV_INIT;
- if(H5T__conv_noop((hid_t)FAIL, (hid_t)FAIL, &(H5T_g.path[0]->cdata), (size_t)0, (size_t)0, (size_t)0, NULL, NULL, dxpl_id) < 0) {
+ H5T_g.path[0]->func = H5T__conv_noop;
+ H5T_g.path[0]->cdata.command = H5T_CONV_INIT;
+ if(H5T__conv_noop((hid_t)FAIL, (hid_t)FAIL, &(H5T_g.path[0]->cdata), (size_t)0, (size_t)0, (size_t)0, NULL, NULL, dxpl_id) < 0) {
#ifdef H5T_DEBUG
- if(H5DEBUG(T))
- fprintf(H5DEBUG(T), "H5T: unable to initialize no-op conversion function (ignored)\n");
+ if(H5DEBUG(T))
+ fprintf(H5DEBUG(T), "H5T: unable to initialize no-op conversion function (ignored)\n");
#endif
- H5E_clear_stack(NULL); /*ignore the error*/
- } /* end if */
- H5T_g.path[0]->is_noop = TRUE;
- H5T_g.npaths = 1;
+ H5E_clear_stack(NULL); /*ignore the error*/
+ } /* end if */
+ H5T_g.path[0]->is_noop = TRUE;
+ H5T_g.npaths = 1;
} /* end if */
/*
@@ -4451,28 +4438,28 @@ H5T_path_find(const H5T_t *src, const H5T_t *dst, const char *name,
* are set
*/
if(src->shared->force_conv == FALSE && dst->shared->force_conv == FALSE && 0 == H5T_cmp(src, dst, TRUE)) {
- table = H5T_g.path[0];
- cmp = 0;
- md = 0;
+ table = H5T_g.path[0];
+ cmp = 0;
+ md = 0;
} /* end if */
else {
- lt = md = 1;
- rt = H5T_g.npaths;
- cmp = -1;
-
- while(cmp && lt < rt) {
- md = (lt + rt) / 2;
- HDassert(H5T_g.path[md]);
- cmp = H5T_cmp(src, H5T_g.path[md]->src, FALSE);
- if(0 == cmp)
+ lt = md = 1;
+ rt = H5T_g.npaths;
+ cmp = -1;
+
+ while(cmp && lt < rt) {
+ md = (lt + rt) / 2;
+ HDassert(H5T_g.path[md]);
+ cmp = H5T_cmp(src, H5T_g.path[md]->src, FALSE);
+ if(0 == cmp)
cmp = H5T_cmp(dst, H5T_g.path[md]->dst, FALSE);
- if(cmp < 0)
- rt = md;
- else if(cmp > 0)
- lt = md + 1;
- else
- table = H5T_g.path[md];
- } /* end while */
+ if(cmp < 0)
+ rt = md;
+ else if(cmp > 0)
+ lt = md + 1;
+ else
+ table = H5T_g.path[md];
+ } /* end while */
} /* end else */
/* Keep a record of the number of paths in the table, in case one of the
@@ -4488,21 +4475,21 @@ H5T_path_find(const H5T_t *src, const H5T_t *dst, const char *name,
* create a new path and add the new function to the path.
*/
if(!table || (table && func && is_api) || (table && !table->is_hard && func && !is_api)) {
- if(NULL == (path = H5FL_CALLOC(H5T_path_t)))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for type conversion path")
- if(name && *name) {
- HDstrncpy(path->name, name, (size_t)H5T_NAMELEN);
- path->name[H5T_NAMELEN - 1] = '\0';
+ if(NULL == (path = H5FL_CALLOC(H5T_path_t)))
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed for type conversion path")
+ if(name && *name) {
+ HDstrncpy(path->name, name, (size_t)H5T_NAMELEN);
+ path->name[H5T_NAMELEN - 1] = '\0';
} /* end if */
- else
- HDsnprintf(path->name, sizeof(path->name), "NONAME");
- if(NULL == (path->src = H5T_copy(src, H5T_COPY_ALL)))
- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, NULL, "unable to copy datatype for conversion path")
+ else
+ HDsnprintf(path->name, sizeof(path->name), "NONAME");
+ if(NULL == (path->src = H5T_copy(src, H5T_COPY_ALL)))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, NULL, "unable to copy datatype for conversion path")
if(NULL == (path->dst = H5T_copy(dst, H5T_COPY_ALL)))
- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, NULL, "unable to copy datatype for conversion path")
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, NULL, "unable to copy datatype for conversion path")
} /* end if */
else
- path = table;
+ path = table;
/*
* If a hard conversion function is specified and none is defined for the
@@ -4511,22 +4498,22 @@ H5T_path_find(const H5T_t *src, const H5T_t *dst, const char *name,
* and initialize its conversion data.
*/
if(func && (!table || (table && is_api) || (table && !table->is_hard && !is_api))) {
- HDassert(path != table);
- HDassert(NULL == path->func);
- if(path->src && (src_id = H5I_register(H5I_DATATYPE, H5T_copy(path->src, H5T_COPY_ALL), FALSE)) < 0)
- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTREGISTER, NULL, "unable to register source conversion type for query")
- if(path->dst && (dst_id = H5I_register(H5I_DATATYPE, H5T_copy(path->dst, H5T_COPY_ALL), FALSE)) < 0)
- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTREGISTER, NULL, "unable to register destination conversion type for query")
- path->cdata.command = H5T_CONV_INIT;
- if((func)(src_id, dst_id, &(path->cdata), (size_t)0, (size_t)0, (size_t)0, NULL, NULL, dxpl_id) < 0)
- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, NULL, "unable to initialize conversion function")
- if(src_id >= 0)
+ HDassert(path != table);
+ HDassert(NULL == path->func);
+ if(path->src && (src_id = H5I_register(H5I_DATATYPE, H5T_copy(path->src, H5T_COPY_ALL), FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTREGISTER, NULL, "unable to register source conversion type for query")
+ if(path->dst && (dst_id = H5I_register(H5I_DATATYPE, H5T_copy(path->dst, H5T_COPY_ALL), FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTREGISTER, NULL, "unable to register destination conversion type for query")
+ path->cdata.command = H5T_CONV_INIT;
+ if((func)(src_id, dst_id, &(path->cdata), (size_t)0, (size_t)0, (size_t)0, NULL, NULL, dxpl_id) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, NULL, "unable to initialize conversion function")
+ if(src_id >= 0)
H5I_dec_ref(src_id);
- if(dst_id >= 0)
+ if(dst_id >= 0)
H5I_dec_ref(dst_id);
- src_id = dst_id = -1;
- path->func = func;
- path->is_hard = TRUE;
+ src_id = dst_id = -1;
+ path->func = func;
+ path->is_hard = TRUE;
} /* end if */
/*
@@ -4537,29 +4524,29 @@ H5T_path_find(const H5T_t *src, const H5T_t *dst, const char *name,
*/
HDassert(path->func || (src && dst));
for(i = H5T_g.nsoft - 1; i >= 0 && !path->func; --i) {
- if(src->shared->type != H5T_g.soft[i].src || dst->shared->type != H5T_g.soft[i].dst)
- continue;
- if((src_id = H5I_register(H5I_DATATYPE, H5T_copy(path->src, H5T_COPY_ALL), FALSE)) < 0)
- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTREGISTER, NULL, "unable to register src conversion type for query")
- if((dst_id = H5I_register(H5I_DATATYPE, H5T_copy(path->dst, H5T_COPY_ALL), FALSE)) < 0)
- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTREGISTER, NULL, "unable to register dst conversion type for query")
- path->cdata.command = H5T_CONV_INIT;
- if((H5T_g.soft[i].func)(src_id, dst_id, &(path->cdata), (size_t)0, (size_t)0, (size_t)0, NULL, NULL, dxpl_id) < 0) {
- HDmemset(&(path->cdata), 0, sizeof(H5T_cdata_t));
- H5E_clear_stack(H5E_DEFAULT); /*ignore the error*/
- } /* end if */
+ if(src->shared->type != H5T_g.soft[i].src || dst->shared->type != H5T_g.soft[i].dst)
+ continue;
+ if((src_id = H5I_register(H5I_DATATYPE, H5T_copy(path->src, H5T_COPY_ALL), FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTREGISTER, NULL, "unable to register src conversion type for query")
+ if((dst_id = H5I_register(H5I_DATATYPE, H5T_copy(path->dst, H5T_COPY_ALL), FALSE)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTREGISTER, NULL, "unable to register dst conversion type for query")
+ path->cdata.command = H5T_CONV_INIT;
+ if((H5T_g.soft[i].func)(src_id, dst_id, &(path->cdata), (size_t)0, (size_t)0, (size_t)0, NULL, NULL, dxpl_id) < 0) {
+ HDmemset(&(path->cdata), 0, sizeof(H5T_cdata_t));
+ H5E_clear_stack(H5E_DEFAULT); /*ignore the error*/
+ } /* end if */
else {
- HDstrncpy(path->name, H5T_g.soft[i].name, (size_t)H5T_NAMELEN);
- path->name[H5T_NAMELEN - 1] = '\0';
- path->func = H5T_g.soft[i].func;
- path->is_hard = FALSE;
- } /* end else */
- H5I_dec_ref(src_id);
- H5I_dec_ref(dst_id);
- src_id = dst_id = -1;
+ HDstrncpy(path->name, H5T_g.soft[i].name, (size_t)H5T_NAMELEN);
+ path->name[H5T_NAMELEN - 1] = '\0';
+ path->func = H5T_g.soft[i].func;
+ path->is_hard = FALSE;
+ } /* end else */
+ H5I_dec_ref(src_id);
+ H5I_dec_ref(dst_id);
+ src_id = dst_id = -1;
} /* end for */
if(!path->func)
- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, NULL, "no appropriate function for conversion path")
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, NULL, "no appropriate function for conversion path")
/* Check if paths were inserted into the table through a recursive call
* and re-compute the correct location for this path if so. - QAK, 1/26/02
@@ -4586,34 +4573,35 @@ H5T_path_find(const H5T_t *src, const H5T_t *dst, const char *name,
/* Replace an existing table entry or add a new entry */
if(table && path != table) {
- HDassert(table == H5T_g.path[md]);
- H5T__print_stats(table, &nprint/*in,out*/);
- table->cdata.command = H5T_CONV_FREE;
- if((table->func)((hid_t)FAIL, (hid_t)FAIL, &(table->cdata), (size_t)0, (size_t)0, (size_t)0, NULL, NULL, dxpl_id) < 0) {
+ HDassert(table == H5T_g.path[md]);
+ H5T__print_stats(table, &nprint/*in,out*/);
+ table->cdata.command = H5T_CONV_FREE;
+ if((table->func)((hid_t)FAIL, (hid_t)FAIL, &(table->cdata), (size_t)0, (size_t)0, (size_t)0, NULL, NULL, dxpl_id) < 0) {
#ifdef H5T_DEBUG
- if(H5DEBUG(T)) {
- fprintf(H5DEBUG(T), "H5T: conversion function 0x%08lx free "
- "failed for %s (ignored)\n",
- (unsigned long)(path->func), path->name);
- } /* end if */
+ if(H5DEBUG(T)) {
+ fprintf(H5DEBUG(T), "H5T: conversion function 0x%08lx free "
+ "failed for %s (ignored)\n",
+ (unsigned long)(path->func), path->name);
+ } /* end if */
#endif
- H5E_clear_stack(NULL); /*ignore the failure*/
- } /* end if */
- if(table->src)
+ H5E_clear_stack(NULL); /*ignore the failure*/
+ } /* end if */
+ if(table->src)
H5T_close(table->src);
- if(table->dst)
+ if(table->dst)
H5T_close(table->dst);
table = H5FL_FREE(H5T_path_t, table);
- table = path;
- H5T_g.path[md] = path;
- } else if(path != table) {
- HDassert(cmp);
+ table = path;
+ H5T_g.path[md] = path;
+ }
+ else if(path != table) {
+ HDassert(cmp);
if((size_t)H5T_g.npaths >= H5T_g.apaths) {
size_t na = MAX(128, 2 * H5T_g.apaths);
H5T_path_t **x;
if(NULL == (x = (H5T_path_t **)H5MM_realloc(H5T_g.path, na * sizeof(H5T_path_t*))))
- HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
+ HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, NULL, "memory allocation failed")
H5T_g.apaths = na;
H5T_g.path = x;
} /* end if */
@@ -4621,8 +4609,8 @@ H5T_path_find(const H5T_t *src, const H5T_t *dst, const char *name,
md++;
HDmemmove(H5T_g.path + md + 1, H5T_g.path + md, (size_t) (H5T_g.npaths - md) * sizeof(H5T_path_t*));
H5T_g.npaths++;
- H5T_g.path[md] = path;
- table = path;
+ H5T_g.path[md] = path;
+ table = path;
} /* end else-if */
/* Set the flag to indicate both source and destination types are compound types
@@ -4635,9 +4623,9 @@ H5T_path_find(const H5T_t *src, const H5T_t *dst, const char *name,
done:
if(!ret_value && path && path != table) {
- if(path->src)
+ if(path->src)
H5T_close(path->src);
- if(path->dst)
+ if(path->dst)
H5T_close(path->dst);
path = H5FL_FREE(H5T_path_t, path);
} /* end if */
@@ -4651,20 +4639,17 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5T_path_noop
- *
- * Purpose: Is the path the special no-op path? The no-op function can be
- * set by the application and there might be more than one no-op
- * path in a multi-threaded application if one thread is using
- * the no-op path when some other thread changes its definition.
+ * Function: H5T_path_noop
*
- * Return: TRUE/FALSE (can't fail)
+ * Purpose: Is the path the special no-op path? The no-op function can be
+ * set by the application and there might be more than one no-op
+ * path in a multi-threaded application if one thread is using
+ * the no-op path when some other thread changes its definition.
*
- * Programmer: Quincey Koziol
- * Thursday, May 8, 2003
- *
- * Modifications:
+ * Return: TRUE/FALSE (can't fail)
*
+ * Programmer: Quincey Koziol
+ * Thursday, May 8, 2003
*-------------------------------------------------------------------------
*/
hbool_t
@@ -4679,12 +4664,12 @@ H5T_path_noop(const H5T_path_t *p)
/*-------------------------------------------------------------------------
- * Function: H5T_path_compound_subset
+ * Function: H5T_path_compound_subset
*
- * Purpose: Checks if the source and destination types are both compound.
- * Tells whether whether the source members are a subset of
- * destination, and the order is the same, and no conversion
- * is needed. For example:
+ * Purpose: Checks if the source and destination types are both compound.
+ * Tells whether the source members are a subset of the
+ * destination, and the order is the same, and no conversion
+ * is needed. For example:
* struct source { struct destination {
* TYPE1 A; --> TYPE1 A;
* TYPE2 B; --> TYPE2 B;
@@ -4693,11 +4678,11 @@ H5T_path_noop(const H5T_path_t *p)
* TYPE5 E;
* };
*
- * Return: A pointer to the subset info struct in p, or NULL if there are
- * no compounds. Points directly into the H5T_path_t structure.
+ * Return: A pointer to the subset info struct in p, or NULL if there are
+ * no compounds. Points directly into the H5T_path_t structure.
*
- * Programmer: Raymond Lu
- * 8 June 2007
+ * Programmer: Raymond Lu
+ * 8 June 2007
*
* Modifications: Neil Fortner
* 19 September 2008
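/*
 * Usage sketch (editor's illustration, not patch content): the subset shape
 * the check above looks for is a pair of compound types whose shared members
 * appear in the same order, with the destination carrying extra trailing
 * members.  `sub_t` and `super_t` are made-up example structs:
 */
#include "hdf5.h"

typedef struct { int a; double b; }          sub_t;    /* source: leading subset    */
typedef struct { int a; double b; float c; } super_t;  /* destination: extra member */

static herr_t
build_subset_pair(hid_t *sub_out, hid_t *super_out)
{
    hid_t sub = H5Tcreate(H5T_COMPOUND, sizeof(sub_t));
    hid_t sup = H5Tcreate(H5T_COMPOUND, sizeof(super_t));

    if(sub < 0 || sup < 0 ||
       H5Tinsert(sub, "a", HOFFSET(sub_t, a),   H5T_NATIVE_INT)    < 0 ||
       H5Tinsert(sub, "b", HOFFSET(sub_t, b),   H5T_NATIVE_DOUBLE) < 0 ||
       H5Tinsert(sup, "a", HOFFSET(super_t, a), H5T_NATIVE_INT)    < 0 ||
       H5Tinsert(sup, "b", HOFFSET(super_t, b), H5T_NATIVE_DOUBLE) < 0 ||
       H5Tinsert(sup, "c", HOFFSET(super_t, c), H5T_NATIVE_FLOAT)  < 0)
        return -1;                        /* (cleanup on error omitted for brevity) */

    *sub_out   = sub;
    *super_out = sup;
    return 0;
}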
@@ -4723,17 +4708,14 @@ H5T_path_compound_subset(const H5T_path_t *p)
/*-------------------------------------------------------------------------
- * Function: H5T_path_bkg
- *
- * Purpose: Get the "background" flag for the conversion path.
+ * Function: H5T_path_bkg
*
- * Return: Background flag (can't fail)
+ * Purpose: Get the "background" flag for the conversion path.
*
- * Programmer: Quincey Koziol
- * Thursday, May 8, 2003
- *
- * Modifications:
+ * Return: Background flag (can't fail)
*
+ * Programmer: Quincey Koziol
+ * Thursday, May 8, 2003
*-------------------------------------------------------------------------
*/
H5T_bkg_t
@@ -4748,34 +4730,31 @@ H5T_path_bkg(const H5T_path_t *p)
/*-------------------------------------------------------------------------
- * Function: H5T_compiler_conv
+ * Function: H5T_compiler_conv
*
- * Purpose: Private function for H5Tcompiler_conv. Finds out whether the
- * library's conversion function from type SRC to type DST
- * is a hard conversion.
+ * Purpose: Private function for H5Tcompiler_conv. Finds out whether the
+ * library's conversion function from type SRC to type DST
+ * is a hard conversion.
*
- * Return: TRUE: hard conversion.
- * FALSE: soft conversion.
- * FAIL: function failed.
- *
- * Programmer: Raymond Lu
- * Friday, Sept 2, 2005
- *
- * Modifications:
+ * Return: TRUE: hard conversion.
+ * FALSE: soft conversion.
+ * FAIL: function failed.
*
+ * Programmer: Raymond Lu
+ * Friday, Sept 2, 2005
*-------------------------------------------------------------------------
*/
static htri_t
H5T_compiler_conv(H5T_t *src, H5T_t *dst)
{
- H5T_path_t *path;
- htri_t ret_value = FAIL; /* Return value */
+ H5T_path_t *path;
+ htri_t ret_value = FAIL; /* Return value */
FUNC_ENTER_NOAPI_NOINIT
/* Find it */
if (NULL==(path=H5T_path_find(src, dst, NULL, NULL, H5AC_noio_dxpl_id, FALSE)))
- HGOTO_ERROR(H5E_DATATYPE, H5E_NOTFOUND, FAIL, "conversion function not found")
+ HGOTO_ERROR(H5E_DATATYPE, H5E_NOTFOUND, FAIL, "conversion function not found")
ret_value = (htri_t)path->is_hard;
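/*
 * Usage sketch (editor's illustration, not patch content): the corresponding
 * public query is H5Tcompiler_conv(), which reports whether a conversion path
 * is hard (compiler casts) or soft (library-emulated):
 */
#include <stdio.h>
#include "hdf5.h"

static void
report_conv_kind(void)
{
    htri_t hard = H5Tcompiler_conv(H5T_NATIVE_INT, H5T_NATIVE_DOUBLE);

    if(hard < 0)
        printf("H5Tcompiler_conv query failed\n");
    else
        printf("int -> double uses a %s conversion\n", hard ? "hard" : "soft");
}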
@@ -4785,28 +4764,28 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5T_convert
+ * Function: H5T_convert
*
- * Purpose: Call a conversion function to convert from source to
- * destination data type and accumulate timing statistics.
+ * Purpose: Call a conversion function to convert from source to
+ * destination data type and accumulate timing statistics.
*
- * Return: Success: non-negative
+ * Return: Success: non-negative
*
- * Failure: negative
+ * Failure: negative
*
- * Programmer: Robb Matzke
+ * Programmer: Robb Matzke
* Tuesday, December 15, 1998
*
* Modifications:
- * Robb Matzke, 1999-06-16
- * The timers are updated only if H5T debugging is enabled at
- * runtime in addition to compile time.
+ * Robb Matzke, 1999-06-16
+ * The timers are updated only if H5T debugging is enabled at
+ * runtime in addition to compile time.
*
- * Robb Matzke, 1999-06-16
- * Added support for non-zero strides. If BUF_STRIDE is non-zero
- * then convert one value at each memory location advancing
- * BUF_STRIDE bytes each time; otherwise assume both source and
- * destination values are packed.
+ * Robb Matzke, 1999-06-16
+ * Added support for non-zero strides. If BUF_STRIDE is non-zero
+ * then convert one value at each memory location advancing
+ * BUF_STRIDE bytes each time; otherwise assume both source and
+ * destination values are packed.
*
* Quincey Koziol, 1999-07-01
* Added dataset transfer properties, to allow custom VL
@@ -4827,11 +4806,11 @@ done:
*/
herr_t
H5T_convert(H5T_path_t *tpath, hid_t src_id, hid_t dst_id, size_t nelmts,
- size_t buf_stride, size_t bkg_stride, void *buf, void *bkg,
+ size_t buf_stride, size_t bkg_stride, void *buf, void *bkg,
hid_t dset_xfer_plist)
{
#ifdef H5T_DEBUG
- H5_timer_t timer;
+ H5_timer_t timer;
#endif
herr_t ret_value=SUCCEED; /* Return value */
@@ -4843,12 +4822,12 @@ H5T_convert(H5T_path_t *tpath, hid_t src_id, hid_t dst_id, size_t nelmts,
tpath->cdata.command = H5T_CONV_CONV;
if ((tpath->func)(src_id, dst_id, &(tpath->cdata), nelmts, buf_stride,
bkg_stride, buf, bkg, dset_xfer_plist)<0)
- HGOTO_ERROR(H5E_ATTR, H5E_CANTENCODE, FAIL, "data type conversion failed");
+ HGOTO_ERROR(H5E_ATTR, H5E_CANTENCODE, FAIL, "data type conversion failed");
#ifdef H5T_DEBUG
if (H5DEBUG(T)) {
- H5_timer_end(&(tpath->stats.timer), &timer);
- tpath->stats.ncalls++;
- tpath->stats.nelmts += nelmts;
+ H5_timer_end(&(tpath->stats.timer), &timer);
+ tpath->stats.ncalls++;
+ tpath->stats.nelmts += nelmts;
}
#endif
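The public entry point H5Tconvert() ends up in this routine; a minimal in-place conversion sketch (the buffer sizing and element count are illustrative):

#include "hdf5.h"
#include <string.h>

static herr_t convert_ints_to_doubles(void)
{
    /* The buffer is converted in place, so it must hold nelmts values of
     * the larger of the two types (double here). */
    double buf[4];
    int    vals[4] = {1, 2, 3, 4};

    memcpy(buf, vals, sizeof(vals));  /* packed source values, zero stride */

    /* No background buffer is needed for atomic types. */
    return H5Tconvert(H5T_NATIVE_INT, H5T_NATIVE_DOUBLE, (size_t)4, buf, NULL, H5P_DEFAULT);
}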
@@ -4858,14 +4837,14 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5T_oloc
+ * Function: H5T_oloc
*
- * Purpose: Returns a pointer to the object location for a named datatype.
+ * Purpose: Returns a pointer to the object location for a named datatype.
*
- * Return: Success: Ptr directly into named datatype
- * Failure: NULL
+ * Return: Success: Ptr directly into named datatype
+ * Failure: NULL
*
- * Programmer: Robb Matzke
+ * Programmer: Robb Matzke
* Friday, June 5, 1998
*
*-------------------------------------------------------------------------
@@ -4899,14 +4878,14 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5T_nameof
+ * Function: H5T_nameof
*
- * Purpose: Returns a pointer to the path for a named datatype.
+ * Purpose: Returns a pointer to the path for a named datatype.
*
- * Return: Success: Ptr directly into named datatype
- * Failure: NULL
+ * Return: Success: Ptr directly into named datatype
+ * Failure: NULL
*
- * Programmer: Quincey Koziol
+ * Programmer: Quincey Koziol
* Monday, September 12, 2005
*
*-------------------------------------------------------------------------
@@ -4949,9 +4928,6 @@ done:
*
* Programmer: Raymond Lu
* Friday, Dec 7, 2001
- *
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
htri_t
@@ -4982,9 +4958,6 @@ done:
*
* Programmer: Pedro Vicente
* Tuesday, Sep 3, 2002
- *
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
htri_t
@@ -5007,9 +4980,9 @@ done:
* Function: H5T_convert_committed_datatype
*
* Purpose: To convert the committed datatype "dt" to a transient embedded
- * type if the file location associated with the committed datatype is
- * different from the parameter "f".
- * "f" is the file location where the dataset or attribute will be created.
+ * type if the file location associated with the committed datatype is
+ * different from the parameter "f".
+ * "f" is the file location where the dataset or attribute will be created.
*
* Notes: See HDFFV-9940
*
@@ -5048,25 +5021,17 @@ done:
/*--------------------------------------------------------------------------
- NAME
- H5T_get_ref_type
- PURPOSE
- Retrieves the type of reference for a datatype
- USAGE
- H5R_type_t H5Tget_ref_type(dt)
- H5T_t *dt; IN: datatype pointer for the reference datatype
-
- RETURNS
- Success: A reference type defined in H5Rpublic.h
- Failure: H5R_BADTYPE
- DESCRIPTION
- Given a reference datatype object, this function returns the reference type
- of the datatype.
- GLOBAL VARIABLES
- COMMENTS, BUGS, ASSUMPTIONS
- EXAMPLES
- REVISION LOG
---------------------------------------------------------------------------*/
+ * Function: H5T_get_ref_type
+ *
+ * Purpose: Retrieves the type of reference for a datatype
+ * H5T_t *dt; IN: datatype pointer for the reference datatype
+ *
+ * Return: Success: A reference type defined in H5Rpublic.h
+ * Failure: H5R_BADTYPE
+ * Notes: Given a reference datatype object, this function returns the reference type
+ * of the datatype.
+ *--------------------------------------------------------------------------
+ */
H5R_type_t
H5T_get_ref_type(const H5T_t *dt)
{
@@ -5085,26 +5050,23 @@ done:
/*-------------------------------------------------------------------------
- * Function: H5T_is_sensible
- *
- * Purpose: Determines if a data type is sensible to store on disk
- * (i.e. not partially initialized)
+ * Function: H5T_is_sensible
*
- * Return: Success: TRUE, FALSE
+ * Purpose: Determines if a data type is sensible to store on disk
+ * (i.e. not partially initialized)
*
- * Failure: Negative
+ * Return: Success: TRUE, FALSE
*
- * Programmer: Quincey Koziol
- * Tuesday, June 11, 2002
- *
- * Modifications:
+ * Failure: Negative
*
+ * Programmer: Quincey Koziol
+ * Tuesday, June 11, 2002
*-------------------------------------------------------------------------
*/
htri_t
H5T_is_sensible(const H5T_t *dt)
{
- htri_t ret_value = FAIL; /* Return value */
+ htri_t ret_value = FAIL; /* Return value */
FUNC_ENTER_NOAPI(FAIL)
@@ -5168,18 +5130,15 @@ done:
DESCRIPTION
Recursively descends any VL or compound datatypes to mark all VL datatypes
as either on disk or in memory.
- GLOBAL VARIABLES
- COMMENTS, BUGS, ASSUMPTIONS
- EXAMPLES
- REVISION LOG
---------------------------------------------------------------------------*/
+ --------------------------------------------------------------------------
+ */
htri_t
H5T_set_loc(H5T_t *dt, H5F_t *f, H5T_loc_t loc)
{
- htri_t changed; /* Whether H5T_set_loc changed the type (even if the size didn't change) */
- htri_t ret_value = 0; /* Indicate that success, but no location change */
- unsigned i; /* Local index variable */
- size_t old_size; /* Previous size of a field */
+ htri_t changed; /* Whether H5T_set_loc changed the type (even if the size didn't change) */
+ htri_t ret_value = 0; /* Indicate that success, but no location change */
+ unsigned i; /* Local index variable */
+ size_t old_size; /* Previous size of a field */
FUNC_ENTER_NOAPI(FAIL)
@@ -5213,53 +5172,53 @@ H5T_set_loc(H5T_t *dt, H5F_t *f, H5T_loc_t loc)
case H5T_COMPOUND: /* Check each field and recurse on VL, compound and array type */
{
- ssize_t accum_change = 0; /* Amount of change in the offset of the fields */
+ ssize_t accum_change = 0; /* Amount of change in the offset of the fields */
- /* Sort the fields based on offsets */
- H5T__sort_value(dt, NULL);
+ /* Sort the fields based on offsets */
+ H5T__sort_value(dt, NULL);
- for (i=0; i<dt->shared->u.compnd.nmembs; i++) {
- H5T_t *memb_type; /* Member's datatype pointer */
+ for (i = 0; i < dt->shared->u.compnd.nmembs; i++) {
+ H5T_t *memb_type; /* Member's datatype pointer */
- /* Range check against compound member's offset */
- if ((accum_change < 0) && ((ssize_t) dt->shared->u.compnd.memb[i].offset < accum_change))
- HGOTO_ERROR(H5E_DATATYPE, H5E_BADVALUE, FAIL, "invalid field size in datatype");
+ /* Range check against compound member's offset */
+ if ((accum_change < 0) && ((ssize_t) dt->shared->u.compnd.memb[i].offset < accum_change))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_BADVALUE, FAIL, "invalid field size in datatype");
- /* Apply the accumulated size change to the offset of the field */
- dt->shared->u.compnd.memb[i].offset += (size_t) accum_change;
+ /* Apply the accumulated size change to the offset of the field */
+ dt->shared->u.compnd.memb[i].offset += (size_t) accum_change;
- /* Set the member type pointer (for convenience) */
- memb_type=dt->shared->u.compnd.memb[i].type;
+ /* Set the member type pointer (for convenience) */
+ memb_type = dt->shared->u.compnd.memb[i].type;
- /* Recurse if it's VL, compound, enum or array */
- /* (If the force_conv flag is _not_ set, the type cannot change in size, so don't recurse) */
- if(memb_type->shared->force_conv && H5T_IS_COMPLEX(memb_type->shared->type)) {
- /* Keep the old field size for later */
- old_size=memb_type->shared->size;
+ /* Recurse if it's VL, compound, enum or array */
+ /* (If the force_conv flag is _not_ set, the type cannot change in size, so don't recurse) */
+ if(memb_type->shared->force_conv && H5T_IS_COMPLEX(memb_type->shared->type)) {
+ /* Keep the old field size for later */
+ old_size = memb_type->shared->size;
- /* Mark the VL, compound, enum or array type */
- if((changed=H5T_set_loc(memb_type,f,loc))<0)
- HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "Unable to set VL location");
- if(changed>0)
- ret_value=changed;
+ /* Mark the VL, compound, enum or array type */
+ if((changed = H5T_set_loc(memb_type,f,loc)) < 0)
+ HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "Unable to set VL location");
+ if(changed > 0)
+ ret_value = changed;
- /* Check if the field changed size */
- if(old_size != memb_type->shared->size) {
- /* Adjust the size of the member */
- dt->shared->u.compnd.memb[i].size = (dt->shared->u.compnd.memb[i].size*memb_type->shared->size)/old_size;
+ /* Check if the field changed size */
+ if(old_size != memb_type->shared->size) {
+ /* Adjust the size of the member */
+ dt->shared->u.compnd.memb[i].size = (dt->shared->u.compnd.memb[i].size*memb_type->shared->size)/old_size;
- /* Add that change to the accumulated size change */
- accum_change += (ssize_t) (memb_type->shared->size - old_size);
+ /* Add that change to the accumulated size change */
+ accum_change += (ssize_t) (memb_type->shared->size - old_size);
+ } /* end if */
} /* end if */
- } /* end if */
- } /* end for */
+ } /* end for */
- /* Range check against datatype size */
- if ((accum_change < 0) && ((ssize_t) dt->shared->size < accum_change))
- HGOTO_ERROR(H5E_DATATYPE, H5E_BADVALUE, FAIL, "invalid field size in datatype");
+ /* Range check against datatype size */
+ if ((accum_change < 0) && ((ssize_t) dt->shared->size < accum_change))
+ HGOTO_ERROR(H5E_DATATYPE, H5E_BADVALUE, FAIL, "invalid field size in datatype");
- /* Apply the accumulated size change to the datatype */
- dt->shared->size += (size_t) accum_change;
+ /* Apply the accumulated size change to the datatype */
+ dt->shared->size += (size_t) accum_change;
}
break;
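A worked example of the bookkeeping above, with illustrative sizes: if a variable-length member's size changes from 16 bytes to 8 bytes when its location is switched, the loop records accum_change = -8, each later member's offset is shifted down by 8 on its iteration, the member's own size is rescaled as size * 8 / 16, and the compound's total size is finally reduced by 8.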
@@ -5267,29 +5226,29 @@ H5T_set_loc(H5T_t *dt, H5F_t *f, H5T_loc_t loc)
/* Recurse if it's VL, compound, enum or array */
/* (If the force_conv flag is _not_ set, the type cannot change in size, so don't recurse) */
if(dt->shared->parent->shared->force_conv && H5T_IS_COMPLEX(dt->shared->parent->shared->type)) {
- if((changed=H5T_set_loc(dt->shared->parent,f,loc))<0)
+ if((changed = H5T_set_loc(dt->shared->parent,f,loc)) < 0)
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "Unable to set VL location");
- if(changed>0)
- ret_value=changed;
+ if(changed > 0)
+ ret_value = changed;
} /* end if */
/* Mark this VL sequence */
if((changed = H5T__vlen_set_loc(dt, f, loc)) < 0)
HGOTO_ERROR(H5E_DATATYPE, H5E_CANTINIT, FAIL, "Unable to set VL location");
- if(changed>0)
- ret_value=changed;
+ if(changed > 0)
+ ret_value = changed;
break;
case H5T_REFERENCE:
/* Only need to change location of object references */
- if(dt->shared->u.atomic.u.r.rtype==H5R_OBJECT) {
+ if(dt->shared->u.atomic.u.r.rtype == H5R_OBJECT) {
/* Mark this reference */
- if(loc!=dt->shared->u.atomic.u.r.loc) {
+ if(loc != dt->shared->u.atomic.u.r.loc) {
/* Set the location */
dt->shared->u.atomic.u.r.loc = loc;
/* Indicate that the location changed */
- ret_value=TRUE;
+ ret_value = TRUE;
} /* end if */
} /* end if */
break;
@@ -5358,11 +5317,11 @@ done:
* Purpose: H5T__visit callback to Upgrade the version of a datatype
* (if there's any benefit to doing so)
*
- * Note: The behavior below is tightly coupled with the "better"
+ * Note: The behavior below is tightly coupled with the "better"
* encodings for datatype messages in the datatype message
* encoding routine.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Non-negative on success/Negative on failure
*
* Programmer: Quincey Koziol
* Thursday, July 19, 2007
@@ -5417,7 +5376,7 @@ H5T_upgrade_version_cb(H5T_t *dt, void *op_value)
* doing so) and recursively apply to compound members and/or
* parent datatypes.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Non-negative on success/Negative on failure
*
* Programmer: Quincey Koziol
* Thursday, July 19, 2007
@@ -5448,7 +5407,7 @@ done:
*
* Purpose: Set the encoding for a datatype to the latest version.
*
- * Return: Non-negative on success/Negative on failure
+ * Return: Non-negative on success/Negative on failure
*
* Programmer: Quincey Koziol
* Thursday, July 19, 2007
@@ -5559,7 +5518,7 @@ H5Tflush(hid_t type_id)
FUNC_ENTER_API(FAIL)
H5TRACE1("e", "i", type_id);
-
+
/* Check args */
if(NULL == (dt = (H5T_t *)H5I_object_verify(type_id, H5I_DATATYPE)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype")
@@ -5592,7 +5551,7 @@ H5Trefresh(hid_t type_id)
{
H5T_t * dt = NULL;
herr_t ret_value = SUCCEED; /* return value */
-
+
FUNC_ENTER_API(FAIL)
H5TRACE1("e", "i", type_id);
diff --git a/src/H5TS.c b/src/H5TS.c
index 7d46e95..a0ca134 100644
--- a/src/H5TS.c
+++ b/src/H5TS.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* private headers */
diff --git a/src/H5TSprivate.h b/src/H5TSprivate.h
index 5394b77..e5c41af 100644
--- a/src/H5TSprivate.h
+++ b/src/H5TSprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Tarray.c b/src/H5Tarray.c
index 4b2c7cf..beccfdb 100644
--- a/src/H5Tarray.c
+++ b/src/H5Tarray.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Tbit.c b/src/H5Tbit.c
index a6b917f..12d1fd1 100644
--- a/src/H5Tbit.c
+++ b/src/H5Tbit.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Tcommit.c b/src/H5Tcommit.c
index 3ded7af..c28b508 100644
--- a/src/H5Tcommit.c
+++ b/src/H5Tcommit.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Tcompound.c b/src/H5Tcompound.c
index 1342770..169c146 100644
--- a/src/H5Tcompound.c
+++ b/src/H5Tcompound.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Tconv.c b/src/H5Tconv.c
index 23ccd98..84a997e 100644
--- a/src/H5Tconv.c
+++ b/src/H5Tconv.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Tcset.c b/src/H5Tcset.c
index 95658ed..186e598 100644
--- a/src/H5Tcset.c
+++ b/src/H5Tcset.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Tdbg.c b/src/H5Tdbg.c
index 8a10cc6..f434543 100644
--- a/src/H5Tdbg.c
+++ b/src/H5Tdbg.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Tdeprec.c b/src/H5Tdeprec.c
index a769b72..c506ec1 100644
--- a/src/H5Tdeprec.c
+++ b/src/H5Tdeprec.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Tenum.c b/src/H5Tenum.c
index e80d748..f5263ed 100644
--- a/src/H5Tenum.c
+++ b/src/H5Tenum.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Tfields.c b/src/H5Tfields.c
index 8818a73..be62d85 100644
--- a/src/H5Tfields.c
+++ b/src/H5Tfields.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Tfixed.c b/src/H5Tfixed.c
index 62b8e79..bc1d84d 100644
--- a/src/H5Tfixed.c
+++ b/src/H5Tfixed.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Tfloat.c b/src/H5Tfloat.c
index b8b1c07..85e8f30 100644
--- a/src/H5Tfloat.c
+++ b/src/H5Tfloat.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Tmodule.h b/src/H5Tmodule.h
index bfaab5e6..d2ab08c 100644
--- a/src/H5Tmodule.h
+++ b/src/H5Tmodule.h
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Tnative.c b/src/H5Tnative.c
index 9dbce09..6304500 100644
--- a/src/H5Tnative.c
+++ b/src/H5Tnative.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Toffset.c b/src/H5Toffset.c
index cc45d8e..668e730 100644
--- a/src/H5Toffset.c
+++ b/src/H5Toffset.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Toh.c b/src/H5Toh.c
index 9c8ad80..01cefe1 100644
--- a/src/H5Toh.c
+++ b/src/H5Toh.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/****************/
diff --git a/src/H5Topaque.c b/src/H5Topaque.c
index 9cc22c9..4e8f1d4 100644
--- a/src/H5Topaque.c
+++ b/src/H5Topaque.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Torder.c b/src/H5Torder.c
index 6c9c55c..877316d 100644
--- a/src/H5Torder.c
+++ b/src/H5Torder.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Tpad.c b/src/H5Tpad.c
index e636a84..c96f42a 100644
--- a/src/H5Tpad.c
+++ b/src/H5Tpad.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Tpkg.h b/src/H5Tpkg.h
index 49b0ea0..d075127 100644
--- a/src/H5Tpkg.h
+++ b/src/H5Tpkg.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Tprecis.c b/src/H5Tprecis.c
index be85491..59bac8b 100644
--- a/src/H5Tprecis.c
+++ b/src/H5Tprecis.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Tprivate.h b/src/H5Tprivate.h
index 7efcb41..f2da62e 100644
--- a/src/H5Tprivate.h
+++ b/src/H5Tprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Tpublic.h b/src/H5Tpublic.h
index df7ad41..fc3e4ee 100644
--- a/src/H5Tpublic.h
+++ b/src/H5Tpublic.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Tstrpad.c b/src/H5Tstrpad.c
index 2cd1db4..fa084f1 100644
--- a/src/H5Tstrpad.c
+++ b/src/H5Tstrpad.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Tvisit.c b/src/H5Tvisit.c
index d732fb3..c706dee 100644
--- a/src/H5Tvisit.c
+++ b/src/H5Tvisit.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Tvlen.c b/src/H5Tvlen.c
index d198d50..00e61e5 100644
--- a/src/H5Tvlen.c
+++ b/src/H5Tvlen.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5UC.c b/src/H5UC.c
index 5762cc5..2277818 100644
--- a/src/H5UC.c
+++ b/src/H5UC.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5UCprivate.h b/src/H5UCprivate.h
index a702c03..c451f31 100644
--- a/src/H5UCprivate.h
+++ b/src/H5UCprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5VM.c b/src/H5VM.c
index c546609..4c0b837 100644
--- a/src/H5VM.c
+++ b/src/H5VM.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5VMprivate.h b/src/H5VMprivate.h
index cbe108a..4d71b29 100644
--- a/src/H5VMprivate.h
+++ b/src/H5VMprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5WB.c b/src/H5WB.c
index 978ded4..8a85a3a 100644
--- a/src/H5WB.c
+++ b/src/H5WB.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5WBprivate.h b/src/H5WBprivate.h
index cfa3fcb..4460808 100644
--- a/src/H5WBprivate.h
+++ b/src/H5WBprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5Z.c b/src/H5Z.c
index e7a2186..ef34d7c 100644
--- a/src/H5Z.c
+++ b/src/H5Z.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "H5Zmodule.h" /* This source code file is part of the H5Z module */
@@ -626,7 +624,7 @@ H5Z__flush_file_cb(void *obj_ptr, hid_t H5_ATTR_UNUSED obj_id, void H5_ATTR_UNUS
/* Call the flush routine for mounted file hierarchies. Do a global flush
* if the file is opened for write */
if(H5F_ACC_RDWR & H5F_INTENT((H5F_t *)obj_ptr)) {
- if(H5F_flush_mounts((H5F_t *)obj_ptr, H5AC_ind_read_dxpl_id) < 0)
+ if(H5F_flush_mounts((H5F_t *)obj_ptr, H5AC_ind_read_dxpl_id, H5AC_rawdata_dxpl_id) < 0)
HGOTO_ERROR(H5E_PLINE, H5E_CANTFLUSH, FAIL, "unable to flush file hierarchy")
} /* end if */
diff --git a/src/H5Zdeflate.c b/src/H5Zdeflate.c
index 15aac27..34fdfec 100644
--- a/src/H5Zdeflate.c
+++ b/src/H5Zdeflate.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Zfletcher32.c b/src/H5Zfletcher32.c
index 9ff85cc..4cd77ef 100644
--- a/src/H5Zfletcher32.c
+++ b/src/H5Zfletcher32.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Zmodule.h b/src/H5Zmodule.h
index fa0ef8f..97e158c 100644
--- a/src/H5Zmodule.h
+++ b/src/H5Zmodule.h
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5Znbit.c b/src/H5Znbit.c
index 7a41d16..373eb37 100644
--- a/src/H5Znbit.c
+++ b/src/H5Znbit.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "H5Zmodule.h" /* This source code file is part of the H5Z module */
diff --git a/src/H5Zpkg.h b/src/H5Zpkg.h
index aa2ffe2..2aa17f2 100644
--- a/src/H5Zpkg.h
+++ b/src/H5Zpkg.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#if !(defined H5Z_FRIEND || defined H5Z_MODULE)
diff --git a/src/H5Zprivate.h b/src/H5Zprivate.h
index 73d85ac..fe182ad 100644
--- a/src/H5Zprivate.h
+++ b/src/H5Zprivate.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Robb Matzke <matzke@llnl.gov>
diff --git a/src/H5Zpublic.h b/src/H5Zpublic.h
index 8daa5f4..f6b313e 100644
--- a/src/H5Zpublic.h
+++ b/src/H5Zpublic.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Robb Matzke <matzke@llnl.gov>
diff --git a/src/H5Zscaleoffset.c b/src/H5Zscaleoffset.c
index 1cca9b1..b86d785 100644
--- a/src/H5Zscaleoffset.c
+++ b/src/H5Zscaleoffset.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "H5Zmodule.h" /* This source code file is part of the H5Z module */
diff --git a/src/H5Zshuffle.c b/src/H5Zshuffle.c
index 4cf6adf..1fef1c1 100644
--- a/src/H5Zshuffle.c
+++ b/src/H5Zshuffle.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "H5Zmodule.h" /* This source code file is part of the H5Z module */
diff --git a/src/H5Zszip.c b/src/H5Zszip.c
index 557e923..769011c 100644
--- a/src/H5Zszip.c
+++ b/src/H5Zszip.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "H5Zmodule.h" /* This source code file is part of the H5Z module */
@@ -87,7 +85,7 @@ H5Z_can_apply_szip(hid_t H5_ATTR_UNUSED dcpl_id, hid_t type_id, hid_t H5_ATTR_UN
FUNC_ENTER_NOAPI(FAIL)
/* Get datatype */
- if(NULL == (type = H5I_object_verify(type_id, H5I_DATATYPE)))
+ if(NULL == (type = (H5T_t *)H5I_object_verify(type_id, H5I_DATATYPE)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype")
/* Get datatype's size, for checking the "bits-per-pixel" */
@@ -158,7 +156,7 @@ H5Z_set_local_szip(hid_t dcpl_id, hid_t type_id, hid_t space_id)
HGOTO_ERROR(H5E_ATOM, H5E_BADATOM, FAIL, "can't find object for ID")
/* Get datatype */
- if(NULL == (type = H5I_object_verify(type_id, H5I_DATATYPE)))
+ if(NULL == (type = (H5T_t *)H5I_object_verify(type_id, H5I_DATATYPE)))
HGOTO_ERROR(H5E_ARGS, H5E_BADTYPE, FAIL, "not a datatype")
/* Get the filter's current parameters */
diff --git a/src/H5Ztrans.c b/src/H5Ztrans.c
index f30f0b2..d4b59a6 100644
--- a/src/H5Ztrans.c
+++ b/src/H5Ztrans.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "H5Zmodule.h" /* This source code file is part of the H5Z module */
diff --git a/src/H5api_adpt.h b/src/H5api_adpt.h
index 910bef9..0ff0f74 100644
--- a/src/H5api_adpt.h
+++ b/src/H5api_adpt.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/src/H5checksum.c b/src/H5checksum.c
index 48e4ce5..64d527e 100644
--- a/src/H5checksum.c
+++ b/src/H5checksum.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5dbg.c b/src/H5dbg.c
index 2039a51..dd50034 100644
--- a/src/H5dbg.c
+++ b/src/H5dbg.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5detect.c b/src/H5detect.c
index 94e841e..75a1dba 100644
--- a/src/H5detect.c
+++ b/src/H5detect.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*keep this declaration near the top of this file -RPM*/
@@ -22,12 +20,10 @@ static const char *FileHeader = "\n\
* *\n\
* This file is part of HDF5. The full HDF5 copyright notice, including *\n\
* terms governing use, modification, and redistribution, is contained in *\n\
- * the files COPYING and Copyright.html. COPYING can be found at the root *\n\
- * of the source code distribution tree; Copyright.html can be found at the *\n\
- * root level of an installed copy of the electronic HDF5 document set and *\n\
- * is linked from the top-level documents page. It can also be found at *\n\
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *\n\
- * access to either file, you may request a copy from help@hdfgroup.org. *\n\
+ * the COPYING file, which can be found at the root of the source code *\n\
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *\n\
+ * If you do not have access to either file, you may request a copy from *\n\
+ * help@hdfgroup.org. *\n\
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *";
/*
*
diff --git a/src/H5err.txt b/src/H5err.txt
index 9531df9..3f5801f 100644
--- a/src/H5err.txt
+++ b/src/H5err.txt
@@ -4,12 +4,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# This file is used to generate the various headers that are needed for the
@@ -77,6 +75,7 @@ MAJOR, H5E_SOHM, Shared Object Header Messages
MAJOR, H5E_EARRAY, Extensible Array
MAJOR, H5E_FARRAY, Fixed Array
MAJOR, H5E_PLUGIN, Plugin for dynamically loaded library
+MAJOR, H5E_PAGEBUF, Page Buffering
MAJOR, H5E_NONE_MAJOR, No error
# Sections (for grouping minor errors)
@@ -157,6 +156,7 @@ MINOR, ATOM, H5E_NOIDS, Out of IDs for group
# Cache related errors
MINOR, CACHE, H5E_CANTFLUSH, Unable to flush data from cache
+MINOR, CACHE, H5E_CANTUNSERIALIZE, Unable to mark metadata as unserialized
MINOR, CACHE, H5E_CANTSERIALIZE, Unable to serialize data from cache
MINOR, CACHE, H5E_CANTTAG, Unable to tag metadata in the cache
MINOR, CACHE, H5E_CANTLOAD, Unable to load metadata into cache
@@ -170,6 +170,8 @@ MINOR, CACHE, H5E_CANTPIN, Unable to pin cache entry
MINOR, CACHE, H5E_CANTUNPIN, Unable to un-pin cache entry
MINOR, CACHE, H5E_CANTMARKDIRTY, Unable to mark a pinned entry as dirty
MINOR, CACHE, H5E_CANTMARKCLEAN, Unable to mark a pinned entry as clean
+MINOR, CACHE, H5E_CANTMARKUNSERIALIZED, Unable to mark an entry as unserialized
+MINOR, CACHE, H5E_CANTMARKSERIALIZED, Unable to mark an entry as serialized
MINOR, CACHE, H5E_CANTDIRTY, Unable to mark metadata as dirty
MINOR, CACHE, H5E_CANTCLEAN, Unable to mark metadata as clean
MINOR, CACHE, H5E_CANTEXPUNGE, Unable to expunge a metadata cache entry
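
The H5err.txt hunks above add a new major error code (H5E_PAGEBUF, "Page Buffering") and several cache serialization minors. As a rough, hedged illustration of how such entries surface to applications, the sketch below looks up the registered message text through the public error API; the H5E_PAGEBUF constant is assumed to be what bin/make_err generates from the new entry.

    /* Hedged sketch: each MAJOR/MINOR line in H5err.txt becomes a public
     * error-message constant via bin/make_err.  H5E_PAGEBUF below is assumed
     * to be the constant generated from the new entry above. */
    #include <stdio.h>
    #include "hdf5.h"

    int main(void)
    {
        char       text[64];
        H5E_type_t msg_type;

        /* Look up the descriptive string registered for the new major code */
        if (H5Eget_msg(H5E_PAGEBUF, &msg_type, text, sizeof(text)) >= 0)
            printf("H5E_PAGEBUF -> \"%s\" (%s)\n", text,
                   msg_type == H5E_MAJOR ? "major" : "minor");
        return 0;
    }
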
diff --git a/src/H5make_libsettings.c b/src/H5make_libsettings.c
index fa00c64..1892806 100644
--- a/src/H5make_libsettings.c
+++ b/src/H5make_libsettings.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*keep this declaration near the top of this file -RPM*/
@@ -22,12 +20,10 @@ static const char *FileHeader = "\n\
* *\n\
* This file is part of HDF5. The full HDF5 copyright notice, including *\n\
* terms governing use, modification, and redistribution, is contained in *\n\
- * the files COPYING and Copyright.html. COPYING can be found at the root *\n\
- * of the source code distribution tree; Copyright.html can be found at the *\n\
- * root level of an installed copy of the electronic HDF5 document set and *\n\
- * is linked from the top-level documents page. It can also be found at *\n\
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *\n\
- * access to either file, you may request a copy from help@hdfgroup.org. *\n\
+ * the COPYING file, which can be found at the root of the source code *\n\
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *\n\
+ * If you do not have access to either file, you may request a copy from *\n\
+ * help@hdfgroup.org. *\n\
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *";
/*
*
diff --git a/src/H5overflow.txt b/src/H5overflow.txt
index 3e9f069..a9e5099 100644
--- a/src/H5overflow.txt
+++ b/src/H5overflow.txt
@@ -4,12 +4,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# This file is used to generate the headers that is needed for detecting
diff --git a/src/H5private.h b/src/H5private.h
index 47f6d78..c588154 100644
--- a/src/H5private.h
+++ b/src/H5private.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Robb Matzke <matzke@llnl.gov>
@@ -1386,7 +1384,13 @@ typedef off_t h5_stat_size_t;
#ifndef HDstrtol
#define HDstrtol(S,R,N) strtol(S,R,N)
#endif /* HDstrtol */
-H5_DLL int64_t HDstrtoll (const char *s, const char **rest, int base);
+#ifndef HDstrtoll
+ #ifdef H5_HAVE_STRTOLL
+ #define HDstrtoll(S,R,N) strtoll(S,R,N)
+ #else
+ H5_DLL int64_t HDstrtoll (const char *s, const char **rest, int base);
+ #endif /* H5_HAVE_STRTOLL */
+#endif /* HDstrtoll */
#ifndef HDstrtoul
#define HDstrtoul(S,R,N) strtoul(S,R,N)
#endif /* HDstrtoul */
@@ -1973,7 +1977,7 @@ extern hbool_t H5_MPEinit_g; /* Has the MPE Library been initialized? */
\
if(!func_check) { \
/* Check function naming status */ \
- HDassert(asrt); \
+ HDassert(asrt && "Function naming conventions are incorrect - check H5_IS_API|PUB|PRIV|PKG macros in H5private.h (this is usually due to an incorrect number of underscores)"); \
\
/* Don't check again */ \
func_check = TRUE; \
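
The H5private.h change above stops declaring an internal HDstrtoll() unconditionally: when configure detects strtoll() (H5_HAVE_STRTOLL), the HDstrtoll macro maps straight to the C library, and the hand-rolled parser in H5system.c is compiled only as a fallback. A minimal standalone sketch of that fallback pattern, using a hypothetical HAVE_STRTOLL symbol rather than the real configure output:

    /* Minimal sketch of the HD-prefixed portability-macro pattern, using a
     * hypothetical HAVE_STRTOLL symbol in place of H5_HAVE_STRTOLL; the real
     * logic lives in H5private.h and H5system.c. */
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    #define HAVE_STRTOLL 1            /* pretend configure found strtoll() */

    #ifndef HDstrtoll
    #  ifdef HAVE_STRTOLL
    #    define HDstrtoll(S,R,N) strtoll(S,R,N)  /* use the C library directly */
    #  else
         /* otherwise a hand-rolled parser supplies the symbol */
         int64_t HDstrtoll(const char *s, const char **rest, int base);
    #  endif
    #endif

    int main(void)
    {
        char   *end = NULL;
        int64_t val = HDstrtoll("12345678901", &end, 10);

        printf("parsed %lld, stopped at \"%s\"\n", (long long)val, end);
        return 0;
    }
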
diff --git a/src/H5public.h b/src/H5public.h
index f0eb63a..40e6e6e 100644
--- a/src/H5public.h
+++ b/src/H5public.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -93,11 +91,11 @@ extern "C" {
/* Version numbers */
#define H5_VERS_MAJOR 1 /* For major interface/format changes */
-#define H5_VERS_MINOR 9 /* For minor interface/format changes */
-#define H5_VERS_RELEASE 236 /* For tweaks, bug-fixes, or development */
+#define H5_VERS_MINOR 11 /* For minor interface/format changes */
+#define H5_VERS_RELEASE 0 /* For tweaks, bug-fixes, or development */
#define H5_VERS_SUBRELEASE "" /* For pre-releases like snap0 */
/* Empty string for real releases. */
-#define H5_VERS_INFO "HDF5 library version: 1.9.236" /* Full version string */
+#define H5_VERS_INFO "HDF5 library version: 1.11.0" /* Full version string */
#define H5check() H5check_version(H5_VERS_MAJOR,H5_VERS_MINOR, \
H5_VERS_RELEASE)
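
The version bump above is what H5check() compares against the installed library at run time. A short example of the check from an application's point of view, assuming it is compiled against these 1.11.0 development headers:

    /* Minimal example: verify that the headers used at compile time match the
     * HDF5 library linked at run time, then report the runtime version. */
    #include <stdio.h>
    #include "hdf5.h"

    int main(void)
    {
        unsigned maj, min, rel;

        H5check();                          /* aborts if headers and library disagree */
        H5get_libversion(&maj, &min, &rel); /* e.g. 1, 11, 0 on this branch */
        printf("linked against HDF5 %u.%u.%u\n", maj, min, rel);
        return 0;
    }
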
diff --git a/src/H5system.c b/src/H5system.c
index ac323c0..7e25540 100644
--- a/src/H5system.c
+++ b/src/H5system.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -32,10 +30,10 @@
/***********/
/* Headers */
/***********/
-#include "H5private.h" /* Generic Functions */
-#include "H5Eprivate.h" /* Error handling */
-#include "H5Fprivate.h" /* File access */
-#include "H5MMprivate.h" /* Memory management */
+#include "H5private.h" /* Generic Functions */
+#include "H5Eprivate.h" /* Error handling */
+#include "H5Fprivate.h" /* File access */
+#include "H5MMprivate.h" /* Memory management */
/****************/
@@ -474,6 +472,7 @@ HDfprintf(FILE *stream, const char *fmt, ...)
*
*-------------------------------------------------------------------------
*/
+#ifndef HDstrtoll
int64_t
HDstrtoll(const char *s, const char **rest, int base)
{
@@ -549,7 +548,7 @@ HDstrtoll(const char *s, const char **rest, int base)
*rest = s;
return acc;
} /* end HDstrtoll() */
-
+#endif
/*-------------------------------------------------------------------------
* Function: HDrand/HDsrand
@@ -604,7 +603,7 @@ void HDsrand(unsigned int seed)
#ifdef H5_HAVE_FCNTL
int
Pflock(int fd, int operation) {
-
+
struct flock flk;
/* Set the lock type */
@@ -649,18 +648,18 @@ Nflock(int H5_ATTR_UNUSED fd, int H5_ATTR_UNUSED operation) {
/*-------------------------------------------------------------------------
- * Function: H5_make_time
+ * Function: H5_make_time
*
- * Purpose: Portability routine to abstract converting a 'tm' struct into
- * a time_t value.
+ * Purpose: Portability routine to abstract converting a 'tm' struct into
+ * a time_t value.
*
- * Note: This is a little problematic because mktime() operates on
- * local times. We convert to local time and then figure out the
- * adjustment based on the local time zone and daylight savings
- * setting.
+ * Note: This is a little problematic because mktime() operates on
+ * local times. We convert to local time and then figure out the
+ * adjustment based on the local time zone and daylight savings
+ * setting.
*
- * Return: Success: The value of timezone
- * Failure: -1
+ * Return: Success: The value of timezone
+ * Failure: -1
*
* Programmer: Quincey Koziol
* November 18, 2015
@@ -1138,7 +1137,7 @@ H5_combine_path(const char* path1, const char* path2, char **full_name /*out*/)
if(NULL == (*full_name = (char *)H5MM_strdup(path2)))
HGOTO_ERROR(H5E_RESOURCE, H5E_NOSPACE, FAIL, "memory allocation failed")
- } /* end if */
+ } /* end if */
else if(H5_CHECK_ABS_PATH(path2)) {
/* On windows path2 is a path absolute name */
diff --git a/src/H5timer.c b/src/H5timer.c
index f36681e..0ba8bd1 100644
--- a/src/H5timer.c
+++ b/src/H5timer.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/src/H5trace.c b/src/H5trace.c
index 44b2ed5..9fb8a72 100644
--- a/src/H5trace.c
+++ b/src/H5trace.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
@@ -895,32 +893,28 @@ H5_trace(const double *returning, const char *func, const char *type, ...)
fprintf(out, "NULL");
} /* end if */
else {
- H5F_file_space_type_t fs_type = (H5F_file_space_type_t)va_arg(ap, int);
+ H5F_fspace_strategy_t fs_strategy = (H5F_fspace_strategy_t)va_arg(ap, int);
- switch(fs_type) {
- case H5F_FILE_SPACE_DEFAULT:
- fprintf(out, "H5F_FILE_SPACE_DEFAULT");
+ switch(fs_strategy) {
+ case H5F_FSPACE_STRATEGY_FSM_AGGR:
+ fprintf(out, "H5F_FSPACE_STRATEGY_FSM_AGGR");
break;
- case H5F_FILE_SPACE_ALL_PERSIST:
- fprintf(out, "H5F_FILE_SPACE_ALL_PERSIST");
+ case H5F_FSPACE_STRATEGY_PAGE:
+ fprintf(out, "H5F_FSPACE_STRATEGY_PAGE");
break;
- case H5F_FILE_SPACE_ALL:
- fprintf(out, "H5F_FILE_SPACE_ALL");
+ case H5F_FSPACE_STRATEGY_AGGR:
+ fprintf(out, "H5F_FSPACE_STRATEGY_AGGR");
break;
- case H5F_FILE_SPACE_AGGR_VFD:
- fprintf(out, "H5F_FILE_SPACE_AGGR_VFD");
+ case H5F_FSPACE_STRATEGY_NONE:
+ fprintf(out, "H5F_FSPACE_STRATEGY_NONE");
break;
- case H5F_FILE_SPACE_VFD:
- fprintf(out, "H5F_FILE_SPACE_VFD");
- break;
-
- case H5F_FILE_SPACE_NTYPES:
+ case H5F_FSPACE_STRATEGY_NTYPES:
default:
- fprintf(out, "%ld", (long)fs_type);
+ fprintf(out, "%ld", (long)fs_strategy);
break;
} /* end switch */
} /* end else */
@@ -1003,6 +997,15 @@ H5_trace(const double *returning, const char *func, const char *type, ...)
} /* end else */
break;
+ case 't':
+ if(ptr) {
+ if(vp)
+ fprintf(out, "0x%lx", (unsigned long)vp);
+ else
+ fprintf(out, "NULL");
+ } /* end if */
+ break;
+
case 'v':
if(ptr) {
if(vp)
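
The tracing hunk above switches H5_trace() to the renamed H5F_fspace_strategy_t values (FSM_AGGR, PAGE, AGGR, NONE). For context, here is a hedged sketch of how an application would pick one of these strategies on a file-creation property list; it assumes the H5Pset_file_space_strategy() call that pairs with this enum in the 1.10+ property-list API:

    /* Hedged sketch: create a file that uses the paged free-space strategy the
     * trace code above now prints.  Assumes H5Pset_file_space_strategy(), the
     * property call that pairs with H5F_fspace_strategy_t on this branch. */
    #include "hdf5.h"

    int main(void)
    {
        hid_t fcpl = H5Pcreate(H5P_FILE_CREATE);
        hid_t file;

        /* persist free-space info in the file, 1-byte section threshold */
        H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 1, (hsize_t)1);

        file = H5Fcreate("paged.h5", H5F_ACC_TRUNC, fcpl, H5P_DEFAULT);

        H5Fclose(file);
        H5Pclose(fcpl);
        return 0;
    }
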
diff --git a/src/H5vers.txt b/src/H5vers.txt
index 07d2c4b..0303bf5 100644
--- a/src/H5vers.txt
+++ b/src/H5vers.txt
@@ -4,12 +4,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# This file is used to generate the various headers that are needed for
diff --git a/src/H5win32defs.h b/src/H5win32defs.h
index 63c3a16..0149faa 100644
--- a/src/H5win32defs.h
+++ b/src/H5win32defs.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Scott Wegner
@@ -23,6 +21,11 @@
*
*/
+/*
+ * _MSC_VER = 1900 VS2015
+ * _MSC_VER = 1800 VS2013
+ * _MSC_VER = 1700 VS2012
+ */
#ifdef H5_HAVE_WIN32_API
typedef struct _stati64 h5_stat_t;
@@ -54,13 +57,22 @@ typedef __int64 h5_stat_size_t;
#define HDsleep(S) Sleep(S*1000)
#define HDstat(S,B) _stati64(S,B)
#define HDstrcasecmp(A,B) _stricmp(A,B)
-#define HDstrtoull(S,R,N) _strtoui64(S,R,N)
#define HDstrdup(S) _strdup(S)
#define HDtzset() _tzset()
#define HDunlink(S) _unlink(S)
#define HDwrite(F,M,Z) _write(F,M,Z)
#ifdef H5_HAVE_VISUAL_STUDIO
+
+#if (_MSC_VER < 1800)
+ #ifndef H5_HAVE_STRTOLL
+ #define HDstrtoll(S,R,N) _strtoi64(S,R,N)
+ #endif /* H5_HAVE_STRTOLL */
+ #ifndef H5_HAVE_STRTOULL
+ #define HDstrtoull(S,R,N) _strtoui64(S,R,N)
+ #endif /* H5_HAVE_STRTOULL */
+#endif /* MSC_VER < 1800 */
+
/*
* The (void*) cast just avoids a compiler warning in H5_HAVE_VISUAL_STUDIO
*/
@@ -75,8 +87,8 @@ struct timezone {
#if (_MSC_VER < 1900)
struct timespec
{
- time_t tv_sec; // Seconds - >= 0
- long tv_nsec; // Nanoseconds - [0, 999999999]
+ time_t tv_sec; /* Seconds - >= 0 */
+ long tv_nsec; /* Nanoseconds - [0, 999999999] */
};
#endif /* MSC_VER < 1900 */
diff --git a/src/Makefile.am b/src/Makefile.am
index 939e151..0b664a7 100644
--- a/src/Makefile.am
+++ b/src/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
@@ -46,7 +44,8 @@ libhdf5_la_SOURCES= H5.c H5checksum.c H5dbg.c H5system.c H5timer.c H5trace.c \
H5B.c H5Bcache.c H5Bdbg.c \
H5B2.c H5B2cache.c H5B2dbg.c H5B2hdr.c H5B2int.c H5B2internal.c \
H5B2leaf.c H5B2stat.c H5B2test.c \
- H5C.c H5Cdbg.c H5Cepoch.c H5Clog.c H5Cquery.c H5Ctag.c H5Ctest.c \
+ H5C.c H5Cdbg.c H5Cepoch.c H5Cimage.c H5Clog.c H5Cprefetched.c \
+ H5Cquery.c H5Ctag.c H5Ctest.c \
H5CS.c \
H5D.c H5Dbtree.c H5Dbtree2.c H5Dchunk.c H5Dcompact.c H5Dcontig.c H5Ddbg.c \
H5Ddeprec.c H5Dearray.c H5Defl.c H5Dfarray.c H5Dfill.c H5Dint.c \
@@ -59,7 +58,7 @@ libhdf5_la_SOURCES= H5.c H5checksum.c H5dbg.c H5system.c H5timer.c H5trace.c \
H5F.c H5Fint.c H5Faccum.c H5Fcwfs.c \
H5Fdbg.c H5Fdeprec.c H5Fefc.c H5Ffake.c H5Fio.c \
H5Fmount.c H5Fquery.c \
- H5Fsfile.c H5Fsuper.c H5Fsuper_cache.c H5Ftest.c \
+ H5Fsfile.c H5Fspace.c H5Fsuper.c H5Fsuper_cache.c H5Ftest.c \
H5FA.c H5FAcache.c H5FAdbg.c H5FAdblock.c H5FAdblkpage.c H5FAhdr.c \
H5FAint.c H5FAstat.c H5FAtest.c \
H5FD.c H5FDcore.c \
@@ -81,7 +80,8 @@ libhdf5_la_SOURCES= H5.c H5checksum.c H5dbg.c H5system.c H5timer.c H5trace.c \
H5MF.c H5MFaggr.c H5MFdbg.c H5MFsection.c \
H5MM.c H5MP.c H5MPtest.c \
H5O.c H5Oainfo.c H5Oalloc.c H5Oattr.c \
- H5Oattribute.c H5Obogus.c H5Obtreek.c H5Ocache.c H5Ochunk.c \
+ H5Oattribute.c H5Obogus.c H5Obtreek.c H5Ocache.c H5Ocache_image.c \
+ H5Ochunk.c \
H5Ocont.c H5Ocopy.c H5Odbg.c H5Odrvinfo.c H5Odtype.c H5Oefl.c \
H5Ofill.c H5Oflush.c H5Ofsinfo.c H5Oginfo.c \
H5Olayout.c \
@@ -96,6 +96,7 @@ libhdf5_la_SOURCES= H5.c H5checksum.c H5dbg.c H5system.c H5timer.c H5trace.c \
H5Pfapl.c H5Pfcpl.c H5Pfmpl.c \
H5Pgcpl.c H5Pint.c \
H5Plapl.c H5Plcpl.c H5Pocpl.c H5Pocpypl.c H5Pstrcpl.c H5Ptest.c \
+ H5PB.c \
H5PL.c \
H5R.c H5Rdeprec.c \
H5UC.c \
diff --git a/src/hdf5.h b/src/hdf5.h
index 7a10507..fc4541a 100644
--- a/src/hdf5.h
+++ b/src/hdf5.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/AtomicWriterReader.txt b/test/AtomicWriterReader.txt
new file mode 100644
index 0000000..dc0a3bd
--- /dev/null
+++ b/test/AtomicWriterReader.txt
@@ -0,0 +1,48 @@
+Atomic Tests Instructions
+=========================
+
+Purpose:
+--------
+This document describes how to build and run the Atomic Writer and Reader tests.
+The atomic test verifies whether atomic read-write operations work on a system.
+The two programs are atomic_writer.c and atomic_reader.c:
+atomic_writer.c is the "write" part of the test, and
+atomic_reader.c is the "read" part of the test.
+
+Building the Tests
+------------------
+The two test programs are built automatically during the configure and make
+process. To build them individually, run the following in the test/ directory:
+$ make atomic_writer
+$ make atomic_reader
+
+Running the Tests
+-----------------
+$ atomic_writer -n <number of integers to write> -i <number of iterations for writer>
+$ atomic_reader -n <number of integers to read> -i <number of iterations for reader>
+
+Notes:
+(1) "atomic_data" is the data file used by both the writer and the reader;
+    it is created in the current directory.
+(2) The value for -n should be the same for both the writer and the reader.
+(3) The values for options n and i should be positive integers.
+(4) For this version, the user has to provide both options -n and -i to run
+ the writer and the reader.
+(5) If the user wants to run the writer for a long time, just provide a
+    large number for -i.
+
+Examples
+--------
+$ ./atomic_writer -n 10000 -i 5
+  Atomically write a pattern of 10000 integers, and iterate the whole write
+  process 5 times.
+
+$ ./atomic_reader -n 10000 -i 2
+  Atomically read a pattern of 10000 integers, and iterate the whole read
+  process 2 times. A summary is printed at the end. If all atomic reads are
+  correct, it will not report any read beyond "0 re-tries"; that is, all reads
+  succeeded on the first attempt.
+
+Remark:
+You usually want the writer to iterate more times than the reader so that
+the writing will not finish before reading is done.
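
The instructions above boil down to this: the writer emits a known pattern of n integers with a single write() call, and the reader re-reads until it sees the complete pattern. A simplified, self-contained sketch of the writer side (illustrative only, not the shipped atomic_writer.c):

    /* Illustrative only (not the shipped atomic_writer.c): write a known
     * pattern of n integers with one write() call so a concurrent reader sees
     * either the whole pattern or none of it. */
    #include <fcntl.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <unistd.h>

    int main(int argc, char *argv[])
    {
        int     n      = (argc > 1) ? atoi(argv[1]) : 10000;   /* like -n */
        int    *buf    = malloc((size_t)n * sizeof(int));
        size_t  nbytes = (size_t)n * sizeof(int);
        int     fd, i;

        if (buf == NULL)
            return 1;
        for (i = 0; i < n; i++)
            buf[i] = i;                                         /* the pattern */

        fd = open("atomic_data", O_WRONLY | O_CREAT | O_TRUNC, 0644);
        if (fd < 0 || write(fd, buf, nbytes) != (ssize_t)nbytes)
            perror("atomic write");

        if (fd >= 0)
            close(fd);
        free(buf);
        return 0;
    }
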
diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt
index 0197524..d7965cb 100644
--- a/test/CMakeLists.txt
+++ b/test/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_TEST)
#-----------------------------------------------------------------------------
@@ -20,21 +20,23 @@ set (TEST_LIB_SOURCES
${HDF5_TEST_SOURCE_DIR}/h5test.c
${HDF5_TEST_SOURCE_DIR}/testframe.c
${HDF5_TEST_SOURCE_DIR}/cache_common.c
+ ${HDF5_TEST_SOURCE_DIR}/swmr_common.c
)
set (TEST_LIB_HEADERS
${HDF5_TEST_SOURCE_DIR}/h5test.h
${HDF5_TEST_SOURCE_DIR}/cache_common.h
+ ${HDF5_TEST_SOURCE_DIR}/swmr_common.h
)
add_library (${HDF5_TEST_LIB_TARGET} STATIC ${TEST_LIB_SOURCES} ${TEST_LIB_HEADERS})
TARGET_C_PROPERTIES (${HDF5_TEST_LIB_TARGET} STATIC " " " ")
if (MSVC)
target_link_libraries (${HDF5_TEST_LIB_TARGET} "ws2_32.lib")
-endif (MSVC)
+endif ()
if (MINGW)
target_link_libraries (${HDF5_TEST_LIB_TARGET} "wsock32.lib")
-endif (MINGW)
+endif ()
target_link_libraries (${HDF5_TEST_LIB_TARGET} ${HDF5_LIB_TARGET})
H5_SET_LIB_OPTIONS (${HDF5_TEST_LIB_TARGET} ${HDF5_TEST_LIB_NAME} STATIC)
set_target_properties (${HDF5_TEST_LIB_TARGET} PROPERTIES
@@ -47,10 +49,10 @@ if (BUILD_SHARED_LIBS)
TARGET_C_PROPERTIES (${HDF5_TEST_LIBSH_TARGET} SHARED " " " ")
if (MSVC)
target_link_libraries (${HDF5_TEST_LIBSH_TARGET} "ws2_32.lib")
- endif (MSVC)
+ endif ()
if (MINGW)
target_link_libraries (${HDF5_TEST_LIBSH_TARGET} "wsock32.lib")
- endif (MINGW)
+ endif ()
target_link_libraries (${HDF5_TEST_LIBSH_TARGET} ${HDF5_LIBSH_TARGET})
H5_SET_LIB_OPTIONS (${HDF5_TEST_LIBSH_TARGET} ${HDF5_TEST_LIB_NAME} SHARED ${HDF5_PACKAGE_SOVERSION})
set_target_properties (${HDF5_TEST_LIBSH_TARGET} PROPERTIES
@@ -64,8 +66,8 @@ if (BUILD_SHARED_LIBS)
APPEND PROPERTY COMPILE_DEFINITIONS
"H5_HAVE_THREADSAFE"
)
- endif (HDF5_ENABLE_THREADSAFE)
-endif (BUILD_SHARED_LIBS)
+ endif ()
+endif ()
#-----------------------------------------------------------------------------
# If plugin library tests can be tested
@@ -110,7 +112,7 @@ endif (BUILD_SHARED_LIBS)
"$<TARGET_FILE:${HDF5_TEST_PLUGIN_LIB_TARGET}>"
"${CMAKE_BINARY_DIR}/testdir1/$<TARGET_FILE_NAME:${HDF5_TEST_PLUGIN_LIB_TARGET}>"
)
- endforeach (test_lib ${TEST_PLUGIN_LIBS})
+ endforeach ()
foreach (test_lib ${TEST2_PLUGIN_LIBS})
set (HDF5_TEST_PLUGIN_LIB_CORENAME "${test_lib}")
@@ -136,7 +138,7 @@ endif (BUILD_SHARED_LIBS)
"$<TARGET_FILE:${HDF5_TEST_PLUGIN_LIB_TARGET}>"
"${CMAKE_BINARY_DIR}/testdir2/$<TARGET_FILE_NAME:${HDF5_TEST_PLUGIN_LIB_TARGET}>"
)
- endforeach (test_lib ${TEST2_PLUGIN_LIBS})
+ endforeach ()
set (testhdf5_SOURCES
${HDF5_TEST_SOURCE_DIR}/testhdf5.c
@@ -178,7 +180,7 @@ if (BUILD_SHARED_LIBS)
TARGET_C_PROPERTIES (testhdf5-shared SHARED " " " ")
target_link_libraries (testhdf5-shared ${HDF5_TEST_LIBSH_TARGET} ${HDF5_LIBSH_TARGET})
set_target_properties (testhdf5-shared PROPERTIES FOLDER test)
-endif (BUILD_SHARED_LIBS)
+endif ()
MACRO (ADD_H5_EXE file)
add_executable (${file} ${HDF5_TEST_SOURCE_DIR}/${file}.c)
@@ -192,8 +194,8 @@ MACRO (ADD_H5_EXE file)
TARGET_C_PROPERTIES (${file}-shared SHARED " " " ")
target_link_libraries (${file}-shared ${HDF5_TEST_LIBSH_TARGET} ${HDF5_LIBSH_TARGET})
set_target_properties (${file}-shared PROPERTIES FOLDER test)
- endif (BUILD_SHARED_LIBS)
-ENDMACRO (ADD_H5_EXE file)
+ endif ()
+ENDMACRO ()
set (H5_TESTS
cache
@@ -223,6 +225,7 @@ set (H5_TESTS
objcopy
links
unlink
+ twriteorder
big
mtime
fillval
@@ -243,6 +246,7 @@ set (H5_TESTS
cross_read
freespace
mf
+ page_buffer
vds
file_image
unregister
@@ -253,7 +257,34 @@ set (H5_TESTS
foreach (test ${H5_TESTS})
ADD_H5_EXE(${test})
-endforeach (test ${H5_TESTS})
+endforeach ()
+
+set (H5_SWMR_TESTS
+ swmr_addrem_writer
+ swmr_check_compat_vfd
+ swmr_generator
+ swmr_reader
+ swmr_remove_reader
+ swmr_remove_writer
+ swmr_sparse_reader
+ swmr_sparse_writer
+ swmr_start_write
+ swmr_writer
+)
+
+foreach (test ${H5_SWMR_TESTS})
+ ADD_H5_EXE(${test})
+endforeach ()
+
+set (H5_VDS_SWMR_TESTS
+ vds_swmr_gen
+ vds_swmr_reader
+ vds_swmr_writer
+)
+
+foreach (test ${H5_VDS_SWMR_TESTS})
+ ADD_H5_EXE(${test})
+endforeach ()
##############################################################################
##############################################################################
@@ -261,6 +292,26 @@ endforeach (test ${H5_TESTS})
##############################################################################
##############################################################################
+#-- Adding test for cache_image
+add_executable (cache_image
+ ${HDF5_TEST_SOURCE_DIR}/cache_image.c
+ ${HDF5_TEST_SOURCE_DIR}/genall5.c
+)
+TARGET_NAMING (cache_image STATIC)
+TARGET_C_PROPERTIES (cache_image STATIC " " " ")
+target_link_libraries (cache_image ${HDF5_LIB_TARGET} ${HDF5_TEST_LIB_TARGET})
+set_target_properties (cache_image PROPERTIES FOLDER test)
+if (BUILD_SHARED_LIBS)
+ add_executable (cache_image-shared
+ ${HDF5_TEST_SOURCE_DIR}/cache_image.c
+ ${HDF5_TEST_SOURCE_DIR}/genall5.c
+ )
+ TARGET_NAMING (cache_image-shared SHARED)
+ TARGET_C_PROPERTIES (cache_image-shared SHARED " " " ")
+ target_link_libraries (cache_image-shared ${HDF5_TEST_LIBSH_TARGET} ${HDF5_LIBSH_TARGET})
+ set_target_properties (cache_image-shared PROPERTIES FOLDER test)
+endif ()
+
#-- Adding test for hyperslab
add_executable (hyperslab ${HDF5_TEST_SOURCE_DIR}/hyperslab.c)
TARGET_NAMING (hyperslab STATIC)
@@ -278,8 +329,8 @@ if (BUILD_SHARED_LIBS)
APPEND PROPERTY COMPILE_DEFINITIONS
"H5_HAVE_THREADSAFE"
)
- endif (HDF5_ENABLE_THREADSAFE)
-endif (BUILD_SHARED_LIBS)
+ endif ()
+endif ()
#-- Adding test for ttsafe
add_executable (ttsafe
@@ -310,20 +361,23 @@ if (BUILD_SHARED_LIBS)
APPEND PROPERTY COMPILE_DEFINITIONS
"H5_HAVE_THREADSAFE"
)
- endif (HDF5_ENABLE_THREADSAFE)
-endif (BUILD_SHARED_LIBS)
+ endif ()
+endif ()
set (H5_CHECK_TESTS
error_test
err_compat
tcheck_version
testmeta
+ atomic_writer
+ atomic_reader
links_env
+ flushrefresh
)
foreach (test ${H5_CHECK_TESTS})
ADD_H5_EXE(${test})
-endforeach (test ${H5_CHECK_TESTS})
+endforeach ()
#-- Adding test for accum_swmr_reader
# This has to be copied to the test directory for execve() to find it
@@ -338,7 +392,7 @@ set_target_properties (accum_swmr_reader PROPERTIES FOLDER test)
set_target_properties (accum PROPERTIES DEPENDS accum_swmr_reader)
if (BUILD_SHARED_LIBS)
set_target_properties (accum-shared PROPERTIES DEPENDS accum_swmr_reader)
-endif (BUILD_SHARED_LIBS)
+endif ()
#-- Adding test for libinfo
set (GREP_RUNNER ${PROJECT_BINARY_DIR}/GrepRunner.cmake)
@@ -346,9 +400,9 @@ file (WRITE ${GREP_RUNNER}
"file (STRINGS \${TEST_PROGRAM} TEST_RESULT REGEX \"SUMMARY OF THE HDF5 CONFIGURATION\")
if (\${TEST_RESULT} STREQUAL \"0\")
message (FATAL_ERROR \"Failed: The output: \${TEST_RESULT} of \${TEST_PROGRAM} did not contain SUMMARY OF THE HDF5 CONFIGURATION\")
-else (\${TEST_RESULT} STREQUAL \"0\")
+else ()
message (STATUS \"COMMAND Result: \${TEST_RESULT}\")
-endif (\${TEST_RESULT} STREQUAL \"0\")
+endif ()
"
)
@@ -361,12 +415,57 @@ if (BUILD_SHARED_LIBS)
TARGET_C_PROPERTIES (plugin SHARED " " " ")
target_link_libraries (plugin ${HDF5_TEST_LIB_TARGET})
set_target_properties (plugin PROPERTIES FOLDER test)
-else (BUILD_SHARED_LIBS)
+else ()
add_executable (plugin ${HDF5_TEST_SOURCE_DIR}/plugin.c)
TARGET_NAMING (plugin STATIC)
TARGET_C_PROPERTIES (plugin STATIC " " " ")
target_link_libraries (plugin ${HDF5_TEST_LIB_TARGET})
set_target_properties (plugin PROPERTIES FOLDER test)
-endif (BUILD_SHARED_LIBS)
+endif ()
+
+##############################################################################
+### U S E C A S E S T E S T S
+##############################################################################
+set (use_append_chunk_SOURCES ${HDF5_TEST_SOURCE_DIR}/use_append_chunk.c ${HDF5_TEST_SOURCE_DIR}/use_common.c)
+add_executable (use_append_chunk ${use_append_chunk_SOURCES})
+TARGET_NAMING (use_append_chunk STATIC)
+TARGET_C_PROPERTIES (use_append_chunk STATIC " " " ")
+target_link_libraries (use_append_chunk ${HDF5_LIB_TARGET} ${HDF5_TEST_LIB_TARGET})
+set_target_properties (use_append_chunk PROPERTIES FOLDER test)
+if (BUILD_SHARED_LIBS)
+ add_executable (use_append_chunk-shared ${use_append_chunk_SOURCES})
+ TARGET_NAMING (use_append_chunk-shared SHARED)
+ TARGET_C_PROPERTIES (use_append_chunk-shared SHARED " " " ")
+ target_link_libraries (use_append_chunk-shared ${HDF5_TEST_LIBSH_TARGET} ${HDF5_LIBSH_TARGET})
+ set_target_properties (use_append_chunk-shared PROPERTIES FOLDER test)
+endif ()
+
+set (use_append_mchunks_SOURCES ${HDF5_TEST_SOURCE_DIR}/use_append_mchunks.c ${HDF5_TEST_SOURCE_DIR}/use_common.c)
+add_executable (use_append_mchunks ${use_append_mchunks_SOURCES})
+TARGET_NAMING (use_append_mchunks STATIC)
+TARGET_C_PROPERTIES (use_append_mchunks STATIC " " " ")
+target_link_libraries (use_append_mchunks ${HDF5_LIB_TARGET} ${HDF5_TEST_LIB_TARGET})
+set_target_properties (use_append_mchunks PROPERTIES FOLDER test)
+if (BUILD_SHARED_LIBS)
+ add_executable (use_append_mchunks-shared ${use_append_mchunks_SOURCES})
+ TARGET_NAMING (use_append_mchunks-shared SHARED)
+ TARGET_C_PROPERTIES (use_append_mchunks-shared SHARED " " " ")
+ target_link_libraries (use_append_mchunks-shared ${HDF5_TEST_LIBSH_TARGET} ${HDF5_LIBSH_TARGET})
+ set_target_properties (use_append_mchunks-shared PROPERTIES FOLDER test)
+endif ()
+
+set (use_disable_mdc_flushes_SOURCES ${HDF5_TEST_SOURCE_DIR}/use_disable_mdc_flushes.c)
+add_executable (use_disable_mdc_flushes ${use_disable_mdc_flushes_SOURCES})
+TARGET_NAMING (use_disable_mdc_flushes STATIC)
+TARGET_C_PROPERTIES (use_disable_mdc_flushes STATIC " " " ")
+target_link_libraries (use_disable_mdc_flushes ${HDF5_LIB_TARGET} ${HDF5_TEST_LIB_TARGET})
+set_target_properties (use_disable_mdc_flushes PROPERTIES FOLDER test)
+if (BUILD_SHARED_LIBS)
+ add_executable (use_disable_mdc_flushes-shared ${use_disable_mdc_flushes_SOURCES})
+ TARGET_NAMING (use_disable_mdc_flushes-shared SHARED)
+ TARGET_C_PROPERTIES (use_disable_mdc_flushes-shared SHARED " " " ")
+ target_link_libraries (use_disable_mdc_flushes-shared ${HDF5_TEST_LIBSH_TARGET} ${HDF5_LIBSH_TARGET})
+ set_target_properties (use_disable_mdc_flushes-shared PROPERTIES FOLDER test)
+endif ()
include (CMakeTests.cmake)
diff --git a/test/CMakeTests.cmake b/test/CMakeTests.cmake
index 0ccb557..fe8f261 100644
--- a/test/CMakeTests.cmake
+++ b/test/CMakeTests.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
##############################################################################
##############################################################################
@@ -13,7 +24,7 @@ if (BUILD_SHARED_LIBS)
file (MAKE_DIRECTORY "${PROJECT_BINARY_DIR}/H5TEST-shared")
file (MAKE_DIRECTORY "${PROJECT_BINARY_DIR}/H5TEST-shared/testfiles")
file (MAKE_DIRECTORY "${PROJECT_BINARY_DIR}/H5TEST-shared/testfiles/plist_files")
-endif (BUILD_SHARED_LIBS)
+endif ()
if (HDF5_TEST_VFD)
set (VFD_LIST
@@ -27,14 +38,14 @@ if (HDF5_TEST_VFD)
)
if (DIRECT_VFD)
set (VFD_LIST ${VFD_LIST} direct)
- endif (DIRECT_VFD)
+ endif ()
foreach (vfdtest ${VFD_LIST})
file (MAKE_DIRECTORY "${PROJECT_BINARY_DIR}/${vfdtest}")
#if (BUILD_SHARED_LIBS)
# file (MAKE_DIRECTORY "${PROJECT_BINARY_DIR}/${vfdtest}-shared")
- #endif (BUILD_SHARED_LIBS)
- endforeach (vfdtest ${VFD_LIST})
-endif (HDF5_TEST_VFD)
+ #endif ()
+ endforeach ()
+endif ()
# --------------------------------------------------------------------
# Copy all the HDF5 files from the source directory into the test directory
@@ -56,24 +67,24 @@ if (BUILD_SHARED_LIBS)
COMMAND ${CMAKE_COMMAND}
ARGS -E copy_if_different "$<TARGET_FILE:accum_swmr_reader>" "${PROJECT_BINARY_DIR}/H5TEST-shared/accum_swmr_reader"
)
-endif (BUILD_SHARED_LIBS)
+endif ()
foreach (h5_tfile ${HDF5_TEST_FILES})
HDFTEST_COPY_FILE("${HDF5_TOOLS_DIR}/testfiles/${h5_tfile}" "${PROJECT_BINARY_DIR}/H5TEST/${h5_tfile}" "HDF5_TEST_LIB_files")
if (BUILD_SHARED_LIBS)
HDFTEST_COPY_FILE("${HDF5_TOOLS_DIR}/testfiles/${h5_tfile}" "${PROJECT_BINARY_DIR}/H5TEST-shared/${h5_tfile}" "HDF5_TEST_LIBSH_files")
- endif (BUILD_SHARED_LIBS)
-endforeach (h5_tfile ${HDF5_TEST_FILES})
+ endif ()
+endforeach ()
if (HDF5_TEST_VFD)
foreach (vfdtest ${VFD_LIST})
foreach (h5_tfile ${HDF5_TEST_FILES})
HDFTEST_COPY_FILE("${HDF5_TOOLS_DIR}/testfiles/${h5_tfile}" "${PROJECT_BINARY_DIR}/${vfdtest}/${h5_tfile}" "HDF5_TEST_LIB_files")
if (BUILD_SHARED_LIBS)
HDFTEST_COPY_FILE("${HDF5_TOOLS_DIR}/testfiles/${h5_tfile}" "${PROJECT_BINARY_DIR}/${vfdtest}-shared/${h5_tfile}" "HDF5_TEST_LIBSH_files")
- endif (BUILD_SHARED_LIBS)
- endforeach (h5_tfile ${HDF5_TEST_FILES})
- endforeach (vfdtest ${VFD_LIST})
-endif (HDF5_TEST_VFD)
+ endif ()
+ endforeach ()
+ endforeach ()
+endif ()
# --------------------------------------------------------------------
# Copy all the HDF5 files from the test directory into the source directory
@@ -90,18 +101,18 @@ foreach (ref_file ${HDF5_REFERENCE_FILES})
HDFTEST_COPY_FILE("${HDF5_TEST_SOURCE_DIR}/testfiles/${ref_file}" "${PROJECT_BINARY_DIR}/H5TEST/${ref_file}" "HDF5_TEST_LIB_files")
if (BUILD_SHARED_LIBS)
HDFTEST_COPY_FILE("${HDF5_TEST_SOURCE_DIR}/testfiles/${ref_file}" "${PROJECT_BINARY_DIR}/H5TEST-shared/${ref_file}" "HDF5_TEST_LIBSH_files")
- endif (BUILD_SHARED_LIBS)
-endforeach (ref_file ${HDF5_REFERENCE_FILES})
+ endif ()
+endforeach ()
if (HDF5_TEST_VFD)
foreach (vfdtest ${VFD_LIST})
foreach (ref_file ${HDF5_REFERENCE_FILES})
HDFTEST_COPY_FILE("${HDF5_TEST_SOURCE_DIR}/testfiles/${ref_file}" "${PROJECT_BINARY_DIR}/${vfdtest}/${ref_file}" "HDF5_TEST_LIB_files")
if (BUILD_SHARED_LIBS)
HDFTEST_COPY_FILE("${HDF5_TEST_SOURCE_DIR}/testfiles/${ref_file}" "${PROJECT_BINARY_DIR}/${vfdtest}-shared/${ref_file}" "HDF5_TEST_LIBSH_files")
- endif (BUILD_SHARED_LIBS)
- endforeach (ref_file ${HDF5_REFERENCE_FILES})
- endforeach (vfdtest ${VFD_LIST})
-endif (HDF5_TEST_VFD)
+ endif ()
+ endforeach ()
+ endforeach ()
+endif ()
# --------------------------------------------------------------------
# Copy test files from test/testfiles/plist_files dir to test dir
@@ -163,13 +174,14 @@ foreach (plistfile ${HDF5_REFERENCE_PLIST_FILES})
if (BUILD_SHARED_LIBS)
HDFTEST_COPY_FILE("${HDF5_TEST_SOURCE_DIR}/testfiles/plist_files/${plistfile}" "${PROJECT_BINARY_DIR}/H5TEST-shared/testfiles/plist_files/${plistfile}" "HDF5_TEST_LIBSH_files")
HDFTEST_COPY_FILE("${HDF5_TEST_SOURCE_DIR}/testfiles/plist_files/def_${plistfile}" "${PROJECT_BINARY_DIR}/H5TEST-shared/testfiles/plist_files/def_${plistfile}" "HDF5_TEST_LIBSH_files")
- endif (BUILD_SHARED_LIBS)
-endforeach (plistfile ${HDF5_REFERENCE_PLIST_FILES})
+ endif ()
+endforeach ()
# --------------------------------------------------------------------
#-- Copy all the HDF5 files from the test directory into the source directory
# --------------------------------------------------------------------
set (HDF5_REFERENCE_TEST_FILES
+ aggr.h5
bad_compound.h5
be_data.h5
be_extlink1.h5
@@ -187,7 +199,15 @@ set (HDF5_REFERENCE_TEST_FILES
filespace_1_8.h5
fill_old.h5
filter_error.h5
+ fsm_aggr_nopersist.h5
+ fsm_aggr_persist.h5
group_old.h5
+ h5fc_ext1_i.h5
+ h5fc_ext1_f.h5
+ h5fc_ext2_if.h5
+ h5fc_ext2_sf.h5
+ h5fc_ext3_isf.h5
+ h5fc_ext_none.h5
le_data.h5
le_extlink1.h5
le_extlink2.h5
@@ -195,6 +215,9 @@ set (HDF5_REFERENCE_TEST_FILES
multi_file_v16-r.h5
multi_file_v16-s.h5
noencoder.h5
+ none.h5
+ paged_nopersist.h5
+ paged_persist.h5
specmetaread.h5
tarrold.h5
tbad_msg_count.h5
@@ -212,22 +235,22 @@ foreach (h5_file ${HDF5_REFERENCE_TEST_FILES})
HDFTEST_COPY_FILE("${HDF5_TEST_SOURCE_DIR}/${h5_file}" "${HDF5_TEST_BINARY_DIR}/H5TEST/${h5_file}" "HDF5_TEST_LIB_files")
if (BUILD_SHARED_LIBS)
HDFTEST_COPY_FILE("${HDF5_TEST_SOURCE_DIR}/${h5_file}" "${HDF5_TEST_BINARY_DIR}/H5TEST-shared/${h5_file}" "HDF5_TEST_LIBSH_files")
- endif (BUILD_SHARED_LIBS)
-endforeach (h5_file ${HDF5_REFERENCE_TEST_FILES})
+ endif ()
+endforeach ()
if (HDF5_TEST_VFD)
foreach (vfdtest ${VFD_LIST})
foreach (h5_file ${HDF5_REFERENCE_TEST_FILES})
HDFTEST_COPY_FILE("${HDF5_TEST_SOURCE_DIR}/${h5_file}" "${HDF5_TEST_BINARY_DIR}/${vfdtest}/${h5_file}" "HDF5_TEST_LIB_files")
if (BUILD_SHARED_LIBS)
HDFTEST_COPY_FILE("${HDF5_TEST_SOURCE_DIR}/${h5_file}" "${HDF5_TEST_BINARY_DIR}/${vfdtest}-shared/${h5_file}" "HDF5_TEST_LIBSH_files")
- endif (BUILD_SHARED_LIBS)
- endforeach (h5_file ${HDF5_REFERENCE_TEST_FILES})
- endforeach (vfdtest ${VFD_LIST})
-endif (HDF5_TEST_VFD)
+ endif ()
+ endforeach ()
+ endforeach ()
+endif ()
add_custom_target(HDF5_TEST_LIB_files ALL COMMENT "Copying files needed by HDF5_TEST_LIB tests" DEPENDS ${HDF5_TEST_LIB_files_list})
if (BUILD_SHARED_LIBS)
add_custom_target(HDF5_TEST_LIBSH_files ALL COMMENT "Copying files needed by HDF5_TEST_LIBSH tests" DEPENDS ${HDF5_TEST_LIBSH_files_list})
-endif()
+endif ()
# Remove any output file left over from previous test run
add_test (NAME H5TEST-clear-testhdf5-objects
@@ -279,8 +302,17 @@ if (HDF5_ENABLE_USING_MEMCHECKER)
ENVIRONMENT "HDF5_ALARM_SECONDS=3600;srcdir=${HDF5_TEST_BINARY_DIR}/H5TEST"
WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST
)
-else (HDF5_ENABLE_USING_MEMCHECKER)
- add_test (NAME H5TEST-testhdf5 COMMAND $<TARGET_FILE:testhdf5>)
+else ()
+ add_test (NAME H5TEST-testhdf5 COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:testhdf5>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=testhdf5.txt"
+ #-D "TEST_REFERENCE=testhdf5.out"
+ -D "TEST_FOLDER=${HDF5_TEST_BINARY_DIR}/H5TEST"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
set_tests_properties (H5TEST-testhdf5 PROPERTIES
DEPENDS H5TEST-clear-testhdf5-objects
ENVIRONMENT "srcdir=${HDF5_TEST_BINARY_DIR}/H5TEST"
@@ -310,14 +342,23 @@ else (HDF5_ENABLE_USING_MEMCHECKER)
WORKING_DIRECTORY
${HDF5_TEST_BINARY_DIR}/H5TEST-shared
)
- add_test (NAME H5TEST-shared-testhdf5 COMMAND $<TARGET_FILE:testhdf5-shared>)
+ add_test (NAME H5TEST-shared-testhdf5 COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:testhdf5-shared>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=testhdf5.txt"
+ #-D "TEST_REFERENCE=testhdf5.out"
+ -D "TEST_FOLDER=${HDF5_TEST_BINARY_DIR}/H5TEST-shared"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
set_tests_properties (H5TEST-shared-testhdf5 PROPERTIES
DEPENDS H5TEST-shared-clear-testhdf5-objects
ENVIRONMENT "srcdir=${HDF5_TEST_BINARY_DIR}/H5TEST-shared"
WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST-shared
)
- endif (BUILD_SHARED_LIBS)
-endif (HDF5_ENABLE_USING_MEMCHECKER)
+ endif ()
+endif ()
##############################################################################
##############################################################################
@@ -346,6 +387,10 @@ set (test_CLEANFILES
layout_extend.h5
zero_chunk.h5
chunk_single.h5
+ swmr_non_latest.h5
+ earray_hdr_fd.h5
+ farray_hdr_fd.h5
+ bt2_hdr_fd.h5
storage_size.h5
dls_01_strings.h5
extend.h5
@@ -439,7 +484,15 @@ set (test_CLEANFILES
tvltypes.h5
tvlstr.h5
tvlstr2.h5
+ twriteorder.dat
flush.h5
+ flush-swmr.h5
+ noflush.h5
+ noflush-swmr.h5
+ flush_extend.h5
+ flush_extend-swmr.h5
+ noflush_extend.h5
+ noflush_extend-swmr.h5
enum1.h5
titerate.h5
ttsafe.h5
@@ -507,11 +560,24 @@ set (test_CLEANFILES
vds_dapl.h5
vds_src_0.h5
vds_src_1.h5
+ swmr_data.h5
+ use_use_append_chunk.h5
+ use_append_mchunks.h5
+ use_disable_mdc_flushes.h5
tbogus.h5.copy
+ flushrefresh.h5
+ flushrefresh_VERIFICATION_START
+ flushrefresh_VERIFICATION_CHECKPOINT1
+ flushrefresh_VERIFICATION_CHECKPOINT2
+ flushrefresh_VERIFICATION_DONE
+ atomic_data
accum_swmr_big.h5
ohdr_swmr.h5
+ test_swmr*.h5
cache_logging.h5
cache_logging.out
+ vds_swmr.h5
+ vds_swmr_src_*.h5
)
# Remove any output file left over from previous test run
@@ -553,6 +619,7 @@ set (H5TEST_TESTS
objcopy
links
unlink
+ twriteorder
big
mtime
fillval
@@ -574,6 +641,7 @@ set (H5TEST_TESTS
cross_read
freespace
mf
+ page_buffer
vds
file_image
unregister
@@ -583,19 +651,37 @@ set (H5TEST_TESTS
)
foreach (test ${H5TEST_TESTS})
- if (${test} STREQUAL "big" AND CYGWIN)
- add_test (NAME H5TEST-${test}
- COMMAND ${CMAKE_COMMAND} -E echo "SKIP ${test}"
- )
- else (${test} STREQUAL "big" AND CYGWIN)
+ if (HDF5_ENABLE_USING_MEMCHECKER)
add_test (NAME H5TEST-${test} COMMAND $<TARGET_FILE:${test}>)
- endif (${test} STREQUAL "big" AND CYGWIN)
- set_tests_properties (H5TEST-${test} PROPERTIES
- DEPENDS H5TEST-clear-objects
- ENVIRONMENT "srcdir=${HDF5_TEST_BINARY_DIR}/H5TEST"
- WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST
- )
-endforeach (test ${H5TEST_TESTS})
+ set_tests_properties (H5TEST-${test} PROPERTIES
+ DEPENDS H5TEST-clear-objects
+ ENVIRONMENT "srcdir=${HDF5_TEST_BINARY_DIR}/H5TEST"
+ WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST
+ )
+ else ()
+ if (${test} STREQUAL "big" AND CYGWIN)
+ add_test (NAME H5TEST-${test}
+ COMMAND ${CMAKE_COMMAND} -E echo "SKIP ${test}"
+ )
+ else ()
+ add_test (NAME H5TEST-${test} COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:${test}>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=${test}.txt"
+ #-D "TEST_REFERENCE=${test}.out"
+ -D "TEST_FOLDER=${HDF5_TEST_BINARY_DIR}/H5TEST"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+ endif ()
+ set_tests_properties (H5TEST-${test} PROPERTIES
+ DEPENDS H5TEST-clear-objects
+ ENVIRONMENT "srcdir=${HDF5_TEST_BINARY_DIR}/H5TEST"
+ WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST
+ )
+ endif ()
+endforeach ()
set_tests_properties (H5TEST-flush2 PROPERTIES DEPENDS H5TEST-flush1)
set_tests_properties (H5TEST-fheap PROPERTIES TIMEOUT 1800)
@@ -618,22 +704,31 @@ if (BUILD_SHARED_LIBS)
add_test (NAME H5TEST-shared-${test}
COMMAND ${CMAKE_COMMAND} -E echo "SKIP ${test}-shared"
)
- else (${test} STREQUAL "big" AND CYGWIN)
- add_test (NAME H5TEST-shared-${test} COMMAND $<TARGET_FILE:${test}-shared>)
- endif (${test} STREQUAL "big" AND CYGWIN)
+ else ()
+ add_test (NAME H5TEST-shared-${test} COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:${test}-shared>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=${test}.txt"
+ #-D "TEST_REFERENCE=${test}.out"
+ -D "TEST_FOLDER=${HDF5_TEST_BINARY_DIR}/H5TEST-shared"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+ endif ()
set_tests_properties (H5TEST-shared-${test} PROPERTIES
DEPENDS H5TEST-shared-clear-objects
ENVIRONMENT "srcdir=${HDF5_TEST_BINARY_DIR}/H5TEST-shared"
WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST-shared
)
- endforeach (test ${H5TEST_TESTS})
+ endforeach ()
set_tests_properties (H5TEST-shared-flush2 PROPERTIES DEPENDS H5TEST-shared-flush1)
set_tests_properties (H5TEST-shared-fheap PROPERTIES TIMEOUT 1800)
set_tests_properties (H5TEST-shared-big PROPERTIES TIMEOUT 1800)
set_tests_properties (H5TEST-shared-btree2 PROPERTIES TIMEOUT 1800)
set_tests_properties (H5TEST-shared-objcopy PROPERTIES TIMEOUT 1800)
-endif (BUILD_SHARED_LIBS)
+endif ()
##############################################################################
##############################################################################
@@ -650,14 +745,43 @@ if (NOT CYGWIN)
WORKING_DIRECTORY
${HDF5_TEST_BINARY_DIR}/H5TEST
)
- add_test (NAME H5TEST-cache COMMAND $<TARGET_FILE:cache>)
+ if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME H5TEST-cache COMMAND $<TARGET_FILE:cache>)
+ else ()
+ add_test (NAME H5TEST-cache COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:cache>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=cache.txt"
+ #-D "TEST_REFERENCE=cache.out"
+ -D "TEST_FOLDER=${HDF5_TEST_BINARY_DIR}/H5TEST"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+ endif ()
set_tests_properties (H5TEST-cache PROPERTIES
DEPENDS H5TEST-clear-cache-objects
- ENVIRONMENT "srcdir=${HDF5_TEST_BINARY_DIR}/H5TEST;HDF5TestExpress=${HDF_TEST_EXPRESS}"
+ ENVIRONMENT "srcdir=${HDF5_TEST_BINARY_DIR}/H5TEST;HDF5TestExpress=3"
WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST
)
set_tests_properties (H5TEST-cache PROPERTIES TIMEOUT 1800)
-endif (NOT CYGWIN)
+endif ()
+
+#-- Adding test for cache_image
+add_test (
+ NAME H5TEST-clear-cache_image-objects
+ COMMAND ${CMAKE_COMMAND}
+ -E remove
+ cache_image_test.h5
+ WORKING_DIRECTORY
+ ${HDF5_TEST_BINARY_DIR}/H5TEST
+)
+add_test (NAME H5TEST-cache_image COMMAND $<TARGET_FILE:cache_image>)
+set_tests_properties (H5TEST-cache_image PROPERTIES
+ DEPENDS H5TEST-clear-cache_image-objects
+ ENVIRONMENT "srcdir=${HDF5_TEST_BINARY_DIR}/H5TEST;HDF5TestExpress=3"
+ WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST
+)
#-- Adding test for err_compat
if (HDF5_ENABLE_DEPRECATED_SYMBOLS)
@@ -684,7 +808,7 @@ if (HDF5_ENABLE_DEPRECATED_SYMBOLS)
ENVIRONMENT "srcdir=${HDF5_TEST_BINARY_DIR}/H5TEST"
WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST
)
-endif (HDF5_ENABLE_DEPRECATED_SYMBOLS)
+endif ()
#-- Adding test for error_test
add_test (NAME H5TEST-clear-error_test-objects
@@ -749,7 +873,7 @@ add_test (NAME H5TEST-testlibinfo
if (BUILD_SHARED_LIBS)
#-- Adding test for cache
- if (NOT CYGWIN)
+ if (NOT CYGWIN AND NOT WIN32)
add_test (NAME H5TEST-shared-clear-cache-objects
COMMAND ${CMAKE_COMMAND}
-E remove
@@ -757,14 +881,27 @@ if (BUILD_SHARED_LIBS)
WORKING_DIRECTORY
${HDF5_TEST_BINARY_DIR}/H5TEST-shared
)
- add_test (NAME H5TEST-shared-cache COMMAND $<TARGET_FILE:cache-shared>)
+ if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME H5TEST-shared-cache COMMAND $<TARGET_FILE:cache-shared>)
+ else ()
+ add_test (NAME H5TEST-shared-cache COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:cache-shared>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=cache-shared.txt"
+ #-D "TEST_REFERENCE=cache-shared.out"
+ -D "TEST_FOLDER=${HDF5_TEST_BINARY_DIR}/H5TEST-shared"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+ endif ()
set_tests_properties (H5TEST-shared-cache PROPERTIES
DEPENDS H5TEST-shared-clear-cache-objects
ENVIRONMENT "srcdir=${HDF5_TEST_BINARY_DIR}/H5TEST-shared;HDF5TestExpress=${HDF_TEST_EXPRESS}"
WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST-shared
)
set_tests_properties (H5TEST-shared-cache PROPERTIES TIMEOUT 1800)
- endif (NOT CYGWIN)
+ endif ()
#-- Adding test for err_compat
if (HDF5_ENABLE_DEPRECATED_SYMBOLS)
@@ -791,7 +928,7 @@ if (BUILD_SHARED_LIBS)
ENVIRONMENT "srcdir=${HDF5_TEST_BINARY_DIR}/H5TEST-shared"
WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/H5TEST-shared
)
- endif (HDF5_ENABLE_DEPRECATED_SYMBOLS)
+ endif ()
#-- Adding test for error_test
add_test (NAME H5TEST-shared-clear-error_test-objects
@@ -853,16 +990,18 @@ if (BUILD_SHARED_LIBS)
WORKING_DIRECTORY
${HDF5_TEST_BINARY_DIR}/H5TEST-shared
)
-endif (BUILD_SHARED_LIBS)
+endif ()
##############################################################################
### P L U G I N T E S T S
##############################################################################
if (WIN32)
set (CMAKE_SEP "\;")
-else (WIN32)
+ set (BIN_REL_PATH "../../")
+else ()
set (CMAKE_SEP ":")
-endif (WIN32)
+ set (BIN_REL_PATH "../")
+endif ()
add_test (NAME H5PLUGIN-plugin COMMAND $<TARGET_FILE:plugin>)
set_tests_properties (H5PLUGIN-plugin PROPERTIES
@@ -871,6 +1010,24 @@ set_tests_properties (H5PLUGIN-plugin PROPERTIES
)
##############################################################################
+# HDFFV-9655 relative plugin test disabled
+#
+#add_test (NAME H5PLUGIN-pluginRelative COMMAND $<TARGET_FILE:plugin>)
+#set_tests_properties (H5PLUGIN-pluginRelative PROPERTIES
+# ENVIRONMENT "HDF5_PLUGIN_PATH=@/${BIN_REL_PATH}testdir1${CMAKE_SEP}@/${BIN_REL_PATH}testdir2;srcdir=${HDF5_TEST_BINARY_DIR}"
+# WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}
+#)
+##############################################################################
+
+##############################################################################
+### S W M R T E S T S
+##############################################################################
+# testflushrefresh.sh: flushrefresh
+# test_usecases.sh: use_append_chunk, use_append_mchunks, use_disable_mdc_flushes
+# testswmr.sh: swmr*
+# testvdsswmr.sh: vds_swmr*
+
+##############################################################################
##############################################################################
### V F D T E S T S ###
##############################################################################
@@ -936,9 +1093,10 @@ if (HDF5_TEST_VFD)
)
if (NOT CYGWIN)
set (H5_VFD_TESTS ${H5_VFD_TESTS} big cache)
- endif (NOT CYGWIN)
+ endif ()
- MACRO (CHECK_VFD_TEST vfdtest vfdname resultcode)
+ # Windows only macro
+ macro (CHECK_VFD_TEST vfdtest vfdname resultcode)
if (${vfdtest} STREQUAL "flush1" OR ${vfdtest} STREQUAL "flush2")
if (${vfdname} STREQUAL "multi" OR ${vfdname} STREQUAL "split")
if (NOT BUILD_SHARED_LIBS AND NOT CMAKE_BUILD_TYPE MATCHES Debug)
@@ -971,8 +1129,8 @@ if (HDF5_TEST_VFD)
ENVIRONMENT "srcdir=${HDF5_TEST_BINARY_DIR}/${vfdname}-shared"
WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/${vfdname}-shared
)
- endif (BUILD_SHARED_LIBS)
- else (NOT BUILD_SHARED_LIBS AND NOT CMAKE_BUILD_TYPE MATCHES Debug)
+ endif ()
+ else ()
add_test (NAME VFD-${vfdname}-${vfdtest}
COMMAND ${CMAKE_COMMAND} -E echo "SKIP VFD-${vfdname}-${vfdtest}"
)
@@ -980,9 +1138,9 @@ if (HDF5_TEST_VFD)
add_test (NAME VFD-${vfdname}-${test}-shared
COMMAND ${CMAKE_COMMAND} -E echo "SKIP VFD-${vfdname}-${vfdtest}-shared"
)
- endif (BUILD_SHARED_LIBS)
- endif(NOT BUILD_SHARED_LIBS AND NOT CMAKE_BUILD_TYPE MATCHES Debug)
- else (${vfdname} STREQUAL "multi" OR ${vfdname} STREQUAL "split")
+ endif ()
+ endif ()
+ else ()
add_test (NAME VFD-${vfdname}-${vfdtest}
COMMAND "${CMAKE_COMMAND}"
-D "TEST_PROGRAM=$<TARGET_FILE:${vfdtest}>"
@@ -1012,9 +1170,9 @@ if (HDF5_TEST_VFD)
ENVIRONMENT "srcdir=${HDF5_TEST_BINARY_DIR}/${vfdname}-shared"
WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/${vfdname}-shared
)
- endif (BUILD_SHARED_LIBS)
- endif (${vfdname} STREQUAL "multi" OR ${vfdname} STREQUAL "split")
- else (${vfdtest} STREQUAL "flush1" OR ${vfdtest} STREQUAL "flush2")
+ endif ()
+ endif ()
+ else ()
add_test (NAME VFD-${vfdname}-${vfdtest}
COMMAND "${CMAKE_COMMAND}"
-D "TEST_PROGRAM=$<TARGET_FILE:${vfdtest}>"
@@ -1029,7 +1187,7 @@ if (HDF5_TEST_VFD)
ENVIRONMENT "srcdir=${HDF5_TEST_BINARY_DIR}/${vfdname};HDF5TestExpress=${HDF_TEST_EXPRESS}"
WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/${vfdname}
)
- if (BUILD_SHARED_LIBS)
+ if (BUILD_SHARED_LIBS AND NOT ${vfdtest} STREQUAL "cache")
add_test (NAME VFD-${vfdname}-${vfdtest}-shared
COMMAND "${CMAKE_COMMAND}"
-D "TEST_PROGRAM=$<TARGET_FILE:${vfdtest}-shared>"
@@ -1044,15 +1202,15 @@ if (HDF5_TEST_VFD)
ENVIRONMENT "srcdir=${HDF5_TEST_BINARY_DIR}/${vfdname}-shared;HDF5TestExpress=${HDF_TEST_EXPRESS}"
WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/${vfdname}-shared
)
- endif (BUILD_SHARED_LIBS)
- endif (${vfdtest} STREQUAL "flush1" OR ${vfdtest} STREQUAL "flush2")
- ENDMACRO (CHECK_VFD_TEST vfdtest vfdname resultcode)
+ endif ()
+ endif ()
+ endmacro ()
- MACRO (ADD_VFD_TEST vfdname resultcode)
+ macro (ADD_VFD_TEST vfdname resultcode)
foreach (test ${H5_VFD_TESTS})
if (WIN32)
CHECK_VFD_TEST (${test} ${vfdname} ${resultcode})
- else (WIN32)
+ else ()
add_test (NAME VFD-${vfdname}-${test}
COMMAND "${CMAKE_COMMAND}"
-D "TEST_PROGRAM=$<TARGET_FILE:${test}>"
@@ -1082,25 +1240,25 @@ if (HDF5_TEST_VFD)
ENVIRONMENT "srcdir=${HDF5_TEST_BINARY_DIR}/${vfdname}-shared"
WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/${vfdname}-shared
)
- endif (BUILD_SHARED_LIBS)
- endif (WIN32)
- endforeach (test ${H5_VFD_TESTS})
+ endif ()
+ endif ()
+ endforeach ()
set_tests_properties (VFD-${vfdname}-flush2 PROPERTIES DEPENDS VFD-${vfdname}-flush1)
set_tests_properties (VFD-${vfdname}-flush1 PROPERTIES TIMEOUT 10)
set_tests_properties (VFD-${vfdname}-flush2 PROPERTIES TIMEOUT 10)
set_tests_properties (VFD-${vfdname}-istore PROPERTIES TIMEOUT 1800)
if (NOT CYGWIN)
set_tests_properties (VFD-${vfdname}-cache PROPERTIES TIMEOUT 1800)
- endif (NOT CYGWIN)
+ endif ()
if (BUILD_SHARED_LIBS)
set_tests_properties (VFD-${vfdname}-flush2-shared PROPERTIES DEPENDS VFD-${vfdname}-flush1-shared)
set_tests_properties (VFD-${vfdname}-flush1-shared PROPERTIES TIMEOUT 10)
set_tests_properties (VFD-${vfdname}-flush2-shared PROPERTIES TIMEOUT 10)
set_tests_properties (VFD-${vfdname}-istore-shared PROPERTIES TIMEOUT 1800)
- if (NOT CYGWIN)
+ if (NOT CYGWIN AND NOT WIN32)
set_tests_properties (VFD-${vfdname}-cache-shared PROPERTIES TIMEOUT 1800)
- endif (NOT CYGWIN)
- endif (BUILD_SHARED_LIBS)
+ endif ()
+ endif ()
if (HDF5_TEST_FHEAP_VFD)
add_test (NAME VFD-${vfdname}-fheap
COMMAND "${CMAKE_COMMAND}"
@@ -1133,16 +1291,16 @@ if (HDF5_TEST_VFD)
ENVIRONMENT "srcdir=${HDF5_TEST_BINARY_DIR}/${vfdname}-shared;HDF5TestExpress=${HDF_TEST_EXPRESS}"
WORKING_DIRECTORY ${HDF5_TEST_BINARY_DIR}/${vfdname}-shared
)
- endif (BUILD_SHARED_LIBS)
- endif (HDF5_TEST_FHEAP_VFD)
- ENDMACRO (ADD_VFD_TEST)
+ endif ()
+ endif ()
+ endmacro ()
# Run test with different Virtual File Driver
foreach (vfd ${VFD_LIST})
ADD_VFD_TEST (${vfd} 0)
- endforeach (vfd ${VFD_LIST})
+ endforeach ()
-endif (HDF5_TEST_VFD)
+endif ()
##############################################################################
##############################################################################
@@ -1151,13 +1309,13 @@ endif (HDF5_TEST_VFD)
##############################################################################
if (HDF5_BUILD_GENERATORS)
- MACRO (ADD_H5_GENERATOR genfile)
+ macro (ADD_H5_GENERATOR genfile)
add_executable (${genfile} ${HDF5_TEST_SOURCE_DIR}/${genfile}.c)
TARGET_NAMING (${genfile} STATIC)
TARGET_C_PROPERTIES (${genfile} STATIC " " " ")
target_link_libraries (${genfile} ${HDF5_TEST_LIB_TARGET} ${HDF5_LIB_TARGET})
set_target_properties (${genfile} PROPERTIES FOLDER generator/test)
- ENDMACRO (ADD_H5_GENERATOR genfile)
+ endmacro ()
# generator executables
set (H5_GENERATORS
@@ -1184,6 +1342,6 @@ if (HDF5_BUILD_GENERATORS)
foreach (gen ${H5_GENERATORS})
ADD_H5_GENERATOR (${gen})
- endforeach (gen ${H5_GENERATORS})
+ endforeach ()
-endif (HDF5_BUILD_GENERATORS)
+endif ()
diff --git a/test/COPYING b/test/COPYING
index 6903daf..6497ace 100644
--- a/test/COPYING
+++ b/test/COPYING
@@ -5,12 +5,9 @@
The files and subdirectories in this directory are part of HDF5.
The full HDF5 copyright notice, including terms governing use,
- modification, and redistribution, is contained in the files COPYING
- and Copyright.html. COPYING can be found at the root of the source
- code distribution tree; Copyright.html can be found at the root
- level of an installed copy of the electronic HDF5 document set and
- is linked from the top-level documents page. It can also be found
- at http://www.hdfgroup.org/HDF5/doc/Copyright.html. If you do not
- have access to either file, you may request a copy from
+ modification, and redistribution, is contained in the COPYING file
+ which can be found at the root of the source code distribution tree
+ or in https://support.hdfgroup.org/ftp/HDF5/releases. If you do
+ not have access to either file, you may request a copy from
help@hdfgroup.org.
diff --git a/test/H5srcdir.h b/test/H5srcdir.h
index 2f04295..32fe8c9 100644
--- a/test/H5srcdir.h
+++ b/test/H5srcdir.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/H5srcdir_str.h.in b/test/H5srcdir_str.h.in
index d472124..bab1df3 100644
--- a/test/H5srcdir_str.h.in
+++ b/test/H5srcdir_str.h.in
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* If you are reading this file and it has a '.h' suffix, it was automatically
diff --git a/test/Makefile.am b/test/Makefile.am
index 5a63c89..20b63f6 100644
--- a/test/Makefile.am
+++ b/test/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
@@ -28,8 +26,18 @@ AM_CPPFLAGS+=-I$(top_srcdir)/src -I$(top_builddir)/src
# testlibinfo.sh:
# testcheck_version.sh: tcheck_version
# tetlinks_env.sh: links_env
-TEST_SCRIPT = testerror.sh testlibinfo.sh testcheck_version.sh testlinks_env.sh
-SCRIPT_DEPEND = error_test$(EXEEXT) err_compat$(EXEEXT) links_env$(EXEEXT)
+# testflushrefresh.sh: flushrefresh
+# test_usecases.sh: use_append_chunk, use_append_mchunks, use_disable_mdc_flushes
+# testswmr.sh: swmr*
+# testvdsswmr.sh: vds_swmr*
+TEST_SCRIPT = testerror.sh testlibinfo.sh testcheck_version.sh testlinks_env.sh \
+ testswmr.sh testvdsswmr.sh testflushrefresh.sh test_usecases.sh
+SCRIPT_DEPEND = error_test$(EXEEXT) err_compat$(EXEEXT) links_env$(EXEEXT) \
+ flushrefresh$(EXEEXT) use_append_chunk$(EXEEXT) use_append_mchunks$(EXEEXT) use_disable_mdc_flushes$(EXEEXT) \
+ swmr_generator$(EXEEXT) swmr_reader$(EXEEXT) swmr_writer$(EXEEXT) \
+ swmr_remove_reader$(EXEEXT) swmr_remove_writer$(EXEEXT) swmr_addrem_writer$(EXEEXT) \
+ swmr_sparse_reader$(EXEEXT) swmr_sparse_writer$(EXEEXT) swmr_start_write$(EXEEXT) \
+ vds_swmr_gen$(EXEEXT) vds_swmr_reader$(EXEEXT) vds_swmr_writer$(EXEEXT)
if HAVE_SHARED_CONDITIONAL
TEST_SCRIPT += test_plugin.sh
SCRIPT_DEPEND += plugin$(EXEEXT)
@@ -41,11 +49,12 @@ check_SCRIPTS = $(TEST_SCRIPT)
# executed, generally most specific tests to least specific tests.
# As an exception, long-running tests should occur earlier in the list.
# This gives them more time to run when tests are executing in parallel.
-TEST_PROG= testhdf5 cache cache_api cache_tagging lheap ohdr stab gheap \
+TEST_PROG= testhdf5 \
+ cache cache_api cache_image cache_tagging lheap ohdr stab gheap \
evict_on_close farray earray btree2 fheap \
- pool accum hyperslab istore bittests dt_arith \
+ pool accum hyperslab istore bittests dt_arith page_buffer \
dtypes dsets cmpd_dset filter_fail extend external efc objcopy links unlink \
- big mtime fillval mount flush1 flush2 app_ref enum \
+ twriteorder big mtime fillval mount flush1 flush2 app_ref enum \
set_extent ttsafe enc_dec_plist enc_dec_plist_cross_platform\
getname vfd ntypes dangle dtransform reserved cross_read \
freespace mf vds file_image unregister cache_logging cork swmr
@@ -54,13 +63,21 @@ TEST_PROG= testhdf5 cache cache_api cache_tagging lheap ohdr stab gheap \
# error_test and err_compat are built at the same time as the other tests, but executed by testerror.sh.
# tcheck_version is used by testcheck_version.sh.
# accum_swmr_reader is used by accum.c.
+# atomic_writer and atomic_reader are standalone programs.
# links_env is used by testlinks_env.sh
+# flushrefresh is used by testflushrefresh.sh.
+# use_append_chunk, use_append_mchunks and use_disable_mdc_flushes are used by test_usecases.sh
+# swmr_* files (besides swmr.c) are used by testswmr.sh.
+# vds_swmr_* files are used by testvdsswmr.sh
# 'make check' doesn't run them directly, so they are not included in TEST_PROG.
# Also build testmeta, which is used for timings test. It builds quickly,
# and this lets automake keep all its test programs in one place.
check_PROGRAMS=$(TEST_PROG) error_test err_compat tcheck_version \
- testmeta accum_swmr_reader \
- links_env
+ testmeta accum_swmr_reader atomic_writer atomic_reader \
+ links_env flushrefresh use_append_chunk use_append_mchunks use_disable_mdc_flushes \
+ swmr_generator swmr_start_write swmr_reader swmr_writer swmr_remove_reader \
+ swmr_remove_writer swmr_addrem_writer swmr_sparse_reader swmr_sparse_writer \
+ swmr_check_compat_vfd vds_swmr_gen vds_swmr_reader vds_swmr_writer
if HAVE_SHARED_CONDITIONAL
check_PROGRAMS+= plugin
endif
@@ -81,27 +98,39 @@ if BUILD_ALL_CONDITIONAL
noinst_PROGRAMS=$(BUILD_ALL_PROGS)
endif
-if HAVE_SHARED_CONDITIONAL
- # The libh5test library provides common support code for the tests.
- noinst_LTLIBRARIES=libh5test.la
+# The libh5test library provides common support code for the tests.
+noinst_LTLIBRARIES=libh5test.la
+if HAVE_SHARED_CONDITIONAL
# The libdynlib1, libdynlib2, libdynlib3, and libdynlib4 library for testing plugin module plugin.c.
# Build it as shared library if configure is enabled for shared library.
- lib_LTLIBRARIES=libdynlib1.la libdynlib2.la libdynlib3.la libdynlib4.la
+ dyn_LTLIBRARIES=libdynlib1.la libdynlib2.la libdynlib3.la libdynlib4.la
libdynlib1_la_SOURCES=dynlib1.c
libdynlib2_la_SOURCES=dynlib2.c
libdynlib3_la_SOURCES=dynlib3.c
libdynlib4_la_SOURCES=dynlib4.c
+ libdynlib1_la_LDFLAGS = -avoid-version -module -shared -export-dynamic
+ libdynlib2_la_LDFLAGS = -avoid-version -module -shared -export-dynamic
+ libdynlib3_la_LDFLAGS = -avoid-version -module -shared -export-dynamic
+ libdynlib4_la_LDFLAGS = -avoid-version -module -shared -export-dynamic
+
+libdynlib1.la: $(libdynlib1_la_OBJECTS) $(libdynlib1_la_DEPENDENCIES) $(EXTRA_libdynlib1_la_DEPENDENCIES)
+ $(AM_V_CCLD)$(libdynlib1_la_LINK) $(am_libdynlib1_la_rpath) $(libdynlib1_la_OBJECTS) $(libdynlib1_la_LIBADD)
+
+libdynlib2.la: $(libdynlib2_la_OBJECTS) $(libdynlib2_la_DEPENDENCIES) $(EXTRA_libdynlib2_la_DEPENDENCIES)
+ $(AM_V_CCLD)$(libdynlib2_la_LINK) $(am_libdynlib2_la_rpath) $(libdynlib2_la_OBJECTS) $(libdynlib2_la_LIBADD)
-install-exec-hook:
- $(RM) $(DESTDIR)$(libdir)/*dynlib*
+libdynlib3.la: $(libdynlib3_la_OBJECTS) $(libdynlib3_la_DEPENDENCIES) $(EXTRA_libdynlib3_la_DEPENDENCIES)
+ $(AM_V_CCLD)$(libdynlib3_la_LINK) $(am_libdynlib3_la_rpath) $(libdynlib3_la_OBJECTS) $(libdynlib3_la_LIBADD)
-else
- # The libh5test library provides common support code for the tests.
- noinst_LTLIBRARIES=libh5test.la
+libdynlib4.la: $(libdynlib4_la_OBJECTS) $(libdynlib4_la_DEPENDENCIES) $(EXTRA_libdynlib4_la_DEPENDENCIES)
+ $(AM_V_CCLD)$(libdynlib4_la_LINK) $(am_libdynlib4_la_rpath) $(libdynlib4_la_OBJECTS) $(libdynlib4_la_LIBADD)
+
+#install-exec-hook:
+# $(RM) $(DESTDIR)$(dyndir)/*dynlib*
endif
-libh5test_la_SOURCES=h5test.c testframe.c cache_common.c
+libh5test_la_SOURCES=h5test.c testframe.c cache_common.c swmr_common.c
# Use libhd5test.la to compile all of the tests
LDADD=libh5test.la $(LIBHDF5)
@@ -109,6 +138,7 @@ LDADD=libh5test.la $(LIBHDF5)
# List the source files for tests that have more than one
ttsafe_SOURCES=ttsafe.c ttsafe_dcreate.c ttsafe_error.c ttsafe_cancel.c \
ttsafe_acreate.c
+cache_image_SOURCES=cache_image.c genall5.c
VFD_LIST = sec2 stdio core core_paged split multi family
if DIRECT_VFD_CONDITIONAL
@@ -148,7 +178,9 @@ CHECK_CLEANFILES+=accum.h5 cmpd_dset.h5 compact_dataset.h5 dataset.h5 dset_offse
stdio.h5 sec2.h5 dtypes[0-9].h5 dtypes1[0].h5 dt_arith[1-2].h5 tattr.h5 \
tselect.h5 mtime.h5 unlink.h5 unicode.h5 coord.h5 \
fillval_[0-9].h5 fillval.raw mount_[0-9].h5 testmeta.h5 ttime.h5 \
- trefer[1-3].h5 tvltypes.h5 tvlstr.h5 tvlstr2.h5 flush.h5 \
+ trefer[1-3].h5 tvltypes.h5 tvlstr.h5 tvlstr2.h5 twriteorder.dat \
+ flush.h5 flush-swmr.h5 noflush.h5 noflush-swmr.h5 flush_extend.h5 \
+ flush_extend-swmr.h5 noflush_extend.h5 noflush_extend-swmr.h5 \
enum1.h5 titerate.h5 ttsafe.h5 tarray1.h5 tgenprop.h5 \
tmisc[0-9]*.h5 set_extent[1-5].h5 ext[12].bin \
getname.h5 getname[1-3].h5 sec2_file.h5 direct_file.h5 \
@@ -163,15 +195,27 @@ CHECK_CLEANFILES+=accum.h5 cmpd_dset.h5 compact_dataset.h5 dataset.h5 dset_offse
split_get_file_image_test-m.h5 split_get_file_image_test-r.h5 \
file_image_core_test.h5.copy unregister_filter_1.h5 unregister_filter_2.h5 \
vds_virt.h5 vds_dapl.h5 vds_src_[0-1].h5 \
- accum_swmr_big.h5 ohdr_swmr.h5 \
- cache_logging.h5 cache_logging.out \
- swmr[0-2].h5
+ swmr_data.h5 use_use_append_chunk.h5 use_append_mchunks.h5 use_disable_mdc_flushes.h5 \
+ flushrefresh.h5 flushrefresh_VERIFICATION_START \
+ flushrefresh_VERIFICATION_CHECKPOINT1 flushrefresh_VERIFICATION_CHECKPOINT2 \
+ flushrefresh_VERIFICATION_DONE atomic_data accum_swmr_big.h5 ohdr_swmr.h5 \
+ test_swmr*.h5 cache_logging.h5 cache_logging.out vds_swmr.h5 vds_swmr_src_*.h5 \
+ swmr[0-2].h5 swmr_writer.out swmr_writer.log.* swmr_reader.out.* swmr_reader.log.* \
+ tbogus.h5.copy cache_image_test.h5
+
# Sources for testhdf5 executable
testhdf5_SOURCES=testhdf5.c tarray.c tattr.c tchecksum.c tconfig.c tfile.c \
tgenprop.c th5o.c th5s.c tcoords.c theap.c tid.c titerate.c tmeta.c tmisc.c \
trefer.c trefstr.c tselect.c tskiplist.c tsohm.c ttime.c ttst.c tunicode.c \
tvlstr.c tvltypes.c
+# Sources for Use Cases
+use_append_chunk_SOURCES=use_append_chunk.c use_common.c
+use_append_mchunks_SOURCES=use_append_mchunks.c use_common.c
+use_disable_mdc_flushes_SOURCES=use_disable_mdc_flushes.c
+
# Temporary files.
-DISTCLEANFILES=testerror.sh testlibinfo.sh testcheck_version.sh testlinks_env.sh test_plugin.sh
+DISTCLEANFILES=testerror.sh testlibinfo.sh testcheck_version.sh testlinks_env.sh test_plugin.sh \
+ testswmr.sh testvdsswmr.sh test_usecases.sh testflushrefresh.sh
+
include $(top_srcdir)/config/conclude.am
diff --git a/test/POSIX_Order_Write_Test_Report.docx b/test/POSIX_Order_Write_Test_Report.docx
new file mode 100644
index 0000000..cf6d1dc
--- /dev/null
+++ b/test/POSIX_Order_Write_Test_Report.docx
Binary files differ
diff --git a/test/POSIX_Order_Write_Test_Report.pdf b/test/POSIX_Order_Write_Test_Report.pdf
new file mode 100644
index 0000000..0c678c4
--- /dev/null
+++ b/test/POSIX_Order_Write_Test_Report.pdf
Binary files differ
diff --git a/test/SWMR_POSIX_Order_UG.txt b/test/SWMR_POSIX_Order_UG.txt
new file mode 100644
index 0000000..2771af1
--- /dev/null
+++ b/test/SWMR_POSIX_Order_UG.txt
@@ -0,0 +1,94 @@
+POSIX Write Order Test Instructions
+===================================
+
+Purpose
+-------
+This document shows the requirements, implementation design, and instructions
+of building and running the POSIX Write Order test. The name of the
+test is twriteorder and it resides in the test/ directory.
+
+Requirements
+------------
+The test is to verify that the write order is strictly consistent.
+The SWMR feature requires that the order of writes is strictly consistent.
+"Strict consistency in computer science is the most stringent consistency
+model. It says that a read operation has to return the result of the
+latest write operation which occurred on that data item."--
+(http://en.wikipedia.org/wiki/Linearizability#Definition_of_linearizability).
+This is also an alternative form of what POSIX write requires: after a
+write operation has returned success, all reads issued afterward should
+get the same data the write has written.
+
+Implementation Design
+---------------------
+The test simulates what SWMR does by writing chained blocks and seeing if
+they can be read back correctly.
+There is a writer process and a reader process.
+The file is divided into 2KB partitions. The writer then writes one chained
+block, each 1KB in size, in each partition after the first partition.
+Each chained block has this structure:
+Bytes 0-3: offset address of its child block. The last child uses 0 as NULL.
+Bytes 4-1023: some artificial data.
+The child block address of Block 1 is NULL (0).
+The child block address of Block 2 is the offset address of Block 1.
+The child block address of Block n is the offset address of Block n-1.
+After all n blocks are written, the offset address of Block n is written
+to the offset 0 of the first partition.
+Therefore, by the time the offset address of Block n is written to this
+position, all n chain-linked blocks have been written.
+
+The other reader processes will try to read the address value at the
+offset 0. The value is initially NULL(0). When it changes to non-zero,
+it signifies the writer process has written all the chain-link blocks
+and they are ready for the reader processes to access.
+
+If the system in which the writer and reader processes run maintains the
+write order, the readers will always get all chain-linked blocks correctly.
+If the write order is not maintained, some reader processes may find
+unexpected block data.
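+
+As a minimal illustrative sketch (not taken from the twriteorder source;
+the block count, sizes, and file name here are assumptions based on the
+description above), the writer side of this layout can be expressed as:
+
+    /* chain_demo.c -- write NUM_BLOCKS chained 1KB blocks, one per 2KB  */
+    /* partition, then publish the head offset at byte 0 last.           */
+    #include <fcntl.h>
+    #include <stdlib.h>
+    #include <string.h>
+    #include <sys/types.h>
+    #include <sys/stat.h>
+    #include <unistd.h>
+
+    #define BLOCK_SIZE     1024
+    #define PARTITION_SIZE 2048
+    #define NUM_BLOCKS     512
+
+    int main(void)
+    {
+        int           fd = open("chain_demo.dat", O_CREAT | O_TRUNC | O_WRONLY, 0644);
+        unsigned char block[BLOCK_SIZE];
+        unsigned int  child = 0;                 /* block 1's child is NULL (0) */
+        unsigned int  n;
+
+        if (fd < 0) return EXIT_FAILURE;
+        for (n = 1; n <= NUM_BLOCKS; n++) {
+            off_t offset = (off_t)n * PARTITION_SIZE;   /* skip the first partition */
+
+            memset(block, (int)(n & 0xff), BLOCK_SIZE);
+            memcpy(block, &child, sizeof(child));       /* bytes 0-3: child offset  */
+            lseek(fd, offset, SEEK_SET);
+            write(fd, block, BLOCK_SIZE);
+            child = (unsigned int)offset;               /* next block links to this */
+        }
+        /* Publish the head pointer only after every block has been written */
+        lseek(fd, (off_t)0, SEEK_SET);
+        write(fd, &child, sizeof(child));
+        close(fd);
+        return EXIT_SUCCESS;
+    }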
+
+Building the Tests
+------------------
+The name of the test is twriteorder in the test directory. It is added
+to the test suite, is built during the "make" process, and is run by
+the test_usecases.sh script. Users may inspect test/test_usecases.sh.in
+to see examples of how it is tested.
+
+Running the Tests
+-----------------
+The twriteorder test accepts the following options:
+$ ./twriteorder -h
+usage: twriteorder [OPTIONS]
+ OPTIONS
+ -h Print a usage message and exit
+ -l w|r launch writer or reader only. [default: launch both]
+ -b N Block size [default: 1024]
+ -p N Partition size [default: 2048]
+ -n N Number of linked blocks [default: 512]
+
+More Examples
+-------------
+
+# run test with default parameters and launch both writer and reader
+# processes.
+$ twriteorder
+
+# run test with blocksize of 1000 bytes (default is 1024 bytes).
+$ twriteorder -b 1000
+
+# run test with partition size of 3000 bytes (default is 2048 bytes).
+$ twriteorder -p 3000
+
+# run test with 2000 linked blocks (default is 512 blocks).
+$ twriteorder -n 2000
+
+# Launch only the writer process.
+$ twriteorder -l w
+
+# Launch only the reader process.
+$ twriteorder -l r
+
+Note that if you want to launch the writer and the reader processes
+manually (for example, on different machines sharing a common file system),
+you need to start the writer process (-l w) first, and then the reader
+process (-l r).
+
diff --git a/test/SWMR_UseCase_UG.txt b/test/SWMR_UseCase_UG.txt
new file mode 100644
index 0000000..e29944a
--- /dev/null
+++ b/test/SWMR_UseCase_UG.txt
@@ -0,0 +1,223 @@
+1. Title:
+ User Guide for SWMR Use Case Programs
+
+2. Purpose:
+ This is a User Guide for the SWMR Use Case programs. It describes the use
+ case programs and explains how to run them.
+
+2.1. Author and Dates:
+ Version 2: By Albert Cheng (acheng@hdfgroup.org), 2013/06/18.
+ Version 1: By Albert Cheng (acheng@hdfgroup.org), 2013/06/01.
+
+
+%%%%Use Case 1.7%%%%
+
+3. Use Case [1.7]:
+ Appending a single chunk
+
+3.1. Program name:
+ use_append_chunk
+
+3.2. Description:
+ Appending a single chunk of raw data to a dataset along an unlimited
+ dimension within a pre-created file and reading the new data back.
+
+ It first creates one 3D dataset using chunked storage; each chunk
+ is a (1, chunksize, chunksize) square. The dataset is (unlimited,
+ chunksize, chunksize). The data type is a 2-byte integer. The dataset
+ starts out "empty", i.e., its first dimension is 0.
+
+ The writer then appends planes, each of (1,chunksize,chunksize),
+ to the dataset. It fills each plane with the plane number and writes
+ it as the nth plane, then increases the plane number and repeats until
+ the end of the dataset, when it is chunksize planes long. The end
+ product is a chunksize^3 cube.
+
+ The reader is a separate process, running in parallel with
+ the writer. It reads planes from the dataset and expects the
+ dataset to be changing (growing). It checks the unlimited dimension
+ (dimension[0]); when it increases, the reader reads in the new planes,
+ one by one, and verifies the data correctness. (The nth plane should
+ contain all "n".) When the unlimited dimension grows to chunksize (the
+ dataset becomes a cube), that is the expected end of data and the
+ reader exits.
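+
+ As a rough sketch (not the actual use_append_chunk source; the function
+ name and the simplified error handling are assumptions), the per-plane
+ append step amounts to extending the dataset and writing a hyperslab:
+
+ /* Assumes an already-open 3D dataset of shorts whose first dimension */
+ /* is unlimited; cz is the chunksize and n the plane being appended.  */
+ #include "hdf5.h"
+
+ static herr_t append_plane(hid_t dset, hsize_t n, hsize_t cz, const short *plane)
+ {
+     hsize_t new_dims[3] = {n + 1, cz, cz};   /* grow to n+1 planes    */
+     hsize_t start[3]    = {n, 0, 0};         /* select the nth plane  */
+     hsize_t count[3]    = {1, cz, cz};
+     hid_t   fspace, mspace;
+     herr_t  ret;
+
+     if (H5Dset_extent(dset, new_dims) < 0)
+         return -1;
+     fspace = H5Dget_space(dset);
+     H5Sselect_hyperslab(fspace, H5S_SELECT_SET, start, NULL, count, NULL);
+     mspace = H5Screate_simple(3, count, NULL);
+     ret = H5Dwrite(dset, H5T_NATIVE_SHORT, mspace, fspace, H5P_DEFAULT, plane);
+     H5Sclose(mspace);
+     H5Sclose(fspace);
+     return ret;
+ }
+
+ With SWMR enabled, the writer would also flush the dataset after each
+ append (for example with H5Dflush) so that a concurrent reader can see
+ the new plane.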
+
+3.3. How to run the program:
+ The simplest way is:
+ $ use_append_chunk
+
+ It creates a skeleton dataset (0,256,256) of shorts. It then forks off
+ a process, which becomes the reader process to read planes from the
+ dataset, while the original process continues as the writer process
+ to append planes onto the dataset.
+
+ Other possible options:
+
+ 1. -z option: different chunksize. Default is 256.
+ $ use_append_chunk -z 1024
+
+ It uses (1,1024,1024) chunks to produce a 1024^3 cube, about 2GB big.
+
+
+ 2. -f filename: different dataset file name
+ $ use_append_chunk -f /gpfs/tmp/append_data.h5
+
+ The data file is /gpfs/tmp/append_data.h5. This allows two independent
+ processes in separated compute nodes to access the datafile on the
+ shared /gpfs file system.
+
+
+ 3. -l option: launch only the reader or writer process.
+ $ use_append_chunk -f /gpfs/tmp/append_data.h5 -l w # in node X
+ $ use_append_chunk -f /gpfs/tmp/append_data.h5 -l r # in node Y
+
+ In node X, launch the writer process, which creates the data file
+ and appends to it.
+ In node Y, launch the read process to read the data file.
+
+ Note that you need to time the read process to start AFTER the write
+ process has created the skeleton data file. Otherwise, the reader
+ will encounter errors such as data file not found.
+
+ 4. -n option: number of planes to write/read. Default is same as the
+ chunk size as specified by option -z.
+ $ use_append_chunk -n 1000 # 1000 planes are written and read.
+
+ 5. -s option: use SWMR file access mode or not. Default is yes.
+ $ use_append_chunk -s 0
+
+ It opens the HDF5 data file without the SWMR access mode (0 means
+ off). This will likely result in errors. This option is provided for
+ users to see the effect of the needed SWMR access mode for concurrent
+ access; a sketch of the corresponding open calls follows this list.
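+
+ A hypothetical illustration of what the SWMR access mode means for the
+ file open calls (not the actual use case code; error checks omitted):
+
+ /* SWMR requires the latest file format, so set the library version  */
+ /* bounds on the file access property list before opening.           */
+ static hid_t open_for_swmr(const char *name, int writer)
+ {
+     hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
+     hid_t fid;
+
+     H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
+     if (writer)
+         fid = H5Fopen(name, H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE, fapl);
+     else
+         fid = H5Fopen(name, H5F_ACC_RDONLY | H5F_ACC_SWMR_READ, fapl);
+     H5Pclose(fapl);
+     return fid;
+ }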
+
+3.4. Test Shell Script:
+ The Use Case program is installed in the test/ directory and is
+ compiled as part of the make process. A test script (test_usecases.sh)
+ is installed in the same directory to test the use case programs. The
+ test script is rather basic and is more for demonstrating how to
+ use the program.
+
+
+%%%%Use Case 1.8%%%%
+
+4. Use Case [1.8]:
+ Appending a hyperslab of multiple chunks.
+
+4.1. Program name:
+ use_append_mchunks
+
+4.2. Description:
+ Appending a hyperslab that spans several chunks of a dataset with
+ unlimited dimensions within a pre-created file and reading the new
+ data back.
+
+ It first creates one 3D dataset using chunked storage; each chunk is a (1,
+ chunksize, chunksize) square. The dataset is (unlimited, 2*chunksize,
+ 2*chunksize). The data type is a 2-byte integer. Therefore, each plane
+ consists of 4 chunks. The dataset starts out "empty", i.e., its first
+ dimension is 0.
+
+ The writer then appends planes, each of (1,2*chunksize,2*chunksize),
+ to the dataset. It fills each plane with the plane number and writes
+ it as the nth plane, then increases the plane number and repeats until
+ the end of the dataset, when it is 2*chunksize planes long. The end
+ product is a (2*chunksize)^3 cube.
+
+ The reader is a separate process, running in parallel with
+ the writer. It reads planes from the dataset and expects the
+ dataset to be changing (growing). It checks the unlimited dimension
+ (dimension[0]); when it increases, the reader reads in the new planes,
+ one by one, and verifies the data correctness. (The nth plane should
+ contain all "n".) When the unlimited dimension grows to 2*chunksize (the
+ dataset becomes a cube), that is the expected end of data and the
+ reader exits.
+
+4.3. How to run the program:
+ The simplest way is:
+ $ use_append_mchunks
+
+ It creates a skeleton dataset (0,512,512) of shorts. It then forks off
+ a process, which becomes the reader process to read planes from the
+ dataset, while the original process continues as the writer process
+ to append planes onto the dataset.
+
+ Other possible options:
+
+ 1. -z option: different chunksize. Default is 256.
+ $ use_append_mchunks -z 512
+
+ It uses (1,512,512) chunks to produce a 1024^3 cube, about 2GB big.
+
+
+ 2. -f filename: different dataset file name
+ $ use_append_mchunks -f /gpfs/tmp/append_data.h5
+
+ The data file is /gpfs/tmp/append_data.h5. This allows two independent
+ processes in separated compute nodes to access the datafile on the
+ shared /gpfs file system.
+
+
+ 3. -l option: launch only the reader or writer process.
+ $ use_append_mchunks -f /gpfs/tmp/append_data.h5 -l w # in node X
+ $ use_append_mchunks -f /gpfs/tmp/append_data.h5 -l r # in node Y
+
+ In node X, launch the writer process, which creates the data file
+ and appends to it.
+ In node Y, launch the read process to read the data file.
+
+ Note that you need to time the read process to start AFTER the write
+ process has created the skeleton data file. Otherwise, the reader
+ will encounter errors such as data file not found.
+
+ 4. -n option: number of planes to write/read. Default is same as the
+ chunk size as specified by option -z.
+ $ use_append_mchunks -n 1000 # 1000 planes are written and read.
+
+ 5. -s option: use SWMR file access mode or not. Default is yes.
+ $ use_append_mchunks -s 0
+
+ It opens the HDF5 data file without the SWMR access mode (0 means
+ off). This will likely result in errors. This option is provided for
+ users to see the effect of the needed SWMR access mode for concurrent
+ access.
+
+4.4. Test Shell Script:
+ The Use Case program is installed in the test/ directory and is
+ compiled as part of the make process. A test script (test_usecases.sh)
+ is installed in the same directory to test the use case programs. The
+ test script is rather basic and is more for demonstrating how to
+ use the program.
+
+
+%%%%Use Case 1.9%%%%
+
+5. Use Case [1.9]:
+ Appending n-1 dimensional planes
+
+5.1. Program names:
+ use_append_chunk and use_append_mchunks
+
+5.2. Description:
+ Appending n-1 dimensional planes or regions to a chunked dataset where
+ the data does not fill the chunk.
+
+ This means the chunks have multiple planes and when a plane is written,
+ only one of the planes in each chunk is written. This use case is
+ achieved by extending the previous use cases 1.7 and 1.8 by defining the
+ chunks to have more than 1 plane. The -y option is implemented for both
+ use_append_chunk and use_append_mchunks.
+
+5.3. How to run the program:
+ The simplest way is:
+ $ use_append_mchunks -y 5
+
+ It creates a skeleton dataset (0,512,512), with storage chunks (5,512,512)
+ of shorts. It then proceeds like use case 1.8 by forking off a reader
+ process. The original process continues as the writer process that
+ writes 1 plane at a time, updating parts of the chunks involved. The
+ reader reads 1 plane at a time, retrieving data from partial chunks.
+
+ The other possible options will work just like the two use cases.
+
+5.4. Test Shell Script:
+ Commands are added with -y options to demonstrate how the two use case
+ programs can be used for this use case.
+
diff --git a/test/accum.c b/test/accum.c
index 1fcd051..d76c866 100644
--- a/test/accum.c
+++ b/test/accum.c
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Mike McGreevy
@@ -45,18 +43,18 @@
H5F_t * f = NULL;
/* Function Prototypes */
-unsigned test_write_read(const H5F_io_info_t *fio_info);
-unsigned test_write_read_nonacc_front(const H5F_io_info_t *fio_info);
-unsigned test_write_read_nonacc_end(const H5F_io_info_t *fio_info);
-unsigned test_accum_overlap(const H5F_io_info_t *fio_info);
-unsigned test_accum_overlap_clean(const H5F_io_info_t *fio_info);
-unsigned test_accum_overlap_size(const H5F_io_info_t *fio_info);
-unsigned test_accum_non_overlap_size(const H5F_io_info_t *fio_info);
-unsigned test_accum_adjust(const H5F_io_info_t *fio_info);
-unsigned test_read_after(const H5F_io_info_t *fio_info);
-unsigned test_free(const H5F_io_info_t *fio_info);
-unsigned test_big(const H5F_io_info_t *fio_info);
-unsigned test_random_write(const H5F_io_info_t *fio_info);
+unsigned test_write_read(const H5F_io_info2_t *fio_info);
+unsigned test_write_read_nonacc_front(const H5F_io_info2_t *fio_info);
+unsigned test_write_read_nonacc_end(const H5F_io_info2_t *fio_info);
+unsigned test_accum_overlap(const H5F_io_info2_t *fio_info);
+unsigned test_accum_overlap_clean(const H5F_io_info2_t *fio_info);
+unsigned test_accum_overlap_size(const H5F_io_info2_t *fio_info);
+unsigned test_accum_non_overlap_size(const H5F_io_info2_t *fio_info);
+unsigned test_accum_adjust(const H5F_io_info2_t *fio_info);
+unsigned test_read_after(const H5F_io_info2_t *fio_info);
+unsigned test_free(const H5F_io_info2_t *fio_info);
+unsigned test_big(const H5F_io_info2_t *fio_info);
+unsigned test_random_write(const H5F_io_info2_t *fio_info);
unsigned test_swmr_write_big(hbool_t newest_format);
/* Helper Function Prototypes */
@@ -90,7 +88,7 @@ void accum_printf(void);
int
main(void)
{
- H5F_io_info_t fio_info; /* I/O info for operation */
+ H5F_io_info2_t fio_info; /* I/O info for operation */
unsigned nerrors = 0; /* track errors */
hid_t fid = -1;
@@ -109,7 +107,8 @@ main(void)
/* Set up I/O info for operation */
fio_info.f = f;
- if(NULL == (fio_info.dxpl = (H5P_genplist_t *)H5I_object(H5AC_ind_read_dxpl_id))) FAIL_STACK_ERROR
+ if(NULL == (fio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(H5AC_ind_read_dxpl_id))) FAIL_STACK_ERROR
+ if(NULL == (fio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(H5AC_rawdata_dxpl_id))) FAIL_STACK_ERROR
/* Reset metadata accumulator for the file */
if(accum_reset(&fio_info) < 0) FAIL_STACK_ERROR
@@ -166,7 +165,7 @@ error:
*-------------------------------------------------------------------------
*/
unsigned
-test_write_read(const H5F_io_info_t *fio_info)
+test_write_read(const H5F_io_info2_t *fio_info)
{
int i = 0;
int *write_buf, *read_buf;
@@ -222,7 +221,7 @@ error:
*-------------------------------------------------------------------------
*/
unsigned
-test_write_read_nonacc_front(const H5F_io_info_t *fio_info)
+test_write_read_nonacc_front(const H5F_io_info2_t *fio_info)
{
int i = 0;
int *write_buf, *read_buf;
@@ -281,7 +280,7 @@ error:
*-------------------------------------------------------------------------
*/
unsigned
-test_write_read_nonacc_end(const H5F_io_info_t *fio_info)
+test_write_read_nonacc_end(const H5F_io_info2_t *fio_info)
{
int i = 0;
int *write_buf, *read_buf;
@@ -340,7 +339,7 @@ error:
*-------------------------------------------------------------------------
*/
unsigned
-test_free(const H5F_io_info_t *fio_info)
+test_free(const H5F_io_info2_t *fio_info)
{
int i = 0;
int32_t *wbuf = NULL;
@@ -527,7 +526,7 @@ error:
*-------------------------------------------------------------------------
*/
unsigned
-test_accum_overlap(const H5F_io_info_t *fio_info)
+test_accum_overlap(const H5F_io_info2_t *fio_info)
{
int i = 0;
int32_t *wbuf, *rbuf;
@@ -699,7 +698,7 @@ error:
*-------------------------------------------------------------------------
*/
unsigned
-test_accum_overlap_clean(const H5F_io_info_t *fio_info)
+test_accum_overlap_clean(const H5F_io_info2_t *fio_info)
{
int i = 0;
int32_t *wbuf, *rbuf;
@@ -878,7 +877,7 @@ error:
*-------------------------------------------------------------------------
*/
unsigned
-test_accum_non_overlap_size(const H5F_io_info_t *fio_info)
+test_accum_non_overlap_size(const H5F_io_info2_t *fio_info)
{
int i = 0;
int32_t *wbuf, *rbuf;
@@ -945,7 +944,7 @@ error:
*-------------------------------------------------------------------------
*/
unsigned
-test_accum_overlap_size(const H5F_io_info_t *fio_info)
+test_accum_overlap_size(const H5F_io_info2_t *fio_info)
{
int i = 0;
int32_t *wbuf, *rbuf;
@@ -1023,7 +1022,7 @@ error:
*-------------------------------------------------------------------------
*/
unsigned
-test_accum_adjust(const H5F_io_info_t *fio_info)
+test_accum_adjust(const H5F_io_info2_t *fio_info)
{
int i = 0;
int s = 1048576; /* size of buffer */
@@ -1279,7 +1278,7 @@ error:
*-------------------------------------------------------------------------
*/
unsigned
-test_read_after(const H5F_io_info_t *fio_info)
+test_read_after(const H5F_io_info2_t *fio_info)
{
int i = 0;
int s = 128; /* size of buffer */
@@ -1358,7 +1357,7 @@ error:
*-------------------------------------------------------------------------
*/
unsigned
-test_big(const H5F_io_info_t *fio_info)
+test_big(const H5F_io_info2_t *fio_info)
{
uint8_t *wbuf, *wbuf2, *rbuf, *zbuf; /* Buffers for reading & writing, etc */
unsigned u; /* Local index variable */
@@ -1666,7 +1665,7 @@ error:
*-------------------------------------------------------------------------
*/
unsigned
-test_random_write(const H5F_io_info_t *fio_info)
+test_random_write(const H5F_io_info2_t *fio_info)
{
uint8_t *wbuf, *rbuf; /* Buffers for reading & writing */
unsigned seed = 0; /* Random # seed */
@@ -1820,7 +1819,7 @@ test_swmr_write_big(hbool_t newest_format)
pid_t pid; /* Process ID */
#endif /* H5_HAVE_UNISTD_H */
int status; /* Status returned from child process */
- H5F_io_info_t fio_info; /* I/O info for operation */
+ H5F_io_info2_t fio_info; /* I/O info for operation */
char *new_argv[] = {NULL};
char *driver = NULL; /* VFD string (from env variable) */
@@ -1877,7 +1876,9 @@ test_swmr_write_big(hbool_t newest_format)
/* Set up I/O info for operation */
fio_info.f = rf;
- if(NULL == (fio_info.dxpl = (H5P_genplist_t *)H5I_object(H5AC_ind_read_dxpl_id)))
+ if(NULL == (fio_info.meta_dxpl = (H5P_genplist_t *)H5I_object(H5AC_ind_read_dxpl_id)))
+ FAIL_STACK_ERROR
+ if(NULL == (fio_info.raw_dxpl = (H5P_genplist_t *)H5I_object(H5AC_rawdata_dxpl_id)))
FAIL_STACK_ERROR
/* We'll be writing lots of garbage data, so extend the
diff --git a/test/accum_swmr_reader.c b/test/accum_swmr_reader.c
index d98fe70..c1a238d 100644
--- a/test/accum_swmr_reader.c
+++ b/test/accum_swmr_reader.c
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "h5test.h"
diff --git a/test/aggr.h5 b/test/aggr.h5
new file mode 100644
index 0000000..b24365d
--- /dev/null
+++ b/test/aggr.h5
Binary files differ
diff --git a/test/app_ref.c b/test/app_ref.c
index 4b72bcb..3ef3fef 100644
--- a/test/app_ref.c
+++ b/test/app_ref.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/atomic_reader.c b/test/atomic_reader.c
new file mode 100644
index 0000000..3e3a20a
--- /dev/null
+++ b/test/atomic_reader.c
@@ -0,0 +1,361 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+/*-------------------------------------------------------------------------
+ *
+ * Created: atomic_reader.c
+ *
+ * Purpose: This is the "reader" part of the standalone test to check
+ * atomic read-write operation on a system.
+ * a) atomic_reader.c--the reader (this file)
+ * b) atomic_writer.c--the writer
+ * c) atomic_data--the name of the data file used by writer and reader
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/***********/
+/* Headers */
+/***********/
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#if !defined(WIN32) && !defined(__MINGW32__)
+
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+
+/****************/
+/* Local Macros */
+/****************/
+
+#define FILENAME "atomic_data"
+#define READ_TRIES 20
+#define OPEN_TRIES 50
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+static void usage(void);
+int verify(int fd, unsigned int k);
+void print_info(int *info, unsigned int lastr, unsigned iteration);
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: usage
+ *
+ * Purpose: To print the command line options
+ *
+ * Parameters: None
+ *
+ * Return: void
+ *
+ *-------------------------------------------------------------------------
+ */
+static void
+usage(void)
+{
+ printf("\n");
+ printf("Usage error!\n");
+ printf("Usage: atomic_reader -n <number of integers to read> -i <number of iterations for reader>\n");
+ printf(" Note**The number of integers for option n has to be positive\n");
+ printf(" Note**The number of integers for option i has to be positive\n");
+ printf("\n");
+} /* usage() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: verify
+ *
+ * Purpose: To verify that the data read is the pattern expected.
+ * Each integer read should be the same as the index.
+ * When a difference is encountered, the remaining integers
+ * read should be the same as the previous index.
+ * For example, the pattern expected should be either:
+ * a) 01234567....n-1
+ * or
+ * b) if at index 4, a difference is encountered,
+ * the remaining integers should be all "3"s as:
+ * 012333333333333
+ *
+ * Parameters:
+ * fd -- the file descriptor
+ * k -- the number of integers to read
+ *
+ * Return:
+ * positive on success
+ * negative on failure
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+verify(int fd, unsigned int k)
+{
+ unsigned int i; /* local index variable */
+ ssize_t bytes_read; /* the number of bytes read */
+ unsigned int *buf = NULL; /* buffer to hold data read */
+
+ /* Allocate buffer for data read */
+ if((buf = (unsigned int *)malloc(k * sizeof(unsigned int))) == NULL) {
+ printf("READER: error from malloc\n");
+ goto error;
+ } /* end if */
+
+ /* Position the file at the beginning */
+ if(lseek(fd, (off_t)0, SEEK_SET) < 0) {
+ printf("READER: error from lseek\n");
+ goto error;
+ } /* end if */
+
+ /* Read the whole file */
+ if((bytes_read = read(fd, buf, (k * sizeof(unsigned int)))) < 0) {
+ printf("READER: error from read\n");
+ goto error;
+ } /* end if */
+
+ /* Verify the bytes read are correct */
+ if(bytes_read != (ssize_t)(k*sizeof(unsigned int))) {
+ printf("READER: error from bytes read=%lu\n", (unsigned long)bytes_read);
+ goto error;
+ } /* end if */
+
+ /* Verify data read */
+ for(i=0; i < k; i++) {
+ if(buf[i] != i)
+ break;
+ } /* end for */
+
+ if(i < k) {
+ /* Compare the beginning and ending sentinel values */
+ if(buf[k-1] != (i-1)) {
+ printf("FAIL IN READER: ...beginning sentinel value=%u, i=%u\n", (i-1), i);
+ printf("FAIL IN READER: buf[%u]=%u\n", i-1, buf[i-1]);
+ printf("FAIL IN READER: buf[%u]=%u\n", i, buf[i]);
+ printf("FAIL IN READER: buf[%u]=%u\n", i+1, buf[i+1]);
+ printf("FAIL IN READER: ...ending sentinel value=%u\n", buf[k-1]);
+ goto error;
+ } /* end if */
+ } /* end if */
+
+ /* Free the buffer */
+ if(buf)
+ free(buf);
+ return 0;
+
+error:
+ /* Free the buffer */
+ if(buf)
+ free(buf);
+ return -1;
+} /* end verify() */
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: print_info
+ *
+ * Purpose: To print the statistics gathered for re-reads
+ *
+ * Parameters:
+ * info -- the array storing the statistics for re-reads
+ * lastr -- the last read completed
+ * iteration -- the current iteration
+ *
+ * Return: void
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+print_info(int *info, unsigned int lastr, unsigned iteration)
+{
+ unsigned j; /* local index variable */
+
+ printf("--------statistics for %u reads (iteration %u)--------\n", lastr, iteration);
+
+ for(j = 0; j <= READ_TRIES; j++)
+ printf("# of %u re-tries = %u\n", j, info[j]);
+
+ printf("--------end statistics for %u reads (iteration %u)--------\n", lastr, iteration);
+} /* print_info() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: main
+ *
+ * Purpose: To verify that the data read is the pattern expected.
+ * (1) Make sure the file opens successfully and the # of bytes read is as expected
+ * (2) Iterate the reader with i iterations
+ * (3) Read and verify n integers for each iteration
+ * (4) On verification error, re-read the data at most READ_TRIES
+ * times to see if correct data can be obtained
+ * (5) Print out statistics for the number of re-tries for each iteration
+ *
+ * Note:
+ * (a) The # of integers (via -n option) used by the writer and reader should be the same.
+ * (b) The data file used by the writer and reader should be the same.
+ *
+ * Future enhancement:
+ * 1) Provide default values for n and i and allow user to run with either 0 or 1 option
+ * 2) Use HDF library HD<system calls> instead of the system calls
+ * 3) Handle large sized buffer (gigabytes) if needed
+ *
+ * Return: Success: EXIT_SUCCESS
+ * Failure: EXIT_FAILURE
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+main(int argc, char *argv[])
+{
+ int fd = -1; /* file descriptor */
+ unsigned int j=0, i=0, m=0; /* local index variables */
+ int temp; /* temporary variable */
+ unsigned int iterations = 0; /* the input for "-i" */
+ unsigned num = 0; /* the input for "-n" */
+ int opt = 0; /* option char */
+ int info[READ_TRIES+1]; /* re-tries statistics */
+
+ /* Ensure the expected # of arguments */
+ if(argc != 5) {
+ usage();
+ exit(EXIT_FAILURE);
+ } /* end if */
+
+ /* Parse command line options */
+ while((opt = getopt(argc, argv, "n:i:")) != -1) {
+ switch(opt) {
+ case 'n':
+ if((temp = atoi(optarg)) < 0) {
+ usage();
+ exit(EXIT_FAILURE);
+ } /* end if */
+ num = (unsigned int)temp;
+ break;
+ case 'i':
+ if((temp = atoi(optarg)) < 0) {
+ usage();
+ exit(EXIT_FAILURE);
+ } /* end if */
+ iterations = (unsigned int)temp;
+ break;
+ default:
+ printf("Invalid option encountered\n");
+ break;
+ } /* end switch */
+ } /* end while */
+
+ printf("READER: number of integers to read = %u; # of iterations = %d\n", num, iterations);
+
+ printf("\n");
+ for(i = 1; i <= iterations; i++) {
+ unsigned opens = OPEN_TRIES;
+
+ printf("READER: *****start iteration %u*****\n", i);
+
+ /* Ensure open and file size are done properly */
+ while(opens--) {
+ struct stat sinfo;
+
+ memset(&sinfo, 0, sizeof(sinfo));
+
+ if((fd = open(FILENAME, O_RDONLY, 0644)) < 0) {
+ printf("READER: error from open--retry open again\n");
+ } else {
+ printf("READER: open succeed\n");
+
+ if((fstat(fd, &sinfo) == 0) &&
+ (sinfo.st_size == (off_t)(num * sizeof(unsigned int)))) {
+ printf("READER: file size is correct--%u\n", (unsigned int)sinfo.st_size);
+ break;
+ } /* end if */
+
+ printf("READER: error from fstat or file size of %u is incorrect--retry open again\n", (unsigned int)sinfo.st_size);
+ if(close(fd) < 0) {
+ printf("READER: error from close\n");
+ return EXIT_FAILURE;
+ } /* end if */
+ fd = -1;
+ } /* end else */
+
+ } /* end while */
+
+ if(fd < 0) {
+ printf("READER: *****open failure/incorrect file size for all %u tries, continue next iteration*****\n\n", OPEN_TRIES);
+ continue;
+ } /* end if */
+
+ memset(info, 0, sizeof(info));
+
+ /* Read and verify data */
+ for(j = 1; j <= num; j++) {
+
+ printf("READER: doing read %u\n", j);
+ if(verify(fd, num) < 0) {
+ printf("READER: error from read %u\n", j);
+
+ /* Perform re-read to see if correct data is obtained */
+ for(m = 1; m <= READ_TRIES; m++) {
+ printf("READER: ===============going to do re-read try %u\n", m);
+ if(verify(fd, num) < 0)
+ printf("READER: ===============error from re-read try %u\n", m);
+ else {
+ ++info[m];
+ printf("READER: ===============SUCCESS from re-read try %u\n", m);
+ break;
+ } /* end else */
+ } /* end for */
+
+ if(m > READ_TRIES) {
+ printf("READER: ===============error from all re-read tries: %u\n", READ_TRIES);
+ printf("READER:*****ERROR--stop on read %u\n", j);
+ break;
+ } /* end if */
+ } else {
+ ++info[0];
+ printf("READER: success from read %u\n", j);
+ } /* end else */
+
+ } /* end for */
+
+ /* Print the statistics for re-reads */
+ print_info(info, j-1, i);
+
+ /* Close the file */
+ if(close(fd) < 0) {
+ printf("READER: error from close\n");
+ return EXIT_FAILURE;
+ } /* end if */
+
+ printf("READER: *****end iteration %u*****\n\n", i);
+
+ } /* end for */
+
+ return EXIT_SUCCESS;
+}
+
+#else /* WIN32 / MINGW32 */
+
+int
+main(void)
+{
+ printf("Non-POSIX platform. Exiting.\n");
+ return EXIT_FAILURE;
+} /* end main() */
+
+#endif /* WIN32 / MINGW32 */
+
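
A note on the re-read bookkeeping in atomic_reader.c above: info[0] counts reads that verified on the first attempt, while info[m] (1 <= m <= READ_TRIES) counts reads that only verified on the m-th re-read. A minimal standalone sketch of that accounting and of the report format produced by print_info(), using invented counts purely for illustration, is:

    /* Illustrative sketch only -- the counts below are made up to show
     * how the statistics array is interpreted and reported.
     */
    #include <stdio.h>

    #define READ_TRIES 20

    int
    main(void)
    {
        int      info[READ_TRIES + 1] = {0};   /* same layout as the reader's array */
        unsigned j;

        info[0] = 17;   /* 17 reads verified on the first attempt */
        info[1] = 2;    /*  2 reads needed one re-read            */
        info[3] = 1;    /*  1 read needed three re-reads          */

        for(j = 0; j <= READ_TRIES; j++)
            if(info[j])
                printf("# of %u re-tries = %d\n", j, info[j]);

        return 0;
    }
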
diff --git a/test/atomic_writer.c b/test/atomic_writer.c
new file mode 100644
index 0000000..218d4da
--- /dev/null
+++ b/test/atomic_writer.c
@@ -0,0 +1,243 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: atomic_writer.c
+ *
+ * Purpose: This is the "writer" part of the standalone test to check
+ * atomic read-write operation on a system.
+ * a) atomic_writer.c--the writer (this file)
+ * b) atomic_reader.c--the reader
+ * c) atomic_data--the name of the data file used by writer and reader
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/***********/
+/* Headers */
+/***********/
+
+#include <assert.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#if !defined(WIN32) && !defined(__MINGW32__)
+
+#include <fcntl.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/wait.h>
+
+/****************/
+/* Local Macros */
+/****************/
+
+#define FILENAME "atomic_data"
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+static void usage(void);
+
+
+/*-------------------------------------------------------------------------
+ * Function: usage
+ *
+ * Purpose: To print information about the command line options
+ *
+ * Parameters: None
+ *
+ * Return: void
+ *
+ *-------------------------------------------------------------------------
+ */
+static void
+usage(void)
+{
+ printf("\n");
+ printf("Usage error!\n");
+ printf("Usage: atomic_writer -n <number of integers to write> -i <number of iterations for writer>\n");
+ printf(" Note**The number of integers for option n has to be positive\n");
+ printf(" Note**The number of integers for option i has to be positive\n");
+ printf("\n");
+} /* usage() */
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: main
+ *
+ * Purpose: To write a series of integers to a file for the reader to verify the data.
+ * A write is atomic if the whole amount written in one operation is not interleaved
+ * with data from any other process.
+ * (1) Iterate with i iterations
+ * (2) Write a series of integers (0 to n-1) to the file with this pattern:
+ * offset 0: 0000000000000000000000000000000
+ * offset 1: 111111111111111111111111111111
+ * offset 2: 22222222222222222222222222222
+ * offset 3: 3333333333333333333333333333
+ * ...
+ * ...
+ * offset n-1: (n-1)
+ *
+ * At the end of the writes, the data in the file will be:
+ * 01234567........(n-1)
+ *
+ * Note:
+ * (a) The # of integers (via -n option) used by the writer and reader should be the same.
+ * (b) The data file used by the writer and reader should be the same.
+ *
+ * Future enhancement:
+ * 1) Provide default values for n and i and allow user to run with either 0 or 1 option
+ * 2) Use HDF library HD<system calls> instead of the system calls
+ * 3) Handle large sized buffer (gigabytes) if needed
+ *
+ * Return: Success: EXIT_SUCCESS
+ * Failure: EXIT_FAILURE
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+main(int argc, char *argv[])
+{
+ int fd = -1; /* file descriptor */
+ ssize_t bytes_wrote; /* the number of bytes written */
+ unsigned int *buf = NULL; /* buffer to hold written data */
+ unsigned int n, u, i; /* local index variables */
+ int temp; /* temporary variable */
+ unsigned int iterations = 0; /* the input for "-i" */
+ unsigned int num = 0; /* the input for "-n" */
+ int opt = 0; /* option char */
+
+ /* Ensure the # of arguments is as expected */
+ if(argc != 5) {
+ usage();
+ exit(EXIT_FAILURE);
+ } /* end if */
+
+ /* Parse command line options */
+ while((opt = getopt(argc, argv, "n:i:")) != -1) {
+ switch(opt) {
+ case 'n':
+ if((temp = atoi(optarg)) < 0) {
+ usage();
+ exit(EXIT_FAILURE);
+ } /* end if */
+ num = (unsigned int)temp;
+ break;
+ case 'i':
+ if((temp = atoi(optarg)) < 0) {
+ usage();
+ exit(EXIT_FAILURE);
+ } /* end if */
+ iterations = (unsigned int)temp;
+ break;
+ default:
+ printf("Invalid option encountered\n");
+ break;
+ } /* end switch */
+ } /* end while */
+
+ printf("WRITER: # of integers to write = %u; # of iterations = %d\n", num, iterations);
+
+ /* Remove existing data file if needed */
+ if(remove(FILENAME) < 0) {
+ if(errno == ENOENT)
+ printf("WRITER: remove %s--%s\n", FILENAME, strerror(errno));
+ else {
+ printf("WRITER: error from remove: %d--%s\n", errno, strerror(errno));
+ goto error;
+ } /* end else */
+ } else
+ printf("WRITER: %s is removed\n", FILENAME);
+
+ /* Create the data file */
+ if((fd = open(FILENAME, O_RDWR|O_TRUNC|O_CREAT, 0664)) < 0) {
+ printf("WRITER: error from open\n");
+ goto error;
+ } /* end if */
+
+ /* Allocate buffer for holding data to be written */
+ if((buf = (unsigned int *)malloc(num * sizeof(unsigned int))) == NULL) {
+ printf("WRITER: error from malloc\n");
+ if(fd >= 0 && close(fd) < 0)
+ printf("WRITER: error from close\n");
+ goto error;
+ } /* end if */
+
+ printf("\n");
+
+ for(i = 1; i <= iterations; i++) {
+ printf("WRITER: *****start iteration %u*****\n", i);
+
+ /* Write the series of integers to the file */
+ for(n = 0; n < num; n++) {
+
+ /* Set up data to be written */
+ for(u=0; u < num; u++)
+ buf[u] = n;
+
+ /* Position the file to the proper location */
+ if(lseek(fd, (off_t)(n*sizeof(unsigned int)), SEEK_SET) < 0) {
+ printf("WRITER: error from lseek\n");
+ goto error;
+ } /* end if */
+
+ /* Write the data */
+ if((bytes_wrote = write(fd, buf, ((num-n) * sizeof(unsigned int)))) < 0) {
+ printf("WRITER: error from write\n");
+ goto error;
+ } /* end if */
+
+ /* Verify the # of bytes written is correct */
+ if(bytes_wrote != (ssize_t)((num-n) * sizeof(unsigned int))) {
+ printf("WRITER: error from bytes written\n");
+ goto error;
+ } /* end if */
+ } /* end for */
+
+ printf("WRITER: *****end iteration %u*****\n\n", i);
+
+ } /* end for */
+
+ /* Close the file */
+ if(close(fd) < 0) {
+ printf("WRITER: error from close\n");
+ goto error;
+ } /* end if */
+
+ /* Free the buffer */
+ if(buf)
+ free(buf);
+
+ return EXIT_SUCCESS;
+
+error:
+ return EXIT_FAILURE;
+} /* end main() */
+
+#else /* WIN32 / MINGW32 */
+
+int
+main(void)
+{
+ printf("Non-POSIX platform. Exiting.\n");
+ return EXIT_FAILURE;
+} /* end main() */
+
+#endif /* WIN32 / MINGW32 */
+
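
As the comments in both new files state, the writer and reader must be run with the same -n value against the shared atomic_data file; the patch itself adds no driver that pairs them. A hypothetical harness (a sketch only -- the executable paths and the -n/-i values are assumptions, not part of this change) could launch the two standalone programs concurrently like this:

    #include <stdio.h>
    #include <stdlib.h>
    #include <sys/types.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int
    main(void)
    {
        pid_t writer, reader;
        int   status;
        int   ret = EXIT_SUCCESS;

        /* Spawn the writer: 20000 integers, 5 iterations */
        if((writer = fork()) < 0) {
            perror("fork writer");
            return EXIT_FAILURE;
        }
        if(writer == 0) {
            execl("./atomic_writer", "atomic_writer", "-n", "20000", "-i", "5", (char *)NULL);
            perror("execl atomic_writer");
            _exit(EXIT_FAILURE);
        }

        /* Spawn the reader concurrently with the same -n value */
        if((reader = fork()) < 0) {
            perror("fork reader");
            return EXIT_FAILURE;
        }
        if(reader == 0) {
            execl("./atomic_reader", "atomic_reader", "-n", "20000", "-i", "2", (char *)NULL);
            perror("execl atomic_reader");
            _exit(EXIT_FAILURE);
        }

        /* Either process failing marks the whole run as failed */
        if(waitpid(writer, &status, 0) < 0 || !WIFEXITED(status) || WEXITSTATUS(status) != EXIT_SUCCESS)
            ret = EXIT_FAILURE;
        if(waitpid(reader, &status, 0) < 0 || !WIFEXITED(status) || WEXITSTATUS(status) != EXIT_SUCCESS)
            ret = EXIT_FAILURE;

        return ret;
    }
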
diff --git a/test/big.c b/test/big.c
index 0505e54..3685821 100644
--- a/test/big.c
+++ b/test/big.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/bittests.c b/test/bittests.c
index cf83ead..ccd725c 100644
--- a/test/bittests.c
+++ b/test/bittests.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/btree2.c b/test/btree2.c
index ea16583..ec3e20e 100644
--- a/test/btree2.c
+++ b/test/btree2.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Quincey Koziol <koziol@ncsa.uiuc.edu>
diff --git a/test/cache.c b/test/cache.c
index 5c42dfd..3df8156 100644
--- a/test/cache.c
+++ b/test/cache.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: John Mainzer
@@ -19,9 +17,6 @@
* This file contains tests for the cache implemented in
* H5C.c
*/
-#include "h5test.h"
-#include "H5Iprivate.h"
-#include "H5ACprivate.h"
#include "cache_common.h"
@@ -100,30 +95,30 @@ struct move_entry_test_spec
/* private function declarations: */
-static unsigned smoke_check_1(int express_test);
-static unsigned smoke_check_2(int express_test);
-static unsigned smoke_check_3(int express_test);
-static unsigned smoke_check_4(int express_test);
-static unsigned smoke_check_5(int express_test);
-static unsigned smoke_check_6(int express_test);
-static unsigned smoke_check_7(int express_test);
-static unsigned smoke_check_8(int express_test);
-static unsigned smoke_check_9(int express_test);
-static unsigned smoke_check_10(int express_test);
-static unsigned write_permitted_check(int express_test);
-static unsigned check_insert_entry(void);
-static unsigned check_flush_cache(void);
+static unsigned smoke_check_1(int express_test, unsigned paged);
+static unsigned smoke_check_2(int express_test, unsigned paged);
+static unsigned smoke_check_3(int express_test, unsigned paged);
+static unsigned smoke_check_4(int express_test, unsigned paged);
+static unsigned smoke_check_5(int express_test, unsigned paged);
+static unsigned smoke_check_6(int express_test, unsigned paged);
+static unsigned smoke_check_7(int express_test, unsigned paged);
+static unsigned smoke_check_8(int express_test, unsigned paged);
+static unsigned smoke_check_9(int express_test, unsigned paged);
+static unsigned smoke_check_10(int express_test, unsigned paged);
+static unsigned write_permitted_check(int express_test, unsigned paged);
+static unsigned check_insert_entry(unsigned paged);
+static unsigned check_flush_cache(unsigned paged);
static void check_flush_cache__empty_cache(H5F_t * file_ptr);
static void check_flush_cache__multi_entry(H5F_t * file_ptr);
static void check_flush_cache__multi_entry_test(H5F_t * file_ptr,
int test_num,
unsigned int flush_flags,
- int spec_size,
+ unsigned int spec_size,
struct flush_cache_test_spec spec[]);
static void check_flush_cache__pe_multi_entry_test(H5F_t * file_ptr,
int test_num,
unsigned int flush_flags,
- int spec_size,
+ unsigned int spec_size,
struct pe_flush_cache_test_spec spec[]);
static void check_flush_cache__single_entry(H5F_t * file_ptr);
static void check_flush_cache__single_entry_test(H5F_t * file_ptr,
@@ -155,55 +150,56 @@ static void check_flush_cache__flush_op_test(H5F_t * file_ptr,
unsigned int flush_flags,
int spec_size,
const struct fo_flush_cache_test_spec spec[],
- int init_expected_index_len,
+ unsigned init_expected_index_len,
size_t init_expected_index_size,
- int expected_index_len,
+ unsigned expected_index_len,
size_t expected_index_size,
int check_size,
struct fo_flush_entry_check check[]);
static void check_flush_cache__flush_op_eviction_test(H5F_t * file_ptr);
-static unsigned check_get_entry_status(void);
-static unsigned check_expunge_entry(void);
-static unsigned check_multiple_read_protect(void);
-static unsigned check_move_entry(void);
+static unsigned check_get_entry_status(unsigned paged);
+static unsigned check_expunge_entry(unsigned paged);
+static unsigned check_multiple_read_protect(unsigned paged);
+static unsigned check_move_entry(unsigned paged);
static void check_move_entry__run_test(H5F_t * file_ptr, unsigned test_num,
struct move_entry_test_spec * spec_ptr);
-static unsigned check_pin_protected_entry(void);
-static unsigned check_resize_entry(void);
-static unsigned check_evictions_enabled(void);
-static unsigned check_flush_protected_err(void);
-static unsigned check_destroy_pinned_err(void);
-static unsigned check_destroy_protected_err(void);
-static unsigned check_duplicate_insert_err(void);
-static unsigned check_double_pin_err(void);
-static unsigned check_double_unpin_err(void);
-static unsigned check_pin_entry_errs(void);
-static unsigned check_double_protect_err(void);
-static unsigned check_double_unprotect_err(void);
-static unsigned check_mark_entry_dirty_errs(void);
-static unsigned check_expunge_entry_errs(void);
-static unsigned check_move_entry_errs(void);
-static unsigned check_resize_entry_errs(void);
-static unsigned check_unprotect_ro_dirty_err(void);
-static unsigned check_protect_ro_rw_err(void);
-static unsigned check_check_evictions_enabled_err(void);
-static unsigned check_auto_cache_resize(hbool_t cork_ageout);
-static unsigned check_auto_cache_resize_disable(void);
-static unsigned check_auto_cache_resize_epoch_markers(void);
-static unsigned check_auto_cache_resize_input_errs(void);
-static unsigned check_auto_cache_resize_aux_fcns(void);
-static unsigned check_metadata_blizzard_absence(hbool_t fill_via_insertion);
-static unsigned check_flush_deps(void);
-static unsigned check_flush_deps_err(void);
-static unsigned check_flush_deps_order(void);
-static unsigned check_notify_cb(void);
-static unsigned check_metadata_cork(hbool_t fill_via_insertion);
-static unsigned check_entry_deletions_during_scans(void);
+static unsigned check_pin_protected_entry(unsigned paged);
+static unsigned check_resize_entry(unsigned paged);
+static unsigned check_evictions_enabled(unsigned paged);
+static unsigned check_flush_protected_err(unsigned paged);
+static unsigned check_destroy_pinned_err(unsigned paged);
+static unsigned check_destroy_protected_err(unsigned paged);
+static unsigned check_duplicate_insert_err(unsigned paged);
+static unsigned check_double_pin_err(unsigned paged);
+static unsigned check_double_unpin_err(unsigned paged);
+static unsigned check_pin_entry_errs(unsigned paged);
+static unsigned check_double_protect_err(unsigned paged);
+static unsigned check_double_unprotect_err(unsigned paged);
+static unsigned check_mark_entry_dirty_errs(unsigned paged);
+static unsigned check_expunge_entry_errs(unsigned paged);
+static unsigned check_move_entry_errs(unsigned paged);
+static unsigned check_resize_entry_errs(unsigned paged);
+static unsigned check_unprotect_ro_dirty_err(unsigned paged);
+static unsigned check_protect_ro_rw_err(unsigned paged);
+static unsigned check_protect_retries(unsigned paged);
+static unsigned check_check_evictions_enabled_err(unsigned paged);
+static unsigned check_auto_cache_resize(hbool_t cork_ageout, unsigned paged);
+static unsigned check_auto_cache_resize_disable(unsigned paged);
+static unsigned check_auto_cache_resize_epoch_markers(unsigned paged);
+static unsigned check_auto_cache_resize_input_errs(unsigned paged);
+static unsigned check_auto_cache_resize_aux_fcns(unsigned paged);
+static unsigned check_metadata_blizzard_absence(hbool_t fill_via_insertion, unsigned paged);
+static unsigned check_flush_deps(unsigned paged);
+static unsigned check_flush_deps_err(unsigned paged);
+static unsigned check_flush_deps_order(unsigned paged);
+static unsigned check_notify_cb(unsigned paged);
+static unsigned check_metadata_cork(hbool_t fill_via_insertion, unsigned paged);
+static unsigned check_entry_deletions_during_scans(unsigned paged);
static void cedds__expunge_dirty_entry_in_flush_test(H5F_t * file_ptr);
static void cedds__H5C_make_space_in_cache(H5F_t * file_ptr);
static void cedds__H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * file_ptr);
static void cedds__H5C_flush_invalidate_cache__bucket_scan(H5F_t * file_ptr);
-static unsigned check_stats(void);
+static unsigned check_stats(unsigned paged);
#if H5C_COLLECT_CACHE_STATS
static void check_stats__smoke_check_1(H5F_t * file_ptr);
#endif /* H5C_COLLECT_CACHE_STATS */
@@ -231,7 +227,7 @@ static void check_stats__smoke_check_1(H5F_t * file_ptr);
*/
static unsigned
-smoke_check_1(int express_test)
+smoke_check_1(int express_test, unsigned paged)
{
hbool_t show_progress = FALSE;
int dirty_unprotects = FALSE;
@@ -242,7 +238,16 @@ smoke_check_1(int express_test)
int mile_stone = 1;
H5F_t * file_ptr = NULL;
- TESTING("smoke check #1 -- all clean, ins, dest, ren, 4/2 MB cache");
+ if(paged)
+ TESTING("smoke check #1P -- all clean, ins, dest, ren, 4/2 MB cache")
+ else
+ TESTING("smoke check #1 -- all clean, ins, dest, ren, 4/2 MB cache")
+
+ if ( paged && ( express_test > 0 ) ) {
+
+ SKIPPED();
+ return(0);
+ }
switch (express_test)
{
@@ -277,8 +282,7 @@ smoke_check_1(int express_test)
HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
FUNC, mile_stone++, (int)pass);
- file_ptr = setup_cache((size_t)(4 * 1024 * 1024),
- (size_t)(2 * 1024 * 1024));
+ file_ptr = setup_cache((size_t)(4 * 1024 * 1024), (size_t)(2 * 1024 * 1024), paged);
if(show_progress) /* 3 */
HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
@@ -430,7 +434,7 @@ smoke_check_1(int express_test)
*/
static unsigned
-smoke_check_2(int express_test)
+smoke_check_2(int express_test, unsigned paged)
{
hbool_t show_progress = FALSE;
int dirty_unprotects = TRUE;
@@ -441,7 +445,16 @@ smoke_check_2(int express_test)
int mile_stone = 1;
H5F_t * file_ptr = NULL;
- TESTING("smoke check #2 -- ~1/2 dirty, ins, dest, ren, 4/2 MB cache");
+ if(paged)
+ TESTING("smoke check #2P -- ~1/2 dirty, ins, dest, ren, 4/2 MB cache")
+ else
+ TESTING("smoke check #2 -- ~1/2 dirty, ins, dest, ren, 4/2 MB cache")
+
+ if ( paged && ( express_test > 0 ) ) {
+
+ SKIPPED();
+ return(0);
+ }
switch (express_test)
{
@@ -476,8 +489,7 @@ smoke_check_2(int express_test)
HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
FUNC, mile_stone++, (int)pass);
- file_ptr = setup_cache((size_t)(4 * 1024 * 1024),
- (size_t)(2 * 1024 * 1024));
+ file_ptr = setup_cache((size_t)(4 * 1024 * 1024), (size_t)(2 * 1024 * 1024), paged);
if(show_progress) /* 3 */
HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
@@ -628,7 +640,7 @@ smoke_check_2(int express_test)
*/
static unsigned
-smoke_check_3(int express_test)
+smoke_check_3(int express_test, unsigned paged)
{
hbool_t show_progress = FALSE;
int dirty_unprotects = FALSE;
@@ -639,7 +651,16 @@ smoke_check_3(int express_test)
int mile_stone = 1;
H5F_t * file_ptr = NULL;
- TESTING("smoke check #3 -- all clean, ins, dest, ren, 2/1 KB cache");
+ if(paged)
+ TESTING("smoke check #3P -- all clean, ins, dest, ren, 2/1 KB cache")
+ else
+ TESTING("smoke check #3 -- all clean, ins, dest, ren, 2/1 KB cache")
+
+ if ( paged && ( express_test > 0 ) ) {
+
+ SKIPPED();
+ return(0);
+ }
switch (express_test)
{
@@ -674,8 +695,7 @@ smoke_check_3(int express_test)
HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
FUNC, mile_stone++, (int)pass);
- file_ptr = setup_cache((size_t)(2 * 1024),
- (size_t)(1 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024), paged);
if(show_progress) /* 3 */
HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
@@ -827,7 +847,7 @@ smoke_check_3(int express_test)
*/
static unsigned
-smoke_check_4(int express_test)
+smoke_check_4(int express_test, unsigned paged)
{
hbool_t show_progress = FALSE;
int dirty_unprotects = TRUE;
@@ -838,7 +858,16 @@ smoke_check_4(int express_test)
int mile_stone = 1;
H5F_t * file_ptr = NULL;
- TESTING("smoke check #4 -- ~1/2 dirty, ins, dest, ren, 2/1 KB cache");
+ if(paged)
+ TESTING("smoke check #4P -- ~1/2 dirty, ins, dest, ren, 2/1 KB cache")
+ else
+ TESTING("smoke check #4 -- ~1/2 dirty, ins, dest, ren, 2/1 KB cache")
+
+ if ( paged && ( express_test > 0 ) ) {
+
+ SKIPPED();
+ return(0);
+ }
switch (express_test)
{
@@ -873,8 +902,7 @@ smoke_check_4(int express_test)
HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
FUNC, mile_stone++, (int)pass);
- file_ptr = setup_cache((size_t)(2 * 1024),
- (size_t)(1 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024), paged);
if(show_progress) /* 3 */
HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
@@ -1026,7 +1054,7 @@ smoke_check_4(int express_test)
*/
static unsigned
-smoke_check_5(int express_test)
+smoke_check_5(int express_test, unsigned paged)
{
herr_t result;
hbool_t show_progress = FALSE;
@@ -1085,7 +1113,16 @@ smoke_check_5(int express_test)
/* double empty_reserve = */ 0.5f
};
- TESTING("smoke check #5 -- all clean, ins, prot, unprot, AR cache 1");
+ if(paged)
+ TESTING("smoke check #5P -- all clean, ins, prot, unprot, AR cache 1")
+ else
+ TESTING("smoke check #5 -- all clean, ins, prot, unprot, AR cache 1")
+
+ if ( paged && ( express_test > 0 ) ) {
+
+ SKIPPED();
+ return(0);
+ }
switch (express_test)
{
@@ -1120,8 +1157,7 @@ smoke_check_5(int express_test)
HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
FUNC, mile_stone++, (int)pass);
- file_ptr = setup_cache((size_t)(2 * 1024),
- (size_t)(1 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024), paged);
cache_ptr = file_ptr->shared->cache;
if(pass) {
@@ -1262,7 +1298,7 @@ smoke_check_5(int express_test)
*/
static unsigned
-smoke_check_6(int express_test)
+smoke_check_6(int express_test, unsigned paged)
{
herr_t result;
hbool_t show_progress = FALSE;
@@ -1321,7 +1357,16 @@ smoke_check_6(int express_test)
/* double empty_reserve = */ 0.05f
};
- TESTING("smoke check #6 -- ~1/2 dirty, ins, prot, unprot, AR cache 1");
+ if(paged)
+ TESTING("smoke check #6P -- ~1/2 dirty, ins, prot, unprot, AR cache 1")
+ else
+ TESTING("smoke check #6 -- ~1/2 dirty, ins, prot, unprot, AR cache 1")
+
+ if ( paged && ( express_test > 0 ) ) {
+
+ SKIPPED();
+ return(0);
+ }
pass = TRUE;
@@ -1356,8 +1401,7 @@ smoke_check_6(int express_test)
HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
FUNC, mile_stone++, (int)pass);
- file_ptr = setup_cache((size_t)(2 * 1024),
- (size_t)(1 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024), paged);
cache_ptr = file_ptr->shared->cache;
if(pass) {
@@ -1498,7 +1542,7 @@ smoke_check_6(int express_test)
*/
static unsigned
-smoke_check_7(int express_test)
+smoke_check_7(int express_test, unsigned paged)
{
herr_t result;
hbool_t show_progress = FALSE;
@@ -1558,7 +1602,16 @@ smoke_check_7(int express_test)
/* double empty_reserve = */ 0.1f
};
- TESTING("smoke check #7 -- all clean, ins, prot, unprot, AR cache 2");
+ if(paged)
+ TESTING("smoke check #7P -- all clean, ins, prot, unprot, AR cache 2")
+ else
+ TESTING("smoke check #7 -- all clean, ins, prot, unprot, AR cache 2")
+
+ if ( paged && ( express_test > 0 ) ) {
+
+ SKIPPED();
+ return(0);
+ }
switch (express_test)
{
@@ -1593,8 +1646,7 @@ smoke_check_7(int express_test)
HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
FUNC, mile_stone++, (int)pass);
- file_ptr = setup_cache((size_t)(2 * 1024),
- (size_t)(1 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024), paged);
cache_ptr = file_ptr->shared->cache;
if(pass) {
@@ -1735,7 +1787,7 @@ smoke_check_7(int express_test)
*/
static unsigned
-smoke_check_8(int express_test)
+smoke_check_8(int express_test, unsigned paged)
{
herr_t result;
hbool_t show_progress = FALSE;
@@ -1795,7 +1847,16 @@ smoke_check_8(int express_test)
/* double empty_reserve = */ 0.1f
};
- TESTING("smoke check #8 -- ~1/2 dirty, ins, prot, unprot, AR cache 2");
+ if(paged)
+ TESTING("smoke check #8P -- ~1/2 dirty, ins, prot, unprot, AR cache 2")
+ else
+ TESTING("smoke check #8 -- ~1/2 dirty, ins, prot, unprot, AR cache 2")
+
+ if ( paged && ( express_test > 0 ) ) {
+
+ SKIPPED();
+ return(0);
+ }
switch (express_test)
{
@@ -1830,7 +1891,7 @@ smoke_check_8(int express_test)
HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
FUNC, mile_stone++, (int)pass);
- file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024), paged);
cache_ptr = file_ptr->shared->cache;
if(pass) {
@@ -1978,7 +2039,7 @@ smoke_check_8(int express_test)
*/
static unsigned
-smoke_check_9(int express_test)
+smoke_check_9(int express_test, unsigned paged)
{
herr_t result;
hbool_t show_progress = FALSE;
@@ -1992,7 +2053,16 @@ smoke_check_9(int express_test)
H5F_t * file_ptr = NULL;
H5C_t * cache_ptr = NULL;
- TESTING("smoke check #9 -- all clean, ins, dest, ren, 4/2 MB, corked");
+ if(paged)
+ TESTING("smoke check #9P -- all clean, ins, dest, ren, 4/2 MB, corked")
+ else
+ TESTING("smoke check #9 -- all clean, ins, dest, ren, 4/2 MB, corked")
+
+ if ( paged && ( express_test > 0 ) ) {
+
+ SKIPPED();
+ return(0);
+ }
switch (express_test)
{
@@ -2027,8 +2097,7 @@ smoke_check_9(int express_test)
HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
FUNC, mile_stone++, (int)pass);
- file_ptr = setup_cache((size_t)(4 * 1024 * 1024),
- (size_t)(2 * 1024 * 1024));
+ file_ptr = setup_cache((size_t)(4 * 1024 * 1024), (size_t)(2 * 1024 * 1024), paged);
cache_ptr = file_ptr->shared->cache;
/* disable evictions */
@@ -2298,7 +2367,7 @@ smoke_check_9(int express_test)
*/
static unsigned
-smoke_check_10(int express_test)
+smoke_check_10(int express_test, unsigned paged)
{
herr_t result;
hbool_t show_progress = FALSE;
@@ -2312,7 +2381,16 @@ smoke_check_10(int express_test)
H5F_t * file_ptr = NULL;
H5C_t * cache_ptr = NULL;
- TESTING("smoke check #10 -- ~1/2 dirty, ins, dest, ren, 4/2 MB, corked");
+ if(paged)
+ TESTING("smoke check #10P -- ~1/2 dirty, ins, dest, ren, 4/2 MB, corked")
+ else
+ TESTING("smoke check #10 -- ~1/2 dirty, ins, dest, ren, 4/2 MB, corked")
+
+ if ( paged && ( express_test > 0 ) ) {
+
+ SKIPPED();
+ return(0);
+ }
switch (express_test)
{
@@ -2347,8 +2425,7 @@ smoke_check_10(int express_test)
HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
FUNC, mile_stone++, (int)pass);
- file_ptr = setup_cache((size_t)(4 * 1024 * 1024),
- (size_t)(2 * 1024 * 1024));
+ file_ptr = setup_cache((size_t)(4 * 1024 * 1024), (size_t)(2 * 1024 * 1024), paged);
cache_ptr = file_ptr->shared->cache;
if(show_progress) /* 3 */
@@ -2612,7 +2689,7 @@ write_permitted_check(int
#if !H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
H5_ATTR_UNUSED
#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
-express_test)
+express_test, unsigned paged)
{
#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
@@ -2626,7 +2703,10 @@ express_test)
#endif /* H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS */
- TESTING("write permitted check -- 1/0 MB cache");
+ if(paged)
+ TESTING("write permitted check -- 1/0 MB cache (paged aggregation)")
+ else
+ TESTING("write permitted check -- 1/0 MB cache")
#if H5C_MAINTAIN_CLEAN_AND_DIRTY_LRU_LISTS
@@ -2663,8 +2743,7 @@ express_test)
HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
FUNC, mile_stone++, (int)pass);
- file_ptr = setup_cache((size_t)(1 * 1024 * 1024),
- (size_t)(0));
+ file_ptr = setup_cache((size_t)(1 * 1024 * 1024), (size_t)0, paged);
if(show_progress) /* 3 */
HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
@@ -2826,7 +2905,7 @@ express_test)
*/
static unsigned
-check_insert_entry(void)
+check_insert_entry(unsigned paged)
{
int entry_type = PICO_ENTRY_TYPE;
int i;
@@ -2842,8 +2921,10 @@ check_insert_entry(void)
test_entry_t * entry_ptr;
struct H5C_cache_entry_t * search_ptr;
-
- TESTING("H5C_insert_entry() functionality");
+ if(paged)
+ TESTING("H5C_insert_entry() functionality (paged aggregation)")
+ else
+ TESTING("H5C_insert_entry() functionality")
pass = TRUE;
@@ -2861,8 +2942,7 @@ check_insert_entry(void)
reset_entries();
- file_ptr = setup_cache((size_t)(2 * 1024 * 1024),
- (size_t)(1 * 1024 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024 * 1024), (size_t)(1 * 1024 * 1024), paged);
cache_ptr = file_ptr->shared->cache;
}
@@ -2886,7 +2966,7 @@ check_insert_entry(void)
result = H5C_get_entry_status(file_ptr, entry_ptr->addr, &entry_size,
&in_cache, &is_dirty, &is_protected,
- &is_pinned, NULL, NULL, NULL);
+ &is_pinned, NULL, NULL, NULL, NULL);
if(result < 0) {
@@ -3118,11 +3198,14 @@ check_insert_entry(void)
*/
static unsigned
-check_flush_cache(void)
+check_flush_cache(unsigned paged)
{
H5F_t * file_ptr = NULL;
- TESTING("H5C_flush_cache() functionality");
+ if(paged)
+ TESTING("H5C_flush_cache() functionality (paged aggregation)")
+ else
+ TESTING("H5C_flush_cache() functionality")
pass = TRUE;
@@ -3135,8 +3218,7 @@ check_flush_cache(void)
reset_entries();
- file_ptr = setup_cache((size_t)(2 * 1024 * 1024),
- (size_t)(1 * 1024 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024 * 1024), (size_t)(1 * 1024 * 1024), paged);
}
/* first test behaviour on an empty cache. Can't do much sanity
@@ -3312,7 +3394,7 @@ check_flush_cache__multi_entry(H5F_t * file_ptr)
{
int test_num = 1;
unsigned int flush_flags = H5C__NO_FLAGS_SET;
- int spec_size = 8;
+ unsigned int spec_size = 8;
struct flush_cache_test_spec spec[8] =
{
{
@@ -3406,7 +3488,7 @@ check_flush_cache__multi_entry(H5F_t * file_ptr)
{
int test_num = 2;
unsigned int flush_flags = H5C__FLUSH_INVALIDATE_FLAG;
- int spec_size = 8;
+ unsigned int spec_size = 8;
struct flush_cache_test_spec spec[8] =
{
{
@@ -3500,7 +3582,7 @@ check_flush_cache__multi_entry(H5F_t * file_ptr)
{
int test_num = 3;
unsigned int flush_flags = H5C__FLUSH_CLEAR_ONLY_FLAG;
- int spec_size = 8;
+ unsigned int spec_size = 8;
struct flush_cache_test_spec spec[8] =
{
{
@@ -3594,7 +3676,7 @@ check_flush_cache__multi_entry(H5F_t * file_ptr)
{
int test_num = 4;
unsigned int flush_flags = H5C__FLUSH_MARKED_ENTRIES_FLAG;
- int spec_size = 8;
+ unsigned int spec_size = 8;
struct flush_cache_test_spec spec[8] =
{
{
@@ -3689,7 +3771,7 @@ check_flush_cache__multi_entry(H5F_t * file_ptr)
int test_num = 5;
unsigned int flush_flags = H5C__FLUSH_INVALIDATE_FLAG |
H5C__FLUSH_CLEAR_ONLY_FLAG;
- int spec_size = 8;
+ unsigned int spec_size = 8;
struct flush_cache_test_spec spec[8] =
{
{
@@ -3784,7 +3866,7 @@ check_flush_cache__multi_entry(H5F_t * file_ptr)
int test_num = 6;
unsigned int flush_flags = H5C__FLUSH_INVALIDATE_FLAG |
H5C__FLUSH_MARKED_ENTRIES_FLAG;
- int spec_size = 8;
+ unsigned int spec_size = 8;
struct flush_cache_test_spec spec[8] =
{
{
@@ -3879,7 +3961,7 @@ check_flush_cache__multi_entry(H5F_t * file_ptr)
int test_num = 7;
unsigned int flush_flags = H5C__FLUSH_CLEAR_ONLY_FLAG |
H5C__FLUSH_MARKED_ENTRIES_FLAG;
- int spec_size = 8;
+ unsigned int spec_size = 8;
struct flush_cache_test_spec spec[8] =
{
{
@@ -3975,7 +4057,7 @@ check_flush_cache__multi_entry(H5F_t * file_ptr)
unsigned int flush_flags = H5C__FLUSH_INVALIDATE_FLAG |
H5C__FLUSH_CLEAR_ONLY_FLAG |
H5C__FLUSH_MARKED_ENTRIES_FLAG;
- int spec_size = 8;
+ unsigned int spec_size = 8;
struct flush_cache_test_spec spec[8] =
{
{
@@ -4073,7 +4155,7 @@ check_flush_cache__multi_entry(H5F_t * file_ptr)
~(H5C__FLUSH_INVALIDATE_FLAG |
H5C__FLUSH_CLEAR_ONLY_FLAG |
H5C__FLUSH_MARKED_ENTRIES_FLAG);
- int spec_size = 8;
+ unsigned int spec_size = 8;
struct flush_cache_test_spec spec[8] =
{
{
@@ -4172,7 +4254,7 @@ check_flush_cache__multi_entry(H5F_t * file_ptr)
{
int test_num = 1;
unsigned int flush_flags = H5C__NO_FLAGS_SET;
- int spec_size = 8;
+ unsigned int spec_size = 8;
struct pe_flush_cache_test_spec spec[8] =
{
{
@@ -4318,7 +4400,7 @@ check_flush_cache__multi_entry(H5F_t * file_ptr)
{
int test_num = 2;
unsigned int flush_flags = H5C__FLUSH_INVALIDATE_FLAG;
- int spec_size = 8;
+ unsigned int spec_size = 8;
struct pe_flush_cache_test_spec spec[8] =
{
{
@@ -4448,7 +4530,7 @@ check_flush_cache__multi_entry(H5F_t * file_ptr)
int test_num = 3;
unsigned int flush_flags = H5C__FLUSH_INVALIDATE_FLAG |
H5C__FLUSH_CLEAR_ONLY_FLAG;
- int spec_size = 8;
+ unsigned int spec_size = 8;
struct pe_flush_cache_test_spec spec[8] =
{
{
@@ -4570,7 +4652,7 @@ check_flush_cache__multi_entry(H5F_t * file_ptr)
int test_num = 4;
unsigned int flush_flags = H5C__FLUSH_INVALIDATE_FLAG |
H5C__FLUSH_MARKED_ENTRIES_FLAG;
- int spec_size = 8;
+ unsigned int spec_size = 8;
struct pe_flush_cache_test_spec spec[8] =
{
{
@@ -4701,7 +4783,7 @@ check_flush_cache__multi_entry(H5F_t * file_ptr)
unsigned int flush_flags = H5C__FLUSH_INVALIDATE_FLAG |
H5C__FLUSH_CLEAR_ONLY_FLAG |
H5C__FLUSH_MARKED_ENTRIES_FLAG;
- int spec_size = 8;
+ unsigned int spec_size = 8;
struct pe_flush_cache_test_spec spec[8] =
{
{
@@ -4845,13 +4927,13 @@ static void
check_flush_cache__multi_entry_test(H5F_t * file_ptr,
int test_num,
unsigned int flush_flags,
- int spec_size,
+ unsigned int spec_size,
struct flush_cache_test_spec spec[])
{
H5C_t * cache_ptr = file_ptr->shared->cache;
static char msg[128];
herr_t result;
- int i;
+ unsigned u;
size_t total_entry_size = 0;
test_entry_t * base_addr;
test_entry_t * entry_ptr;
@@ -4890,43 +4972,43 @@ check_flush_cache__multi_entry_test(H5F_t * file_ptr,
failure_mssg = msg;
}
- i = 0;
- while(pass && (i < spec_size))
+ u = 0;
+ while(pass && (u < spec_size))
{
- if((spec[i].entry_num != i) ||
- (spec[i].entry_type < 0) ||
- (spec[i].entry_type >= NUMBER_OF_ENTRY_TYPES) ||
- (spec[i].entry_index < 0) ||
- (spec[i].entry_index > max_indices[spec[i].entry_type])) {
+ if(((unsigned)spec[u].entry_num != u) ||
+ (spec[u].entry_type < 0) ||
+ (spec[u].entry_type >= NUMBER_OF_ENTRY_TYPES) ||
+ (spec[u].entry_index < 0) ||
+ (spec[u].entry_index > max_indices[spec[u].entry_type])) {
pass = FALSE;
HDsnprintf(msg, (size_t)128,
- "bad data in spec[%d] on entry to multi entry test #%d.",
- i, test_num);
+ "bad data in spec[%u] on entry to multi entry test #%d.",
+ u, test_num);
failure_mssg = msg;
}
- i++;
+ u++;
}
- i = 0;
- while(pass && (i < spec_size))
+ u = 0;
+ while(pass && (u < spec_size))
{
- if(spec[i].insert_flag) {
+ if(spec[u].insert_flag) {
- insert_entry(file_ptr, spec[i].entry_type, spec[i].entry_index,
- spec[i].flags);
+ insert_entry(file_ptr, spec[u].entry_type, spec[u].entry_index,
+ spec[u].flags);
} else {
- protect_entry(file_ptr, spec[i].entry_type, spec[i].entry_index);
+ protect_entry(file_ptr, spec[u].entry_type, spec[u].entry_index);
- unprotect_entry(file_ptr, spec[i].entry_type, spec[i].entry_index,
- spec[i].flags);
+ unprotect_entry(file_ptr, spec[u].entry_type, spec[u].entry_index,
+ spec[u].flags);
}
- total_entry_size += entry_sizes[spec[i].entry_type];
+ total_entry_size += entry_sizes[spec[u].entry_type];
- i++;
+ u++;
}
if(pass) {
@@ -4943,36 +5025,36 @@ check_flush_cache__multi_entry_test(H5F_t * file_ptr,
}
}
- i = 0;
- while(pass && (i < spec_size))
+ u = 0;
+ while(pass && (u < spec_size))
{
- base_addr = entries[spec[i].entry_type];
- entry_ptr = &(base_addr[spec[i].entry_index]);
+ base_addr = entries[spec[u].entry_type];
+ entry_ptr = &(base_addr[spec[u].entry_index]);
- if((entry_ptr->deserialized != spec[i].expected_deserialized) ||
- (entry_ptr->serialized != spec[i].expected_serialized) ||
- (entry_ptr->destroyed != spec[i].expected_destroyed)) {
+ if((entry_ptr->deserialized != spec[u].expected_deserialized) ||
+ (entry_ptr->serialized != spec[u].expected_serialized) ||
+ (entry_ptr->destroyed != spec[u].expected_destroyed)) {
#if 0 /* This is useful debugging code. Lets keep it around. */
HDfprintf(stdout,
"deslzd = %d(%d), slzd = %d(%d), dest = %d(%d)\n",
(int)(entry_ptr->deserialized),
- (int)(spec[i].expected_deserialized),
+ (int)(spec[u].expected_deserialized),
(int)(entry_ptr->serialized),
- (int)(spec[i].expected_serialized),
+ (int)(spec[u].expected_serialized),
(int)(entry_ptr->destroyed),
- (int)(spec[i].expected_destroyed));
+ (int)(spec[u].expected_destroyed));
#endif
pass = FALSE;
HDsnprintf(msg, (size_t)128,
- "Bad status on entry %d after flush in multi entry test #%d.",
- i, test_num);
+ "Bad status on entry %u after flush in multi entry test #%d.",
+ u, test_num);
failure_mssg = msg;
}
- i++;
+ u++;
}
if(pass) {
@@ -5027,17 +5109,17 @@ check_flush_cache__multi_entry_test(H5F_t * file_ptr,
}
}
- i = 0;
- while(pass && (i < spec_size))
+ u = 0;
+ while(pass && (u < spec_size))
{
- base_addr = entries[spec[i].entry_type];
- entry_ptr = &(base_addr[spec[i].entry_index]);
+ base_addr = entries[spec[u].entry_type];
+ entry_ptr = &(base_addr[spec[u].entry_index]);
entry_ptr->deserialized = FALSE;
entry_ptr->serialized = FALSE;
entry_ptr->destroyed = FALSE;
- i++;
+ u++;
}
return;
@@ -5064,13 +5146,13 @@ static void
check_flush_cache__pe_multi_entry_test(H5F_t * file_ptr,
int test_num,
unsigned int flush_flags,
- int spec_size,
+ unsigned int spec_size,
struct pe_flush_cache_test_spec spec[])
{
H5C_t *cache_ptr = file_ptr->shared->cache;
static char msg[128];
herr_t result;
- int i;
+ unsigned u;
int j;
size_t total_entry_size = 0;
test_entry_t * base_addr;
@@ -5110,54 +5192,54 @@ check_flush_cache__pe_multi_entry_test(H5F_t * file_ptr,
failure_mssg = msg;
}
- i = 0;
- while(pass && (i < spec_size))
+ u = 0;
+ while(pass && (u < spec_size))
{
- if((spec[i].entry_num != i) ||
- (spec[i].entry_type < 0) ||
- (spec[i].entry_type >= NUMBER_OF_ENTRY_TYPES) ||
- (spec[i].entry_index < 0) ||
- (spec[i].entry_index > max_indices[spec[i].entry_type]) ||
- (spec[i].num_pins < 0) ||
- (spec[i].num_pins > MAX_PINS)) {
+ if(((unsigned)spec[u].entry_num != u) ||
+ (spec[u].entry_type < 0) ||
+ (spec[u].entry_type >= NUMBER_OF_ENTRY_TYPES) ||
+ (spec[u].entry_index < 0) ||
+ (spec[u].entry_index > max_indices[spec[u].entry_type]) ||
+ (spec[u].num_pins < 0) ||
+ (spec[u].num_pins > MAX_PINS)) {
pass = FALSE;
HDsnprintf(msg, (size_t)128,
- "bad data in spec[%d] on entry to pe multi entry test #%d.",
- i, test_num);
+ "bad data in spec[%u] on entry to pe multi entry test #%d.",
+ u, test_num);
failure_mssg = msg;
}
- i++;
+ u++;
}
- i = 0;
- while(pass && (i < spec_size))
+ u = 0;
+ while(pass && (u < spec_size))
{
- if(spec[i].insert_flag) {
+ if(spec[u].insert_flag) {
- insert_entry(file_ptr, spec[i].entry_type, spec[i].entry_index,
- spec[i].flags);
+ insert_entry(file_ptr, spec[u].entry_type, spec[u].entry_index,
+ spec[u].flags);
} else {
- protect_entry(file_ptr, spec[i].entry_type, spec[i].entry_index);
+ protect_entry(file_ptr, spec[u].entry_type, spec[u].entry_index);
- unprotect_entry(file_ptr, spec[i].entry_type, spec[i].entry_index,
- spec[i].flags);
+ unprotect_entry(file_ptr, spec[u].entry_type, spec[u].entry_index,
+ spec[u].flags);
}
- total_entry_size += entry_sizes[spec[i].entry_type];
+ total_entry_size += entry_sizes[spec[u].entry_type];
- for (j = 0; j < spec[i].num_pins; j++)
+ for (j = 0; j < spec[u].num_pins; j++)
{
create_pinned_entry_dependency(file_ptr,
- spec[i].entry_type,
- spec[i].entry_index,
- spec[i].pin_type[j],
- spec[i].pin_idx[j]);
+ spec[u].entry_type,
+ spec[u].entry_index,
+ spec[u].pin_type[j],
+ spec[u].pin_idx[j]);
}
- i++;
+ u++;
}
if(pass) {
@@ -5174,36 +5256,36 @@ check_flush_cache__pe_multi_entry_test(H5F_t * file_ptr,
}
}
- i = 0;
- while(pass && (i < spec_size))
+ u = 0;
+ while(pass && (u < spec_size))
{
- base_addr = entries[spec[i].entry_type];
- entry_ptr = &(base_addr[spec[i].entry_index]);
+ base_addr = entries[spec[u].entry_type];
+ entry_ptr = &(base_addr[spec[u].entry_index]);
- if((entry_ptr->deserialized != spec[i].expected_deserialized) ||
- (entry_ptr->serialized != spec[i].expected_serialized) ||
- (entry_ptr->destroyed != spec[i].expected_destroyed)) {
+ if((entry_ptr->deserialized != spec[u].expected_deserialized) ||
+ (entry_ptr->serialized != spec[u].expected_serialized) ||
+ (entry_ptr->destroyed != spec[u].expected_destroyed)) {
#if 0 /* This is useful debugging code. Lets keep it around. */
HDfprintf(stdout,
"desrlzd = %d(%d), srlzd = %d(%d), dest = %d(%d)\n",
(int)(entry_ptr->deserialized),
- (int)(spec[i].expected_deserialized),
+ (int)(spec[u].expected_deserialized),
(int)(entry_ptr->serialized),
- (int)(spec[i].expected_serialized),
+ (int)(spec[u].expected_serialized),
(int)(entry_ptr->destroyed),
- (int)(spec[i].expected_destroyed));
+ (int)(spec[u].expected_destroyed));
#endif
pass = FALSE;
HDsnprintf(msg, (size_t)128,
- "Bad status on entry %d after flush in pe multi entry test #%d.",
- i, test_num);
+ "Bad status on entry %u after flush in pe multi entry test #%d.",
+ u, test_num);
failure_mssg = msg;
}
- i++;
+ u++;
}
if(pass) {
@@ -5258,17 +5340,17 @@ check_flush_cache__pe_multi_entry_test(H5F_t * file_ptr,
}
}
- i = 0;
- while(pass && (i < spec_size))
+ u = 0;
+ while(pass && (u < spec_size))
{
- base_addr = entries[spec[i].entry_type];
- entry_ptr = &(base_addr[spec[i].entry_index]);
+ base_addr = entries[spec[u].entry_type];
+ entry_ptr = &(base_addr[spec[u].entry_index]);
entry_ptr->deserialized = FALSE;
entry_ptr->serialized = FALSE;
entry_ptr->destroyed = FALSE;
- i++;
+ u++;
}
return;
@@ -5323,9 +5405,9 @@ check_flush_cache__flush_ops(H5F_t * file_ptr)
int test_num = 1;
unsigned int flush_flags = H5C__NO_FLAGS_SET;
int spec_size = 2;
- int init_expected_index_len = 2;
+ unsigned init_expected_index_len = 2;
size_t init_expected_index_size = 2 * PICO_ENTRY_SIZE;
- int expected_index_len = 2;
+ unsigned expected_index_len = 2;
size_t expected_index_size = 2 * PICO_ENTRY_SIZE;
struct fo_flush_cache_test_spec spec[2] =
{
@@ -5430,9 +5512,9 @@ check_flush_cache__flush_ops(H5F_t * file_ptr)
int test_num = 2;
unsigned int flush_flags = H5C__FLUSH_INVALIDATE_FLAG;
int spec_size = 2;
- int init_expected_index_len = 2;
+ unsigned init_expected_index_len = 2;
size_t init_expected_index_size = 2 * PICO_ENTRY_SIZE;
- int expected_index_len = 0;
+ unsigned expected_index_len = 0;
size_t expected_index_size = 0;
struct fo_flush_cache_test_spec spec[2] =
{
@@ -5534,9 +5616,9 @@ check_flush_cache__flush_ops(H5F_t * file_ptr)
int test_num = 3;
unsigned int flush_flags = H5C__NO_FLAGS_SET;
int spec_size = 1;
- int init_expected_index_len = 1;
+ unsigned init_expected_index_len = 1;
size_t init_expected_index_size = VARIABLE_ENTRY_SIZE / 4;
- int expected_index_len = 1;
+ unsigned expected_index_len = 1;
size_t expected_index_size = VARIABLE_ENTRY_SIZE / 2;
struct fo_flush_cache_test_spec spec[1] =
{
@@ -5612,9 +5694,9 @@ check_flush_cache__flush_ops(H5F_t * file_ptr)
int test_num = 4;
unsigned int flush_flags = H5C__FLUSH_INVALIDATE_FLAG;
int spec_size = 1;
- int init_expected_index_len = 1;
+ unsigned init_expected_index_len = 1;
size_t init_expected_index_size = VARIABLE_ENTRY_SIZE / 4;
- int expected_index_len = 0;
+ unsigned expected_index_len = 0;
size_t expected_index_size = 0;
struct fo_flush_cache_test_spec spec[1] =
{
@@ -5697,9 +5779,9 @@ check_flush_cache__flush_ops(H5F_t * file_ptr)
int test_num = 5; /* and 6 */
unsigned int flush_flags = H5C__NO_FLAGS_SET;
int spec_size = 1;
- int init_expected_index_len = 1;
+ unsigned init_expected_index_len = 1;
size_t init_expected_index_size = VARIABLE_ENTRY_SIZE;
- int expected_index_len = 1;
+ unsigned expected_index_len = 1;
size_t expected_index_size = VARIABLE_ENTRY_SIZE / 2;
struct fo_flush_cache_test_spec spec[1] =
{
@@ -5814,9 +5896,9 @@ check_flush_cache__flush_ops(H5F_t * file_ptr)
int test_num = 7; /* and 8 */
unsigned int flush_flags = H5C__NO_FLAGS_SET;
int spec_size = 1;
- int init_expected_index_len = 1;
+ unsigned init_expected_index_len = 1;
size_t init_expected_index_size = VARIABLE_ENTRY_SIZE;
- int expected_index_len = 1;
+ unsigned expected_index_len = 1;
size_t expected_index_size = VARIABLE_ENTRY_SIZE / 2;
struct fo_flush_cache_test_spec spec[1] =
{
@@ -5927,9 +6009,9 @@ check_flush_cache__flush_ops(H5F_t * file_ptr)
int test_num = 9; /* and 10 */
unsigned int flush_flags = H5C__NO_FLAGS_SET;
int spec_size = 1;
- int init_expected_index_len = 1;
+ unsigned init_expected_index_len = 1;
size_t init_expected_index_size = VARIABLE_ENTRY_SIZE / 2;
- int expected_index_len = 1;
+ unsigned expected_index_len = 1;
size_t expected_index_size = VARIABLE_ENTRY_SIZE / 4;
struct fo_flush_cache_test_spec spec[1] =
{
@@ -6038,9 +6120,9 @@ check_flush_cache__flush_ops(H5F_t * file_ptr)
int test_num = 11; /* and 12 */
unsigned int flush_flags = H5C__NO_FLAGS_SET;
int spec_size = 1;
- int init_expected_index_len = 1;
+ unsigned init_expected_index_len = 1;
size_t init_expected_index_size = VARIABLE_ENTRY_SIZE / 2;
- int expected_index_len = 1;
+ unsigned expected_index_len = 1;
size_t expected_index_size = VARIABLE_ENTRY_SIZE / 4;
struct fo_flush_cache_test_spec spec[1] =
{
@@ -6152,9 +6234,9 @@ check_flush_cache__flush_ops(H5F_t * file_ptr)
int test_num = 13;
unsigned int flush_flags = H5C__NO_FLAGS_SET;
int spec_size = 1;
- int init_expected_index_len = 1;
+ unsigned init_expected_index_len = 1;
size_t init_expected_index_size = 1 * PICO_ENTRY_SIZE;
- int expected_index_len = 3;
+ unsigned expected_index_len = 3;
size_t expected_index_size = 3 * PICO_ENTRY_SIZE;
struct fo_flush_cache_test_spec spec[1] =
{
@@ -6248,9 +6330,9 @@ check_flush_cache__flush_ops(H5F_t * file_ptr)
int test_num = 14;
unsigned int flush_flags = H5C__FLUSH_INVALIDATE_FLAG;
int spec_size = 1;
- int init_expected_index_len = 1;
+ unsigned init_expected_index_len = 1;
size_t init_expected_index_size = 1 * PICO_ENTRY_SIZE;
- int expected_index_len = 0;
+ unsigned expected_index_len = 0;
size_t expected_index_size = (size_t)0;
struct fo_flush_cache_test_spec spec[1] =
{
@@ -6341,9 +6423,9 @@ check_flush_cache__flush_ops(H5F_t * file_ptr)
int test_num = 15;
unsigned int flush_flags = H5C__NO_FLAGS_SET;
int spec_size = 1;
- int init_expected_index_len = 1;
+ unsigned init_expected_index_len = 1;
size_t init_expected_index_size = 1 * VARIABLE_ENTRY_SIZE;
- int expected_index_len = 3;
+ unsigned expected_index_len = 3;
size_t expected_index_size = VARIABLE_ENTRY_SIZE +
(VARIABLE_ENTRY_SIZE / 4) +
(VARIABLE_ENTRY_SIZE / 2);
@@ -6438,9 +6520,9 @@ check_flush_cache__flush_ops(H5F_t * file_ptr)
int test_num = 16;
unsigned int flush_flags = H5C__FLUSH_INVALIDATE_FLAG;
int spec_size = 1;
- int init_expected_index_len = 1;
+ unsigned init_expected_index_len = 1;
size_t init_expected_index_size = 1 * VARIABLE_ENTRY_SIZE;
- int expected_index_len = 0;
+ unsigned expected_index_len = 0;
size_t expected_index_size = (size_t)0;
struct fo_flush_cache_test_spec spec[1] =
{
@@ -6531,9 +6613,9 @@ check_flush_cache__flush_ops(H5F_t * file_ptr)
int test_num = 17; /* and 18 */
unsigned int flush_flags = H5C__NO_FLAGS_SET;
int spec_size = 1;
- int init_expected_index_len = 1;
+ unsigned init_expected_index_len = 1;
size_t init_expected_index_size = 1 * VARIABLE_ENTRY_SIZE;
- int expected_index_len = 3;
+ unsigned expected_index_len = 3;
size_t expected_index_size = VARIABLE_ENTRY_SIZE +
(VARIABLE_ENTRY_SIZE / 4) +
(VARIABLE_ENTRY_SIZE / 2);
@@ -6658,9 +6740,9 @@ check_flush_cache__flush_ops(H5F_t * file_ptr)
int test_num = 19; /* and 20 */
unsigned int flush_flags = H5C__NO_FLAGS_SET;
int spec_size = 1;
- int init_expected_index_len = 1;
+ unsigned init_expected_index_len = 1;
size_t init_expected_index_size = 1 * VARIABLE_ENTRY_SIZE;
- int expected_index_len = 3;
+ unsigned expected_index_len = 3;
size_t expected_index_size = VARIABLE_ENTRY_SIZE +
(VARIABLE_ENTRY_SIZE / 4) +
(VARIABLE_ENTRY_SIZE / 2);
@@ -6796,9 +6878,9 @@ check_flush_cache__flush_ops(H5F_t * file_ptr)
int test_num = 21;
unsigned int flush_flags = H5C__FLUSH_MARKED_ENTRIES_FLAG;
int spec_size = 4;
- int init_expected_index_len = 4;
+ unsigned init_expected_index_len = 4;
size_t init_expected_index_size = (2 * VARIABLE_ENTRY_SIZE) + (2 * PICO_ENTRY_SIZE);
- int expected_index_len = 6;
+ unsigned expected_index_len = 6;
size_t expected_index_size = (2 * VARIABLE_ENTRY_SIZE) +
(VARIABLE_ENTRY_SIZE / 4) +
(VARIABLE_ENTRY_SIZE / 2) +
@@ -7007,9 +7089,9 @@ check_flush_cache__flush_ops(H5F_t * file_ptr)
int test_num = 22;
unsigned int flush_flags = H5C__NO_FLAGS_SET;
int spec_size = 6;
- int init_expected_index_len = 6;
+ unsigned init_expected_index_len = 6;
size_t init_expected_index_size = (2 * VARIABLE_ENTRY_SIZE) + (4 * PICO_ENTRY_SIZE);
- int expected_index_len = 10;
+ unsigned expected_index_len = 10;
size_t expected_index_size = (2 * VARIABLE_ENTRY_SIZE) +
(2 * (VARIABLE_ENTRY_SIZE / 4)) +
(2 * (VARIABLE_ENTRY_SIZE / 2)) +
@@ -7276,9 +7358,9 @@ check_flush_cache__flush_ops(H5F_t * file_ptr)
int test_num = 23;
unsigned int flush_flags = H5C__FLUSH_INVALIDATE_FLAG;
int spec_size = 6;
- int init_expected_index_len = 6;
+ unsigned init_expected_index_len = 6;
size_t init_expected_index_size = (2 * VARIABLE_ENTRY_SIZE) + (4 * PICO_ENTRY_SIZE);
- int expected_index_len = 0;
+ unsigned expected_index_len = 0;
size_t expected_index_size = 0;
struct fo_flush_cache_test_spec spec[6] =
{
@@ -7538,9 +7620,9 @@ check_flush_cache__flush_ops(H5F_t * file_ptr)
int test_num = 24;
unsigned int flush_flags = H5C__NO_FLAGS_SET;
int spec_size = 3;
- int init_expected_index_len = 3;
+ unsigned init_expected_index_len = 3;
size_t init_expected_index_size = 3 * PICO_ENTRY_SIZE;
- int expected_index_len = 3;
+ unsigned expected_index_len = 3;
size_t expected_index_size = 3 * PICO_ENTRY_SIZE;
struct fo_flush_cache_test_spec spec[3] =
{
@@ -7671,9 +7753,9 @@ check_flush_cache__flush_ops(H5F_t * file_ptr)
int test_num = 25;
unsigned int flush_flags = H5C__FLUSH_INVALIDATE_FLAG;
int spec_size = 3;
- int init_expected_index_len = 3;
+ unsigned init_expected_index_len = 3;
size_t init_expected_index_size = 3 * PICO_ENTRY_SIZE;
- int expected_index_len = 0;
+ unsigned expected_index_len = 0;
size_t expected_index_size = (size_t)0;
struct fo_flush_cache_test_spec spec[3] =
{
@@ -7874,9 +7956,9 @@ check_flush_cache__flush_ops(H5F_t * file_ptr)
int test_num = 26;
unsigned int flush_flags = H5C__NO_FLAGS_SET;
int spec_size = 10;
- int init_expected_index_len = 10;
+ unsigned init_expected_index_len = 10;
size_t init_expected_index_size = 10 * VARIABLE_ENTRY_SIZE;
- int expected_index_len = 13;
+ unsigned expected_index_len = 13;
size_t expected_index_size = 9 * VARIABLE_ENTRY_SIZE;
struct fo_flush_cache_test_spec spec[10] =
{
@@ -8308,9 +8390,9 @@ check_flush_cache__flush_ops(H5F_t * file_ptr)
int test_num = 27;
unsigned int flush_flags = H5C__FLUSH_INVALIDATE_FLAG;
int spec_size = 10;
- int init_expected_index_len = 10;
+ unsigned init_expected_index_len = 10;
size_t init_expected_index_size = 10 * VARIABLE_ENTRY_SIZE;
- int expected_index_len = 0;
+ unsigned expected_index_len = 0;
size_t expected_index_size = (size_t)0;
struct fo_flush_cache_test_spec spec[10] =
{
@@ -8667,9 +8749,9 @@ check_flush_cache__flush_ops(H5F_t * file_ptr)
int test_num = 28;
unsigned int flush_flags = H5C__NO_FLAGS_SET;
int spec_size = 5;
- int init_expected_index_len = 5;
+ unsigned init_expected_index_len = 5;
size_t init_expected_index_size = 3 * VARIABLE_ENTRY_SIZE;
- int expected_index_len = 5;
+ unsigned expected_index_len = 5;
size_t expected_index_size = 4 * VARIABLE_ENTRY_SIZE;
struct fo_flush_cache_test_spec spec[5] =
{
@@ -8859,9 +8941,9 @@ check_flush_cache__flush_ops(H5F_t * file_ptr)
int test_num = 29;
unsigned int flush_flags = H5C__FLUSH_INVALIDATE_FLAG;
int spec_size = 5;
- int init_expected_index_len = 5;
+ unsigned init_expected_index_len = 5;
size_t init_expected_index_size = 3 * VARIABLE_ENTRY_SIZE;
- int expected_index_len = 0;
+ unsigned expected_index_len = 0;
size_t expected_index_size = 0;
struct fo_flush_cache_test_spec spec[5] =
{
@@ -9070,9 +9152,9 @@ check_flush_cache__flush_op_test(H5F_t * file_ptr,
unsigned int flush_flags,
int spec_size,
const struct fo_flush_cache_test_spec spec[],
- int init_expected_index_len,
+ unsigned init_expected_index_len,
size_t init_expected_index_size,
- int expected_index_len,
+ unsigned expected_index_len,
size_t expected_index_size,
int check_size,
struct fo_flush_entry_check check[])
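
The expected index-length counters switch from int to unsigned in every spec above and in this prototype, matching the cache's own index_len field (which the later `cache_ptr->index_len != 44` check shows is unsigned). A minimal sketch of the comparison this enables, reusing the test's existing pass/failure_mssg globals:

    /* With both operands unsigned, -Wsign-compare stays quiet and a
     * negative "expected" length is impossible by construction.
     */
    static void
    verify_index_len(const H5C_t *cache_ptr, unsigned expected_index_len)
    {
        if (cache_ptr->index_len != expected_index_len) {
            pass = FALSE;
            failure_mssg = "unexpected index length.\n";
        }
    }
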
@@ -9828,7 +9910,6 @@ check_flush_cache__flush_op_eviction_test(H5F_t * file_ptr)
}
}
-
if(pass) {
/* Now load a large entry. This should result in the eviction
@@ -10373,14 +10454,28 @@ check_flush_cache__flush_op_eviction_test(H5F_t * file_ptr)
* However, (VET 9)'s serialize function needs to modify (VET, 8),
* which is currently not in cache. Thus it calls H5C_protect(VET, 8)
* to gain access to it. H5C_protect(VET, 8) loads (VET, 8), and
- * then attempts to evict entries to make space for it. While (VET, 9)
- * is still at the bottom of the LRU, it is marked flush in progress
- * and this is skipped. Thus the next entries on the LRU are (MET, 0)
- * thru (MET, 30) and (LET, 0) thru (LET, 10) -- all of which are dirty,
+ * then attempts to evict entries to make space for it.
+ *
+ * However, H5C_make_space_in_cache() now exits without taking
+ * any action on re-entrant calls. Thus H5C_protect(VET, 8) simply
+ * loads the entry into the cache -- resulting in a cache that is
+ * 10 KB oversize. The subsequent unprotect puts (VET, 8) at the
+ * head of the LRU and marks it dirty.
+ *
+ * After (VET, 9) is serialized, it is flushed, and moved to the
+ * head of the LRU.
+ *
+ * At this point, the H5C_make_space_in_cache() call made by
+ * H5C_protect(LET, 11) now has 14 KB of space to make.
+ *
+ * The next entries on the LRU are (MET, 0) thru (MET, 30),
+ * (LET, 0) thru (LET, 10), and (VET, 8) -- all of which are dirty,
* and are therefore flushed and moved to the head of the LRU list.
*
* The next entry on the bottom of the LRU list is (VET, 0), which
- * is clean, and is therefore evicted to make space for (VET, 8).
+ * is clean, and is therefore evicted, leaving H5C_make_space_in_cache()
+ * with 4 KB of space to create.
+ *
* This space is sufficient, so H5C_protect(VET, 8) inserts
* (VET, 8) into the cache's index, marks it as protected, and
* returns to the serialize function for (VET, 9).
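
The rewritten comment hinges on H5C_make_space_in_cache() now being a no-op when it is called re-entrantly from a client serialize callback, so the cache goes temporarily oversize instead of recursing into eviction. A minimal sketch of that guard, assuming an in-progress flag on the cache (the flag name is an assumption; it is not shown in this diff):

    static herr_t
    make_space_in_cache_sketch(H5C_t *cache_ptr, size_t space_needed)
    {
        /* Re-entrant call from a serialize callback: do nothing and let
         * the cache grow oversize; the outer call reclaims space once
         * the flush that triggered us completes.
         */
        if (cache_ptr->msic_in_progress)
            return SUCCEED;

        cache_ptr->msic_in_progress = TRUE;

        while (cache_ptr->index_size + space_needed > cache_ptr->max_cache_size) {
            /* ... flush the dirty entry or evict the clean entry at the
             * tail of the LRU; client callbacks run here and may call
             * back into the cache ...
             */
            break; /* eviction loop body elided in this sketch */
        }

        cache_ptr->msic_in_progress = FALSE;

        return SUCCEED;
    }
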
@@ -10389,22 +10484,10 @@ check_flush_cache__flush_op_eviction_test(H5F_t * file_ptr)
 * calls H5C_unprotect(VET, 8), which marks (VET, 8) as dirty and
* unprotected, and places it at the head of the LRU.
*
- * The serialize function for (VET, 9) then returns, and (VET, 9) is
- * is written to disk, marked clean, and moved to the head of the LRU.
+ * (VET, 0) is the next item on the LRU -- it is clean and is therefore
+ * evicted -- leaving 6 KB of free space after (LET, 11) is inserted
+ * into the cache.
*
- * At this point, the cache is still full (since (VET, 8) took the
- * space created by the eviction of (VET, 0)). Thus
- * H5C_protect(LET, 11) continues to look for space. While
- * (MET, 0) was the next item on the LRU list when it called the
- * serialize function for (VET, 9), the function notices that the
- * LRU has been modified, and restarts its search for candidates
- * for eviction at the bottom of the LRU.
- *
- * (MET, 0) is now at the bottom of the LRU, and is clean. Thus
- * it is evicted. This makes sufficient space for (LET, 11), so
- * H5C_protect(LET, 11) inserts it into the cache, marks it as
- * protected, and returns.
- *
* H5C_unprotect(LET, 11) marks (LET, 11) as unprotected, and then
* returns as well.
*
@@ -10430,9 +10513,9 @@ check_flush_cache__flush_op_eviction_test(H5F_t * file_ptr)
*
* (VET, 7) N 5 KB N N - -
*
- * (VET, 8) Y 10 KB Y N - -
+ * (VET, 8) Y 10 KB N N - -
*
- * (VET, 9) Y 10 KB N N - -
+ * (VET, 9) N 10 KB N N - -
*
* Start by updating the expected table for the expected changes in
* entry status:
@@ -10451,25 +10534,22 @@ check_flush_cache__flush_op_eviction_test(H5F_t * file_ptr)
expected[0].serialized = TRUE;
expected[0].destroyed = TRUE;
expected[8].in_cache = TRUE;
- expected[8].is_dirty = TRUE;
+ expected[8].is_dirty = FALSE;
expected[8].deserialized = TRUE;
- expected[8].serialized = FALSE;
+ expected[8].serialized = TRUE;
expected[8].destroyed = FALSE;
- expected[9].in_cache = TRUE;
+ expected[9].in_cache = FALSE;
expected[9].is_dirty = FALSE;
expected[9].serialized = TRUE;
- expected[9].destroyed = FALSE;
+ expected[9].destroyed = TRUE;
- expected[10].in_cache = FALSE;
+ expected[10].in_cache = TRUE;
expected[10].is_dirty = FALSE;
expected[10].serialized = TRUE;
- expected[10].destroyed = TRUE;
+ expected[10].destroyed = FALSE;
num_large_entries = 12;
- /* a newly loaded entry is not inserted in the cache until after
- * space has been made for it. Thus (LET, 11) will not be flushed.
- */
for (i = num_variable_entries;
i < num_variable_entries + num_monster_entries + num_large_entries - 1;
i++)
@@ -10487,10 +10567,10 @@ check_flush_cache__flush_op_eviction_test(H5F_t * file_ptr)
/* verify cache size */
if((cache_ptr->index_len != 44) ||
(cache_ptr->index_size != (2 * 1024 * 1024) -
- (2 * VARIABLE_ENTRY_SIZE) -
- (10 * LARGE_ENTRY_SIZE)) ||
- (cache_ptr->index_size != ((2 * VARIABLE_ENTRY_SIZE) +
- (30 * MONSTER_ENTRY_SIZE) +
+ (2 * 1024) -
+ (1 * LARGE_ENTRY_SIZE)) ||
+ (cache_ptr->index_size != ((1 * VARIABLE_ENTRY_SIZE) +
+ (31 * MONSTER_ENTRY_SIZE) +
(12 * LARGE_ENTRY_SIZE)))) {
pass = FALSE;
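
As a sanity check on the two size expressions above, assuming the usual cache_common entry sizes (VARIABLE 10 KB, MONSTER 64 KB, LARGE 4 KB -- taken as given here, not shown in this hunk), both sides work out to the same byte count:

    /*
     *   2 MiB - 2 KiB - 1 * 4 KiB               = 2,097,152 - 2,048 - 4,096
     *                                            = 2,091,008 bytes
     *
     *   1 * 10 KiB + 31 * 64 KiB + 12 * 4 KiB   = 10,240 + 2,031,616 + 49,152
     *                                            = 2,091,008 bytes
     */
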
@@ -10500,15 +10580,27 @@ check_flush_cache__flush_op_eviction_test(H5F_t * file_ptr)
/* verify entry status */
verify_entry_status(cache_ptr,
9,
- (num_variable_entries + num_monster_entries + num_large_entries),
+ (num_variable_entries + num_monster_entries +
+ num_large_entries),
expected);
}
if(pass) {
- /* protect and unprotect VET 8 to move it to the top of the LRU */
+ /* protect and unprotect VET 9 to evict MET 0 */
+ protect_entry(file_ptr, VARIABLE_ENTRY_TYPE, 9);
+ unprotect_entry(file_ptr, VARIABLE_ENTRY_TYPE, 9, H5C__NO_FLAGS_SET);
+
+ /* protect and unprotect VET 8 to dirty it and move it to the
+ * top of the LRU. Since we are dirtying it again, reset its
+ * serialized flag.
+ */
+ base_addr = entries[VARIABLE_ENTRY_TYPE];
+ entry_ptr = &(base_addr[8]);
+ entry_ptr->serialized = FALSE;
+
protect_entry(file_ptr, VARIABLE_ENTRY_TYPE, 8);
- unprotect_entry(file_ptr, VARIABLE_ENTRY_TYPE, 8, H5C__NO_FLAGS_SET);
+ unprotect_entry(file_ptr, VARIABLE_ENTRY_TYPE, 8, H5C__DIRTIED_FLAG);
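
The protect/unprotect pair with H5C__DIRTIED_FLAG is the harness idiom for dirtying an entry and moving it to the head of the LRU; clearing the test entry's serialized flag first keeps the expected-status table honest. A condensed helper-style sketch of the idiom (dirty_test_entry is a hypothetical name, not an existing cache_common function):

    static void
    dirty_test_entry(H5F_t *file_ptr, int32_t type, int32_t idx)
    {
        test_entry_t *base_addr = entries[type];
        test_entry_t *entry_ptr = &(base_addr[idx]);

        /* The entry will be written again, so its serialized flag no
         * longer reflects reality -- clear it before re-dirtying.
         */
        entry_ptr->serialized = FALSE;

        protect_entry(file_ptr, type, idx);
        unprotect_entry(file_ptr, type, idx, H5C__DIRTIED_FLAG);
    }
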
/* Again, touch all the non VARIABLE_ENTRY_TYPE entries in the
@@ -10520,7 +10612,13 @@ check_flush_cache__flush_op_eviction_test(H5F_t * file_ptr)
/* skip MET 0 in first pass so that we evict VET 9 when we
* reload MET 0
+ *
+ * Since we are reloading MET 0, reset its destroyed flag.
*/
+ base_addr = entries[MONSTER_ENTRY_TYPE];
+ entry_ptr = &(base_addr[0]);
+ entry_ptr->destroyed = FALSE;
+
for (i = 1; i < num_monster_entries; i++)
{
protect_entry(file_ptr, MONSTER_ENTRY_TYPE, i);
@@ -10553,7 +10651,9 @@ check_flush_cache__flush_op_eviction_test(H5F_t * file_ptr)
expected[i].is_dirty = TRUE;
}
- /* update MET 0 to set its in cache flag, and reset the its destroyed flag */
+ /* update MET 0 to set its in cache flag, and reset
+ * its destroyed flag
+ */
expected[10].in_cache = TRUE;
/* pass through non variable entries will flush VET 8, and evict VET 9.
@@ -10722,8 +10822,8 @@ check_flush_cache__flush_op_eviction_test(H5F_t * file_ptr)
if((cache_ptr->insertions[VARIABLE_ENTRY_TYPE] != 0) ||
(cache_ptr->pinned_insertions[VARIABLE_ENTRY_TYPE] != 0) ||
(cache_ptr->clears[VARIABLE_ENTRY_TYPE] != 0) ||
- (cache_ptr->flushes[VARIABLE_ENTRY_TYPE] != 8) ||
- (cache_ptr->evictions[VARIABLE_ENTRY_TYPE] != 11) ||
+ (cache_ptr->flushes[VARIABLE_ENTRY_TYPE] != 9) ||
+ (cache_ptr->evictions[VARIABLE_ENTRY_TYPE] != 12) ||
(cache_ptr->take_ownerships[VARIABLE_ENTRY_TYPE] != 0) ||
(cache_ptr->moves[VARIABLE_ENTRY_TYPE] != 1) ||
(cache_ptr->entry_flush_moves[VARIABLE_ENTRY_TYPE] != 0) ||
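
The per-type statistics arrays consulted above only exist when H5C_COLLECT_CACHE_STATS is defined, and the new expected values follow directly from the reworked scenario: the extra flush of (VET, 8) takes the flush count from 8 to 9, and the eviction of (VET, 9) takes the eviction count from 11 to 12. A minimal sketch of the guard pattern around such checks:

    #if H5C_COLLECT_CACHE_STATS
        if ((cache_ptr->flushes[VARIABLE_ENTRY_TYPE] != 9) ||
            (cache_ptr->evictions[VARIABLE_ENTRY_TYPE] != 12)) {
            pass = FALSE;
            failure_mssg = "unexpected variable size entry stats.\n";
        }
    #endif /* H5C_COLLECT_CACHE_STATS */
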
@@ -12736,7 +12836,7 @@ check_flush_cache__pinned_single_entry_test(H5F_t * file_ptr,
*/
static unsigned
-check_get_entry_status(void)
+check_get_entry_status(unsigned paged)
{
static char msg[128];
herr_t result;
@@ -12749,7 +12849,10 @@ check_get_entry_status(void)
test_entry_t * base_addr = NULL;
test_entry_t * entry_ptr = NULL;
- TESTING("H5C_get_entry_status() functionality");
+ if(paged)
+ TESTING("H5C_get_entry_status() functionality (paged aggregation)")
+ else
+ TESTING("H5C_get_entry_status() functionality")
pass = TRUE;
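
From here on, every test function gains a paged argument and reports it through its TESTING banner, so the suite can run once with the default file space strategy and once with paged aggregation. A plausible driver loop (the actual main() changes are outside this hunk):

    unsigned nerrs = 0;
    unsigned paged;

    /* Run each test twice: paged == 0 for the default file space
     * strategy, paged == 1 for paged aggregation.
     */
    for (paged = 0; paged <= 1; paged++) {
        nerrs += check_get_entry_status(paged);
        nerrs += check_expunge_entry(paged);
        nerrs += check_multiple_read_protect(paged);
        /* ... and so on for the remaining tests ... */
    }
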
@@ -12757,8 +12860,7 @@ check_get_entry_status(void)
reset_entries();
- file_ptr = setup_cache((size_t)(2 * 1024 * 1024),
- (size_t)(1 * 1024 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024 * 1024), (size_t)(1 * 1024 * 1024), paged);
if(file_ptr == NULL) {
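
setup_cache() itself now takes the paged flag as a third argument. Its definition lives in cache_common.c and is not part of this patch excerpt, but presumably it routes the flag into the file creation property list along these lines (a sketch assuming the public paged-aggregation API):

    hid_t fcpl_id = H5Pcreate(H5P_FILE_CREATE);

    if (paged) {
        /* Select the paged aggregation strategy for the test file
         * (no free-space persistence, minimal threshold).
         */
        if (H5Pset_file_space_strategy(fcpl_id, H5F_FSPACE_STRATEGY_PAGE,
                                       FALSE, (hsize_t)1) < 0) {
            pass = FALSE;
            failure_mssg = "H5Pset_file_space_strategy() failed.\n";
        }
    }
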
@@ -12781,7 +12883,7 @@ check_get_entry_status(void)
*/
result = H5C_get_entry_status(file_ptr, entry_ptr->addr, &entry_size,
- &in_cache, &is_dirty, &is_protected, &is_pinned, NULL, NULL, NULL);
+ &in_cache, &is_dirty, &is_protected, &is_pinned, NULL, NULL, NULL, NULL);
if(result < 0) {
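
H5C_get_entry_status() has grown one more optional output pointer; these tests pass NULL for it everywhere, just as they already did for the trailing outputs they do not inspect (presumably the corked flag and the flush-dependency parent/child flags), so only the argument count changes. The full call shape:

    result = H5C_get_entry_status(file_ptr, entry_ptr->addr, &entry_size,
                                  &in_cache, &is_dirty, &is_protected, &is_pinned,
                                  NULL,   /* corked status (presumed) -- not checked   */
                                  NULL,   /* flush dependency parent -- not checked    */
                                  NULL,   /* flush dependency child -- not checked     */
                                  NULL);  /* output added by this change -- not checked */

    if (result < 0) {
        pass = FALSE;
        failure_mssg = "H5C_get_entry_status() failed.\n";
    }
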
@@ -12809,7 +12911,7 @@ check_get_entry_status(void)
if(pass) {
result = H5C_get_entry_status(file_ptr, entry_ptr->addr, &entry_size,
- &in_cache, &is_dirty, &is_protected, &is_pinned, NULL, NULL, NULL);
+ &in_cache, &is_dirty, &is_protected, &is_pinned, NULL, NULL, NULL, NULL);
if(result < 0) {
@@ -12835,7 +12937,7 @@ check_get_entry_status(void)
if(pass) {
result = H5C_get_entry_status(file_ptr, entry_ptr->addr, &entry_size,
- &in_cache, &is_dirty, &is_protected, &is_pinned, NULL, NULL, NULL);
+ &in_cache, &is_dirty, &is_protected, &is_pinned, NULL, NULL, NULL, NULL);
if(result < 0) {
@@ -12861,7 +12963,7 @@ check_get_entry_status(void)
if(pass) {
result = H5C_get_entry_status(file_ptr, entry_ptr->addr, &entry_size,
- &in_cache, &is_dirty, &is_protected, &is_pinned, NULL, NULL, NULL);
+ &in_cache, &is_dirty, &is_protected, &is_pinned, NULL, NULL, NULL, NULL);
if(result < 0) {
@@ -12887,7 +12989,7 @@ check_get_entry_status(void)
if(pass) {
result = H5C_get_entry_status(file_ptr, entry_ptr->addr, &entry_size,
- &in_cache, &is_dirty, &is_protected, &is_pinned, NULL, NULL, NULL);
+ &in_cache, &is_dirty, &is_protected, &is_pinned, NULL, NULL, NULL, NULL);
if(result < 0) {
@@ -12913,7 +13015,7 @@ check_get_entry_status(void)
if(pass) {
result = H5C_get_entry_status(file_ptr, entry_ptr->addr, &entry_size,
- &in_cache, &is_dirty, &is_protected, &is_pinned, NULL, NULL, NULL);
+ &in_cache, &is_dirty, &is_protected, &is_pinned, NULL, NULL, NULL, NULL);
if(result < 0) {
@@ -12965,7 +13067,7 @@ check_get_entry_status(void)
*/
static unsigned
-check_expunge_entry(void)
+check_expunge_entry(unsigned paged)
{
static char msg[128];
herr_t result;
@@ -12978,7 +13080,10 @@ check_expunge_entry(void)
test_entry_t * base_addr;
test_entry_t * entry_ptr;
- TESTING("H5C_expunge_entry() functionality");
+ if(paged)
+ TESTING("H5C_expunge_entry() functionality (paged aggregation)")
+ else
+ TESTING("H5C_expunge_entry() functionality")
pass = TRUE;
@@ -12986,8 +13091,7 @@ check_expunge_entry(void)
reset_entries();
- file_ptr = setup_cache((size_t)(2 * 1024 * 1024),
- (size_t)(1 * 1024 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024 * 1024), (size_t)(1 * 1024 * 1024), paged);
base_addr = entries[0];
entry_ptr = &(base_addr[0]);
@@ -13001,7 +13105,7 @@ check_expunge_entry(void)
result = H5C_get_entry_status(file_ptr, entry_ptr->addr, &entry_size,
&in_cache, &is_dirty, &is_protected,
- &is_pinned, NULL, NULL, NULL);
+ &is_pinned, NULL, NULL, NULL, NULL);
if(result < 0) {
@@ -13042,7 +13146,7 @@ check_expunge_entry(void)
if(pass) {
result = H5C_get_entry_status(file_ptr, entry_ptr->addr, &entry_size,
- &in_cache, &is_dirty, &is_protected, &is_pinned, NULL, NULL, NULL);
+ &in_cache, &is_dirty, &is_protected, &is_pinned, NULL, NULL, NULL, NULL);
if(result < 0) {
@@ -13085,7 +13189,7 @@ check_expunge_entry(void)
*/
result = H5C_get_entry_status(file_ptr, entry_ptr->addr, &entry_size,
- &in_cache, &is_dirty, &is_protected, &is_pinned, NULL, NULL, NULL);
+ &in_cache, &is_dirty, &is_protected, &is_pinned, NULL, NULL, NULL, NULL);
if(result < 0) {
@@ -13126,7 +13230,7 @@ check_expunge_entry(void)
result = H5C_get_entry_status(file_ptr, entry_ptr->addr, &entry_size,
&in_cache, &is_dirty, &is_protected,
- &is_pinned, NULL, NULL, NULL);
+ &is_pinned, NULL, NULL, NULL, NULL);
if(result < 0) {
@@ -13168,7 +13272,7 @@ check_expunge_entry(void)
result = H5C_get_entry_status(file_ptr, entry_ptr->addr, &entry_size,
&in_cache, &is_dirty, &is_protected,
- &is_pinned, NULL, NULL, NULL);
+ &is_pinned, NULL, NULL, NULL, NULL);
if(result < 0) {
@@ -13212,7 +13316,7 @@ check_expunge_entry(void)
result = H5C_get_entry_status(file_ptr, entry_ptr->addr, &entry_size,
&in_cache, &is_dirty, &is_protected,
- &is_pinned, NULL, NULL, NULL);
+ &is_pinned, NULL, NULL, NULL, NULL);
if(result < 0) {
@@ -13274,7 +13378,7 @@ check_expunge_entry(void)
*-------------------------------------------------------------------------
*/
static unsigned
-check_multiple_read_protect(void)
+check_multiple_read_protect(unsigned paged)
{
H5F_t * file_ptr = NULL;
#if H5C_COLLECT_CACHE_STATS
@@ -13282,7 +13386,10 @@ check_multiple_read_protect(void)
#endif /* H5C_COLLECT_CACHE_STATS */
test_entry_t * entry_ptr;
- TESTING("multiple read only protects on a single entry");
+ if(paged)
+ TESTING("multiple read only protects on a single entry (paged aggr)")
+ else
+ TESTING("multiple read only protects on a single entry")
pass = TRUE;
@@ -13311,8 +13418,7 @@ check_multiple_read_protect(void)
reset_entries();
- file_ptr = setup_cache((size_t)(2 * 1024),
- (size_t)(1 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024), paged);
#if H5C_COLLECT_CACHE_STATS
cache_ptr = file_ptr->shared->cache;
#endif /* H5C_COLLECT_CACHE_STATS */
@@ -13696,11 +13802,11 @@ check_multiple_read_protect(void)
*/
static unsigned
-check_move_entry(void)
+check_move_entry(unsigned paged)
{
unsigned u;
H5F_t * file_ptr = NULL;
- struct move_entry_test_spec test_specs[8] =
+ struct move_entry_test_spec test_specs[4] =
{
{
/* int entry_type = */ PICO_ENTRY_TYPE,
@@ -13728,7 +13834,10 @@ check_move_entry(void)
},
};
- TESTING("H5C_move_entry() functionality");
+ if(paged)
+ TESTING("H5C_move_entry() functionality (paged aggregation)")
+ else
+ TESTING("H5C_move_entry() functionality")
pass = TRUE;
@@ -13761,8 +13870,7 @@ check_move_entry(void)
reset_entries();
- file_ptr = setup_cache((size_t)(2 * 1024 * 1024),
- (size_t)(1 * 1024 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024 * 1024), (size_t)(1 * 1024 * 1024), paged);
}
u = 0;
@@ -13988,7 +14096,7 @@ check_move_entry__run_test(H5F_t * file_ptr,
*/
static unsigned
-check_pin_protected_entry(void)
+check_pin_protected_entry(unsigned paged)
{
static char msg[128];
herr_t result;
@@ -13996,7 +14104,10 @@ check_pin_protected_entry(void)
test_entry_t * base_addr;
test_entry_t * entry_ptr;
- TESTING("H5C_pin_protected_entry() functionality");
+ if(paged)
+ TESTING("H5C_pin_protected_entry() functionality (paged aggregation)")
+ else
+ TESTING("H5C_pin_protected_entry() functionality")
pass = TRUE;
@@ -14009,8 +14120,7 @@ check_pin_protected_entry(void)
reset_entries();
- file_ptr = setup_cache((size_t)(2 * 1024 * 1024),
- (size_t)(1 * 1024 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024 * 1024), (size_t)(1 * 1024 * 1024), paged);
if(file_ptr == NULL) {
@@ -14087,7 +14197,7 @@ check_pin_protected_entry(void)
*/
static unsigned
-check_resize_entry(void)
+check_resize_entry(unsigned paged)
{
static char msg[128];
herr_t result;
@@ -14102,7 +14212,10 @@ check_resize_entry(void)
test_entry_t * base_addr;
test_entry_t * entry_ptr = NULL;
- TESTING("entry resize functionality");
+ if(paged)
+ TESTING("entry resize functionality (paged aggregation)")
+ else
+ TESTING("entry resize functionality")
/* Setup a cache and verify that it is empty.
*
@@ -14141,8 +14254,8 @@ check_resize_entry(void)
reset_entries();
- file_ptr = setup_cache((size_t)(2 * 1024 * 1024),
- (size_t)(1 * 1024 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024 * 1024), (size_t)(1 * 1024 * 1024), paged);
+
if(file_ptr == NULL) {
pass = FALSE;
@@ -14199,7 +14312,7 @@ check_resize_entry(void)
result = H5C_get_entry_status(file_ptr, entry_ptr->addr,
&reported_entry_size, &in_cache,
&is_dirty, &is_protected, &is_pinned,
- NULL, NULL, NULL);
+ NULL, NULL, NULL, NULL);
if(result < 0) {
@@ -14277,7 +14390,7 @@ check_resize_entry(void)
result = H5C_get_entry_status(file_ptr, entry_ptr->addr,
&reported_entry_size, &in_cache,
&is_dirty, &is_protected, &is_pinned,
- NULL, NULL, NULL);
+ NULL, NULL, NULL, NULL);
if(result < 0) {
@@ -14362,7 +14475,7 @@ check_resize_entry(void)
result = H5C_get_entry_status(file_ptr, entry_ptr->addr,
&reported_entry_size, &in_cache,
&is_dirty, &is_protected, &is_pinned,
- NULL, NULL, NULL);
+ NULL, NULL, NULL, NULL);
if(result < 0) {
@@ -14430,7 +14543,7 @@ check_resize_entry(void)
result = H5C_get_entry_status(file_ptr, entry_ptr->addr,
&reported_entry_size, &in_cache,
&is_dirty, &is_protected, &is_pinned,
- NULL, NULL, NULL);
+ NULL, NULL, NULL, NULL);
if(result < 0) {
@@ -14490,7 +14603,7 @@ check_resize_entry(void)
result = H5C_get_entry_status(file_ptr, entry_ptr->addr,
&reported_entry_size, &in_cache,
&is_dirty, &is_protected, &is_pinned,
- NULL, NULL, NULL);
+ NULL, NULL, NULL, NULL);
if(result < 0) {
@@ -14530,7 +14643,7 @@ check_resize_entry(void)
result = H5C_get_entry_status(file_ptr, entry_ptr->addr, &entry_size,
&in_cache, &is_dirty, &is_protected,
- &is_pinned, NULL, NULL, NULL);
+ &is_pinned, NULL, NULL, NULL, NULL);
if(result < 0) {
@@ -14644,7 +14757,7 @@ check_resize_entry(void)
result = H5C_get_entry_status(file_ptr, entry_ptr->addr,
&reported_entry_size, &in_cache,
&is_dirty, &is_protected, &is_pinned,
- NULL, NULL, NULL);
+ NULL, NULL, NULL, NULL);
if(result < 0) {
@@ -14724,7 +14837,7 @@ check_resize_entry(void)
result = H5C_get_entry_status(file_ptr, entry_ptr->addr,
&reported_entry_size, &in_cache,
&is_dirty, &is_protected, &is_pinned,
- NULL, NULL, NULL);
+ NULL, NULL, NULL, NULL);
if(result < 0) {
@@ -14809,7 +14922,7 @@ check_resize_entry(void)
result = H5C_get_entry_status(file_ptr, entry_ptr->addr,
&reported_entry_size, &in_cache,
&is_dirty, &is_protected, &is_pinned,
- NULL, NULL, NULL);
+ NULL, NULL, NULL, NULL);
if(result < 0) {
@@ -14879,7 +14992,7 @@ check_resize_entry(void)
result = H5C_get_entry_status(file_ptr, entry_ptr->addr,
&reported_entry_size, &in_cache,
&is_dirty, &is_protected, &is_pinned,
- NULL, NULL, NULL);
+ NULL, NULL, NULL, NULL);
if(result < 0) {
@@ -14939,7 +15052,7 @@ check_resize_entry(void)
result = H5C_get_entry_status(file_ptr, entry_ptr->addr,
&reported_entry_size, &in_cache,
&is_dirty, &is_protected, &is_pinned,
- NULL, NULL, NULL);
+ NULL, NULL, NULL, NULL);
if(result < 0) {
@@ -14979,7 +15092,7 @@ check_resize_entry(void)
result = H5C_get_entry_status(file_ptr, entry_ptr->addr, &entry_size,
&in_cache, &is_dirty, &is_protected,
- &is_pinned, NULL, NULL, NULL);
+ &is_pinned, NULL, NULL, NULL, NULL);
if(result < 0) {
@@ -15082,7 +15195,7 @@ check_resize_entry(void)
*/
static unsigned
-check_evictions_enabled(void)
+check_evictions_enabled(unsigned paged)
{
static char msg[128];
herr_t result;
@@ -15096,7 +15209,10 @@ check_evictions_enabled(void)
test_entry_t * base_addr = NULL;
test_entry_t * entry_ptr;
- TESTING("evictions enabled/disabled functionality");
+ if(paged)
+ TESTING("evictions enabled/disabled functionality (paged aggregation)")
+ else
+ TESTING("evictions enabled/disabled functionality")
/* Setup a cache and verify that it is empty.
*
@@ -15144,8 +15260,8 @@ check_evictions_enabled(void)
reset_entries();
- file_ptr = setup_cache((size_t)(1 * 1024 * 1024),
- (size_t)( 512 * 1024));
+ file_ptr = setup_cache((size_t)(1 * 1024 * 1024), (size_t)(512 * 1024), paged);
+
if(file_ptr == NULL) {
pass = FALSE;
@@ -15275,7 +15391,7 @@ check_evictions_enabled(void)
result = H5C_get_entry_status(file_ptr, entry_ptr->addr,
NULL, &in_cache, NULL, NULL, NULL,
- NULL, NULL, NULL);
+ NULL, NULL, NULL, NULL);
if(result < 0) {
@@ -15341,7 +15457,7 @@ check_evictions_enabled(void)
entry_ptr = &(base_addr[1]);
result = H5C_get_entry_status(file_ptr, entry_ptr->addr,
- NULL, &in_cache, NULL, NULL, NULL, NULL, NULL, NULL);
+ NULL, &in_cache, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
if(result < 0) {
@@ -15560,7 +15676,7 @@ check_evictions_enabled(void)
entry_ptr = &(base_addr[2]);
result = H5C_get_entry_status(file_ptr, entry_ptr->addr,
- NULL, &in_cache, NULL, NULL, NULL, NULL, NULL, NULL);
+ NULL, &in_cache, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
if(result < 0) {
@@ -15595,7 +15711,7 @@ check_evictions_enabled(void)
entry_ptr = &(base_addr[3]);
result = H5C_get_entry_status(file_ptr, entry_ptr->addr,
- NULL, &in_cache, NULL, NULL, NULL, NULL, NULL, NULL);
+ NULL, &in_cache, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
if(result < 0) {
@@ -15729,7 +15845,7 @@ check_evictions_enabled(void)
entry_ptr = &(base_addr[4]);
result = H5C_get_entry_status(file_ptr, entry_ptr->addr,
- NULL, &in_cache, NULL, NULL, NULL, NULL, NULL, NULL);
+ NULL, &in_cache, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
if(result < 0) {
@@ -15815,11 +15931,14 @@ check_evictions_enabled(void)
*/
static unsigned
-check_flush_protected_err(void)
+check_flush_protected_err(unsigned paged)
{
H5F_t * file_ptr = NULL;
- TESTING("flush cache with protected entry error");
+ if(paged)
+ TESTING("flush cache with protected entry error (paged aggregation)")
+ else
+ TESTING("flush cache with protected entry error")
pass = TRUE;
@@ -15832,8 +15951,7 @@ check_flush_protected_err(void)
reset_entries();
- file_ptr = setup_cache((size_t)(2 * 1024),
- (size_t)(1 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024), paged);
protect_entry(file_ptr, 0, 0);
@@ -15888,13 +16006,15 @@ check_flush_protected_err(void)
*
*-------------------------------------------------------------------------
*/
-
static unsigned
-check_destroy_pinned_err(void)
+check_destroy_pinned_err(unsigned paged)
{
H5F_t * file_ptr = NULL;
- TESTING("destroy cache with permanently pinned entry error");
+ if(paged)
+ TESTING("destroy cache with permanently pinned entry error (pgd aggr)")
+ else
+ TESTING("destroy cache with permanently pinned entry error")
pass = TRUE;
@@ -15902,58 +16022,51 @@ check_destroy_pinned_err(void)
* should fail. Unpin the entry and flush destroy again -- should
* succeed.
*/
-
if(pass) {
reset_entries();
- file_ptr = setup_cache((size_t)(2 * 1024),
- (size_t)(1 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024), paged);
protect_entry(file_ptr, 0, 0);
unprotect_entry(file_ptr, 0, 0, H5C__PIN_ENTRY_FLAG);
- if(H5C_dest(file_ptr, H5AC_ind_read_dxpl_id) >= 0) {
+ if(H5C_prep_for_file_close(file_ptr, H5P_DATASET_XFER_DEFAULT) < 0) {
+ pass = FALSE;
+ failure_mssg = "unexpected failure of prep for file close.\n";
+ } /* end if */
+ if(H5C_dest(file_ptr, H5AC_ind_read_dxpl_id) >= 0) {
pass = FALSE;
failure_mssg = "destroy succeeded on cache with pinned entry.\n";
-
- } else {
-
+ } /* end if */
+ else {
unpin_entry(0, 0);
if(H5C_dest(file_ptr, H5AC_ind_read_dxpl_id) < 0) {
-
pass = FALSE;
failure_mssg = "destroy failed after unpin.\n";
-
- } else {
+ } /* end if */
+ else
file_ptr->shared->cache = NULL;
- }
- }
+ } /* end else */
if(saved_cache != NULL) {
-
file_ptr->shared->cache = saved_cache;
saved_cache = NULL;
-
- }
+ } /* end if */
/* call takedown_cache() with a NULL file_ptr parameter.
* This causes the function to close and delete the file,
* while skipping the call to H5C_dest().
*/
takedown_cache(NULL, FALSE, FALSE);
-
- }
+ } /* end if */
if(pass) { PASSED(); } else { H5_FAILED(); }
- if(!pass) {
-
- HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n",
- FUNC, failure_mssg);
- }
+ if(!pass)
+ HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", FUNC, failure_mssg);
return (unsigned)!pass;
@@ -15977,11 +16090,14 @@ check_destroy_pinned_err(void)
*/
static unsigned
-check_destroy_protected_err(void)
+check_destroy_protected_err(unsigned paged)
{
H5F_t * file_ptr = NULL;
- TESTING("destroy cache with protected entry error");
+ if(paged)
+ TESTING("destroy cache with protected entry error (paged aggregation)")
+ else
+ TESTING("destroy cache with protected entry error")
pass = TRUE;
@@ -15994,51 +16110,55 @@ check_destroy_protected_err(void)
reset_entries();
- file_ptr = setup_cache((size_t)(2 * 1024),
- (size_t)(1 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024), paged);
+
+ /* Note: normally this call would go just before the series of
+ * flushes prior to file close -- in particular, all entries
+ * should be unprotected when this call is made.
+ *
+ * Thus H5C_prep_for_file_close() contains an assert to verify
+ * this. Since this assert would be triggered by the condition
+ * we are trying to test, put the call to H5C_prep_for_file_close()
+ * prior to the final protect call.
+ */
+ if(H5C_prep_for_file_close(file_ptr, H5P_DATASET_XFER_DEFAULT) < 0) {
+ pass = FALSE;
+ failure_mssg = "unexpected failure of prep for file close.\n";
+ } /* end if */
protect_entry(file_ptr, 0, 0);
if(H5C_dest(file_ptr, H5AC_ind_read_dxpl_id) >= 0) {
-
pass = FALSE;
failure_mssg = "destroy succeeded on cache with protected entry.\n";
-
- } else {
-
+ } /* end if */
+ else {
unprotect_entry(file_ptr, 0, 0, H5C__DIRTIED_FLAG);
-
if(H5C_dest(file_ptr, H5AC_ind_read_dxpl_id) < 0) {
-
pass = FALSE;
failure_mssg = "destroy failed after unprotect.\n";
-
- } else {
+ } /* end if */
+ else {
file_ptr->shared->cache = NULL;
- }
- }
+ } /* end else */
+ } /* end else */
if(saved_cache != NULL) {
-
file_ptr->shared->cache = saved_cache;
saved_cache = NULL;
-
- }
+ } /* end if */
/* call takedown_cache() with a NULL file_ptr parameter.
* This causes the function to close and delete the file,
* while skipping the call to H5C_dest().
*/
takedown_cache(NULL, FALSE, FALSE);
- }
+ } /* end if */
if(pass) { PASSED(); } else { H5_FAILED(); }
- if(!pass) {
-
- HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n",
- FUNC, failure_mssg);
- }
+ if(!pass)
+ HDfprintf(stdout, "%s(): failure_mssg = \"%s\".\n", FUNC, failure_mssg);
return (unsigned)!pass;
@@ -16062,14 +16182,17 @@ check_destroy_protected_err(void)
*/
static unsigned
-check_duplicate_insert_err(void)
+check_duplicate_insert_err(unsigned paged)
{
herr_t result = -1;
H5F_t * file_ptr = NULL;
test_entry_t * base_addr;
test_entry_t * entry_ptr;
- TESTING("duplicate entry insertion error");
+ if(paged)
+ TESTING("duplicate entry insertion error (paged aggregation)")
+ else
+ TESTING("duplicate entry insertion error")
pass = TRUE;
@@ -16082,8 +16205,7 @@ check_duplicate_insert_err(void)
reset_entries();
- file_ptr = setup_cache((size_t)(2 * 1024),
- (size_t)(1 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024), paged);
protect_entry(file_ptr, 0, 0);
@@ -16093,7 +16215,7 @@ check_duplicate_insert_err(void)
entry_ptr = &(base_addr[0]);
result = H5C_insert_entry(file_ptr, H5AC_ind_read_dxpl_id,
- &(types[0]), entry_ptr->addr,
+ types[0], entry_ptr->addr,
(void *)entry_ptr, H5C__NO_FLAGS_SET);
if(result >= 0) {
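
Passing types[0] instead of &(types[0]) to H5C_insert_entry() -- and, below, to H5C_protect(), H5C_expunge_entry() and H5C_move_entry() -- implies the test's class table changed from an array of H5C_class_t structs to an array of pointers. A stand-alone sketch of the two layouts, using a stand-in type since the real table lives in cache_common:

    /* Stand-in for H5C_class_t, for illustration only. */
    typedef struct class_sketch_t { int id; } class_sketch_t;

    static const class_sketch_t pico_class_sk = { 0 };
    static const class_sketch_t nano_class_sk = { 1 };

    /* Old layout: array of structs -- callers pass &(types_sk_old[i]). */
    static const class_sketch_t types_sk_old[] = { { 0 }, { 1 } };

    /* New layout: array of pointers -- callers pass types_sk_new[i] directly. */
    static const class_sketch_t * const types_sk_new[] =
        { &pico_class_sk, &nano_class_sk };
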
@@ -16142,13 +16264,16 @@ check_duplicate_insert_err(void)
*/
static unsigned
-check_double_pin_err(void)
+check_double_pin_err(unsigned paged)
{
herr_t result;
H5F_t * file_ptr = NULL;
test_entry_t * entry_ptr;
- TESTING("pin a pinned entry error");
+ if(paged)
+ TESTING("pin a pinned entry error (paged aggregation)")
+ else
+ TESTING("pin a pinned entry error")
pass = TRUE;
@@ -16162,8 +16287,7 @@ check_double_pin_err(void)
reset_entries();
- file_ptr = setup_cache((size_t)(2 * 1024),
- (size_t)(1 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024), paged);
protect_entry(file_ptr, 0, 0);
@@ -16228,13 +16352,16 @@ check_double_pin_err(void)
*/
static unsigned
-check_double_unpin_err(void)
+check_double_unpin_err(unsigned paged)
{
herr_t result;
H5F_t * file_ptr = NULL;
test_entry_t * entry_ptr;
- TESTING("unpin an unpinned entry error");
+ if(paged)
+ TESTING("unpin an unpinned entry error (paged aggregation)")
+ else
+ TESTING("unpin an unpinned entry error")
pass = TRUE;
@@ -16250,8 +16377,7 @@ check_double_unpin_err(void)
reset_entries();
- file_ptr = setup_cache((size_t)(2 * 1024),
- (size_t)(1 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024), paged);
protect_entry(file_ptr, 0, 0);
@@ -16325,13 +16451,16 @@ check_double_unpin_err(void)
*/
static unsigned
-check_pin_entry_errs(void)
+check_pin_entry_errs(unsigned paged)
{
herr_t result;
H5F_t * file_ptr = NULL;
test_entry_t * entry_ptr;
- TESTING("pin entry related errors");
+ if(paged)
+ TESTING("pin entry related errors (paged aggregation)")
+ else
+ TESTING("pin entry related errors")
pass = TRUE;
@@ -16351,8 +16480,7 @@ check_pin_entry_errs(void)
reset_entries();
- file_ptr = setup_cache((size_t)(2 * 1024),
- (size_t)(1 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024), paged);
protect_entry(file_ptr, 0, 0);
@@ -16431,13 +16559,16 @@ check_pin_entry_errs(void)
*/
static unsigned
-check_double_protect_err(void)
+check_double_protect_err(unsigned paged)
{
H5F_t * file_ptr = NULL;
test_entry_t * entry_ptr;
H5C_cache_entry_t * cache_entry_ptr;
- TESTING("protect a protected entry error");
+ if(paged)
+ TESTING("protect a protected entry error (paged aggregation)")
+ else
+ TESTING("protect a protected entry error")
pass = TRUE;
@@ -16450,8 +16581,7 @@ check_double_protect_err(void)
reset_entries();
- file_ptr = setup_cache((size_t)(2 * 1024),
- (size_t)(1 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024), paged);
protect_entry(file_ptr, 0, 0);
@@ -16461,7 +16591,7 @@ check_double_protect_err(void)
if(pass) {
cache_entry_ptr = (H5C_cache_entry_t *)H5C_protect(file_ptr, H5AC_ind_read_dxpl_id,
- &(types[0]), entry_ptr->addr,
+ types[0], entry_ptr->addr,
&entry_ptr->addr, H5C__NO_FLAGS_SET);
if(cache_entry_ptr != NULL) {
@@ -16509,13 +16639,16 @@ check_double_protect_err(void)
*/
static unsigned
-check_double_unprotect_err(void)
+check_double_unprotect_err(unsigned paged)
{
herr_t result;
H5F_t * file_ptr = NULL;
test_entry_t * entry_ptr;
- TESTING("unprotect an unprotected entry error");
+ if(paged)
+ TESTING("unprotect an unprotected entry error (paged aggregation)")
+ else
+ TESTING("unprotect an unprotected entry error")
pass = TRUE;
@@ -16528,8 +16661,7 @@ check_double_unprotect_err(void)
reset_entries();
- file_ptr = setup_cache((size_t)(2 * 1024),
- (size_t)(1 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024), paged);
protect_entry(file_ptr, 0, 0);
@@ -16587,13 +16719,16 @@ check_double_unprotect_err(void)
*/
static unsigned
-check_mark_entry_dirty_errs(void)
+check_mark_entry_dirty_errs(unsigned paged)
{
herr_t result;
H5F_t * file_ptr = NULL;
test_entry_t * entry_ptr;
- TESTING("mark entry dirty related errors");
+ if(paged)
+ TESTING("mark entry dirty related errors (paged aggregation)")
+ else
+ TESTING("mark entry dirty related errors")
pass = TRUE;
@@ -16607,8 +16742,7 @@ check_mark_entry_dirty_errs(void)
reset_entries();
- file_ptr = setup_cache((size_t)(2 * 1024),
- (size_t)(1 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024), paged);
protect_entry(file_ptr, 0, 0);
@@ -16667,13 +16801,16 @@ check_mark_entry_dirty_errs(void)
*/
static unsigned
-check_expunge_entry_errs(void)
+check_expunge_entry_errs(unsigned paged)
{
herr_t result;
H5F_t * file_ptr = NULL;
test_entry_t * entry_ptr;
- TESTING("expunge entry related errors");
+ if(paged)
+ TESTING("expunge entry related errors (paged aggregation)")
+ else
+ TESTING("expunge entry related errors")
pass = TRUE;
@@ -16693,8 +16830,7 @@ check_expunge_entry_errs(void)
reset_entries();
- file_ptr = setup_cache((size_t)(2 * 1024),
- (size_t)(1 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024), paged);
entry_ptr = &((entries[0])[0]);
@@ -16705,7 +16841,7 @@ check_expunge_entry_errs(void)
if(pass) {
result = H5C_expunge_entry(file_ptr, H5AC_ind_read_dxpl_id,
- &(types[0]), entry_ptr->addr, H5C__NO_FLAGS_SET);
+ types[0], entry_ptr->addr, H5C__NO_FLAGS_SET);
if(result > 0) {
@@ -16723,7 +16859,7 @@ check_expunge_entry_errs(void)
if(pass) {
result = H5C_expunge_entry(file_ptr, H5AC_ind_read_dxpl_id,
- &(types[0]), entry_ptr->addr, H5C__NO_FLAGS_SET);
+ types[0], entry_ptr->addr, H5C__NO_FLAGS_SET);
if(result > 0) {
@@ -16741,7 +16877,7 @@ check_expunge_entry_errs(void)
if(pass) {
result = H5C_expunge_entry(file_ptr, H5AC_ind_read_dxpl_id,
- &(types[0]), entry_ptr->addr, H5C__NO_FLAGS_SET);
+ types[0], entry_ptr->addr, H5C__NO_FLAGS_SET);
if(result < 0) {
@@ -16784,9 +16920,8 @@ check_expunge_entry_errs(void)
*
*-------------------------------------------------------------------------
*/
-
static unsigned
-check_move_entry_errs(void)
+check_move_entry_errs(unsigned paged)
{
herr_t result;
H5F_t * file_ptr = NULL;
@@ -16796,7 +16931,10 @@ check_move_entry_errs(void)
test_entry_t * entry_0_1_ptr;
test_entry_t * entry_1_0_ptr;
- TESTING("move entry related errors");
+ if(paged)
+ TESTING("move entry related errors (paged aggregation)")
+ else
+ TESTING("move entry related errors")
pass = TRUE;
@@ -16806,10 +16944,9 @@ check_move_entry_errs(void)
*/
if(pass) {
-
reset_entries();
- file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024), paged);
cache_ptr = file_ptr->shared->cache;
insert_entry(file_ptr, 0, 0, H5C__NO_FLAGS_SET);
@@ -16819,36 +16956,29 @@ check_move_entry_errs(void)
entry_0_0_ptr = &((entries[0])[0]);
entry_0_1_ptr = &((entries[0])[1]);
entry_1_0_ptr = &((entries[1])[0]);
- }
+ } /* end if */
if(pass) {
-
- result = H5C_move_entry(cache_ptr, &(types[0]),
- entry_0_0_ptr->addr, entry_0_1_ptr->addr);
+ result = H5C_move_entry(cache_ptr, types[0], entry_0_0_ptr->addr, entry_0_1_ptr->addr);
if(result >= 0) {
-
pass = FALSE;
failure_mssg = "move to addr of same type succeeded.\n";
- }
- }
+ } /* end if */
+ } /* end if */
if(pass) {
-
- result = H5C_move_entry(cache_ptr, &(types[0]),
- entry_0_0_ptr->addr, entry_1_0_ptr->addr);
+ result = H5C_move_entry(cache_ptr, types[0], entry_0_0_ptr->addr, entry_1_0_ptr->addr);
if(result >= 0) {
-
pass = FALSE;
failure_mssg = "move to addr of different type succeeded.\n";
- }
- }
+ } /* end if */
+ } /* end if */
if(pass)
takedown_cache(file_ptr, FALSE, FALSE);
-
/* Allocate a cache, protect an entry R/O, and then call
* H5C_move_entry() to move it -- this should fail.
*
@@ -16857,37 +16987,27 @@ check_move_entry_errs(void)
*/
if(pass) {
-
reset_entries();
- file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024));
-
+ file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024), paged);
cache_ptr = file_ptr->shared->cache;
insert_entry(file_ptr, 0, 0, H5C__NO_FLAGS_SET);
-
protect_entry_ro(file_ptr, 0, 0);
entry_ptr = &((entries[0])[0]);
-
- }
+ } /* end if */
if(pass) {
-
- result = H5C_move_entry(cache_ptr, &(types[0]), entry_ptr->header.addr, entry_ptr->header.addr + 10);
+ result = H5C_move_entry(cache_ptr, types[0], entry_ptr->header.addr, entry_ptr->header.addr + 10);
if(result >= 0) {
-
pass = FALSE;
- failure_mssg =
- "Call to H5C_move_entry on a R/O protected entry succeeded.\n";
-
- } else {
-
+ failure_mssg = "Call to H5C_move_entry on a R/O protected entry succeeded.\n";
+ } /* end if */
+ else
unprotect_entry(file_ptr, 0, 0, H5C__NO_FLAGS_SET);
-
- }
- }
+ } /* end if */
if(pass)
takedown_cache(file_ptr, FALSE, FALSE);
@@ -16897,8 +17017,7 @@ check_move_entry_errs(void)
else {
H5_FAILED()
- HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n",
- FUNC, failure_mssg);
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", FUNC, failure_mssg);
} /* end else */
return (unsigned)!pass;
@@ -16920,13 +17039,16 @@ check_move_entry_errs(void)
*/
static unsigned
-check_resize_entry_errs(void)
+check_resize_entry_errs(unsigned paged)
{
herr_t result;
H5F_t * file_ptr = NULL;
test_entry_t * entry_ptr;
- TESTING("resize entry related errors");
+ if(paged)
+ TESTING("resize entry related errors (paged aggregation)")
+ else
+ TESTING("resize entry related errors")
pass = TRUE;
@@ -16945,8 +17067,7 @@ check_resize_entry_errs(void)
reset_entries();
- file_ptr = setup_cache((size_t)(2 * 1024),
- (size_t)(1 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024), paged);
entry_ptr = &((entries[0])[0]);
@@ -17021,13 +17142,16 @@ check_resize_entry_errs(void)
*/
static unsigned
-check_unprotect_ro_dirty_err(void)
+check_unprotect_ro_dirty_err(unsigned paged)
{
herr_t result;
H5F_t * file_ptr = NULL;
test_entry_t * entry_ptr;
- TESTING("unprotect a read only entry dirty error");
+ if(paged)
+ TESTING("unprotect a read only entry dirty error (paged aggregation)")
+ else
+ TESTING("unprotect a read only entry dirty error")
pass = TRUE;
@@ -17040,8 +17164,7 @@ check_unprotect_ro_dirty_err(void)
reset_entries();
- file_ptr = setup_cache((size_t)(2 * 1024),
- (size_t)(1 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024), paged);
protect_entry_ro(file_ptr, 0, 0);
@@ -17082,8 +17205,7 @@ check_unprotect_ro_dirty_err(void)
reset_entries();
- file_ptr = setup_cache((size_t)(2 * 1024),
- (size_t)(1 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024), paged);
protect_entry_ro(file_ptr, 0, 0);
protect_entry_ro(file_ptr, 0, 0);
@@ -17148,13 +17270,16 @@ check_unprotect_ro_dirty_err(void)
*/
static unsigned
-check_protect_ro_rw_err(void)
+check_protect_ro_rw_err(unsigned paged)
{
H5F_t * file_ptr = NULL;
test_entry_t * entry_ptr;
void * thing_ptr = NULL;
- TESTING("protect a read only entry rw error");
+ if(paged)
+ TESTING("protect a read only entry rw error (paged aggregation)")
+ else
+ TESTING("protect a read only entry rw error")
pass = TRUE;
@@ -17168,8 +17293,7 @@ check_protect_ro_rw_err(void)
reset_entries();
- file_ptr = setup_cache((size_t)(2 * 1024),
- (size_t)(1 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024), paged);
protect_entry_ro(file_ptr, 0, 0);
@@ -17179,7 +17303,7 @@ check_protect_ro_rw_err(void)
if(pass) {
thing_ptr = (H5C_cache_entry_t *)H5C_protect(file_ptr, H5AC_ind_read_dxpl_id,
- &(types[0]), entry_ptr->addr,
+ types[0], entry_ptr->addr,
&entry_ptr->addr, H5C__NO_FLAGS_SET);
if(thing_ptr != NULL) {
@@ -17225,7 +17349,7 @@ check_protect_ro_rw_err(void)
*-------------------------------------------------------------------------
*/
static unsigned
-check_protect_retries(void)
+check_protect_retries(unsigned paged)
{
H5F_t * file_ptr = NULL;
H5C_t *cache_ptr = NULL;
@@ -17235,7 +17359,10 @@ check_protect_retries(void)
int32_t type;
int32_t idx;
- TESTING("protect an entry to verify retries");
+ if(paged)
+ TESTING("protect an entry to verify retries (paged aggregation)")
+ else
+ TESTING("protect an entry to verify retries")
pass = TRUE;
@@ -17244,8 +17371,7 @@ check_protect_retries(void)
reset_entries();
- file_ptr = setup_cache((size_t)(2 * 1024),
- (size_t)(1 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024), paged);
/* Set up read attempts for verifying checksum */
file_ptr->shared->read_attempts = 10;
@@ -17272,13 +17398,13 @@ check_protect_retries(void)
entry_ptr->verify_ct = 0;
cache_entry_ptr = (H5C_cache_entry_t *)H5C_protect(file_ptr, H5AC_ind_read_dxpl_id,
- &(types[type]), entry_ptr->addr, &entry_ptr->addr, H5C__READ_ONLY_FLAG);
+ types[type], entry_ptr->addr, &entry_ptr->addr, H5C__READ_ONLY_FLAG);
if((cache_entry_ptr != (void *)entry_ptr) ||
(!(entry_ptr->header.is_protected)) ||
(!(entry_ptr->header.is_read_only)) ||
(entry_ptr->header.ro_ref_count <= 0) ||
- (entry_ptr->header.type != &(types[type])) ||
+ (entry_ptr->header.type != types[type]) ||
(entry_ptr->size != entry_ptr->header.size) ||
(entry_ptr->addr != entry_ptr->header.addr) ||
(entry_ptr->verify_ct != entry_ptr->max_verify_ct)) {
@@ -17317,7 +17443,7 @@ check_protect_retries(void)
entry_ptr->verify_ct = 0;
cache_entry_ptr = (H5C_cache_entry_t *)H5C_protect(file_ptr, H5AC_ind_read_dxpl_id,
- &(types[type]), entry_ptr->addr, &entry_ptr->addr, H5C__READ_ONLY_FLAG);
+ types[type], entry_ptr->addr, &entry_ptr->addr, H5C__READ_ONLY_FLAG);
/* H5C_protect() should fail after all retries fail */
if(cache_entry_ptr != NULL)
@@ -17360,14 +17486,17 @@ check_protect_retries(void)
*/
static unsigned
-check_check_evictions_enabled_err(void)
+check_check_evictions_enabled_err(unsigned paged)
{
herr_t result;
hbool_t evictions_enabled;
H5F_t * file_ptr = NULL;
H5C_t * cache_ptr = NULL;
- TESTING("get/set evictions enabled errors");
+ if(paged)
+ TESTING("get/set evictions enabled errors (paged aggregation)")
+ else
+ TESTING("get/set evictions enabled errors")
pass = TRUE;
@@ -17388,8 +17517,7 @@ check_check_evictions_enabled_err(void)
reset_entries();
- file_ptr = setup_cache((size_t)(2 * 1024),
- (size_t)(1 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024), paged);
cache_ptr = file_ptr->shared->cache;
}
@@ -17512,7 +17640,7 @@ static void test_rpt_fcn(H5_ATTR_UNUSED H5C_t * cache_ptr,
}
static unsigned
-check_auto_cache_resize(hbool_t cork_ageout)
+check_auto_cache_resize(hbool_t cork_ageout, unsigned paged)
{
hbool_t show_progress = FALSE;
herr_t result;
@@ -17566,7 +17694,10 @@ check_auto_cache_resize(hbool_t cork_ageout)
/* double empty_reserve = */ 0.05f
};
- TESTING("automatic cache resizing");
+ if(paged)
+ TESTING("automatic cache resizing (paged aggregation)")
+ else
+ TESTING("automatic cache resizing")
pass = TRUE;
@@ -17580,7 +17711,7 @@ check_auto_cache_resize(hbool_t cork_ageout)
if(pass) {
reset_entries();
- file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024), paged);
cache_ptr = file_ptr->shared->cache;
}
@@ -21582,7 +21713,7 @@ check_auto_cache_resize(hbool_t cork_ageout)
*/
static unsigned
-check_auto_cache_resize_disable(void)
+check_auto_cache_resize_disable(unsigned paged)
{
hbool_t show_progress = FALSE;
herr_t result;
@@ -21636,7 +21767,10 @@ check_auto_cache_resize_disable(void)
/* double empty_reserve = */ 0.05f
};
- TESTING("automatic cache resize disable");
+ if(paged)
+ TESTING("automatic cache resize disable (paged aggregation)")
+ else
+ TESTING("automatic cache resize disable")
pass = TRUE;
@@ -21651,8 +21785,7 @@ check_auto_cache_resize_disable(void)
reset_entries();
- file_ptr = setup_cache((size_t)(2 * 1024),
- (size_t)(1 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024), paged);
if(file_ptr == NULL) {
@@ -24307,7 +24440,7 @@ check_auto_cache_resize_disable(void)
*/
static unsigned
-check_auto_cache_resize_epoch_markers(void)
+check_auto_cache_resize_epoch_markers(unsigned paged)
{
hbool_t show_progress = FALSE;
herr_t result;
@@ -24362,7 +24495,10 @@ check_auto_cache_resize_epoch_markers(void)
/* double empty_reserve = */ 0.05f
};
- TESTING("automatic cache resize epoch marker management");
+ if(paged)
+ TESTING("automatic cache resize epoch marker management (paged aggr)")
+ else
+ TESTING("automatic cache resize epoch marker management")
pass = TRUE;
@@ -24372,8 +24508,7 @@ check_auto_cache_resize_epoch_markers(void)
reset_entries();
- file_ptr = setup_cache((size_t)(2 * 1024),
- (size_t)(1 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024), paged);
cache_ptr = file_ptr->shared->cache;
}
@@ -25016,7 +25151,7 @@ check_auto_cache_resize_epoch_markers(void)
*/
static unsigned
-check_auto_cache_resize_input_errs(void)
+check_auto_cache_resize_input_errs(unsigned paged)
{
herr_t result;
H5F_t * file_ptr = NULL;
@@ -25070,7 +25205,10 @@ check_auto_cache_resize_input_errs(void)
H5C_auto_size_ctl_t invalid_auto_size_ctl;
H5C_auto_size_ctl_t test_auto_size_ctl;
- TESTING("automatic cache resize input errors");
+ if(paged)
+ TESTING("automatic cache resize input errors (paged aggregation)")
+ else
+ TESTING("automatic cache resize input errors")
pass = TRUE;
@@ -25084,8 +25222,7 @@ check_auto_cache_resize_input_errs(void)
reset_entries();
- file_ptr = setup_cache((size_t)(2 * 1024),
- (size_t)(1 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024), paged);
cache_ptr = file_ptr->shared->cache;
}
@@ -27423,7 +27560,7 @@ check_auto_cache_resize_input_errs(void)
*/
static unsigned
-check_auto_cache_resize_aux_fcns(void)
+check_auto_cache_resize_aux_fcns(unsigned paged)
{
herr_t result;
int32_t i;
@@ -27433,7 +27570,7 @@ check_auto_cache_resize_aux_fcns(void)
size_t max_size;
size_t min_clean_size;
size_t cur_size;
- int32_t cur_num_entries;
+ uint32_t cur_num_entries;
H5C_auto_size_ctl_t auto_size_ctl =
{
/* int32_t version = */ H5C__CURR_AUTO_SIZE_CTL_VER,
@@ -27484,7 +27621,10 @@ check_auto_cache_resize_aux_fcns(void)
};
- TESTING("automatic cache resize auxilary functions");
+ if(paged)
+ TESTING("automatic cache resize auxilary functions (paged aggregation)")
+ else
+ TESTING("automatic cache resize auxilary functions")
pass = TRUE;
@@ -27495,8 +27635,7 @@ check_auto_cache_resize_aux_fcns(void)
reset_entries();
- file_ptr = setup_cache((size_t)(2 * 1024),
- (size_t)(1 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024), paged);
cache_ptr = file_ptr->shared->cache;
}
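
The entry-count output above switches from int32_t to uint32_t, matching the (assumed) updated signature of the size-reporting auxiliary function this test exercises:

    size_t   max_size        = 0;
    size_t   min_clean_size  = 0;
    size_t   cur_size        = 0;
    uint32_t cur_num_entries = 0;

    if (H5C_get_cache_size(cache_ptr, &max_size, &min_clean_size,
                           &cur_size, &cur_num_entries) < 0) {
        pass = FALSE;
        failure_mssg = "H5C_get_cache_size() failed.\n";
    }
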
@@ -27968,7 +28107,7 @@ check_auto_cache_resize_aux_fcns(void)
*/
static unsigned
-check_metadata_blizzard_absence(hbool_t fill_via_insertion)
+check_metadata_blizzard_absence(hbool_t fill_via_insertion, unsigned paged)
{
int entry_type = HUGE_ENTRY_TYPE;
size_t entry_size = HUGE_ENTRY_SIZE; /* 16 KB */
@@ -28148,17 +28287,20 @@ check_metadata_blizzard_absence(hbool_t fill_via_insertion)
reset_entries();
if(fill_via_insertion) {
-
- TESTING("to ensure metadata blizzard absence when inserting");
-
- } else {
-
- TESTING("to ensure metadata blizzard absence on protect/unprotect");
- }
+ if(paged)
+ TESTING("to ensure metadata blizzard absence when inserting (pgd aggr)")
+ else
+ TESTING("to ensure metadata blizzard absence when inserting")
+ } /* end if */
+ else {
+ if(paged)
+ TESTING("to ensure metadata blizzard absence on protect/unprotect (pa)")
+ else
+ TESTING("to ensure metadata blizzard absence on protect/unprotect")
+ } /* end else */
if(show_progress) /* 0 */
- HDfprintf(stdout, "\n%s: check point %d -- pass %d\n",
- FUNC, checkpoint++, pass);
+ HDfprintf(stdout, "\n%s: check point %d -- pass %d\n", FUNC, checkpoint++, pass);
if(pass) {
@@ -28167,8 +28309,7 @@ check_metadata_blizzard_absence(hbool_t fill_via_insertion)
* The max_cache_size should have room for 50 entries.
* The min_clean_size is half of that, or 25 entries.
*/
- file_ptr = setup_cache((size_t)(50 * entry_size), /* max_cache_size */
- (size_t)(25 * entry_size)); /* min_clean_size */
+ file_ptr = setup_cache((size_t)(50 * entry_size), (size_t)(25 * entry_size), paged);
if(file_ptr == NULL) {
@@ -28812,7 +28953,7 @@ check_metadata_blizzard_absence(hbool_t fill_via_insertion)
*/
static unsigned
-check_flush_deps(void)
+check_flush_deps(unsigned paged)
{
H5F_t * file_ptr = NULL; /* File for this test */
H5C_t * cache_ptr = NULL; /* Metadata cache for this test */
@@ -28830,7 +28971,10 @@ check_flush_deps(void)
{ PICO_ENTRY_TYPE, 4, PICO_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE}
};
- TESTING("flush dependencies");
+ if(paged)
+ TESTING("flush dependencies (paged aggregation)")
+ else
+ TESTING("flush dependencies")
pass = TRUE;
@@ -28839,7 +28983,7 @@ check_flush_deps(void)
*/
reset_entries();
- file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024), paged);
cache_ptr = file_ptr->shared->cache;
base_addr = entries[entry_type];
@@ -28877,7 +29021,7 @@ check_flush_deps(void)
/* Check the parent's entry status */
entry_ptr = &(base_addr[1]);
if(H5C_get_entry_status(file_ptr, entry_ptr->addr, NULL, &in_cache,
- NULL, NULL, NULL, NULL, &is_flush_dep_parent, &is_flush_dep_child) < 0)
+ NULL, NULL, NULL, NULL, &is_flush_dep_parent, &is_flush_dep_child, NULL) < 0)
CACHE_ERROR("H5C_get_entry_status() failed")
if(!in_cache || is_flush_dep_parent || is_flush_dep_child)
CACHE_ERROR("invalid entry status")
@@ -28885,7 +29029,7 @@ check_flush_deps(void)
/* Check the child's entry status */
entry_ptr = &(base_addr[0]);
if(H5C_get_entry_status(file_ptr, entry_ptr->addr, NULL, &in_cache,
- NULL, NULL, NULL, NULL, &is_flush_dep_parent, &is_flush_dep_child) < 0)
+ NULL, NULL, NULL, NULL, &is_flush_dep_parent, &is_flush_dep_child, NULL) < 0)
CACHE_ERROR("H5C_get_entry_status() failed")
if(!in_cache || is_flush_dep_parent || is_flush_dep_child)
CACHE_ERROR("invalid entry status")
@@ -28896,7 +29040,7 @@ check_flush_deps(void)
/* Check the parent's entry status */
entry_ptr = &(base_addr[1]);
if(H5C_get_entry_status(file_ptr, entry_ptr->addr, NULL, &in_cache,
- NULL, NULL, NULL, NULL, &is_flush_dep_parent, &is_flush_dep_child) < 0)
+ NULL, NULL, NULL, NULL, &is_flush_dep_parent, &is_flush_dep_child, NULL) < 0)
CACHE_ERROR("H5C_get_entry_status() failed")
if(!in_cache || !is_flush_dep_parent || is_flush_dep_child)
CACHE_ERROR("invalid entry status")
@@ -28904,7 +29048,7 @@ check_flush_deps(void)
/* Check the child's entry status */
entry_ptr = &(base_addr[0]);
if(H5C_get_entry_status(file_ptr, entry_ptr->addr, NULL, &in_cache,
- NULL, NULL, NULL, NULL, &is_flush_dep_parent, &is_flush_dep_child) < 0)
+ NULL, NULL, NULL, NULL, &is_flush_dep_parent, &is_flush_dep_child, NULL) < 0)
CACHE_ERROR("H5C_get_entry_status() failed")
if(!in_cache || is_flush_dep_parent || !is_flush_dep_child)
CACHE_ERROR("invalid entry status")
@@ -30548,13 +30692,16 @@ done:
*/
static unsigned
-check_flush_deps_err(void)
+check_flush_deps_err(unsigned paged)
{
H5F_t * file_ptr = NULL; /* File for this test */
int entry_type = PICO_ENTRY_TYPE; /* Use very small entry size (size of entries doesn't matter) */
unsigned test_count; /* Test iteration variable */
- TESTING("flush dependency errors");
+ if(paged)
+ TESTING("flush dependency errors (paged aggregation)")
+ else
+ TESTING("flush dependency errors")
pass = TRUE;
@@ -30567,7 +30714,7 @@ check_flush_deps_err(void)
/* Allocate a cache */
reset_entries();
- file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024), paged);
if(!pass) CACHE_ERROR("setup_cache failed")
/* Insert entries to work with into the cache */
@@ -30769,7 +30916,7 @@ done:
*/
static unsigned
-check_flush_deps_order(void)
+check_flush_deps_order(unsigned paged)
{
H5F_t * file_ptr = NULL; /* File for this test */
H5C_t * cache_ptr = NULL; /* Metadata cache for this test */
@@ -30787,7 +30934,10 @@ check_flush_deps_order(void)
};
unsigned flush_order; /* Index for tracking flush order */
- TESTING("flush dependencies flush order");
+ if(paged)
+ TESTING("flush dependencies flush order (paged aggregation)")
+ else
+ TESTING("flush dependencies flush order")
pass = TRUE;
@@ -30796,7 +30946,7 @@ check_flush_deps_order(void)
*/
reset_entries();
- file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024), paged);
cache_ptr = file_ptr->shared->cache;
if(!pass) CACHE_ERROR("setup_cache failed")
@@ -33337,7 +33487,7 @@ done:
*/
static unsigned
-check_notify_cb(void)
+check_notify_cb(unsigned paged)
{
H5F_t * file_ptr = NULL; /* File for this test */
H5C_t * cache_ptr = NULL; /* Metadata cache for this test */
@@ -33356,7 +33506,10 @@ check_notify_cb(void)
{ NOTIFY_ENTRY_TYPE, 4, NOTIFY_ENTRY_SIZE, FALSE, TRUE, FALSE, FALSE, FALSE, FALSE, FALSE, FALSE, {0,0,0,0,0,0,0,0}, {0,0,0,0,0,0,0,0}, 0, 0, 0, -1, FALSE}
};
- TESTING("'notify' callback");
+ if(paged)
+ TESTING("'notify' callback (paged)")
+ else
+ TESTING("'notify' callback")
pass = TRUE;
@@ -33365,7 +33518,7 @@ check_notify_cb(void)
*/
reset_entries();
- file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024), (size_t)(1 * 1024), paged);
if(!file_ptr) CACHE_ERROR("setup_cache returned NULL")
cache_ptr = file_ptr->shared->cache;
base_addr = entries[entry_type];
@@ -33538,7 +33691,7 @@ done:
*-------------------------------------------------------------------------
*/
static unsigned
-check_metadata_cork(hbool_t fill_via_insertion)
+check_metadata_cork(hbool_t fill_via_insertion, unsigned paged)
{
const char * fcn_name = "check_metadata_cork";
int entry_type = HUGE_ENTRY_TYPE;
@@ -33718,14 +33871,10 @@ check_metadata_cork(hbool_t fill_via_insertion)
reset_entries();
- if(fill_via_insertion) {
-
- TESTING("to ensure cork/uncork metadata when inserting");
-
- } else {
-
- TESTING("to ensure cork/uncork metadata on protect/unprotect");
- }
+ if(fill_via_insertion)
+ TESTING("to ensure cork/uncork metadata when inserting")
+ else
+ TESTING("to ensure cork/uncork metadata on protect/unprotect")
if(show_progress) /* 0 */
HDfprintf(stdout, "\n%s: check point %d -- pass %d\n",
@@ -33738,8 +33887,7 @@ check_metadata_cork(hbool_t fill_via_insertion)
* The max_cache_size should have room for 50 entries.
* The min_clean_size is half of that, or 25 entries.
*/
- file_ptr = setup_cache((size_t)(50 * entry_size), /* max_cache_size */
- (size_t)(25 * entry_size)); /* min_clean_size */
+ file_ptr = setup_cache((size_t)(50 * entry_size), (size_t)(25 * entry_size), paged);
if(file_ptr == NULL) {
@@ -34288,11 +34436,14 @@ check_metadata_cork(hbool_t fill_via_insertion)
*-------------------------------------------------------------------------
*/
static unsigned
-check_entry_deletions_during_scans(void)
+check_entry_deletions_during_scans(unsigned paged)
{
H5F_t * file_ptr = NULL;
- TESTING("entry deletion during list scan detection and adaption");
+ if(paged)
+ TESTING("entry deletion during list scan detection and adaption (par)")
+ else
+ TESTING("entry deletion during list scan detection and adaption")
pass = TRUE;
@@ -34305,8 +34456,7 @@ check_entry_deletions_during_scans(void)
reset_entries();
- file_ptr = setup_cache((size_t)(2 * 1024 * 1024),
- (size_t)(1 * 1024 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024 * 1024), (size_t)(1 * 1024 * 1024), paged);
}
/* run the tests. This set of tests is somewhat eclectic, as
@@ -34532,8 +34682,7 @@ cedds__expunge_dirty_entry_in_flush_test(H5F_t * file_ptr)
/* If we are collecting stats, check to see if we get the expected
* values.
*/
- if(pass) {
-
+ if(pass)
if((cache_ptr->insertions[HUGE_ENTRY_TYPE] != 0) ||
(cache_ptr->pinned_insertions[HUGE_ENTRY_TYPE] != 0) ||
(cache_ptr->clears[HUGE_ENTRY_TYPE] != 1) ||
@@ -34555,31 +34704,23 @@ cedds__expunge_dirty_entry_in_flush_test(H5F_t * file_ptr)
pass = FALSE;
failure_mssg = "Unexpected huge size entry stats in cedds__expunge_dirty_entry_in_flush_test().";
- }
- }
-
- if(pass) {
+ } /* end if */
+ if(pass)
if((cache_ptr->slist_scan_restarts != 1) ||
(cache_ptr->LRU_scan_restarts != 0) ||
- (cache_ptr->hash_bucket_scan_restarts != 0)) {
-
+ (cache_ptr->index_scan_restarts != 0)) {
pass = FALSE;
failure_mssg = "unexpected scan restart stats in cedds__expunge_dirty_entry_in_flush_test().";
- }
- }
+ } /* end if */
#endif /* H5C_COLLECT_CACHE_STATS */
- if(pass) {
-
- reset_entries();
- }
-
- if(pass) {
+ if(pass)
+ reset_entries();
- /* reset cache min clean size to its expected value */
+ if(pass)
+ /* reset cache min clean size to its expected value */
cache_ptr->min_clean_size = (1 * 1024 * 1024);
- }
return;
@@ -34589,7 +34730,7 @@ cedds__expunge_dirty_entry_in_flush_test(H5F_t * file_ptr)
/*-------------------------------------------------------------------------
* Function: cedds__H5C_make_space_in_cache()
*
- * Purpose: Verify that H5C_make_space_in_cache() can handle the
+ * Purpose: Verify that H5C__make_space_in_cache() can handle the
* removal from the cache of the next item in its reverse scan
* of the LRU list.
*
@@ -34599,7 +34740,7 @@ cedds__expunge_dirty_entry_in_flush_test(H5F_t * file_ptr)
* load an additional entry, triggering the flush of the last
* item, and thereby the deletion of the second to last item.
*
- * H5C_make_space_in_cache() should detect this deletion, and
+ * H5C__make_space_in_cache() should detect this deletion, and
* restart its scan of the LRU from the tail, instead of
* examining the now deleted next item up on the LRU.
*
@@ -34677,7 +34818,7 @@ cedds__H5C_make_space_in_cache(H5F_t * file_ptr)
if(cache_ptr == NULL) {
pass = FALSE;
- failure_mssg = "cache_ptr NULL on entry to cedds for H5C_make_space_in_cache() test.";
+ failure_mssg = "cache_ptr NULL on entry to cedds for H5C__make_space_in_cache() test.";
}
else if((cache_ptr->index_len != 0) ||
(cache_ptr->index_size != 0)) {
@@ -34817,7 +34958,7 @@ cedds__H5C_make_space_in_cache(H5F_t * file_ptr)
* and HET 0, 2, and 3 will be evicted to make room for the new
* monster entry (MET, 31).
*
- * Verify this. If H5C_make_space_in_cache() chokes, failure will
+ * Verify this. If H5C__make_space_in_cache() chokes, failure will
* be detected in protect_entry(). Thus end the "if(pass)" clause
* there so the error message will not be overwritten.
*/
@@ -34920,8 +35061,7 @@ cedds__H5C_make_space_in_cache(H5F_t * file_ptr)
}
}
- if(pass) {
-
+ if(pass)
if((cache_ptr->insertions[MONSTER_ENTRY_TYPE] != 0) ||
(cache_ptr->pinned_insertions[MONSTER_ENTRY_TYPE] != 0) ||
(cache_ptr->clears[MONSTER_ENTRY_TYPE] != 0) ||
@@ -34943,31 +35083,25 @@ cedds__H5C_make_space_in_cache(H5F_t * file_ptr)
pass = FALSE;
failure_mssg = "Unexpected monster entry stats in cedds__H5C_make_space_in_cache().";
- }
- }
-
- if(pass) {
+ } /* end if */
+ if(pass)
if((cache_ptr->slist_scan_restarts != 0) ||
(cache_ptr->LRU_scan_restarts != 1) ||
- (cache_ptr->hash_bucket_scan_restarts != 0)) {
+ (cache_ptr->index_scan_restarts != 0)) {
pass = FALSE;
failure_mssg = "unexpected scan restart stats in cedds__H5C_make_space_in_cache().";
- }
- }
-#endif /* H5C_COLLECT_CACHE_STATS */
-
- if(pass) {
+ } /* end if */
- reset_entries();
- }
+#endif /* H5C_COLLECT_CACHE_STATS */
- if(pass) {
+ if(pass)
+ reset_entries();
- /* reset cache min clean size to its expected value */
+ if(pass)
+ /* reset cache min clean size to its expected value */
cache_ptr->min_clean_size = (1 * 1024 * 1024);
- }
return;
@@ -34987,7 +35121,7 @@ cedds__H5C_make_space_in_cache(H5F_t * file_ptr)
* access the first item in the LRU repeatedly until the
* item, and thereby the deletion of the second to last item.
*
- * H5C_make_space_in_cache() should detect this deletion, and
+ * H5C__make_space_in_cache() should detect this deletion, and
* restart its scan of the LRU from the tail, instead of
* examining the now deleted next item up on the LRU.
*
@@ -35337,8 +35471,7 @@ cedds__H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * file_ptr)
* values.
*/
- if(pass) {
-
+ if(pass)
if((cache_ptr->insertions[MONSTER_ENTRY_TYPE] != 0) ||
(cache_ptr->pinned_insertions[MONSTER_ENTRY_TYPE] != 0) ||
(cache_ptr->clears[MONSTER_ENTRY_TYPE] != 0) ||
@@ -35360,31 +35493,25 @@ cedds__H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * file_ptr)
pass = FALSE;
failure_mssg = "Unexpected monster entry stats in cedds__H5C__autoadjust__ageout__evict_aged_out_entries().";
- }
- }
-
- if(pass) {
+ } /* end if */
+ if(pass)
if((cache_ptr->slist_scan_restarts != 0) ||
(cache_ptr->LRU_scan_restarts != 1) ||
- (cache_ptr->hash_bucket_scan_restarts != 0)) {
+ (cache_ptr->index_scan_restarts != 0)) {
pass = FALSE;
failure_mssg = "unexpected scan restart stats in cedds__H5C__autoadjust__ageout__evict_aged_out_entries().";
- }
- }
-#endif /* H5C_COLLECT_CACHE_STATS */
-
- if(pass) {
+ } /* end if */
- reset_entries();
- }
+#endif /* H5C_COLLECT_CACHE_STATS */
- if(pass) {
+ if(pass)
+ reset_entries();
- /* reset cache min clean size to its expected value */
+ if(pass)
+ /* reset cache min clean size to its expected value */
cache_ptr->min_clean_size = (1 * 1024 * 1024);
- }
return;
@@ -35394,7 +35521,26 @@ cedds__H5C__autoadjust__ageout__evict_aged_out_entries(H5F_t * file_ptr)
/*-------------------------------------------------------------------------
* Function: cedds__H5C_flush_invalidate_cache__bucket_scan()
*
- * Purpose: Verify that H5C_flush_invalidate_cache() can handle
+ * Purpose: Note: We now use the index list when we scan the
+ * contents of the metadata cache, so in principle,
+ * this test is obsolete. However, even using the
+ * index list, restarts are possible, and must be
+ * handled gracefully.
+ *
+ * As it turns out, this test triggers index list
+ * scan restarts, and thus with minor changes is
+ * still a useful test.
+ *
+ * For this reason, with the exception of changing
+ * to check the index_scan_restart stat instead of
+ * hash bucket restarts, I'm leaving the test
+ * alone. If and when it starts to fail due to
+ * other changes, we can re-work it to test
+ * index list scan restarts explicitly.
+ *
+ * JRM -- 11/2/16
+ *
+ * Verify that H5C_flush_invalidate_cache() can handle
* the removal from the cache of the next item in
* its scans of hash buckets.
*
@@ -35496,20 +35642,20 @@ cedds__H5C_flush_invalidate_cache__bucket_scan(H5F_t * file_ptr)
if(cache_ptr == NULL) {
pass = FALSE;
- failure_mssg = "cache_ptr NULL on entry to cedds for H5C__autoadjust__ageout__evict_aged_out_entries() test.";
+ failure_mssg = "cache_ptr NULL on entry to cedds for cedds__H5C_flush_invalidate_cache__bucket_scan() test.";
}
else if((cache_ptr->index_len != 0) ||
(cache_ptr->index_size != 0)) {
pass = FALSE;
- failure_mssg = "cache not empty at start cedds for H5C__autoadjust__ageout__evict_aged_out_entries() test.";
+ failure_mssg = "cache not empty at start cedds for cedds__H5C_flush_invalidate_cache__bucket_scan() test.";
}
else if((cache_ptr->max_cache_size != (2 * 1024 * 1024)) ||
(cache_ptr->min_clean_size != (1 * 1024 * 1024))) {
pass = FALSE;
failure_mssg =
- "unexpected cache config at start of cedds H5C__autoadjust__ageout__evict_aged_out_entries() test.";
+ "unexpected cache config at start of cedds cedds__H5C_flush_invalidate_cache__bucket_scan() test.";
} else {
@@ -35712,8 +35858,7 @@ cedds__H5C_flush_invalidate_cache__bucket_scan(H5F_t * file_ptr)
* values.
*/
- if(pass) {
-
+ if(pass)
if((cache_ptr->insertions[MONSTER_ENTRY_TYPE] != 0) ||
(cache_ptr->pinned_insertions[MONSTER_ENTRY_TYPE] != 0) ||
(cache_ptr->clears[MONSTER_ENTRY_TYPE] != 0) ||
@@ -35735,31 +35880,28 @@ cedds__H5C_flush_invalidate_cache__bucket_scan(H5F_t * file_ptr)
pass = FALSE;
failure_mssg = "Unexpected monster entry stats in cedds__H5C_flush_invalidate_cache__bucket_scan().";
- }
- }
-
- if(pass) {
+ } /* end if */
+ if(pass)
+ /* as this test is now checking for index list scan restarts,
+ * the following has been modified to check this instead of
+ * hash bucket scan restarts.
+ */
if((cache_ptr->slist_scan_restarts != 0) ||
(cache_ptr->LRU_scan_restarts != 0) ||
- (cache_ptr->hash_bucket_scan_restarts != 1)) {
-
+ (cache_ptr->index_scan_restarts != 1)) {
pass = FALSE;
failure_mssg = "unexpected scan restart stats in cedds__H5C_flush_invalidate_cache__bucket_scan().";
}
- }
-#endif /* H5C_COLLECT_CACHE_STATS */
-
- if(pass) {
- reset_entries();
- }
+#endif /* H5C_COLLECT_CACHE_STATS */
- if(pass) {
+ if(pass)
+ reset_entries();
- /* reset cache min clean size to its expected value */
+ if(pass)
+ /* reset cache min clean size to its expected value */
cache_ptr->min_clean_size = (1 * 1024 * 1024);
- }
return;
@@ -35788,7 +35930,7 @@ cedds__H5C_flush_invalidate_cache__bucket_scan(H5F_t * file_ptr)
*/
static unsigned
-check_stats(void)
+check_stats(unsigned paged)
{
#if H5C_COLLECT_CACHE_STATS
@@ -35797,7 +35939,10 @@ check_stats(void)
#endif /* H5C_COLLECT_CACHE_STATS */
- TESTING("metadata cache statistics collection");
+ if(paged)
+ TESTING("metadata cache statistics collection (paged aggregation)")
+ else
+ TESTING("metadata cache statistics collection")
#if H5C_COLLECT_CACHE_STATS
@@ -35805,8 +35950,7 @@ check_stats(void)
reset_entries();
- file_ptr = setup_cache((size_t)(2 * 1024 * 1024),
- (size_t)(1 * 1024 * 1024));
+ file_ptr = setup_cache((size_t)(2 * 1024 * 1024), (size_t)(1 * 1024 * 1024), paged);
if(pass) {
@@ -35876,46 +36020,39 @@ check_stats__smoke_check_1(H5F_t * file_ptr)
herr_t result;
if(pass) {
-
- if(cache_ptr == NULL) {
+ if(cache_ptr == NULL) {
pass = FALSE;
failure_mssg = "cache_ptr NULL on entry to check_stats__smoke_check_1().";
- }
+ } /* end if */
else if((cache_ptr->index_len != 0) ||
(cache_ptr->index_size != 0)) {
pass = FALSE;
failure_mssg = "cache not empty on entry to check_stats__smoke_check_1().";
- }
+ } /* end else-if */
else if((cache_ptr->max_cache_size != (2 * 1024 * 1024)) ||
(cache_ptr->min_clean_size != (1 * 1024 * 1024))) {
pass = FALSE;
- failure_mssg =
- "unexpected cache config at start of check_stats__smoke_check_1().";
+ failure_mssg = "unexpected cache config at start of check_stats__smoke_check_1().";
- } else {
+ } /* end else-if */
+ else {
/* set min clean size to zero for this test as it simplifies
* computing the expected cache size after each operation.
*/
-
cache_ptr->min_clean_size = 0;
- }
- }
-
- if(pass) {
+ } /* end else */
+ } /* end if */
+ if(pass)
/* first fill the cache with monster entryies via insertion */
-
- for (i = 0; i < 32; i++)
-
+ for(i = 0; i < 32; i++)
insert_entry(file_ptr, MONSTER_ENTRY_TYPE, i, H5C__NO_FLAGS_SET);
- }
-
- if(pass) {
+ if(pass)
if((cache_ptr->hits[MONSTER_ENTRY_TYPE] != 0) ||
(cache_ptr->misses[MONSTER_ENTRY_TYPE] != 0) ||
(cache_ptr->write_protects[MONSTER_ENTRY_TYPE] != 0) ||
@@ -35942,11 +36079,9 @@ check_stats__smoke_check_1(H5F_t * file_ptr)
pass = FALSE;
failure_mssg = "Unexpected monster size entry stats in check_stats__smoke_check_1(1).";
- }
- }
-
- if(pass) {
+ } /* end if */
+ if(pass)
if((cache_ptr->total_ht_insertions != 32) ||
(cache_ptr->total_ht_deletions != 0) ||
(cache_ptr->successful_ht_searches != 0) ||
@@ -35971,18 +36106,15 @@ check_stats__smoke_check_1(H5F_t * file_ptr)
(cache_ptr->entries_scanned_to_make_space != 0) ||
(cache_ptr->slist_scan_restarts != 0) ||
(cache_ptr->LRU_scan_restarts != 0) ||
- (cache_ptr->hash_bucket_scan_restarts != 0)) {
+ (cache_ptr->index_scan_restarts != 0)) {
pass = FALSE;
failure_mssg = "Unexpected cache stats in check_stats__smoke_check_1(1).";
- }
- }
+ } /* end if */
#if H5C_COLLECT_CACHE_ENTRY_STATS
- if(pass) {
-
+ if(pass)
/* Note that most entry level stats are only updated on entry eviction */
-
if((cache_ptr->max_accesses[MONSTER_ENTRY_TYPE] != 0) ||
(cache_ptr->min_accesses[MONSTER_ENTRY_TYPE] != 1000000) || /* initial value */
(cache_ptr->max_clears[MONSTER_ENTRY_TYPE] != 0) ||
@@ -35992,26 +36124,20 @@ check_stats__smoke_check_1(H5F_t * file_ptr)
pass = FALSE;
failure_mssg = "Unexpected monster entry level stats in check_stats__smoke_check_1(1).";
- }
- }
+ } /* end if */
#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */
- if(pass) {
-
+ if(pass)
/* protect and unprotect each entry once. Note
* that all entries are already dirty, as they
* entered the cache via insertion
*/
-
- for (i = 0; i < 32; i++)
- {
+ for(i = 0; i < 32; i++) {
protect_entry(file_ptr, MONSTER_ENTRY_TYPE, i);
unprotect_entry(file_ptr, MONSTER_ENTRY_TYPE, i, H5C__NO_FLAGS_SET);
- }
- }
-
- if(pass) {
+ } /* end for */
+ if(pass)
if((cache_ptr->hits[MONSTER_ENTRY_TYPE] != 32) ||
(cache_ptr->misses[MONSTER_ENTRY_TYPE] != 0) ||
(cache_ptr->write_protects[MONSTER_ENTRY_TYPE] != 32) ||
@@ -36038,11 +36164,9 @@ check_stats__smoke_check_1(H5F_t * file_ptr)
pass = FALSE;
failure_mssg = "Unexpected monster size entry stats in check_stats__smoke_check_1(2).";
- }
- }
-
- if(pass) {
+ } /* end if */
+ if(pass)
if((cache_ptr->total_ht_insertions != 32) ||
(cache_ptr->total_ht_deletions != 0) ||
(cache_ptr->successful_ht_searches != 32) ||
@@ -36067,18 +36191,15 @@ check_stats__smoke_check_1(H5F_t * file_ptr)
(cache_ptr->entries_scanned_to_make_space != 0) ||
(cache_ptr->slist_scan_restarts != 0) ||
(cache_ptr->LRU_scan_restarts != 0) ||
- (cache_ptr->hash_bucket_scan_restarts != 0)) {
+ (cache_ptr->index_scan_restarts != 0)) {
pass = FALSE;
failure_mssg = "Unexpected cache stats in check_stats__smoke_check_1(2).";
- }
- }
+ } /* end if */
#if H5C_COLLECT_CACHE_ENTRY_STATS
- if(pass) {
-
+ if(pass)
/* Note that most entry level stats are only updated on entry eviction */
-
if((cache_ptr->max_accesses[MONSTER_ENTRY_TYPE] != 0) ||
(cache_ptr->min_accesses[MONSTER_ENTRY_TYPE] != 1000000) || /* initial value */
(cache_ptr->max_clears[MONSTER_ENTRY_TYPE] != 0) ||
@@ -36088,12 +36209,10 @@ check_stats__smoke_check_1(H5F_t * file_ptr)
pass = FALSE;
failure_mssg = "Unexpected monster entry level stats in check_stats__smoke_check_1(2).";
- }
- }
+ } /* end if */
#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */
if(pass) {
-
/* protect and unprotect an entry that is not currently
* in the cache. Since the cache is full and all entries
* are dirty, this will force a flush of each entry, and
@@ -36101,10 +36220,9 @@ check_stats__smoke_check_1(H5F_t * file_ptr)
*/
protect_entry(file_ptr, MONSTER_ENTRY_TYPE, 32);
unprotect_entry(file_ptr, MONSTER_ENTRY_TYPE, 32, H5C__DIRTIED_FLAG);
- }
-
- if(pass) {
+ } /* end if */
+ if(pass)
if((cache_ptr->hits[MONSTER_ENTRY_TYPE] != 32) ||
(cache_ptr->misses[MONSTER_ENTRY_TYPE] != 1) ||
(cache_ptr->write_protects[MONSTER_ENTRY_TYPE] != 33) ||
@@ -36131,11 +36249,9 @@ check_stats__smoke_check_1(H5F_t * file_ptr)
pass = FALSE;
failure_mssg = "Unexpected monster size entry stats in check_stats__smoke_check_1(3).";
- }
- }
-
- if(pass) {
+ } /* end if */
+ if(pass)
if((cache_ptr->total_ht_insertions != 33) ||
(cache_ptr->total_ht_deletions != 1) ||
(cache_ptr->successful_ht_searches != 32) ||
@@ -36160,18 +36276,15 @@ check_stats__smoke_check_1(H5F_t * file_ptr)
(cache_ptr->entries_scanned_to_make_space != 33) ||
(cache_ptr->slist_scan_restarts != 0) ||
(cache_ptr->LRU_scan_restarts != 0) ||
- (cache_ptr->hash_bucket_scan_restarts != 0)) {
+ (cache_ptr->index_scan_restarts != 0)) {
pass = FALSE;
failure_mssg = "Unexpected cache stats in check_stats__smoke_check_1(3).";
- }
- }
+ } /* end if */
#if H5C_COLLECT_CACHE_ENTRY_STATS
- if(pass) {
-
+ if(pass)
/* Note that most entry level stats are only updated on entry eviction */
-
if((cache_ptr->max_accesses[MONSTER_ENTRY_TYPE] != 1) ||
(cache_ptr->min_accesses[MONSTER_ENTRY_TYPE] != 1) ||
(cache_ptr->max_clears[MONSTER_ENTRY_TYPE] != 0) ||
@@ -36181,18 +36294,17 @@ check_stats__smoke_check_1(H5F_t * file_ptr)
pass = FALSE;
failure_mssg = "Unexpected monster entry level stats in check_stats__smoke_check_1(3).";
- }
- }
+ } /* end if */
+
#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */
if(pass) {
-
/* protect and unprotect dirty (MET, 1), and then flush destroy
* the cache.
*/
protect_entry(file_ptr, MONSTER_ENTRY_TYPE, 1);
unprotect_entry(file_ptr, MONSTER_ENTRY_TYPE, 1, H5C__DIRTIED_FLAG);
- }
+ } /* end if */
/* flush the cache to end the test and collect all entry stats */
@@ -36204,18 +36316,17 @@ check_stats__smoke_check_1(H5F_t * file_ptr)
pass = FALSE;
failure_mssg = "Cache flush invalidate failed in check_stats__smoke_check_1()";
- }
+ } /* end if */
else if((cache_ptr->index_len != 0) ||
(cache_ptr->index_size != 0)) {
pass = FALSE;
failure_mssg = "Unexpected cache len/size after check_stats__smoke_check_1()";
- }
- }
-
- if(pass) {
+ } /* end else-if */
+ } /* end if */
+ if(pass)
if((cache_ptr->hits[MONSTER_ENTRY_TYPE] != 33) ||
(cache_ptr->misses[MONSTER_ENTRY_TYPE] != 1) ||
(cache_ptr->write_protects[MONSTER_ENTRY_TYPE] != 34) ||
@@ -36242,11 +36353,9 @@ check_stats__smoke_check_1(H5F_t * file_ptr)
pass = FALSE;
failure_mssg = "Unexpected monster size entry stats in check_stats__smoke_check_1(4).";
- }
- }
-
- if(pass) {
+ } /* end if */
+ if(pass)
if((cache_ptr->total_ht_insertions != 33) ||
(cache_ptr->total_ht_deletions != 33) ||
(cache_ptr->successful_ht_searches != 33) ||
@@ -36271,18 +36380,15 @@ check_stats__smoke_check_1(H5F_t * file_ptr)
(cache_ptr->entries_scanned_to_make_space != 33) ||
(cache_ptr->slist_scan_restarts != 0) ||
(cache_ptr->LRU_scan_restarts != 0) ||
- (cache_ptr->hash_bucket_scan_restarts != 0)) {
+ (cache_ptr->index_scan_restarts != 0)) {
pass = FALSE;
failure_mssg = "Unexpected cache stats in check_stats__smoke_check_1(4).";
- }
- }
+ } /* end if */
#if H5C_COLLECT_CACHE_ENTRY_STATS
- if(pass) {
-
+ if(pass)
/* Note that most entry level stats are only updated on entry eviction */
-
if((cache_ptr->max_accesses[MONSTER_ENTRY_TYPE] != 2) ||
(cache_ptr->min_accesses[MONSTER_ENTRY_TYPE] != 1) ||
(cache_ptr->max_clears[MONSTER_ENTRY_TYPE] != 0) ||
@@ -36292,20 +36398,16 @@ check_stats__smoke_check_1(H5F_t * file_ptr)
pass = FALSE;
failure_mssg = "Unexpected monster entry level stats in check_stats__smoke_check_1(4).";
- }
- }
-#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */
-
- if(pass) {
+ } /* end if */
- reset_entries();
- }
+#endif /* H5C_COLLECT_CACHE_ENTRY_STATS */
- if(pass) {
+ if(pass)
+ reset_entries();
- /* reset cache min clean size to its expected value */
+ if(pass)
+ /* reset cache min clean size to its expected value */
cache_ptr->min_clean_size = (1 * 1024 * 1024);
- }
return;
@@ -36329,6 +36431,7 @@ int
main(void)
{
unsigned nerrs = 0;
+ unsigned paged;
int express_test;
H5open();
@@ -36346,61 +36449,75 @@ main(void)
return EXIT_FAILURE;
} /* end if */
- nerrs += smoke_check_1(express_test);
- nerrs += smoke_check_2(express_test);
- nerrs += smoke_check_3(express_test);
- nerrs += smoke_check_4(express_test);
- nerrs += smoke_check_5(express_test);
- nerrs += smoke_check_6(express_test);
- nerrs += smoke_check_7(express_test);
- nerrs += smoke_check_8(express_test);
- nerrs += smoke_check_9(express_test);
- nerrs += smoke_check_10(express_test);
-
- nerrs += write_permitted_check(express_test);
-
- nerrs += check_insert_entry();
- nerrs += check_flush_cache();
- nerrs += check_get_entry_status();
- nerrs += check_expunge_entry();
- nerrs += check_multiple_read_protect();
- nerrs += check_move_entry();
- nerrs += check_pin_protected_entry();
- nerrs += check_resize_entry();
- nerrs += check_evictions_enabled();
- nerrs += check_flush_protected_err();
- nerrs += check_destroy_pinned_err();
- nerrs += check_destroy_protected_err();
- nerrs += check_duplicate_insert_err();
- nerrs += check_double_pin_err();
- nerrs += check_double_unpin_err();
- nerrs += check_pin_entry_errs();
- nerrs += check_double_protect_err();
- nerrs += check_double_unprotect_err();
- nerrs += check_mark_entry_dirty_errs();
- nerrs += check_expunge_entry_errs();
- nerrs += check_move_entry_errs();
- nerrs += check_resize_entry_errs();
- nerrs += check_unprotect_ro_dirty_err();
- nerrs += check_protect_ro_rw_err();
- nerrs += check_protect_retries();
- nerrs += check_check_evictions_enabled_err();
- nerrs += check_auto_cache_resize(FALSE);
- nerrs += check_auto_cache_resize(TRUE);
- nerrs += check_auto_cache_resize_disable();
- nerrs += check_auto_cache_resize_epoch_markers();
- nerrs += check_auto_cache_resize_input_errs();
- nerrs += check_auto_cache_resize_aux_fcns();
- nerrs += check_metadata_blizzard_absence(TRUE);
- nerrs += check_metadata_blizzard_absence(FALSE);
- nerrs += check_flush_deps();
- nerrs += check_flush_deps_err();
- nerrs += check_flush_deps_order();
- nerrs += check_notify_cb();
- nerrs += check_metadata_cork(TRUE);
- nerrs += check_metadata_cork(FALSE);
- nerrs += check_entry_deletions_during_scans();
- nerrs += check_stats();
+ /* Test with paged aggregation enabled or not */
+ /* Each test will call setup_cache(), which sets up the file space strategy according to "paged" */
+ for(paged = FALSE; paged <= TRUE; paged++) {
+
+ if ( paged ) {
+
+ HDfprintf(stdout,
+ "\n\nRe-running tests with paged aggregation:\n");
+
+ if ( express_test > 0 )
+ HDfprintf(stdout, " Skipping smoke checks.\n");
+
+ HDfprintf(stdout, "\n");
+ }
+
+ nerrs += smoke_check_1(express_test, paged);
+ nerrs += smoke_check_2(express_test, paged);
+ nerrs += smoke_check_3(express_test, paged);
+ nerrs += smoke_check_4(express_test, paged);
+ nerrs += smoke_check_5(express_test, paged);
+ nerrs += smoke_check_6(express_test, paged);
+ nerrs += smoke_check_7(express_test, paged);
+ nerrs += smoke_check_8(express_test, paged);
+ nerrs += smoke_check_9(express_test, paged);
+ nerrs += smoke_check_10(express_test, paged);
+ nerrs += write_permitted_check(express_test, paged);
+ nerrs += check_insert_entry(paged);
+ nerrs += check_flush_cache(paged);
+ nerrs += check_get_entry_status(paged);
+ nerrs += check_expunge_entry(paged);
+ nerrs += check_multiple_read_protect(paged);
+ nerrs += check_move_entry(paged);
+ nerrs += check_pin_protected_entry(paged);
+ nerrs += check_resize_entry(paged);
+ nerrs += check_evictions_enabled(paged);
+ nerrs += check_flush_protected_err(paged);
+ nerrs += check_destroy_pinned_err(paged);
+ nerrs += check_destroy_protected_err(paged);
+ nerrs += check_duplicate_insert_err(paged);
+ nerrs += check_double_pin_err(paged);
+ nerrs += check_double_unpin_err(paged);
+ nerrs += check_pin_entry_errs(paged);
+ nerrs += check_double_protect_err(paged);
+ nerrs += check_double_unprotect_err(paged);
+ nerrs += check_mark_entry_dirty_errs(paged);
+ nerrs += check_expunge_entry_errs(paged);
+ nerrs += check_move_entry_errs(paged);
+ nerrs += check_resize_entry_errs(paged);
+ nerrs += check_unprotect_ro_dirty_err(paged);
+ nerrs += check_protect_ro_rw_err(paged);
+ nerrs += check_protect_retries(paged);
+ nerrs += check_check_evictions_enabled_err(paged);
+ nerrs += check_auto_cache_resize(FALSE, paged);
+ nerrs += check_auto_cache_resize(TRUE, paged);
+ nerrs += check_auto_cache_resize_disable(paged);
+ nerrs += check_auto_cache_resize_epoch_markers(paged);
+ nerrs += check_auto_cache_resize_input_errs(paged);
+ nerrs += check_auto_cache_resize_aux_fcns(paged);
+ nerrs += check_metadata_blizzard_absence(TRUE, paged);
+ nerrs += check_metadata_blizzard_absence(FALSE, paged);
+ nerrs += check_flush_deps(paged);
+ nerrs += check_flush_deps_err(paged);
+ nerrs += check_flush_deps_order(paged);
+ nerrs += check_notify_cb(paged);
+ nerrs += check_metadata_cork(TRUE, paged);
+ nerrs += check_metadata_cork(FALSE, paged);
+ nerrs += check_entry_deletions_during_scans(paged);
+ nerrs += check_stats(paged);
+ } /* end for */
/* can't fail, returns void */
free_entry_arrays();
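
The re-run loop above gives every test the same shape: take the paged flag, fold it into the TESTING() banner, and forward it to setup_cache(), which selects the file space strategy. A condensed sketch of that shape, with a hypothetical test name and body:

    static unsigned
    check_something(unsigned paged)   /* hypothetical test, for illustration */
    {
        H5F_t *file_ptr = NULL;

        if(paged)
            TESTING("something (paged aggregation)")
        else
            TESTING("something")

        pass = TRUE;

        reset_entries();

        /* setup_cache() picks the file space strategy from "paged" */
        file_ptr = setup_cache((size_t)(2 * 1024 * 1024), (size_t)(1 * 1024 * 1024), paged);

        /* ... exercise the cache via insert/protect/unprotect/etc. ... */

        if(file_ptr)
            takedown_cache(file_ptr, FALSE, FALSE);

        if(pass)
            PASSED()
        else
            H5_FAILED()

        return !pass;
    }
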
diff --git a/test/cache_api.c b/test/cache_api.c
index 1d3c9cf..7c0ffa3 100644
--- a/test/cache_api.c
+++ b/test/cache_api.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: John Mainzer
@@ -20,9 +18,6 @@
* with the cache implemented in H5C.c
*/
-#include "h5test.h"
-#include "H5Iprivate.h"
-#include "H5ACprivate.h"
#include "cache_common.h"
/* extern declarations */
@@ -33,12 +28,12 @@
/* private function declarations: */
-static hbool_t check_fapl_mdc_api_calls(void);
-static hbool_t check_file_mdc_api_calls(void);
-static hbool_t mdc_api_call_smoke_check(int express_test);
+static hbool_t check_fapl_mdc_api_calls(unsigned paged, hid_t fcpl_id);
+static hbool_t check_file_mdc_api_calls(unsigned paged, hid_t fcpl_id);
+static hbool_t mdc_api_call_smoke_check(int express_test, unsigned paged, hid_t fcpl_id);
static H5AC_cache_config_t * init_invalid_configs(void);
static hbool_t check_fapl_mdc_api_errs(void);
-static hbool_t check_file_mdc_api_errs(void);
+static hbool_t check_file_mdc_api_errs(unsigned paged, hid_t fcpl_id);
@@ -67,7 +62,7 @@ static hbool_t check_file_mdc_api_errs(void);
*-------------------------------------------------------------------------
*/
static hbool_t
-check_fapl_mdc_api_calls(void)
+check_fapl_mdc_api_calls(unsigned paged, hid_t fcpl_id)
{
char filename[512];
herr_t result;
@@ -116,7 +111,10 @@ check_fapl_mdc_api_calls(void)
H5C_auto_size_ctl_t default_auto_size_ctl;
H5C_auto_size_ctl_t mod_auto_size_ctl;
- TESTING("MDC/FAPL related API calls");
+ if(paged)
+ TESTING("MDC/FAPL related API calls for paged aggregation strategy")
+ else
+ TESTING("MDC/FAPL related API calls")
pass = TRUE;
@@ -220,7 +218,7 @@ check_fapl_mdc_api_calls(void)
/* create the file using the default FAPL */
if ( pass ) {
- file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT);
if ( file_id < 0 ) {
@@ -368,7 +366,7 @@ check_fapl_mdc_api_calls(void)
/* create the file using the modified FAPL */
if ( pass ) {
- file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl_id, fapl_id);
if ( file_id < 0 ) {
@@ -520,7 +518,7 @@ check_fapl_mdc_api_calls(void)
*-------------------------------------------------------------------------
*/
static hbool_t
-check_file_mdc_api_calls(void)
+check_file_mdc_api_calls(unsigned paged, hid_t fcpl_id)
{
char filename[512];
hid_t file_id = -1;
@@ -672,7 +670,10 @@ check_file_mdc_api_calls(void)
H5AC__DEFAULT_METADATA_WRITE_STRATEGY
};
- TESTING("MDC/FILE related API calls");
+ if(paged)
+ TESTING("MDC/FILE related API calls for paged aggregation strategy")
+ else
+ TESTING("MDC/FILE related API calls")
pass = TRUE;
@@ -698,7 +699,7 @@ check_file_mdc_api_calls(void)
/* create the file using the default FAPL */
if ( pass ) {
- file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT);
if ( file_id < 0 ) {
@@ -877,7 +878,7 @@ check_file_mdc_api_calls(void)
#define NUM_RANDOM_ACCESSES 200000
static hbool_t
-mdc_api_call_smoke_check(int express_test)
+mdc_api_call_smoke_check(int express_test, unsigned paged, hid_t fcpl_id)
{
char filename[512];
hbool_t valid_chunk;
@@ -1006,7 +1007,10 @@ mdc_api_call_smoke_check(int express_test)
H5AC__DEFAULT_METADATA_WRITE_STRATEGY
};
- TESTING("MDC API smoke check");
+ if(paged)
+ TESTING("MDC API smoke check for paged aggregation strategy")
+ else
+ TESTING("MDC API smoke check")
pass = TRUE;
@@ -1041,7 +1045,7 @@ mdc_api_call_smoke_check(int express_test)
/* create the file using the default FAPL */
if ( pass ) {
- file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT);
if ( file_id < 0 ) {
@@ -1924,7 +1928,7 @@ check_fapl_mdc_api_errs(void)
*-------------------------------------------------------------------------
*/
static hbool_t
-check_file_mdc_api_errs(void)
+check_file_mdc_api_errs(unsigned paged, hid_t fcpl_id)
{
char filename[512];
static char msg[128];
@@ -1940,7 +1944,10 @@ check_file_mdc_api_errs(void)
H5AC_cache_config_t default_config = H5AC__DEFAULT_CACHE_CONFIG;
H5AC_cache_config_t scratch;
- TESTING("MDC/FILE related API input errors");
+ if(paged)
+ TESTING("MDC/FILE related API input errors for paged aggregation strategy")
+ else
+ TESTING("MDC/FILE related API input errors")
pass = TRUE;
@@ -1971,7 +1978,7 @@ check_file_mdc_api_errs(void)
HDfprintf(stdout, "%s: calling H5Fcreate().\n", FUNC);
}
- file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT);
if ( file_id < 0 ) {
@@ -2270,6 +2277,9 @@ main(void)
{
unsigned nerrs = 0;
int express_test;
+ hid_t fcpl_id = -1;
+ hid_t fcpl2_id = -1;
+ unsigned paged;
H5open();
@@ -2284,52 +2294,67 @@ main(void)
/* Initialize invalid configurations.
*/
invalid_configs = init_invalid_configs();
-
if ( NULL == invalid_configs ) {
-
failure_mssg = "Unable to allocate memory for invalid configs.";
HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", FUNC, failure_mssg);
return EXIT_FAILURE;
- }
+ } /* end if */
- if ( !check_fapl_mdc_api_calls() ) {
+ if((fcpl_id = H5Pcreate(H5P_FILE_CREATE)) < 0) {
+ failure_mssg = "H5Pcreate(H5P_FILE_CREATE) failed.\n";
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", FUNC, failure_mssg);
+ return EXIT_FAILURE;
+ } /* end if */
- nerrs += 1;
- }
+ /* Set file space strategy to default or paged aggregation strategy */
+ if((fcpl2_id = H5Pcopy(fcpl_id)) < 0) {
+ failure_mssg = "H5Pcreate(H5P_FILE_CREATE) failed.\n";
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", FUNC, failure_mssg);
+ return EXIT_FAILURE;
+ } /* end if */
- if ( !check_file_mdc_api_calls() ) {
+ if(H5Pset_file_space_strategy(fcpl2_id, H5F_FSPACE_STRATEGY_PAGE, 1, (hsize_t)1) < 0) {
+ failure_mssg = "H5Pset_file_space_strategy() failed.\n";
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", FUNC, failure_mssg);
+ return EXIT_FAILURE;
+ } /* end if */
- nerrs += 1;
- }
+ /* Test with paged aggregation enabled or not */
+ /* The "my_fcpl" passed to each test has the paged or non-paged strategy set up accordinly */
+ for(paged = FALSE; paged <= TRUE; paged++) {
+ hid_t my_fcpl = fcpl_id;
- if ( !mdc_api_call_smoke_check(express_test) ) {
+ if(paged)
+ my_fcpl = fcpl2_id;
- nerrs += 1;
- }
+ if(!check_fapl_mdc_api_calls(paged, my_fcpl))
+ nerrs += 1;
- if ( !check_fapl_mdc_api_errs() ) {
+ if(!check_file_mdc_api_calls(paged, my_fcpl))
+ nerrs += 1;
- nerrs += 1;
- }
+ if(!mdc_api_call_smoke_check(express_test, paged, my_fcpl))
+ nerrs += 1;
- if ( !check_file_mdc_api_errs() ) {
+ if(!check_file_mdc_api_errs(paged, my_fcpl))
+ nerrs += 1;
+ } /* end for paged */
+ if(!check_fapl_mdc_api_errs())
nerrs += 1;
- }
-
- if ( invalid_configs ) {
+ if(invalid_configs)
HDfree(invalid_configs);
- }
-
- if ( nerrs > 0 ) {
+ if(H5Pclose(fcpl_id) < 0 ) {
+ failure_mssg = "H5Pclose() failed.\n";
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n", FUNC, failure_mssg);
return EXIT_FAILURE;
+ } /* end if */
- } else {
-
+ if(nerrs > 0)
+ return EXIT_FAILURE;
+ else
return EXIT_SUCCESS;
- }
-
} /* main() */
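
Stripped of the test scaffolding, the property-list work the new main() does amounts to the following pattern; this is only a sketch of the public API calls used above (the file name and error handling are illustrative):

    #include "hdf5.h"

    /* Sketch: create a file whose file space strategy is paged aggregation,
     * mirroring the fcpl2_id setup above.  The name below is illustrative. */
    static hid_t
    create_paged_file(const char *name)
    {
        hid_t fcpl_id = H5Pcreate(H5P_FILE_CREATE);
        hid_t file_id = -1;

        if(fcpl_id < 0)
            return -1;

        /* strategy = paged aggregation, persist free-space = 1, threshold = 1 */
        if(H5Pset_file_space_strategy(fcpl_id, H5F_FSPACE_STRATEGY_PAGE, 1, (hsize_t)1) >= 0)
            file_id = H5Fcreate(name, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT);

        H5Pclose(fcpl_id);

        return file_id;
    }
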
diff --git a/test/cache_common.c b/test/cache_common.c
index f387f05..69bc26a 100644
--- a/test/cache_common.c
+++ b/test/cache_common.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: John Mainzer
@@ -19,9 +17,6 @@
* This file contains common code for tests of the cache
* implemented in H5C.c
*/
-#include "h5test.h"
-#include "H5Cprivate.h"
-#include "H5Iprivate.h"
#include "H5MFprivate.h"
#include "H5MMprivate.h"
#include "cache_common.h"
@@ -42,6 +37,12 @@ hid_t saved_fapl_id = H5P_DEFAULT; /* store the fapl id here between
* close.
*/
+hid_t saved_fcpl_id = H5P_DEFAULT; /* store the fcpl id here between
+ * cache setup and takedown. Note
+ * that if saved_fcpl_id == H5P_DEFAULT,
+ * we assume that there is no fcpl to
+ * close.
+ */
hid_t saved_fid = -1; /* store the file id here between cache setup
* and takedown.
*/
@@ -128,37 +129,37 @@ static herr_t monster_image_len(const void *thing, size_t *image_len_ptr);
static herr_t variable_image_len(const void *thing, size_t *image_len_ptr);
static herr_t notify_image_len(const void *thing, size_t *image_len_ptr);
-static herr_t pico_pre_serialize(const H5F_t *f, hid_t dxpl_id, void *thing,
+static herr_t pico_pre_serialize(H5F_t *f, hid_t dxpl_id, void *thing,
haddr_t addr, size_t len, haddr_t *new_addr_ptr,
size_t *new_len_ptr, unsigned *flags_ptr);
-static herr_t nano_pre_serialize(const H5F_t *f, hid_t dxpl_id, void *thing,
+static herr_t nano_pre_serialize(H5F_t *f, hid_t dxpl_id, void *thing,
haddr_t addr, size_t len, haddr_t *new_addr_ptr,
size_t *new_len_ptr, unsigned *flags_ptr);
-static herr_t micro_pre_serialize(const H5F_t *f, hid_t dxpl_id, void *thing,
+static herr_t micro_pre_serialize(H5F_t *f, hid_t dxpl_id, void *thing,
haddr_t addr, size_t len, haddr_t *new_addr_ptr,
size_t *new_len_ptr, unsigned *flags_ptr);
-static herr_t tiny_pre_serialize(const H5F_t *f, hid_t dxpl_id, void *thing,
+static herr_t tiny_pre_serialize(H5F_t *f, hid_t dxpl_id, void *thing,
haddr_t addr, size_t len, haddr_t *new_addr_ptr,
size_t *new_len_ptr, unsigned *flags_ptr);
-static herr_t small_pre_serialize(const H5F_t *f, hid_t dxpl_id, void *thing,
+static herr_t small_pre_serialize(H5F_t *f, hid_t dxpl_id, void *thing,
haddr_t addr, size_t len, haddr_t *new_addr_ptr,
size_t *new_len_ptr, unsigned *flags_ptr);
-static herr_t medium_pre_serialize(const H5F_t *f, hid_t dxpl_id, void *thing,
+static herr_t medium_pre_serialize(H5F_t *f, hid_t dxpl_id, void *thing,
haddr_t addr, size_t len, haddr_t *new_addr_ptr,
size_t *new_len_ptr, unsigned *flags_ptr);
-static herr_t large_pre_serialize(const H5F_t *f, hid_t dxpl_id, void *thing,
+static herr_t large_pre_serialize(H5F_t *f, hid_t dxpl_id, void *thing,
haddr_t addr, size_t len, haddr_t *new_addr_ptr,
size_t *new_len_ptr, unsigned *flags_ptr);
-static herr_t huge_pre_serialize(const H5F_t *f, hid_t dxpl_id, void *thing,
+static herr_t huge_pre_serialize(H5F_t *f, hid_t dxpl_id, void *thing,
haddr_t addr, size_t len, haddr_t *new_addr_ptr,
size_t *new_len_ptr, unsigned *flags_ptr);
-static herr_t monster_pre_serialize(const H5F_t *f, hid_t dxpl_id, void *thing,
+static herr_t monster_pre_serialize(H5F_t *f, hid_t dxpl_id, void *thing,
haddr_t addr, size_t len, haddr_t *new_addr_ptr,
size_t *new_len_ptr, unsigned *flags_ptr);
-static herr_t variable_pre_serialize(const H5F_t *f, hid_t dxpl_id, void *thing,
+static herr_t variable_pre_serialize(H5F_t *f, hid_t dxpl_id, void *thing,
haddr_t addr, size_t len, haddr_t *new_addr_ptr,
size_t *new_len_ptr, unsigned *flags_ptr);
-static herr_t notify_pre_serialize(const H5F_t *f, hid_t dxpl_id, void *thing,
+static herr_t notify_pre_serialize(H5F_t *f, hid_t dxpl_id, void *thing,
haddr_t addr, size_t len, haddr_t *new_addr_ptr,
size_t *new_len_ptr, unsigned *flags_ptr);
@@ -211,7 +212,7 @@ static herr_t get_final_load_size(const void *image, size_t image_len,
static void *deserialize(const void *image_ptr, size_t len, void *udata_ptr,
hbool_t *dirty_ptr, int32_t entry_type);
static herr_t image_len(const void *thing, size_t *image_len_ptr, int32_t entry_type);
-static herr_t pre_serialize(const H5F_t *f, hid_t dxpl_id, void *thing,
+static herr_t pre_serialize(H5F_t *f, hid_t dxpl_id, void *thing,
haddr_t addr, size_t len, haddr_t *new_addr_ptr, size_t *new_len_ptr,
unsigned *flags_ptr);
static herr_t serialize(const H5F_t *f, void *image_ptr, size_t len,
@@ -288,27 +289,9 @@ const haddr_t alt_base_addrs[NUMBER_OF_ENTRY_TYPES] =
NOTIFY_ALT_BASE_ADDR
};
-const char *entry_type_names[NUMBER_OF_ENTRY_TYPES] =
-{
- "pico entries -- 1 B",
- "nano entries -- 4 B",
- "micro entries -- 16 B",
- "tiny entries -- 64 B",
- "small entries -- 256 B",
- "medium entries -- 1 KB",
- "large entries -- 4 KB",
- "huge entries -- 16 KB",
- "monster entries -- 64 KB",
- "variable entries -- 1B - 10KB",
- "notify entries -- 1B"
-};
-
-/* callback table declaration */
-
-const H5C_class_t types[NUMBER_OF_ENTRY_TYPES] =
-{
- {
+/* Callback classes */
+static const H5C_class_t pico_class[1] = {{
PICO_ENTRY_TYPE,
"pico_entry",
H5FD_MEM_DEFAULT,
@@ -323,8 +306,9 @@ const H5C_class_t types[NUMBER_OF_ENTRY_TYPES] =
NULL,
pico_free_icr,
NULL,
- },
- {
+}};
+
+static const H5C_class_t nano_class[1] = {{
NANO_ENTRY_TYPE,
"nano_entry",
H5FD_MEM_DEFAULT,
@@ -339,8 +323,9 @@ const H5C_class_t types[NUMBER_OF_ENTRY_TYPES] =
NULL,
nano_free_icr,
NULL,
- },
- {
+}};
+
+static const H5C_class_t micro_class[1] = {{
MICRO_ENTRY_TYPE,
"micro_entry",
H5FD_MEM_DEFAULT,
@@ -355,8 +340,9 @@ const H5C_class_t types[NUMBER_OF_ENTRY_TYPES] =
NULL,
micro_free_icr,
NULL,
- },
- {
+}};
+
+static const H5C_class_t tiny_class[1] = {{
TINY_ENTRY_TYPE,
"tiny_entry",
H5FD_MEM_DEFAULT,
@@ -371,8 +357,9 @@ const H5C_class_t types[NUMBER_OF_ENTRY_TYPES] =
NULL,
tiny_free_icr,
NULL,
- },
- {
+}};
+
+static const H5C_class_t small_class[1] = {{
SMALL_ENTRY_TYPE,
"small_entry",
H5FD_MEM_DEFAULT,
@@ -387,8 +374,9 @@ const H5C_class_t types[NUMBER_OF_ENTRY_TYPES] =
NULL,
small_free_icr,
NULL,
- },
- {
+}};
+
+static const H5C_class_t medium_class[1] = {{
MEDIUM_ENTRY_TYPE,
"medium_entry",
H5FD_MEM_DEFAULT,
@@ -403,8 +391,9 @@ const H5C_class_t types[NUMBER_OF_ENTRY_TYPES] =
NULL,
medium_free_icr,
NULL,
- },
- {
+}};
+
+static const H5C_class_t large_class[1] = {{
LARGE_ENTRY_TYPE,
"large_entry",
H5FD_MEM_DEFAULT,
@@ -419,8 +408,9 @@ const H5C_class_t types[NUMBER_OF_ENTRY_TYPES] =
NULL,
large_free_icr,
NULL,
- },
- {
+}};
+
+static const H5C_class_t huge_class[1] = {{
HUGE_ENTRY_TYPE,
"huge_entry",
H5FD_MEM_DEFAULT,
@@ -435,8 +425,9 @@ const H5C_class_t types[NUMBER_OF_ENTRY_TYPES] =
NULL,
huge_free_icr,
NULL,
- },
- {
+}};
+
+static const H5C_class_t monster_class[1] = {{
MONSTER_ENTRY_TYPE,
"monster_entry",
H5FD_MEM_DEFAULT,
@@ -451,8 +442,9 @@ const H5C_class_t types[NUMBER_OF_ENTRY_TYPES] =
NULL,
monster_free_icr,
NULL,
- },
- {
+}};
+
+static const H5C_class_t variable_class[1] = {{
VARIABLE_ENTRY_TYPE,
"variable_entry",
H5FD_MEM_DEFAULT,
@@ -467,8 +459,9 @@ const H5C_class_t types[NUMBER_OF_ENTRY_TYPES] =
NULL,
variable_free_icr,
NULL,
- },
- {
+}};
+
+static const H5C_class_t notify_class[1] = {{
NOTIFY_ENTRY_TYPE,
"notify_entry",
H5FD_MEM_DEFAULT,
@@ -483,7 +476,22 @@ const H5C_class_t types[NUMBER_OF_ENTRY_TYPES] =
notify_notify,
notify_free_icr,
NULL,
- }
+}};
+
+/* callback table declaration */
+
+const H5C_class_t *types[NUMBER_OF_ENTRY_TYPES] = {
+ pico_class,
+ nano_class,
+ micro_class,
+ tiny_class,
+ small_class,
+ medium_class,
+ large_class,
+ huge_class,
+ monster_class,
+ variable_class,
+ notify_class
};
/* address translation functions: */
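
The net effect of this hunk is that types[] is no longer an array of H5C_class_t values but an array of pointers to per-type static class definitions, so callers that used to write &(types[type]) now pass types[type] directly (as the expunge/insert/protect hunks further down show). The underlying C pattern, with hypothetical names:

    struct my_class { int id; const char *name; };

    /* One static const definition per class ... */
    static const struct my_class pico_cls[1] = {{ 0, "pico_entry" }};
    static const struct my_class nano_cls[1] = {{ 1, "nano_entry" }};

    /* ... and a published table of pointers to them. */
    const struct my_class *my_types[] = { pico_cls, nano_cls };

    /* A consumer now receives my_types[i] as-is -- no '&' needed. */
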
@@ -1114,7 +1122,7 @@ notify_image_len(const void *thing, size_t *image_length)
*-------------------------------------------------------------------------
*/
herr_t
-pre_serialize(const H5F_t *f,
+pre_serialize(H5F_t *f,
hid_t H5_ATTR_UNUSED dxpl_id,
void *thing,
haddr_t addr,
@@ -1199,7 +1207,7 @@ pre_serialize(const H5F_t *f,
} /* pre_serialize() */
herr_t
-pico_pre_serialize(const H5F_t *f,
+pico_pre_serialize(H5F_t *f,
hid_t dxpl_id,
void *thing,
haddr_t addr,
@@ -1213,7 +1221,7 @@ pico_pre_serialize(const H5F_t *f,
}
herr_t
-nano_pre_serialize(const H5F_t *f,
+nano_pre_serialize(H5F_t *f,
hid_t dxpl_id,
void *thing,
haddr_t addr,
@@ -1227,7 +1235,7 @@ nano_pre_serialize(const H5F_t *f,
}
herr_t
-micro_pre_serialize(const H5F_t *f,
+micro_pre_serialize(H5F_t *f,
hid_t dxpl_id,
void *thing,
haddr_t addr,
@@ -1241,7 +1249,7 @@ micro_pre_serialize(const H5F_t *f,
}
herr_t
-tiny_pre_serialize(const H5F_t *f,
+tiny_pre_serialize(H5F_t *f,
hid_t dxpl_id,
void *thing,
haddr_t addr,
@@ -1255,7 +1263,7 @@ tiny_pre_serialize(const H5F_t *f,
}
herr_t
-small_pre_serialize(const H5F_t *f,
+small_pre_serialize(H5F_t *f,
hid_t dxpl_id,
void *thing,
haddr_t addr,
@@ -1269,7 +1277,7 @@ small_pre_serialize(const H5F_t *f,
}
herr_t
-medium_pre_serialize(const H5F_t *f,
+medium_pre_serialize(H5F_t *f,
hid_t dxpl_id,
void *thing,
haddr_t addr,
@@ -1283,7 +1291,7 @@ medium_pre_serialize(const H5F_t *f,
}
herr_t
-large_pre_serialize(const H5F_t *f,
+large_pre_serialize(H5F_t *f,
hid_t dxpl_id,
void *thing,
haddr_t addr,
@@ -1297,7 +1305,7 @@ large_pre_serialize(const H5F_t *f,
}
herr_t
-huge_pre_serialize(const H5F_t *f,
+huge_pre_serialize(H5F_t *f,
hid_t dxpl_id,
void *thing,
haddr_t addr,
@@ -1311,7 +1319,7 @@ huge_pre_serialize(const H5F_t *f,
}
herr_t
-monster_pre_serialize(const H5F_t *f,
+monster_pre_serialize(H5F_t *f,
hid_t dxpl_id,
void *thing,
haddr_t addr,
@@ -1325,7 +1333,7 @@ monster_pre_serialize(const H5F_t *f,
}
herr_t
-variable_pre_serialize(const H5F_t *f,
+variable_pre_serialize(H5F_t *f,
hid_t dxpl_id,
void *thing,
haddr_t addr,
@@ -1339,7 +1347,7 @@ variable_pre_serialize(const H5F_t *f,
}
herr_t
-notify_pre_serialize(const H5F_t *f,
+notify_pre_serialize(H5F_t *f,
hid_t dxpl_id,
void *thing,
haddr_t addr,
@@ -1565,6 +1573,8 @@ notify(H5C_notify_action_t action, void *thing, int32_t entry_type)
case H5C_NOTIFY_ACTION_ENTRY_CLEANED:
case H5C_NOTIFY_ACTION_CHILD_DIRTIED:
case H5C_NOTIFY_ACTION_CHILD_CLEANED:
+ case H5C_NOTIFY_ACTION_CHILD_UNSERIALIZED:
+ case H5C_NOTIFY_ACTION_CHILD_SERIALIZED:
/* do nothing */
break;
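
The two actions added above are raised when a child of a flush dependency is unserialized or serialized; a client that has no bookkeeping for them simply folds them into the no-op branch, exactly as this test helper does. A hedged sketch of that switch shape, using the helper's signature from the hunk header (remaining cases elided):

    static herr_t
    my_notify(H5C_notify_action_t action, void *thing, int32_t entry_type)
    {
        /* "thing" and "entry_type" are unused in this sketch */
        switch(action) {
            /* ... load/flush/evict cases elided ... */

            case H5C_NOTIFY_ACTION_ENTRY_CLEANED:
            case H5C_NOTIFY_ACTION_CHILD_DIRTIED:
            case H5C_NOTIFY_ACTION_CHILD_CLEANED:
            case H5C_NOTIFY_ACTION_CHILD_UNSERIALIZED:
            case H5C_NOTIFY_ACTION_CHILD_SERIALIZED:
                /* nothing to do for this client */
                break;

            default:
                return FAIL;
        }

        return SUCCEED;
    }
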
@@ -1626,9 +1636,12 @@ free_icr(test_entry_t *entry, int32_t entry_type)
HDassert(entry->cache_ptr->magic == H5C__H5C_T_MAGIC);
HDassert((entry->header.destroy_in_progress) ||
(entry->header.addr == entry->addr));
+ HDassert(entry->header.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC);
HDassert(entry->header.size == entry->size);
HDassert((entry->type == VARIABLE_ENTRY_TYPE) ||
(entry->size == entry_sizes[entry->type]));
+ HDassert(entry->header.tl_next == NULL);
+ HDassert(entry->header.tl_prev == NULL);
if(entry->num_pins > 0) {
int i;
@@ -3159,7 +3172,8 @@ verify_unprotected(void)
H5F_t *
setup_cache(size_t max_cache_size,
- size_t min_clean_size)
+ size_t min_clean_size,
+ unsigned paged)
{
char filename[512];
hbool_t show_progress = FALSE;
@@ -3171,6 +3185,7 @@ setup_cache(size_t max_cache_size,
H5F_t * ret_val = NULL;
haddr_t actual_base_addr;
hid_t fapl_id = H5P_DEFAULT;
+ hid_t fcpl_id = H5P_DEFAULT;
if(show_progress) /* 1 */
HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
@@ -3178,6 +3193,36 @@ setup_cache(size_t max_cache_size,
saved_fid = -1;
+ if(pass) {
+ if((fcpl_id = H5Pcreate(H5P_FILE_CREATE)) == FAIL) {
+ pass = FALSE;
+ failure_mssg = "H5Pcreate(H5P_FILE_CREATE) failed.\n";
+ }
+ }
+
+ if(pass && paged) {
+ /* Set up paged aggregation strategy */
+ if(H5Pset_file_space_strategy(fcpl_id, H5F_FSPACE_STRATEGY_PAGE, 1, (hsize_t)1) == FAIL) {
+ pass = FALSE;
+ failure_mssg = "H5Pset_file_space_strategy() failed.\n";
+ H5Pclose(fcpl_id);
+ fcpl_id = H5P_DEFAULT;
+ }
+ }
+
+ if(pass && paged) {
+ /* Set up file space page size to BASE_ADDR */
+ if(H5Pset_file_space_page_size(fcpl_id, (hsize_t)BASE_ADDR) == FAIL) {
+ pass = FALSE;
+ failure_mssg = "H5Pset_file_space_page_size() failed.\n";
+ H5Pclose(fcpl_id);
+ fcpl_id = H5P_DEFAULT;
+ }
+ }
+
+ if(pass)
+ saved_fcpl_id = fcpl_id;
+
/* setup the file name */
if(pass) {
if(NULL == h5_fixname(FILENAME[0], H5P_DEFAULT, filename, sizeof(filename))) {
@@ -3201,7 +3246,7 @@ setup_cache(size_t max_cache_size,
pass = FALSE;
failure_mssg = "H5P_set_fapl_core() failed.\n";
}
- else if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id)) < 0) {
+ else if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl_id, fapl_id)) < 0) {
core_file_driver_failed = TRUE;
if(verbose)
@@ -3220,8 +3265,8 @@ setup_cache(size_t max_cache_size,
* If this fails, we are cooked.
*/
if(pass && fid < 0) {
- fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
- saved_fid = fid;
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl_id, fapl_id);
+ saved_fid = fid;
if(fid < 0) {
pass = FALSE;
@@ -3229,8 +3274,8 @@ setup_cache(size_t max_cache_size,
if(verbose)
HDfprintf(stdout, "%s: H5Fcreate() failed.\n", FUNC);
- }
- }
+ } /* end if */
+ } /* end if */
if(show_progress) /* 4 */
HDfprintf(stdout, "%s() - %0d -- pass = %d\n",
@@ -3298,7 +3343,7 @@ setup_cache(size_t max_cache_size,
cache_ptr = H5C_create(max_cache_size,
min_clean_size,
(NUMBER_OF_ENTRY_TYPES - 1),
- (const char **)entry_type_names,
+ types,
check_write_permitted,
TRUE,
NULL,
@@ -3408,6 +3453,12 @@ takedown_cache(H5F_t * file_ptr,
H5C_stats(cache_ptr, "test cache", dump_detailed_stats);
}
+ if ( H5C_prep_for_file_close(file_ptr, H5P_DATASET_XFER_DEFAULT) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "unexpected failure of prep for file close.\n";
+ }
+
flush_cache(file_ptr, TRUE, FALSE, FALSE);
H5C_dest(file_ptr, H5AC_ind_read_dxpl_id);
@@ -3426,6 +3477,11 @@ takedown_cache(H5F_t * file_ptr,
saved_fapl_id = H5P_DEFAULT;
}
+ if ( saved_fcpl_id != H5P_DEFAULT ) {
+ H5Pclose(saved_fcpl_id);
+ saved_fcpl_id = H5P_DEFAULT;
+ }
+
if ( saved_fid != -1 ) {
if ( H5F_addr_defined(saved_actual_base_addr) ) {
@@ -3525,7 +3581,7 @@ expunge_entry(H5F_t * file_ptr,
HDassert( ! ( entry_ptr->is_pinned ) );
result = H5C_expunge_entry(file_ptr, H5AC_ind_read_dxpl_id,
- &(types[type]), entry_ptr->addr, H5C__NO_FLAGS_SET);
+ types[type], entry_ptr->addr, H5C__NO_FLAGS_SET);
if ( result < 0 ) {
@@ -3746,11 +3802,11 @@ insert_entry(H5F_t * file_ptr,
}
result = H5C_insert_entry(file_ptr, xfer,
- &(types[type]), entry_ptr->addr, (void *)entry_ptr, flags);
+ types[type], entry_ptr->addr, (void *)entry_ptr, flags);
if ( ( result < 0 ) ||
( entry_ptr->header.is_protected ) ||
- ( entry_ptr->header.type != &(types[type]) ) ||
+ ( entry_ptr->header.type != types[type] ) ||
( entry_ptr->size != entry_ptr->header.size ) ||
( entry_ptr->addr != entry_ptr->header.addr ) ) {
@@ -3763,8 +3819,8 @@ insert_entry(H5F_t * file_ptr,
HDfprintf(stdout, "entry_ptr->header.is_protected = %d\n",
(int)(entry_ptr->header.is_protected));
HDfprintf(stdout,
- "entry_ptr->header.type != &(types[type]) = %d\n",
- (int)(entry_ptr->header.type != &(types[type])));
+ "entry_ptr->header.type != types[type] = %d\n",
+ (int)(entry_ptr->header.type != types[type]));
HDfprintf(stdout,
"entry_ptr->size != entry_ptr->header.size = %d\n",
(int)(entry_ptr->size != entry_ptr->header.size));
@@ -3847,7 +3903,7 @@ mark_entry_dirty(int32_t type,
( !entry_ptr->header.is_protected && !entry_ptr->header.is_pinned ) ||
( entry_ptr->header.is_protected && !entry_ptr->header.dirtied ) ||
( !entry_ptr->header.is_protected && !entry_ptr->header.is_dirty ) ||
- ( entry_ptr->header.type != &(types[type]) ) ||
+ ( entry_ptr->header.type != types[type] ) ||
( entry_ptr->size != entry_ptr->header.size ) ||
( entry_ptr->addr != entry_ptr->header.addr ) ) {
@@ -3940,7 +3996,7 @@ move_entry(H5C_t * cache_ptr,
mark_flush_dep_dirty(entry_ptr);
entry_ptr->action = TEST_ENTRY_ACTION_MOVE;
- result = H5C_move_entry(cache_ptr, &(types[type]), old_addr, new_addr);
+ result = H5C_move_entry(cache_ptr, types[type], old_addr, new_addr);
entry_ptr->action = TEST_ENTRY_ACTION_NUL;
}
@@ -4019,12 +4075,12 @@ protect_entry(H5F_t * file_ptr, int32_t type, int32_t idx)
} /* end if */
cache_entry_ptr = (H5C_cache_entry_t *)H5C_protect(file_ptr, xfer,
- &(types[type]), entry_ptr->addr, &entry_ptr->addr,
+ types[type], entry_ptr->addr, &entry_ptr->addr,
H5C__NO_FLAGS_SET);
if ( ( cache_entry_ptr != (void *)entry_ptr ) ||
( !(entry_ptr->header.is_protected) ) ||
- ( entry_ptr->header.type != &(types[type]) ) ||
+ ( entry_ptr->header.type != types[type] ) ||
( entry_ptr->size != entry_ptr->header.size ) ||
( entry_ptr->addr != entry_ptr->header.addr ) ) {
@@ -4041,8 +4097,8 @@ protect_entry(H5F_t * file_ptr, int32_t type, int32_t idx)
HDfprintf(stdout, "entry_ptr->header.is_protected = %d\n",
(int)(entry_ptr->header.is_protected));
HDfprintf(stdout,
- "( entry_ptr->header.type != &(types[type]) ) = %d\n",
- (int)( entry_ptr->header.type != &(types[type]) ));
+ "( entry_ptr->header.type != types[type] ) = %d\n",
+ (int)( entry_ptr->header.type != types[type] ));
HDfprintf(stdout,
"entry_ptr->size = %d, entry_ptr->header.size = %d\n",
(int)(entry_ptr->size), (int)(entry_ptr->header.size));
@@ -4123,13 +4179,13 @@ protect_entry_ro(H5F_t * file_ptr,
( entry_ptr->ro_ref_count > 0 ) ) );
cache_entry_ptr = (H5C_cache_entry_t *)H5C_protect(file_ptr, H5AC_ind_read_dxpl_id,
- &(types[type]), entry_ptr->addr, &entry_ptr->addr, H5C__READ_ONLY_FLAG);
+ types[type], entry_ptr->addr, &entry_ptr->addr, H5C__READ_ONLY_FLAG);
if ( ( cache_entry_ptr != (void *)entry_ptr ) ||
( !(entry_ptr->header.is_protected) ) ||
( !(entry_ptr->header.is_read_only) ) ||
( entry_ptr->header.ro_ref_count <= 0 ) ||
- ( entry_ptr->header.type != &(types[type]) ) ||
+ ( entry_ptr->header.type != types[type] ) ||
( entry_ptr->size != entry_ptr->header.size ) ||
( entry_ptr->addr != entry_ptr->header.addr ) ) {
@@ -4260,7 +4316,7 @@ unpin_entry(int32_t type,
if ( ( result < 0 ) ||
( entry_ptr->header.pinned_from_client ) ||
( entry_ptr->header.is_pinned && !entry_ptr->header.pinned_from_cache ) ||
- ( entry_ptr->header.type != &(types[type]) ) ||
+ ( entry_ptr->header.type != types[type] ) ||
( entry_ptr->size != entry_ptr->header.size ) ||
( entry_ptr->addr != entry_ptr->header.addr ) ) {
@@ -4345,7 +4401,7 @@ unprotect_entry(H5F_t * file_ptr,
( ( entry_ptr->header.is_protected ) &&
( ( ! ( entry_ptr->is_read_only ) ) ||
( entry_ptr->ro_ref_count <= 0 ) ) ) ||
- ( entry_ptr->header.type != &(types[type]) ) ||
+ ( entry_ptr->header.type != types[type] ) ||
( entry_ptr->size != entry_ptr->header.size ) ||
( entry_ptr->addr != entry_ptr->header.addr ) ) {
@@ -6120,7 +6176,7 @@ check_and_validate_cache_size(hid_t file_id,
size_t min_clean_size;
size_t expected_cur_size;
size_t cur_size;
- int32_t expected_cur_num_entries;
+ uint32_t expected_cur_num_entries;
int cur_num_entries;
H5F_t * file_ptr = NULL;
H5C_t * cache_ptr = NULL;
@@ -6426,7 +6482,7 @@ dump_LRU(H5F_t * file_ptr)
entry_ptr = cache_ptr->LRU_head_ptr;
HDfprintf(stdout,
- "\n\nIndex len/size/clean size/dirty size = %d/%lld/%lld/%lld\n",
+ "\n\nIndex len/size/clean size/dirty size = %u/%lld/%lld/%lld\n",
cache_ptr->index_len, (long long)(cache_ptr->index_size),
(long long)(cache_ptr->clean_index_size),
(long long)(cache_ptr->dirty_index_size));
diff --git a/test/cache_common.h b/test/cache_common.h
index 1ab04cb..8999e44 100644
--- a/test/cache_common.h
+++ b/test/cache_common.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: John Mainzer
@@ -384,7 +382,7 @@ if ( ( (cache_ptr) == NULL ) || \
HDfprintf(stdout, "Pre HT search SC failed.\n"); \
}
-#define H5C_TEST__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, Addr, k) \
+#define H5C_TEST__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, k) \
if ( ( (cache_ptr) == NULL ) || \
( (cache_ptr)->magic != H5C__H5C_T_MAGIC ) || \
( (cache_ptr)->index_len < 1 ) || \
@@ -392,7 +390,6 @@ if ( ( (cache_ptr) == NULL ) || \
( (cache_ptr)->index_size < (entry_ptr)->size ) || \
( (cache_ptr)->index_size != \
((cache_ptr)->clean_index_size + (cache_ptr)->dirty_index_size) ) || \
- ( H5F_addr_ne((entry_ptr)->addr, (Addr)) ) || \
( (entry_ptr)->size <= 0 ) || \
( ((cache_ptr)->index)[k] == NULL ) || \
( ( ((cache_ptr)->index)[k] != (entry_ptr) ) && \
@@ -416,32 +413,29 @@ if ( ( (cache_ptr) == NULL ) || \
#define H5C_TEST__SEARCH_INDEX(cache_ptr, Addr, entry_ptr) \
{ \
int k; \
- int depth = 0; \
H5C_TEST__PRE_HT_SEARCH_SC(cache_ptr, Addr) \
k = H5C__HASH_FCN(Addr); \
entry_ptr = ((cache_ptr)->index)[k]; \
- while ( ( entry_ptr ) && ( H5F_addr_ne(Addr, (entry_ptr)->addr) ) ) \
+ while ( entry_ptr ) \
{ \
- (entry_ptr) = (entry_ptr)->ht_next; \
- (depth)++; \
- } \
- if ( entry_ptr ) \
- { \
- H5C_TEST__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, Addr, k) \
- if ( entry_ptr != ((cache_ptr)->index)[k] ) \
+ if ( H5F_addr_eq(Addr, (entry_ptr)->addr) ) \
{ \
- if ( (entry_ptr)->ht_next ) \
+ H5C_TEST__POST_SUC_HT_SEARCH_SC(cache_ptr, entry_ptr, k) \
+ if ( entry_ptr != ((cache_ptr)->index)[k] ) \
{ \
- (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \
+ if ( (entry_ptr)->ht_next ) \
+ (entry_ptr)->ht_next->ht_prev = (entry_ptr)->ht_prev; \
+ HDassert( (entry_ptr)->ht_prev != NULL ); \
+ (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \
+ ((cache_ptr)->index)[k]->ht_prev = (entry_ptr); \
+ (entry_ptr)->ht_next = ((cache_ptr)->index)[k]; \
+ (entry_ptr)->ht_prev = NULL; \
+ ((cache_ptr)->index)[k] = (entry_ptr); \
+ H5C_TEST__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k) \
} \
- HDassert( (entry_ptr)->ht_prev != NULL ); \
- (entry_ptr)->ht_prev->ht_next = (entry_ptr)->ht_next; \
- ((cache_ptr)->index)[k]->ht_prev = (entry_ptr); \
- (entry_ptr)->ht_next = ((cache_ptr)->index)[k]; \
- (entry_ptr)->ht_prev = NULL; \
- ((cache_ptr)->index)[k] = (entry_ptr); \
- H5C_TEST__POST_HT_SHIFT_TO_FRONT(cache_ptr, entry_ptr, k) \
+ break; \
} \
+ (entry_ptr) = (entry_ptr)->ht_next; \
} \
}
@@ -557,11 +551,10 @@ H5TEST_DLLVAR const int32_t max_indices[NUMBER_OF_ENTRY_TYPES];
H5TEST_DLLVAR const size_t entry_sizes[NUMBER_OF_ENTRY_TYPES];
H5TEST_DLLVAR const haddr_t base_addrs[NUMBER_OF_ENTRY_TYPES];
H5TEST_DLLVAR const haddr_t alt_base_addrs[NUMBER_OF_ENTRY_TYPES];
-H5TEST_DLLVAR const char * entry_type_names[NUMBER_OF_ENTRY_TYPES];
/* callback table extern */
-H5TEST_DLLVAR const H5C_class_t types[NUMBER_OF_ENTRY_TYPES];
+H5TEST_DLLVAR const H5C_class_t *types[NUMBER_OF_ENTRY_TYPES];
#ifdef __cplusplus
@@ -648,7 +641,7 @@ H5TEST_DLL void resize_entry(H5F_t * file_ptr,
size_t new_size,
hbool_t in_cache);
-H5TEST_DLL H5F_t *setup_cache(size_t max_cache_size, size_t min_clean_size);
+H5TEST_DLL H5F_t *setup_cache(size_t max_cache_size, size_t min_clean_size, unsigned paged);
H5TEST_DLL void row_major_scan_forward(H5F_t * file_ptr,
int32_t max_index,
diff --git a/test/cache_image.c b/test/cache_image.c
new file mode 100644
index 0000000..58b0b8f
--- /dev/null
+++ b/test/cache_image.c
@@ -0,0 +1,8081 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/* Programmer: John Mainzer
+ * 7/13/15
+ *
+ * This file contains tests specific to the cache image
+ * feature implemented in H5C.c
+ */
+#include "cache_common.h"
+#include "genall5.h"
+
+/* global variable declarations: */
+
+
+const char *FILENAMES[] = {
+ "cache_image_test",
+ NULL
+};
+
+/* local utility function declarations */
+static void create_datasets(hid_t file_id, int min_dset, int max_dset);
+static void delete_datasets(hid_t file_id, int min_dset, int max_dset);
+static void open_hdf5_file(hbool_t create_file, hbool_t mdci_sbem_expected,
+ hbool_t read_only, hbool_t set_mdci_fapl, hbool_t config_fsm,
+ hbool_t set_eoc, const char *hdf_file_name, unsigned cache_image_flags,
+ hid_t *file_id_ptr, H5F_t **file_ptr_ptr, H5C_t **cache_ptr_ptr);
+static void attempt_swmr_open_hdf5_file(hbool_t create_file,
+ hbool_t set_mdci_fapl, const char *hdf_file_name);
+static void verify_datasets(hid_t file_id, int min_dset, int max_dset);
+
+/* local test function declarations */
+static unsigned check_cache_image_ctl_flow_1(void);
+static unsigned check_cache_image_ctl_flow_2(void);
+static unsigned check_cache_image_ctl_flow_3(void);
+static unsigned check_cache_image_ctl_flow_4(void);
+static unsigned check_cache_image_ctl_flow_5(void);
+static unsigned check_cache_image_ctl_flow_6(void);
+
+static unsigned cache_image_smoke_check_1(void);
+static unsigned cache_image_smoke_check_2(void);
+static unsigned cache_image_smoke_check_3(void);
+static unsigned cache_image_smoke_check_4(void);
+static unsigned cache_image_smoke_check_5(void);
+static unsigned cache_image_smoke_check_6(void);
+
+static unsigned cache_image_api_error_check_1(void);
+static unsigned cache_image_api_error_check_2(void);
+static unsigned cache_image_api_error_check_3(void);
+static unsigned cache_image_api_error_check_4(void);
+
+static unsigned get_free_sections_test(void);
+static unsigned evict_on_close_test(void);
+
+
+/****************************************************************************/
+/***************************** Utility Functions ****************************/
+/****************************************************************************/
+
+/*-------------------------------------------------------------------------
+ * Function: create_datasets()
+ *
+ * Purpose: If pass is TRUE on entry, create the specified datasets
+ * in the indicated file.
+ *
+ * Datasets and their contents must be well known, as we
+ * will verify that they contain the expected data later.
+ *
+ * On failure, set pass to FALSE, and set failure_mssg
+ * to point to an appropriate failure message.
+ *
+ * Do nothing if pass is FALSE on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 7/15/15
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#define CHUNK_SIZE 10
+#define DSET_SIZE (40 * CHUNK_SIZE)
+#define MAX_NUM_DSETS 256
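+/* Each dataset is a DSET_SIZE x DSET_SIZE (400 x 400) array of 32-bit
+ * integers, chunked into CHUNK_SIZE x CHUNK_SIZE (10 x 10) tiles, for a
+ * total of 1600 chunks per dataset.
+ */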
+
+static void
+create_datasets(hid_t file_id, int min_dset, int max_dset)
+{
+ const char * fcn_name = "create_datasets()";
+ char dset_name[64];
+ hbool_t show_progress = FALSE;
+ hbool_t valid_chunk;
+ hbool_t verbose = FALSE;
+ int cp = 0;
+ int i, j, k, l, m;
+ int data_chunk[CHUNK_SIZE][CHUNK_SIZE];
+ herr_t status;
+ hid_t dataspace_id = -1;
+ hid_t filespace_ids[MAX_NUM_DSETS];
+ hid_t memspace_id = -1;
+ hid_t dataset_ids[MAX_NUM_DSETS];
+ hid_t properties = -1;
+ hsize_t dims[2];
+ hsize_t a_size[2];
+ hsize_t offset[2];
+ hsize_t chunk_size[2];
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ HDassert(0 <= min_dset);
+ HDassert(min_dset <= max_dset);
+ HDassert(max_dset < MAX_NUM_DSETS);
+
+ /* create the datasets */
+
+ if ( pass ) {
+
+ i = min_dset;
+
+ while ( ( pass ) && ( i <= max_dset ) )
+ {
+ /* create a dataspace for the chunked dataset */
+ dims[0] = DSET_SIZE;
+ dims[1] = DSET_SIZE;
+ dataspace_id = H5Screate_simple(2, dims, NULL);
+
+ if ( dataspace_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Screate_simple() failed.";
+ }
+
+ /* set the dataset creation plist to specify that the raw data is
+             * to be partitioned into 10X10 element chunks.
+ */
+
+ if ( pass ) {
+
+ chunk_size[0] = CHUNK_SIZE;
+ chunk_size[1] = CHUNK_SIZE;
+ properties = H5Pcreate(H5P_DATASET_CREATE);
+
+ if ( properties < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pcreate() failed.";
+ }
+ }
+
+ if ( pass ) {
+
+ if ( H5Pset_chunk(properties, 2, chunk_size) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pset_chunk() failed.";
+ }
+ }
+
+ /* create the dataset */
+ if ( pass ) {
+
+ sprintf(dset_name, "/dset%03d", i);
+ dataset_ids[i] = H5Dcreate2(file_id, dset_name, H5T_STD_I32BE,
+ dataspace_id, H5P_DEFAULT,
+ properties, H5P_DEFAULT);
+
+ if ( dataset_ids[i] < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Dcreate() failed.";
+ }
+ }
+
+ /* get the file space ID */
+ if ( pass ) {
+
+ filespace_ids[i] = H5Dget_space(dataset_ids[i]);
+
+ if ( filespace_ids[i] < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Dget_space() failed.";
+ }
+ }
+
+ i++;
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ /* create the mem space to be used to read and write chunks */
+ if ( pass ) {
+
+ dims[0] = CHUNK_SIZE;
+ dims[1] = CHUNK_SIZE;
+ memspace_id = H5Screate_simple(2, dims, NULL);
+
+ if ( memspace_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Screate_simple() failed.";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ /* select in memory hyperslab */
+ if ( pass ) {
+
+ offset[0] = 0; /*offset of hyperslab in memory*/
+ offset[1] = 0;
+ a_size[0] = CHUNK_SIZE; /*size of hyperslab*/
+ a_size[1] = CHUNK_SIZE;
+ status = H5Sselect_hyperslab(memspace_id, H5S_SELECT_SET, offset, NULL,
+ a_size, NULL);
+
+ if ( status < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Sselect_hyperslab() failed.";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ /* initialize all datasets on a round robin basis */
+ i = 0;
+ while ( ( pass ) && ( i < DSET_SIZE ) )
+ {
+ j = 0;
+ while ( ( pass ) && ( j < DSET_SIZE ) )
+ {
+ m = min_dset;
+ while ( ( pass ) && ( m <= max_dset ) )
+ {
+ /* initialize the slab */
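+                /* each element encodes its coordinates:  value =
+                 * (m * DSET_SIZE * DSET_SIZE) + ((i + k) * DSET_SIZE) + (j + l),
+                 * i.e. dataset index, then row, then column -- the read back
+                 * and validation pass below recomputes the expected value
+                 * from these indices.
+                 */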
+ for ( k = 0; k < CHUNK_SIZE; k++ )
+ {
+ for ( l = 0; l < CHUNK_SIZE; l++ )
+ {
+ data_chunk[k][l] = (DSET_SIZE * DSET_SIZE * m) +
+ (DSET_SIZE * (i + k)) + j + l;
+ }
+ }
+
+ /* select on disk hyperslab */
+ offset[0] = (hsize_t)i; /*offset of hyperslab in file*/
+ offset[1] = (hsize_t)j;
+ a_size[0] = CHUNK_SIZE; /*size of hyperslab*/
+ a_size[1] = CHUNK_SIZE;
+ status = H5Sselect_hyperslab(filespace_ids[m], H5S_SELECT_SET,
+ offset, NULL, a_size, NULL);
+
+ if ( status < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "disk H5Sselect_hyperslab() failed.";
+ }
+
+ /* write the chunk to file */
+ status = H5Dwrite(dataset_ids[m], H5T_NATIVE_INT, memspace_id,
+ filespace_ids[m], H5P_DEFAULT, data_chunk);
+
+ if ( status < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Dwrite() failed.";
+ }
+ m++;
+ }
+ j += CHUNK_SIZE;
+ }
+
+ i += CHUNK_SIZE;
+ }
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ /* read data from datasets and validate it */
+ i = 0;
+ while ( ( pass ) && ( i < DSET_SIZE ) )
+ {
+ j = 0;
+ while ( ( pass ) && ( j < DSET_SIZE ) )
+ {
+ m = min_dset;
+ while ( ( pass ) && ( m <= max_dset ) )
+ {
+
+ /* select on disk hyperslab */
+ offset[0] = (hsize_t)i; /* offset of hyperslab in file */
+ offset[1] = (hsize_t)j;
+ a_size[0] = CHUNK_SIZE; /* size of hyperslab */
+ a_size[1] = CHUNK_SIZE;
+ status = H5Sselect_hyperslab(filespace_ids[m], H5S_SELECT_SET,
+ offset, NULL, a_size, NULL);
+
+ if ( status < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "disk hyperslab create failed.";
+ }
+
+ /* read the chunk from file */
+ if ( pass ) {
+
+ status = H5Dread(dataset_ids[m], H5T_NATIVE_INT,
+ memspace_id, filespace_ids[m],
+ H5P_DEFAULT, data_chunk);
+
+ if ( status < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "disk hyperslab create failed.";
+ }
+ }
+
+ /* validate the slab */
+ if ( pass ) {
+
+ valid_chunk = TRUE;
+ for ( k = 0; k < CHUNK_SIZE; k++ )
+ {
+ for ( l = 0; l < CHUNK_SIZE; l++ )
+ {
+ if ( data_chunk[k][l]
+ !=
+ ((DSET_SIZE * DSET_SIZE * m) +
+ (DSET_SIZE * (i + k)) + j + l) ) {
+
+ valid_chunk = FALSE;
+
+ if ( verbose ) {
+
+ HDfprintf(stdout,
+ "data_chunk[%0d][%0d] = %0d, expect %0d.\n",
+ k, l, data_chunk[k][l],
+ ((DSET_SIZE * DSET_SIZE * m) +
+ (DSET_SIZE * (i + k)) + j + l));
+ HDfprintf(stdout,
+ "m = %d, i = %d, j = %d, k = %d, l = %d\n",
+ m, i, j, k, l);
+ }
+ }
+ }
+ }
+
+ if ( ! valid_chunk ) {
+
+ pass = FALSE;
+ failure_mssg = "slab validation failed.";
+
+ if ( verbose ) {
+
+ fprintf(stdout,
+ "Chunk (%0d, %0d) in /dset%03d is invalid.\n",
+ i, j, m);
+ }
+ }
+ }
+ m++;
+ }
+ j += CHUNK_SIZE;
+ }
+ i += CHUNK_SIZE;
+ }
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ /* close the file spaces */
+ i = min_dset;
+ while ( ( pass ) && ( i <= max_dset ) )
+ {
+ if ( H5Sclose(filespace_ids[i]) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Sclose() failed.";
+ }
+ i++;
+ }
+
+
+ /* close the datasets */
+ i = min_dset;
+ while ( ( pass ) && ( i <= max_dset ) )
+ {
+ if ( H5Dclose(dataset_ids[i]) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Dclose() failed.";
+ }
+ i++;
+ }
+
+ /* close the mem space */
+ if ( pass ) {
+
+ if ( H5Sclose(memspace_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Sclose(memspace_id) failed.";
+ }
+ }
+
+ return;
+
+} /* create_datasets() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: delete_datasets()
+ *
+ * Purpose: If pass is TRUE on entry, verify and then delete the
+ * dataset(s) indicated by min_dset and max_dset in the
+ * indicated file.
+ *
+ *		Datasets and their contents must be well known, as we
+ * will verify that they contain the expected data later.
+ *
+ * On failure, set pass to FALSE, and set failure_mssg
+ * to point to an appropriate failure message.
+ *
+ * Do nothing if pass is FALSE on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 10/31/16
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+delete_datasets(hid_t file_id, int min_dset, int max_dset)
+{
+ const char * fcn_name = "delete_datasets()";
+ char dset_name[64];
+ hbool_t show_progress = FALSE;
+ int cp = 0;
+ int i;
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ HDassert(0 <= min_dset);
+ HDassert(min_dset <= max_dset);
+ HDassert(max_dset < MAX_NUM_DSETS);
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ /* first, verify the contents of the target dataset(s) */
+ verify_datasets(file_id, min_dset, max_dset);
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ /* now delete the target datasets */
+ if ( pass ) {
+
+ i = min_dset;
+
+ while ( ( pass ) && ( i <= max_dset ) )
+ {
+ sprintf(dset_name, "/dset%03d", i);
+
+ if ( H5Ldelete(file_id, dset_name, H5P_DEFAULT) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5Ldelete() failed.";
+ }
+
+ i++;
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ return;
+
+} /* delete_datasets() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: open_hdf5_file()
+ *
+ * Purpose: If pass is true on entry, create or open the specified HDF5
+ *		file and test to see if it has a metadata cache image superblock
+ * extension message.
+ *
+ * Set pass to FALSE and issue a suitable failure
+ * message if either the file contains a metadata cache image
+ *		superblock extension and mdci_sbem_expected is FALSE, or
+ *		vice versa.
+ *
+ * If mdci_sbem_expected is TRUE, also verify that the metadata
+ * cache has been advised of this.
+ *
+ * If read_only is TRUE, open the file read only. Otherwise
+ * open the file read/write.
+ *
+ * If set_mdci_fapl is TRUE, set the metadata cache image
+ * FAPL entry when opening the file, and verify that the
+ * metadata cache is notified.
+ *
+ *		If config_fsm is TRUE, set up the persistent free space
+ * manager. Note that this flag may only be set if
+ * create_file is also TRUE.
+ *
+ * Return pointers to the cache data structure and file data
+ * structures.
+ *
+ * On failure, set pass to FALSE, and set failure_mssg
+ * to point to an appropriate failure message.
+ *
+ * Do nothing if pass is FALSE on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 7/14/15
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+open_hdf5_file(hbool_t create_file, hbool_t mdci_sbem_expected,
+ hbool_t read_only, hbool_t set_mdci_fapl, hbool_t config_fsm,
+ hbool_t set_eoc, const char *hdf_file_name, unsigned cache_image_flags,
+ hid_t *file_id_ptr, H5F_t ** file_ptr_ptr, H5C_t ** cache_ptr_ptr)
+{
+ const char * fcn_name = "open_hdf5_file()";
+ hbool_t show_progress = FALSE;
+ hbool_t verbose = FALSE;
+ int cp = 0;
+ hid_t fapl_id = -1;
+ hid_t fcpl_id = -1;
+ hid_t file_id = -1;
+ herr_t result;
+ H5F_t * file_ptr = NULL;
+ H5C_t * cache_ptr = NULL;
+ H5C_cache_image_ctl_t image_ctl;
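+    /* initializer order: version, generate_image, save_resize_status,
+     * entry_ageout -- the structure is overwritten below by
+     * H5Pget_mdc_image_config() before it is examined.
+     */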
+ H5AC_cache_image_config_t cache_image_config = {
+ H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION,
+ TRUE,
+ FALSE,
+ H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE};
+
+ if ( pass )
+ {
+ /* opening the file both read only and with a cache image
+ * requested is a contradiction. We resolve it by ignoring
+ * the cache image request silently.
+ */
+ if ( ( create_file && mdci_sbem_expected ) ||
+ ( create_file && read_only ) ||
+ ( config_fsm && !create_file ) ||
+ ( hdf_file_name == NULL ) ||
+ ( ( set_mdci_fapl ) && ( cache_image_flags == 0 ) ) ||
+ ( ( set_mdci_fapl ) &&
+ ( (cache_image_flags & ~H5C_CI__ALL_FLAGS) != 0 ) ) ||
+ ( file_id_ptr == NULL ) ||
+ ( file_ptr_ptr == NULL ) ||
+ ( cache_ptr_ptr == NULL ) ) {
+
+ failure_mssg =
+ "Bad param(s) on entry to open_hdf5_file().\n";
+ pass = FALSE;
+ } else if ( verbose ) {
+
+ HDfprintf(stdout, "%s: HDF file name = \"%s\".\n",
+ fcn_name, hdf_file_name);
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+    /* create a file access property list. */
+ if ( pass ) {
+
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+
+ if ( fapl_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pcreate() failed.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ /* call H5Pset_libver_bounds() on the fapl_id */
+ if ( pass ) {
+
+ if ( H5Pset_libver_bounds(fapl_id, H5F_LIBVER_LATEST,
+ H5F_LIBVER_LATEST) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pset_libver_bounds() failed.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ /* get metadata cache image config -- verify that it is the default */
+ if ( pass ) {
+
+ result = H5Pget_mdc_image_config(fapl_id, &cache_image_config);
+
+ if ( result < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pget_mdc_image_config() failed.\n";
+ }
+
+ if ( ( cache_image_config.version !=
+ H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION ) ||
+ ( cache_image_config.generate_image != FALSE ) ||
+ ( cache_image_config.save_resize_status != FALSE ) ||
+ ( cache_image_config.entry_ageout !=
+ H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE ) ) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected default cache image config.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ /* set metadata cache image fapl entry if indicated */
+ if ( ( pass ) && ( set_mdci_fapl ) ) {
+
+ /* set cache image config fields to taste */
+ cache_image_config.generate_image = TRUE;
+ cache_image_config.save_resize_status = FALSE;
+ cache_image_config.entry_ageout = H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE;
+
+ result = H5Pset_mdc_image_config(fapl_id, &cache_image_config);
+
+ if ( result < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pset_mdc_image_config() failed.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+    /* set up the persistent free space manager if indicated */
+ if ( ( pass ) && ( config_fsm ) ) {
+
+ fcpl_id = H5Pcreate(H5P_FILE_CREATE);
+
+ if ( fcpl_id <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pcreate(H5P_FILE_CREATE) failed.";
+ }
+ }
+
+ if ( ( pass ) && ( config_fsm ) ) {
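+        /* request the page file space strategy with persistent free space
+         * managers and a free space section threshold of 1.
+         */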
+ if(H5Pset_file_space_strategy(fcpl_id, H5F_FSPACE_STRATEGY_PAGE,
+ TRUE, (hsize_t)1) < 0) {
+ pass = FALSE;
+ failure_mssg = "H5Pset_file_space_strategy() failed.";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ /* set evict on close if indicated */
+ if ( ( pass ) && ( set_eoc ) ) {
+
+ if ( H5Pset_evict_on_close(fapl_id, TRUE) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pset_evict_on_close() failed.";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ /* open the file */
+ if ( pass ) {
+
+ if ( create_file ) {
+
+ if ( fcpl_id != -1 )
+
+ file_id = H5Fcreate(hdf_file_name, H5F_ACC_TRUNC,
+ fcpl_id, fapl_id);
+ else
+
+ file_id = H5Fcreate(hdf_file_name, H5F_ACC_TRUNC,
+ H5P_DEFAULT, fapl_id);
+
+ } else {
+
+ if ( read_only )
+
+ file_id = H5Fopen(hdf_file_name, H5F_ACC_RDONLY, fapl_id);
+
+ else
+
+ file_id = H5Fopen(hdf_file_name, H5F_ACC_RDWR, fapl_id);
+ }
+
+ if ( file_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fcreate() or H5Fopen() failed.\n";
+
+ } else {
+
+ file_ptr = (struct H5F_t *)H5I_object_verify(file_id, H5I_FILE);
+
+ if ( file_ptr == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "Can't get file_ptr.";
+
+ if ( verbose ) {
+ HDfprintf(stdout, "%s: Can't get file_ptr.\n", fcn_name);
+ }
+ }
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+    /* get a pointer to the file's internal data structure and then
+ * to the cache structure
+ */
+ if ( pass ) {
+
+ if ( file_ptr->shared->cache == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "can't get cache pointer(1).\n";
+
+ } else {
+
+ cache_ptr = file_ptr->shared->cache;
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ /* verify expected metadata cache status */
+
+ /* get the cache image control structure from the cache, and verify
+ * that it contains the expected values.
+ *
+ * Then set the flags in this structure to the specified value.
+ */
+ if ( pass ) {
+
+ if ( H5C_get_cache_image_config(cache_ptr, &image_ctl) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "error returned by H5C_get_cache_image_config().";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ if ( pass ) {
+
+ if ( set_mdci_fapl ) {
+
+ if ( read_only ) {
+
+ if ( ( image_ctl.version !=
+ H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION ) ||
+ ( image_ctl.generate_image != FALSE ) ||
+ ( image_ctl.save_resize_status != FALSE ) ||
+ ( image_ctl.entry_ageout !=
+ H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE ) ||
+ ( image_ctl.flags != H5C_CI__ALL_FLAGS ) ) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected image_ctl values(1).\n";
+ }
+ } else {
+
+ if ( ( image_ctl.version !=
+ H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION ) ||
+ ( image_ctl.generate_image != TRUE ) ||
+ ( image_ctl.save_resize_status != FALSE ) ||
+ ( image_ctl.entry_ageout !=
+ H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE ) ||
+ ( image_ctl.flags != H5C_CI__ALL_FLAGS ) ) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected image_ctl values(2).\n";
+ }
+ }
+ } else {
+
+ if ( ( image_ctl.version !=
+ H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION ) ||
+ ( image_ctl.generate_image != FALSE ) ||
+ ( image_ctl.save_resize_status != FALSE ) ||
+ ( image_ctl.entry_ageout !=
+ H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE ) ||
+ ( image_ctl.flags != H5C_CI__ALL_FLAGS ) ) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected image_ctl values(3).\n";
+ }
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ if ( ( pass ) && ( set_mdci_fapl ) ) {
+
+ image_ctl.flags = cache_image_flags;
+
+ if ( H5C_set_cache_image_config(file_ptr, cache_ptr, &image_ctl) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "error returned by H5C_set_cache_image_config().";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ if ( pass ) {
+
+ if ( cache_ptr->close_warning_received == TRUE ) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected value of close_warning_received.\n";
+ }
+
+ if ( mdci_sbem_expected ) {
+
+ if ( read_only ) {
+
+ if ( ( cache_ptr->load_image != TRUE ) ||
+ ( cache_ptr->delete_image != FALSE ) ) {
+
+ pass = FALSE;
+ failure_mssg = "mdci sb extension message not present?\n";
+ }
+ } else {
+
+ if ( ( cache_ptr->load_image != TRUE ) ||
+ ( cache_ptr->delete_image != TRUE ) ) {
+
+ pass = FALSE;
+ failure_mssg = "mdci sb extension message not present?\n";
+ }
+ }
+ } else {
+
+ if ( ( cache_ptr->load_image == TRUE ) ||
+ ( cache_ptr->delete_image == TRUE ) ) {
+
+ pass = FALSE;
+ failure_mssg = "mdci sb extension message present?\n";
+ }
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ if ( pass ) {
+
+ *file_id_ptr = file_id;
+ *file_ptr_ptr = file_ptr;
+ *cache_ptr_ptr = cache_ptr;
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d -- exiting.\n", fcn_name, cp++);
+
+ return;
+
+} /* open_hdf5_file() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: attempt_swmr_open_hdf5_file()
+ *
+ * Purpose: If pass is true on entry, attempt to create or open the
+ * specified HDF5 file with SWMR, and also with cache image
+ * creation if requested.
+ *
+ * In all cases, the attempted open or create should fail.
+ *
+ * Do nothing if pass is FALSE on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 7/14/15
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+attempt_swmr_open_hdf5_file(const hbool_t create_file,
+ const hbool_t set_mdci_fapl,
+ const char * hdf_file_name)
+{
+ const char * fcn_name = "attempt_swmr_open_hdf5_file()";
+ hbool_t show_progress = FALSE;
+ int cp = 0;
+ hid_t fapl_id = -1;
+ hid_t file_id = -1;
+ herr_t result;
+ H5AC_cache_image_config_t cache_image_config = {
+ H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION,
+ TRUE,
+ FALSE,
+ H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE};
+
+    /* create a file access property list. */
+ if ( pass ) {
+
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+
+ if ( fapl_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pcreate() failed.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ /* call H5Pset_libver_bounds() on the fapl_id */
+ if ( pass ) {
+
+ if ( H5Pset_libver_bounds(fapl_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST)
+ < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pset_libver_bounds() failed.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ /* set metadata cache image fapl entry if indicated */
+ if ( ( pass ) && ( set_mdci_fapl ) ) {
+
+ /* set cache image config fields to taste */
+ cache_image_config.generate_image = TRUE;
+ cache_image_config.save_resize_status = FALSE;
+ cache_image_config.entry_ageout = H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE;
+
+ result = H5Pset_mdc_image_config(fapl_id, &cache_image_config);
+
+ if ( result < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pset_mdc_image_config() failed.\n";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ /* open the file */
+ if ( pass ) {
+
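+        /* the SWMR create / open is expected to fail, so wrap the call in
+         * H5E_BEGIN_TRY / H5E_END_TRY to suppress the expected error stack
+         * output.
+         */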
+ if ( create_file ) {
+
+ H5E_BEGIN_TRY {
+ file_id = H5Fcreate(hdf_file_name, H5F_ACC_TRUNC | H5F_ACC_SWMR_WRITE,
+ H5P_DEFAULT, fapl_id);
+ } H5E_END_TRY;
+ } else {
+
+ H5E_BEGIN_TRY {
+ file_id = H5Fopen(hdf_file_name, H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE, fapl_id);
+ } H5E_END_TRY;
+ }
+
+ if ( file_id >= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "SWMR H5Fcreate() or H5Fopen() succeeded.\n";
+
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ return;
+
+} /* attempt_swmr_open_hdf5_file() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: verify_datasets()
+ *
+ * Purpose: If pass is TRUE on entry, verify that the datasets in the
+ * file exist and contain the expected data.
+ *
+ * Note that these datasets were created by
+ * create_datasets() above. Thus any changes in that
+ * function must be reflected in this function, and
+ *		vice versa.
+ *
+ * On failure, set pass to FALSE, and set failure_mssg
+ * to point to an appropriate failure message.
+ *
+ * Do nothing if pass is FALSE on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 7/15/15
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+verify_datasets(hid_t file_id, int min_dset, int max_dset)
+{
+ const char * fcn_name = "verify_datasets()";
+ char dset_name[64];
+ hbool_t show_progress = FALSE;
+ hbool_t valid_chunk;
+ hbool_t verbose = FALSE;
+ int cp = 0;
+ int i, j, k, l, m;
+ int data_chunk[CHUNK_SIZE][CHUNK_SIZE];
+ herr_t status;
+ hid_t filespace_ids[MAX_NUM_DSETS];
+ hid_t memspace_id = -1;
+ hid_t dataset_ids[MAX_NUM_DSETS];
+ hsize_t dims[2];
+ hsize_t a_size[2];
+ hsize_t offset[2];
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ HDassert(0 <= min_dset);
+ HDassert(min_dset <= max_dset);
+ HDassert(max_dset < MAX_NUM_DSETS);
+
+ /* open the datasets */
+
+ if ( pass ) {
+
+ i = min_dset;
+
+ while ( ( pass ) && ( i <= max_dset ) )
+ {
+ /* open the dataset */
+ if ( pass ) {
+
+ sprintf(dset_name, "/dset%03d", i);
+ dataset_ids[i] = H5Dopen2(file_id, dset_name, H5P_DEFAULT);
+
+ if ( dataset_ids[i] < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Dopen2() failed.";
+ }
+ }
+
+ /* get the file space ID */
+ if ( pass ) {
+
+ filespace_ids[i] = H5Dget_space(dataset_ids[i]);
+
+ if ( filespace_ids[i] < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Dget_space() failed.";
+ }
+ }
+
+ i++;
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ /* create the mem space to be used to read and write chunks */
+ if ( pass ) {
+
+ dims[0] = CHUNK_SIZE;
+ dims[1] = CHUNK_SIZE;
+ memspace_id = H5Screate_simple(2, dims, NULL);
+
+ if ( memspace_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Screate_simple() failed.";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ /* select in memory hyperslab */
+ if ( pass ) {
+
+ offset[0] = 0; /*offset of hyperslab in memory*/
+ offset[1] = 0;
+ a_size[0] = CHUNK_SIZE; /*size of hyperslab*/
+ a_size[1] = CHUNK_SIZE;
+ status = H5Sselect_hyperslab(memspace_id, H5S_SELECT_SET, offset, NULL,
+ a_size, NULL);
+
+ if ( status < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Sselect_hyperslab() failed.";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+
+ /* read data from datasets and validate it */
+ i = 0;
+ while ( ( pass ) && ( i < DSET_SIZE ) )
+ {
+ j = 0;
+ while ( ( pass ) && ( j < DSET_SIZE ) )
+ {
+ m = min_dset;
+ while ( ( pass ) && ( m <= max_dset ) )
+ {
+
+ /* select on disk hyperslab */
+ offset[0] = (hsize_t)i; /* offset of hyperslab in file */
+ offset[1] = (hsize_t)j;
+ a_size[0] = CHUNK_SIZE; /* size of hyperslab */
+ a_size[1] = CHUNK_SIZE;
+ status = H5Sselect_hyperslab(filespace_ids[m], H5S_SELECT_SET,
+ offset, NULL, a_size, NULL);
+
+ if ( status < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "disk hyperslab create failed.";
+ }
+
+ /* read the chunk from file */
+ if ( pass ) {
+
+ status = H5Dread(dataset_ids[m], H5T_NATIVE_INT,
+ memspace_id, filespace_ids[m],
+ H5P_DEFAULT, data_chunk);
+
+ if ( status < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "disk hyperslab create failed.";
+ }
+ }
+
+ /* validate the slab */
+ if ( pass ) {
+
+ valid_chunk = TRUE;
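+                    /* expected value mirrors the formula used to initialize
+                     * the datasets in create_datasets() above.
+                     */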
+ for ( k = 0; k < CHUNK_SIZE; k++ )
+ {
+ for ( l = 0; l < CHUNK_SIZE; l++ )
+ {
+ if ( data_chunk[k][l]
+ !=
+ ((DSET_SIZE * DSET_SIZE * m) +
+ (DSET_SIZE * (i + k)) + j + l) ) {
+
+ valid_chunk = FALSE;
+
+ if ( verbose ) {
+
+ HDfprintf(stdout,
+ "data_chunk[%0d][%0d] = %0d, expect %0d.\n",
+ k, l, data_chunk[k][l],
+ ((DSET_SIZE * DSET_SIZE * m) +
+ (DSET_SIZE * (i + k)) + j + l));
+ HDfprintf(stdout,
+ "m = %d, i = %d, j = %d, k = %d, l = %d\n",
+ m, i, j, k, l);
+ }
+ }
+ }
+ }
+
+ if ( ! valid_chunk ) {
+
+ pass = FALSE;
+ failure_mssg = "slab validation failed.";
+
+ if ( verbose ) {
+
+ fprintf(stdout,
+ "Chunk (%0d, %0d) in /dset%03d is invalid.\n",
+ i, j, m);
+ }
+ }
+ }
+ m++;
+ }
+ j += CHUNK_SIZE;
+ }
+ i += CHUNK_SIZE;
+ }
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ /* close the file spaces */
+ i = min_dset;
+ while ( ( pass ) && ( i <= max_dset ) )
+ {
+ if ( H5Sclose(filespace_ids[i]) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Sclose() failed.";
+ }
+ i++;
+ }
+
+
+ /* close the datasets */
+ i = min_dset;
+ while ( ( pass ) && ( i <= max_dset ) )
+ {
+ if ( H5Dclose(dataset_ids[i]) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Dclose() failed.";
+ }
+ i++;
+ }
+
+ /* close the mem space */
+ if ( pass ) {
+
+ if ( H5Sclose(memspace_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Sclose(memspace_id) failed.";
+ }
+ }
+
+ return;
+
+} /* verify_datasets() */
+
+
+/****************************************************************************/
+/******************************* Test Functions *****************************/
+/****************************************************************************/
+
+/*-------------------------------------------------------------------------
+ * Function: check_cache_image_ctl_flow_1()
+ *
+ * Purpose: This test is one of a sequence of control flow tests intended
+ * to verify that control flow for the cache image feature works
+ * as expected.
+ *
+ * This test is an initial smoke check, so the sequence of
+ * operations is relatively simple. In particular, we are
+ * testing:
+ *
+ * i) Creation of file with cache image FAPL entry set
+ * and insertion of metadata cache image superblock
+ * message on file close.
+ *
+ * ii) Open of file with metadata cache image superblock
+ * message, transmission of message to metadata cache,
+ * and deletion of superblock message prior to close.
+ *
+ * Note that in all cases we are performing operations on the
+ * file. While this is the typical case, we must repeat this
+ * test without operations on the file.
+ *
+ * 1) Create a HDF5 file with the cache image FAPL entry.
+ *
+ * Verify that the cache is informed of the cache image
+ * FAPL entry.
+ *
+ * Set flags forcing creation of metadata cache image
+ * super block extension message only.
+ *
+ * 2) Create some datasets in the file.
+ *
+ * 3) Close the file.
+ *
+ * 4) Open the file.
+ *
+ * Verify that the metadata cache is instructed
+ * to load the metadata cache image, and that the
+ * supplied address and length are HADDR_UNDEF and
+ * zero respectively. Note that these values indicate
+ * that the metadata image block doesn't exist.
+ *
+ * 5) Open a dataset.
+ *
+ * Verify that the metadata cache image superblock
+ * extension message has been deleted.
+ *
+ * 6) Close the file.
+ *
+ * 7) Open the file.
+ *
+ * Verify that the file doesn't contain a metadata cache
+ * image superblock extension message.
+ *
+ * 8) Close the file.
+ *
+ * 9) Delete the file.
+ *
+ * Return:      unsigned (0 on success, 1 on failure)
+ *
+ * Programmer: John Mainzer
+ * 7/15/15
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+check_cache_image_ctl_flow_1(void)
+{
+ const char * fcn_name = "check_cache_image_ctl_flow_1()";
+ char filename[512];
+ hbool_t show_progress = FALSE;
+ hid_t file_id = -1;
+ H5F_t *file_ptr = NULL;
+ H5C_t *cache_ptr = NULL;
+ int cp = 0;
+
+ TESTING("metadata cache image control flow test 1");
+
+ pass = TRUE;
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* setup the file name */
+ if ( pass ) {
+
+ if ( h5_fixname(FILENAMES[0], H5P_DEFAULT, filename, sizeof(filename))
+ == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "h5_fixname() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 1) Create a HDF5 file with the cache image FAPL entry.
+ *
+ * Verify that the cache is informed of the cache image FAPL entry.
+ *
+ * Set flags forcing creation of metadata cache image super block
+ * extension message only.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ TRUE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ TRUE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__GEN_MDCI_SBE_MESG,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 2) Create some datasets in the file. */
+
+ if ( pass ) {
+
+ create_datasets(file_id, 0, 5);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 3) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 4) Open the file.
+ *
+ * Verify that the metadata cache is instructed to load the
+ * metadata cache image, and that the supplied address and length
+ * are HADDR_UNDEF and zero respectively. Note that these values
+ * indicate that the metadata image block doesn't exist.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ 0,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 5) Open and close a dataset.
+ *
+ * Verify that the metadata cache image superblock
+ * extension message has been deleted.
+ */
+
+ if ( pass ) {
+
+ verify_datasets(file_id, 0, 5);
+ }
+
+ if ( pass ) {
+
+        /* TODO: decide how to verify that the superblock extension has been
+         * deleted, and whether it is necessary to verify this directly.
+ */
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 6) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 7) Open the file.
+ *
+ * Verify that the file doesn't contain a metadata cache image
+ * superblock extension message.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ 0,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 8) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 9) Delete the file */
+
+ if ( pass ) {
+
+ if ( HDremove(filename) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "HDremove() failed.\n";
+ }
+ }
+
+ if ( pass ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass )
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n",
+ FUNC, failure_mssg);
+
+ return !pass;
+
+} /* check_cache_image_ctl_flow_1() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_cache_image_ctl_flow_2()
+ *
+ * Purpose: This test is one of a sequence of control flow tests intended
+ * to verify that control flow for the cache image feature works
+ * as expected.
+ *
+ * This test is an initial smoke check, so the sequence of
+ * operations is relatively simple. In particular, we are
+ * testing:
+ *
+ * i) Creation of file with cache image FAPL entry set
+ * and insertion of metadata cache image superblock
+ * message on file close.
+ *
+ * ii) Open of file with metadata cache image superblock
+ * message, transmission of message to metadata cache,
+ * and deletion of superblock message prior to close.
+ *
+ * Note that unlike the previous test, no operations are performed
+ * on the file. As a result of this, the metadata cache image
+ * message is not processed until the metadata cache receives
+ * the file close warning. (Under normal circumstances, it is
+ * processed as part of the first protect operation after the
+ * superblock is loaded.)
+ *
+ *		In this particular test, we perform the following operations:
+ *
+ * 1) Create a HDF5 file with the cache image FAPL entry.
+ *
+ * Verify that the cache is informed of the cache image
+ * FAPL entry.
+ *
+ * Set flags forcing creation of metadata cache image
+ * super block extension message only.
+ *
+ * 2) Close the file.
+ *
+ * 3) Open the file.
+ *
+ * Verify that the metadata cache is instructed
+ * to load the metadata cache image, and that the
+ * supplied address and length are HADDR_UNDEF and
+ * zero respectively. Note that these values indicate
+ * that the metadata image block doesn't exist.
+ *
+ *		 4) Close the file.
+ *
+ *		 5) Open the file.
+ *
+ * Verify that the file doesn't contain a metadata cache
+ * image superblock extension message.
+ *
+ *		 6) Close the file.
+ *
+ *		 7) Delete the file.
+ *
+ * Return:      unsigned (0 on success, 1 on failure)
+ *
+ * Programmer: John Mainzer
+ * 7/15/15
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+check_cache_image_ctl_flow_2(void)
+{
+ const char * fcn_name = "check_cache_image_ctl_flow_2()";
+ char filename[512];
+ hbool_t show_progress = FALSE;
+ hid_t file_id = -1;
+ H5F_t *file_ptr = NULL;
+ H5C_t *cache_ptr = NULL;
+ int cp = 0;
+
+ TESTING("metadata cache image control flow test 2");
+
+ pass = TRUE;
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* setup the file name */
+ if ( pass ) {
+
+ if ( h5_fixname(FILENAMES[0], H5P_DEFAULT, filename, sizeof(filename))
+ == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "h5_fixname() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 1) Create a HDF5 file with the cache image FAPL entry.
+ *
+ * Verify that the cache is informed of the cache image FAPL entry.
+ *
+ * Set flags forcing creation of metadata cache image super block
+ * extension message only.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ TRUE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ TRUE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__GEN_MDCI_SBE_MESG,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 2) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 3) Open the file.
+ *
+ * Verify that the metadata cache is instructed to load the
+ * metadata cache image, and that the supplied address and length
+ * are HADDR_UNDEF and zero respectively. Note that these values
+ * indicate that the metadata image block doesn't exist.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ 0,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 4) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 5) Open the file.
+ *
+ * Verify that the file doesn't contain a metadata cache image
+ * superblock extension message.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ 0,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 6) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 7) Delete the file */
+
+ if ( pass ) {
+
+ if ( HDremove(filename) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "HDremove() failed.\n";
+ }
+ }
+
+ if ( pass ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass )
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n",
+ FUNC, failure_mssg);
+
+ return !pass;
+
+} /* check_cache_image_ctl_flow_2() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_cache_image_ctl_flow_3()
+ *
+ * Purpose: This test is one of a sequence of control flow tests intended
+ * to verify that control flow for the cache image feature works
+ * as expected.
+ *
+ * The objectives of this test are to:
+ *
+ * i) Test operation of the metadata cache image FAPL
+ * entry set on open of an existing file. This
+ * should result in the insertion of a metadata
+ * cache image superblock message on file close.
+ *
+ * ii) Test operation of the metadata cache image super
+ * block extension message when it appears in a file
+ * that is opened READ ONLY.
+ *
+ * Note that in all cases we are performing operations on the
+ * file between file open and close. While this is the
+ * typical case, we must repeat this test without operations
+ * on the file.
+ *
+ * 1) Create a HDF5 file WITHOUT the cache image FAPL entry.
+ *
+ * Verify that the cache is NOT informed of the cache image
+ * FAPL entry.
+ *
+ * 2) Close the file.
+ *
+ * 3) Open the file WITH the cache image FAPL entry.
+ *
+ * Verify that the cache is informed of the cache image
+ * FAPL entry.
+ *
+ * Set flags forcing creation of metadata cache image
+ * super block extension message only.
+ *
+ * 4) Create some datasets.
+ *
+ * 5) Close the file.
+ *
+ * 6) Open the file READ ONLY.
+ *
+ * Verify that the metadata cache is instructed
+ * to load the metadata cache image, and that the
+ * supplied address and length are HADDR_UNDEF and
+ * zero respectively. Note that these values indicate
+ * that the metadata image block doesn't exist.
+ *
+ * 7) Verify the contents of the datasets.
+ *
+ * 8) Close the file.
+ *
+ * 9) Open the file READ/WRITE.
+ *
+ * Verify that the metadata cache is instructed
+ * to load the metadata cache image, and that the
+ * supplied address and length are HADDR_UNDEF and
+ * zero respectively. Note that these values indicate
+ * that the metadata image block doesn't exist.
+ *
+ * 10) Verify the contents of the datasets.
+ *
+ * 11) Close the file.
+ *
+ * 12) Open the file
+ *
+ * Verify that the file doesn't contain a metadata cache
+ * image superblock extension message.
+ *
+ * 13) Close the file.
+ *
+ * 14) Delete the file.
+ *
+ * Return:      unsigned (0 on success, 1 on failure)
+ *
+ * Programmer: John Mainzer
+ * 7/16/15
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+check_cache_image_ctl_flow_3(void)
+{
+ const char * fcn_name = "check_cache_image_ctl_flow_3()";
+ char filename[512];
+ hbool_t show_progress = FALSE;
+ hid_t file_id = -1;
+ H5F_t *file_ptr = NULL;
+ H5C_t *cache_ptr = NULL;
+ int cp = 0;
+
+ TESTING("metadata cache image control flow test 3");
+
+ pass = TRUE;
+
+ if ( show_progress ) /* 0 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* setup the file name */
+ if ( pass ) {
+
+ if ( h5_fixname(FILENAMES[0], H5P_DEFAULT, filename, sizeof(filename))
+ == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "h5_fixname() failed.\n";
+ }
+ }
+
+ if ( show_progress ) /* 1 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 1) Create a HDF5 file WITHOUT the cache image FAPL entry.
+ *
+ * Verify that the cache is NOT informed of the cache image
+ * FAPL entry.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ TRUE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ 0,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress ) /* 2 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 2) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress ) /* 3 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 3) Open the file WITH the cache image FAPL entry.
+ *
+ * Verify that the cache is informed of the cache image FAPL entry.
+ *
+ * Set flags forcing creation of metadata cache image super block
+ * extension message only.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ TRUE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__GEN_MDCI_SBE_MESG,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress ) /* 4 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 4) Create some datasets. */
+
+ if ( pass ) {
+
+ create_datasets(file_id, 0, 5);
+ }
+
+ if ( show_progress ) /* 5 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 5) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress ) /* 6 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 6) Open the file READ ONLY.
+ *
+ * Verify that the metadata cache is instructed
+ * to load the metadata cache image, and that the
+ * supplied address and length are HADDR_UNDEF and
+ * zero respectively. Note that these values indicate
+ * that the metadata image block doesn't exist.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ TRUE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ 0,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress ) /* 7 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 7) Verify the contents of the datasets. */
+
+ if ( pass ) {
+
+ verify_datasets(file_id, 0, 5);
+ }
+
+ if ( show_progress ) /* 8 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 8) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress ) /* 9 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 9) Open the file READ/WRITE.
+ *
+ * Verify that the metadata cache is instructed
+ * to load the metadata cache image, and that the
+ * supplied address and length are HADDR_UNDEF and
+ * zero respectively. Note that these values indicate
+ * that the metadata image block doesn't exist.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ 0,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress ) /* 10 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 10) Verify the contents of the datasets. */
+
+ if ( pass ) {
+
+ verify_datasets(file_id, 0, 5);
+ }
+
+ if ( show_progress ) /* 11 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 11) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress ) /* 12 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 12) Open the file
+ *
+ * Verify that the file doesn't contain a metadata cache
+ * image superblock extension message.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ 0,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress ) /* 13 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 13) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress ) /* 14 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 14) Delete the file. */
+
+ if ( pass ) {
+
+ if ( HDremove(filename) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "HDremove() failed.\n";
+ }
+ }
+
+
+ if ( pass ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass )
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n",
+ FUNC, failure_mssg);
+
+ return !pass;
+
+} /* check_cache_image_ctl_flow_3() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_cache_image_ctl_flow_4()
+ *
+ * Purpose: This test is one of a sequence of control flow tests intended
+ * to verify that control flow for the cache image feature works
+ * as expected.
+ *
+ * The objectives of this test are to:
+ *
+ * i) Test operation of the metadata cache image FAPL
+ * entry set on open of an existing file. This
+ * should result in the insertion of a metadata
+ * cache image superblock message on file close.
+ *
+ * ii) Test operation of the metadata cache image super
+ * block extension message when it appears in a file
+ * that is opened READ ONLY.
+ *
+ * In this test we avoid all file access beyond file open
+ * and close.
+ *
+ * 1) Create a HDF5 file WITHOUT the cache image FAPL entry.
+ *
+ * Verify that the cache is NOT informed of the cache image
+ * FAPL entry.
+ *
+ * 2) Close the file.
+ *
+ * 3) Open the file WITH the cache image FAPL entry.
+ *
+ * Verify that the cache is informed of the cache image
+ * FAPL entry.
+ *
+ * Set flags forcing creation of metadata cache image
+ * super block extension message only.
+ *
+ * 4) Close the file.
+ *
+ * 5) Open the file READ ONLY.
+ *
+ * Verify that the metadata cache is instructed
+ * to load the metadata cache image, and that the
+ * supplied address and length are HADDR_UNDEF and
+ * zero respectively. Note that these values indicate
+ * that the metadata image block doesn't exist.
+ *
+ * 6) Close the file.
+ *
+ * 7) Open the file READ/WRITE.
+ *
+ * Verify that the metadata cache is instructed
+ * to load the metadata cache image, and that the
+ * supplied address and length are HADDR_UNDEF and
+ * zero respectively. Note that these values indicate
+ * that the metadata image block doesn't exist.
+ *
+ * 8) Close the file.
+ *
+ * 9) Open the file
+ *
+ * Verify that the file doesn't contain a metadata cache
+ * image superblock extension message.
+ *
+ * 10) Close the file.
+ *
+ * 11) Delete the file.
+ *
+ * Return:      unsigned (0 on success, 1 on failure)
+ *
+ * Programmer: John Mainzer
+ * 7/16/15
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+check_cache_image_ctl_flow_4(void)
+{
+ const char * fcn_name = "check_cache_image_ctl_flow_4()";
+ char filename[512];
+ hbool_t show_progress = FALSE;
+ hid_t file_id = -1;
+ H5F_t *file_ptr = NULL;
+ H5C_t *cache_ptr = NULL;
+ int cp = 0;
+
+ TESTING("metadata cache image control flow test 4");
+
+ pass = TRUE;
+
+ if ( show_progress ) /* 0 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* setup the file name */
+ if ( pass ) {
+
+ if ( h5_fixname(FILENAMES[0], H5P_DEFAULT, filename, sizeof(filename))
+ == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "h5_fixname() failed.\n";
+ }
+ }
+
+ if ( show_progress ) /* 1 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 1) Create a HDF5 file WITHOUT the cache image FAPL entry.
+ *
+ * Verify that the cache is NOT informed of the cache image
+ * FAPL entry.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ TRUE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ 0,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress ) /* 2 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 2) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress ) /* 3 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 3) Open the file WITH the cache image FAPL entry.
+ *
+ * Verify that the cache is informed of the cache image FAPL entry.
+ *
+ * Set flags forcing creation of metadata cache image super block
+ * extension message only.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ TRUE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__GEN_MDCI_SBE_MESG,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress ) /* 4 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 4) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress ) /* 5 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 5) Open the file READ ONLY.
+ *
+ * Verify that the metadata cache is instructed
+ * to load the metadata cache image, and that the
+ * supplied address and length are HADDR_UNDEF and
+ * zero respectively. Note that these values indicate
+ * that the metadata image block doesn't exist.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ TRUE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ 0,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress ) /* 6 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 6) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress ) /* 7 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 7) Open the file READ/WRITE.
+ *
+ * Verify that the metadata cache is instructed
+ * to load the metadata cache image, and that the
+ * supplied address and length are HADDR_UNDEF and
+ * zero respectively. Note that these values indicate
+ * that the metadata image block doesn't exist.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ 0,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress ) /* 8 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 8) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress ) /* 9 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 9) Open the file
+ *
+ * Verify that the file doesn't contain a metadata cache
+ * image superblock extension message.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ 0,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress ) /* 10 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 10) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress ) /* 11 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 11) Delete the file. */
+
+ if ( pass ) {
+
+ if ( HDremove(filename) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "HDremove() failed.\n";
+ }
+ }
+
+
+ if ( pass ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass )
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n",
+ FUNC, failure_mssg);
+
+ return !pass;
+
+} /* check_cache_image_ctl_flow_4() */
+
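+/* For context: the "cache image FAPL entry" exercised by these control
+ * flow tests is set via the open_hdf5_file() test helper.  In application
+ * code the same configuration would typically be made with the public
+ * H5Pset_mdc_image_config() call.  A minimal sketch, assuming HDF5 1.10 or
+ * later with default entry ageout (the fapl_id / file_id names are
+ * illustrative only):
+ *
+ *     H5AC_cache_image_config_t config;
+ *     hid_t fapl_id, file_id;
+ *
+ *     config.version            = H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION;
+ *     config.generate_image     = TRUE;
+ *     config.save_resize_status = FALSE;
+ *     config.entry_ageout       = H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE;
+ *
+ *     fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+ *     H5Pset_mdc_image_config(fapl_id, &config);
+ *     file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+ *
+ * As the tests above and below verify, the metadata cache image superblock
+ * extension message is then inserted on file close.
+ */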
+
+/*-------------------------------------------------------------------------
+ * Function: check_cache_image_ctl_flow_5()
+ *
+ * Purpose: This test is one of a sequence of control flow tests intended
+ * to verify that control flow for the cache image feature works
+ * as expected.
+ *
+ * The objective of this test is to verify correct control flow
+ * when a file with a metadata cache image superblock extension
+ * message is opened with the metadata cache image FAPL entry.
+ *
+ * Note that in all cases we are performing operations on the
+ * file between file open and close. While this is the
+ * typical case, this test is repeated without operations on
+ * the file in check_cache_image_ctl_flow_6().
+ *
+ * 1) Create a HDF5 file with the cache image FAPL entry.
+ *
+ * Verify that the cache is informed of the cache image
+ * FAPL entry.
+ *
+ * Set flags forcing creation of metadata cache image
+ * super block extension message only.
+ *
+ * 2) Create some datasets.
+ *
+ * 3) Close the file.
+ *
+ * 4) Open the file WITH the cache image FAPL entry.
+ *
+ * Verify that the metadata cache is instructed
+ * to load the metadata cache image, and that the
+ * supplied address and length are HADDR_UNDEF and
+ * zero respectively. Note that these values indicate
+ * that the metadata image block doesn't exist.
+ *
+ * Verify that the cache is informed of the cache image
+ * FAPL entry.
+ *
+ * Set flags forcing creation of metadata cache image
+ * super block extension message only.
+ *
+ * 5) Verify the contents of the datasets.
+ *
+ * 6) Close the file.
+ *
+ * 7) Open the file.
+ *
+ * Verify that the metadata cache is instructed
+ * to load the metadata cache image, and that the
+ * supplied address and length are HADDR_UNDEF and
+ * zero respectively. Note that these values indicate
+ * that the metadata image block doesn't exist.
+ *
+ * 8) Verify the contents of the datasets.
+ *
+ * 9) Close the file.
+ *
+ * 10) Delete the file.
+ *
+ * Return: unsigned (0 on success, 1 on failure)
+ *
+ * Programmer: John Mainzer
+ * 7/17/15
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+check_cache_image_ctl_flow_5(void)
+{
+ const char * fcn_name = "check_cache_image_ctl_flow_5()";
+ char filename[512];
+ hbool_t show_progress = FALSE;
+ hid_t file_id = -1;
+ H5F_t *file_ptr = NULL;
+ H5C_t *cache_ptr = NULL;
+ int cp = 0;
+
+ TESTING("metadata cache image control flow test 5");
+
+ pass = TRUE;
+
+ if ( show_progress ) /* 0 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* setup the file name */
+ if ( pass ) {
+
+ if ( h5_fixname(FILENAMES[0], H5P_DEFAULT, filename, sizeof(filename))
+ == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "h5_fixname() failed.\n";
+ }
+ }
+
+ if ( show_progress ) /* 1 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 1) Create a HDF5 file with the cache image FAPL entry.
+ *
+ * Verify that the cache is informed of the cache image FAPL entry.
+ *
+ * Set flags forcing creation of metadata cache image
+ * super block extension message only.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ TRUE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ TRUE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__GEN_MDCI_SBE_MESG,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress ) /* 2 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 2) Create some datasets. */
+
+ if ( pass ) {
+
+ create_datasets(file_id, 0, 5);
+ }
+
+ if ( show_progress ) /* 3 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 3) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress ) /* 4 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 4) Open the file WITH the cache image FAPL entry.
+ *
+ * Verify that the metadata cache is instructed
+ * to load the metadata cache image, and that the
+ * supplied address and length are HADDR_UNDEF and
+ * zero respectively. Note that these values indicate
+ * that the metadata image block doesn't exist.
+ *
+ * Verify that the cache is informed of the cache image
+ * FAPL entry.
+ *
+ * Set flags forcing creation of metadata cache image
+ * super block extension message only.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ TRUE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__GEN_MDCI_SBE_MESG,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress ) /* 5 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 5) Verify the contents of the datasets. */
+
+ if ( pass ) {
+
+ verify_datasets(file_id, 0, 5);
+ }
+
+ if ( show_progress ) /* 6 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 6) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress ) /* 7 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 7) Open the file.
+ *
+ * Verify that the metadata cache is instructed
+ * to load the metadata cache image, and that the
+ * supplied address and length are HADDR_UNDEF and
+ * zero respectively. Note that these values indicate
+ * that the metadata image block doesn't exist.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ 0,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress ) /* 8 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 8) Verify the contents of the datasets. */
+
+ if ( pass ) {
+
+ verify_datasets(file_id, 0, 5);
+ }
+
+ if ( show_progress ) /* 9 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 9) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress ) /* 10 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 10) Delete the file. */
+
+ if ( pass ) {
+
+ if ( HDremove(filename) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "HDremove() failed.\n";
+ }
+ }
+
+
+ if ( pass ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass )
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n",
+ FUNC, failure_mssg);
+
+ return !pass;
+
+} /* check_cache_image_ctl_flow_5() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_cache_image_ctl_flow_6()
+ *
+ * Purpose: This test is one of a sequence of control flow tests intended
+ * to verify that control flow for the cache image feature works
+ * as expected.
+ *
+ * The objective of this test is to verify correct control flow
+ * when a file with a metadata cache image superblock extension
+ * message is opened with the metadata cache image FAPL entry.
+ *
+ * In this test we avoid all file activity other than open
+ * and close.
+ *
+ * 1) Create a HDF5 file with the cache image FAPL entry.
+ *
+ * Verify that the cache is informed of the cache image
+ * FAPL entry.
+ *
+ * Set flags forcing creation of metadata cache image
+ * super block extension message only.
+ *
+ * 2) Close the file.
+ *
+ * 3) Open the file WITH the cache image FAPL entry.
+ *
+ * Verify that the metadata cache is instructed
+ * to load the metadata cache image, and that the
+ * supplied address and length are HADDR_UNDEF and
+ * zero respectively. Note that these values indicate
+ * that the metadata image block doesn't exist.
+ *
+ * Verify that the cache is informed of the cache image
+ * FAPL entry.
+ *
+ * Set flags forcing creation of metadata cache image
+ * super block extension message only.
+ *
+ * 4) Close the file.
+ *
+ * 5) Open the file.
+ *
+ * Verify that the metadata cache is instructed
+ * to load the metadata cache image, and that the
+ * supplied address and length are HADDR_UNDEF and
+ * zero respectively. Note that these values indicate
+ * that the metadata image block doesn't exist.
+ *
+ * 6) Close the file.
+ *
+ * 7) Delete the file.
+ *
+ * Return: unsigned (0 on success, 1 on failure)
+ *
+ * Programmer: John Mainzer
+ * 7/17/15
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+check_cache_image_ctl_flow_6(void)
+{
+ const char * fcn_name = "check_cache_image_ctl_flow_6()";
+ char filename[512];
+ hbool_t show_progress = FALSE;
+ hid_t file_id = -1;
+ H5F_t *file_ptr = NULL;
+ H5C_t *cache_ptr = NULL;
+ int cp = 0;
+
+ TESTING("metadata cache image control flow test 6");
+
+ pass = TRUE;
+
+ if ( show_progress ) /* 0 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* setup the file name */
+ if ( pass ) {
+
+ if ( h5_fixname(FILENAMES[0], H5P_DEFAULT, filename, sizeof(filename))
+ == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "h5_fixname() failed.\n";
+ }
+ }
+
+ if ( show_progress ) /* 1 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 1) Create a HDF5 file with the cache image FAPL entry.
+ *
+ * Verify that the cache is informed of the cache image FAPL entry.
+ *
+ * Set flags forcing creation of metadata cache image
+ * super block extension message only.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ TRUE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ TRUE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__GEN_MDCI_SBE_MESG,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress ) /* 2 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 2) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress ) /* 3 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 3) Open the file WITH the cache image FAPL entry.
+ *
+ * Verify that the metadata cache is instructed
+ * to load the metadata cache image, and that the
+ * supplied address and length are HADDR_UNDEF and
+ * zero respectively. Note that these values indicate
+ * that the metadata image block doesn't exist.
+ *
+ * Verify that the cache is informed of the cache image
+ * FAPL entry.
+ *
+ * Set flags forcing creation of metadata cache image
+ * super block extension message only.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ TRUE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__GEN_MDCI_SBE_MESG,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress ) /* 4 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 4) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress ) /* 5 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 5) Open the file.
+ *
+ * Verify that the metadata cache is instructed
+ * to load the metadata cache image, and that the
+ * supplied address and length are HADDR_UNDEF and
+ * zero respectively. Note that these values indicate
+ * that the metadata image block doesn't exist.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ 0,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress ) /* 6 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 6) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress ) /* 7 */
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 7) Delete the file. */
+
+ if ( pass ) {
+
+ if ( HDremove(filename) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "HDremove() failed.\n";
+ }
+ }
+
+
+ if ( pass ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass )
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n",
+ FUNC, failure_mssg);
+
+ return !pass;
+
+} /* check_cache_image_ctl_flow_6() */
+
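+/* For context: the "supplied address and length are HADDR_UNDEF and zero"
+ * condition checked throughout the control flow tests above can also be
+ * observed from application code.  A minimal sketch, assuming HDF5 1.10.1
+ * or later (file_id is illustrative and must refer to an open file):
+ *
+ *     haddr_t image_addr = HADDR_UNDEF;
+ *     hsize_t image_len  = 0;
+ *     herr_t  status;
+ *
+ *     status = H5Fget_mdc_image_info(file_id, &image_addr, &image_len);
+ *
+ * On success, image_addr == HADDR_UNDEF together with image_len == 0
+ * indicates that no cache image block exists in the file.  That is the
+ * state these tests expect when only H5C_CI__GEN_MDCI_SBE_MESG is set,
+ * since the superblock extension message is then written without an
+ * accompanying image block.
+ */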
+
+/*-------------------------------------------------------------------------
+ * Function: cache_image_smoke_check_1()
+ *
+ * Purpose: This test is one of a sequence of tests intended
+ * to exercise the cache image feature verifying that it
+ * works more or less correctly in common cases.
+ *
+ * This test is an initial smoke check, so the sequence of
+ * operations is relatively simple. In particular, we are
+ * testing:
+ *
+ * i) Creation of file with metadata cache image
+ * superblock extension message and cache image
+ * block.
+ *
+ * ii) Open of file with metadata cache image superblock
+ * extension message and cache image block.
+ * Deserialization and removal of both, insertion
+ * of prefetched cache entries, and deserialization
+ * of prefetched cache entries as they are protected.
+ *
+ * iii) Subsequent write of file without metadata cache
+ * image.
+ *
+ * iv) Open and close of file with metadata cache image
+ * requested, but with no operations on the
+ * file.
+ *
+ * To do this:
+ *
+ * 1) Create a HDF5 file with the cache image FAPL entry.
+ *
+ * Verify that the cache is informed of the cache image
+ * FAPL entry.
+ *
+ * Set all cache image flags, forcing full functionality.
+ *
+ * 2) Create some datasets in the file.
+ *
+ * 3) Close the file.
+ *
+ * 4) Open the file.
+ *
+ * Verify that the metadata cache is instructed
+ * to load the metadata cache image.
+ *
+ * 5) Open a dataset.
+ *
+ * Verify that it contains the expected data.
+ *
+ * 6) Close the file.
+ *
+ * 7) Open the file.
+ *
+ * Verify that the file doesn't contain a metadata cache
+ * image superblock extension message.
+ *
+ * 8) Open a dataset.
+ *
+ * Verify that it contains the expected data.
+ *
+ * 9) Close the file.
+ *
+ * 10) Open the file with the cache image FAPL entry.
+ *
+ * Verify that the cache is informed of the cache image
+ * FAPL entry.
+ *
+ * Set all cache image flags, forcing full functionality.
+ *
+ * 11) Close the file. Since there has been no access to
+ * any entries that would be included in the cache image,
+ * there should be no cache image.
+ *
+ * 12) Open the file.
+ *
+ * Verify that the file doesn't contain a metadata cache
+ * image superblock extension message.
+ *
+ * 13) Open a dataset.
+ *
+ * Verify that it contains the expected data.
+ *
+ * 14) Close the file.
+ *
+ * 15) Delete the file.
+ *
+ * Return: unsigned (0 on success, 1 on failure)
+ *
+ * Programmer: John Mainzer
+ * 8/17/15
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+cache_image_smoke_check_1(void)
+{
+ const char * fcn_name = "cache_image_smoke_check_1()";
+ char filename[512];
+ hbool_t show_progress = FALSE;
+ hid_t file_id = -1;
+ H5F_t *file_ptr = NULL;
+ H5C_t *cache_ptr = NULL;
+ int cp = 0;
+
+ TESTING("metadata cache image smoke check 1");
+
+ pass = TRUE;
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* setup the file name */
+ if ( pass ) {
+
+ if ( h5_fixname(FILENAMES[0], H5P_DEFAULT, filename, sizeof(filename))
+ == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "h5_fixname() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 1) Create a HDF5 file with the cache image FAPL entry.
+ *
+ * Verify that the cache is informed of the cache image FAPL entry.
+ *
+ * Set flags forcing full function of the cache image feature.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ TRUE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ TRUE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 2) Create some datasets in the file. */
+
+ if ( pass ) {
+
+ create_datasets(file_id, 0, 5);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded != 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block loaded(1).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 3) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 4) Open the file.
+ *
+ * Verify that the metadata cache is instructed to load the
+ * metadata cache image.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ 0,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 5) Open and close a dataset.
+ *
+ * Verify that it contains the expected data.
+ */
+
+ if ( pass ) {
+
+ verify_datasets(file_id, 0, 5);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded != 1 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block not loaded(1).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 6) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 7) Open the file.
+ *
+ * Verify that the file doesn't contain a metadata cache image
+ * superblock extension message.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ 0,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 8) Open and close a dataset.
+ *
+ * Verify that it contains the expected data.
+ */
+
+ if ( pass ) {
+
+ verify_datasets(file_id, 0, 5);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded != 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block loaded(2).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 9) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 10) Open the file with the cache image FAPL entry.
+ *
+ * Verify that the cache is informed of the cache image
+ * FAPL entry.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ TRUE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 11) Close the file. Since there has been no access to
+ * any entries that would be included in the cache image,
+ * there should be no cache image.
+ */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 12) Open the file.
+ *
+ * Verify that the file doesn't contain a metadata cache
+ * image superblock extension message.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ 0,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 13) Open a dataset.
+ *
+ * Verify that it contains the expected data.
+ */
+
+ if ( pass ) {
+
+ verify_datasets(file_id, 0, 5);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded != 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block loaded(3).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 14) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 15) Delete the file */
+
+ if ( pass ) {
+
+ if ( HDremove(filename) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "HDremove() failed.\n";
+ }
+ }
+
+ if ( pass ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass )
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n",
+ FUNC, failure_mssg);
+
+ return !pass;
+
+} /* cache_image_smoke_check_1() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: cache_image_smoke_check_2()
+ *
+ * Purpose: This test is one of a sequence of tests intended
+ * to exercise the cache image feature verifying that it
+ * works more or less correctly in common cases.
+ *
+ * This test is an initial smoke check, so the sequence of
+ * operations is relatively simple. In particular, we are
+ * testing:
+ *
+ * i) Creation of file with metadata cache image
+ * superblock extension message and cache image
+ * block.
+ *
+ * ii) Open of file with metadata cache image superblock
+ * extension message and cache image block. Write
+ * of prefetched entries to file on file close.
+ *
+ * To do this:
+ *
+ * 1) Create a HDF5 file with the cache image FAPL entry.
+ *
+ * Verify that the cache is informed of the cache image
+ * FAPL entry.
+ *
+ * Set all cache image flags, forcing full functionality.
+ *
+ * 2) Create some datasets in the file.
+ *
+ * 3) Close the file.
+ *
+ * 4) Open the file.
+ *
+ * 5) Close the file.
+ *
+ * 6) Open the file.
+ *
+ * Verify that the file doesn't contain a metadata cache
+ * image superblock extension message.
+ *
+ * 7) Open a dataset.
+ *
+ * Verify that it contains the expected data.
+ *
+ * 8) Close the file.
+ *
+ * 9) Delete the file.
+ *
+ * Return: unsigned (0 on success, 1 on failure)
+ *
+ * Programmer: John Mainzer
+ * 8/18/15
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+cache_image_smoke_check_2(void)
+{
+ const char * fcn_name = "cache_image_smoke_check_2()";
+ char filename[512];
+ hbool_t show_progress = FALSE;
+ hid_t file_id = -1;
+ H5F_t *file_ptr = NULL;
+ H5C_t *cache_ptr = NULL;
+ int cp = 0;
+
+ TESTING("metadata cache image smoke check 2");
+
+ pass = TRUE;
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* setup the file name */
+ if ( pass ) {
+
+ if ( h5_fixname(FILENAMES[0], H5P_DEFAULT, filename, sizeof(filename))
+ == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "h5_fixname() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 1) Create a HDF5 file with the cache image FAPL entry.
+ *
+ * Verify that the cache is informed of the cache image FAPL entry.
+ *
+ * Set flags forcing full function of the cache image feature.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ TRUE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ TRUE,
+ /* config_fsm */ TRUE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 2) Create some datasets in the file. */
+
+ if ( pass ) {
+
+ create_datasets(file_id, 0, 5);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded != 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block loaded(1).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 3) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 4) Open the file.
+ *
+ * Verify that the metadata cache is instructed to load the
+ * metadata cache image.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ 0,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ /* can't verify that the metadata cache image has been loaded directly,
+ * as in this case, the load will not happen until close, and thus
+ * the images_created stat will not be readily available as it is
+ * incremented just before the cache is shut down.
+ */
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 5) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 6) Open the file.
+ *
+ * Verify that the file doesn't contain a metadata cache image
+ * superblock extension message.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ 0,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 7) Open and close a dataset.
+ *
+ * Verify that it contains the expected data.
+ */
+
+ if ( pass ) {
+
+ verify_datasets(file_id, 0, 5);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 8) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 9) Delete the file */
+
+ if ( pass ) {
+
+ if ( HDremove(filename) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "HDremove() failed.\n";
+ }
+ }
+
+ if ( pass ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass )
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n",
+ FUNC, failure_mssg);
+
+ return !pass;
+
+} /* cache_image_smoke_check_2() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: cache_image_smoke_check_3()
+ *
+ * Purpose: This test is one of a sequence of tests intended
+ * to exercise the cache image feature verifying that it
+ * works more or less correctly in common cases.
+ *
+ * This test is an initial smoke check, so the sequence of
+ * operations is relatively simple. In particular, we are
+ * testing:
+ *
+ * i) Creation of file with metadata cache image
+ * superblock extension message and cache image
+ * block.
+ *
+ * ii) Read only open and close of file with metadata
+ * cache image superblock extension message and
+ * cache image block.
+ *
+ * iii) Subsequent R/W open and close of file with metadata
+ * cache image superblock extension message and
+ * cache image block.
+ *
+ * To do this:
+ *
+ * 1) Create a HDF5 file with the cache image FAPL entry.
+ *
+ * Verify that the cache is informed of the cache image
+ * FAPL entry.
+ *
+ * Set all cache image flags, forcing full functionality.
+ *
+ * 2) Create some datasets in the file.
+ *
+ * 3) Close the file.
+ *
+ * 4) Open the file read only.
+ *
+ * Verify that the file contains a metadata cache
+ * image superblock extension message.
+ *
+ * 5) Open a dataset.
+ *
+ * Verify that it contains the expected data.
+ *
+ * 6) Close the file.
+ *
+ * 7) Open the file.
+ *
+ * Verify that the file contains a metadata cache
+ * image superblock extension message.
+ *
+ * 8) Open a dataset.
+ *
+ * Verify that it contains the expected data.
+ *
+ * 9) Close the file.
+ *
+ * 10) Open the file.
+ *
+ * Verify that the file doesn't contain a metadata cache
+ * image superblock extension message.
+ *
+ * 11) Open a dataset.
+ *
+ * Verify that it contains the expected data.
+ *
+ * 12) Close the file.
+ *
+ * 13) Delete the file.
+ *
+ * Return: unsigned (0 on success, 1 on failure)
+ *
+ * Programmer: John Mainzer
+ * 8/18/15
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+cache_image_smoke_check_3(void)
+{
+ const char * fcn_name = "cache_image_smoke_check_3()";
+ char filename[512];
+ hbool_t show_progress = FALSE;
+ hid_t file_id = -1;
+ H5F_t *file_ptr = NULL;
+ H5C_t *cache_ptr = NULL;
+ int cp = 0;
+
+ TESTING("metadata cache image smoke check 3");
+
+ pass = TRUE;
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* setup the file name */
+ if ( pass ) {
+
+ if ( h5_fixname(FILENAMES[0], H5P_DEFAULT, filename, sizeof(filename))
+ == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "h5_fixname() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 1) Create a HDF5 file with the cache image FAPL entry.
+ *
+ * Verify that the cache is informed of the cache image FAPL entry.
+ *
+ * Set flags forcing full function of the cache image feature.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ TRUE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ TRUE,
+ /* config_fsm */ TRUE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 2) Create some datasets in the file. */
+
+ if ( pass ) {
+
+ create_datasets(file_id, 0, 5);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded != 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block loaded(1).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 3) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 4) Open the file read only.
+ *
+ * Verify that the metadata cache is instructed to load the
+ * metadata cache image.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ TRUE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ 0,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 5) Open and close a dataset.
+ *
+ * Verify that it contains the expected data.
+ */
+
+ if ( pass ) {
+
+ verify_datasets(file_id, 0, 5);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded == 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block not loaded(1).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 6) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 7) Open the file.
+ *
+ * Verify that the file contains a metadata cache image
+ * superblock extension message.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ 0,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 8) Open and close a dataset.
+ *
+ * Verify that it contains the expected data.
+ */
+
+ if ( pass ) {
+
+ verify_datasets(file_id, 0, 5);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded == 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block not loaded(2).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 9) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+
+ /* 10) Open the file.
+ *
+ * Verify that the file doesn't contain a metadata cache image
+ * superblock extension message.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ 0,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 11) Open and close a dataset.
+ *
+ * Verify that it contains the expected data.
+ */
+
+ if ( pass ) {
+
+ verify_datasets(file_id, 0, 5);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded != 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block loaded(2).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 12) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 13) Delete the file */
+
+ if ( pass ) {
+
+ if ( HDremove(filename) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "HDremove() failed.\n";
+ }
+ }
+
+ if ( pass ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass )
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n",
+ FUNC, failure_mssg);
+
+ return !pass;
+
+} /* cache_image_smoke_check_3() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: cache_image_smoke_check_4()
+ *
+ * Purpose: This test attempts to mimic the typical "poor man's
+ * parallel use case in which the file is passed between
+ * processes, each of which open the file, write some data,
+ * close the file, and then pass control on to the next
+ * process.
+ *
+ * In this case, we only write one dataset per process.
+ *
+ * Cycle of operation
+ *
+ * 1) Create a HDF5 file with the cache image FAPL entry.
+ *
+ * Verify that the cache is informed of the cache image
+ * FAPL entry.
+ *
+ * Set all cache image flags, forcing full functionality.
+ *
+ * 2) Create and write a dataset in the file.
+ *
+ * 3) Close the file.
+ *
+ * 4) Open the file with the cache image FAPL entry.
+ *
+ * Verify that the file contains a metadata cache
+ * image superblock extension message.
+ *
+ * 5) Create and write a new dataset.
+ *
+ * 6) Close the file.
+ *
+ * If sufficient datasets have been created, continue to
+ * 7). Otherwise goto 4)
+ *
+ * 7) Open the file.
+ *
+ * Verify that the file contains a metadata cache
+ * image superblock extension message.
+ *
+ * 8) Open all datasets that have been created, and
+ * verify that they contain the expected data.
+ *
+ * 9) Close the file.
+ *
+ * 10) Open the file.
+ *
+ * Verify that the file doesn't contain a metadata cache
+ * image superblock extension message.
+ *
+ * 11) Open all datasets that have been created, and
+ * verify that they contain the expected data.
+ *
+ * 12) Close the file.
+ *
+ * 13) Delete the file.
+ *
+ * Return: unsigned (0 on success, 1 on failure)
+ *
+ * Programmer: John Mainzer
+ * 8/18/15
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+cache_image_smoke_check_4(void)
+{
+ const char * fcn_name = "cache_image_smoke_check_4()";
+ char filename[512];
+ hbool_t show_progress = FALSE;
+ hid_t file_id = -1;
+ H5F_t *file_ptr = NULL;
+ H5C_t *cache_ptr = NULL;
+ int cp = 0;
+ int min_dset = 0;
+ int max_dset = 0;
+
+ TESTING("metadata cache image smoke check 4");
+
+ pass = TRUE;
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* setup the file name */
+ if ( pass ) {
+
+ if ( h5_fixname(FILENAMES[0], H5P_DEFAULT, filename, sizeof(filename))
+ == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "h5_fixname() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 1) Create a HDF5 file with the cache image FAPL entry.
+ *
+ * Verify that the cache is informed of the cache image FAPL entry.
+ *
+ * Set flags forcing full function of the cache image feature.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ TRUE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ TRUE,
+ /* config_fsm */ TRUE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 2) Create a dataset in the file. */
+
+ if ( pass ) {
+
+ create_datasets(file_id, min_dset++, max_dset++);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded != 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block loaded(1).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 3) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ while ( ( pass ) && ( max_dset < MAX_NUM_DSETS ) )
+ {
+
+ /* 4) Open the file.
+ *
+ * Verify that the metadata cache is instructed to load the
+ * metadata cache image.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ TRUE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s:L1 cp = %d, max_dset = %d, pass = %d.\n",
+ fcn_name, cp, max_dset, pass);
+
+
+ /* 5) Create a dataset in the file. */
+
+ if ( pass ) {
+
+ create_datasets(file_id, min_dset++, max_dset++);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded == 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block not loaded(1).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s:L2 cp = %d, max_dset = %d, pass = %d.\n",
+ fcn_name, cp + 1, max_dset, pass);
+
+
+ /* 6) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s:L3 cp = %d, max_dset = %d, pass = %d.\n",
+ fcn_name, cp + 2, max_dset, pass);
+ } /* end while */
+ cp += 3;
+
+
+ /* 7) Open the file.
+ *
+ * Verify that the file contains a metadata cache image
+ * superblock extension message.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ 0,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 8) Open and close all datasets.
+ *
+ * Verify that they contain the expected data.
+ */
+
+ if ( pass ) {
+
+ verify_datasets(file_id, 0, max_dset - 1);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded == 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block not loaded(2).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 9) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+
+ /* 10) Open the file.
+ *
+ * Verify that the file doesn't contain a metadata cache image
+ * superblock extension message.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ 0,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 11) Open and close all datasets.
+ *
+ * Verify that they contain the expected data.
+ */
+
+ if ( pass ) {
+
+ verify_datasets(file_id, 0, max_dset - 1);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded != 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block loaded(2).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 12) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 13) Delete the file */
+
+ if ( pass ) {
+
+ if ( HDremove(filename) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "HDremove() failed.\n";
+ }
+ }
+
+ if ( pass ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass )
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n",
+ FUNC, failure_mssg);
+
+ return !pass;
+
+} /* cache_image_smoke_check_4() */
+
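+/* For context: both cache_image_smoke_check_4() (above) and
+ * cache_image_smoke_check_5() (below) drive the "poor man's parallel"
+ * pattern through the open_hdf5_file() test helper.  From the
+ * application's point of view, each process in that pattern does roughly
+ * the following -- a minimal sketch in which the dataset name, dimensions,
+ * and the fapl_id carrying the cache image configuration are illustrative
+ * only:
+ *
+ *     hsize_t dims[1] = {1024};
+ *     int     data[1024] = {0};
+ *     hid_t   file_id, space_id, dset_id;
+ *
+ *     file_id  = H5Fopen(filename, H5F_ACC_RDWR, fapl_id);
+ *     space_id = H5Screate_simple(1, dims, NULL);
+ *     dset_id  = H5Dcreate2(file_id, "process_n_dset", H5T_NATIVE_INT,
+ *                           space_id, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ *     H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);
+ *     H5Dclose(dset_id);
+ *     H5Sclose(space_id);
+ *     H5Fclose(file_id);
+ *
+ * after which control passes to the next process.  The smoke checks
+ * substitute the create_datasets() / create_zoo() helpers for the dataset
+ * writes so that cache image loads can be checked via the images_loaded
+ * statistic.
+ */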
+
+/*-------------------------------------------------------------------------
+ * Function: cache_image_smoke_check_5()
+ *
+ * Purpose: This test attempts to mimic the typical "poor man's
+ * parallel use case in which the file is passed between
+ * processes, each of which open the file, write some data,
+ * close the file, and then pass control on to the next
+ * process.
+ *
+ * In this case, we create one group for each process, and
+ * populate it with a "zoo" of HDF5 objects selected to
+ * (ideally) exercise all HDF5 on disk data structures.
+ *
+ * Cycle of operation
+ *
+ * 1) Create a HDF5 file with the cache image FAPL entry.
+ *
+ * Verify that the cache is informed of the cache image
+ * FAPL entry.
+ *
+ * Set all cache image flags, forcing full functionality.
+ *
+ * 2) Create a process specific group.
+ *
+ * 3) Construct a "zoo" in the above group, and validate it.
+ *
+ * 4) Close the file.
+ *
+ * 5) Open the file with the cache image FAPL entry.
+ *
+ * Verify that the file contains a metadata cache
+ * image superblock extension message.
+ *
+ * 6) Validate the "zoo" created in the previous file open.
+ *
+ * 7) Create a process specific group for this file open
+ *
+ * 8) Construct a "zoo" in the above group, and validate it.
+ *
+ * 9) Close the file.
+ *
+ * If sufficient zoos have been created, continue to
+ * 10). Otherwise goto 5)
+ *
+ * 10) Open the file R/O.
+ *
+ * Verify that the file contains a metadata cache
+ * image superblock extension message.
+ *
+ * 11) Validate all the zoos.
+ *
+ * 12) Close the file.
+ *
+ * 13) Open the file.
+ *
+ * Verify that the file contains a metadata cache
+ * image superblock extension message.
+ *
+ * 14) Validate all the zoos.
+ *
+ * 15) Close the file.
+ *
+ * 16) Open the file.
+ *
+ * Verify that the file doesn't contain a metadata cache
+ * image superblock extension message.
+ *
+ * 17) Validate all the zoos.
+ *
+ * 18) Close the file.
+ *
+ * 19) Delete the file.
+ *
+ * Return: unsigned (0 on success, 1 on failure)
+ *
+ * Programmer: John Mainzer
+ * 9/15/15
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#define MAX_NUM_GROUPS 128
+
+static unsigned
+cache_image_smoke_check_5(void)
+{
+ const char * fcn_name = "cache_image_smoke_check_5()";
+ char filename[512];
+ char process_group_name[512];
+ hbool_t show_progress = FALSE;
+ hid_t file_id = -1;
+ hid_t proc_gid = -1;
+ H5F_t *file_ptr = NULL;
+ H5C_t *cache_ptr = NULL;
+ int cp = 0;
+ int i;
+ int min_group = 0;
+ int max_group = 0;
+
+ TESTING("metadata cache image smoke check 5");
+
+ pass = TRUE;
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* setup the file name */
+ if ( pass ) {
+
+ if ( h5_fixname(FILENAMES[0], H5P_DEFAULT, filename, sizeof(filename))
+ == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "h5_fixname() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 1) Create a HDF5 file with the cache image FAPL entry.
+ *
+ * Verify that the cache is informed of the cache image FAPL entry.
+ *
+ * Set flags forcing full function of the cache image feature.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ TRUE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ TRUE,
+ /* config_fsm */ TRUE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 2) Create a process specific group. */
+ if ( pass ) {
+
+ sprintf(process_group_name, "/process_%d", min_group);
+
+ proc_gid = H5Gcreate2(file_id, process_group_name, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT);
+
+ if ( proc_gid < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Gcreate2() failed (1).\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 3) Construct a "zoo" in the above group, and validate it. */
+ if ( pass )
+ create_zoo(file_id, process_group_name, min_group);
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded != 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block loaded(1).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 4) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Gclose(proc_gid) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Gclose(proc_gid) failed. (1)";
+ }
+ }
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ while ( ( pass ) && ( max_group < MAX_NUM_GROUPS ) )
+ {
+
+ /* 5) Open the file.
+ *
+ * Verify that the metadata cache is instructed to load the
+ * metadata cache image.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ TRUE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s:L1 cp = %d, max_group = %d, pass = %d.\n",
+ fcn_name, cp, max_group, pass);
+
+
+ /* 6) Validate the "zoo" created in the previous file open. */
+ if ( pass )
+ validate_zoo(file_id, process_group_name, max_group);
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded == 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block not loaded(1).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s:L2 cp = %d, max_group = %d, pass = %d.\n",
+ fcn_name, cp + 1, max_group, pass);
+
+
+ /* 7) Create a process specific group for this file open */
+ if ( pass ) {
+
+ max_group++;
+ sprintf(process_group_name, "/process_%d", max_group);
+
+ proc_gid = H5Gcreate2(file_id, process_group_name, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT);
+
+ if ( proc_gid < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Gcreate2() failed (2).\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s:L3 cp = %d, max_group = %d, pass = %d.\n",
+ fcn_name, cp + 2, max_group, pass);
+
+
+ /* 8) Construct a "zoo" in the above group, and validate it. */
+ if ( pass )
+ create_zoo(file_id, process_group_name, max_group);
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s:L4 cp = %d, max_group = %d, pass = %d.\n",
+ fcn_name, cp + 3, max_group, pass);
+
+
+ /* 9) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Gclose(proc_gid) < 0 ) {
+
+ pass = FALSE;
+                failure_mssg = "H5Gclose(proc_gid) failed. (2)";
+ }
+ }
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s:L5 cp = %d, max_group = %d, pass = %d.\n",
+ fcn_name, cp + 4, max_group, pass);
+ } /* end while */
+ cp += 5;
+
+ /* 10) Open the file read only.
+ *
+ * Verify that the file contains a metadata cache image
+ * superblock extension message.
+ */
+ if(pass) {
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ TRUE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ 0,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if(show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 11) Validate all the zoos. */
+ i = min_group;
+ while(pass && i <= max_group) {
+ sprintf(process_group_name, "/process_%d", i);
+ validate_zoo(file_id, process_group_name, i++);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if( pass) {
+ if(cache_ptr->images_loaded == 0) {
+ pass = FALSE;
+ failure_mssg = "metadata cache image block not loaded(2).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if(show_progress)
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 12) Close the file. */
+ if(pass) {
+ if(H5Fclose(file_id) < 0) {
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ /* 13) Open the file R/W.
+ *
+ * Verify that the file contains a metadata cache image
+ * superblock extension message.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ 0,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 14) Validate all the zoos. */
+ i = min_group;
+ while ( ( pass ) && ( i <= max_group ) ) {
+
+ sprintf(process_group_name, "/process_%d", i);
+ validate_zoo(file_id, process_group_name, i++);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded == 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block not loaded(2).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 15) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+
+ /* 16) Open the file.
+ *
+ * Verify that the file doesn't contain a metadata cache image
+ * superblock extension message.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ 0,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 17) Validate all the zoos.
+ *
+ * Verify that the metadata cache image superblock
+ * extension message has been deleted.
+ */
+ i = min_group;
+ while ( ( pass ) && ( i <= max_group ) ) {
+
+ sprintf(process_group_name, "/process_%d", i);
+ validate_zoo(file_id, process_group_name, i++);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded != 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block loaded(2).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 18) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 19) Delete the file */
+
+ if ( pass ) {
+
+ if ( HDremove(filename) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "HDremove() failed.\n";
+ }
+ }
+
+
+ if ( pass ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass )
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n",
+ FUNC, failure_mssg);
+
+ return !pass;
+
+} /* cache_image_smoke_check_5() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: cache_image_smoke_check_6()
+ *
+ * Purpose: As the free space manager metadata is now included in the
+ * cache image, a smoke check to verify generally correct
+ * behaviour of the persistent free space managers seems
+ * prudent.
+ *
+ * The basic idea of this test is to construct a long
+ * sequence of dataset creations and deletions, all separated
+ * by file open/close cycles with cache image enabled. If the
+ *              persistent free space managers are performing as expected,
+ * the size of the file should stabilize.
+ *
+ * To implement this, proceed as outlined in the cycle of
+ * operation below:
+ *
+ * Cycle of operation
+ *
+ * 1) Create a HDF5 file with the cache image FAPL entry.
+ *
+ * Verify that the cache is informed of the cache image
+ * FAPL entry.
+ *
+ * Set all cache image flags, forcing full functionality.
+ *
+ * 2) Create and write a dataset in the file.
+ *
+ * 3) Close the file.
+ *
+ * 4) Open the file with the cache image FAPL entry.
+ *
+ * Verify that the file contains a metadata cache
+ * image superblock extension message.
+ *
+ * 5) Create and write a new dataset.
+ *
+ * 6) Verify and delete the old dataset.
+ *
+ * 7) Close the file.
+ *
+ *                  If sufficient datasets have been created and then
+ *                  deleted, continue to 8). Otherwise goto 4).
+ *
+ * 8) Open the file.
+ *
+ * Verify that the file contains a metadata cache
+ * image superblock extension message.
+ *
+ * 9) Verify the last dataset created.
+ *
+ * 10) Close the file.
+ *
+ * 11) Open the file.
+ *
+ * 12) Verify and delete the last dataset.
+ *
+ * Verify that a metadata cache image is not loaded.
+ *
+ * 13) Close the file.
+ *
+ * 14) Get the size of the file. Verify that it is less
+ *                 than 20 KB.  Without deletions and persistent free
+ *                 space managers, the file size is about 167 MB, so this
+ *                 is sufficient to verify that the persistent free
+ * space managers are more or less doing their job.
+ *
+ * Note that in the absence of paged allocation, file
+ * size gets below 1 KB.
+ *
+ * 15) Delete the file.
+ *
+ * Return:      unsigned
+ *
+ * Programmer: John Mainzer
+ * 10/31/16
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+cache_image_smoke_check_6(void)
+{
+ const char * fcn_name = "cache_image_smoke_check_6()";
+ char filename[512];
+ hbool_t show_progress = FALSE;
+ hid_t file_id = -1;
+ H5F_t *file_ptr = NULL;
+ H5C_t *cache_ptr = NULL;
+ h5_stat_size_t file_size;
+ int cp = 0;
+ int min_dset = 0;
+ int max_dset = 0;
+
+ TESTING("metadata cache image smoke check 6");
+
+ pass = TRUE;
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* setup the file name */
+ if ( pass ) {
+
+ if ( h5_fixname(FILENAMES[0], H5P_DEFAULT, filename, sizeof(filename))
+ == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "h5_fixname() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 1) Create a HDF5 file with the cache image FAPL entry.
+ *
+ * Verify that the cache is informed of the cache image FAPL entry.
+ *
+ * Set flags forcing full function of the cache image feature.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ TRUE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ TRUE,
+ /* config_fsm */ TRUE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 2) Create a dataset in the file. */
+
+ if ( pass ) {
+
+ create_datasets(file_id, min_dset++, max_dset++);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded != 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block loaded(1).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 3) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ while ( ( pass ) && ( max_dset < MAX_NUM_DSETS ) )
+ {
+
+ /* 4) Open the file.
+ *
+ * Verify that the metadata cache is instructed to load the
+ * metadata cache image.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ TRUE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s:L1 cp = %d, max_dset = %d, pass = %d.\n",
+ fcn_name, cp, max_dset, pass);
+
+
+ /* 5) Create a dataset in the file. */
+
+ if ( pass ) {
+
+ create_datasets(file_id, min_dset++, max_dset++);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded == 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block not loaded(1).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s:L2 cp = %d, max_dset = %d, pass = %d.\n",
+ fcn_name, cp + 1, max_dset, pass);
+
+
+ /* 6) Verify and delete the old dataset. */
+ if ( pass ) {
+
+ delete_datasets(file_id, min_dset - 2, max_dset - 2);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s:L3 cp = %d, max_dset = %d, pass = %d.\n",
+ fcn_name, cp + 2, max_dset, pass);
+
+
+ /* 7) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s:L4 cp = %d, max_dset = %d, pass = %d.\n",
+ fcn_name, cp + 3, max_dset, pass);
+ } /* end while */
+ cp += 4;
+
+
+ /* 8) Open the file.
+ *
+ * Verify that the file contains a metadata cache image
+ * superblock extension message.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ 0,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 9) Verify the last dataset created. */
+
+ if ( pass ) {
+
+ verify_datasets(file_id, min_dset - 1, max_dset - 1);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded == 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block not loaded(2).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 10) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+
+ /* 11) Open the file.
+ *
+ * Verify that the file doesn't contain a metadata cache image
+ * superblock extension message.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ 0,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 12) Verify and delete the last dataset.
+ *
+ * Verify that a metadata cache image is not loaded.
+ */
+
+ if ( pass ) {
+
+ delete_datasets(file_id, min_dset - 1, max_dset - 1);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded != 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block loaded(2).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+
+
+ /* 13) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 14) Get the size of the file. Verify that it is less
+     *     than 20 KB.  Without deletions and persistent free
+     *     space managers, the file size is about 167 MB, so this
+     *     is sufficient to verify that the persistent free
+ * space managers are more or less doing their job.
+ *
+ * Note that in the absence of paged allocation, file
+ * size gets below 1 KB, but since this test is run both
+ * with and without paged allocation, we must leave some
+ * extra space for the paged allocation case.
+ */
+ if(pass) {
+ if((file_size = h5_get_file_size(filename, H5P_DEFAULT)) < 0) {
+ pass = FALSE;
+ failure_mssg = "h5_get_file_size() failed.\n";
+ } else if(file_size > 20 * 1024) {
+ pass = FALSE;
+ failure_mssg = "unexpectedly large file size.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 15) Delete the file */
+ if ( pass ) {
+
+ if ( HDremove(filename) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "HDremove() failed.\n";
+ }
+ }
+
+
+ if ( pass ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass )
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n",
+ FUNC, failure_mssg);
+
+ return !pass;
+
+} /* cache_image_smoke_check_6() */
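+
+/* The following is an illustrative sketch only -- it is not called by any
+ * test in this file.  The real create_datasets() / verify_datasets() /
+ * delete_datasets() helpers used by the smoke checks above are defined
+ * earlier in this file; this hypothetical, simplified version just shows
+ * the general shape of such a helper: create a small dataset, write known
+ * data to it, and close it.  The dataset name and dimensions below are
+ * arbitrary.
+ */
+#if 0
+static void
+example_create_dataset(hid_t file_id, int idx)
+{
+    char    dset_name[64];
+    hsize_t dims[1] = {1024};
+    int     data[1024];
+    int     i;
+    hid_t   space_id = -1;
+    hid_t   dset_id = -1;
+
+    for ( i = 0; i < 1024; i++ )
+        data[i] = idx + i;
+
+    sprintf(dset_name, "/dset_%d", idx);
+
+    /* create the dataspace and the dataset, then write the data */
+    space_id = H5Screate_simple(1, dims, NULL);
+    dset_id = H5Dcreate2(file_id, dset_name, H5T_NATIVE_INT, space_id,
+                         H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+
+    if ( ( space_id < 0 ) || ( dset_id < 0 ) ||
+         ( H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
+                    H5P_DEFAULT, data) < 0 ) ) {
+
+        pass = FALSE;
+        failure_mssg = "example dataset creation failed.\n";
+    }
+
+    if ( dset_id >= 0 )
+        H5Dclose(dset_id);
+
+    if ( space_id >= 0 )
+        H5Sclose(space_id);
+
+} /* example_create_dataset() */
+#endif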
+
+
+/*-------------------------------------------------------------------------
+ * Function: cache_image_api_error_check_1()
+ *
+ * Purpose: This test is one of a sequence of tests intended
+ * to verify correct management of API errors.
+ *
+ * The object of this test is to verify that a file without
+ * a pre-existing cache image that is opened both read only
+ *              and with a cache image requested is handled correctly
+ * (the cache image request should be ignored silently).
+ *
+ * The test is set up as follows:
+ *
+ * 1) Create a HDF5 file.
+ *
+ * 2) Create some datasets in the file.
+ *
+ * 3) Close the file.
+ *
+ * 4) Open the file read only with a cache image FAPL entry
+ * requested.
+ *
+ * 5) Open a dataset.
+ *
+ * Verify that it contains the expected data
+ *
+ * Verify that the cache image was not loaded.
+ *
+ * 6) Close the file.
+ *
+ * 7) Open the file read only.
+ *
+ * 8) Open a dataset.
+ *
+ * Verify that it contains the expected data.
+ *
+ * Verify that the cache image was not loaded.
+ *
+ * 9) Close the file.
+ *
+ * 10) Open the file read write.
+ *
+ * 11) Open a dataset.
+ *
+ * Verify that it contains the expected data.
+ *
+ * 12) Close the file.
+ *
+ * 13) Delete the file.
+ *
+ * Return:      unsigned
+ *
+ * Programmer: John Mainzer
+ * 9/25/15
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+cache_image_api_error_check_1(void)
+{
+ const char * fcn_name = "cache_image_api_error_check_1()";
+ char filename[512];
+ hbool_t show_progress = FALSE;
+ hid_t file_id = -1;
+ H5F_t *file_ptr = NULL;
+ H5C_t *cache_ptr = NULL;
+ int cp = 0;
+
+ TESTING("metadata cache image api error check 1");
+
+ pass = TRUE;
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* setup the file name */
+ if ( pass ) {
+
+ if ( h5_fixname(FILENAMES[0], H5P_DEFAULT, filename, sizeof(filename))
+ == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "h5_fixname() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 1) Create a HDF5 file. */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ TRUE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ TRUE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ 0,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 2) Create some datasets in the file. */
+
+ if ( pass ) {
+
+ create_datasets(file_id, 0, 5);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded != 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block loaded(1).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 3) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 4) Open the file read only with a cache image FAPL entry requested. */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ TRUE,
+ /* set_mdci_fapl */ TRUE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 5) Open and close a dataset.
+ *
+ * Verify that it contains the expected data.
+ *
+ * Verify that the cache image was not loaded.
+ */
+
+ if ( pass ) {
+
+ verify_datasets(file_id, 0, 5);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded != 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block loaded(2).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 6) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 7) Open the file read only. */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ TRUE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ 0,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 8) Open and close a dataset.
+ *
+ * Verify that it contains the expected data.
+ *
+ * Verify that the cache image was not loaded.
+ */
+
+ if ( pass ) {
+
+ verify_datasets(file_id, 0, 5);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded != 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block loaded(3).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 9) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 10) Open the file read / write. */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 11) Open and close a dataset.
+ *
+ * Verify that it contains the expected data.
+ *
+ * Verify that the cache image was not loaded.
+ */
+
+ if ( pass ) {
+
+ verify_datasets(file_id, 0, 5);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded != 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block loaded(4).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 12) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 13) Delete the file */
+
+ if ( pass ) {
+
+ if ( HDremove(filename) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "HDremove() failed.\n";
+ }
+ }
+
+ if ( pass ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass )
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n",
+ FUNC, failure_mssg);
+
+ return !pass;
+
+} /* cache_image_api_error_check_1() */
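+
+/* The following is an illustrative sketch only -- it is not called by the
+ * tests.  It shows the raw API calls behind "open the file read only with
+ * a cache image FAPL entry requested", the case exercised by
+ * cache_image_api_error_check_1() above via the open_hdf5_file() helper.
+ * The FAPL setup mirrors the H5AC_cache_image_config_t usage that appears
+ * in cache_image_api_error_check_4() below; the function name is
+ * hypothetical.
+ */
+#if 0
+static hid_t
+example_open_ro_with_cache_image(const char *hdf_file_name)
+{
+    hid_t fapl_id = -1;
+    hid_t file_id = -1;
+    H5AC_cache_image_config_t config;
+
+    fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+
+    config.version            = H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION;
+    config.generate_image     = TRUE;
+    config.save_resize_status = FALSE;
+    config.entry_ageout       = H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE;
+
+    if ( ( fapl_id < 0 ) ||
+         ( H5Pset_mdc_image_config(fapl_id, &config) < 0 ) )
+        return -1;
+
+    /* the R/O open is expected to succeed -- the library should ignore
+     * the cache image request silently on a read only file.
+     */
+    file_id = H5Fopen(hdf_file_name, H5F_ACC_RDONLY, fapl_id);
+
+    H5Pclose(fapl_id);
+
+    return file_id;
+
+} /* example_open_ro_with_cache_image() */
+#endif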
+
+
+/*-------------------------------------------------------------------------
+ * Function: cache_image_api_error_check_2()
+ *
+ * Purpose: This test is one of a sequence of tests intended
+ * to verify correct management of API errors.
+ *
+ * The object of this test is to verify that a file with
+ * a pre-existing cache image that is opened both read only
+ * and with a cache image requested is handled correctly
+ * (the cache image request should be ignored silently).
+ *
+ * The test is set up as follows:
+ *
+ *          1) Create a HDF5 file with a cache image requested.
+ *
+ * 2) Create some datasets in the file.
+ *
+ * 3) Close the file.
+ *
+ * 4) Open the file read only with a cache image FAPL entry
+ * requested.
+ *
+ * 5) Open a dataset.
+ *
+ * Verify that it contains the expected data
+ *
+ * Verify that the cache image was loaded.
+ *
+ * 6) Close the file.
+ *
+ * 7) Open the file read only.
+ *
+ * 8) Open a dataset.
+ *
+ * Verify that it contains the expected data.
+ *
+ * Verify that the cache image was loaded.
+ *
+ * 9) Close the file.
+ *
+ * 10) Open the file read write.
+ *
+ * 11) Open a dataset.
+ *
+ * Verify that it contains the expected data.
+ *
+ * Verify that the cache image was loaded.
+ *
+ * 12) Close the file.
+ *
+ * 13) Open the file read write.
+ *
+ * 14) Open a dataset.
+ *
+ * Verify that it contains the expected data.
+ *
+ * Verify that the cache image was NOT loaded.
+ *
+ * 15) Close the file.
+ *
+ * 16) Delete the file.
+ *
+ * Return:      unsigned
+ *
+ * Programmer: John Mainzer
+ * 9/25/15
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+cache_image_api_error_check_2(void)
+{
+ const char * fcn_name = "cache_image_api_error_check_2()";
+ char filename[512];
+ hbool_t show_progress = FALSE;
+ hid_t file_id = -1;
+ H5F_t *file_ptr = NULL;
+ H5C_t *cache_ptr = NULL;
+ int cp = 0;
+
+ TESTING("metadata cache image api error check 2");
+
+ pass = TRUE;
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* setup the file name */
+ if ( pass ) {
+
+ if ( h5_fixname(FILENAMES[0], H5P_DEFAULT, filename, sizeof(filename))
+ == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "h5_fixname() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 1) Create a HDF5 file with a cache image requested. */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ TRUE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ TRUE,
+ /* config_fsm */ TRUE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 2) Create some datasets in the file. */
+
+ if ( pass ) {
+
+ create_datasets(file_id, 0, 5);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded != 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block loaded(1).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 3) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 4) Open the file read only with a cache image FAPL entry requested. */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ TRUE,
+ /* set_mdci_fapl */ TRUE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 5) Open and close a dataset.
+ *
+ * Verify that it contains the expected data.
+ *
+ * Verify that the cache image was loaded.
+ */
+
+ if ( pass ) {
+
+ verify_datasets(file_id, 0, 5);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded != 1 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block was not loaded(1).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 6) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 7) Open the file read only. */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ TRUE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ 0,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 8) Open and close a dataset.
+ *
+ * Verify that it contains the expected data.
+ *
+ * Verify that the cache image was loaded.
+ */
+
+ if ( pass ) {
+
+ verify_datasets(file_id, 0, 5);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded != 1 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block was not loaded(2).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 9) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 10) Open the file read / write. */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 11) Open and close a dataset.
+ *
+ * Verify that it contains the expected data.
+ *
+ * Verify that the cache image was loaded.
+ */
+
+ if ( pass ) {
+
+ verify_datasets(file_id, 0, 5);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded != 1 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block was not loaded(3).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 12) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 13) Open the file read / write. */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 14) Open and close a dataset.
+ *
+ * Verify that it contains the expected data.
+ *
+ * Verify that the cache image was not loaded.
+ */
+
+ if ( pass ) {
+
+ verify_datasets(file_id, 0, 5);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded != 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block was loaded(2).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 15) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+    /* 16) Delete the file */
+
+ if ( pass ) {
+
+ if ( HDremove(filename) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "HDremove() failed.\n";
+ }
+ }
+
+ if ( pass ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass )
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n",
+ FUNC, failure_mssg);
+
+ return !pass;
+
+} /* cache_image_api_error_check_2() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: cache_image_api_error_check_3()
+ *
+ * Purpose: This test is one of a sequence of tests intended
+ * to verify correct management of API errors.
+ *
+ * At present, SWMR and cache image may not be active
+ * at the same time. The purpose of this test is to
+ * verify that attempts to run SWMR and cache image
+ * at the same time will fail.
+ *
+ * The test is set up as follows:
+ *
+ *          1) Create a HDF5 file with a cache image requested.
+ *
+ * 2) Try to start SWMR write -- should fail.
+ *
+ * 3) Discard the file if necessary
+ *
+ * 4) Attempt to create a HDF5 file with SWMR write
+ * access and cache image requested -- should fail.
+ *
+ * 5) Discard the file if necessary
+ *
+ * 6) Create a HDF5 file with a cache image requested.
+ *
+ * 7) Create some datasets in the file.
+ *
+ * 8) Close the file.
+ *
+ * 9) Attempt to open the file with SWMR write access --
+ * should fail.
+ *
+ * 10) Discard the file if necessary.
+ *
+ * Return:      unsigned
+ *
+ * Programmer: John Mainzer
+ * 12/29/16
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+cache_image_api_error_check_3(void)
+{
+ const char * fcn_name = "cache_image_api_error_check_3()";
+ char filename[512];
+ hbool_t show_progress = FALSE;
+ hid_t file_id = -1;
+ H5F_t *file_ptr = NULL;
+ H5C_t *cache_ptr = NULL;
+ int cp = 0;
+
+ TESTING("metadata cache image api error check 3");
+
+ pass = TRUE;
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* setup the file name */
+ if ( pass ) {
+
+ if ( h5_fixname(FILENAMES[0], H5P_DEFAULT, filename, sizeof(filename))
+ == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "h5_fixname() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 1) Create a HDF5 file with a cache image requested. */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ TRUE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ TRUE,
+ /* config_fsm */ TRUE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 2) Try to start SWMR write -- should fail. */
+
+ if ( pass ) {
+
+ H5E_BEGIN_TRY {
+ if ( H5Fstart_swmr_write(file_id) == SUCCEED ) {
+
+ pass = FALSE;
+ failure_mssg = "SWMR start succeeded in file with cache image.";
+ }
+ } H5E_END_TRY;
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 3) Discard the file if necessary */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+
+ }
+
+ if ( HDremove(filename) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "HDremove() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 4) Attempt to create a HDF5 file with SWMR write
+ * access and cache image requested -- should fail.
+ */
+
+ attempt_swmr_open_hdf5_file(/* create_file */ TRUE,
+ /* set_mdci_fapl */ TRUE,
+ /* hdf_file_name */ filename);
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 5) Discard the file if necessary */
+
+ if ( pass ) {
+
+ /* file probably doesn't exist, so don't
+ * error check the remove call.
+ */
+ HDremove(filename);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 6) Create a HDF5 file with a cache image requested. */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ TRUE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ TRUE,
+ /* config_fsm */ TRUE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 7) Create some datasets in the file. */
+
+ if ( pass ) {
+
+ create_datasets(file_id, 0, 5);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded != 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block loaded(1).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 8) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 9) Attempt to open the file with SWMR write access -- should fail. */
+
+ attempt_swmr_open_hdf5_file(/* create_file */ FALSE,
+ /* set_mdci_fapl */ TRUE,
+ /* hdf_file_name */ filename);
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 10) Discard the file if necessary. */
+
+ if ( pass ) {
+
+ if ( HDremove(filename) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "HDremove() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ if ( pass ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass )
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n",
+ FUNC, failure_mssg);
+
+ return !pass;
+
+} /* cache_image_api_error_check_3() */
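+
+/* The following is an illustrative sketch only -- it is not called by the
+ * tests.  It shows the kind of check that the attempt_swmr_open_hdf5_file()
+ * helper used by cache_image_api_error_check_3() above is expected to
+ * perform: an H5Fcreate() with SWMR write access and a cache image
+ * requested in the FAPL should fail, since SWMR and cache image may not be
+ * active at the same time.  The function name is hypothetical, and fapl_id
+ * is assumed to already request a cache image and the latest file format.
+ */
+#if 0
+static void
+example_swmr_create_should_fail(const char *hdf_file_name, hid_t fapl_id)
+{
+    hid_t file_id = -1;
+
+    /* the create should fail -- suppress the expected error stack */
+    H5E_BEGIN_TRY {
+        file_id = H5Fcreate(hdf_file_name,
+                            H5F_ACC_TRUNC | H5F_ACC_SWMR_WRITE,
+                            H5P_DEFAULT, fapl_id);
+    } H5E_END_TRY;
+
+    if ( file_id >= 0 ) {
+
+        pass = FALSE;
+        failure_mssg = "SWMR create with cache image unexpectedly succeeded.";
+
+        H5Fclose(file_id);
+    }
+
+} /* example_swmr_create_should_fail() */
+#endif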
+
+
+/*-------------------------------------------------------------------------
+ * Function: cache_image_api_error_check_4()
+ *
+ * Purpose: This test is one of a sequence of tests intended
+ * to verify correct management of API errors.
+ *
+ * The object of this test is to verify that a request for
+ *              a cache image when a version 2 superblock is not available /
+ *              not requested is handled correctly (the cache image
+ *              request should be ignored silently).
+ *
+ * The test is set up as follows:
+ *
+ * 1) Create a FAPL requesting a cache image, but WITHOUT
+ * specifying the latest file format.
+ *
+ * 2) Create a HDF5 file using the above FAPL.
+ *
+ * 3) Create some datasets in the file.
+ *
+ * 4) Close the file.
+ *
+ * 5) Open the file read only. Verify that the file doesn't
+ * contain a cache image.
+ *
+ * 6) Verify that the datasets exist and contain the
+ * expected data
+ *
+ * Verify that the cache image was not loaded.
+ *
+ * 7) Close the file.
+ *
+ * 8) Open the file R/W using the FAPL defined in 1) above.
+ * Verify that the file does not contain a cache image.
+ *
+ * 9) Close the file.
+ *
+ * 10) Open the file R/W using the FAPL defined in 1) above.
+ * Verify that the file does not contain a cache image.
+ *
+ * 11) Verify that the data sets contain the expected data
+ *
+ * Verify that a cache image was not loaded.
+ *
+ * 12) Create several more data sets.
+ *
+ * 13) Close the file.
+ *
+ * 14) Open the file read write.
+ *
+ * Verify that the file does not contain a cache image.
+ *
+ * 15) Verify the data sets exist and contain the expected
+ * data.
+ *
+ * Verify that a cache image was not loaded.
+ *
+ * 16) Close the file.
+ *
+ * 17) Delete the file.
+ *
+ * Return:      unsigned
+ *
+ * Programmer: John Mainzer
+ * 9/25/15
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+cache_image_api_error_check_4(void)
+{
+ const char * fcn_name = "cache_image_api_error_check_4()";
+ char filename[512];
+ hbool_t show_progress = FALSE;
+ hid_t fapl_id = -1;
+ hid_t file_id = -1;
+ H5F_t *file_ptr = NULL;
+ H5C_t *cache_ptr = NULL;
+ int cp = 0;
+ H5AC_cache_image_config_t cache_image_config;
+
+ TESTING("metadata cache image api error check 4");
+
+ pass = TRUE;
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* setup the file name */
+ if ( pass ) {
+
+ if ( h5_fixname(FILENAMES[0], H5P_DEFAULT, filename, sizeof(filename))
+ == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "h5_fixname() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 1) Create a FAPL requesting a cache image, but WITHOUT
+ * specifying the latest file format.
+ */
+ if ( pass ) {
+
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+
+ if ( fapl_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pcreate() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ if ( pass ) {
+
+ /* set cache image config fields to taste */
+ cache_image_config.version = H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION;
+ cache_image_config.generate_image = TRUE;
+ cache_image_config.save_resize_status = FALSE;
+ cache_image_config.entry_ageout = H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE;
+
+ if ( H5Pset_mdc_image_config(fapl_id, &cache_image_config) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pset_mdc_image_config() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 2) Create a HDF5 file using the above FAPL. */
+
+ if ( pass ) {
+
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id);
+
+ if ( file_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fcreate() failed.\n";
+
+ } else {
+
+ file_ptr = (struct H5F_t *)H5I_object_verify(file_id, H5I_FILE);
+
+ if ( file_ptr == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "Can't get file_ptr.";
+
+ }
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* get a pointer to the file's internal data structure and then
+ * to the cache structure
+ */
+ if ( pass ) {
+
+ if ( file_ptr->shared->cache == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "can't get cache pointer(1).\n";
+
+ } else {
+
+ cache_ptr = file_ptr->shared->cache;
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 3) Create some datasets in the file. */
+
+ if ( pass ) {
+
+ create_datasets(file_id, 0, 5);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ HDassert(cache_ptr);
+
+ if ( cache_ptr->images_loaded != 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block loaded(1).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 4) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 5) Open the file read only. */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ TRUE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 6) Verify that the datasets exist and contain the
+ * expected data
+ */
+
+ if ( pass ) {
+
+ verify_datasets(file_id, 0, 5);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded != 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block loaded(2).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 7) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 8) Open the file R/W using the FAPL defined in 1) above.
+ *
+ * Verify that the file does not contain a cache image.
+ */
+
+ if ( pass ) {
+
+ file_id = H5Fopen(filename, H5F_ACC_RDWR, fapl_id);
+
+ if ( file_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fopen() failed.\n";
+
+ } else {
+
+ file_ptr = (struct H5F_t *)H5I_object_verify(file_id, H5I_FILE);
+
+ if ( file_ptr == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "Can't get file_ptr.";
+
+ }
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* get a pointer to the file's internal data structure and then
+ * to the cache structure
+ */
+ if ( pass ) {
+
+ if ( file_ptr->shared->cache == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "can't get cache pointer(1).\n";
+
+ } else {
+
+ cache_ptr = file_ptr->shared->cache;
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ if ( pass ) {
+
+ if ( ( cache_ptr->load_image == TRUE ) ||
+ ( cache_ptr->delete_image == TRUE ) ) {
+
+ pass = FALSE;
+ failure_mssg = "mdci sb extension message present?\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 9) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+
+ /* 10) Open the file R/W using the FAPL defined in 1) above.
+ * Verify that the file does not contain a cache image.
+ */
+
+ if ( pass ) {
+
+ file_id = H5Fopen(filename, H5F_ACC_RDWR, fapl_id);
+
+ if ( file_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fopen() failed.\n";
+
+ } else {
+
+ file_ptr = (struct H5F_t *)H5I_object_verify(file_id, H5I_FILE);
+
+ if ( file_ptr == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "Can't get file_ptr.";
+
+ }
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+    /* get a pointer to the file's internal data structure and then
+ * to the cache structure
+ */
+ if ( pass ) {
+
+ if ( file_ptr->shared->cache == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "can't get cache pointer(1).\n";
+
+ } else {
+
+ cache_ptr = file_ptr->shared->cache;
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ if ( pass ) {
+
+ if ( ( cache_ptr->load_image == TRUE ) ||
+ ( cache_ptr->delete_image == TRUE ) ) {
+
+ pass = FALSE;
+ failure_mssg = "mdci sb extension message present?\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 11) Verify that the data sets contain the expected data
+ *
+ * Verify that a cache image was not loaded.
+ */
+
+ if ( pass ) {
+
+ verify_datasets(file_id, 0, 5);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded != 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block loaded(2).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 12) Create several more data sets. */
+
+ if ( pass ) {
+
+ create_datasets(file_id, 6, 10);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 13) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 14) Open the file read write.
+ *
+ * Verify that the file does not contain a cache image.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 15) Verify the data sets exist and contain the expected data.
+ *
+ * Verify that a cache image was not loaded.
+ */
+
+ if ( pass ) {
+
+ verify_datasets(file_id, 0, 10);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded != 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block loaded(2).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 16) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 17) Delete the file */
+
+ if ( pass ) {
+
+ if ( HDremove(filename) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "HDremove() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* tidy up */
+ if ( fapl_id != -1 )
+ H5Pclose(fapl_id);
+
+
+ if ( pass ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass )
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n",
+ FUNC, failure_mssg);
+
+ return !pass;
+
+} /* cache_image_api_error_check_4() */
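+
+/* The following is an illustrative sketch only -- it is not called by the
+ * tests.  It shows the FAPL setup that cache_image_api_error_check_4()
+ * above deliberately omits: requesting the latest file format (and hence
+ * a version 2 superblock) in addition to the cache image configuration,
+ * which is what allows the cache image request to take effect.  The
+ * function name is hypothetical.
+ */
+#if 0
+static herr_t
+example_enable_cache_image(hid_t fapl_id)
+{
+    H5AC_cache_image_config_t config;
+
+    /* request the latest file format so that a superblock recent enough
+     * to carry the cache image extension message is used
+     */
+    if ( H5Pset_libver_bounds(fapl_id, H5F_LIBVER_LATEST,
+                              H5F_LIBVER_LATEST) < 0 )
+        return FAIL;
+
+    config.version            = H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION;
+    config.generate_image     = TRUE;
+    config.save_resize_status = FALSE;
+    config.entry_ageout       = H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE;
+
+    return H5Pset_mdc_image_config(fapl_id, &config);
+
+} /* example_enable_cache_image() */
+#endif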
+
+
+/*-------------------------------------------------------------------------
+ * Function: get_free_sections_test()
+ *
+ * Purpose:     It is possible for H5Fget_free_sections() to be
+ *              called before any activity on the metadata cache.
+ *              This is a potential problem, as satisfying the
+ *              H5Fget_free_sections() call requires access to all
+ *              free space managers.  When persistent free space
+ *              managers are enabled, this will require calling
+ *              H5MF_tidy_self_referential_fsm_hack().  This is a
+ *              non-issue in the absence of a cache image.  However,
+ * this is a problem if a cache image exists, as
+ * the call to H5MF_tidy_self_referential_fsm_hack()
+ * will free the file space allocated to the cache
+ * image.
+ *
+ * The objective of this test is to create a test file
+ *              with both non-empty self referential persistent
+ * free space managers, and a cache image, and then
+ * verify that this situation is handled correctly if
+ * H5Fget_free_sections() is called before the metadata
+ * cache image is loaded.
+ *
+ * The test is set up as follows:
+ *
+ * 1) Create a HDF5 file with a cache image requested
+ *             and persistent free space managers enabled.
+ *
+ * 2) Create some data sets, and then delete some of
+ *             those near the beginning of the file.
+ *
+ * 3) Close the file.
+ *
+ * 4) Open the file read only.
+ *
+ * 5) Verify that a cache image exists, and has not
+ * been loaded.
+ *
+ * 6) Verify that one or more self referential FSMs
+ * have been stored at the end of the file just
+ * before the cache image.
+ *
+ * 7) Call H5Fget_free_sections().
+ *
+ * 8) Verify that the cache image has been loaded and
+ * that the self referential FSMs have been floated.
+ *
+ * 9) Verify that the remaining data sets contain the
+ * expected data.
+ *
+ * 10) Close the file.
+ *
+ * 11) Open the file R/W.
+ *
+ * 12) Verify that a cache image exists, and has not
+ * been loaded.
+ *
+ * 13) Verify that one or more self referential FSMs
+ * have been stored at the end of the file just
+ * before the cache image.
+ *
+ * 14) Call H5Fget_free_sections().
+ *
+ * 15) Verify that the cache image has been loaded and
+ * that the self referential FSMs have been floated.
+ *
+ * 16) Verify that the remaining data sets contain the
+ * expected data.
+ *
+ * 17) Delete the remaining data sets.
+ *
+ * 18) Close the file.
+ *
+ * 19) Verify that file space has been reclaimed.
+ *
+ * 20) Discard the file.
+ *
+ * Return:      unsigned
+ *
+ * Programmer: John Mainzer
+ * 1/10/17
+ *
+ *-------------------------------------------------------------------------
+ */
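+
+/* The following is an illustrative sketch only -- it is not used by the
+ * test below.  The test calls H5Fget_free_sections() with a zero count and
+ * a NULL section buffer, which is all that is needed to trigger the free
+ * space manager / cache image interaction described above.  A typical
+ * caller would usually retrieve the free sections as well, as sketched
+ * here; the function name is hypothetical.
+ */
+#if 0
+static void
+example_get_free_sections(hid_t file_id)
+{
+    ssize_t          nsects;
+    H5F_sect_info_t *sect_info = NULL;
+
+    /* first call: get the number of free sections of all types */
+    nsects = H5Fget_free_sections(file_id, H5FD_MEM_DEFAULT, (size_t)0, NULL);
+
+    if ( nsects > 0 ) {
+
+        sect_info = (H5F_sect_info_t *)
+                     HDmalloc((size_t)nsects * sizeof(H5F_sect_info_t));
+
+        /* second call: retrieve the free section addresses and sizes */
+        if ( ( sect_info != NULL ) &&
+             ( H5Fget_free_sections(file_id, H5FD_MEM_DEFAULT,
+                                    (size_t)nsects, sect_info) >= 0 ) ) {
+
+            HDfprintf(stdout, "first free section: addr = %a, size = %Hu.\n",
+                      sect_info[0].addr, sect_info[0].size);
+        }
+
+        if ( sect_info != NULL )
+            HDfree(sect_info);
+    }
+
+} /* example_get_free_sections() */
+#endif
+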
+static unsigned
+get_free_sections_test(void)
+{
+ const char * fcn_name = "get_free_sections_test()";
+ char filename[512];
+ hbool_t show_progress = FALSE;
+ hid_t file_id = -1;
+ H5F_t *file_ptr = NULL;
+ H5C_t *cache_ptr = NULL;
+ h5_stat_size_t file_size;
+ int cp = 0;
+
+ TESTING("Cache image / H5Fget_free_sections() interaction");
+
+ pass = TRUE;
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* setup the file name */
+ if ( pass ) {
+
+ if ( h5_fixname(FILENAMES[0], H5P_DEFAULT, filename, sizeof(filename))
+ == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "h5_fixname() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 1) Create a HDF5 file with a cache image requested
+     * and persistent free space managers enabled.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ TRUE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ TRUE,
+ /* config_fsm */ TRUE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 2) Create some data sets, and then delete some of
+     * those near the beginning of the file.
+ */
+
+ if ( pass ) {
+
+ create_datasets(file_id, 1, 10);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ if ( pass ) {
+
+ verify_datasets(file_id, 1, 10);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ if ( pass ) {
+
+ delete_datasets(file_id, 1, 5);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 3) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed (1).\n";
+
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 4) Open the file read only. */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ TRUE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 5) Verify that a cache image exists, and has not been loaded. */
+
+ if ( pass ) {
+
+ if ( ( ! file_ptr->shared->cache->load_image ) ||
+ ( file_ptr->shared->cache->image_loaded ) ) {
+
+ pass = FALSE;
+ failure_mssg = "unexpected cache image status.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 6) Verify that one or more self referential FSMs
+ * have been stored at the end of the file just
+ * before the cache image.
+ */
+
+ if ( pass ) {
+
+ /* file_ptr->shared->first_alloc_dealloc is set to FALSE if the
+ * file is opened R/O.
+ */
+ if ( ( file_ptr->shared->first_alloc_dealloc ) ||
+ ( ! H5F_addr_defined(file_ptr->shared->eoa_pre_fsm_fsalloc) ) ||
+ ( ! H5F_addr_defined(file_ptr->shared->cache->image_addr) ) ||
+ ( H5F_addr_gt(file_ptr->shared->eoa_pre_fsm_fsalloc,
+ file_ptr->shared->cache->image_addr) ) ) {
+
+ pass = FALSE;
+ failure_mssg = "unexpected cache image status (1).\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 7) Call H5Fget_free_sections(). */
+
+ if ( pass ) {
+
+ if ( H5Fget_free_sections(file_id, H5FD_MEM_DEFAULT, (size_t)0, NULL)
+ < 0 ){
+
+ pass = FALSE;
+ failure_mssg = "H5Fget_free_sections() failed (1).\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 8) Verify that the cache image has been loaded and
+ * that the self referential FSMs have been floated.
+ */
+ if ( pass ) {
+
+ if ( ! file_ptr->shared->cache->image_loaded ) {
+
+ pass = FALSE;
+ failure_mssg = "cache image not loaded (1).\n";
+
+ } else if ( file_ptr->shared->first_alloc_dealloc ) {
+
+ pass = FALSE;
+ failure_mssg = "self referential FSMs not floated (1).\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 9) Verify that the remaining data sets contain the expected data. */
+
+ if ( pass ) {
+
+ verify_datasets(file_id, 6, 10);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 10) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed (2).\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 11) Open the file R/W. */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 12) Verify that a cache image exists, and has not been loaded. */
+
+ if ( pass ) {
+
+ if ( ( ! file_ptr->shared->cache->load_image ) ||
+ ( file_ptr->shared->cache->image_loaded ) ) {
+
+ pass = FALSE;
+ failure_mssg = "unexpected cache image status.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 13) Verify that one or more self referential FSMs
+ * have been stored at the end of the file just
+ * before the cache image.
+ */
+ if ( pass ) {
+
+ if ( ( ! file_ptr->shared->first_alloc_dealloc ) ||
+ ( ! H5F_addr_defined(file_ptr->shared->eoa_pre_fsm_fsalloc) ) ||
+ ( ! H5F_addr_defined(file_ptr->shared->cache->image_addr) ) ||
+ ( H5F_addr_gt(file_ptr->shared->eoa_pre_fsm_fsalloc,
+ file_ptr->shared->cache->image_addr) ) ) {
+
+ pass = FALSE;
+ failure_mssg = "unexpected cache image status (2).\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 14) Call H5Fget_free_sections(). */
+
+ if ( pass ) {
+
+ if ( H5Fget_free_sections(file_id, H5FD_MEM_DEFAULT, (size_t)0, NULL)
+ < 0 ){
+
+ pass = FALSE;
+ failure_mssg = "H5Fget_free_sections() failed (2).\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 15) Verify that the cache image has been loaded and
+ * that the self referential FSMs have been floated.
+ */
+ if ( pass ) {
+
+ if ( ! file_ptr->shared->cache->image_loaded ) {
+
+ pass = FALSE;
+ failure_mssg = "cache image not loaded (2).\n";
+
+ } else if ( file_ptr->shared->first_alloc_dealloc ) {
+
+ pass = FALSE;
+ failure_mssg = "self referential FSMs not floated (2).\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 16) Verify that the remaining data sets contain the expected data. */
+
+ if ( pass ) {
+
+ verify_datasets(file_id, 6, 10);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 17) Delete the remaining data sets. */
+
+ if ( pass ) {
+
+ delete_datasets(file_id, 6, 10);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 18) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed (3).\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 19) Verify that file space has been reclaimed. */
+
+ if ( pass ) {
+
+ if((file_size = h5_get_file_size(filename, H5P_DEFAULT)) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "h5_get_file_size() failed.\n";
+
+ } else if ( file_size > 20 * 1024 ) {
+
+ pass = FALSE;
+ failure_mssg = "unexpectedly large file size.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 20) Discard the file. */
+
+ if ( pass ) {
+
+ if ( HDremove(filename) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "HDremove() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ if ( pass ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass )
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n",
+ FUNC, failure_mssg);
+
+ return !pass;
+
+} /* get_free_sections_test() */
+
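For reference, steps 7) and 14) above rely on the two-step usage pattern of the public H5Fget_free_sections() routine: a first call with a NULL section-info buffer returns the number of free-space sections, and a second call fills a caller-allocated array. A minimal user-level sketch of that pattern follows; it is illustrative only and not part of this patch, and the helper name print_free_sections() is hypothetical.

#include "hdf5.h"
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical helper: report the free-space sections of an open file. */
static int
print_free_sections(hid_t fid)
{
    ssize_t          nsects;
    H5F_sect_info_t *sect_info = NULL;
    ssize_t          i;

    /* First call: NULL buffer, returns the number of free-space sections */
    if((nsects = H5Fget_free_sections(fid, H5FD_MEM_DEFAULT, (size_t)0, NULL)) < 0)
        return -1;

    if(nsects == 0)
        return 0;

    if(NULL == (sect_info = (H5F_sect_info_t *)malloc((size_t)nsects * sizeof(H5F_sect_info_t))))
        return -1;

    /* Second call: retrieve the section descriptions */
    if(H5Fget_free_sections(fid, H5FD_MEM_DEFAULT, (size_t)nsects, sect_info) < 0) {
        free(sect_info);
        return -1;
    }

    for(i = 0; i < nsects; i++)
        printf("section %ld: addr = %llu, size = %llu\n", (long)i,
               (unsigned long long)sect_info[i].addr,
               (unsigned long long)sect_info[i].size);

    free(sect_info);
    return 0;
}

/* Usage (sketch): print_free_sections(file_id) on a file opened with H5Fopen(). */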
+
+/*-------------------------------------------------------------------------
+ * Function: evict_on_close_test()
+ *
+ * Purpose: If a file containing a cache image which in turn
+ * contains dirty entries is opened R/O, the metadata
+ * cache must refuse to evict the dirty entries, as
+ * it will not be able to reload them from file. This
+ *              is a bit tricky, as the dirty entries must be marked as
+ *              clean in the metadata cache so that the MDC will not
+ *              attempt to flush them on file close.
+ *
+ * The objective of this test is to verify that the
+ *              metadata cache will not evict dirty entries in the above
+ * context when the file is opened with the evict on close
+ * FAPL entry.
+ *
+ * Do this by creating a HDF5 file with a cache image
+ * containing dirty metadata.
+ *
+ * Then close the file, re-open it R/O, and scan its
+ * contents twice. If evict on close succeeds in evicting
+ * the dirty metadata, the second scan will fail, as valid
+ * versions of the dirty metadata will not be available.
+ *
+ *              To make the test more useful, enable persistent free
+ * space managers.
+ *
+ * The test is set up as follows:
+ *
+ * 1) Create a HDF5 file without a cache image requested
+ *                 and persistent free space managers enabled.
+ *
+ * 2) Create some data sets and verify them.
+ *
+ * 3) Close the file.
+ *
+ * 4) Open the file R/W, and with cache image requested.
+ *
+ * 5) Verify the datasets created in 2) above. This will
+ * force their (clean) metadata into the metadata cache,
+ * and hence into the cache image.
+ *
+ * 6) Create some more datasets.
+ *
+ * 7) Close the file.
+ *
+ * 8) Open the file R/O and with evict on close enabled.
+ *
+ * 9) Verify all datasets twice.
+ *
+ * 10) Close the file.
+ *
+ * 11) Open the file R/W and with evict on close enabled.
+ *
+ * 12) Verify all datasets twice.
+ *
+ * 13) Close the file.
+ *
+ * 14) Discard the file.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 3/23/17
+ *
+ *-------------------------------------------------------------------------
+ */
+static unsigned
+evict_on_close_test(void)
+{
+#ifndef H5_HAVE_PARALLEL
+ const char * fcn_name = "evict_on_close_test()";
+ char filename[512];
+ hbool_t show_progress = FALSE;
+ hbool_t verbose = FALSE;
+ hid_t file_id = -1;
+ H5F_t *file_ptr = NULL;
+ H5C_t *cache_ptr = NULL;
+ int cp = 0;
+#endif /* H5_HAVE_PARALLEL */
+
+ TESTING("Cache image / evict on close interaction");
+
+#ifdef H5_HAVE_PARALLEL
+ SKIPPED();
+ HDputs(" EoC not supported in the parallel library.");
+ return 0;
+#else
+
+ pass = TRUE;
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* setup the file name */
+ if ( pass ) {
+
+ if ( h5_fixname(FILENAMES[0], H5P_DEFAULT, filename, sizeof(filename))
+ == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "h5_fixname() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 1) Create a HDF5 file without a cache image requested
+     * and persistent free space managers enabled.
+ */
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ TRUE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ TRUE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 2) Create some data sets and verify them. */
+
+ if ( pass ) {
+
+ create_datasets(file_id, 1, 10);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ if ( pass ) {
+
+ verify_datasets(file_id, 1, 10);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 3) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed (1).\n";
+
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 4) Open the file R/W, and with cache image requested. */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ TRUE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 5) Verify the datasets created in 2) above. This will
+ * force their (clean) metadata into the metadata cache,
+ * and hence into the cache image.
+ */
+
+ if ( pass ) {
+
+ verify_datasets(file_id, 1, 10);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 6) Create some more datasets and verify them */
+
+ if ( pass ) {
+
+ create_datasets(file_id, 11, 20);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ if ( pass ) {
+
+ verify_datasets(file_id, 11, 20);
+ }
+
+ if ( verbose ) {
+
+ HDassert(cache_ptr);
+ HDassert(cache_ptr->magic == H5C__H5C_T_MAGIC);
+
+ HDfprintf(stdout, "index size / index dirty size = %lld / %lld\n",
+ (long long)(cache_ptr->index_size),
+ (long long)(cache_ptr->dirty_index_size));
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 7) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed (2).\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 8) Open the file R/O and with evict on close enabled. */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ TRUE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ TRUE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s*: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 9) Verify all datasets twice */
+
+ if ( pass ) {
+
+ verify_datasets(file_id, 1, 20);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ if ( pass ) {
+
+ verify_datasets(file_id, 1, 20);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 10) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed (3).\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+    /* 11) Open the file R/W and with evict on close enabled. */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* set_eoc */ TRUE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s*: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 12) Verify all datasets twice */
+
+ if ( pass ) {
+
+ verify_datasets(file_id, 1, 20);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ if ( pass ) {
+
+ verify_datasets(file_id, 1, 20);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 13) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+            failure_mssg = "H5Fclose() failed (4).\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 14) Discard the file. */
+
+ if ( pass ) {
+
+ if ( HDremove(filename) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "HDremove() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ if ( pass ) { PASSED(); } else { H5_FAILED(); }
+
+ if ( ! pass )
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\".\n",
+ FUNC, failure_mssg);
+
+ return !pass;
+#endif /* H5_HAVE_PARALLEL */
+
+} /* evict_on_close_test() */
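Evict on close, as exercised by evict_on_close_test() above, is a file access property: when it is enabled, the metadata cache evicts an object's entries as soon as the object's last identifier is closed. A minimal sketch of enabling it at the user level follows (the tests set it inside open_hdf5_file() when set_eoc is TRUE, presumably through this same public call); open_with_evict_on_close() is a hypothetical helper name, not part of this patch.

#include "hdf5.h"

/* Hypothetical helper: open an existing file read-only with evict on close
 * enabled, so that an object's metadata is evicted from the metadata cache
 * when its last ID is closed.
 */
static hid_t
open_with_evict_on_close(const char *name)
{
    hid_t fapl_id = -1;
    hid_t file_id = -1;

    if((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0)
        return -1;

    if(H5Pset_evict_on_close(fapl_id, (hbool_t)1 /* TRUE */) < 0) {
        H5Pclose(fapl_id);
        return -1;
    }

    file_id = H5Fopen(name, H5F_ACC_RDONLY, fapl_id);

    H5Pclose(fapl_id);

    return file_id;
}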
+
+
+/*-------------------------------------------------------------------------
+ * Function: main
+ *
+ * Purpose: Run tests on the cache code contained in H5C.c
+ *
+ * Return:      Success: 0
+ *
+ *              Failure: 1
+ *
+ * Programmer: John Mainzer
+ * 6/24/04
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+main(void)
+{
+ unsigned nerrs = 0;
+ int express_test;
+
+ H5open();
+
+ express_test = GetTestExpress();
+
+ printf("=========================================\n");
+ printf("Cache image tests\n");
+ printf(" express_test = %d\n", express_test);
+ printf("=========================================\n");
+
+ nerrs += check_cache_image_ctl_flow_1();
+ nerrs += check_cache_image_ctl_flow_2();
+ nerrs += check_cache_image_ctl_flow_3();
+ nerrs += check_cache_image_ctl_flow_4();
+ nerrs += check_cache_image_ctl_flow_5();
+ nerrs += check_cache_image_ctl_flow_6();
+
+ nerrs += cache_image_smoke_check_1();
+ nerrs += cache_image_smoke_check_2();
+ nerrs += cache_image_smoke_check_3();
+ nerrs += cache_image_smoke_check_4();
+ nerrs += cache_image_smoke_check_5();
+ nerrs += cache_image_smoke_check_6();
+
+ nerrs += cache_image_api_error_check_1();
+ nerrs += cache_image_api_error_check_2();
+ nerrs += cache_image_api_error_check_3();
+ nerrs += cache_image_api_error_check_4();
+
+ nerrs += get_free_sections_test();
+ nerrs += evict_on_close_test();
+
+ return(nerrs > 0);
+
+} /* main() */
+
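For orientation, the cache image requested by these tests (the set_mdci_fapl parameter of open_hdf5_file()) is configured on a file access property list through the public cache-image API. The sketch below shows a plausible user-level equivalent; request_cache_image() is a hypothetical helper name and the field settings are illustrative, not taken from this patch.

#include "hdf5.h"

/* Hypothetical helper: ask the library to write a metadata cache image
 * when the file associated with this FAPL is closed.
 */
static herr_t
request_cache_image(hid_t fapl_id)
{
    H5AC_cache_image_config_t config;

    /* The version field must be set before querying the current settings */
    config.version = H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION;
    if(H5Pget_mdc_image_config(fapl_id, &config) < 0)
        return -1;

    config.generate_image     = 1;      /* TRUE: generate an image on close */
    config.save_resize_status = 0;      /* FALSE */
    config.entry_ageout       = H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE;

    return H5Pset_mdc_image_config(fapl_id, &config);
}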
diff --git a/test/cache_logging.c b/test/cache_logging.c
index 9190a8a..b4ed53b 100644
--- a/test/cache_logging.c
+++ b/test/cache_logging.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Purpose: Tests the metadata cache logging framework */
diff --git a/test/cache_tagging.c b/test/cache_tagging.c
index 473851e..99ab49c 100644
--- a/test/cache_tagging.c
+++ b/test/cache_tagging.c
@@ -1,14 +1,13 @@
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
* All rights reserved. *
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Mike McGreevy
@@ -21,11 +20,8 @@
#include "H5Fpkg.h"
#include "testhdf5.h"
-#include "h5test.h"
#include "cache_common.h"
-#include "H5Iprivate.h"
-#include "H5ACprivate.h"
#include "H5HLprivate.h"
/* ============ */
@@ -56,13 +52,16 @@
/* ===================== */
/* Helper Functions */
+#ifndef NDEBUG
static int dump_cache(hid_t fid);
+#endif /* NDEBUG */ /* end debugging functions */
static int verify_no_unknown_tags(hid_t fid);
static int mark_all_entries_investigated(hid_t fid);
static int reset_all_entries_investigated(hid_t fid);
static int verify_tag(hid_t fid, int id, haddr_t tag);
static int get_object_header_tag(hid_t loc_id, haddr_t *tag);
static int get_sbe_tag(hid_t fid, haddr_t *tag);
+
/* Tests */
static unsigned check_file_creation_tags(hid_t fcpl_id, int type);
static unsigned check_file_open_tags(hid_t fcpl, int type);
@@ -74,7 +73,6 @@ static unsigned check_attribute_rename_tags(hid_t fcpl, int type);
static unsigned check_dataset_creation_tags(hid_t fcpl, int type);
static unsigned check_dataset_creation_earlyalloc_tags(hid_t fcpl, int type);
static unsigned check_link_removal_tags(hid_t fcpl, int type);
-
static unsigned check_group_creation_tags(void);
static unsigned check_multi_group_creation_tags(void);
static unsigned check_group_open_tags(void);
@@ -98,6 +96,7 @@ static unsigned check_invalid_tag_application(void);
/* ================ */
+#ifndef NDEBUG
/*-------------------------------------------------------------------------
* Function: dump_cache()
@@ -131,6 +130,7 @@ static int dump_cache(hid_t fid)
error:
return -1;
} /* dump_cache */
+#endif /* NDEBUG */ /* end debugging functions */
/*-------------------------------------------------------------------------
@@ -448,8 +448,10 @@ check_file_creation_tags(hid_t fcpl_id, int type)
/* Create a test file with provided fcpl_t */
if ( (fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT)) < 0 ) TEST_ERROR;
+#ifndef NDEBUG
/* if verbose, print cache index to screen before verification . */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* verify there is a superblock entry with superblock tag */
if ( verify_tag(fid, H5AC_SUPERBLOCK_ID, H5AC__SUPERBLOCK_TAG) < 0 ) TEST_ERROR;
@@ -554,8 +556,10 @@ check_file_open_tags(hid_t fcpl, int type)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen before verification . */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* verify there is a superblock entry with superblock tag. */
if ( verify_tag(fid, H5AC_SUPERBLOCK_ID, H5AC__SUPERBLOCK_TAG) < 0 ) TEST_ERROR;
@@ -656,8 +660,10 @@ check_group_creation_tags(void)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* Verify root group's tagged metadata */
if ( verify_tag(fid, H5AC_OHDR_ID, root_tag) < 0 ) TEST_ERROR;
@@ -759,8 +765,10 @@ check_multi_group_creation_tags(void)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* Verify there is an object header for each group */
for (i = 0; i < MULTIGROUPS; i++) {
@@ -891,8 +899,10 @@ check_link_iteration_tags(void)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* Verify root group's tagged metadata */
if ( verify_tag(fid, H5AC_OHDR_ID, root_tag) < 0 ) TEST_ERROR;
@@ -956,9 +966,9 @@ check_dense_attribute_tags(void)
int verbose = FALSE; /* verbose file outout */
int i = 0; /* iterator */
hid_t fapl = -1; /* File access property list */
- haddr_t d_tag = 0; /* Dataset tag value */
- haddr_t root_tag = 0; /* Root group tag value */
- char attrname[500]; /* Name of attribute */
+ haddr_t d_tag = 0; /* Dataset tag value */
+ haddr_t root_tag = 0; /* Root group tag value */
+ char attrname[500]; /* Name of attribute */
/* Testing Macro */
TESTING("tag application during dense attribute manipulation");
@@ -1008,8 +1018,10 @@ check_dense_attribute_tags(void)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* Verify free space header and section info */
if ( verify_tag(fid, H5AC_FSPACE_SINFO_ID, d_tag) < 0 ) TEST_ERROR;
@@ -1064,8 +1076,10 @@ check_dense_attribute_tags(void)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* verify object header belonging to dataset */
if ( verify_tag(fid, H5AC_OHDR_ID, d_tag) < 0 ) TEST_ERROR;
@@ -1172,8 +1186,10 @@ check_group_open_tags(void)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* Verify root group metadata */
if ( verify_tag(fid, H5AC_OHDR_ID, root_tag) < 0 ) TEST_ERROR;
@@ -1276,8 +1292,10 @@ check_attribute_creation_tags(hid_t fcpl, int type)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* verify object header belonging to group */
if ( verify_tag(fid, H5AC_OHDR_ID, g_tag) < 0 ) TEST_ERROR;
@@ -1410,8 +1428,10 @@ check_attribute_open_tags(hid_t fcpl, int type)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* verify object header belonging to group */
if ( verify_tag(fid, H5AC_OHDR_ID, g_tag) < 0 ) TEST_ERROR;
@@ -1493,6 +1513,7 @@ check_attribute_rename_tags(hid_t fcpl, int type)
haddr_t g_tag = 0;
hsize_t dims1[2] = {DIMS, DIMS}; /* dimensions */
hsize_t maxdims[2] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* dimensions */
+ hbool_t persistant_fsms = FALSE;
/* Testing Macro */
TESTING("tag application during attribute renaming");
@@ -1501,6 +1522,10 @@ check_attribute_rename_tags(hid_t fcpl, int type)
/* Setup */
/* ===== */
+    /* check to see if the FCPL specified persistent free space managers */
+ if(H5Pget_file_space_strategy(fcpl, NULL, &persistant_fsms, NULL) < 0)
+ TEST_ERROR;
+
/* Allocate array */
if ( (NULL == (data = (int *)HDcalloc(DIMS * DIMS, sizeof(int)))) ) TEST_ERROR;
@@ -1536,6 +1561,7 @@ check_attribute_rename_tags(hid_t fcpl, int type)
/* Close and Reopen the file and group */
if ( H5Gclose(gid) < 0 ) TEST_ERROR;
if ( H5Fclose(fid) < 0 ) TEST_ERROR;
+
if ( (fid = H5Fopen(FILENAME, H5F_ACC_RDWR, H5P_DEFAULT)) < 0 ) TEST_ERROR;
if ( (gid = H5Gopen2(fid, GROUPNAME, H5P_DEFAULT)) < 0 ) TEST_ERROR;
@@ -1552,8 +1578,10 @@ check_attribute_rename_tags(hid_t fcpl, int type)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* Verify root group metadata */
if ( verify_tag(fid, H5AC_OHDR_ID, root_tag) < 0 ) TEST_ERROR;
@@ -1588,9 +1616,18 @@ check_attribute_rename_tags(hid_t fcpl, int type)
* 3 calls to verify_tag() for verifying free space:
* one freespace header tag for H5FD_MEM_DRAW manager,
* one freespace header tag for H5FD_MEM_SUPER manager
+ * one freespace section info tag for H5FD_MEM_SUPER manager
*/
if ( verify_tag(fid, H5AC_FSPACE_HDR_ID, H5AC__FREESPACE_TAG) < 0 ) TEST_ERROR;
- if ( verify_tag(fid, H5AC_FSPACE_HDR_ID, H5AC__FREESPACE_TAG) < 0 ) TEST_ERROR;
+
+    /* If the free space managers are persistent, the
+ * H5MF_tidy_self_referential_fsm_hack() must have been run.
+ * Since this function floats all self referential free space
+ * managers, the H5FD_MEM_SUPER FSM will not be in the metadata
+ * cache.
+ */
+ if(!persistant_fsms && verify_tag(fid, H5AC_FSPACE_HDR_ID, H5AC__FREESPACE_TAG) < 0) TEST_ERROR;
+ if(!persistant_fsms && verify_tag(fid, H5AC_FSPACE_SINFO_ID, H5AC__FREESPACE_TAG) < 0) TEST_ERROR;
/* verify btree header and leaf node belonging to group */
if ( verify_tag(fid, H5AC_BT2_HDR_ID, g_tag) < 0 ) TEST_ERROR;
@@ -1655,6 +1692,7 @@ check_attribute_delete_tags(hid_t fcpl, int type)
haddr_t g_tag = 0;
hsize_t dims1[2] = {DIMS, DIMS}; /* dimensions */
hsize_t maxdims[2] = {H5S_UNLIMITED, H5S_UNLIMITED}; /* dimensions */
+ hbool_t persistant_fsms = FALSE;
/* Testing Macro */
TESTING("tag application during attribute delete");
@@ -1663,6 +1701,10 @@ check_attribute_delete_tags(hid_t fcpl, int type)
/* Setup */
/* ===== */
+    /* check to see if the FCPL specified persistent free space managers */
+ if ( H5Pget_file_space_strategy(fcpl, NULL, &persistant_fsms, NULL) < 0 )
+ TEST_ERROR;
+
/* Allocate array */
if ( (NULL == (data = (int *)HDcalloc(DIMS * DIMS, sizeof(int)))) ) TEST_ERROR;
@@ -1714,8 +1756,10 @@ check_attribute_delete_tags(hid_t fcpl, int type)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* verify object header belonging to group */
if ( verify_tag(fid, H5AC_OHDR_ID, g_tag) < 0 ) TEST_ERROR;
@@ -1726,12 +1770,26 @@ check_attribute_delete_tags(hid_t fcpl, int type)
if ( verify_tag(fid, H5AC_SOHM_TABLE_ID, H5AC__SOHM_TAG) < 0 ) TEST_ERROR;
/*
- * 2 calls to verify_tag() for verifying free space:
- * one freespace header tag for H5FD_MEM_DRAW manager,
- * one freespace header tag for H5FD_MEM_SUPER manager
- */
- if ( verify_tag(fid, H5AC_FSPACE_HDR_ID, H5AC__FREESPACE_TAG) < 0 ) TEST_ERROR;
- if ( verify_tag(fid, H5AC_FSPACE_HDR_ID, H5AC__FREESPACE_TAG) < 0 ) TEST_ERROR;
+ * 2 calls to verify_tag() for verifying free space:
+ * one freespace header tag for free-space header,
+     *   one freespace section info tag for free-space section info
+ */
+ if ( verify_tag(fid, H5AC_FSPACE_HDR_ID, H5AC__FREESPACE_TAG) < 0 )
+ TEST_ERROR;
+ if ( verify_tag(fid, H5AC_FSPACE_SINFO_ID, H5AC__FREESPACE_TAG) < 0 )
+ TEST_ERROR;
+
+#if 0
+    /* If the free space managers are persistent, the
+ * H5MF_tidy_self_referential_fsm_hack() must have been run.
+ * Since this function floats all self referential free space
+ * managers, the H5FD_MEM_SUPER FSM will not be in the metadata
+ * cache.
+ */
+ if ( ( ! persistant_fsms ) &&
+ ( verify_tag(fid, H5AC_FSPACE_HDR_ID, H5AC__FREESPACE_TAG) < 0 ) )
+ TEST_ERROR;
+#endif
} /* end if */
@@ -1836,8 +1894,10 @@ check_dataset_creation_tags(hid_t fcpl, int type)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* Verify root group metadata */
if ( verify_tag(fid, H5AC_OHDR_ID, root_tag) < 0 ) TEST_ERROR;
@@ -1961,8 +2021,10 @@ check_dataset_creation_earlyalloc_tags(hid_t fcpl, int type)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* Verify root group metadata */
if ( verify_tag(fid, H5AC_OHDR_ID, root_tag) < 0 ) TEST_ERROR;
@@ -2099,8 +2161,10 @@ check_dataset_open_tags(void)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* Verify root group metadata */
if ( verify_tag(fid, H5AC_OHDR_ID, root_tag) < 0 ) TEST_ERROR;
@@ -2230,8 +2294,10 @@ check_dataset_write_tags(void)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* Verify 10 b-tree nodes belonging to dataset */
for (i=0; i<10; i++)
@@ -2353,8 +2419,10 @@ check_attribute_write_tags(hid_t fcpl, int type)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* Verify object header of group */
if ( verify_tag(fid, H5AC_OHDR_ID, g_tag) < 0 ) TEST_ERROR;
@@ -2508,8 +2576,10 @@ check_dataset_read_tags(void)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* Verify 19 b-tree nodes belonging to dataset */
for (i=0; i<19; i++)
@@ -2638,8 +2708,10 @@ check_dataset_size_retrieval(void)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* Verify 19 b-tree nodes belonging to dataset */
for (i=0; i<19; i++)
@@ -2771,8 +2843,10 @@ check_dataset_extend_tags(void)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* Verify root group metadata */
if ( verify_tag(fid, H5AC_OHDR_ID, d_tag) < 0 ) TEST_ERROR;
@@ -2868,8 +2942,10 @@ check_object_info_tags(void)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* Verify root group's tagged metadata */
if ( verify_tag(fid, H5AC_OHDR_ID, root_tag) < 0 ) TEST_ERROR;
@@ -2974,8 +3050,10 @@ check_object_copy_tags(void)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* Verify root group's tagged metadata */
if ( verify_tag(fid, H5AC_OHDR_ID, root_tag) < 0 ) TEST_ERROR;
@@ -3124,8 +3202,10 @@ check_link_removal_tags(hid_t fcpl, int type)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* Verify root group's tagged metadata */
if ( verify_tag(fid, H5AC_OHDR_ID, root_tag) < 0 ) TEST_ERROR;
@@ -3273,8 +3353,10 @@ check_link_getname_tags(void)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* Verify root group's tagged metadata */
if ( verify_tag(fid, H5AC_OHDR_ID, root_tag) < 0 ) TEST_ERROR;
@@ -3373,8 +3455,10 @@ check_external_link_creation_tags(void)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* Verify root group metadata */
if ( verify_tag(fid, H5AC_OHDR_ID, root_tag) < 0 ) TEST_ERROR;
@@ -3480,8 +3564,10 @@ check_external_link_open_tags(void)
/* Verification of Metadata Tag Values */
/* =================================== */
+#ifndef NDEBUG
/* if verbose, print cache index to screen for visual verification */
if ( verbose ) dump_cache(fid);
+#endif /* NDEBUG */ /* end debugging functions */
/* verify tag value of first file's root group */
if ( verify_tag(fid, H5AC_OHDR_ID, root_tag) < 0 ) TEST_ERROR;
@@ -3645,7 +3731,7 @@ main(void)
fcpl_shmesg_all = H5Pcreate(H5P_FILE_CREATE);
H5Pset_shared_mesg_nindexes(fcpl_shmesg_all, 1);
H5Pset_shared_mesg_index(fcpl_shmesg_all, 0, H5O_SHMESG_ALL_FLAG, 20);
- H5Pset_file_space(fcpl_shmesg_all, H5F_FILE_SPACE_ALL_PERSIST, (hsize_t)0);
+ H5Pset_file_space_strategy(fcpl_shmesg_all, H5F_FSPACE_STRATEGY_FSM_AGGR, TRUE, (hsize_t)0);
/* ========= */
/* Run Tests */
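The new persistant_fsms checks above, and the replacement of H5Pset_file_space() with H5Pset_file_space_strategy() in main(), both depend on the free-space strategy recorded in the file creation property list. The following sketch shows the corresponding user-level calls for the FSM_AGGR strategy with persistent free-space managers; the file name fsm_example.h5 and the 1-byte threshold are arbitrary choices for illustration, and the program is not part of this patch.

#include "hdf5.h"

int
main(void)
{
    hid_t                 fcpl_id = -1;
    hid_t                 file_id = -1;
    H5F_fspace_strategy_t strategy;
    hbool_t               persist   = 0;
    hsize_t               threshold = 0;

    if((fcpl_id = H5Pcreate(H5P_FILE_CREATE)) < 0)
        return 1;

    /* FSM + aggregator strategy, persistent free-space managers,
     * 1-byte free-space section threshold
     */
    if(H5Pset_file_space_strategy(fcpl_id, H5F_FSPACE_STRATEGY_FSM_AGGR,
                                  (hbool_t)1, (hsize_t)1) < 0)
        return 1;

    if((file_id = H5Fcreate("fsm_example.h5", H5F_ACC_TRUNC, fcpl_id, H5P_DEFAULT)) < 0)
        return 1;

    /* Read the settings back, as check_attribute_rename_tags() now does */
    if(H5Pget_file_space_strategy(fcpl_id, &strategy, &persist, &threshold) < 0)
        return 1;

    H5Fclose(file_id);
    H5Pclose(fcpl_id);

    return persist ? 0 : 1;
}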
diff --git a/test/chunk_info.c b/test/chunk_info.c
index 68bf774..1f19963 100644
--- a/test/chunk_info.c
+++ b/test/chunk_info.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/cmpd_dset.c b/test/cmpd_dset.c
index b44a847..5d6cfc7 100644
--- a/test/cmpd_dset.c
+++ b/test/cmpd_dset.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/cork.c b/test/cork.c
index 6cca163..8c685ee 100644
--- a/test/cork.c
+++ b/test/cork.c
@@ -1,14 +1,13 @@
/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
* All rights reserved. *
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Vailin Choi
diff --git a/test/cross_read.c b/test/cross_read.c
index 8df81f0..5d5ef07 100644
--- a/test/cross_read.c
+++ b/test/cross_read.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/dangle.c b/test/dangle.c
index 4c8ce6b..1717d12 100644
--- a/test/dangle.c
+++ b/test/dangle.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/dsets.c b/test/dsets.c
index d086c58..ba0ad82 100644
--- a/test/dsets.c
+++ b/test/dsets.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -19,10 +17,6 @@
*
* Purpose: Tests the dataset interface (H5D)
*/
-
-#define H5D_FRIEND /*suppress error about including H5Dpkg */
-#define H5D_TESTING
-
#define H5FD_FRIEND /*suppress error about including H5FDpkg */
#define H5FD_TESTING
@@ -30,7 +24,6 @@
#include "h5test.h"
#include "H5srcdir.h"
-#include "H5Dpkg.h"
#include "H5FDpkg.h"
#include "H5VMprivate.h"
#include "H5Zpkg.h"
@@ -507,7 +500,7 @@ test_simple_io(const char *env_h5_drvr, hid_t fapl)
TESTING("simple I/O");
- /* Can't run this test with multi-file VFDs */
+    /* Can't run this test with multi-file VFDs because it uses HDopen/read/seek on the file directly */
if(HDstrcmp(env_h5_drvr, "split") && HDstrcmp(env_h5_drvr, "multi") && HDstrcmp(env_h5_drvr, "family")) {
h5_fixname(FILENAME[4], fapl, filename, sizeof filename);
@@ -633,7 +626,7 @@ error:
*-------------------------------------------------------------------------
*/
static herr_t
-test_userblock_offset(const char *env_h5_drvr, hid_t fapl)
+test_userblock_offset(const char *env_h5_drvr, hid_t fapl, hbool_t new_format)
{
char filename[FILENAME_BUF_SIZE];
hid_t file = -1, fcpl = -1, dataset = -1, space = -1;
@@ -645,12 +638,15 @@ test_userblock_offset(const char *env_h5_drvr, hid_t fapl)
TESTING("dataset offset with user block");
- /* Can't run this test with multi-file VFDs */
+    /* Can't run this test with multi-file VFDs because it uses HDopen/read/seek on the file directly */
if(HDstrcmp(env_h5_drvr, "split") && HDstrcmp(env_h5_drvr, "multi") && HDstrcmp(env_h5_drvr, "family")) {
h5_fixname(FILENAME[2], fapl, filename, sizeof filename);
if((fcpl=H5Pcreate(H5P_FILE_CREATE)) < 0) goto error;
if(H5Pset_userblock(fcpl, (hsize_t)USER_BLOCK) < 0) goto error;
+ if(new_format)
+ if(H5Pset_file_space_page_size(fcpl, (hsize_t)USER_BLOCK) < 0)
+ goto error;
if((file=H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl)) < 0)
goto error;
@@ -1984,10 +1980,11 @@ test_filter_internal(hid_t fid, const char *name, hid_t dcpl, int if_fletcher32,
}
}
- PASSED();
-
/* Get the storage size of the dataset */
if((*dset_size=H5Dget_storage_size(dataset))==0) goto error;
+
+ PASSED();
+
/* Clean up objects used for this test */
if(H5Dclose (dataset) < 0) goto error;
if(H5Sclose (sid) < 0) goto error;
@@ -2932,6 +2929,7 @@ test_nbit_int(hid_t file)
PASSED();
return 0;
+
error:
return -1;
}
@@ -12634,7 +12632,123 @@ error:
} /* dls_01_main() */
+/*-------------------------------------------------------------------------
+ * Function: test_compact_open_close_dirty
+ *
+ * Purpose: Verify that the two issues reported in HDFFV-10051 are fixed:
+ * (1) Repeated open/close of a compact dataset fails due to the
+ * increment of ndims in the dataset structure for every open.
+ *              (2) The layout "dirty" flag for a compact dataset is not
+ *                  reset properly after flushing the data at dataset close.
+ *              The test for issue (1) is based on compactoc.c attached
+ *              to the JIRA issue HDFFV-10051.
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ * Programmer: Vailin Choi; April 2017
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+test_compact_open_close_dirty(hid_t fapl)
+{
+ hid_t fid = -1; /* File ID */
+ hid_t did = -1; /* Dataset ID */
+ hid_t sid = -1; /* Dataspace ID */
+ hid_t dcpl = -1; /* Dataset creation property list */
+ hsize_t dims[1] = {10}; /* Dimension */
+ int wbuf[10]; /* Data buffer */
+ char filename[FILENAME_BUF_SIZE]; /* Filename */
+ int i; /* Local index variable */
+ hbool_t dirty; /* The dirty flag */
+
+ TESTING("compact dataset repeated open/close and dirty flag");
+
+ /* Create a file */
+ h5_fixname(FILENAME[1], fapl, filename, sizeof filename);
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ TEST_ERROR
+
+ /* Initialize data */
+ for(i = 0; i < 10; i++)
+ wbuf[i] = i;
+
+ /* Create dataspace */
+ if((sid = H5Screate_simple(1, dims, NULL)) < 0)
+ TEST_ERROR
+
+ /* Set compact layout */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR
+ if(H5Pset_layout(dcpl, H5D_COMPACT) < 0)
+ TEST_ERROR
+ if(H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY) < 0)
+ TEST_ERROR
+
+ /* Create a compact dataset */
+ if((did = H5Dcreate2(fid, DSET_COMPACT_MAX_NAME, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /* Write to the dataset */
+ if(H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf) < 0)
+ TEST_ERROR
+
+ /* Close the dataset */
+ if(H5Dclose(did) < 0)
+ TEST_ERROR
+
+ /* Verify the repeated open/close of the dataset will not fail */
+    for(i = 0; i < 20; i++) {
+ H5E_BEGIN_TRY {
+ did = H5Dopen2 (fid, DSET_COMPACT_MAX_NAME, H5P_DEFAULT);
+ } H5E_END_TRY;
+ if(did < 0)
+ TEST_ERROR
+ if(H5Dclose(did) < 0)
+ TEST_ERROR
+ }
+
+ /* Open the dataset */
+ if((did = H5Dopen2(fid, DSET_COMPACT_MAX_NAME, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /* Retrieve the "dirty" flag from the compact dataset layout */
+ if(H5D__layout_compact_dirty_test(did, &dirty) < 0)
+ TEST_ERROR
+
+ /* Verify that the "dirty" flag is false */
+ if(dirty)
+ TEST_ERROR
+
+ /* Close the dataset */
+ if(H5Dclose(did) < 0)
+ TEST_ERROR
+
+ /* Close the dataspace */
+ if(H5Sclose(sid) < 0)
+ TEST_ERROR
+
+ /* Close the dataset creation property list */
+ if(H5Pclose(dcpl) < 0)
+ TEST_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ TEST_ERROR
+ PASSED();
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Sclose(sid);
+ H5Pclose(dcpl);
+ H5Dclose(did);
+ H5Fclose(fid);
+ } H5E_END_TRY;
+ return -1;
+} /* test_compact_open_close_dirty() */
/*-------------------------------------------------------------------------
@@ -12656,18 +12770,24 @@ main(void)
{
char filename[FILENAME_BUF_SIZE];
hid_t file, grp, fapl, fapl2;
+ hid_t fcpl = -1, fcpl2 = -1;
unsigned new_format;
+ unsigned paged;
int mdc_nelmts;
size_t rdcc_nelmts;
size_t rdcc_nbytes;
double rdcc_w0;
int nerrors = 0;
const char *envval;
+    hbool_t contig_addr_vfd;            /* Whether the VFD used has a contiguous address space */
/* Don't run this test using certain file drivers */
envval = HDgetenv("HDF5_DRIVER");
if(envval == NULL)
- envval = "sec2";
+ envval = "nomatch";
+
+    /* Check whether the current VFD supports a contiguous address space */
+ contig_addr_vfd = (hbool_t)(HDstrcmp(envval, "split") && HDstrcmp(envval, "multi"));
/* Set the random # seed */
HDsrandom((unsigned)HDtime(NULL));
@@ -12689,101 +12809,137 @@ main(void)
/* Set the "use the latest version of the format" bounds for creating objects in the file */
if(H5Pset_libver_bounds(fapl2, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) TEST_ERROR
+ /* create a file creation property list */
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ TEST_ERROR
+ if((fcpl2 = H5Pcopy(fcpl)) < 0) TEST_ERROR
+
+    /* Set the file space strategy to paged aggregation with persistent free space */
+ if(H5Pset_file_space_strategy(fcpl2, H5F_FSPACE_STRATEGY_PAGE, TRUE, (hsize_t)1) < 0)
+ TEST_ERROR
+
h5_fixname(FILENAME[0], fapl, filename, sizeof filename);
- /* Test with old & new format groups */
- for(new_format = FALSE; new_format <= TRUE; new_format++) {
- hid_t my_fapl;
+ /* Test with paged aggregation enabled or not */
+ for(paged = FALSE; paged <= TRUE; paged++) {
- /* Set the FAPL for the type of format */
- if(new_format) {
- puts("\nTesting with new file format:");
- my_fapl = fapl2;
- } /* end if */
- else {
- puts("Testing with old file format:");
- my_fapl = fapl;
- } /* end else */
+        /* Temporary: skip testing with the multi/split drivers:
+           file creation fails when persisting free space or using the paged aggregation strategy */
+ if(!contig_addr_vfd && paged)
+ continue;
- /* Create the file for this test */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, my_fapl)) < 0)
- goto error;
+ /* Test with old & new format groups */
+ for(new_format = FALSE; new_format <= TRUE; new_format++) {
+ hid_t my_fapl, my_fcpl;
+
+ /* Set the FAPL for the type of format */
+ if(new_format) {
+ my_fapl = fapl2;
+ if(paged) {
+ my_fcpl = fcpl2;
+ puts("\nTesting with new file format and paged aggregation");
+ } else {
+ my_fcpl = fcpl;
+ puts("\nTesting with new file format and non-paged aggregation");
+ }
+ } /* end if */
+ else {
+ my_fapl = fapl;
+ if(paged) {
+ my_fcpl = fcpl2;
+ puts("Testing with old file format and paged aggregation:");
+ } else {
+ my_fcpl = fcpl;
+ puts("Testing with old file format and non-paged aggregation:");
+ }
+ } /* end else */
- /* Cause the library to emit initial messages */
- if((grp = H5Gcreate2(file, "emit diagnostics", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
- goto error;
- if(H5Oset_comment(grp, "Causes diagnostic messages to be emitted") < 0)
- goto error;
- if(H5Gclose(grp) < 0)
- goto error;
+ /* Create the file for this test */
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, my_fcpl, my_fapl)) < 0)
+ goto error;
+
+ /* Cause the library to emit initial messages */
+ if((grp = H5Gcreate2(file, "emit diagnostics", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ goto error;
+ if(H5Oset_comment(grp, "Causes diagnostic messages to be emitted") < 0)
+ goto error;
+ if(H5Gclose(grp) < 0)
+ goto error;
+
+ nerrors += (test_create(file) < 0 ? 1 : 0);
+ nerrors += (test_simple_io(envval, my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_compact_io(my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_max_compact(my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_compact_open_close_dirty(my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_conv_buffer(file) < 0 ? 1 : 0);
+ nerrors += (test_tconv(file) < 0 ? 1 : 0);
+ nerrors += (test_filters(file, my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_onebyte_shuffle(file) < 0 ? 1 : 0);
+ nerrors += (test_nbit_int(file) < 0 ? 1 : 0);
+ nerrors += (test_nbit_float(file) < 0 ? 1 : 0);
+ nerrors += (test_nbit_double(file) < 0 ? 1 : 0);
+ nerrors += (test_nbit_array(file) < 0 ? 1 : 0);
+ nerrors += (test_nbit_compound(file) < 0 ? 1 : 0);
+ nerrors += (test_nbit_compound_2(file) < 0 ? 1 : 0);
+ nerrors += (test_nbit_compound_3(file) < 0 ? 1 : 0);
+ nerrors += (test_nbit_int_size(file) < 0 ? 1 : 0);
+ nerrors += (test_nbit_flt_size(file) < 0 ? 1 : 0);
+ nerrors += (test_scaleoffset_int(file) < 0 ? 1 : 0);
+ nerrors += (test_scaleoffset_int_2(file) < 0 ? 1 : 0);
+ nerrors += (test_scaleoffset_float(file) < 0 ? 1 : 0);
+ nerrors += (test_scaleoffset_float_2(file) < 0 ? 1 : 0);
+ nerrors += (test_scaleoffset_double(file) < 0 ? 1 : 0);
+ nerrors += (test_scaleoffset_double_2(file) < 0 ? 1 : 0);
+ nerrors += (test_multiopen (file) < 0 ? 1 : 0);
+ nerrors += (test_types(file) < 0 ? 1 : 0);
+ nerrors += (test_userblock_offset(envval, my_fapl, new_format) < 0 ? 1 : 0);
+ nerrors += (test_missing_filter(file) < 0 ? 1 : 0);
+ nerrors += (test_can_apply(file) < 0 ? 1 : 0);
+ nerrors += (test_can_apply2(file) < 0 ? 1 : 0);
+ nerrors += (test_set_local(my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_can_apply_szip(file) < 0 ? 1 : 0);
+ nerrors += (test_compare_dcpl(file) < 0 ? 1 : 0);
+ nerrors += (test_copy_dcpl(file, my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_filter_delete(file) < 0 ? 1 : 0);
+ nerrors += (test_filters_endianess() < 0 ? 1 : 0);
+ nerrors += (test_zero_dims(file) < 0 ? 1 : 0);
+ nerrors += (test_missing_chunk(file) < 0 ? 1 : 0);
+ nerrors += (test_random_chunks(my_fapl) < 0 ? 1 : 0);
- nerrors += (test_create(file) < 0 ? 1 : 0);
- nerrors += (test_simple_io(envval, my_fapl) < 0 ? 1 : 0);
- nerrors += (test_compact_io(my_fapl) < 0 ? 1 : 0);
- nerrors += (test_max_compact(my_fapl) < 0 ? 1 : 0);
- nerrors += (test_conv_buffer(file) < 0 ? 1 : 0);
- nerrors += (test_tconv(file) < 0 ? 1 : 0);
- nerrors += (test_filters(file, my_fapl) < 0 ? 1 : 0);
- nerrors += (test_onebyte_shuffle(file) < 0 ? 1 : 0);
- nerrors += (test_nbit_int(file) < 0 ? 1 : 0);
- nerrors += (test_nbit_float(file) < 0 ? 1 : 0);
- nerrors += (test_nbit_double(file) < 0 ? 1 : 0);
- nerrors += (test_nbit_array(file) < 0 ? 1 : 0);
- nerrors += (test_nbit_compound(file) < 0 ? 1 : 0);
- nerrors += (test_nbit_compound_2(file) < 0 ? 1 : 0);
- nerrors += (test_nbit_compound_3(file) < 0 ? 1 : 0);
- nerrors += (test_nbit_int_size(file) < 0 ? 1 : 0);
- nerrors += (test_nbit_flt_size(file) < 0 ? 1 : 0);
- nerrors += (test_scaleoffset_int(file) < 0 ? 1 : 0);
- nerrors += (test_scaleoffset_int_2(file) < 0 ? 1 : 0);
- nerrors += (test_scaleoffset_float(file) < 0 ? 1 : 0);
- nerrors += (test_scaleoffset_float_2(file) < 0 ? 1 : 0);
- nerrors += (test_scaleoffset_double(file) < 0 ? 1 : 0);
- nerrors += (test_scaleoffset_double_2(file) < 0 ? 1 : 0);
- nerrors += (test_multiopen (file) < 0 ? 1 : 0);
- nerrors += (test_types(file) < 0 ? 1 : 0);
- nerrors += (test_userblock_offset(envval, my_fapl) < 0 ? 1 : 0);
- nerrors += (test_missing_filter(file) < 0 ? 1 : 0);
- nerrors += (test_can_apply(file) < 0 ? 1 : 0);
- nerrors += (test_can_apply2(file) < 0 ? 1 : 0);
- nerrors += (test_set_local(my_fapl) < 0 ? 1 : 0);
- nerrors += (test_can_apply_szip(file) < 0 ? 1 : 0);
- nerrors += (test_compare_dcpl(file) < 0 ? 1 : 0);
- nerrors += (test_copy_dcpl(file, my_fapl) < 0 ? 1 : 0);
- nerrors += (test_filter_delete(file) < 0 ? 1 : 0);
- nerrors += (test_filters_endianess() < 0 ? 1 : 0);
- nerrors += (test_zero_dims(file) < 0 ? 1 : 0);
- nerrors += (test_missing_chunk(file) < 0 ? 1 : 0);
- nerrors += (test_random_chunks(my_fapl) < 0 ? 1 : 0);
#ifndef H5_NO_DEPRECATED_SYMBOLS
- nerrors += (test_deprec(file) < 0 ? 1 : 0);
+ nerrors += (test_deprec(file) < 0 ? 1 : 0);
#endif /* H5_NO_DEPRECATED_SYMBOLS */
- nerrors += (test_huge_chunks(my_fapl) < 0 ? 1 : 0);
- nerrors += (test_chunk_cache(my_fapl) < 0 ? 1 : 0);
- nerrors += (test_big_chunks_bypass_cache(my_fapl) < 0 ? 1 : 0);
- nerrors += (test_chunk_fast(envval, my_fapl) < 0 ? 1 : 0);
- nerrors += (test_reopen_chunk_fast(my_fapl) < 0 ? 1 : 0);
- nerrors += (test_chunk_fast_bug1(my_fapl) < 0 ? 1 : 0);
- nerrors += (test_chunk_expand(my_fapl) < 0 ? 1 : 0);
- nerrors += (test_layout_extend(my_fapl) < 0 ? 1 : 0);
- nerrors += (test_fixed_array(my_fapl) < 0 ? 1 : 0);
- nerrors += (test_idx_compatible() < 0 ? 1 : 0);
- nerrors += (test_unfiltered_edge_chunks(my_fapl) < 0 ? 1 : 0);
- nerrors += (test_single_chunk(my_fapl) < 0 ? 1 : 0);
- nerrors += (test_large_chunk_shrink(my_fapl) < 0 ? 1 : 0);
- nerrors += (test_zero_dim_dset(my_fapl) < 0 ? 1 : 0);
- nerrors += (test_swmr_non_latest(envval, my_fapl) < 0 ? 1 : 0);
- nerrors += (test_earray_hdr_fd(envval, my_fapl) < 0 ? 1 : 0);
- nerrors += (test_farray_hdr_fd(envval, my_fapl) < 0 ? 1 : 0);
- nerrors += (test_bt2_hdr_fd(envval, my_fapl) < 0 ? 1 : 0);
- nerrors += (test_storage_size(my_fapl) < 0 ? 1 : 0);
-
- if(H5Fclose(file) < 0)
- goto error;
- } /* end for */
- /* Close 2nd FAPL */
+ nerrors += (test_huge_chunks(my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_chunk_cache(my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_big_chunks_bypass_cache(my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_chunk_fast(envval, my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_reopen_chunk_fast(my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_chunk_fast_bug1(my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_chunk_expand(my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_layout_extend(my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_fixed_array(my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_idx_compatible() < 0 ? 1 : 0);
+ nerrors += (test_unfiltered_edge_chunks(my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_single_chunk(my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_large_chunk_shrink(my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_zero_dim_dset(my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_storage_size(my_fapl) < 0 ? 1 : 0);
+
+ nerrors += (test_swmr_non_latest(envval, my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_earray_hdr_fd(envval, my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_farray_hdr_fd(envval, my_fapl) < 0 ? 1 : 0);
+ nerrors += (test_bt2_hdr_fd(envval, my_fapl) < 0 ? 1 : 0);
+
+ if(H5Fclose(file) < 0)
+ goto error;
+ } /* end for new_format */
+ } /* end for paged */
+
+ /* Close property lists */
if(H5Pclose(fapl2) < 0) TEST_ERROR
+ if(H5Pclose(fcpl) < 0) TEST_ERROR
+ if(H5Pclose(fcpl2) < 0) TEST_ERROR
/* Tests that do not use files */
nerrors += (test_scatter() < 0 ? 1 : 0);
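The new outer paged loop in main() runs every dataset test against an FCPL configured for paged aggregation with persistent free space, and test_userblock_offset() additionally sets a file space page size under the new format. A condensed sketch of that FCPL setup follows; make_paged_fcpl() is a hypothetical helper name, the 4096-byte page size in the usage note is only an example, and the library requires the page size to be at least 512 bytes.

#include "hdf5.h"

/* Hypothetical helper: build an FCPL that uses paged aggregation with
 * persistent free-space managers and an explicit file space page size.
 */
static hid_t
make_paged_fcpl(hsize_t page_size)
{
    hid_t fcpl_id;

    if((fcpl_id = H5Pcreate(H5P_FILE_CREATE)) < 0)
        return -1;

    /* Paged aggregation, persist free space, 1-byte section threshold */
    if(H5Pset_file_space_strategy(fcpl_id, H5F_FSPACE_STRATEGY_PAGE,
                                  (hbool_t)1, (hsize_t)1) < 0)
        goto error;

    /* Page size used for paged aggregation and page buffering */
    if(H5Pset_file_space_page_size(fcpl_id, page_size) < 0)
        goto error;

    return fcpl_id;

error:
    H5Pclose(fcpl_id);
    return -1;
}

/* Usage (sketch): hid_t fcpl = make_paged_fcpl((hsize_t)4096); */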
diff --git a/test/dt_arith.c b/test/dt_arith.c
index c5c14c87..c7f2986 100644
--- a/test/dt_arith.c
+++ b/test/dt_arith.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/dtransform.c b/test/dtransform.c
index 2769771..0381bb8 100644
--- a/test/dtransform.c
+++ b/test/dtransform.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "h5test.h"
diff --git a/test/dtypes.c b/test/dtypes.c
index c3a8066..7e8047d 100644
--- a/test/dtypes.c
+++ b/test/dtypes.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/dynlib1.c b/test/dynlib1.c
index 1ccc33a..e9137fb 100644
--- a/test/dynlib1.c
+++ b/test/dynlib1.c
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic document set and is *
- * linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have access *
- * to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
* Programmer: Raymond Lu
diff --git a/test/dynlib2.c b/test/dynlib2.c
index 0d8be2b..2574d4d 100644
--- a/test/dynlib2.c
+++ b/test/dynlib2.c
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic document set and is *
- * linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have access *
- * to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
* Programmer: Raymond Lu
diff --git a/test/dynlib3.c b/test/dynlib3.c
index e509a52..8871321 100644
--- a/test/dynlib3.c
+++ b/test/dynlib3.c
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic document set and is *
- * linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have access *
- * to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
* Programmer: Raymond Lu
diff --git a/test/dynlib4.c b/test/dynlib4.c
index 8da0270..06d90ff 100644
--- a/test/dynlib4.c
+++ b/test/dynlib4.c
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic document set and is *
- * linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have access *
- * to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
* Purpose: Tests the plugin module (H5PL)
diff --git a/test/earray.c b/test/earray.c
index 07acbb5..1058565 100644
--- a/test/earray.c
+++ b/test/earray.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Quincey Koziol <koziol@hdfgroup.org>
@@ -159,15 +157,6 @@ typedef struct earray_test_t {
/* Local prototypes */
-/* Metadata cache (H5AC) callbacks */
-static herr_t earray_cache_test_get_initial_load_size(void *udata, size_t *image_len);
-static void *earray_cache_test_deserialize(const void *image_ptr, size_t len,
- void *udata_ptr, hbool_t *dirty_ptr);
-static herr_t earray_cache_test_image_len(const void *thing, size_t *image_len_ptr);
-static herr_t earray_cache_test_serialize(const H5F_t *f, void *image_ptr,
- size_t len, void *thing);
-static herr_t earray_cache_test_free_icr(void *thing);
-
/* Local variables */
const char *FILENAME[] = {
@@ -182,24 +171,6 @@ char filename_g[EARRAY_FILENAME_LEN];
/* Empty file size */
h5_stat_size_t empty_size_g;
-/* H5EA test object inherits cache-like properties from H5AC */
-const H5AC_class_t H5AC_EARRAY_TEST[1] = {{
- /* id */ H5AC_TEST_ID,
- /* name */ "earray test",
- /* mem_type */ H5FD_MEM_DEFAULT,
- /* flags */ H5AC__CLASS_SKIP_READS | H5AC__CLASS_SKIP_WRITES,
- /* get_initial_load_size */ earray_cache_test_get_initial_load_size,
- /* get_final_load_size */ NULL,
- /* verify_chksum */ NULL,
- /* deserialize */ earray_cache_test_deserialize,
- /* image_len */ earray_cache_test_image_len,
- /* pre_serialize */ NULL,
- /* serialize */ earray_cache_test_serialize,
- /* notify */ NULL,
- /* free_icr */ earray_cache_test_free_icr,
- /* fsf_size */ NULL,
-}};
-
/*-------------------------------------------------------------------------
* Function: init_cparam
@@ -619,225 +590,6 @@ error:
/*-------------------------------------------------------------------------
- * Function: earray_cache_test_get_initial_load_size()
- *
- * Purpose: place holder function -- should never be called
- *
- * A generic discussion of metadata cache callbacks of this type
- * may be found in H5Cprivate.h.
- *
- * Return: Success: SUCCEED
- * Failure: FAIL
- *
- * Programmer: John Mainzer
- * 8/2/14
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-earray_cache_test_get_initial_load_size( void *udata, size_t *image_len)
-{
- HDassert(udata);
- HDassert(image_len);
-
- /* Should never be called */
- HDassert(0 && "Can't be called!");
-
- *image_len = 0;
-
- return(SUCCEED);
-} /* end earray_cache_test_get_initial_load_size() */
-
-
-/*-------------------------------------------------------------------------
- * Function: earray_cache_test_deserialize
- *
- * Purpose: place holder function -- should never be called.
- *
- *
- * A generic discussion of metadata cache callbacks of this type
- * may be found in H5Cprivate.h:
- *
- * Return: Success: Pointer to in core representation
- * Failure: NULL
- *
- * Programmer: John Mainzer
- * 8/2/14
- *
- *-------------------------------------------------------------------------
- */
-static void *
-earray_cache_test_deserialize(const void *image_ptr,
- size_t len,
- void *udata_ptr,
- hbool_t *dirty_ptr)
-{
- HDassert(image_ptr);
- HDassert(len > 0 );
- HDassert(udata_ptr);
- HDassert(dirty_ptr);
-
- /* Should never be called */
- HDassert(0 && "Can't be called!");
-
- return(NULL);
-} /* end earray_cache_test_deserialize() */
-
-
-/*-------------------------------------------------------------------------
- * Function: earray_cache_test_image_len
- *
- * Purpose: test code place holder function -- just set *image_len_ptr to
- * one.
- *
- *
- * A generic discussion of metadata cache callbacks of this type
- * may be found in H5Cprivate.h:
- *
- * Return: Success: SUCCEED
- * Failure: FAIL
- *
- * Programmer: John Mainzer
- * 8/2/14
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-earray_cache_test_image_len(const void *thing, size_t *image_len_ptr)
-{
- HDassert(thing);
- HDassert(image_len_ptr);
-
- /* Set size value */
- /* (hard-code to 1) */
- *image_len_ptr = 1;
-
- return(SUCCEED);
-} /* end earray_cache_test_image_len() */
-
-
-
-/*-------------------------------------------------------------------------
- * Function: earray_cache_test_serialize
- *
- * Purpose: Validate the contents of the instance of earray_test_t.
- *
- *
- * A generic discussion of metadata cache callbacks of this type
- * may be found in H5Cprivate.h:
- *
- *
- * Return: Success: SUCCEED
- * Failure: FAIL
- *
- * Programmer: John Mainzer
- * 8/2/14
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-earray_cache_test_serialize(const H5F_t *f,
- void *image_ptr,
- H5_ATTR_UNUSED size_t len,
- void *thing)
-{
- earray_test_t *test;
-
- HDassert(f);
- HDassert(image_ptr);
- HDassert(thing);
- test = (earray_test_t *)thing;
- HDassert(test);
- HDassert(test->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
- HDassert((const H5AC_class_t *)(test->cache_info.type) ==
- &(H5AC_EARRAY_TEST[0]));
-
- /* Check for out of order flush */
- if(test->fd_info->base_obj)
- TEST_ERROR
-
- /* Check which index this entry corresponds to */
- if((uint64_t)0 == test->idx) {
- /* Check for out of order flush */
- if(test->fd_info->idx0_obj || test->fd_info->idx0_elem)
- TEST_ERROR
-
- /* Set flag for object flush */
- test->fd_info->idx0_obj = TRUE;
- } /* end if */
- else if((uint64_t)1 == test->idx) {
- /* Check for out of order flush */
- if(test->fd_info->idx1_obj || test->fd_info->idx1_elem)
- TEST_ERROR
-
- /* Set flag for object flush */
- test->fd_info->idx1_obj = TRUE;
- } /* end if */
- else if((uint64_t)10000 == test->idx) {
- /* Check for out of order flush */
- if(test->fd_info->idx10000_obj || test->fd_info->idx10000_elem)
- TEST_ERROR
-
- /* Set flag for object flush */
- test->fd_info->idx10000_obj = TRUE;
- } /* end if */
- else if((uint64_t)-1 == test->idx) {
- /* Set flag for object flush */
- test->fd_info->base_obj = TRUE;
- } /* end if */
-
- return(SUCCEED);
-
-error:
- return(FAIL);
-} /* end earray_cache_test_serialize() */
-
-
-
-/*-------------------------------------------------------------------------
- * Function: earray_cache_test_free_icr
- *
- * Purpose: Destroy an extensible array test object in memory.
- *
- *
- * A generic discussion of metadata cache callbacks of this type
- * may be found in H5Cprivate.h:
- *
- *
- * Return: Success: SUCCEED
- * Failure: FAIL
- *
- * Programmer: John Mainzer
- * 8/2/14
- *
- *-------------------------------------------------------------------------
- */
-static herr_t
-earray_cache_test_free_icr(void *thing)
-{
- earray_test_t *test;
-
- HDassert(thing);
- test = (earray_test_t *)thing;
- HDassert(test);
-
- /* the metadata cache sets cache_info.magic to
- * H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC before calling the
- * free_icr routine. Hence the following assert:
- */
-
- HDassert(test->cache_info.magic == H5C__H5C_CACHE_ENTRY_T_BAD_MAGIC);
- HDassert((const H5AC_class_t *)(test->cache_info.type) ==
- &(H5AC_EARRAY_TEST[0]));
-
- /* Free the shared info itself */
- HDfree(test);
-
- return(SUCCEED);
-} /* end earray_cache_test_free_icr() */
-
-
-/*-------------------------------------------------------------------------
* Function: test_create
*
* Purpose: Test creating extensible array
diff --git a/test/efc.c b/test/efc.c
index 57a600d..bd008c8 100644
--- a/test/efc.c
+++ b/test/efc.c
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Neil Fortner
diff --git a/test/enc_dec_plist.c b/test/enc_dec_plist.c
index 4f0147e..36db2d0 100644
--- a/test/enc_dec_plist.c
+++ b/test/enc_dec_plist.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -117,6 +115,11 @@ main(void)
0.2f,
(256 * 2048),
H5AC__DEFAULT_METADATA_WRITE_STRATEGY};
+ H5AC_cache_image_config_t my_cache_image_config = {
+ H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION,
+ TRUE,
+ FALSE,
+ -1};
if(VERBOSE_MED)
printf("Encode/Decode DCPLs\n");
@@ -455,6 +458,8 @@ main(void)
FAIL_STACK_ERROR
if((H5Pset_mdc_config(fapl, &my_cache_config)) < 0)
FAIL_STACK_ERROR
+ if((H5Pset_mdc_image_config(fapl, &my_cache_image_config)) < 0)
+ FAIL_STACK_ERROR
if((H5Pset_core_write_tracking(fapl, TRUE, 1024 * 1024)) < 0)
FAIL_STACK_ERROR
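/*
 * The cache-image lines added above only set the property; this is a minimal
 * sketch of the matching read-back, assuming the usual encode/decode
 * round-trip this test performs elsewhere.  The helper name and the
 * comparison are illustrative; only H5Pset_mdc_image_config(),
 * H5Pget_mdc_image_config() and H5AC_cache_image_config_t come from the
 * library.
 */
static int
cache_image_config_roundtrip(hid_t fapl)
{
    H5AC_cache_image_config_t in  = {H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION, TRUE, FALSE, -1};
    H5AC_cache_image_config_t out;

    if(H5Pset_mdc_image_config(fapl, &in) < 0)
        return -1;

    /* As with H5Pget_mdc_config(), the version field is filled in before querying */
    out.version = H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION;
    if(H5Pget_mdc_image_config(fapl, &out) < 0)
        return -1;

    /* A decoded FAPL should carry the same cache-image settings */
    if(in.generate_image != out.generate_image ||
            in.save_resize_status != out.save_resize_status ||
            in.entry_ageout != out.entry_ageout)
        return -1;

    return 0;
}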
diff --git a/test/enc_dec_plist_cross_platform.c b/test/enc_dec_plist_cross_platform.c
index a5d5552..5511828 100644
--- a/test/enc_dec_plist_cross_platform.c
+++ b/test/enc_dec_plist_cross_platform.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/enum.c b/test/enum.c
index 29b702d..4e20713 100644
--- a/test/enum.c
+++ b/test/enum.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/err_compat.c b/test/err_compat.c
index 7779ddc..eee150b 100644
--- a/test/err_compat.c
+++ b/test/err_compat.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/error_test.c b/test/error_test.c
index b150656..9c39065 100644
--- a/test/error_test.c
+++ b/test/error_test.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/evict_on_close.c b/test/evict_on_close.c
index e0a7a73..6536837 100644
--- a/test/evict_on_close.c
+++ b/test/evict_on_close.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -39,7 +37,14 @@
#include "H5Gpkg.h"
#include "H5Ipkg.h"
+/* Evict on close is not supported under parallel at this time.
+ * In the meantime, we just run a simple check that EoC can't be
+ * enabled in parallel HDF5.
+ */
+#ifndef H5_HAVE_PARALLEL
+
/* Uncomment to manually inspect cache states */
+/* (Requires debug build of the library) */
/* #define EOC_MANUAL_INSPECTION */
const char *FILENAMES[] = {
@@ -600,7 +605,7 @@ check_group_layout(hid_t fid, const char *group_name)
hid_t gid1 = -1, gid2 = -1; /* group IDs */
H5G_t *grp_ptr = NULL; /* ptr to internal group struct */
haddr_t tag1, tag2; /* MD cache tags for groups */
- int32_t before, during, after; /* cache sizes */
+ uint32_t before, during, after; /* cache sizes */
int i; /* iterator */
/* NOTE: The TESTING() macro is called in main() */
@@ -616,7 +621,7 @@ check_group_layout(hid_t fid, const char *group_name)
HDprintf("\nCACHE BEFORE GROUP OPEN:\n");
if(H5AC_dump_cache(file_ptr) < 0)
TEST_ERROR;
- HDprintf("NUMBER OF CACHE ENTRIES: %d\n", before);
+ HDprintf("NUMBER OF CACHE ENTRIES: %u\n", before);
#endif
/* Open the main group and get its tag */
@@ -658,7 +663,7 @@ check_group_layout(hid_t fid, const char *group_name)
if(H5AC_dump_cache(file_ptr) < 0)
TEST_ERROR;
HDprintf("MAIN GROUP TAG: %#X\n", tag1);
- HDprintf("NUMBER OF CACHE ENTRIES: %d\n", during);
+ HDprintf("NUMBER OF CACHE ENTRIES: %u\n", during);
#endif
/* Close the main group */
@@ -672,7 +677,7 @@ check_group_layout(hid_t fid, const char *group_name)
HDprintf("\nCACHE AFTER CLOSING GROUPS:\n");
if(H5AC_dump_cache(file_ptr) < 0)
TEST_ERROR;
- HDprintf("NUMBER OF CACHE ENTRIES: %d\n", after);
+ HDprintf("NUMBER OF CACHE ENTRIES: %u\n", after);
#endif
/* Ensure that the cache does not contain entries with the tag */
@@ -718,7 +723,7 @@ check_dset_scheme(hid_t fid, const char *dset_name)
H5D_t *dset_ptr = NULL; /* ptr to internal dset struct */
haddr_t tag; /* MD cache tag for dataset */
int *data = NULL; /* buffer for fake data */
- int32_t before, during, after; /* cache sizes */
+ uint32_t before, during, after; /* cache sizes */
/* NOTE: The TESTING() macro is called in main() */
@@ -737,7 +742,7 @@ check_dset_scheme(hid_t fid, const char *dset_name)
HDprintf("\nCACHE BEFORE DATASET OPEN:\n");
if(H5AC_dump_cache(file_ptr) < 0)
TEST_ERROR;
- HDprintf("NUMBER OF CACHE ENTRIES: %d\n", before);
+ HDprintf("NUMBER OF CACHE ENTRIES: %u\n", before);
#endif
/* Open dataset and get the metadata tag */
@@ -761,7 +766,7 @@ check_dset_scheme(hid_t fid, const char *dset_name)
if(H5AC_dump_cache(file_ptr) < 0)
TEST_ERROR;
HDprintf("TAG: %#X\n", tag);
- HDprintf("NUMBER OF CACHE ENTRIES: %d\n", during);
+ HDprintf("NUMBER OF CACHE ENTRIES: %u\n", during);
#endif
/* Close the dataset */
@@ -775,7 +780,7 @@ check_dset_scheme(hid_t fid, const char *dset_name)
HDprintf("\nCACHE AFTER DATASET CLOSE:\n");
if(H5AC_dump_cache(file_ptr) < 0)
TEST_ERROR;
- HDprintf("NUMBER OF CACHE ENTRIES: %d\n", after);
+ HDprintf("NUMBER OF CACHE ENTRIES: %u\n", after);
#endif
/* Ensure that the cache does not contain entries with the tag */
@@ -991,3 +996,96 @@ error:
} /* end main() */
+#else
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_evict_on_close_parallel_fail()
+ *
+ * Purpose: Verify that the H5Pset_evict_on_close() call fails in
+ * parallel HDF5.
+ *
+ * Return: SUCCEED/FAIL
+ *
+ * Programmer: Dana Robinson
+ * Spring 2017
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+check_evict_on_close_parallel_fail(void)
+{
+ hid_t fapl_id = -1;
+ hbool_t evict_on_close;
+ herr_t status;
+
+ TESTING("evict on close fails in parallel");
+
+ /* Create a fapl */
+ if((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ TEST_ERROR;
+
+ /* Set the evict on close property (should fail) */
+ evict_on_close = TRUE;
+ H5E_BEGIN_TRY {
+ status = H5Pset_evict_on_close(fapl_id, evict_on_close);
+ } H5E_END_TRY;
+ if(status >= 0)
+ FAIL_PUTS_ERROR("H5Pset_evict_on_close() did not fail in parallel HDF5.");
+
+ /* close fapl */
+ if(H5Pclose(fapl_id) < 0)
+ TEST_ERROR;
+
+ PASSED();
+ return SUCCEED;
+
+error:
+ H5_FAILED();
+ return FAIL;
+
+} /* check_evict_on_close_parallel_fail() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: main (parallel version)
+ *
+ * Return: EXIT_FAILURE/EXIT_SUCCESS
+ *
+ * Programmer: Dana Robinson
+ * Spring 2016
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+main(void)
+{
+ unsigned nerrors = 0; /* number of test errors */
+
+ HDprintf("Testing evict-on-close cache behavior\n");
+
+ /* Initialize */
+ h5_reset();
+
+ /* Test that EoC fails in parallel HDF5 */
+ nerrors += check_evict_on_close_parallel_fail() < 0 ? 1 : 0;
+
+ if(nerrors)
+ goto error;
+
+ HDprintf("All evict-on-close tests passed.\n");
+ HDprintf("Note that EoC is not supported under parallel so most tests are skipped.\n");
+
+ return EXIT_SUCCESS;
+
+error:
+
+ HDprintf("***** %u evict-on-close test%s FAILED! *****\n",
+ nerrors, nerrors > 1 ? "s" : "");
+
+ return EXIT_FAILURE;
+
+} /* main() - parallel */
+
+#endif /* H5_HAVE_PARALLEL */
+
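/*
 * For contrast with the parallel-build check above, a minimal sketch of the
 * intended serial usage of the property: enable evict-on-close on a FAPL and
 * open a file with it.  The helper and file name are illustrative; the serial
 * tests earlier in this source file exercise the actual cache behavior.
 */
static herr_t
open_with_evict_on_close(const char *name, hid_t *fid_out)
{
    hid_t fapl_id = -1;

    if((fapl_id = H5Pcreate(H5P_FILE_ACCESS)) < 0)
        return FAIL;
    if(H5Pset_evict_on_close(fapl_id, TRUE) < 0) {
        H5Pclose(fapl_id);
        return FAIL;
    }

    /* Objects closed in this file will have their metadata cache entries evicted */
    if((*fid_out = H5Fcreate(name, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_id)) < 0) {
        H5Pclose(fapl_id);
        return FAIL;
    }

    H5Pclose(fapl_id);
    return SUCCEED;
}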
diff --git a/test/extend.c b/test/extend.c
index 68f0d7c..e5c3cb3 100644
--- a/test/extend.c
+++ b/test/extend.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/external.c b/test/external.c
index 35207bd..9502586 100644
--- a/test/external.c
+++ b/test/external.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/farray.c b/test/farray.c
index e84bfae..f9f97bf 100644
--- a/test/farray.c
+++ b/test/farray.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/fheap.c b/test/fheap.c
index 8e364de..4be6cb9 100644
--- a/test/fheap.c
+++ b/test/fheap.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Quincey Koziol <koziol@ncsa.uiuc.edu>
@@ -26,6 +24,10 @@
#define H5HF_TESTING
#include "H5HFpkg.h" /* Fractal heaps */
+#define H5F_FRIEND /*suppress error about including H5Fpkg */
+#define H5F_TESTING
+#include "H5Fpkg.h"
+
/* Other private headers that this test requires */
#include "H5Iprivate.h" /* IDs */
#include "H5MMprivate.h" /* Memory management */
@@ -77,6 +79,10 @@
#define DBLOCK_SIZE(fh, r) H5HF_get_dblock_size_test(fh, r) /* Size of a direct block in a given row */
#define DBLOCK_FREE(fh, r) H5HF_get_dblock_free_test(fh, r) /* Free space in a direct block of a given row */
+/* The number of settings for testing: page buffering, file space strategy and persisting free-space */
+#define NUM_PB_FS 6
+#define PAGE_BUFFER_PAGE_SIZE 4096
+
const char *FILENAME[] = {
"fheap",
NULL
@@ -126,6 +132,7 @@ typedef struct fheap_test_param_t {
fheap_test_fill_t fill; /* How to "bulk" fill heap blocks */
size_t actual_id_len; /* The actual length of heap IDs for a test */
fheap_test_comp_t comp; /* Whether to compress the blocks or not */
+ hid_t my_fcpl; /* File creation property list with file space strategy setting */
} fheap_test_param_t;
/* Heap state information */
@@ -544,7 +551,7 @@ begin_test(fheap_test_param_t *tparam, const char *base_desc,
del_str = get_del_string(tparam);
HDassert(del_str);
test_desc = (char *)H5MM_malloc(HDstrlen(del_str) + HDstrlen(base_desc));
- sprintf(test_desc, base_desc, del_str);
+ HDsprintf(test_desc, base_desc, del_str);
TESTING(test_desc);
H5MM_xfree(del_str);
H5MM_xfree(test_desc);
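/*
 * A hypothetical sketch of how one of the NUM_PB_FS settings introduced above
 * could be turned into property lists: a paged-aggregation FCPL (optionally
 * persisting free-space info) plus a FAPL whose page buffer is sized in units
 * of PAGE_BUFFER_PAGE_SIZE.  The helper name and the page count are
 * assumptions; the real mapping lives in fheap.c's main().
 */
static herr_t
setup_paged_plists(hid_t base_fapl, hbool_t persist_fs, hid_t *fcpl_out, hid_t *fapl_out)
{
    if((*fcpl_out = H5Pcreate(H5P_FILE_CREATE)) < 0)
        return FAIL;
    if(H5Pset_file_space_strategy(*fcpl_out, H5F_FSPACE_STRATEGY_PAGE, persist_fs, (hsize_t)1) < 0)
        return FAIL;
    if(H5Pset_file_space_page_size(*fcpl_out, (hsize_t)PAGE_BUFFER_PAGE_SIZE) < 0)
        return FAIL;

    if((*fapl_out = H5Pcopy(base_fapl)) < 0)
        return FAIL;
    /* Page buffer large enough for a few pages; no metadata/raw-data minimum percentages */
    if(H5Pset_page_buffer_size(*fapl_out, (size_t)(4 * PAGE_BUFFER_PAGE_SIZE), 0, 0) < 0)
        return FAIL;

    return SUCCEED;
}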
@@ -642,7 +649,7 @@ open_heap(char *filename, hid_t fapl, hid_t dxpl, const H5HF_create_t *cparam,
h5_fixname(FILENAME[0], fapl, filename, (size_t)FHEAP_FILENAME_LEN);
/* Create the file to work on */
- if((*file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((*file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
FAIL_STACK_ERROR
/* Check for deleting the entire heap */
@@ -1827,7 +1834,7 @@ error:
*-------------------------------------------------------------------------
*/
static unsigned
-test_create(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t H5_ATTR_UNUSED *tparam)
+test_create(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tparam)
{
hid_t file = -1; /* File ID */
char filename[FHEAP_FILENAME_LEN]; /* Filename to use */
@@ -1844,7 +1851,7 @@ test_create(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t H5_ATTR_UNUSED
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
FAIL_STACK_ERROR
/* Close file */
@@ -1944,7 +1951,7 @@ error:
*-------------------------------------------------------------------------
*/
static unsigned
-test_reopen(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t H5_ATTR_UNUSED *tparam)
+test_reopen(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tparam)
{
hid_t file = -1; /* File ID */
char filename[FHEAP_FILENAME_LEN]; /* Filename to use */
@@ -1956,12 +1963,13 @@ test_reopen(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t H5_ATTR_UNUSED
h5_stat_size_t file_size; /* File size, after deleting heap */
size_t id_len; /* Size of fractal heap IDs */
fheap_heap_state_t state; /* State of fractal heap */
+ hbool_t page = FALSE; /* Paged aggregation strategy or not */
/* Set the filename to use for this test (dependent on fapl) */
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
TEST_ERROR
/* Close file */
@@ -1980,6 +1988,9 @@ test_reopen(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t H5_ATTR_UNUSED
if(NULL == (f = (H5F_t *)H5I_object(file)))
STACK_ERROR
+ if(f->shared->fs_strategy == H5F_FSPACE_STRATEGY_PAGE)
+ page = TRUE;
+
/* Ignore metadata tags in the file's cache */
if (H5AC_ignore_tags(f) < 0)
FAIL_STACK_ERROR
@@ -2058,8 +2069,9 @@ test_reopen(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t H5_ATTR_UNUSED
TEST_ERROR
/* Verify the file is correct size */
- if(file_size != empty_size)
- TEST_ERROR
+ if(!page || (page && !tparam->reopen_heap))
+ if(file_size != empty_size)
+ TEST_ERROR
/* All tests passed */
PASSED()
@@ -2090,7 +2102,7 @@ error:
*-------------------------------------------------------------------------
*/
static unsigned
-test_open_twice(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t H5_ATTR_UNUSED *tparam)
+test_open_twice(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tparam)
{
hid_t file = -1; /* File ID */
hid_t file2 = -1; /* File ID */
@@ -2105,12 +2117,13 @@ test_open_twice(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t H5_ATTR_UN
h5_stat_size_t file_size; /* File size, after deleting heap */
size_t id_len; /* Size of fractal heap IDs */
fheap_heap_state_t state; /* State of fractal heap */
+ hbool_t page = FALSE; /* Paged aggregation strategy or not */
/* Set the filename to use for this test (dependent on fapl) */
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
TEST_ERROR
/* Close file */
@@ -2129,6 +2142,9 @@ test_open_twice(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t H5_ATTR_UN
if(NULL == (f = (H5F_t *)H5I_object(file)))
STACK_ERROR
+ if(f->shared->fs_strategy == H5F_FSPACE_STRATEGY_PAGE)
+ page = TRUE;
+
/* Ignore metadata tags in the file's cache */
if (H5AC_ignore_tags(f) < 0)
FAIL_STACK_ERROR
@@ -2226,8 +2242,9 @@ test_open_twice(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t H5_ATTR_UN
TEST_ERROR
/* Verify the file is correct size */
- if(file_size != empty_size)
- TEST_ERROR
+ if(!page || (page && !tparam->reopen_heap))
+ if(file_size != empty_size)
+ TEST_ERROR
/* All tests passed */
PASSED()
@@ -2262,7 +2279,7 @@ error:
*-------------------------------------------------------------------------
*/
static unsigned
-test_delete_open(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t H5_ATTR_UNUSED *tparam)
+test_delete_open(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tparam)
{
hid_t file = -1; /* File ID */
char filename[FHEAP_FILENAME_LEN]; /* Filename to use */
@@ -2280,7 +2297,7 @@ test_delete_open(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t H5_ATTR_U
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
TEST_ERROR
/* Close file */
@@ -2433,7 +2450,7 @@ error:
*-------------------------------------------------------------------------
*/
static unsigned
-test_id_limits(hid_t fapl, H5HF_create_t *cparam)
+test_id_limits(hid_t fapl, H5HF_create_t *cparam, hid_t fcpl)
{
hid_t file = -1; /* File ID */
hid_t dxpl = H5AC_ind_read_dxpl_id; /* DXPL to use */
@@ -2451,7 +2468,7 @@ test_id_limits(hid_t fapl, H5HF_create_t *cparam)
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl)) < 0)
FAIL_STACK_ERROR
/* Get a pointer to the internal file object */
@@ -2778,7 +2795,7 @@ error:
*-------------------------------------------------------------------------
*/
static unsigned
-test_filtered_create(hid_t fapl, H5HF_create_t *cparam)
+test_filtered_create(hid_t fapl, H5HF_create_t *cparam, hid_t fcpl)
{
hid_t file = -1; /* File ID */
hid_t dxpl = H5AC_ind_read_dxpl_id; /* DXPL to use */
@@ -2794,7 +2811,7 @@ test_filtered_create(hid_t fapl, H5HF_create_t *cparam)
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl)) < 0)
FAIL_STACK_ERROR
/* Get a pointer to the internal file object */
@@ -2901,7 +2918,7 @@ error:
*-------------------------------------------------------------------------
*/
static unsigned
-test_size(hid_t fapl, H5HF_create_t *cparam)
+test_size(hid_t fapl, H5HF_create_t *cparam, hid_t fcpl)
{
hid_t file = -1; /* File ID */
hid_t dxpl = H5AC_ind_read_dxpl_id; /* DXPL to use */
@@ -2917,7 +2934,7 @@ test_size(hid_t fapl, H5HF_create_t *cparam)
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl)) < 0)
FAIL_STACK_ERROR
/* Get a pointer to the internal file object */
@@ -3045,7 +3062,7 @@ error:
*-------------------------------------------------------------------------
*/
static unsigned
-test_reopen_hdr(hid_t fapl, H5HF_create_t *cparam)
+test_reopen_hdr(hid_t fapl, H5HF_create_t *cparam, hid_t fcpl)
{
hid_t file1 = -1; /* File ID */
hid_t file2 = -2; /* File ID */
@@ -3060,7 +3077,7 @@ test_reopen_hdr(hid_t fapl, H5HF_create_t *cparam)
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file1 = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file1 = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl)) < 0)
FAIL_STACK_ERROR
/* Get a pointer to the internal file object */
@@ -3199,7 +3216,7 @@ test_man_insert_weird(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tpa
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
FAIL_STACK_ERROR
/* Get a pointer to the internal file object */
@@ -3309,7 +3326,7 @@ test_man_insert_first(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tpa
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
FAIL_STACK_ERROR
/* Get a pointer to the internal file object */
@@ -3410,7 +3427,7 @@ test_man_insert_second(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tp
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
TEST_ERROR
/* Get a pointer to the internal file object */
@@ -3507,7 +3524,7 @@ test_man_insert_root_mult(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
TEST_ERROR
/* Get a pointer to the internal file object */
@@ -3606,7 +3623,7 @@ test_man_insert_force_indirect(hid_t fapl, H5HF_create_t *cparam, fheap_test_par
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
TEST_ERROR
/* Get a pointer to the internal file object */
@@ -3712,7 +3729,7 @@ test_man_insert_fill_second(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
TEST_ERROR
/* Get a pointer to the internal file object */
@@ -3819,7 +3836,7 @@ test_man_insert_third_direct(hid_t fapl, H5HF_create_t *cparam, fheap_test_param
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
TEST_ERROR
/* Get a pointer to the internal file object */
@@ -3930,7 +3947,7 @@ test_man_fill_first_row(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *t
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
TEST_ERROR
/* Get a pointer to the internal file object */
@@ -4026,7 +4043,7 @@ test_man_start_second_row(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
TEST_ERROR
/* Get a pointer to the internal file object */
@@ -4129,7 +4146,7 @@ test_man_fill_second_row(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
TEST_ERROR
/* Get a pointer to the internal file object */
@@ -4230,7 +4247,7 @@ test_man_start_third_row(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
TEST_ERROR
/* Get a pointer to the internal file object */
@@ -4341,7 +4358,7 @@ test_man_fill_fourth_row(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
TEST_ERROR
/* Get a pointer to the internal file object */
@@ -4438,7 +4455,7 @@ test_man_fill_all_root_direct(hid_t fapl, H5HF_create_t *cparam, fheap_test_para
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
TEST_ERROR
/* Get a pointer to the internal file object */
@@ -4534,7 +4551,7 @@ test_man_first_recursive_indirect(hid_t fapl, H5HF_create_t *cparam, fheap_test_
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
TEST_ERROR
/* Get a pointer to the internal file object */
@@ -4636,7 +4653,7 @@ test_man_second_direct_recursive_indirect(hid_t fapl, H5HF_create_t *cparam, fhe
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
TEST_ERROR
/* Get a pointer to the internal file object */
@@ -4746,7 +4763,7 @@ test_man_fill_first_recursive_indirect(hid_t fapl, H5HF_create_t *cparam, fheap_
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
TEST_ERROR
/* Get a pointer to the internal file object */
@@ -4849,7 +4866,7 @@ test_man_second_recursive_indirect(hid_t fapl, H5HF_create_t *cparam, fheap_test
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
TEST_ERROR
/* Get a pointer to the internal file object */
@@ -4960,7 +4977,7 @@ test_man_fill_second_recursive_indirect(hid_t fapl, H5HF_create_t *cparam, fheap
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
TEST_ERROR
/* Get a pointer to the internal file object */
@@ -5068,7 +5085,7 @@ test_man_fill_recursive_indirect_row(hid_t fapl, H5HF_create_t *cparam, fheap_te
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
TEST_ERROR
/* Get a pointer to the internal file object */
@@ -5166,7 +5183,7 @@ test_man_start_2nd_recursive_indirect(hid_t fapl, H5HF_create_t *cparam, fheap_t
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
TEST_ERROR
/* Get a pointer to the internal file object */
@@ -5275,7 +5292,7 @@ test_man_recursive_indirect_two_deep(hid_t fapl, H5HF_create_t *cparam, fheap_te
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
TEST_ERROR
/* Get a pointer to the internal file object */
@@ -5378,7 +5395,7 @@ test_man_start_3rd_recursive_indirect(hid_t fapl, H5HF_create_t *cparam, fheap_t
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
TEST_ERROR
/* Get a pointer to the internal file object */
@@ -5488,7 +5505,7 @@ test_man_fill_first_3rd_recursive_indirect(hid_t fapl, H5HF_create_t *cparam, fh
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
TEST_ERROR
/* Get a pointer to the internal file object */
@@ -5599,7 +5616,7 @@ test_man_fill_3rd_recursive_indirect_row(hid_t fapl, H5HF_create_t *cparam, fhea
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
TEST_ERROR
/* Get a pointer to the internal file object */
@@ -5706,7 +5723,7 @@ test_man_fill_all_3rd_recursive_indirect(hid_t fapl, H5HF_create_t *cparam, fhea
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
TEST_ERROR
/* Get a pointer to the internal file object */
@@ -5814,7 +5831,7 @@ test_man_start_4th_recursive_indirect(hid_t fapl, H5HF_create_t *cparam, fheap_t
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
TEST_ERROR
/* Get a pointer to the internal file object */
@@ -5929,7 +5946,7 @@ test_man_fill_first_4th_recursive_indirect(hid_t fapl, H5HF_create_t *cparam, fh
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
TEST_ERROR
/* Get a pointer to the internal file object */
@@ -6049,7 +6066,7 @@ test_man_fill_4th_recursive_indirect_row(hid_t fapl, H5HF_create_t *cparam, fhea
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
TEST_ERROR
/* Get a pointer to the internal file object */
@@ -6161,7 +6178,7 @@ test_man_fill_all_4th_recursive_indirect(hid_t fapl, H5HF_create_t *cparam, fhea
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
TEST_ERROR
/* Get a pointer to the internal file object */
@@ -6276,7 +6293,7 @@ test_man_start_5th_recursive_indirect(hid_t fapl, H5HF_create_t *cparam, fheap_t
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
TEST_ERROR
/* Get a pointer to the internal file object */
@@ -6410,7 +6427,7 @@ test_man_remove_bogus(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tpa
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
TEST_ERROR
/* Get a pointer to the internal file object */
@@ -6563,7 +6580,7 @@ test_man_remove_one(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tpara
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
TEST_ERROR
/* Get a pointer to the internal file object */
@@ -6727,7 +6744,7 @@ test_man_remove_two(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t *tpara
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
TEST_ERROR
/* Get a pointer to the internal file object */
@@ -6920,7 +6937,7 @@ test_man_remove_one_larger(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
TEST_ERROR
/* Get a pointer to the internal file object */
@@ -7089,7 +7106,7 @@ test_man_remove_two_larger(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_t
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
TEST_ERROR
/* Get a pointer to the internal file object */
@@ -7333,7 +7350,7 @@ test_man_remove_three_larger(hid_t fapl, H5HF_create_t *cparam, fheap_test_param
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
TEST_ERROR
/* Get a pointer to the internal file object */
@@ -7632,7 +7649,7 @@ test_man_incr_insert_remove(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, tparam->my_fcpl, fapl)) < 0)
FAIL_STACK_ERROR
/* Get a pointer to the internal file object */
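Every hunk above makes the same substitution: the test file is no longer created with the default file-creation property list but with the FCPL carried in the test parameters (tparam->my_fcpl), so the same heap tests can be replayed under the different file-space configurations set up in main() further down. A minimal sketch of the new call shape; create_test_file and its argument names are illustrative, not functions from fheap.c:

#include "hdf5.h"

/* Sketch only: create a test file with a caller-supplied FCPL instead of
 * H5P_DEFAULT.  "my_fcpl" mirrors the field added to the test parameters;
 * the surrounding harness (h5_fixname, TEST_ERROR) is not reproduced. */
static hid_t
create_test_file(const char *filename, hid_t my_fcpl, hid_t fapl)
{
    hid_t file;

    /* The FCPL (3rd argument) carries the file-space strategy, page size,
     * etc.; the FAPL (4th argument) is unchanged from before. */
    if ((file = H5Fcreate(filename, H5F_ACC_TRUNC, my_fcpl, fapl)) < 0)
        return -1;

    return file;
}

In the hunks above the equivalent call is made inline, with tparam->my_fcpl in the FCPL slot and the existing fapl in the FAPL slot.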
@@ -7664,10 +7681,10 @@ test_man_incr_insert_remove(hid_t fapl, H5HF_create_t *cparam, fheap_test_param_
TESTING("incremental object insertion and removal")
for(i = 0; i < 100; i++) {
- sprintf(obj1.b, "%s%d", "ABCDEFGHIJKLMNOPQRSTUVWXYZ", i);
+ HDsprintf(obj1.b, "%s%d", "ABCDEFGHIJKLMNOPQRSTUVWXYZ", i);
for(j = 0; j < i; j++) {
- sprintf(obj2.b, "%s%d", "ABCDEFGHIJKLMNOPQRSTUVWXYZ", j);
+ HDsprintf(obj2.b, "%s%d", "ABCDEFGHIJKLMNOPQRSTUVWXYZ", j);
if(H5HF_remove(fh, dxpl, heap_id[j]) < 0)
FAIL_STACK_ERROR
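The sprintf calls here (and the printf/puts calls later in this diff) are switched to HDF5's HD-prefixed portability wrappers. The real macros live in the library's private headers; the stand-in definitions below only illustrate the convention and are assumptions about their shape, not copies of the actual definitions:

#include <stdio.h>

/* Illustrative stand-ins for HDF5's HD-prefixed wrappers (the real macros
 * may add casts or platform-specific handling). */
#define HDsprintf sprintf
#define HDprintf  printf
#define HDputs    puts

int
main(void)
{
    char buf[64];

    HDsprintf(buf, "%s%d", "ABCDEFGHIJKLMNOPQRSTUVWXYZ", 7);
    HDprintf("formatted: %s\n", buf);
    HDputs("done");
    return 0;
}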
@@ -16352,18 +16369,40 @@ main(void)
fheap_test_param_t tparam; /* Testing parameters */
H5HF_create_t small_cparam; /* Creation parameters for "small" heap */
H5HF_create_t large_cparam; /* Creation parameters for "large" heap */
- hid_t fapl = -1; /* File access property list for data files */
+ hid_t fapl = -1, def_fapl = -1; /* File access property list for data files */
+ hid_t pb_fapl = -1; /* File access property list with page buffering enabled */
+ hid_t fcpl = -1, def_fcpl = -1; /* File creation property list for data files */
fheap_test_type_t curr_test; /* Current test being worked on */
- unsigned u; /* Local index variable */
+ unsigned u, v; /* Local index variables */
unsigned nerrors = 0; /* Cumulative error count */
- int ExpressMode; /* Express testing level */
+ unsigned num_pb_fs = 1; /* The number of settings to test for page buffering and file space handling */
+ int ExpressMode; /* Express testing level */
/* Reset library */
h5_reset();
- fapl = h5_fileaccess();
+
+ def_fapl = h5_fileaccess();
ExpressMode = GetTestExpress();
+
+ /*
+ * Caution when setting ExpressMode to 0:
+ * it activates testing with every combination of page buffering
+ * and file space strategy, so the running time will be long.
+ * For parallel builds, the last two settings (page buffering)
+ * are skipped because page buffering is disabled in parallel.
+ * Restore full testing when that feature is re-enabled for
+ * parallel builds.
+ */
if(ExpressMode > 1)
- printf("***Express test mode on. Some tests may be skipped\n");
+ HDprintf("***Express test mode on. Some tests may be skipped\n");
+ else if(ExpressMode == 0) {
+#ifdef H5_HAVE_PARALLEL
+ num_pb_fs = NUM_PB_FS - 2;
+#else
+ num_pb_fs = NUM_PB_FS;
+#endif
+ }
/* Initialize heap creation parameters */
init_small_cparam(&small_cparam);
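num_pb_fs controls how many page-buffering/file-space configurations the new outer loop (further down) actually runs: one by default, all of them only at the most exhaustive express level, minus the two page-buffering settings on parallel builds. A compilable sketch of that selection; NUM_PB_FS itself is defined elsewhere in fheap.c, so the value 6 used here (matching switch cases 0-5 below) is an assumption:

#include <stdio.h>

#define NUM_PB_FS 6   /* assumed value; the real constant is defined in fheap.c */

/* Stand-in for the test framework's GetTestExpress(); 0 = most exhaustive. */
static int GetTestExpress(void) { return 1; }

int
main(void)
{
    unsigned num_pb_fs = 1;           /* default: a single configuration */
    int      express   = GetTestExpress();

    if (express > 1)
        printf("Express test mode on, some tests may be skipped\n");
    else if (express == 0) {
#ifdef H5_HAVE_PARALLEL
        num_pb_fs = NUM_PB_FS - 2;    /* page buffering is disabled in parallel */
#else
        num_pb_fs = NUM_PB_FS;
#endif
    }

    printf("configurations to run: %u\n", num_pb_fs);
    return 0;
}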
@@ -16374,458 +16413,419 @@ main(void)
shared_wobj_g = (unsigned char *)H5MM_malloc(shared_obj_size_g);
shared_robj_g = (unsigned char *)H5MM_malloc(shared_obj_size_g);
+ /* Create a copy of def_fapl and enable page buffering */
+ if((pb_fapl = H5Pcopy(def_fapl)) < 0)
+ TEST_ERROR
+ if(H5Pset_page_buffer_size(pb_fapl, PAGE_BUFFER_PAGE_SIZE, 0, 0) < 0)
+ TEST_ERROR
+
+ /* Create a file creation property list */
+ if((def_fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ TEST_ERROR
+
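Enabling page buffering only takes a copy of the default FAPL plus one property call. A minimal sketch, assuming 4096 bytes for PAGE_BUFFER_PAGE_SIZE (the test's own constant is defined elsewhere in fheap.c); the two trailing zeros accept the library defaults for the minimum metadata and raw-data percentages. Page buffering only takes effect for files created with the PAGE file-space strategy, which is why this FAPL is paired only with cases 4 and 5 of the switch below.

#include "hdf5.h"

#define PAGE_BUFFER_PAGE_SIZE 4096   /* assumed value for illustration */

/* Returns a FAPL with page buffering enabled, or a negative value on error. */
static hid_t
make_page_buffer_fapl(hid_t def_fapl)
{
    hid_t pb_fapl;

    if ((pb_fapl = H5Pcopy(def_fapl)) < 0)
        return -1;

    /* buf_size, min_meta_perc, min_raw_perc */
    if (H5Pset_page_buffer_size(pb_fapl, PAGE_BUFFER_PAGE_SIZE, 0, 0) < 0) {
        H5Pclose(pb_fapl);
        return -1;
    }

    return pb_fapl;
}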
/* Initialize the shared write buffer for objects */
for(u = 0; u < shared_obj_size_g; u++)
shared_wobj_g[u] = (unsigned char)u;
- /* Iterate over the testing parameters */
-#ifndef QAK
- for(curr_test = FHEAP_TEST_NORMAL; curr_test < FHEAP_TEST_NTESTS; H5_INC_ENUM(fheap_test_type_t, curr_test)) {
-#else /* QAK */
-HDfprintf(stderr, "Uncomment test loop!\n");
-curr_test = FHEAP_TEST_NORMAL;
-/* curr_test = FHEAP_TEST_REOPEN; */
-#endif /* QAK */
- /* Clear the testing parameters */
- HDmemset(&tparam, 0, sizeof(fheap_test_param_t));
- tparam.actual_id_len = HEAP_ID_LEN;
-
- /* Set appropriate testing parameters for each test */
- switch(curr_test) {
- /* "Normal" testing parameters */
- case FHEAP_TEST_NORMAL:
- puts("Testing with normal parameters");
- break;
+ for(v = 0; v < num_pb_fs; v++) {
+
+ if((fcpl = H5Pcopy(def_fcpl)) < 0)
+ TEST_ERROR
- /* "Re-open heap" testing parameters */
- case FHEAP_TEST_REOPEN:
- puts("Testing with reopen heap flag set");
- tparam.reopen_heap = FHEAP_TEST_REOPEN;
+ switch(v) {
+ case 0:
+ if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, FALSE, (hsize_t)1) < 0)
+ TEST_ERROR
+ fapl = def_fapl;
+ break;
+ case 1:
+ if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, TRUE, (hsize_t)1) < 0)
+ TEST_ERROR
+ fapl = def_fapl;
+ break;
+ case 2:
+ if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, FALSE, (hsize_t)1) < 0)
+ TEST_ERROR
+ fapl = def_fapl;
+ break;
+ case 3:
+ if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, TRUE, (hsize_t)1) < 0)
+ TEST_ERROR
+ fapl = def_fapl;
+ break;
+ case 4:
+ if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, FALSE, (hsize_t)1) < 0)
+ TEST_ERROR
+ fapl = pb_fapl;
+ break;
+ case 5:
+ if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, TRUE, (hsize_t)1) < 0)
+ TEST_ERROR
+ fapl = pb_fapl;
break;
- /* An unknown test? */
- case FHEAP_TEST_NTESTS:
+ case NUM_PB_FS:
default:
goto error;
- } /* end switch */
+ }
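The switch above enumerates six configurations: the FSM_AGGR and PAGE strategies, each with and without persisting free-space information, plus the two PAGE settings repeated with the page-buffering FAPL. The same table can be written data-driven; the sketch below is only an equivalent restatement of the switch, not how fheap.c structures it:

#include "hdf5.h"

/* One row per configuration of the switch above (sketch only). */
struct pb_fs_cfg {
    H5F_fspace_strategy_t strategy;
    hbool_t               persist;   /* persist free-space info in the file? */
    hbool_t               use_pb;    /* pair with the page-buffering FAPL?   */
};

static const struct pb_fs_cfg cfgs[6] = {
    { H5F_FSPACE_STRATEGY_FSM_AGGR, 0, 0 },   /* case 0 */
    { H5F_FSPACE_STRATEGY_FSM_AGGR, 1, 0 },   /* case 1 */
    { H5F_FSPACE_STRATEGY_PAGE,     0, 0 },   /* case 2 */
    { H5F_FSPACE_STRATEGY_PAGE,     1, 0 },   /* case 3 */
    { H5F_FSPACE_STRATEGY_PAGE,     0, 1 },   /* case 4 */
    { H5F_FSPACE_STRATEGY_PAGE,     1, 1 },   /* case 5 */
};

/* Configure one iteration's FCPL and pick the matching FAPL. */
static herr_t
apply_pb_fs_cfg(unsigned v, hid_t fcpl, hid_t def_fapl, hid_t pb_fapl,
                hid_t *fapl_out)
{
    if (v >= 6)
        return -1;
    if (H5Pset_file_space_strategy(fcpl, cfgs[v].strategy,
                                   cfgs[v].persist, (hsize_t)1) < 0)
        return -1;
    *fapl_out = cfgs[v].use_pb ? pb_fapl : def_fapl;
    return 0;
}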
- /* Test fractal heap creation */
-#ifndef QAK
- nerrors += test_create(fapl, &small_cparam, &tparam);
- nerrors += test_reopen(fapl, &small_cparam, &tparam);
- nerrors += test_open_twice(fapl, &small_cparam, &tparam);
- nerrors += test_delete_open(fapl, &small_cparam, &tparam);
- nerrors += test_id_limits(fapl, &small_cparam);
- nerrors += test_filtered_create(fapl, &small_cparam);
- nerrors += test_size(fapl, &small_cparam);
- nerrors += test_reopen_hdr(fapl, &small_cparam);
-#else /* QAK */
-HDfprintf(stderr, "Uncomment tests!\n");
-#endif /* QAK */
+ /* Iterate over the testing parameters */
+ for(curr_test = FHEAP_TEST_NORMAL; curr_test < FHEAP_TEST_NTESTS; H5_INC_ENUM(fheap_test_type_t, curr_test)) {
+ /* Clear the testing parameters */
+ HDmemset(&tparam, 0, sizeof(fheap_test_param_t));
+ tparam.actual_id_len = HEAP_ID_LEN;
-#ifndef QAK2
-#ifndef QAK
- {
- fheap_test_fill_t fill; /* Size of objects to fill heap blocks with */
-
-#ifndef QAK2
- /* Filling with different sized objects */
- for(fill = FHEAP_TEST_FILL_LARGE; fill < FHEAP_TEST_FILL_N; H5_INC_ENUM(fheap_test_fill_t, fill)) {
-#else /* QAK2 */
-HDfprintf(stderr, "Uncomment test loop!\n");
-fill = FHEAP_TEST_FILL_LARGE;
-/* fill = FHEAP_TEST_FILL_SINGLE; */
-#endif /* QAK2 */
- tparam.fill = fill;
+ /* Set to run with different file space setting */
+ tparam.my_fcpl = fcpl;
/* Set appropriate testing parameters for each test */
- switch(fill) {
- /* "Bulk fill" heap blocks with 'large' objects */
- case FHEAP_TEST_FILL_LARGE:
- puts("Bulk-filling blocks w/large objects");
+ switch(curr_test) {
+ /* "Normal" testing parameters */
+ case FHEAP_TEST_NORMAL:
+ HDputs("Testing with normal parameters");
break;
- /* "Bulk fill" heap blocks with 'single' objects */
- case FHEAP_TEST_FILL_SINGLE:
- puts("Bulk-filling blocks w/single object");
+ /* "Re-open heap" testing parameters */
+ case FHEAP_TEST_REOPEN:
+ HDputs("Testing with reopen heap flag set");
+ tparam.reopen_heap = FHEAP_TEST_REOPEN;
break;
/* An unknown test? */
- case FHEAP_TEST_FILL_N:
+ case FHEAP_TEST_NTESTS:
default:
goto error;
} /* end switch */
- /*
- * Test fractal heap managed object insertion
- */
+ /* Test fractal heap creation */
+ nerrors += test_create(fapl, &small_cparam, &tparam);
+ nerrors += test_reopen(fapl, &small_cparam, &tparam);
+ nerrors += test_open_twice(fapl, &small_cparam, &tparam);
+ nerrors += test_delete_open(fapl, &small_cparam, &tparam);
-#ifndef QAK
- /* "Weird" sized objects */
- nerrors += test_man_insert_weird(fapl, &small_cparam, &tparam);
-#else /* QAK */
-HDfprintf(stderr, "Uncomment tests!\n");
-#endif /* QAK */
+ nerrors += test_id_limits(fapl, &small_cparam, tparam.my_fcpl);
+ nerrors += test_filtered_create(fapl, &small_cparam, tparam.my_fcpl);
+ nerrors += test_size(fapl, &small_cparam, tparam.my_fcpl);
+ nerrors += test_reopen_hdr(fapl, &small_cparam, tparam.my_fcpl);
-#ifdef ALL_INSERT_TESTS
- /* "Standard" sized objects, building from simple to complex heaps */
- nerrors += test_man_insert_first(fapl, &small_cparam, &tparam);
- nerrors += test_man_insert_second(fapl, &small_cparam, &tparam);
- nerrors += test_man_insert_root_mult(fapl, &small_cparam, &tparam);
- nerrors += test_man_insert_force_indirect(fapl, &small_cparam, &tparam);
- nerrors += test_man_insert_fill_second(fapl, &small_cparam, &tparam);
- nerrors += test_man_insert_third_direct(fapl, &small_cparam, &tparam);
- nerrors += test_man_fill_first_row(fapl, &small_cparam, &tparam);
- nerrors += test_man_start_second_row(fapl, &small_cparam, &tparam);
- nerrors += test_man_fill_second_row(fapl, &small_cparam, &tparam);
- nerrors += test_man_start_third_row(fapl, &small_cparam, &tparam);
- nerrors += test_man_fill_fourth_row(fapl, &small_cparam, &tparam);
- nerrors += test_man_fill_all_root_direct(fapl, &small_cparam, &tparam);
- nerrors += test_man_first_recursive_indirect(fapl, &small_cparam, &tparam);
- nerrors += test_man_second_direct_recursive_indirect(fapl, &small_cparam, &tparam);
- nerrors += test_man_fill_first_recursive_indirect(fapl, &small_cparam, &tparam);
- nerrors += test_man_second_recursive_indirect(fapl, &small_cparam, &tparam);
- nerrors += test_man_fill_second_recursive_indirect(fapl, &small_cparam, &tparam);
- nerrors += test_man_fill_recursive_indirect_row(fapl, &small_cparam, &tparam);
- nerrors += test_man_start_2nd_recursive_indirect(fapl, &small_cparam, &tparam);
- nerrors += test_man_recursive_indirect_two_deep(fapl, &small_cparam, &tparam);
- nerrors += test_man_start_3rd_recursive_indirect(fapl, &small_cparam, &tparam);
- nerrors += test_man_fill_first_3rd_recursive_indirect(fapl, &small_cparam, &tparam);
- nerrors += test_man_fill_3rd_recursive_indirect_row(fapl, &small_cparam, &tparam);
- nerrors += test_man_fill_all_3rd_recursive_indirect(fapl, &small_cparam, &tparam);
- nerrors += test_man_start_4th_recursive_indirect(fapl, &small_cparam, &tparam);
- nerrors += test_man_fill_first_4th_recursive_indirect(fapl, &small_cparam, &tparam);
- nerrors += test_man_fill_4th_recursive_indirect_row(fapl, &small_cparam, &tparam);
- nerrors += test_man_fill_all_4th_recursive_indirect(fapl, &small_cparam, &tparam);
-#endif /* ALL_INSERT_TESTS */
- /* If this test fails, uncomment the tests above, which build up to this
- * level of complexity gradually. -QAK
- */
-#ifndef QAK
- if(ExpressMode > 1)
- printf("***Express test mode on. test_man_start_5th_recursive_indirect is skipped\n");
- else
- nerrors += test_man_start_5th_recursive_indirect(fapl, &small_cparam, &tparam);
-#else /* QAK */
-HDfprintf(stderr, "Uncomment tests!\n");
-#endif /* QAK */
+ {
+ fheap_test_fill_t fill; /* Size of objects to fill heap blocks with */
- /*
- * Test fractal heap object deletion
- */
- /* Simple removal */
-#ifndef QAK
- nerrors += test_man_remove_bogus(fapl, &small_cparam, &tparam);
- nerrors += test_man_remove_one(fapl, &small_cparam, &tparam);
- nerrors += test_man_remove_two(fapl, &small_cparam, &tparam);
- nerrors += test_man_remove_one_larger(fapl, &small_cparam, &tparam);
- tparam.del_dir = FHEAP_DEL_FORWARD;
- nerrors += test_man_remove_two_larger(fapl, &small_cparam, &tparam);
- tparam.del_dir = FHEAP_DEL_REVERSE;
- nerrors += test_man_remove_two_larger(fapl, &small_cparam, &tparam);
- tparam.del_dir = FHEAP_DEL_FORWARD;
- nerrors += test_man_remove_three_larger(fapl, &small_cparam, &tparam);
- tparam.del_dir = FHEAP_DEL_REVERSE;
- nerrors += test_man_remove_three_larger(fapl, &small_cparam, &tparam);
-
- /* Incremental insert & removal */
- tparam.del_dir = FHEAP_DEL_FORWARD;
- nerrors += test_man_incr_insert_remove(fapl, &small_cparam, &tparam);
-#else /* QAK */
-HDfprintf(stderr, "Uncomment tests!\n");
-#endif /* QAK */
+ /* Filling with different sized objects */
+ for(fill = FHEAP_TEST_FILL_LARGE; fill < FHEAP_TEST_FILL_N; H5_INC_ENUM(fheap_test_fill_t, fill)) {
+ tparam.fill = fill;
-#ifndef QAK
-#ifndef QAK2
- {
- fheap_test_del_dir_t del_dir; /* Deletion direction */
- fheap_test_del_drain_t drain_half; /* Deletion draining */
+ /* Set appropriate testing parameters for each test */
+ switch(fill) {
+ /* "Bulk fill" heap blocks with 'large' objects */
+ case FHEAP_TEST_FILL_LARGE:
+ HDputs("Bulk-filling blocks w/large objects");
+ break;
- /* More complex removal patterns */
- for(del_dir = FHEAP_DEL_FORWARD; del_dir < FHEAP_DEL_NDIRS; H5_INC_ENUM(fheap_test_del_dir_t, del_dir)) {
- tparam.del_dir = del_dir;
- for(drain_half = FHEAP_DEL_DRAIN_ALL; drain_half < FHEAP_DEL_DRAIN_N; H5_INC_ENUM(fheap_test_del_drain_t, drain_half)) {
- tparam.drain_half = drain_half;
-#else /* QAK2 */
-HDfprintf(stderr, "Uncomment test loops!\n");
-/* tparam.del_dir = FHEAP_DEL_FORWARD; */
-/* tparam.del_dir = FHEAP_DEL_REVERSE; */
-tparam.del_dir = FHEAP_DEL_HEAP;
-tparam.drain_half = FHEAP_DEL_DRAIN_ALL;
-/* tparam.drain_half = FHEAP_DEL_DRAIN_HALF; */
-#endif /* QAK2 */
- /* Don't need to test deletion directions when deleting entire heap */
- if(tparam.del_dir == FHEAP_DEL_HEAP && tparam.drain_half > FHEAP_DEL_DRAIN_ALL)
+ /* "Bulk fill" heap blocks with 'single' objects */
+ case FHEAP_TEST_FILL_SINGLE:
+ HDputs("Bulk-filling blocks w/single object");
break;
-#ifndef QAK
- /* Simple insertion patterns */
- nerrors += test_man_remove_root_direct(fapl, &small_cparam, &tparam);
- nerrors += test_man_remove_two_direct(fapl, &small_cparam, &tparam);
- nerrors += test_man_remove_first_row(fapl, &small_cparam, &tparam);
- nerrors += test_man_remove_first_two_rows(fapl, &small_cparam, &tparam);
- nerrors += test_man_remove_first_four_rows(fapl, &small_cparam, &tparam);
- if(ExpressMode > 1)
- printf("***Express test mode on. Some tests skipped\n");
- else {
- nerrors += test_man_remove_all_root_direct(fapl, &small_cparam, &tparam);
- nerrors += test_man_remove_2nd_indirect(fapl, &small_cparam, &tparam);
- nerrors += test_man_remove_3rd_indirect(fapl, &small_cparam, &tparam);
- } /* end else */
-#else /* QAK */
-HDfprintf(stderr, "Uncomment tests!\n");
-#endif /* QAK */
+ /* An unknown test? */
+ case FHEAP_TEST_FILL_N:
+ default:
+ goto error;
+ } /* end switch */
-#ifndef QAK
- /* Skip blocks insertion */
- /* (covers insertion & deletion of skipped blocks) */
- nerrors += test_man_skip_start_block(fapl, &small_cparam, &tparam);
- nerrors += test_man_skip_start_block_add_back(fapl, &small_cparam, &tparam);
- nerrors += test_man_skip_start_block_add_skipped(fapl, &small_cparam, &tparam);
- nerrors += test_man_skip_2nd_block(fapl, &small_cparam, &tparam);
- nerrors += test_man_skip_2nd_block_add_skipped(fapl, &small_cparam, &tparam);
- nerrors += test_man_fill_one_partial_skip_2nd_block_add_skipped(fapl, &small_cparam, &tparam);
- nerrors += test_man_fill_row_skip_add_skipped(fapl, &small_cparam, &tparam);
- nerrors += test_man_skip_direct_skip_indirect_two_rows_add_skipped(fapl, &small_cparam, &tparam);
- nerrors += test_man_fill_direct_skip_indirect_start_block_add_skipped(fapl, &small_cparam, &tparam);
- nerrors += test_man_fill_direct_skip_2nd_indirect_start_block_add_skipped(fapl, &small_cparam, &tparam);
- nerrors += test_man_fill_2nd_direct_less_one_wrap_start_block_add_skipped(fapl, &small_cparam, &tparam);
- nerrors += test_man_fill_direct_skip_2nd_indirect_skip_2nd_block_add_skipped(fapl, &small_cparam, &tparam);
- nerrors += test_man_fill_direct_skip_indirect_two_rows_add_skipped(fapl, &small_cparam, &tparam);
- nerrors += test_man_fill_direct_skip_indirect_two_rows_skip_indirect_row_add_skipped(fapl, &small_cparam, &tparam);
- nerrors += test_man_fill_2nd_direct_skip_start_block_add_skipped(fapl, &small_cparam, &tparam);
- nerrors += test_man_fill_2nd_direct_skip_2nd_indirect_start_block_add_skipped(fapl, &small_cparam, &tparam);
- nerrors += test_man_fill_2nd_direct_fill_direct_skip_3rd_indirect_start_block_add_skipped(fapl, &small_cparam, &tparam);
- nerrors += test_man_fill_2nd_direct_fill_direct_skip2_3rd_indirect_start_block_add_skipped(fapl, &small_cparam, &tparam);
- nerrors += test_man_fill_3rd_direct_less_one_fill_direct_wrap_start_block_add_skipped(fapl, &small_cparam, &tparam);
- nerrors += test_man_fill_1st_row_3rd_direct_fill_2nd_direct_less_one_wrap_start_block_add_skipped(fapl, &small_cparam, &tparam);
- if(ExpressMode > 1)
- printf("***Express test mode on. Some tests skipped\n");
- else {
- nerrors += test_man_fill_3rd_direct_fill_direct_skip_start_block_add_skipped(fapl, &small_cparam, &tparam);
- nerrors += test_man_fill_3rd_direct_fill_2nd_direct_fill_direct_skip_3rd_indirect_start_block_add_skipped(fapl, &small_cparam, &tparam);
- nerrors += test_man_fill_3rd_direct_fill_2nd_direct_fill_direct_skip_3rd_indirect_two_rows_start_block_add_skipped(fapl, &small_cparam, &tparam);
- nerrors += test_man_fill_3rd_direct_fill_2nd_direct_fill_direct_skip_3rd_indirect_wrap_start_block_add_skipped(fapl, &small_cparam, &tparam);
- nerrors += test_man_fill_4th_direct_less_one_fill_2nd_direct_fill_direct_skip_3rd_indirect_wrap_start_block_add_skipped(fapl, &small_cparam, &tparam);
- } /* end else */
-#else /* QAK */
-HDfprintf(stderr, "Uncomment tests!\n");
-#endif /* QAK */
+ /*
+ * Test fractal heap managed object insertion
+ */
-#ifndef QAK
- /* Fragmented insertion patterns */
- /* (covers insertion & deletion of fragmented blocks) */
- nerrors += test_man_frag_simple(fapl, &small_cparam, &tparam);
- nerrors += test_man_frag_direct(fapl, &small_cparam, &tparam);
- nerrors += test_man_frag_2nd_direct(fapl, &small_cparam, &tparam);
- nerrors += test_man_frag_3rd_direct(fapl, &small_cparam, &tparam);
-#else /* QAK */
- HDfprintf(stderr, "Uncomment tests!\n");
-#endif /* QAK */
-#ifndef QAK2
+ /* "Weird" sized objects */
+ nerrors += test_man_insert_weird(fapl, &small_cparam, &tparam);
+
+#ifdef ALL_INSERT_TESTS
+ /* "Standard" sized objects, building from simple to complex heaps */
+ nerrors += test_man_insert_first(fapl, &small_cparam, &tparam);
+ nerrors += test_man_insert_second(fapl, &small_cparam, &tparam);
+ nerrors += test_man_insert_root_mult(fapl, &small_cparam, &tparam);
+ nerrors += test_man_insert_force_indirect(fapl, &small_cparam, &tparam);
+ nerrors += test_man_insert_fill_second(fapl, &small_cparam, &tparam);
+ nerrors += test_man_insert_third_direct(fapl, &small_cparam, &tparam);
+ nerrors += test_man_fill_first_row(fapl, &small_cparam, &tparam);
+ nerrors += test_man_start_second_row(fapl, &small_cparam, &tparam);
+ nerrors += test_man_fill_second_row(fapl, &small_cparam, &tparam);
+ nerrors += test_man_start_third_row(fapl, &small_cparam, &tparam);
+ nerrors += test_man_fill_fourth_row(fapl, &small_cparam, &tparam);
+ nerrors += test_man_fill_all_root_direct(fapl, &small_cparam, &tparam);
+ nerrors += test_man_first_recursive_indirect(fapl, &small_cparam, &tparam);
+ nerrors += test_man_second_direct_recursive_indirect(fapl, &small_cparam, &tparam);
+ nerrors += test_man_fill_first_recursive_indirect(fapl, &small_cparam, &tparam);
+ nerrors += test_man_second_recursive_indirect(fapl, &small_cparam, &tparam);
+ nerrors += test_man_fill_second_recursive_indirect(fapl, &small_cparam, &tparam);
+ nerrors += test_man_fill_recursive_indirect_row(fapl, &small_cparam, &tparam);
+ nerrors += test_man_start_2nd_recursive_indirect(fapl, &small_cparam, &tparam);
+ nerrors += test_man_recursive_indirect_two_deep(fapl, &small_cparam, &tparam);
+ nerrors += test_man_start_3rd_recursive_indirect(fapl, &small_cparam, &tparam);
+ nerrors += test_man_fill_first_3rd_recursive_indirect(fapl, &small_cparam, &tparam);
+ nerrors += test_man_fill_3rd_recursive_indirect_row(fapl, &small_cparam, &tparam);
+ nerrors += test_man_fill_all_3rd_recursive_indirect(fapl, &small_cparam, &tparam);
+ nerrors += test_man_start_4th_recursive_indirect(fapl, &small_cparam, &tparam);
+ nerrors += test_man_fill_first_4th_recursive_indirect(fapl, &small_cparam, &tparam);
+ nerrors += test_man_fill_4th_recursive_indirect_row(fapl, &small_cparam, &tparam);
+ nerrors += test_man_fill_all_4th_recursive_indirect(fapl, &small_cparam, &tparam);
+#endif /* ALL_INSERT_TESTS */
+ /* If this test fails, uncomment the tests above, which build up to this
+ * level of complexity gradually. -QAK
+ */
+ if(ExpressMode > 1)
+ HDprintf("***Express test mode on. test_man_start_5th_recursive_indirect is skipped\n");
+ else
+ nerrors += test_man_start_5th_recursive_indirect(fapl, &small_cparam, &tparam);
+
+ /*
+ * Test fractal heap object deletion
+ */
+ /* Simple removal */
+ nerrors += test_man_remove_bogus(fapl, &small_cparam, &tparam);
+ nerrors += test_man_remove_one(fapl, &small_cparam, &tparam);
+ nerrors += test_man_remove_two(fapl, &small_cparam, &tparam);
+ nerrors += test_man_remove_one_larger(fapl, &small_cparam, &tparam);
+ tparam.del_dir = FHEAP_DEL_FORWARD;
+ nerrors += test_man_remove_two_larger(fapl, &small_cparam, &tparam);
+ tparam.del_dir = FHEAP_DEL_REVERSE;
+ nerrors += test_man_remove_two_larger(fapl, &small_cparam, &tparam);
+ tparam.del_dir = FHEAP_DEL_FORWARD;
+ nerrors += test_man_remove_three_larger(fapl, &small_cparam, &tparam);
+ tparam.del_dir = FHEAP_DEL_REVERSE;
+ nerrors += test_man_remove_three_larger(fapl, &small_cparam, &tparam);
+
+ /* Incremental insert & removal */
+ tparam.del_dir = FHEAP_DEL_FORWARD;
+ nerrors += test_man_incr_insert_remove(fapl, &small_cparam, &tparam);
+
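The re-indented block that follows iterates every deletion direction crossed with every drain setting, skipping the drain variants when the whole heap is deleted. The sketch below mirrors that control flow with stand-in enums (the real types are the FHEAP_DEL_* enums in fheap.c) and a local INC_ENUM macro equivalent to the H5_INC_ENUM used above:

#include <stdio.h>

/* Stand-in enums mirroring the test's deletion parameters (names are
 * illustrative only). */
typedef enum { DEL_FORWARD, DEL_REVERSE, DEL_HEAP, DEL_NDIRS } del_dir_t;
typedef enum { DRAIN_ALL, DRAIN_HALF, DRAIN_N } drain_t;

/* Bump an enum by casting through int, like HDF5's H5_INC_ENUM(). */
#define INC_ENUM(TYPE, VAR) ((VAR) = (TYPE)((VAR) + 1))

int
main(void)
{
    del_dir_t dir;
    drain_t   drain;

    for (dir = DEL_FORWARD; dir < DEL_NDIRS; INC_ENUM(del_dir_t, dir))
        for (drain = DRAIN_ALL; drain < DRAIN_N; INC_ENUM(drain_t, drain)) {
            /* Deleting the whole heap makes the drain setting irrelevant,
             * so that combination is skipped, as in the test loop. */
            if (dir == DEL_HEAP && drain > DRAIN_ALL)
                break;
            printf("dir=%d drain=%d\n", (int)dir, (int)drain);
        }
    return 0;
}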
+ {
+ fheap_test_del_dir_t del_dir; /* Deletion direction */
+ fheap_test_del_drain_t drain_half; /* Deletion draining */
+
+ /* More complex removal patterns */
+ for(del_dir = FHEAP_DEL_FORWARD; del_dir < FHEAP_DEL_NDIRS; H5_INC_ENUM(fheap_test_del_dir_t, del_dir)) {
+ tparam.del_dir = del_dir;
+ for(drain_half = FHEAP_DEL_DRAIN_ALL; drain_half < FHEAP_DEL_DRAIN_N; H5_INC_ENUM(fheap_test_del_drain_t, drain_half)) {
+ tparam.drain_half = drain_half;
+ /* Don't need to test deletion directions when deleting entire heap */
+ if(tparam.del_dir == FHEAP_DEL_HEAP && tparam.drain_half > FHEAP_DEL_DRAIN_ALL)
+ break;
+
+ /* Simple insertion patterns */
+ nerrors += test_man_remove_root_direct(fapl, &small_cparam, &tparam);
+ nerrors += test_man_remove_two_direct(fapl, &small_cparam, &tparam);
+ nerrors += test_man_remove_first_row(fapl, &small_cparam, &tparam);
+ nerrors += test_man_remove_first_two_rows(fapl, &small_cparam, &tparam);
+ nerrors += test_man_remove_first_four_rows(fapl, &small_cparam, &tparam);
+ if(ExpressMode > 1)
+ HDprintf("***Express test mode on. Some tests skipped\n");
+ else {
+ nerrors += test_man_remove_all_root_direct(fapl, &small_cparam, &tparam);
+ nerrors += test_man_remove_2nd_indirect(fapl, &small_cparam, &tparam);
+ nerrors += test_man_remove_3rd_indirect(fapl, &small_cparam, &tparam);
+ } /* end else */
+
+ /* Skip blocks insertion */
+ /* (covers insertion & deletion of skipped blocks) */
+ nerrors += test_man_skip_start_block(fapl, &small_cparam, &tparam);
+ nerrors += test_man_skip_start_block_add_back(fapl, &small_cparam, &tparam);
+ nerrors += test_man_skip_start_block_add_skipped(fapl, &small_cparam, &tparam);
+ nerrors += test_man_skip_2nd_block(fapl, &small_cparam, &tparam);
+ nerrors += test_man_skip_2nd_block_add_skipped(fapl, &small_cparam, &tparam);
+ nerrors += test_man_fill_one_partial_skip_2nd_block_add_skipped(fapl, &small_cparam, &tparam);
+ nerrors += test_man_fill_row_skip_add_skipped(fapl, &small_cparam, &tparam);
+ nerrors += test_man_skip_direct_skip_indirect_two_rows_add_skipped(fapl, &small_cparam, &tparam);
+ nerrors += test_man_fill_direct_skip_indirect_start_block_add_skipped(fapl, &small_cparam, &tparam);
+ nerrors += test_man_fill_direct_skip_2nd_indirect_start_block_add_skipped(fapl, &small_cparam, &tparam);
+ nerrors += test_man_fill_2nd_direct_less_one_wrap_start_block_add_skipped(fapl, &small_cparam, &tparam);
+ nerrors += test_man_fill_direct_skip_2nd_indirect_skip_2nd_block_add_skipped(fapl, &small_cparam, &tparam);
+ nerrors += test_man_fill_direct_skip_indirect_two_rows_add_skipped(fapl, &small_cparam, &tparam);
+ nerrors += test_man_fill_direct_skip_indirect_two_rows_skip_indirect_row_add_skipped(fapl, &small_cparam, &tparam);
+ nerrors += test_man_fill_2nd_direct_skip_start_block_add_skipped(fapl, &small_cparam, &tparam);
+ nerrors += test_man_fill_2nd_direct_skip_2nd_indirect_start_block_add_skipped(fapl, &small_cparam, &tparam);
+ nerrors += test_man_fill_2nd_direct_fill_direct_skip_3rd_indirect_start_block_add_skipped(fapl, &small_cparam, &tparam);
+ nerrors += test_man_fill_2nd_direct_fill_direct_skip2_3rd_indirect_start_block_add_skipped(fapl, &small_cparam, &tparam);
+ nerrors += test_man_fill_3rd_direct_less_one_fill_direct_wrap_start_block_add_skipped(fapl, &small_cparam, &tparam);
+ nerrors += test_man_fill_1st_row_3rd_direct_fill_2nd_direct_less_one_wrap_start_block_add_skipped(fapl, &small_cparam, &tparam);
+ if(ExpressMode > 1)
+ HDprintf("***Express test mode on. Some tests skipped\n");
+ else {
+ nerrors += test_man_fill_3rd_direct_fill_direct_skip_start_block_add_skipped(fapl, &small_cparam, &tparam);
+ nerrors += test_man_fill_3rd_direct_fill_2nd_direct_fill_direct_skip_3rd_indirect_start_block_add_skipped(fapl, &small_cparam, &tparam);
+ nerrors += test_man_fill_3rd_direct_fill_2nd_direct_fill_direct_skip_3rd_indirect_two_rows_start_block_add_skipped(fapl, &small_cparam, &tparam);
+ nerrors += test_man_fill_3rd_direct_fill_2nd_direct_fill_direct_skip_3rd_indirect_wrap_start_block_add_skipped(fapl, &small_cparam, &tparam);
+ nerrors += test_man_fill_4th_direct_less_one_fill_2nd_direct_fill_direct_skip_3rd_indirect_wrap_start_block_add_skipped(fapl, &small_cparam, &tparam);
+ } /* end else */
+
+ /* Fragmented insertion patterns */
+ /* (covers insertion & deletion of fragmented blocks) */
+ nerrors += test_man_frag_simple(fapl, &small_cparam, &tparam);
+ nerrors += test_man_frag_direct(fapl, &small_cparam, &tparam);
+ nerrors += test_man_frag_2nd_direct(fapl, &small_cparam, &tparam);
+ nerrors += test_man_frag_3rd_direct(fapl, &small_cparam, &tparam);
+ } /* end for */
} /* end for */
- } /* end for */
- /* Reset deletion drain parameter */
- tparam.drain_half = FHEAP_DEL_DRAIN_ALL;
+ /* Reset deletion drain parameter */
+ tparam.drain_half = FHEAP_DEL_DRAIN_ALL;
- } /* end block */
-#endif /* QAK2 */
-#else /* QAK */
-HDfprintf(stderr, "Uncomment tests!\n");
-#endif /* QAK */
-#ifndef QAK2
+ } /* end block */
} /* end for */
-#endif /* QAK2 */
- } /* end block */
-#else /* QAK */
-HDfprintf(stderr, "Uncomment tests!\n");
-#endif /* QAK */
+ } /* end block */
- /*
- * Test fractal heap 'huge' & 'tiny' object insertion & deletion
- */
-#ifndef QAK
- {
- fheap_test_del_dir_t del_dir; /* Deletion direction */
- unsigned id_len; /* Length of heap IDs */
-
- /* Test "normal" & "direct" storage of 'huge' & 'tiny' heap IDs */
- for(id_len = 0; id_len < 3; id_len++) {
- /* Set the ID length for this test */
- small_cparam.id_len = (uint16_t)id_len;
-
- /* Print information about each test */
- switch(id_len) {
- /* Use "normal" form for 'huge' object's heap IDs */
- case 0:
- puts("Using 'normal' heap ID format for 'huge' objects");
- break;
+ /*
+ * Test fractal heap 'huge' & 'tiny' object insertion & deletion
+ */
+ {
+ fheap_test_del_dir_t del_dir; /* Deletion direction */
+ unsigned id_len; /* Length of heap IDs */
+
+ /* Test "normal" & "direct" storage of 'huge' & 'tiny' heap IDs */
+ for(id_len = 0; id_len < 3; id_len++) {
+ /* Set the ID length for this test */
+ small_cparam.id_len = (uint16_t)id_len;
+
+ /* Print information about each test */
+ switch(id_len) {
+ /* Use "normal" form for 'huge' object's heap IDs */
+ case 0:
+ HDputs("Using 'normal' heap ID format for 'huge' objects");
+ break;
- /* Use "direct" form for 'huge' object's heap IDs */
- case 1:
- puts("Using 'direct' heap ID format for 'huge' objects");
+ /* Use "direct" form for 'huge' object's heap IDs */
+ case 1:
+ HDputs("Using 'direct' heap ID format for 'huge' objects");
- /* Adjust actual length of heap IDs for directly storing 'huge' object's file offset & length in heap ID */
- tparam.actual_id_len = 17; /* 1 + 8 (file address size) + 8 (file length size) */
- break;
+ /* Adjust actual length of heap IDs for directly storing 'huge' object's file offset & length in heap ID */
+ tparam.actual_id_len = 17; /* 1 + 8 (file address size) + 8 (file length size) */
+ break;
- /* Use "direct" storage for 'huge' objects and larger IDs for 'tiny' objects */
- case 2:
- small_cparam.id_len = 37;
- puts("Using 'direct' heap ID format for 'huge' objects and larger IDs for 'tiny' objects");
- tparam.actual_id_len = 37;
- break;
+ /* Use "direct" storage for 'huge' objects and larger IDs for 'tiny' objects */
+ case 2:
+ small_cparam.id_len = 37;
+ HDputs("Using 'direct' heap ID format for 'huge' objects and larger IDs for 'tiny' objects");
+ tparam.actual_id_len = 37;
+ break;
- /* An unknown test? */
- default:
- goto error;
- } /* end switch */
+ /* An unknown test? */
+ default:
+ goto error;
+ } /* end switch */
+
+ /* Try several different methods of deleting objects */
+ for(del_dir = FHEAP_DEL_FORWARD; del_dir < FHEAP_DEL_NDIRS; H5_INC_ENUM(fheap_test_del_dir_t, del_dir)) {
+ tparam.del_dir = del_dir;
+
+ /* Test 'huge' object insert & delete */
+ nerrors += test_huge_insert_one(fapl, &small_cparam, &tparam);
+ nerrors += test_huge_insert_two(fapl, &small_cparam, &tparam);
+ nerrors += test_huge_insert_three(fapl, &small_cparam, &tparam);
+ nerrors += test_huge_insert_mix(fapl, &small_cparam, &tparam);
+ nerrors += test_filtered_huge(fapl, &small_cparam, &tparam);
+
+ /* Test 'tiny' object insert & delete */
+ nerrors += test_tiny_insert_one(fapl, &small_cparam, &tparam);
+ nerrors += test_tiny_insert_two(fapl, &small_cparam, &tparam);
+ nerrors += test_tiny_insert_mix(fapl, &small_cparam, &tparam);
+ } /* end for */
+ } /* end for */
+
+ /* Reset the "normal" heap ID lengths */
+ small_cparam.id_len = 0;
+ tparam.actual_id_len = HEAP_ID_LEN;
+ } /* end block */
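The 'huge'/'tiny' block runs three heap-ID configurations: the default encoding (id_len 0), 'direct' encoding where the object's file address and length are embedded in the ID, and an oversized 37-byte ID that also widens 'tiny' object IDs. A small check of the 17-byte arithmetic quoted in the comments above, assuming the 8-byte address and length sizes they state:

#include <assert.h>
#include <stdio.h>

int
main(void)
{
    const unsigned flag_byte   = 1;   /* heap-ID flag/version byte          */
    const unsigned file_addr   = 8;   /* file address size, per the comment */
    const unsigned file_length = 8;   /* file length size, per the comment  */

    /* "Direct" huge-object IDs embed the object's address and length. */
    assert(flag_byte + file_addr + file_length == 17);

    printf("direct huge-object heap ID length: %u bytes\n",
           flag_byte + file_addr + file_length);
    return 0;
}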
+
+ /* Test I/O filter support */
/* Try several different methods of deleting objects */
+ {
+ fheap_test_del_dir_t del_dir; /* Deletion direction */
+
for(del_dir = FHEAP_DEL_FORWARD; del_dir < FHEAP_DEL_NDIRS; H5_INC_ENUM(fheap_test_del_dir_t, del_dir)) {
tparam.del_dir = del_dir;
- /* Test 'huge' object insert & delete */
-#ifndef QAK
- nerrors += test_huge_insert_one(fapl, &small_cparam, &tparam);
- nerrors += test_huge_insert_two(fapl, &small_cparam, &tparam);
- nerrors += test_huge_insert_three(fapl, &small_cparam, &tparam);
- nerrors += test_huge_insert_mix(fapl, &small_cparam, &tparam);
- nerrors += test_filtered_huge(fapl, &small_cparam, &tparam);
-#else /* QAK */
-HDfprintf(stderr, "Uncomment tests!\n");
-#endif /* QAK */
+ /* Controlled tests */
+ /* XXX: Re-enable file size checks in these tests once persistent free-space tracking in the file is working */
+ nerrors += test_filtered_man_root_direct(fapl, &small_cparam, &tparam);
+ nerrors += test_filtered_man_root_indirect(fapl, &small_cparam, &tparam);
-#ifndef QAK
- /* Test 'tiny' object insert & delete */
- nerrors += test_tiny_insert_one(fapl, &small_cparam, &tparam);
- nerrors += test_tiny_insert_two(fapl, &small_cparam, &tparam);
- nerrors += test_tiny_insert_mix(fapl, &small_cparam, &tparam);
-#else /* QAK */
-HDfprintf(stderr, "Uncomment tests!\n");
-#endif /* QAK */
- } /* end for */
- } /* end for */
+ /* Random tests, with compressed blocks */
+ tparam.comp = FHEAP_TEST_COMPRESS;
+ nerrors += test_random((curr_test == FHEAP_TEST_NORMAL ? (hsize_t)(50*1000*1000) : (hsize_t)(25*1000*1000)), fapl, &small_cparam, &tparam);
+ nerrors += test_random_pow2((curr_test == FHEAP_TEST_NORMAL ? (hsize_t)(50*1000*1000) : (hsize_t)(2*1000*1000)), fapl, &small_cparam, &tparam);
- /* Reset the "normal" heap ID lengths */
- small_cparam.id_len = 0;
- tparam.actual_id_len = HEAP_ID_LEN;
- } /* end block */
-#else /* QAK */
-HDfprintf(stderr, "Uncomment tests!\n");
-#endif /* QAK */
-#else /* QAK2 */
-HDfprintf(stderr, "Uncomment tests!\n");
-#endif /* QAK2 */
+ /* Reset block compression */
+ tparam.comp = FHEAP_TEST_NO_COMPRESS;
+ } /* end for */
+ } /* end block */
- /* Test I/O filter support */
+ /* Random object insertion & deletion */
+ if(ExpressMode > 1)
+ HDprintf("***Express test mode on. Some tests skipped\n");
+ else {
+ /* Random tests using "small" heap creation parameters */
+ HDputs("Using 'small' heap creation parameters");
-#ifndef QAK
- /* Try several different methods of deleting objects */
- {
- fheap_test_del_dir_t del_dir; /* Deletion direction */
+ /* (reduce size of tests when re-opening each time) */
+ /* XXX: Try to speed things up enough that these tests don't have to be reduced when re-opening */
+ tparam.del_dir = FHEAP_DEL_FORWARD;
+ nerrors += test_random((curr_test == FHEAP_TEST_NORMAL ? (hsize_t)(100*1000*1000) : (hsize_t)(50*1000*1000)), fapl, &small_cparam, &tparam);
+ nerrors += test_random_pow2((curr_test == FHEAP_TEST_NORMAL ? (hsize_t)(100*1000*1000) : (hsize_t)(4*1000*1000)), fapl, &small_cparam, &tparam);
- for(del_dir = FHEAP_DEL_FORWARD; del_dir < FHEAP_DEL_NDIRS; H5_INC_ENUM(fheap_test_del_dir_t, del_dir)) {
- tparam.del_dir = del_dir;
+ tparam.del_dir = FHEAP_DEL_HEAP;
+ nerrors += test_random((curr_test == FHEAP_TEST_NORMAL ? (hsize_t)(100*1000*1000) : (hsize_t)(50*1000*1000)), fapl, &small_cparam, &tparam);
+ nerrors += test_random_pow2((curr_test == FHEAP_TEST_NORMAL ? (hsize_t)(100*1000*1000) : (hsize_t)(4*1000*1000)), fapl, &small_cparam, &tparam);
- /* Controlled tests */
-/* XXX: Re-enable file size checks in these tests, after the file has persistent free space tracking working */
- nerrors += test_filtered_man_root_direct(fapl, &small_cparam, &tparam);
- nerrors += test_filtered_man_root_indirect(fapl, &small_cparam, &tparam);
+ /* Random tests using "large" heap creation parameters */
+ HDputs("Using 'large' heap creation parameters");
+ tparam.actual_id_len = LARGE_HEAP_ID_LEN;
- /* Random tests, with compressed blocks */
- tparam.comp = FHEAP_TEST_COMPRESS;
- nerrors += test_random((curr_test == FHEAP_TEST_NORMAL ? (hsize_t)(50*1000*1000) : (hsize_t)(25*1000*1000)), fapl, &small_cparam, &tparam);
- nerrors += test_random_pow2((curr_test == FHEAP_TEST_NORMAL ? (hsize_t)(50*1000*1000) : (hsize_t)(2*1000*1000)), fapl, &small_cparam, &tparam);
+ /* (reduce size of tests when re-opening each time) */
+ /* XXX: Try to speed things up enough that these tests don't have to be reduced when re-opening */
+ tparam.del_dir = FHEAP_DEL_FORWARD;
+ nerrors += test_random((curr_test == FHEAP_TEST_NORMAL ? (hsize_t)(100*1000*1000) : (hsize_t)(50*1000*1000)), fapl, &large_cparam, &tparam);
+ nerrors += test_random_pow2((curr_test == FHEAP_TEST_NORMAL ? (hsize_t)(100*1000*1000) : (hsize_t)(4*1000*1000)), fapl, &large_cparam, &tparam);
- /* Reset block compression */
- tparam.comp = FHEAP_TEST_NO_COMPRESS;
- } /* end for */
- } /* end block */
-#else /* QAK */
-HDfprintf(stderr, "Uncomment tests!\n");
-#endif /* QAK */
+ tparam.del_dir = FHEAP_DEL_HEAP;
+ nerrors += test_random((curr_test == FHEAP_TEST_NORMAL ? (hsize_t)(100*1000*1000) : (hsize_t)(50*1000*1000)), fapl, &large_cparam, &tparam);
+ nerrors += test_random_pow2((curr_test == FHEAP_TEST_NORMAL ? (hsize_t)(100*1000*1000) : (hsize_t)(4*1000*1000)), fapl, &large_cparam, &tparam);
-#ifndef QAK
- /* Random object insertion & deletion */
- if(ExpressMode > 1)
- printf("***Express test mode on. Some tests skipped\n");
- else {
-#ifndef QAK
- /* Random tests using "small" heap creation parameters */
- puts("Using 'small' heap creation parameters");
-
- /* (reduce size of tests when re-opening each time) */
-/* XXX: Try to speed things up enough that these tests don't have to be reduced when re-opening */
- tparam.del_dir = FHEAP_DEL_FORWARD;
- nerrors += test_random((curr_test == FHEAP_TEST_NORMAL ? (hsize_t)(100*1000*1000) : (hsize_t)(50*1000*1000)), fapl, &small_cparam, &tparam);
- nerrors += test_random_pow2((curr_test == FHEAP_TEST_NORMAL ? (hsize_t)(100*1000*1000) : (hsize_t)(4*1000*1000)), fapl, &small_cparam, &tparam);
-
- tparam.del_dir = FHEAP_DEL_HEAP;
- nerrors += test_random((curr_test == FHEAP_TEST_NORMAL ? (hsize_t)(100*1000*1000) : (hsize_t)(50*1000*1000)), fapl, &small_cparam, &tparam);
- nerrors += test_random_pow2((curr_test == FHEAP_TEST_NORMAL ? (hsize_t)(100*1000*1000) : (hsize_t)(4*1000*1000)), fapl, &small_cparam, &tparam);
-#else /* QAK */
-HDfprintf(stderr, "Uncomment tests!\n");
-#endif /* QAK */
+ /* Reset the "normal" heap ID length */
+ tparam.actual_id_len = SMALL_HEAP_ID_LEN;
+ } /* end else */
-#ifndef QAK
- /* Random tests using "large" heap creation parameters */
- puts("Using 'large' heap creation parameters");
- tparam.actual_id_len = LARGE_HEAP_ID_LEN;
-
- /* (reduce size of tests when re-opening each time) */
-/* XXX: Try to speed things up enough that these tests don't have to be reduced when re-opening */
- tparam.del_dir = FHEAP_DEL_FORWARD;
- nerrors += test_random((curr_test == FHEAP_TEST_NORMAL ? (hsize_t)(100*1000*1000) : (hsize_t)(50*1000*1000)), fapl, &large_cparam, &tparam);
- nerrors += test_random_pow2((curr_test == FHEAP_TEST_NORMAL ? (hsize_t)(100*1000*1000) : (hsize_t)(4*1000*1000)), fapl, &large_cparam, &tparam);
-
- tparam.del_dir = FHEAP_DEL_HEAP;
- nerrors += test_random((curr_test == FHEAP_TEST_NORMAL ? (hsize_t)(100*1000*1000) : (hsize_t)(50*1000*1000)), fapl, &large_cparam, &tparam);
- nerrors += test_random_pow2((curr_test == FHEAP_TEST_NORMAL ? (hsize_t)(100*1000*1000) : (hsize_t)(4*1000*1000)), fapl, &large_cparam, &tparam);
-#else /* QAK */
-HDfprintf(stderr, "Uncomment tests!\n");
-#endif /* QAK */
+ /* Test object writing support */
- /* Reset the "normal" heap ID length */
- tparam.actual_id_len = SMALL_HEAP_ID_LEN;
- } /* end else */
-#else /* QAK */
-HDfprintf(stderr, "Uncomment tests!\n");
-#endif /* QAK */
+ /* Basic object writing */
+ nerrors += test_write(fapl, &small_cparam, &tparam);
-#ifndef QAK
- /* Test object writing support */
+ /* Writing objects in heap with filters */
+ tparam.comp = FHEAP_TEST_COMPRESS;
+ nerrors += test_write(fapl, &small_cparam, &tparam);
- /* Basic object writing */
- nerrors += test_write(fapl, &small_cparam, &tparam);
+ /* Reset block compression */
+ tparam.comp = FHEAP_TEST_NO_COMPRESS;
+ } /* end for */
- /* Writing objects in heap with filters */
- tparam.comp = FHEAP_TEST_COMPRESS;
- nerrors += test_write(fapl, &small_cparam, &tparam);
+ if(H5Pclose(fcpl) < 0)
+ TEST_ERROR
+ } /* end num_pb_fs */
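After this change main() has three nested levels: the page-buffering/file-space configurations (v), the normal/reopen-heap variants (curr_test), and the per-variant test batteries, with the per-iteration FCPL closed at the end of each outer pass. A compilable skeleton of that control flow; all names and bodies are stand-ins, not the real test calls:

#include <stdio.h>

#define NUM_CFGS  2   /* stands in for num_pb_fs */
#define NUM_TESTS 2   /* stands in for FHEAP_TEST_NTESTS */

static unsigned run_test_battery(unsigned cfg, unsigned variant)
{
    printf("cfg %u, variant %u\n", cfg, variant);
    return 0;   /* number of errors */
}

int
main(void)
{
    unsigned nerrors = 0;
    unsigned v, t;

    for (v = 0; v < NUM_CFGS; v++) {        /* page buffering / file space cfg */
        /* ...copy the default FCPL, set its strategy, pick a FAPL here... */
        for (t = 0; t < NUM_TESTS; t++)     /* normal vs. reopen-heap variant */
            nerrors += run_test_battery(v, t);
        /* ...close the per-iteration FCPL here... */
    }
    return nerrors ? 1 : 0;
}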
- /* Reset block compression */
- tparam.comp = FHEAP_TEST_NO_COMPRESS;
-#else /* QAK */
-HDfprintf(stderr, "Uncomment tests!\n");
-#endif /* QAK */
-#ifndef QAK
- } /* end for */
-#endif /* QAK */
+ /* Use the default FCPL and FAPL for the remaining tests */
+ tparam.my_fcpl = def_fcpl;
+ fapl = def_fapl;
/* Tests that address specific bugs */
-#ifndef QAK
nerrors += test_bug1(fapl, &small_cparam, &tparam);
-#else /* QAK */
-HDfprintf(stderr, "Uncomment tests!\n");
-#endif /* QAK */
/* Verify symbol table messages are cached */
nerrors += (h5_verify_cached_stabs(FILENAME, fapl) < 0 ? 1 : 0);
if(nerrors)
goto error;
- puts("All fractal heap tests passed.");
+ HDputs("All fractal heap tests passed.");
/* Release space for the shared objects */
H5MM_xfree(shared_wobj_g);
@@ -16834,9 +16834,12 @@ HDfprintf(stderr, "Uncomment tests!\n");
H5MM_xfree(shared_lens_g);
H5MM_xfree(shared_offs_g);
+ if(H5Pclose(def_fcpl) < 0) TEST_ERROR
+ if(H5Pclose(pb_fapl) < 0) TEST_ERROR
+
/* Clean up file used */
#ifndef QAK
- h5_cleanup(FILENAME, fapl);
+ h5_cleanup(FILENAME, def_fapl);
#else /* QAK */
HDfprintf(stderr, "Uncomment cleanup!\n");
#endif /* QAK */
@@ -16844,14 +16847,17 @@ HDfprintf(stderr, "Uncomment cleanup!\n");
return 0;
error:
- puts("*** TESTS FAILED ***");
+ HDputs("*** TESTS FAILED ***");
H5E_BEGIN_TRY {
H5MM_xfree(shared_wobj_g);
H5MM_xfree(shared_robj_g);
H5MM_xfree(shared_ids_g);
H5MM_xfree(shared_lens_g);
H5MM_xfree(shared_offs_g);
- H5Pclose(fapl);
+ H5Pclose(def_fapl);
+ H5Pclose(pb_fapl);
+ H5Pclose(def_fcpl);
+ H5Pclose(fcpl);
} H5E_END_TRY;
return 1;
} /* end main() */
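The error path now releases all four property lists instead of the single fapl. Wrapping the closes in H5E_BEGIN_TRY/H5E_END_TRY is the usual HDF5 test idiom: handles that were never created fail to close, and the block suppresses the error stack those failures would otherwise print. A minimal sketch of the idiom outside the test harness:

#include "hdf5.h"

int
main(void)
{
    hid_t def_fapl = -1, pb_fapl = -1, def_fcpl = -1, fcpl = -1;

    if ((def_fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
        goto error;
    /* ...more setup and test calls would go here... */

    if (H5Pclose(def_fapl) < 0)
        goto error;
    return 0;

error:
    /* Closing invalid handles would normally print an error stack; the
     * TRY block silences it so cleanup can be unconditional. */
    H5E_BEGIN_TRY {
        H5Pclose(def_fapl);
        H5Pclose(pb_fapl);
        H5Pclose(def_fcpl);
        H5Pclose(fcpl);
    } H5E_END_TRY;
    return 1;
}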
diff --git a/test/file_image.c b/test/file_image.c
index dd0a483..90b3233 100644
--- a/test/file_image.c
+++ b/test/file_image.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/***********************************************************
diff --git a/test/filespace_1_8.h5 b/test/filespace_1_8.h5
index 85138b0..3fa2822 100644
--- a/test/filespace_1_8.h5
+++ b/test/filespace_1_8.h5
Binary files differ
diff --git a/test/fillval.c b/test/fillval.c
index 4f7adc1..ea13de0 100644
--- a/test/fillval.c
+++ b/test/fillval.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -2370,7 +2368,7 @@ main(int argc, char *argv[])
{
int nerrors=0, argno, test_contig=1, test_chunk=1, test_compact=1;
hid_t fapl = (-1), fapl2 = (-1); /* File access property lists */
- unsigned new_format; /* Whether to use the new format or not */
+ unsigned new_format; /* Whether to use the new format or not */
if(argc >= 2) {
test_contig = test_chunk = test_compact = 0;
diff --git a/test/filter_fail.c b/test/filter_fail.c
index f165d8a..76b3106 100644
--- a/test/filter_fail.c
+++ b/test/filter_fail.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/flush1.c b/test/flush1.c
index 64d072a..bdbd731 100644
--- a/test/flush1.c
+++ b/test/flush1.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -25,170 +23,321 @@
*/
#include "h5test.h"
+/* This file needs to access the file driver testing code */
+#define H5FD_FRIEND /*suppress error about including H5FDpkg */
+#define H5FD_TESTING
+#include "H5FDpkg.h" /* File drivers */
+
const char *FILENAME[] = {
"flush",
+ "flush-swmr",
"noflush",
+ "noflush-swmr",
+ "flush_extend",
+ "flush_extend-swmr",
"noflush_extend",
+ "noflush_extend-swmr",
NULL
};
-static double the_data[100][100];
+/* Number and size of dataset dims, chunk size, etc. */
+#define NDIMS 1
+#define NELEMENTS 10000
+#define CHUNK_SIZE 25
+#define FIRST_DSET_NAME "dset1"
+#define SECOND_DSET_NAME "dset2"
+
+/* Number of sub-groups created in the containing group */
+#define NGROUPS 100
+static hid_t create_file(const char *filename, hid_t fapl_id, hbool_t swmr);
+static herr_t add_dset_to_file(hid_t fid, const char *dset_name);
+
+
/*-------------------------------------------------------------------------
- * Function: create_file
- *
- * Purpose: Creates files used in part 1 of the test
+ * Function: create_file
*
- * Return: Success: 0
+ * Purpose: Creates files and datasets used in part 1 of the test
*
- * Failure: 1
+ * Return: Success: a valid file ID
+ * Failure: -1
*
* Programmer: Leon Arber
* Sept. 26, 2006
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
static hid_t
-create_file(char* name, hid_t fapl)
+create_file(const char *filename, hid_t fapl_id, hbool_t swmr)
{
- hid_t file, dcpl, space, dset, groups, grp;
- hsize_t ds_size[2] = {100, 100};
- hsize_t ch_size[2] = {5, 5};
- size_t i, j;
+ hid_t fid = -1; /* file ID */
+ hid_t top_gid = -1; /* containing group ID */
+ hid_t gid = -1; /* subgroup ID */
+ char group_name[16]; /* group name */
+ unsigned flags; /* file open flags */
+ int i; /* iterator */
- if((file = H5Fcreate(name, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) FAIL_STACK_ERROR
+ flags = H5F_ACC_TRUNC | (swmr ? H5F_ACC_SWMR_WRITE : 0);
- /* Create a chunked dataset */
- if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) FAIL_STACK_ERROR
- if(H5Pset_chunk(dcpl, 2, ch_size) < 0) FAIL_STACK_ERROR
- if((space = H5Screate_simple(2, ds_size, NULL)) < 0) FAIL_STACK_ERROR
- if((dset = H5Dcreate2(file, "dset", H5T_NATIVE_FLOAT, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) FAIL_STACK_ERROR
+ if((fid = H5Fcreate(filename, flags, H5P_DEFAULT, fapl_id)) < 0)
+ STACK_ERROR
- /* Write some data */
- for(i = 0; i < ds_size[0]; i++)
- for(j = 0; j < (size_t)ds_size[1]; j++)
- the_data[i][j] = (double)i / (double)(j + 1);
- if(H5Dwrite(dset, H5T_NATIVE_DOUBLE, space, space, H5P_DEFAULT, the_data) < 0) FAIL_STACK_ERROR
+ /* Create a chunked dataset */
+ if(add_dset_to_file(fid, FIRST_DSET_NAME) < 0)
+ TEST_ERROR
/* Create some groups */
- if((groups = H5Gcreate2(file, "some_groups", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) FAIL_STACK_ERROR
- for(i = 0; i < 100; i++) {
- sprintf(name, "grp%02u", (unsigned)i);
- if((grp = H5Gcreate2(groups, name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) FAIL_STACK_ERROR
- if(H5Gclose(grp) < 0) FAIL_STACK_ERROR
+ if((top_gid = H5Gcreate2(fid, "top_group", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ STACK_ERROR
+ for(i = 0; i < NGROUPS; i++) {
+ HDsprintf(group_name, "group%02d", i);
+ if((gid = H5Gcreate2(top_gid, group_name, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ STACK_ERROR
+ if(H5Gclose(gid) < 0)
+ STACK_ERROR
} /* end for */
- return file;
+ if(H5Gclose(top_gid) < 0)
+ STACK_ERROR
+
+ return fid;
error:
- HD_exit(EXIT_FAILURE);
-}
+ H5E_BEGIN_TRY {
+ H5Fclose(fid);
+ H5Gclose(gid);
+ H5Gclose(top_gid);
+ } H5E_END_TRY;
+ return -1;
+} /* end create_file() */
+
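create_file() now ORs H5F_ACC_SWMR_WRITE into the create flags when SWMR is requested. SWMR access needs the latest file format, so a standalone version of the same idea would normally also set the library-version bounds on the FAPL, as sketched below; flush1.c's own FAPL comes from h5_fileaccess() and is prepared outside this hunk, so the explicit H5Pset_libver_bounds call here is an illustration, not a copy of the flush1.c code:

#include "hdf5.h"

/* Sketch: create a file ready for SWMR writing.  The latest file-format
 * bounds are set on the caller's FAPL before creation. */
static hid_t
create_swmr_file(const char *filename, hid_t fapl_id, hbool_t swmr)
{
    unsigned flags = H5F_ACC_TRUNC | (swmr ? H5F_ACC_SWMR_WRITE : 0);

    if (swmr &&
        H5Pset_libver_bounds(fapl_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
        return -1;

    return H5Fcreate(filename, flags, H5P_DEFAULT, fapl_id);
}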
/*-------------------------------------------------------------------------
- * Function: extend_file
+ * Function: add_dset_to_file
*
- * Purpose: Add a small dataset to the file.
+ * Purpose: Add a dataset to the file.
*
- * Return: Success: 0
- *
- * Failure: 1
+ * Return: SUCCEED/FAIL
*
* Programmer: Leon Arber
* Oct. 4, 2006
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
-static hid_t
-extend_file(hid_t file)
+static herr_t
+add_dset_to_file(hid_t fid, const char *dset_name)
{
- hid_t dcpl, space, dset;
- hsize_t ds_size[2] = {100, 100};
- hsize_t ch_size[2] = {5, 5};
- size_t i, j;
+ hid_t dcpl_id = -1; /* dataset creation plist ID */
+ hid_t sid = -1; /* dataspace ID */
+ hid_t did = -1; /* dataset ID */
+ int *data = NULL; /* data buffer */
+ hsize_t dims[1] = {NELEMENTS}; /* size of dataset */
+ hsize_t chunk_dims[1] = {CHUNK_SIZE}; /* chunk size */
+ int i; /* iterator */
/* Create a chunked dataset */
- if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) goto error;
- if(H5Pset_chunk(dcpl, 2, ch_size) < 0) goto error;
- if((space = H5Screate_simple(2, ds_size, NULL)) < 0) goto error;
- if((dset = H5Dcreate2(file, "dset2", H5T_NATIVE_FLOAT, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
- goto error;
+ if((dcpl_id = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ STACK_ERROR
+ if(H5Pset_chunk(dcpl_id, NDIMS, chunk_dims) < 0)
+ STACK_ERROR
+ if((sid = H5Screate_simple(NDIMS, dims, NULL)) < 0)
+ STACK_ERROR
+ if((did = H5Dcreate2(fid, dset_name, H5T_NATIVE_FLOAT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ STACK_ERROR
/* Write some data */
- for(i = 0; i < ds_size[0]; i++)
- for(j = 0; j < (size_t)ds_size[1]; j++)
- the_data[i][j] = (double)i / (double)(j + 1);
- if(H5Dwrite(dset, H5T_NATIVE_DOUBLE, space, space, H5P_DEFAULT, the_data) < 0) goto error;
+ if(NULL == (data = (int *)HDcalloc((size_t)NELEMENTS, sizeof(int))))
+ STACK_ERROR
+ for(i = 0; i < NELEMENTS; i++)
+ data[i] = i;
+ if(H5Dwrite(did, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, data) < 0)
+ STACK_ERROR
+
+ if(H5Pclose(dcpl_id) < 0)
+ STACK_ERROR
+ if(H5Sclose(sid) < 0)
+ STACK_ERROR
+ if(H5Dclose(did) < 0)
+ STACK_ERROR
- return file;
+ HDfree(data);
+
+ return SUCCEED;
error:
- HD_exit(EXIT_FAILURE);
-}
+ H5E_BEGIN_TRY {
+ H5Pclose(dcpl_id);
+ H5Sclose(sid);
+ H5Dclose(did);
+ } H5E_END_TRY;
+
+ HDfree(data);
+
+ return FAIL;
+} /* end add_dset_to_file() */
+
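Two details worth noting in add_dset_to_file() as shown above: dcpl_id is created and given a chunk size, but the H5Dcreate2 call passes H5P_DEFAULT in the dataset-creation-property slot, so in that form the chunking would not actually be applied; and the dataset is declared as H5T_NATIVE_FLOAT while the buffer is written as H5T_NATIVE_INT, which is legal because HDF5 converts between the two types during H5Dwrite. A condensed standalone version of the same steps that does pass the DCPL through (names and the reduced error handling are this sketch's own):

#include <stdlib.h>
#include "hdf5.h"

static herr_t
write_chunked_int_dset(hid_t fid, const char *name, hsize_t nelem, hsize_t chunk)
{
    hid_t   dcpl = -1, sid = -1, did = -1;
    int    *data = NULL;
    hsize_t i;
    herr_t  ret = -1;

    if ((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) goto done;
    if (H5Pset_chunk(dcpl, 1, &chunk) < 0) goto done;
    if ((sid = H5Screate_simple(1, &nelem, NULL)) < 0) goto done;

    /* Declared (file) type is float... */
    if ((did = H5Dcreate2(fid, name, H5T_NATIVE_FLOAT, sid,
                          H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) goto done;

    if (NULL == (data = (int *)calloc((size_t)nelem, sizeof(int)))) goto done;
    for (i = 0; i < nelem; i++)
        data[i] = (int)i;

    /* ...but the memory type is int; the library converts on write. */
    if (H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data) < 0)
        goto done;

    ret = 0;

done:
    free(data);
    if (did >= 0)  H5Dclose(did);
    if (sid >= 0)  H5Sclose(sid);
    if (dcpl >= 0) H5Pclose(dcpl);
    return ret;
}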
/*-------------------------------------------------------------------------
* Function: main
*
- * Purpose: Part 1 of a two-part H5Fflush() test.
+ * Purpose: Creates files and datasets with and without flushing in
+ * a variety of situations.
*
- * Return: Success: 0
- *
- * Failure: 1
+ * Part 1 of a two-part H5Fflush() test.
+ *
+ * Return: EXIT_SUCCESS/EXIT_FAILURE
*
* Programmer: Robb Matzke
* Friday, October 23, 1998
*
- * Modifications:
- * Leon Arber
- * Sept. 26, 2006, expand test to check for failure if H5Fflush is not called.
- * Oct. 4 2006, expand test to check for partial failure in case file is flushed, but then
- * new datasets are created after the flush.
- *
- *
*-------------------------------------------------------------------------
*/
int
main(void)
{
- hid_t file, fapl;
- char name[1024];
+ char *driver = NULL; /* name of current VFD (from env var) */
+ hbool_t vfd_supports_swmr; /* whether the current VFD supports SWMR */
+ hid_t fid = -1; /* file ID */
+ hid_t fapl_id = -1; /* file access proplist ID */
+ char filename[1024]; /* filename */
+ hbool_t use_swmr; /* whether or not to use SWMR I/O */
h5_reset();
- fapl = h5_fileaccess();
+ if((fapl_id = h5_fileaccess()) < 0)
+ TEST_ERROR
+
+ /* Check if the current VFD supports SWMR */
+ driver = HDgetenv("HDF5_DRIVER");
+ vfd_supports_swmr = H5FD_supports_swmr_test(driver);
+
+ /*************************************************/
+ /* NOTE: Not closing the file ID is intentional! */
+ /*************************************************/
+
+ /* Create a file and flush */
+ TESTING("H5Fflush (part1 with flush)");
+ h5_fixname(FILENAME[0], fapl_id, filename, sizeof(filename));
+ use_swmr = FALSE;
+ if((fid = create_file(filename, fapl_id, use_swmr)) < 0)
+ TEST_ERROR
+ if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
+ FAIL_STACK_ERROR
+ PASSED();
- TESTING("H5Fflush (part1)");
+ /* Create a file and flush w/ SWMR I/O */
+ TESTING("H5Fflush (part1 with flush + SWMR)");
+ if(vfd_supports_swmr) {
+ h5_fixname(FILENAME[1], fapl_id, filename, sizeof(filename));
+ use_swmr = TRUE;
+ if((fid = create_file(filename, fapl_id, use_swmr)) < 0)
+ TEST_ERROR
+ if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
+ FAIL_STACK_ERROR
+ PASSED();
+ } /* end if */
+ else
+ SKIPPED();
- /* Create the file */
- h5_fixname(FILENAME[0], fapl, name, sizeof name);
- file = create_file(name, fapl);
- /* Flush and exit without closing the library */
- if (H5Fflush(file, H5F_SCOPE_GLOBAL) < 0) goto error;
+ /* Create a file which will not be flushed */
+ TESTING("H5Fflush (part1 without flush)");
+ h5_fixname(FILENAME[2], fapl_id, filename, sizeof(filename));
+ use_swmr = FALSE;
+ if((fid = create_file(filename, fapl_id, use_swmr)) < 0)
+ TEST_ERROR
+ PASSED();
- /* Create the file */
- h5_fixname(FILENAME[2], fapl, name, sizeof name);
- file = create_file(name, fapl);
- /* Flush and exit without closing the library */
- if(H5Fflush(file, H5F_SCOPE_GLOBAL) < 0) goto error;
- /* Add a bit to the file and don't flush the new part */
- extend_file(file);
+ /* Create a file which will not be flushed w/ SWMR I/O */
+ TESTING("H5Fflush (part1 without flush + SWMR)");
+ if(vfd_supports_swmr) {
+ h5_fixname(FILENAME[3], fapl_id, filename, sizeof(filename));
+ use_swmr = TRUE;
+ if((fid = create_file(filename, fapl_id, use_swmr)) < 0)
+ TEST_ERROR
+ PASSED();
+ } /* end if */
+ else
+ SKIPPED();
- /* Create the other file which will not be flushed */
- h5_fixname(FILENAME[1], fapl, name, sizeof name);
- file = create_file(name, fapl);
+ /* Create a file, flush, add a dataset, flush */
+ TESTING("H5Fflush (part1 with flush and later addition and another flush)");
+ h5_fixname(FILENAME[4], fapl_id, filename, sizeof(filename));
+ use_swmr = FALSE;
+ if((fid = create_file(filename, fapl_id, use_swmr)) < 0)
+ TEST_ERROR
+ if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
+ FAIL_STACK_ERROR
+ if(add_dset_to_file(fid, SECOND_DSET_NAME) < 0)
+ TEST_ERROR
+ if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
+ FAIL_STACK_ERROR
+ PASSED();
+ /* Create a file, flush, add a dataset, flush w/ SWMR I/O */
+ TESTING("H5Fflush (part1 with flush and later addition and another flush + SWMR)");
+ if(vfd_supports_swmr) {
+ h5_fixname(FILENAME[5], fapl_id, filename, sizeof(filename));
+ use_swmr = TRUE;
+ if((fid = create_file(filename, fapl_id, use_swmr)) < 0)
+ TEST_ERROR
+ if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
+ FAIL_STACK_ERROR
+ if(add_dset_to_file(fid, SECOND_DSET_NAME) < 0)
+ TEST_ERROR
+ if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
+ FAIL_STACK_ERROR
+ PASSED();
+ } /* end if */
+ else
+ SKIPPED();
+ /* Create a file, flush, add a dataset, (no flush) */
+ TESTING("H5Fflush (part1 with flush and later addition)");
+ h5_fixname(FILENAME[6], fapl_id, filename, sizeof(filename));
+ use_swmr = FALSE;
+ if((fid = create_file(filename, fapl_id, use_swmr)) < 0)
+ TEST_ERROR
+ if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
+ FAIL_STACK_ERROR
+ if(add_dset_to_file(fid, SECOND_DSET_NAME) < 0)
+ TEST_ERROR
PASSED();
- fflush(stdout);
- fflush(stderr);
+ /* Create a file, flush, add a dataset, (no flush) w/ SWMR I/O */
+ TESTING("H5Fflush (part1 with flush and later addition + SWMR)");
+ if(vfd_supports_swmr) {
+ h5_fixname(FILENAME[7], fapl_id, filename, sizeof(filename));
+ use_swmr = TRUE;
+ if((fid = create_file(filename, fapl_id, use_swmr)) < 0)
+ TEST_ERROR
+ if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
+ FAIL_STACK_ERROR
+ if(add_dset_to_file(fid, SECOND_DSET_NAME) < 0)
+ TEST_ERROR
+ PASSED();
+ } /* end if */
+ else
+ SKIPPED();
+
+ if(!vfd_supports_swmr)
+ HDprintf("NOTE: Some tests were skipped since the current VFD lacks SWMR support\n");
+
+ /* Flush console output streams */
+ HDfflush(stdout);
+ HDfflush(stderr);
+
+ /* DO NOT CLOSE FILE ID! */
+ if(H5Pclose(fapl_id) < 0)
+ STACK_ERROR
+
+ /* _exit() is necessary since we want a hard close of the library */
HD_exit(EXIT_SUCCESS);
error:
- HD_exit(EXIT_FAILURE);
- return 1;
-}
+ H5E_BEGIN_TRY {
+ H5Pclose(fapl_id);
+ } H5E_END_TRY;
+
+ HDexit(EXIT_FAILURE);
+} /* end main() */
diff --git a/test/flush2.c b/test/flush2.c
index 7dc45be..9fdf6c1 100644
--- a/test/flush2.c
+++ b/test/flush2.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -24,202 +22,420 @@
*/
#include "h5test.h"
+/* This file needs to access the file driver testing code */
+#define H5FD_FRIEND /*suppress error about including H5FDpkg */
+#define H5FD_TESTING
+#include "H5FDpkg.h" /* File drivers */
+
+/* Make this private property (defined in H5Fprivate.h) available */
+/* This is used in the helper routine clear_status_flags() */
+#define H5F_ACS_CLEAR_STATUS_FLAGS_NAME "clear_status_flags"
+
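+/* Illustrative sketch only (the real, error-checked version is the
+ * clear_status_flags() helper below); it uses no names beyond those already
+ * defined in this file. Clearing the superblock status flags amounts to
+ * setting this private property on a copy of the fapl and re-opening the
+ * file read-write:
+ *
+ *     hid_t   tmp_fapl_id = H5Pcopy(fapl_id);
+ *     hbool_t clear       = TRUE;
+ *     H5Pset(tmp_fapl_id, H5F_ACS_CLEAR_STATUS_FLAGS_NAME, &clear);
+ *     fid = H5Fopen(filename, H5F_ACC_RDWR, tmp_fapl_id);
+ */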
+
const char *FILENAME[] = {
"flush",
+ "flush-swmr",
"noflush",
+ "noflush-swmr",
+ "flush_extend",
+ "flush_extend-swmr",
"noflush_extend",
+ "noflush_extend-swmr",
NULL
};
-static double the_data[100][100];
+/* Number and size of dataset dims, chunk size, etc. */
+#define NELEMENTS 10000
+#define FIRST_DSET_NAME "dset1"
+#define SECOND_DSET_NAME "dset2"
+/* Number of sub-groups created in the containing group */
+#define NGROUPS 100
+
+static hbool_t dset_ok(hid_t fid, const char *dset_name);
+static hbool_t file_ok(const char *filename, hid_t fapl_id, hbool_t check_second_dset);
+
+
/*-------------------------------------------------------------------------
- * Function: check_dset
+ * Function: dset_ok
*
- * Purpose: Part 2 of a two-part H5Fflush() test, checks if the data in a dataset
- * is what it is supposed to be.
+ * Purpose: Checks if the data in a dataset is what it is supposed to be.
*
- * Return: Success: 0
- *
- * Failure: 1
+ * Return: TRUE/FALSE
*
* Programmer: Leon Arber
* Oct. 4, 2006.
*
*-------------------------------------------------------------------------
*/
-static int
-check_dset(hid_t file, const char* name)
+static hbool_t
+dset_ok(hid_t fid, const char *dset_name)
{
- hid_t space, dset;
- hsize_t ds_size[2] = {100, 100};
- double error;
- size_t i, j;
-
- /* Open the dataset */
- if((dset = H5Dopen2(file, name, H5P_DEFAULT)) < 0) goto error;
- if((space = H5Dget_space(dset)) < 0) goto error;
- if(H5Sget_simple_extent_dims(space, ds_size, NULL) < 0) goto error;
- assert(100 == ds_size[0] && 100 == ds_size[1]);
-
- /* Read some data */
- if(H5Dread(dset, H5T_NATIVE_DOUBLE, space, space, H5P_DEFAULT, the_data) < 0) goto error;
- for(i = 0; i < (size_t)ds_size[0]; i++)
- for(j = 0; j < (size_t)ds_size[1]; j++) {
- error = HDfabs(the_data[i][j] - (double)i / (double)(j + 1));
- if(error > (double)0.0001F) {
- H5_FAILED();
- printf(" dset[%lu][%lu] = %g\n",
- (unsigned long)i, (unsigned long)j, the_data[i][j]);
- printf(" should be %g\n", (double)i / (double)(j + 1));
- goto error;
- }
- }
- if(H5Dclose(dset) < 0) goto error;
- return 0;
+ hid_t sid = -1; /* dataspace ID */
+ hid_t did = -1; /* dataset ID */
+ int *data = NULL; /* data buffer */
+ hsize_t dims[1] = {0}; /* size of dataset */
+ int i; /* iterator */
+
+ /* Open the dataset and check size */
+ if((did = H5Dopen2(fid, dset_name, H5P_DEFAULT)) < 0)
+ goto error;
+ if((sid = H5Dget_space(did)) < 0)
+ goto error;
+ if(H5Sget_simple_extent_dims(sid, dims, NULL) < 0)
+ goto error;
+ if(dims[0] != NELEMENTS)
+ goto error;
+
+ /* Read the data */
+ if(NULL == (data = (int *)HDcalloc((size_t)NELEMENTS, sizeof(int))))
+ goto error;
+ if(H5Dread(did, H5T_NATIVE_INT, sid, sid, H5P_DEFAULT, data) < 0)
+ goto error;
+ for(i = 0; i < NELEMENTS; i++)
+ if(i != data[i])
+ goto error;
+
+ if(H5Sclose(sid) < 0)
+ goto error;
+ if(H5Dclose(did) < 0)
+ goto error;
+
+ HDfree(data);
+
+ return TRUE;
error:
- return 1;
-}
+ H5E_BEGIN_TRY {
+ H5Sclose(sid);
+ H5Dclose(did);
+ } H5E_END_TRY;
+
+ HDfree(data);
+
+ return FALSE;
+} /* end dset_ok() */
/*-------------------------------------------------------------------------
- * Function: check_file
- *
- * Purpose: Part 2 of a two-part H5Fflush() test.
+ * Function: file_ok
*
- * Return: Success: 0
+ * Purpose: Checks that the contents of a file are what they should be.
*
- * Failure: 1
+ * Return: TRUE/FALSE
*
* Programmer: Leon Arber
* Sept. 26, 2006.
*
*-------------------------------------------------------------------------
*/
-static int
-check_file(char* filename, hid_t fapl, int flag)
+static hbool_t
+file_ok(const char *filename, hid_t fapl_id, hbool_t check_second_dset)
{
- hid_t file = -1, groups = -1;
- char name[1024];
- int i;
-
- if((file = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0) goto error;
- if(check_dset(file, "dset")) goto error;
-
- /* Open some groups */
- if((groups = H5Gopen2(file, "some_groups", H5P_DEFAULT)) < 0) goto error;
- for(i = 0; i < 100; i++) {
- hid_t grp;
- sprintf(name, "grp%02u", (unsigned)i);
- if((grp = H5Gopen2(groups, name, H5P_DEFAULT)) < 0) goto error;
- if(H5Gclose(grp) < 0) goto error;
+ hid_t fid = -1; /* file ID */
+ hid_t top_gid = -1; /* containing group ID */
+ hid_t gid = -1; /* subgroup ID */
+ char group_name[16]; /* group name */
+ int i; /* iterator */
+
+ /* open file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY, fapl_id)) < 0)
+ goto error;
+
+ /* check datasets */
+ if(!dset_ok(fid, FIRST_DSET_NAME))
+ goto error;
+ if(check_second_dset)
+ if(!dset_ok(fid, SECOND_DSET_NAME))
+ goto error;
+
+ /* check groups */
+ if((top_gid = H5Gopen2(fid, "top_group", H5P_DEFAULT)) < 0)
+ goto error;
+ for(i = 0; i < NGROUPS; i++) {
+ HDsprintf(group_name, "group%02d", i);
+ if((gid = H5Gopen2(top_gid, group_name, H5P_DEFAULT)) < 0)
+ goto error;
+ if(H5Gclose(gid) < 0)
+ goto error;
} /* end for */
- /* Check to see if that last added dataset in the third file is accessible
- * (it shouldn't be...but it might. Flag an error in case it is for now */
- if(flag && check_dset(file, "dset2")) goto error;
+ if(H5Gclose(top_gid) < 0)
+ goto error;
+ if(H5Fclose(fid) < 0)
+ goto error;
+
+ return TRUE;
- if(H5Gclose(groups) < 0) goto error;
- if(H5Fclose(file) < 0) goto error;
+error:
+ H5E_BEGIN_TRY {
+ H5Gclose(top_gid);
+ H5Gclose(gid);
+ H5Fclose(fid);
+ } H5E_END_TRY;
- return 0;
+ return FALSE;
+} /* end file_ok() */
+
+/*-------------------------------------------------------------------------
+ * Function: clear_status_flags
+ *
+ * Purpose: Clears the status_flags in the superblock of the file.
+ * It is similar to the tool "h5clear".
+ *
+ * Return: SUCCEED/FAIL
+ *
+ * Programmer: Vailin Choi
+ * July 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+clear_status_flags(const char *filename, hid_t fapl_id)
+{
+ hid_t new_fapl_id = -1; /* copy of the file access plist ID */
+ hid_t fid = -1; /* file ID */
+ hbool_t clear = TRUE;
+
+ /* Get a copy of fapl */
+ if((new_fapl_id = H5Pcopy(fapl_id)) < 0)
+ goto error;
+
+ /* Set this private property */
+ if(H5Pset(new_fapl_id, H5F_ACS_CLEAR_STATUS_FLAGS_NAME, &clear) < 0)
+ goto error;
+
+ /* Has to open rw */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR, new_fapl_id)) < 0)
+ goto error;
+
+ /* Close the property list */
+ if(H5Pclose(new_fapl_id) < 0)
+ goto error;
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ goto error;
+
+ return SUCCEED;
error:
H5E_BEGIN_TRY {
- H5Gclose(groups);
- H5Fclose(file);
+ H5Pclose(new_fapl_id);
+ H5Fclose(fid);
} H5E_END_TRY;
- return 1;
-} /* end check_file() */
+ return FAIL;
+} /* end clear_status_flags() */
/*-------------------------------------------------------------------------
* Function: main
*
- * Purpose: Part 2 of a two-part H5Fflush() test.
- *
- * Return: Success: 0
+ * Purpose: Confirms file contents in a variety of flush scenarios.
+ * Part 2 of a two-part H5Fflush() test.
*
- * Failure: 1
+ * Return: EXIT_SUCCESS/EXIT_FAILURE
*
* Programmer: Robb Matzke
* Friday, October 23, 1998
*
- * Modifications:
- * Leon Arber
- * Sept. 26, 2006, expand to check for case where the was file not flushed.
- *
*-------------------------------------------------------------------------
*/
int
main(void)
{
- hid_t fapl;
- H5E_auto2_t func;
- char name[1024];
+ char *driver = NULL; /* name of current VFD (from env var) */
+ hbool_t vfd_supports_swmr; /* whether the current VFD supports SWMR */
+ hid_t fapl_id = -1; /* file access proplist ID */
+ char filename[1024]; /* filename */
+ hbool_t check_second_dset; /* whether or not to check the second dset */
+ H5E_auto2_t func; /* for shutting off error reporting */
h5_reset();
- fapl = h5_fileaccess();
- TESTING("H5Fflush (part2 with flush)");
+ if((fapl_id = h5_fileaccess()) < 0)
+ PUTS_ERROR("bad vfd-dependent fapl")
+
+ /* Check if the current VFD supports SWMR */
+ driver = HDgetenv("HDF5_DRIVER");
+ vfd_supports_swmr = H5FD_supports_swmr_test(driver);
+ /* TEST 1 */
/* Check the case where the file was flushed */
- h5_fixname(FILENAME[0], fapl, name, sizeof name);
- if(check_file(name, fapl, FALSE)) {
- H5_FAILED()
- goto error;
- }
+ TESTING("H5Fflush (part2 with flush)");
+ h5_fixname(FILENAME[0], fapl_id, filename, sizeof(filename));
+ check_second_dset = FALSE;
+ if(file_ok(filename, fapl_id, check_second_dset))
+ PASSED()
else
- PASSED();
+ TEST_ERROR
+ /* TEST 2 */
+ /* Check the case where the file was flushed (w/SWMR) */
+ TESTING("H5Fflush (part2 with flush + SWMR)");
+ if(vfd_supports_swmr) {
+ h5_fixname(FILENAME[1], fapl_id, filename, sizeof(filename));
+ check_second_dset = FALSE;
+ if(clear_status_flags(filename, fapl_id) < 0)
+ TEST_ERROR
+ if(file_ok(filename, fapl_id, check_second_dset))
+ PASSED()
+ else
+ TEST_ERROR
+ } /* end if */
+ else
+ SKIPPED()
- /* Check the case where the file was not flushed. This should give an error
- * so we turn off the error stack temporarily */
+ /* TEST 3 */
+ /* Check the case where the file was not flushed */
TESTING("H5Fflush (part2 without flush)");
- H5Eget_auto2(H5E_DEFAULT,&func,NULL);
- H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
-
- h5_fixname(FILENAME[1], fapl, name, sizeof name);
- if(check_file(name, fapl, FALSE))
- PASSED()
- else {
+ /* Turn the error stack off (failures expected) */
+ if(H5Eget_auto2(H5E_DEFAULT, &func, NULL) < 0)
+ FAIL_STACK_ERROR
+ if(H5Eset_auto2(H5E_DEFAULT, NULL, NULL) < 0)
+ FAIL_STACK_ERROR
+ h5_fixname(FILENAME[2], fapl_id, filename, sizeof(filename));
+ check_second_dset = FALSE;
+ if(file_ok(filename, fapl_id, check_second_dset)) {
#if defined H5_HAVE_WIN32_API && !defined (hdf5_EXPORTS)
SKIPPED();
- puts(" DLL will flush the file even when calling _exit, skip this test temporarily");
+ HDputs(" the DLL will flush the file even when calling _exit, skip this test temporarily");
#else
- H5_FAILED()
- goto error;
+ TEST_ERROR
#endif
- }
- H5Eset_auto2(H5E_DEFAULT, func, NULL);
+ } /* end if */
+ else
+ PASSED()
+ /* Turn the error stack back on */
+ if(H5Eset_auto2(H5E_DEFAULT, func, NULL) < 0)
+ FAIL_STACK_ERROR
- /* Check the case where the file was flushed, but more data was added afterward. This should give an error
- * so we turn off the error stack temporarily */
- TESTING("H5Fflush (part2 with flush and later addition)");
- H5Eget_auto2(H5E_DEFAULT,&func,NULL);
- H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
+ /* TEST 4 */
+ /* Check the case where the file was not flushed (w/SWMR) */
+ TESTING("H5Fflush (part2 without flush + SWMR)");
+ if(vfd_supports_swmr) {
+ /* Turn the error stack off (failures expected) */
+ if(H5Eget_auto2(H5E_DEFAULT, &func, NULL) < 0)
+ FAIL_STACK_ERROR
+ if(H5Eset_auto2(H5E_DEFAULT, NULL, NULL) < 0)
+ FAIL_STACK_ERROR
+ h5_fixname(FILENAME[3], fapl_id, filename, sizeof(filename));
+ check_second_dset = FALSE;
+ if(clear_status_flags(filename, fapl_id) < 0)
+ TEST_ERROR
+ if(file_ok(filename, fapl_id, check_second_dset)) {
+#if defined H5_HAVE_WIN32_API && !defined (hdf5_EXPORTS)
+ SKIPPED();
+ HDputs(" the DLL will flush the file even when calling _exit, skip this test temporarily");
+#else
+ TEST_ERROR
+#endif
+ } /* end if */
+ else
+ PASSED()
+ /* Turn the error stack back on */
+ if(H5Eset_auto2(H5E_DEFAULT, func, NULL) < 0)
+ FAIL_STACK_ERROR
+ } /* end if */
+ else
+ SKIPPED()
+
+ /* TEST 5 */
+ /* Check the case where the file was flushed, but more data was
+ * added afterward and then flushed
+ */
+ TESTING("H5Fflush (part2 with flush and later addition and another flush)");
+ check_second_dset = TRUE;
+ h5_fixname(FILENAME[4], fapl_id, filename, sizeof(filename));
+ if(file_ok(filename, fapl_id, check_second_dset))
+ PASSED()
+ else
+ TEST_ERROR
- h5_fixname(FILENAME[2], fapl, name, sizeof name);
- if(check_file(name, fapl, TRUE))
+ /* TEST 6 */
+ /* Check the case where the file was flushed, but more data was
+ * added afterward and then flushed (w/SWMR)
+ */
+ TESTING("H5Fflush (part2 with flush and later addition and another flush + SWMR)");
+ if(vfd_supports_swmr) {
+ check_second_dset = TRUE;
+ h5_fixname(FILENAME[5], fapl_id, filename, sizeof(filename));
+ if(clear_status_flags(filename, fapl_id) < 0)
+ TEST_ERROR
+ if(file_ok(filename, fapl_id, check_second_dset))
+ PASSED()
+ else
+ TEST_ERROR
+ } /* end if */
+ else
+ SKIPPED()
+
+ /* TEST 7 */
+ /* Check the case where the file was flushed, but more data was added
+ * afterward and not flushed.
+ */
+ TESTING("H5Fflush (part2 with flush and later addition)");
+ /* Turn the error stack off (failures expected) */
+ if(H5Eget_auto2(H5E_DEFAULT, &func, NULL) < 0)
+ FAIL_STACK_ERROR
+ if(H5Eset_auto2(H5E_DEFAULT, NULL, NULL) < 0)
+ FAIL_STACK_ERROR
+ h5_fixname(FILENAME[6], fapl_id, filename, sizeof(filename));
+ check_second_dset = TRUE;
+ if(file_ok(filename, fapl_id, check_second_dset)) {
+#if defined H5_HAVE_WIN32_API && !defined (hdf5_EXPORTS)
+ SKIPPED()
+ HDputs(" the DLL will flush the file even when calling _exit, skip this test temporarily");
+#else
+ TEST_ERROR
+#endif
+ } /* end if */
+ else
PASSED()
- else {
+ /* Turn the error stack back on */
+ if(H5Eset_auto2(H5E_DEFAULT, func, NULL) < 0)
+ FAIL_STACK_ERROR
+
+ /* TEST 8 */
+ /* Check the case where the file was flushed, but more data was added
+ * afterward and not flushed (w/ SWMR).
+ */
+ TESTING("H5Fflush (part2 with flush and later addition + SWMR)");
+ if(vfd_supports_swmr) {
+ /* Turn the error stack off (failures expected) */
+ if(H5Eget_auto2(H5E_DEFAULT, &func, NULL) < 0)
+ FAIL_STACK_ERROR
+ if(H5Eset_auto2(H5E_DEFAULT, NULL, NULL) < 0)
+ FAIL_STACK_ERROR
+ h5_fixname(FILENAME[7], fapl_id, filename, sizeof(filename));
+ check_second_dset = TRUE;
+ if(clear_status_flags(filename, fapl_id) < 0)
+ TEST_ERROR
+ if(file_ok(filename, fapl_id, check_second_dset)) {
#if defined H5_HAVE_WIN32_API && !defined (hdf5_EXPORTS)
- SKIPPED();
- puts(" DLL will flush the file even when calling _exit, skip this test temporarily");
+ SKIPPED();
+ HDputs(" the DLL will flush the file even when calling _exit, skip this test temporarily");
#else
- H5_FAILED()
- goto error;
+ TEST_ERROR
#endif
- }
+ } /* end if */
+ else
+ PASSED();
+ /* Turn the error stack back on */
+ if(H5Eset_auto2(H5E_DEFAULT, func, NULL) < 0)
+ FAIL_STACK_ERROR
+ } /* end if */
+ else
+ SKIPPED()
- H5Eset_auto2(H5E_DEFAULT, func, NULL);
+ if(!vfd_supports_swmr)
+ HDprintf("NOTE: Some tests were skipped since the current VFD lacks SWMR support\n");
- h5_cleanup(FILENAME, fapl);
+ h5_cleanup(FILENAME, fapl_id);
- return 0;
+ HDexit(EXIT_SUCCESS);
error:
- return 1;
-}
+ HDexit(EXIT_FAILURE);
+} /* end main() */
diff --git a/test/flushrefresh.c b/test/flushrefresh.c
new file mode 100644
index 0000000..f35ed5e
--- /dev/null
+++ b/test/flushrefresh.c
@@ -0,0 +1,1149 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Programmer: Mike McGreevy
+ * June 30, 2010
+ *
+ * Purpose: This test file contains routines used to test flushing and
+ * refreshing individual objects' metadata from the cache.
+ *
+ * Note: This file should NOT be run manually. Instead, invoke it
+ * via its associated test script, testflushrefresh.sh
+ *
+ */
+
+#define H5FD_FRIEND /*suppress error about including H5FDpkg */
+#define H5FD_TESTING
+
+/* ======== */
+/* Includes */
+/* ======== */
+
+#include "testhdf5.h"
+#include "H5FDpkg.h" /* File Drivers */
+
+/* ======= */
+/* Defines */
+/* ======= */
+
+/* Name of Test File */
+#define FILENAME "flushrefresh.h5"
+
+/* Names of Signal Files */
+#define SIGNAL_TO_SCRIPT "flushrefresh_VERIFICATION_START"
+#define SIGNAL_BETWEEN_PROCESSES_1 "flushrefresh_VERIFICATION_CHECKPOINT1"
+#define SIGNAL_BETWEEN_PROCESSES_2 "flushrefresh_VERIFICATION_CHECKPOINT2"
+#define SIGNAL_FROM_SCRIPT "flushrefresh_VERIFICATION_DONE"
+
+/* Paths to Various Objects in the Testfile */
+#define RG "/"
+#define D1 "/Dataset1"
+#define D2 "/Group1/Dataset2"
+#define D3 "/Group3/Dataset3"
+#define G1 "/Group1"
+#define G2 "/Group1/Group2"
+#define G3 "/Group3"
+#define T1 "/CommittedDatatype1"
+#define T2 "/Group1/Group2/CommittedDatatype2"
+#define T3 "/Group3/CommittedDatatype3"
+
+/* Flushed States */
+#define FLUSHED "FLUSHED"
+#define NOT_FLUSHED "NOT_FLUSHED"
+
+/* Error Handling */
+/* For errors occurring in the main process, use the standard TEST_ERROR macro.
+   For errors occurring in the spawned process (from the test script), use
+   the PROCESS_ERROR macro, which signals the main process so that it can
+   propagate errors correctly. */
+FILE * errorfile;
+#define ERRFILE "flushrefresh_ERROR"
+#define PROCESS_ERROR \
+ { \
+ errorfile = HDfopen(ERRFILE, "w+"); \
+ HDfprintf(errorfile, "Error occurred in flushrefresh.\n"); \
+ HDfflush(errorfile); \
+ HDfclose(errorfile); \
+ TEST_ERROR; \
+ }
+
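+/* Usage sketch (illustrative only, using just the macros and routines defined
+ * in this file): a spawned verification routine reports a failure by writing
+ * ERRFILE via PROCESS_ERROR, and the main process picks it up afterwards with
+ * check_for_errors():
+ *
+ *     if(flush_verification(argv[1], argv[2]) != 0)
+ *         PROCESS_ERROR;              // spawned process: creates ERRFILE
+ *     ...
+ *     if(check_for_errors() < 0)
+ *         TEST_ERROR;                 // main process: fails if ERRFILE exists
+ */
+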
+#define CLEANUP_FILES \
+ { \
+ HDremove(ERRFILE); \
+ HDremove(FILENAME); \
+ HDremove(SIGNAL_TO_SCRIPT); \
+ HDremove(SIGNAL_BETWEEN_PROCESSES_1); \
+ HDremove(SIGNAL_BETWEEN_PROCESSES_2); \
+ HDremove(SIGNAL_FROM_SCRIPT); \
+ } \
+
+/* ===================== */
+/* Function Declarations */
+/* ===================== */
+
+/* Main */
+int main(int argc, const char *argv[]);
+
+/* Flush Test Framework */
+herr_t test_flush(void);
+herr_t flush_verification(const char * obj_pathname, const char * expected);
+herr_t run_flush_verification_process(const char * obj_pathname, const char * expected);
+
+/* Refresh Test Framework */
+herr_t test_refresh(void);
+herr_t refresh_verification(const char * obj_pathname);
+herr_t start_refresh_verification_process(const char * obj_pathname);
+herr_t end_refresh_verification_process(void);
+
+/* Other Helper Functions */
+herr_t check_for_errors(void);
+herr_t end_verification(void);
+
+/* ========= */
+/* Functions */
+/* ========= */
+
+
+/*-------------------------------------------------------------------------
+ * Function: main
+ *
+ * Purpose: This function coordinates the test of flush/refresh
+ * functionality verification. It accepts either one, two or
+ * no command line parameters. The main test routine runs
+ * with no command line parameters specified, while verification
+ * routines run with one or two command line parameters.
+ *
+ * Note: This program should not be run manually, as the
+ * test is controlled by the testflushrefresh.sh script. Running
+ * the flushrefresh program manually will result in failure, as
+ * it will time out waiting for a signal from the test script
+ * which will never come.
+ *
+ * Return: 0 on Success, 1 on Failure
+ *
+ * Programmer: Mike McGreevy
+ * July 1, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+int main(int argc, const char *argv[])
+{
+ /* Variables */
+ const char *envval = NULL;
+
+ /* Initialize library */
+ if(H5open() < 0)
+ TEST_ERROR;
+
+ /* Parse command line options */
+ if(argc == 1) {
+ /* No arguments supplied. Run the main test routines only if
+ * the current VFD supports SWMR, otherwise don't run
+ * anything. */
+
+ /* Determine driver being used */
+ envval = HDgetenv("HDF5_DRIVER");
+
+ if(envval == NULL || H5FD_supports_swmr_test(envval)) {
+ if(test_flush() != SUCCEED) TEST_ERROR;
+ if(test_refresh() != SUCCEED) TEST_ERROR;
+ } /* end if */
+ else {
+ HDfprintf(stdout, "Skipping all flush/refresh tests (only run with SWMR-enabled file drivers).\n");
+
+ /* Test script is expecting some signals, so send them out to end it. */
+ if(end_verification() < 0) TEST_ERROR;
+ if(end_verification() < 0) TEST_ERROR;
+ } /* end else */
+ } else if(argc == 3) {
+ /* Two arguments supplied. Pass them to flush verification routine. */
+ if(flush_verification(argv[1], argv[2]) != 0) PROCESS_ERROR;
+ } else if(argc == 2) {
+ /* One argument supplied. Pass it to refresh verification routine. */
+ if(refresh_verification(argv[1]) != 0) PROCESS_ERROR;
+ } else {
+ /* Illegal number of arguments supplied. Error. */
+ HDfprintf(stderr, "Error. %d is an invalid number of arguments to main().\n", argc);
+ PROCESS_ERROR
+ } /* end if */
+
+ return SUCCEED;
+
+error:
+ /* Return */
+ return FAIL;
+} /* main */
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_flush
+ *
+ * Purpose: This function tests flushing individual objects' metadata
+ * from the metadata cache.
+ *
+ * Return: 0 on Success, 1 on Failure
+ *
+ * Programmer: Mike McGreevy
+ * July 1, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t test_flush(void)
+{
+ /**************************************************************************
+ *
+ * Test Description:
+ *
+ * This test will build an HDF5 file with several objects in a varying
+ * hierarchical layout. It will then attempt to flush the objects
+ * in the file one by one, individually, using the four H5*flush
+ * routines (D,G,T, and O). After each call to either create or flush an
+ * object, a series of verifications will occur on each object in the file.
+ *
+ * Each verification consists of spawning off a new process and determining
+ * if the object can be opened and its information retrieved in said
+ * alternate process. It reports the results, which are compared to an
+ * expected value (either that the object can be found on disk, or that it
+ * cannot).
+ *
+ * Note that to spawn a verification, this program sends a signal (by creating
+ * a file on disk) to the test script controlling it, indicating how to
+ * run the verification.
+ *
+ * The implementation is a bit convoluted, but a typical sequence looks like this:
+ *
+ * Step 1. Dataset is created.
+ * Step 2. Verify that dataset can't be opened by separate process, as
+ * it should not have been flushed to disk yet.
+ * Step 3. Group is created.
+ * Step 4. Verify that group can't be opened by separate process.
+ * Step 5. H5Gflush is called on the group.
+ * Step 6. Verify that group CAN be opened, but dataset still has
+ * yet to hit disk, and CANNOT be opened. Success! Only the group
+ * was flushed.
+ *
+ **************************************************************************/
+
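+/* Signalling sketch (illustrative; run_flush_verification_process() below is
+ * the real implementation, and only names already used in this file appear
+ * here): each verification is a file-based round-trip between this program
+ * and testflushrefresh.sh:
+ *
+ *     HDremove(SIGNAL_FROM_SCRIPT);                               // reset reply
+ *     h5_send_message(SIGNAL_TO_SCRIPT, obj_pathname, expected);  // ask script
+ *     if(h5_wait_message(SIGNAL_FROM_SCRIPT) < 0)                 // block on reply
+ *         TEST_ERROR;
+ */
+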
+ /**************************************************************************
+ * Generated Test File will look like this:
+ *
+ * GROUP "/"
+ * DATASET "Dataset1"
+ * GROUP "Group1" {
+ * DATASET "Dataset2"
+ * GROUP "Group2" {
+ * DATATYPE "CommittedDatatype2"
+ * }
+ * }
+ * GROUP "Group3" {
+ * DATASET "Dataset3"
+ * DATATYPE "CommittedDatatype3"
+ * }
+ * DATATYPE "CommittedDatatype1"
+ **************************************************************************/
+
+ /* Variables */
+ hid_t fid,gid,gid2,gid3,sid,tid1,tid2,tid3,did,did2,did3,rid,fapl;
+ herr_t status = 0;
+ hsize_t dims[2] = {3,5};
+
+ /* Testing Message */
+ HDfprintf(stdout, "Testing individual object flush behavior:\n");
+
+ /* Cleanup any old error or signal files */
+ CLEANUP_FILES;
+
+ /* ================ */
+ /* CREATE TEST FILE */
+ /* ================ */
+
+ /* Create file, open root group - have to use latest file format for SWMR */
+ if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0) TEST_ERROR;
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) TEST_ERROR;
+ if((fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC|H5F_ACC_SWMR_WRITE, H5P_DEFAULT, fapl)) < 0) TEST_ERROR;
+ if((rid = H5Gopen2(fid, "/", H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* Create data space and types */
+ if((sid = H5Screate_simple(2, dims, dims)) < 0) TEST_ERROR;
+ if((tid1 = H5Tcopy(H5T_NATIVE_INT)) < 0) TEST_ERROR;
+ if((tid2 = H5Tcopy(H5T_NATIVE_CHAR)) < 0) TEST_ERROR;
+ if((tid3 = H5Tcopy(H5T_NATIVE_LONG)) < 0) TEST_ERROR;
+
+ /* Create Group1 */
+ if((gid = H5Gcreate2(fid, "Group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* Create Group2 */
+ if((gid2 = H5Gcreate2(gid, "Group2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* Create Group3 */
+ if((gid3 = H5Gcreate2(fid, "Group3", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* Create Dataset1 */
+ if((did = H5Dcreate2(fid, "Dataset1", tid1, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* Create Dataset2 */
+ if((did2 = H5Dcreate2(gid, "Dataset2", tid3, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* Create Dataset3 */
+ if((did3 = H5Dcreate2(gid3, "Dataset3", tid2, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* Create CommittedDatatype1 */
+ if((status = H5Tcommit2(fid, "CommittedDatatype1", tid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* Create CommittedDatatype2 */
+ if((status = H5Tcommit2(gid2, "CommittedDatatype2", tid2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* Create CommittedDatatype3 */
+ if((status = H5Tcommit2(gid3, "CommittedDatatype3", tid3, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* ============ */
+ /* FLUSH GROUPS */
+ /* ============ */
+
+ /* Test */
+ TESTING("to ensure H5Gflush correctly flushes single groups");
+
+ /* First, let's verify that nothing is currently flushed. */
+ if(run_flush_verification_process(RG, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(G1, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(G2, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(G3, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(D1, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(D2, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(D3, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(T1, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(T2, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(T3, NOT_FLUSHED) != 0) TEST_ERROR;
+
+ /* Then, flush the root group and verify it's the only thing on disk */
+ if((status = H5Gflush(rid)) < 0) TEST_ERROR;
+ if(run_flush_verification_process(RG, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(G1, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(G2, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(G3, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(D1, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(D2, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(D3, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(T1, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(T2, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(T3, NOT_FLUSHED) != 0) TEST_ERROR;
+
+ /* Flush Group1 and Verify it is recently flushed, and nothing
+ * else has changed. */
+ if((status = H5Gflush(gid)) < 0) TEST_ERROR;
+ if(run_flush_verification_process(RG, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(G1, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(G2, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(G3, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(D1, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(D2, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(D3, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(T1, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(T2, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(T3, NOT_FLUSHED) != 0) TEST_ERROR;
+
+ /* Flush Group2 and Verify it is recently flushed, and nothing
+ * else has changed. */
+ if((status = H5Gflush(gid2)) < 0) TEST_ERROR;
+ if(run_flush_verification_process(RG, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(G1, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(G2, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(G3, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(D1, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(D2, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(D3, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(T1, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(T2, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(T3, NOT_FLUSHED) != 0) TEST_ERROR;
+
+ PASSED();
+
+ /* ============== */
+ /* FLUSH DATASETS */
+ /* ============== */
+
+ /* Test */
+ TESTING("to ensure H5Dflush correctly flushes single datasets");
+
+ /* Flush Dataset1 and verify it's the only thing that hits disk. */
+ if((status = H5Dflush(did)) < 0) TEST_ERROR;
+ if(run_flush_verification_process(RG, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(G1, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(G2, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(G3, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(D1, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(D2, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(D3, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(T1, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(T2, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(T3, NOT_FLUSHED) != 0) TEST_ERROR;
+
+ /* Flush Dataset2 and verify it's the only thing that hits disk. */
+ if((status = H5Dflush(did2)) < 0) TEST_ERROR;
+ if(run_flush_verification_process(RG, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(G1, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(G2, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(G3, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(D1, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(D2, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(D3, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(T1, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(T2, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(T3, NOT_FLUSHED) != 0) TEST_ERROR;
+
+ PASSED();
+
+ /* =============== */
+ /* FLUSH DATATYPES */
+ /* =============== */
+
+ /* Test */
+ TESTING("to ensure H5Tflush correctly flushes single datatypes");
+
+ /* Flush Datatype 1 and verify it's the only thing that hits disk. */
+ if((status = H5Tflush(tid1)) < 0) TEST_ERROR;
+ if(run_flush_verification_process(RG, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(G1, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(G2, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(G3, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(D1, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(D2, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(D3, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(T1, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(T2, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(T3, NOT_FLUSHED) != 0) TEST_ERROR;
+
+ /* Flush Datatype 2 and verify it's the only thing that hits disk. */
+ if((status = H5Tflush(tid2)) < 0) TEST_ERROR;
+ if(run_flush_verification_process(RG, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(G1, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(G2, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(G3, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(D1, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(D2, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(D3, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(T1, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(T2, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(T3, NOT_FLUSHED) != 0) TEST_ERROR;
+
+ PASSED();
+
+ /* ============= */
+ /* FLUSH OBJECTS */
+ /* ============= */
+
+ /* Test */
+ TESTING("to ensure H5Oflush correctly flushes single objects");
+
+ /* Flush Group3 and verify it's the only thing that hits disk. */
+ if((status = H5Oflush(gid3)) < 0) TEST_ERROR;
+ if(run_flush_verification_process(RG, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(G1, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(G2, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(G3, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(D1, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(D2, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(D3, NOT_FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(T1, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(T2, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(T3, NOT_FLUSHED) != 0) TEST_ERROR;
+
+ /* Flush Dataset3 and verify it's the only thing that hits disk. */
+ if((status = H5Oflush(did3)) < 0) TEST_ERROR;
+ if(run_flush_verification_process(RG, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(G1, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(G2, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(G3, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(D1, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(D2, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(D3, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(T1, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(T2, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(T3, NOT_FLUSHED) != 0) TEST_ERROR;
+
+ /* Flush CommittedDatatype3 and verify it's the only thing that hits disk. */
+ if((status = H5Oflush(tid3)) < 0) TEST_ERROR;
+ if(run_flush_verification_process(RG, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(G1, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(G2, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(G3, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(D1, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(D2, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(D3, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(T1, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(T2, FLUSHED) != 0) TEST_ERROR;
+ if(run_flush_verification_process(T3, FLUSHED) != 0) TEST_ERROR;
+
+ PASSED();
+
+ /* ================== */
+ /* Cleanup and Return */
+ /* ================== */
+ if(H5Pclose(fapl) < 0) TEST_ERROR;
+ if(H5Gclose(gid) < 0) TEST_ERROR;
+ if(H5Gclose(gid2) < 0) TEST_ERROR;
+ if(H5Dclose(did) < 0) TEST_ERROR;
+ if(H5Dclose(did2) < 0) TEST_ERROR;
+ if(H5Gclose(rid) < 0) TEST_ERROR;
+ if(H5Fclose(fid) < 0) TEST_ERROR;
+
+ /* Delete test file */
+ HDremove(FILENAME);
+
+ if(end_verification() < 0) TEST_ERROR;
+
+ return SUCCEED;
+
+error:
+ return FAIL;
+} /* end test_flush */
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_refresh
+ *
+ * Purpose: This function tests refresh (evict/reload) of individual
+ * objects' metadata from the metadata cache.
+ *
+ * Return: 0 on Success, 1 on Failure
+ *
+ * Programmer: Mike McGreevy
+ * August 17, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t test_refresh(void)
+{
+ /**************************************************************************
+ *
+ * Test Description:
+ *
+ * This test will build an HDF5 file with several objects in a varying
+ * hierarchical layout. It will then flush the entire file to disk. Then,
+ * an attribute will be added to each object in the file.
+ *
+ * One by one, this process will flush each object to disk, individually.
+ * It will also be coordinating with another process, which will open
+ * the object before it is flushed by this process, and then refresh the
+ * object after it's been flushed, comparing the before and after object
+ * information to ensure that they are as expected. (i.e., most notably,
+ * that an attribute has been added, and is only visible after a
+ * successful call to a H5*refresh function).
+ *
+ * As with the flush case, the implementation is a bit tricky since it
+ * relies on signals going back and forth between the two processes to
+ * get the timing right, but a typical sequence looks like this:
+ *
+ * Step 1. Dataset is created.
+ * Step 2. Dataset is flushed.
+ * Step 3. Attribute on Dataset is created.
+ * Step 4. Another process opens the dataset and verifies that it does
+ * not see an attribute (as the attribute hasn't been flushed yet).
+ * Step 5. This process flushes the dataset again (with Attribute attached).
+ * Step 6. The other process calls H5Drefresh, which should evict/reload
+ * the object's metadata, and thus pick up the attribute that's
+ * attached to it. Most other before/after object information is
+ * compared for sanity as well.
+ * Step 7. Rinse and Repeat for each object in the file.
+ *
+ **************************************************************************/
+
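+/* Handshake sketch (illustrative): every object exercised below follows the
+ * same three-step pattern in this (main) process, shown here for Dataset1
+ * using only identifiers created later in this routine:
+ *
+ *     start_refresh_verification_process(D1);    // other process opens object
+ *     aid = H5Acreate2(did, "Attribute", tid1, sid, H5P_DEFAULT, H5P_DEFAULT);
+ *     H5Aclose(aid);
+ *     H5Oflush(did);                             // new metadata now on disk
+ *     end_refresh_verification_process();        // other process calls H5*refresh
+ */
+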
+ /**************************************************************************
+ * Generated Test File will look like this:
+ *
+ * GROUP "/"
+ * DATASET "Dataset1"
+ * GROUP "Group1" {
+ * DATASET "Dataset2"
+ * GROUP "Group2" {
+ * DATATYPE "CommittedDatatype2"
+ * }
+ * }
+ * GROUP "Group3" {
+ * DATASET "Dataset3"
+ * DATATYPE "CommittedDatatype3"
+ * }
+ * DATATYPE "CommittedDatatype1"
+ **************************************************************************/
+
+ /* Variables */
+ hid_t aid,fid,sid,tid1,did,dcpl,fapl = 0;
+ hid_t gid,gid2,gid3,tid2,tid3,did2,did3;
+ herr_t status = 0;
+ hsize_t dims[2] = {50,50};
+ hsize_t cdims[2] = {1,1};
+ int fillval = 2;
+
+ /* Testing Message */
+ HDfprintf(stdout, "Testing individual object refresh behavior:\n");
+
+ /* Cleanup any old error or signal files */
+ CLEANUP_FILES;
+
+ /* ================ */
+ /* CREATE TEST FILE */
+ /* ================ */
+
+ /* Create File */
+ if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0) TEST_ERROR;
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) TEST_ERROR;
+ if((fid = H5Fcreate(FILENAME, H5F_ACC_TRUNC|H5F_ACC_SWMR_WRITE, H5P_DEFAULT, fapl)) < 0) TEST_ERROR;
+
+ /* Create data space and types */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0) TEST_ERROR;
+ if(H5Pset_chunk(dcpl, 2, cdims) < 0) TEST_ERROR;
+ if(H5Pset_fill_value(dcpl, H5T_NATIVE_INT, &fillval) < 0) TEST_ERROR;
+ if((sid = H5Screate_simple(2, dims, dims)) < 0) TEST_ERROR;
+ if((tid1 = H5Tcopy(H5T_NATIVE_INT)) < 0) TEST_ERROR;
+ if((tid2 = H5Tcopy(H5T_NATIVE_CHAR)) < 0) TEST_ERROR;
+ if((tid3 = H5Tcopy(H5T_NATIVE_LONG)) < 0) TEST_ERROR;
+
+ /* Create Group1 */
+ if((gid = H5Gcreate2(fid, "Group1", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* Create Group2 */
+ if((gid2 = H5Gcreate2(gid, "Group2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* Create Group3 */
+ if((gid3 = H5Gcreate2(fid, "Group3", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* Create Dataset1 */
+ if((did = H5Dcreate2(fid, "Dataset1", tid1, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* Create Dataset2 */
+ if((did2 = H5Dcreate2(gid, "Dataset2", tid3, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* Create Dataset3 */
+ if((did3 = H5Dcreate2(gid3, "Dataset3", tid2, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* Create CommittedDatatype1 */
+ if((status = H5Tcommit2(fid, "CommittedDatatype1", tid1, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* Create CommittedDatatype2 */
+ if((status = H5Tcommit2(gid2, "CommittedDatatype2", tid2, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* Create CommittedDatatype3 */
+ if((status = H5Tcommit2(gid3, "CommittedDatatype3", tid3, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+
+ /* Flush File to Disk */
+ if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0) TEST_ERROR;
+
+ /* ================ */
+ /* Refresh Datasets */
+ /* ================ */
+
+ TESTING("to ensure that H5Drefresh correctly refreshes single datasets");
+
+ /* Create an attribute on each object before flush. */
+
+ /* Verify First Dataset can be refreshed with H5Drefresh */
+ if(start_refresh_verification_process(D1) != 0) TEST_ERROR;
+
+ if((aid = H5Acreate2(did, "Attribute", tid1, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+ if(H5Aclose(aid) < 0) TEST_ERROR;
+ if(H5Oflush(did) < 0) TEST_ERROR;
+
+ if(end_refresh_verification_process() != 0) TEST_ERROR;
+
+ /* Verify Second Dataset can be refreshed with H5Drefresh */
+ if(start_refresh_verification_process(D2) != 0) TEST_ERROR;
+
+ if((aid = H5Acreate2(did2, "Attribute", tid1, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+ if(H5Aclose(aid) < 0) TEST_ERROR;
+ if(H5Oflush(did2) < 0) TEST_ERROR;
+
+ if(end_refresh_verification_process() != 0) TEST_ERROR;
+
+ PASSED();
+
+ /* ============== */
+ /* Refresh Groups */
+ /* ============== */
+
+ TESTING("to ensure that H5Grefresh correctly refreshes single groups");
+
+ /* Verify First Group can be refreshed with H5Grefresh */
+ if(start_refresh_verification_process(G1) != 0) TEST_ERROR;
+
+ if((aid = H5Acreate2(gid, "Attribute", tid1, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+ if(H5Aclose(aid) < 0) TEST_ERROR;
+ if(H5Oflush(gid) < 0) TEST_ERROR;
+
+ if(end_refresh_verification_process() != 0) TEST_ERROR;
+
+ /* Verify Second Group can be refreshed with H5Grefresh */
+ if(start_refresh_verification_process(G2) != 0) TEST_ERROR;
+
+ if((aid = H5Acreate2(gid2, "Attribute", tid1, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+ if(H5Aclose(aid) < 0) TEST_ERROR;
+ if(H5Oflush(gid2) < 0) TEST_ERROR;
+
+ if(end_refresh_verification_process() != 0) TEST_ERROR;
+
+ PASSED();
+
+ /* ================= */
+ /* Refresh Datatypes */
+ /* ================= */
+
+ TESTING("to ensure that H5Trefresh correctly refreshes single datatypes");
+
+ /* Verify First Committed Datatype can be refreshed with H5Trefresh */
+ if(start_refresh_verification_process(T1) != 0) TEST_ERROR;
+
+ if((aid = H5Acreate2(tid1, "Attribute", tid1, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+ if(H5Aclose(aid) < 0) TEST_ERROR;
+ if(H5Oflush(tid1) < 0) TEST_ERROR;
+
+ if(end_refresh_verification_process() != 0) TEST_ERROR;
+
+ /* Verify Second Committed Datatype can be refreshed with H5Trefresh */
+ if(start_refresh_verification_process(T2) != 0) TEST_ERROR;
+
+ if((aid = H5Acreate2(tid2, "Attribute", tid1, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+ if(H5Aclose(aid) < 0) TEST_ERROR;
+ if(H5Oflush(tid2) < 0) TEST_ERROR;
+
+ if(end_refresh_verification_process() != 0) TEST_ERROR;
+
+ PASSED();
+
+ /* =============== */
+ /* Refresh Objects */
+ /* =============== */
+
+ TESTING("to ensure that H5Orefresh correctly refreshes single objects");
+
+ /* Verify Third Dataset can be refreshed with H5Orefresh */
+ if(start_refresh_verification_process(D3) != 0) TEST_ERROR;
+
+ if((aid = H5Acreate2(did3, "Attribute", tid1, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+ if(H5Aclose(aid) < 0) TEST_ERROR;
+ if(H5Oflush(did3) < 0) TEST_ERROR;
+
+ if(end_refresh_verification_process() != 0) TEST_ERROR;
+
+ /* Verify Third Group can be refreshed with H5Orefresh */
+ if(start_refresh_verification_process(G3) != 0) TEST_ERROR;
+
+ if((aid = H5Acreate2(gid3, "Attribute", tid1, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+ if(H5Aclose(aid) < 0) TEST_ERROR;
+ if(H5Oflush(gid3) < 0) TEST_ERROR;
+
+ if(end_refresh_verification_process() != 0) TEST_ERROR;
+
+ /* Verify Third Committed Datatype can be refreshed with H5Orefresh */
+ if(start_refresh_verification_process(T3) != 0) TEST_ERROR;
+
+ if((aid = H5Acreate2(tid3, "Attribute", tid1, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR;
+ if(H5Aclose(aid) < 0) TEST_ERROR;
+ if(H5Oflush(tid3) < 0) TEST_ERROR;
+
+ if(end_refresh_verification_process() != 0) TEST_ERROR;
+
+ PASSED();
+
+ /* ================== */
+ /* Cleanup and Return */
+ /* ================== */
+
+ /* Close Stuff */
+ if(H5Pclose(fapl) < 0) TEST_ERROR;
+ if(H5Pclose(dcpl) < 0) TEST_ERROR;
+ if(H5Tclose(tid1) < 0) TEST_ERROR;
+ if(H5Tclose(tid2) < 0) TEST_ERROR;
+ if(H5Tclose(tid3) < 0) TEST_ERROR;
+ if(H5Dclose(did) < 0) TEST_ERROR;
+ if(H5Dclose(did2) < 0) TEST_ERROR;
+ if(H5Dclose(did3) < 0) TEST_ERROR;
+ if(H5Gclose(gid) < 0) TEST_ERROR;
+ if(H5Gclose(gid2) < 0) TEST_ERROR;
+ if(H5Gclose(gid3) < 0) TEST_ERROR;
+ if(H5Sclose(sid) < 0) TEST_ERROR;
+ if(H5Fclose(fid) < 0) TEST_ERROR;
+
+ /* Delete Test File */
+ HDremove(FILENAME);
+
+ if(end_verification() < 0) TEST_ERROR;
+
+ return SUCCEED;
+
+error:
+ /* Return */
+ return FAIL;
+} /* test_refresh() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: run_flush_verification_process
+ *
+ * Purpose: This function is used to communicate with the test script
+ * in order to spawn off a process to verify that a flush
+ * of an individual object was successful.
+ *
+ * Return: 0 on Success, 1 on Failure
+ *
+ * Programmer: Mike McGreevy
+ * July 16, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t run_flush_verification_process(const char * obj_pathname, const char * expected)
+{
+ HDremove(SIGNAL_FROM_SCRIPT);
+
+ /* Send Signal to SCRIPT indicating that it should kick off a verification process. */
+ h5_send_message(SIGNAL_TO_SCRIPT, obj_pathname, expected);
+
+ /* Wait for Signal from SCRIPT indicating that verification process has completed. */
+ if(h5_wait_message(SIGNAL_FROM_SCRIPT) < 0) TEST_ERROR;
+
+ /* Check to see if any errors occurred */
+ if(check_for_errors() < 0) TEST_ERROR;
+
+ /* Return */
+ return SUCCEED;
+
+error:
+ return FAIL;
+} /* run_flush_verification_process */
+
+
+/*-------------------------------------------------------------------------
+ * Function: flush_verification
+ *
+ * Purpose: This function tries to open target object in the test file.
+ * It compares the success of the open function to the expected
+ * value, and succeeds if they are equal and fails if they differ.
+ *
+ * Note that full path to the object must be provided as the
+ * obj_pathname argument.
+ *
+ * Return: 0 on Success, 1 on Failure
+ *
+ * Programmer: Mike McGreevy
+ * July 16, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t flush_verification(const char * obj_pathname, const char * expected)
+{
+ /* Variables */
+ hid_t oid = -1, fid = -1;
+ herr_t status = 0;
+ H5O_info_t oinfo;
+
+ /* Try to open the testfile and then obj_pathname within the file */
+ H5E_BEGIN_TRY {
+ fid = H5Fopen(FILENAME, H5F_ACC_SWMR_READ, H5P_DEFAULT);
+ oid = H5Oopen(fid, obj_pathname, H5P_DEFAULT);
+ status = H5Oget_info(oid, &oinfo);
+ } H5E_END_TRY;
+
+ /* Compare to expected result */
+ if(HDstrcmp(expected, FLUSHED) == 0) {
+ if((oid < 0) || (status < 0)) {
+ HDfprintf(stderr, "Error! %s should be on disk, but was NOT!\n", obj_pathname);
+ PROCESS_ERROR;
+ } /* end if */
+ } else if(HDstrcmp(expected, NOT_FLUSHED) == 0) {
+ if((oid > 0) || (status > 0)) {
+ HDfprintf(stderr, "Error! %s not expected to be flushed, but it was found on disk!\n", obj_pathname);
+ PROCESS_ERROR;
+ } /* end if */
+ } else {
+ HDfprintf(stderr, "Error! Bad verification parameters. %s is an invalid expected outcome.\n", expected);
+ PROCESS_ERROR;
+ } /* end if */
+
+ /* Cleanup */
+ H5E_BEGIN_TRY {
+ H5Oclose(oid);
+ H5Fclose(fid);
+ } H5E_END_TRY;
+
+ return SUCCEED;
+
+error:
+ return FAIL;
+} /* flush_verification */
+
+
+/*-------------------------------------------------------------------------
+ * Function: start_refresh_verification_process
+ *
+ * Purpose: This function is used to communicate with the test script
+ * in order to spawn off a process which will test the
+ * H5*refresh routine.
+ *
+ * Return: 0 on Success, 1 on Failure
+ *
+ * Programmer: Mike McGreevy
+ * July 16, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t start_refresh_verification_process(const char * obj_pathname)
+{
+ HDremove(SIGNAL_BETWEEN_PROCESSES_1);
+
+ /* Send Signal to SCRIPT indicating that it should kick off a refresh
+ verification process */
+ h5_send_message(SIGNAL_TO_SCRIPT, obj_pathname, NULL);
+
+ /* Wait for Signal from VERIFICATION PROCESS indicating that it's opened the
+ target object and ready for MAIN PROCESS to modify it */
+ if(h5_wait_message(SIGNAL_BETWEEN_PROCESSES_1) < 0) TEST_ERROR;
+
+ /* Check to see if any errors occurred */
+ if(check_for_errors() < 0) TEST_ERROR;
+
+ /* Return */
+ return SUCCEED;
+
+error:
+ return FAIL;
+} /* start_refresh_verification_process */
+
+
+/*-------------------------------------------------------------------------
+ * Function: end_refresh_verification_process
+ *
+ * Purpose: This function is used to communicate with the verification
+ * process spawned by the start_refresh_verification_process
+ * function. It gives it the go-ahead to call H5*refresh
+ * on an object and conclude the refresh verification.
+ *
+ * Return: 0 on Success, 1 on Failure
+ *
+ * Programmer: Mike McGreevy
+ * July 16, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t end_refresh_verification_process(void)
+{
+ HDremove(SIGNAL_FROM_SCRIPT);
+
+ /* Send Signal to REFRESH VERIFICATION PROCESS indicating that the object
+ has been modified and it should now attempt to refresh its metadata,
+ and verify the results. */
+ h5_send_message(SIGNAL_BETWEEN_PROCESSES_2, NULL, NULL);
+
+ /* Wait for Signal from SCRIPT indicating that the refresh verification
+ process has completed. */
+ if(h5_wait_message(SIGNAL_FROM_SCRIPT) < 0) TEST_ERROR;
+
+ /* Check to see if any errors occurred */
+ if(check_for_errors() < 0) TEST_ERROR;
+
+ /* Return */
+ return SUCCEED;
+
+error:
+ return FAIL;
+} /* end_refresh_verification_process */
+
+
+/*-------------------------------------------------------------------------
+ * Function: refresh_verification
+ *
+ * Purpose: This function opens the specified object, and checks to see
+ * that it does not have any attributes on it. It then sends
+ * a signal to the main process, which will flush the object
+ * (putting an attribute on the object on disk). This function
+ * will then refresh the object, and verify that it has picked
+ * up the new metadata reflective of the added attribute.
+ *
+ * Return: 0 on Success, 1 on Failure
+ *
+ * Programmer: Mike McGreevy
+ * July 16, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t refresh_verification(const char * obj_pathname)
+{
+ /* Variables */
+ hid_t oid = -1, fid = -1;
+ herr_t status = 0;
+ H5O_info_t flushed_oinfo;
+ H5O_info_t refreshed_oinfo;
+ int tries = 800, sleep_tries = 400;
+ hbool_t ok = FALSE;
+
+ HDremove(SIGNAL_BETWEEN_PROCESSES_2);
+
+ /* Open Object */
+ if((fid = H5Fopen(FILENAME, H5F_ACC_SWMR_READ, H5P_DEFAULT)) < 0) PROCESS_ERROR;
+ if((oid = H5Oopen(fid, obj_pathname, H5P_DEFAULT)) < 0) PROCESS_ERROR;
+
+ /* Get Object info */
+ if((status = H5Oget_info(oid, &flushed_oinfo)) < 0) PROCESS_ERROR;
+
+ /* Make sure there are no attributes on the object. This is just a sanity
+ check to ensure we didn't erroneously flush the attribute before
+ starting the verification. */
+ if(flushed_oinfo.num_attrs != 0)
+ PROCESS_ERROR;
+
+ /* Send Signal to MAIN PROCESS indicating that it can go ahead and modify the
+ object. */
+ h5_send_message(SIGNAL_BETWEEN_PROCESSES_1, obj_pathname, NULL);
+
+ /* Wait for Signal from MAIN PROCESS indicating that it's modified the
+ object and we can run verification now. */
+ if(h5_wait_message(SIGNAL_BETWEEN_PROCESSES_2) < 0) PROCESS_ERROR;
+
+ /* Get object info again. This will NOT reflect what's on disk, only what's
+ in the cache. Thus, all values will be unchanged from above, despite
+ newer information being on disk. */
+ if((status = H5Oget_info(oid, &refreshed_oinfo)) < 0) PROCESS_ERROR;
+
+ /* Verify that before doing a refresh, getting the object info returns stale
+ information. (i.e., unchanged from above, despite new info on disk). */
+ if(flushed_oinfo.addr != refreshed_oinfo.addr) PROCESS_ERROR;
+ if(flushed_oinfo.type != refreshed_oinfo.type) PROCESS_ERROR;
+ if(flushed_oinfo.hdr.version != refreshed_oinfo.hdr.version) PROCESS_ERROR;
+ if(flushed_oinfo.hdr.flags != refreshed_oinfo.hdr.flags) PROCESS_ERROR;
+ if(flushed_oinfo.num_attrs != refreshed_oinfo.num_attrs) PROCESS_ERROR;
+ if(flushed_oinfo.hdr.nmesgs != refreshed_oinfo.hdr.nmesgs) PROCESS_ERROR;
+ if(flushed_oinfo.hdr.nchunks != refreshed_oinfo.hdr.nchunks) PROCESS_ERROR;
+ if(flushed_oinfo.hdr.space.total != refreshed_oinfo.hdr.space.total) PROCESS_ERROR;
+
+ /* Refresh object */
+ /* The H5*refresh function called depends on which object we are trying
+ * to refresh. (MIKE: add desired refresh call as parameter so adding new
+ * test cases is easy). */
+ do {
+
+ if((HDstrcmp(obj_pathname, D1) == 0) || (HDstrcmp(obj_pathname, D2) == 0)) {
+ if(H5Drefresh(oid) < 0) PROCESS_ERROR;
+ } /* end if */
+ else if((HDstrcmp(obj_pathname, G1) == 0) || (HDstrcmp(obj_pathname, G2) == 0)) {
+ if(H5Grefresh(oid) < 0) PROCESS_ERROR;
+ } /* end if */
+ else if((HDstrcmp(obj_pathname, T1) == 0) || (HDstrcmp(obj_pathname, T2) == 0)) {
+ if(H5Trefresh(oid) < 0) PROCESS_ERROR;
+ } /* end if */
+ else if((HDstrcmp(obj_pathname, D3) == 0) || (HDstrcmp(obj_pathname, G3) == 0) ||
+ (HDstrcmp(obj_pathname, T3) == 0)) {
+ if(H5Orefresh(oid) < 0) PROCESS_ERROR;
+ } /* end if */
+ else {
+ HDfprintf(stdout, "Error. %s is an unrecognized object.\n", obj_pathname);
+ PROCESS_ERROR;
+ } /* end else */
+
+ /* Get object info. This should now accurately reflect the refreshed object on disk. */
+ if((status = H5Oget_info(oid, &refreshed_oinfo)) < 0) PROCESS_ERROR;
+
+ /* Confirm that the first four object info fields are unchanged */
+ /* and that the last four differ (the new attribute is now visible) */
+ if( (flushed_oinfo.addr == refreshed_oinfo.addr) &&
+ (flushed_oinfo.type == refreshed_oinfo.type) &&
+ (flushed_oinfo.hdr.version == refreshed_oinfo.hdr.version) &&
+ (flushed_oinfo.hdr.flags == refreshed_oinfo.hdr.flags) &&
+ (flushed_oinfo.num_attrs != refreshed_oinfo.num_attrs) &&
+ (flushed_oinfo.hdr.nmesgs != refreshed_oinfo.hdr.nmesgs) &&
+ (flushed_oinfo.hdr.nchunks != refreshed_oinfo.hdr.nchunks) &&
+ (flushed_oinfo.hdr.space.total != refreshed_oinfo.hdr.space.total) ) {
+ ok = TRUE;
+ break;
+ }
+ if(tries == sleep_tries)
+ HDsleep(1);
+
+ } while(--tries);
+
+ if(!ok) {
+ printf("FLUSHED: num_attrs=%d, nmesgs=%d, nchunks=%d, total=%d\n",
+ (int)flushed_oinfo.num_attrs, (int)flushed_oinfo.hdr.nmesgs,
+ (int)flushed_oinfo.hdr.nchunks, (int)flushed_oinfo.hdr.space.total);
+ printf("REFRESHED: num_attrs=%d, nmesgs=%d, nchunks=%d, total=%d\n",
+ (int)refreshed_oinfo.num_attrs, (int)refreshed_oinfo.hdr.nmesgs,
+ (int)refreshed_oinfo.hdr.nchunks, (int)refreshed_oinfo.hdr.space.total);
+ PROCESS_ERROR;
+ }
+
+ /* Close objects */
+ if(H5Oclose(oid) < 0) PROCESS_ERROR;
+ if(H5Fclose(fid) < 0) PROCESS_ERROR;
+
+ /* Return */
+ return SUCCEED;
+
+error:
+ return FAIL;
+} /* refresh_verification */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_for_errors()
+ *
+ * Purpose: This function checks whether the external verification
+ * processes have succeeded. It checks for the existence of the
+ * flushrefresh_ERROR file; if that file is present, an external
+ * verification process has failed and this function fails as
+ * well. If it is absent, no external process has reported an
+ * error and this function succeeds.
+ *
+ * Return: SUCCEED on success, FAIL on failure
+ *
+ * Programmer: Mike McGreevy
+ * July 1, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t check_for_errors(void)
+{
+ FILE * file;
+
+ if((file = HDfopen(ERRFILE, "r"))) {
+ HDfclose(file);
+ HDremove(ERRFILE);
+ return FAIL;
+ } /* end if */
+
+ return SUCCEED;
+} /* check_for_errors */
+
+
+/*-------------------------------------------------------------------------
+ * Function: end_verification
+ *
+ * Purpose: Tells test script that verification routines are completed and
+ * that the test can wrap up.
+ *
+ * Return: SUCCEED on success, FAIL on failure
+ *
+ * Programmer: Mike McGreevy
+ * July 16, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t end_verification(void)
+{
+ HDremove(SIGNAL_FROM_SCRIPT);
+
+ /* Send Signal to SCRIPT to indicate that we're done with verification. */
+ h5_send_message(SIGNAL_TO_SCRIPT, "VERIFICATION_DONE", "VERIFICATION_DONE");
+
+ /* Wait for Signal from SCRIPT indicating that we can continue. */
+ if(h5_wait_message(SIGNAL_FROM_SCRIPT) < 0) TEST_ERROR;
+
+ return SUCCEED;
+
+error:
+ return FAIL;
+} /* end_verification */
+
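The refresh dispatch in refresh_verification() above selects among H5Drefresh(), H5Grefresh(), H5Trefresh() and H5Orefresh() by comparing object path names, and the MIKE note suggests passing the desired call in as a parameter. As an illustration only (not part of this patch), a helper along the following lines could instead dispatch on the object type reported by H5Oget_info(); the FAIL return mirrors the convention used above.

/* Hypothetical helper: refresh the object 'oid' based on its type,
 * falling back to the generic H5Orefresh() call. */
static herr_t
refresh_object(hid_t oid)
{
    H5O_info_t oinfo;

    if(H5Oget_info(oid, &oinfo) < 0)
        return FAIL;

    switch(oinfo.type) {
        case H5O_TYPE_DATASET:
            return H5Drefresh(oid);
        case H5O_TYPE_GROUP:
            return H5Grefresh(oid);
        case H5O_TYPE_NAMED_DATATYPE:
            return H5Trefresh(oid);
        default:
            return H5Orefresh(oid);
    } /* end switch */
}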
diff --git a/test/freespace.c b/test/freespace.c
index af53eba..39c5688 100644
--- a/test/freespace.c
+++ b/test/freespace.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
* Tests for free-space manager
@@ -86,7 +84,7 @@ typedef struct TEST_free_section_t {
static herr_t TEST_sect_init_cls(H5FS_section_class_t *, void *);
static herr_t TEST_sect_free(H5FS_section_info_t *_sect);
static herr_t TEST_sect_can_merge(const H5FS_section_info_t *, const H5FS_section_info_t *, void H5_ATTR_UNUSED *);
-static herr_t TEST_sect_merging(H5FS_section_info_t *, H5FS_section_info_t *, void H5_ATTR_UNUSED *);
+static herr_t TEST_sect_merging(H5FS_section_info_t **, H5FS_section_info_t *, void H5_ATTR_UNUSED *);
static herr_t TEST_sect_can_shrink(const H5FS_section_info_t *, void *);
static herr_t TEST_sect_shrinking(H5FS_section_info_t **, void *);
@@ -245,26 +243,26 @@ TEST_sect_can_merge(const H5FS_section_info_t *_sect1,
* Merge the two sections (second section is merged into the first section)
*/
static herr_t
-TEST_sect_merging(H5FS_section_info_t *_sect1, H5FS_section_info_t *_sect2,
+TEST_sect_merging(H5FS_section_info_t **_sect1, H5FS_section_info_t *_sect2,
void H5_ATTR_UNUSED *_udata)
{
- TEST_free_section_t *sect1 = (TEST_free_section_t *)_sect1;
+ TEST_free_section_t **sect1 = (TEST_free_section_t **)_sect1;
TEST_free_section_t *sect2 = (TEST_free_section_t *)_sect2;
herr_t ret_value = SUCCEED; /* Return value */
/* Check arguments. */
HDassert(sect1);
- HDassert((sect1->sect_info.type == TEST_FSPACE_SECT_TYPE) ||
- (sect1->sect_info.type == TEST_FSPACE_SECT_TYPE_NEW) ||
- (sect1->sect_info.type == TEST_FSPACE_SECT_TYPE_NONE));
+ HDassert(((*sect1)->sect_info.type == TEST_FSPACE_SECT_TYPE) ||
+ ((*sect1)->sect_info.type == TEST_FSPACE_SECT_TYPE_NEW) ||
+ ((*sect1)->sect_info.type == TEST_FSPACE_SECT_TYPE_NONE));
HDassert(sect2);
HDassert((sect2->sect_info.type == TEST_FSPACE_SECT_TYPE) ||
(sect2->sect_info.type == TEST_FSPACE_SECT_TYPE_NEW) ||
(sect2->sect_info.type == TEST_FSPACE_SECT_TYPE_NONE));
- HDassert(H5F_addr_eq(sect1->sect_info.addr + sect1->sect_info.size, sect2->sect_info.addr));
+ HDassert(H5F_addr_eq((*sect1)->sect_info.addr + (*sect1)->sect_info.size, sect2->sect_info.addr));
/* Add second section's size to first section */
- sect1->sect_info.size += sect2->sect_info.size;
+ (*sect1)->sect_info.size += sect2->sect_info.size;
/* Get rid of second section */
if(TEST_sect_free((H5FS_section_info_t *)sect2) < 0)
@@ -1995,7 +1993,8 @@ test_fs_sect_shrink(hid_t fapl)
(hsize_t)(TEST_SECT_SIZE50), (H5FS_section_info_t **)&node)) < 0)
FAIL_STACK_ERROR
- if (node_found) TEST_ERROR
+ if (node_found)
+ TEST_ERROR
if(check_stats(f, frsp, &state))
TEST_ERROR
@@ -2095,7 +2094,8 @@ test_fs_sect_shrink(hid_t fapl)
(hsize_t)(TEST_SECT_SIZE50), (H5FS_section_info_t **)&node)) < 0)
FAIL_STACK_ERROR
- if (node_found) TEST_ERROR
+ if (node_found)
+ TEST_ERROR
/* section A should not be there in free-space */
if((node_found = H5FS_sect_find(f, dxpl_id, frsp,
@@ -2238,8 +2238,8 @@ test_fs_sect_change_class(hid_t fapl)
TEST_ERROR
if (H5FS_sect_change_class(f, dxpl_id, frsp, (H5FS_section_info_t *)sect_node1,
- TEST_FSPACE_SECT_TYPE_NONE) < 0)
- TEST_ERROR
+ TEST_FSPACE_SECT_TYPE_NONE) < 0)
+ TEST_ERROR
state.serial_sect_count += 1;
state.ghost_sect_count -=1;
@@ -2538,7 +2538,7 @@ test_fs_sect_extend(hid_t fapl)
TEST_ERROR
/* Extend a block by requested-size */
- if((status = H5FS_sect_try_extend(f, dxpl_id, frsp, (haddr_t)TEST_SECT_SIZE80, (hsize_t)TEST_SECT_SIZE20, (hsize_t)TEST_SECT_SIZE40)) < 0)
+ if((status = H5FS_sect_try_extend(f, dxpl_id, frsp, (haddr_t)TEST_SECT_SIZE80, (hsize_t)TEST_SECT_SIZE20, (hsize_t)TEST_SECT_SIZE40, 0, NULL)) < 0)
FAIL_STACK_ERROR
if(FALSE == status)
TEST_ERROR
@@ -2614,7 +2614,7 @@ test_fs_sect_extend(hid_t fapl)
TEST_ERROR
/* Extend the block by requested-size */
- if((status = H5FS_sect_try_extend(f, dxpl_id, frsp, (haddr_t)TEST_SECT_ADDR80, (hsize_t)TEST_SECT_SIZE20, (hsize_t)TEST_SECT_SIZE50)) < 0)
+ if((status = H5FS_sect_try_extend(f, dxpl_id, frsp, (haddr_t)TEST_SECT_ADDR80, (hsize_t)TEST_SECT_SIZE20, (hsize_t)TEST_SECT_SIZE50, 0, NULL)) < 0)
FAIL_STACK_ERROR
if(TRUE == status)
TEST_ERROR
@@ -2687,7 +2687,7 @@ test_fs_sect_extend(hid_t fapl)
TEST_ERROR
/* Extend the block by requested-size */
- if((status = H5FS_sect_try_extend(f, dxpl_id, frsp, (haddr_t)TEST_SECT_ADDR80, (hsize_t)TEST_SECT_SIZE20, (hsize_t)TEST_SECT_SIZE30)) < 0)
+ if((status = H5FS_sect_try_extend(f, dxpl_id, frsp, (haddr_t)TEST_SECT_ADDR80, (hsize_t)TEST_SECT_SIZE20, (hsize_t)TEST_SECT_SIZE30, 0, NULL)) < 0)
TEST_ERROR
if(FALSE == status)
TEST_ERROR
@@ -2761,7 +2761,7 @@ test_fs_sect_extend(hid_t fapl)
TEST_ERROR
/* Extend the block by requested-size */
- if((status = H5FS_sect_try_extend(f, dxpl_id, frsp, (haddr_t)TEST_SECT_ADDR80, (hsize_t)TEST_SECT_SIZE15, (hsize_t)TEST_SECT_SIZE40)) < 0)
+ if((status = H5FS_sect_try_extend(f, dxpl_id, frsp, (haddr_t)TEST_SECT_ADDR80, (hsize_t)TEST_SECT_SIZE15, (hsize_t)TEST_SECT_SIZE40, 0, NULL)) < 0)
TEST_ERROR
if(TRUE == status)
TEST_ERROR
@@ -2869,7 +2869,7 @@ test_fs_sect_iterate(hid_t fapl)
if(H5FS_sect_add(f, dxpl_id, frsp, (H5FS_section_info_t *)sect_node,
H5FS_ADD_RETURNED_SPACE, NULL) < 0)
FAIL_STACK_ERROR
- }
+ } /* end for */
if(H5FS_sect_iterate(f, dxpl_id, frsp, TEST_sects_cb, &udata) < 0)
TEST_ERROR
@@ -2905,7 +2905,7 @@ error:
H5E_BEGIN_TRY {
if(frsp)
H5FS_close(f, dxpl_id, frsp);
- H5Fclose(file);
+ H5Fclose(file);
H5Pclose(dxpl_id);
} H5E_END_TRY;
return 1;
@@ -2915,9 +2915,9 @@ error:
int
main(void)
{
- hid_t fapl = -1; /* File access property list for data files */
- unsigned nerrors = 0; /* Cumulative error count */
- const char *env_h5_drvr = NULL; /* File Driver value from environment */
+ hid_t fapl = -1; /* File access property list for data files */
+ unsigned nerrors = 0; /* Cumulative error count */
+ const char *env_h5_drvr = NULL; /* File Driver value from environment */
/* Get the VFD to use */
env_h5_drvr = HDgetenv("HDF5_DRIVER");
@@ -2926,11 +2926,16 @@ main(void)
h5_reset();
- fapl = h5_fileaccess();
+ if((fapl = h5_fileaccess()) < 0) {
+ nerrors++;
+ PUTS_ERROR("Can't get VFD-dependent fapl")
+ } /* end if */
/* make sure alignment is not set for tests to succeed */
- if(H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)1) < 0)
- TEST_ERROR
+ if(H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)1) < 0) {
+ nerrors++;
+ PUTS_ERROR("Can't set alignment")
+ } /* end if */
nerrors += test_fs_create(fapl);
nerrors += test_fs_sect_add(fapl);
@@ -2946,16 +2951,16 @@ main(void)
if(nerrors)
goto error;
- puts("All free-space tests passed.");
+ HDputs("All free-space tests passed.");
h5_cleanup(FILENAME, fapl);
- return 0;
+ HDexit(EXIT_SUCCESS);
error:
- puts("*** TESTS FAILED ***");
+ HDputs("*** TESTS FAILED ***");
H5E_BEGIN_TRY {
H5Pclose(fapl);
} H5E_END_TRY;
- return 1;
+ HDexit(EXIT_FAILURE);
} /* main() */
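The signature change to TEST_sect_merging() above (first argument becomes H5FS_section_info_t **) matches the free-space manager's updated 'merge' callback, where the double pointer lets the callback modify, replace, or free the surviving section in place. A minimal, generic sketch of that double-pointer pattern, in plain C and not using the HDF5 section types:

#include <stdlib.h>

typedef struct sect_t {
    unsigned long addr;  /* start of the section */
    unsigned long size;  /* length of the section */
} sect_t;

/* Merge 'second' into '*first'; because 'first' is a double pointer,
 * the callback could also swap in a different surviving section here. */
static int
merge_sections(sect_t **first, sect_t *second)
{
    (*first)->size += second->size;  /* absorb the second section */
    free(second);                    /* the second section no longer exists */
    return 0;
}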
diff --git a/test/fsm_aggr_nopersist.h5 b/test/fsm_aggr_nopersist.h5
new file mode 100644
index 0000000..159e7f7
--- /dev/null
+++ b/test/fsm_aggr_nopersist.h5
Binary files differ
diff --git a/test/fsm_aggr_persist.h5 b/test/fsm_aggr_persist.h5
new file mode 100644
index 0000000..1a837dd
--- /dev/null
+++ b/test/fsm_aggr_persist.h5
Binary files differ
diff --git a/test/gen_bad_compound.c b/test/gen_bad_compound.c
index b864195..c52eb95 100644
--- a/test/gen_bad_compound.c
+++ b/test/gen_bad_compound.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/gen_bad_ohdr.c b/test/gen_bad_ohdr.c
index 6d50230..36ba64a 100644
--- a/test/gen_bad_ohdr.c
+++ b/test/gen_bad_ohdr.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/gen_bogus.c b/test/gen_bogus.c
index fdd8744..ec29eb9 100644
--- a/test/gen_bogus.c
+++ b/test/gen_bogus.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/gen_cross.c b/test/gen_cross.c
index af39f69..cf3fc35 100644
--- a/test/gen_cross.c
+++ b/test/gen_cross.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/gen_deflate.c b/test/gen_deflate.c
index 3a9370d..3409a81 100644
--- a/test/gen_deflate.c
+++ b/test/gen_deflate.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/gen_file_image.c b/test/gen_file_image.c
index 0637473..9607cc2 100644
--- a/test/gen_file_image.c
+++ b/test/gen_file_image.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/gen_filespace.c b/test/gen_filespace.c
index e0c42e8..4397836 100644
--- a/test/gen_filespace.c
+++ b/test/gen_filespace.c
@@ -5,78 +5,112 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "hdf5.h"
+#include <assert.h>
#define NELMTS(X) (sizeof(X)/sizeof(X[0])) /* # of elements */
-#define TEST_THRESHOLD2 2 /* Free space section threshold */
const char *FILENAMES[] = {
- "filespace_persist.h5", /* H5F_FILE_SPACE_ALL_PERSIST */
- "filespace_default.h5", /* H5F_FILE_SPACE_ALL */
- "filespace_aggr_vfd.h5", /* H5F_FILE_SPACE_AGGR_VFD */
- "filespace_vfd.h5", /* H5F_FILE_SPACE_VFD */
- "filespace_threshold.h5" /* H5F_FILE_SPACE_ALL, non-default threshold */
+ "fsm_aggr_nopersist.h5", /* H5F_FSPACE_STRATEGY_FSM_AGGR + not persisting free-space */
+ "fsm_aggr_persist.h5", /* H5F_FSPACE_STRATEGY_FSM_AGGR + persisting free-space */
+ "paged_nopersist.h5", /* H5F_FSPACE_STRATEGY_PAGE + not persisting free-space */
+ "paged_persist.h5", /* H5F_FSPACE_STRATEGY_PAGE + persisting free-space */
+ "aggr.h5", /* H5F_FSPACE_STRATEGY_AGGR */
+ "none.h5" /* H5F_FSPACE_STRATEGY_NONE */
};
#define DATASET "dset"
#define NUM_ELMTS 100
+#define FALSE 0
+#define TRUE 1
+#define INC_ENUM(TYPE,VAR) (VAR)=((TYPE)((VAR)+1))
/*
- * Compile and run this program in file-space branch to generate
- * HDF5 files with different kinds of file space strategies
- * Move the HDF5 files to the 1.6 and 1.8 branch for compatibility
- * testing:test_filespace_compatible() will use the files
+ * Compile and run this program in the trunk to generate
+ * HDF5 files with combinations of the 4 file space strategies
+ * and the persist/do-not-persist free-space setting.
+ * The library creates the file space info message with "mark if unknown"
+ * in these files.
+ *
+ * Move these files to 1.8 branch for compatibility testing:
+ * test_filespace_compatible() in test/tfile.c will use these files.
+ *
+ * Copy these files from the 1.8 branch back to the trunk for
+ * compatibility testing via test_filespace_round_compatible() in test/tfile.c.
+ *
*/
-static void gen_file(void)
-{
- hid_t fid;
- hid_t fcpl;
- hid_t dataset, space;
- hsize_t dim[1];
- int data[NUM_ELMTS];
- size_t j; /* Local index variable */
- int i; /* Local index variable */
- H5F_file_space_type_t fs_type; /* File space handling strategy */
-
- for(j = 0, fs_type = H5F_FILE_SPACE_ALL_PERSIST; j < NELMTS(FILENAMES); j++, fs_type = (H5F_file_space_type_t)(fs_type + 1)) {
- /* Get a copy of the default file creation property */
- fcpl = H5Pcreate(H5P_FILE_CREATE);
-
- if(fs_type == H5F_FILE_SPACE_NTYPES) /* last file */
- /* Set default strategy but non-default threshold */
- H5Pset_file_space(fcpl, H5F_FILE_SPACE_ALL, (hsize_t)TEST_THRESHOLD2);
- else
- /* Set specified file space strategy and free space section threshold */
- H5Pset_file_space(fcpl, fs_type, (hsize_t)0);
-
- /* Create the file with the file space info */
- fid = H5Fcreate(FILENAMES[j], H5F_ACC_TRUNC, fcpl, H5P_DEFAULT);
-
- dim[0] = NUM_ELMTS;
- space = H5Screate_simple(1, dim, NULL);
- dataset = H5Dcreate2(fid, DATASET, H5T_NATIVE_INT, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
-
- for(i = 0; i < NUM_ELMTS; i++)
- data[i] = i;
-
- H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);
- H5Dclose(dataset);
- H5Sclose(space);
- H5Fclose(fid);
- }
-}
-
int main(void)
{
- gen_file();
+ hid_t fid = -1; /* File ID */
+ hid_t fcpl = -1; /* File creation property list */
+ hid_t did = -1; /* Dataset ID */
+ hid_t sid = -1; /* Dataspace ID */
+ hsize_t dim[1]; /* Dimension sizes */
+ int data[NUM_ELMTS]; /* Buffer for data */
+ int i, j; /* Local index variables */
+ H5F_fspace_strategy_t fs_strategy; /* File space handling strategy */
+ unsigned fs_persist; /* Persisting free-space or not */
+
+ j = 0;
+ for(fs_strategy = H5F_FSPACE_STRATEGY_FSM_AGGR; fs_strategy < H5F_FSPACE_STRATEGY_NTYPES; INC_ENUM(H5F_fspace_strategy_t, fs_strategy)) {
+ for(fs_persist = FALSE; fs_persist <= TRUE; fs_persist++) {
+
+ if(fs_persist && fs_strategy >= H5F_FSPACE_STRATEGY_AGGR)
+ continue;
+
+ /* Get a copy of the default file creation property */
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ goto error;
+
+ if(H5Pset_file_space_strategy(fcpl, fs_strategy, fs_persist, (hsize_t)1) < 0)
+ goto error;
+
+ /* Create the file with the file space info */
+ if((fid = H5Fcreate(FILENAMES[j], H5F_ACC_TRUNC, fcpl, H5P_DEFAULT)) < 0)
+ goto error;
+
+ /* Create the dataset */
+ dim[0] = NUM_ELMTS;
+ if((sid = H5Screate_simple(1, dim, NULL)) < 0)
+ goto error;
+ if((did = H5Dcreate2(fid, DATASET, H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ goto error;
+
+ for(i = 0; i < NUM_ELMTS; i++)
+ data[i] = i;
+
+ /* Write the dataset */
+ if(H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data) < 0)
+ goto error;
+
+ /* Closing */
+ if(H5Dclose(did) < 0)
+ goto error;
+ if(H5Sclose(sid) < 0)
+ goto error;
+ if(H5Fclose(fid) < 0)
+ goto error;
+ if(H5Pclose(fcpl) < 0)
+ goto error;
+ ++j;
+ }
+ }
+ assert(j == NELMTS(FILENAMES));
return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Sclose(sid);
+ H5Dclose(did);
+ H5Pclose(fcpl);
+ H5Fclose(fid);
+ } H5E_END_TRY;
+ return -1;
}
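The generated files are meant to be carried between branches for compatibility testing, so it can be handy to read the settings back out of a file. A small stand-alone check along these lines (not part of the patch; it assumes the HDF5 1.10 H5Pget_file_space_strategy() API) would confirm what each generated file actually recorded:

#include <stdio.h>
#include "hdf5.h"

/* Open 'name' read-only and print its file-space strategy, persist
 * flag, and free-space section threshold. */
static int
check_strategy(const char *name)
{
    hid_t fid = -1, fcpl = -1;
    H5F_fspace_strategy_t strategy;
    hbool_t persist;
    hsize_t threshold;

    if((fid = H5Fopen(name, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0)
        return -1;
    if((fcpl = H5Fget_create_plist(fid)) < 0)
        goto error;
    if(H5Pget_file_space_strategy(fcpl, &strategy, &persist, &threshold) < 0)
        goto error;

    printf("%s: strategy=%d persist=%d threshold=%llu\n",
           name, (int)strategy, (int)persist, (unsigned long long)threshold);

    H5Pclose(fcpl);
    H5Fclose(fid);
    return 0;

error:
    H5E_BEGIN_TRY { H5Pclose(fcpl); H5Fclose(fid); } H5E_END_TRY;
    return -1;
}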
diff --git a/test/gen_filters.c b/test/gen_filters.c
index f506602..fa66078 100644
--- a/test/gen_filters.c
+++ b/test/gen_filters.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "h5test.h"
diff --git a/test/gen_mergemsg.c b/test/gen_mergemsg.c
index 5df5674..f158d57 100644
--- a/test/gen_mergemsg.c
+++ b/test/gen_mergemsg.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/gen_new_array.c b/test/gen_new_array.c
index c78644d..27f162c 100644
--- a/test/gen_new_array.c
+++ b/test/gen_new_array.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/gen_new_fill.c b/test/gen_new_fill.c
index 4ba85ad..5bdbf73 100644
--- a/test/gen_new_fill.c
+++ b/test/gen_new_fill.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/gen_new_group.c b/test/gen_new_group.c
index b998913..6924291 100644
--- a/test/gen_new_group.c
+++ b/test/gen_new_group.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/gen_new_mtime.c b/test/gen_new_mtime.c
index d7a0f32..b44d567 100644
--- a/test/gen_new_mtime.c
+++ b/test/gen_new_mtime.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/gen_new_super.c b/test/gen_new_super.c
index f6d15f4..f6ce589 100644
--- a/test/gen_new_super.c
+++ b/test/gen_new_super.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/gen_noencoder.c b/test/gen_noencoder.c
index 46a2036..e6ac9cf 100644
--- a/test/gen_noencoder.c
+++ b/test/gen_noencoder.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "hdf5.h"
diff --git a/test/gen_nullspace.c b/test/gen_nullspace.c
index 8349a5c..9d76deb 100644
--- a/test/gen_nullspace.c
+++ b/test/gen_nullspace.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/gen_old_array.c b/test/gen_old_array.c
index a4162f2..3fab657 100644
--- a/test/gen_old_array.c
+++ b/test/gen_old_array.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/gen_old_group.c b/test/gen_old_group.c
index 1b33cea..d109329 100644
--- a/test/gen_old_group.c
+++ b/test/gen_old_group.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/gen_old_layout.c b/test/gen_old_layout.c
index 312ee58..56c3e4e 100644
--- a/test/gen_old_layout.c
+++ b/test/gen_old_layout.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/gen_old_mtime.c b/test/gen_old_mtime.c
index ca6012a..cbe3bdc 100644
--- a/test/gen_old_mtime.c
+++ b/test/gen_old_mtime.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/gen_plist.c b/test/gen_plist.c
index c617ad0..41f232d 100644
--- a/test/gen_plist.c
+++ b/test/gen_plist.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -81,6 +79,12 @@ main(void)
0.2f,
(256 * 2048),
H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY};
+ H5AC_cache_image_config_t my_cache_image_config = {
+ H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION,
+ TRUE,
+ FALSE,
+ -1};
+
/* check endianess */
{
@@ -356,6 +360,9 @@ main(void)
assert(ret > 0);
if((ret = H5Pset_mdc_config(fapl1, &my_cache_config)) < 0)
assert(ret > 0);
+ if((ret = H5Pset_mdc_image_config(fapl1, &my_cache_image_config)) < 0)
+ assert(ret > 0);
+
if((ret = H5Pset_core_write_tracking(fapl1, TRUE, (size_t)(1024 * 1024))) < 0)
assert(ret > 0);
@@ -394,6 +401,12 @@ main(void)
if((ret = H5Pset_sizes(fcpl1, 8, 4) < 0))
assert(ret > 0);
+ if((ret = H5Pset_file_space_strategy(fcpl1, H5F_FSPACE_STRATEGY_PAGE, TRUE, (hsize_t)1)) < 0)
+ assert(ret > 0);
+
+ if((ret = H5Pset_file_space_page_size(fcpl1, (hsize_t)4096)) < 0)
+ assert(ret > 0);
+
if((ret = encode_plist(fcpl1, little_endian, word_length, "testfiles/plist_files/fcpl_")) < 0)
assert(ret > 0);
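The new fcpl calls above select the paged file-space strategy and a 4096-byte file-space page size. When such a file is accessed, the page buffer on the file-access side is normally sized as a multiple of that page; the sketch below is a hypothetical companion (not part of the patch) and assumes the HDF5 1.10 H5Pset_page_buffer_size() API:

#include "hdf5.h"

/* Build a fapl with a page buffer of 64 pages of 4 KiB, reserving at
 * least 20% of the buffer for metadata pages and 20% for raw data. */
static hid_t
make_paged_fapl(void)
{
    hid_t fapl;

    if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
        return -1;

    if(H5Pset_page_buffer_size(fapl, (size_t)(64 * 4096), 20, 20) < 0) {
        H5Pclose(fapl);
        return -1;
    }

    return fapl;
}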
diff --git a/test/gen_sizes_lheap.c b/test/gen_sizes_lheap.c
index de7e261..81742df 100644
--- a/test/gen_sizes_lheap.c
+++ b/test/gen_sizes_lheap.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/gen_specmetaread.c b/test/gen_specmetaread.c
index f489119..d06bd59 100644
--- a/test/gen_specmetaread.c
+++ b/test/gen_specmetaread.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/gen_udlinks.c b/test/gen_udlinks.c
index 55abab0..e48d0e8 100644
--- a/test/gen_udlinks.c
+++ b/test/gen_udlinks.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/genall5.c b/test/genall5.c
new file mode 100644
index 0000000..a48f14b
--- /dev/null
+++ b/test/genall5.c
@@ -0,0 +1,3886 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/* Programmer: John Mainzer
+ * 9/23/15
+ *
+ * This file contains a heavily edited and functionally reduced
+ * version of the test code first written by Quincey in a file
+ * of the same name.
+ */
+
+#include "cache_common.h"
+#include "genall5.h"
+
+#define DSET_DIMS (1024 * 1024)
+#define DSET_SMALL_DIMS (64 * 1024)
+#define DSET_CHUNK_DIMS 1024
+#define DSET_COMPACT_DIMS 4096
+
+
+/*-------------------------------------------------------------------------
+ * Function: ns_grp_0
+ *
+ * Purpose: Create an empty "new style" group at the specified location
+ * in the specified file.
+ *
+ * If pass is false on entry, do nothing.
+ *
+ * If an error is detected, set pass to FALSE, and set
+ * failure_mssg to point to an appropriate error message.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 9/14/15
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+ns_grp_0(hid_t fid, const char *group_name)
+{
+ hid_t gid = -1;
+ hid_t gcpl = -1;
+ herr_t ret;
+
+ if ( pass ) {
+
+ gcpl = H5Pcreate(H5P_GROUP_CREATE);
+
+ if ( gcpl <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ns_grp_0: H5Pcreate() failed";
+ }
+ assert(gcpl > 0);
+ }
+
+ if ( pass ) {
+
+ ret = H5Pset_link_creation_order(gcpl, H5P_CRT_ORDER_TRACKED);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ns_grp_0: H5Pset_link_creation_order() failed";
+ }
+ assert(ret >= 0);
+ }
+
+ if ( pass ) {
+
+ gid = H5Gcreate2(fid, group_name, H5P_DEFAULT, gcpl, H5P_DEFAULT);
+
+ if ( gid <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ns_grp_0: H5Gcreate2() failed";
+ }
+ assert(gid > 0);
+ }
+
+ if ( pass ) {
+
+ ret = H5Pclose(gcpl);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ns_grp_0: H5Pclose(gcpl) failed";
+ }
+ assert(ret >= 0);
+ }
+
+ if ( pass ) {
+
+ ret = H5Gclose(gid);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ns_grp_0: H5Gclose(gid) failed";
+ }
+ assert(ret >= 0);
+ }
+
+ return;
+
+} /* ns_grp_0 */
+
+
+/*-------------------------------------------------------------------------
+ * Function: vrfy_ns_grp_0
+ *
+ * Purpose: verify an empty "new style" group at the specified location
+ * in the specified file.
+ *
+ * If pass is false on entry, do nothing.
+ *
+ * If an error is detected, set pass to FALSE, and set
+ * failure_mssg to point to an appropriate error message.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 9/14/15
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+vrfy_ns_grp_0(hid_t fid, const char *group_name)
+{
+ hid_t gid = -1;
+ hid_t gcpl = -1;
+ H5G_info_t grp_info;
+ unsigned crt_order_flags = 0;
+ herr_t ret;
+
+ if ( pass ) {
+
+ gid = H5Gopen2(fid, group_name, H5P_DEFAULT);
+
+ if ( gid <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_0: H5Gopen2() failed";
+ }
+ HDassert(gid > 0);
+ }
+
+ if ( pass ) {
+
+ gcpl = H5Gget_create_plist(gid);
+
+ if ( gcpl <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_0: H5Gget_create_plist() failed";
+ }
+ HDassert(gcpl > 0);
+ }
+
+ if ( pass ) {
+
+ ret = H5Pget_link_creation_order(gcpl, &crt_order_flags);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_0: H5Pget_link_creation_order() failed";
+ }
+ else if ( H5P_CRT_ORDER_TRACKED != crt_order_flags ) {
+
+ pass = FALSE;
+ failure_mssg =
+ "vrfy_ns_grp_0: H5P_CRT_ORDER_TRACKED != crt_order_flags";
+ }
+ HDassert(ret >= 0);
+ HDassert(H5P_CRT_ORDER_TRACKED == crt_order_flags);
+ }
+
+ if ( pass ) {
+
+ ret = H5Pclose(gcpl);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_0: H5Pclose() failed";
+ }
+ HDassert(ret >= 0);
+ }
+
+ if ( pass ) {
+
+ memset(&grp_info, 0, sizeof(grp_info));
+ ret = H5Gget_info(gid, &grp_info);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_0: H5Gget_info() failed";
+ }
+ else if ( H5G_STORAGE_TYPE_COMPACT != grp_info.storage_type ) {
+
+ pass = FALSE;
+ failure_mssg =
+ "vrfy_ns_grp_0: H5G_STORAGE_TYPE_COMPACT != grp_info.storage_type";
+ }
+ else if ( 0 != grp_info.nlinks ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_0: 0 != grp_info.nlinks";
+ }
+ else if ( 0 != grp_info.max_corder ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_0: 0 != grp_info.max_corder";
+ }
+ else if ( FALSE != grp_info.mounted ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_0: FALSE != grp_info.mounted";
+ }
+
+ HDassert(ret >= 0);
+ HDassert(H5G_STORAGE_TYPE_COMPACT == grp_info.storage_type);
+ HDassert(0 == grp_info.nlinks);
+ HDassert(0 == grp_info.max_corder);
+ HDassert(false == grp_info.mounted);
+ }
+
+ if ( pass ) {
+
+ ret = H5Gclose(gid);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_0: H5Gclose() failed";
+ }
+ HDassert(ret >= 0);
+ }
+
+ return;
+
+} /* vrfy_ns_grp_0() */
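A hypothetical usage of the pair above (not part of the patch), assuming an already-open test file 'fid' and the pass/failure_mssg globals from cache_common.h:

/* Create an empty "new style" group, then verify it; the group name
 * used here is illustrative only. */
pass = TRUE;
failure_mssg = NULL;

ns_grp_0(fid, "/empty_new_style_group");
vrfy_ns_grp_0(fid, "/empty_new_style_group");

if(!pass)
    HDfprintf(stdout, "group test failed: %s\n", failure_mssg);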
+
+
+/*-------------------------------------------------------------------------
+ * Function: ns_grp_c
+ *
+ * Purpose: Create a compact "new style" group, with 'nlinks'
+ * soft/hard/external links in it in the specified file.
+ *
+ * If pass is false on entry, do nothing.
+ *
+ * If an error is detected, set pass to FALSE, and set
+ * failure_mssg to point to an appropriate error message.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 9/14/15
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+ns_grp_c(hid_t fid, const char *group_name, unsigned nlinks)
+{
+ hid_t gid = -1;
+ hid_t gcpl = -1;
+ unsigned max_compact;
+ unsigned u;
+ herr_t ret;
+
+ if ( pass ) {
+
+ gcpl = H5Pcreate(H5P_GROUP_CREATE);
+
+ if ( gcpl <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ns_grp_c: H5Pcreate(H5P_GROUP_CREATE) failed";
+ }
+ HDassert(gcpl > 0);
+ }
+
+ if ( pass ) {
+
+ ret = H5Pset_link_creation_order(gcpl, H5P_CRT_ORDER_TRACKED);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ns_grp_c: H5Pset_link_creation_order() failed";
+ }
+ HDassert(ret >= 0);
+ }
+
+ if ( pass ) {
+
+ gid = H5Gcreate2(fid, group_name, H5P_DEFAULT, gcpl, H5P_DEFAULT);
+
+ if ( gid <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ns_grp_c: H5Gcreate2() failed";
+ }
+ HDassert(gid > 0);
+ }
+
+ if ( pass ) {
+
+ max_compact = 0;
+ ret = H5Pget_link_phase_change(gcpl, &max_compact, NULL);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ns_grp_c: H5Pget_link_phase_change() failed";
+ }
+ else if ( 0 == nlinks ) {
+
+ pass = FALSE;
+ failure_mssg = "ns_grp_c: nlinks <= 0";
+ }
+ else if ( nlinks >= max_compact ) {
+
+ pass = FALSE;
+ failure_mssg = "ns_grp_c: nlinks >= max_compact";
+ }
+
+ HDassert(ret >= 0);
+ HDassert(nlinks > 0);
+ HDassert(nlinks < max_compact);
+ }
+
+ u = 0;
+ while ( ( pass ) && ( u < nlinks ) ) {
+
+ char linkname[16];
+
+ sprintf(linkname, "%u", u);
+
+ if(0 == (u % 3)) {
+
+ ret = H5Lcreate_soft(group_name, gid, linkname, H5P_DEFAULT,
+ H5P_DEFAULT);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ns_grp_c: H5Lcreate_soft() failed";
+ }
+ HDassert(ret >= 0);
+ } /* end if */
+ else if(1 == (u % 3)) {
+
+ ret = H5Lcreate_hard(fid, "/", gid, linkname, H5P_DEFAULT,
+ H5P_DEFAULT);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ns_grp_c: H5Lcreate_hard() failed";
+ }
+ HDassert(ret >= 0);
+ } /* end else-if */
+ else {
+
+ HDassert(2 == (u % 3));
+ ret = H5Lcreate_external("external.h5", "/ext", gid, linkname,
+ H5P_DEFAULT, H5P_DEFAULT);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ns_grp_c: H5Lcreate_external() failed";
+ }
+ HDassert(ret >= 0);
+ } /* end else */
+
+ u++;
+
+ } /* end while() */
+
+ if ( pass ) {
+
+ ret = H5Pclose(gcpl);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ns_grp_c: H5Pclose(gcpl) failed";
+ }
+ HDassert(ret >= 0);
+ }
+
+ if ( pass ) {
+
+ ret = H5Gclose(gid);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ns_grp_c: H5Gclose(gid) failed";
+ }
+ HDassert(ret >= 0);
+ }
+
+ return;
+
+} /* ns_grp_c() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: vrfy_ns_grp_c
+ *
+ * Purpose: Verify a compact "new style" group, with 'nlinks'
+ * soft/hard/external links in it in the specified file.
+ *
+ * If pass is false on entry, do nothing.
+ *
+ * If an error is detected, set pass to FALSE, and set
+ * failure_mssg to point to an appropriate error message.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 9/14/15
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+vrfy_ns_grp_c(hid_t fid, const char *group_name, unsigned nlinks)
+{
+ hid_t gid = -1;
+ hid_t gcpl = -1;
+ H5G_info_t grp_info;
+ unsigned crt_order_flags = 0;
+ unsigned u;
+ herr_t ret;
+
+ if ( pass ) {
+
+ gid = H5Gopen2(fid, group_name, H5P_DEFAULT);
+
+ if ( gid <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_c: H5Gopen2() failed";
+ }
+ HDassert(gid > 0);
+ }
+
+ if ( pass ) {
+
+ gcpl = H5Gget_create_plist(gid);
+
+ if ( gcpl <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_c: H5Gget_create_plist(gid) failed";
+ }
+ assert(gcpl > 0);
+ }
+
+ if ( pass ) {
+
+ ret = H5Pget_link_creation_order(gcpl, &crt_order_flags);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_c: H5Pget_link_creation_order() failed";
+ }
+ else if ( H5P_CRT_ORDER_TRACKED != crt_order_flags ) {
+
+ pass = FALSE;
+ failure_mssg =
+ "vrfy_ns_grp_c: H5P_CRT_ORDER_TRACKED != crt_order_flags";
+ }
+ HDassert(ret >= 0);
+ HDassert(H5P_CRT_ORDER_TRACKED == crt_order_flags);
+ }
+
+ if ( pass ) {
+
+ ret = H5Pclose(gcpl);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_c: H5Pclose() failed";
+ }
+ HDassert(ret >= 0);
+ }
+
+ if ( pass ) {
+
+ memset(&grp_info, 0, sizeof(grp_info));
+ ret = H5Gget_info(gid, &grp_info);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_c: H5Gget_info() failed";
+ }
+ else if ( H5G_STORAGE_TYPE_COMPACT != grp_info.storage_type ) {
+
+ pass = FALSE;
+ failure_mssg =
+ "vrfy_ns_grp_c: H5G_STORAGE_TYPE_COMPACT != grp_info.storage_type";
+ }
+ else if ( nlinks != grp_info.nlinks ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_c: nlinks != grp_info.nlinks";
+ }
+ else if ( nlinks != grp_info.max_corder ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_c: nlinks != grp_info.max_corder";
+ }
+ else if ( FALSE != grp_info.mounted ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_c: FALSE != grp_info.mounted";
+ }
+
+ HDassert(ret >= 0);
+ HDassert(H5G_STORAGE_TYPE_COMPACT == grp_info.storage_type);
+ HDassert(nlinks == grp_info.nlinks);
+ HDassert(nlinks == grp_info.max_corder);
+ HDassert(false == grp_info.mounted);
+ }
+
+ u = 0;
+ while ( ( pass ) && ( u < nlinks ) ) {
+
+ H5L_info_t lnk_info;
+ char linkname[16];
+ htri_t link_exists;
+
+ sprintf(linkname, "%u", u);
+ link_exists = H5Lexists(gid, linkname, H5P_DEFAULT);
+
+ if ( link_exists < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_c: H5Lexists() failed";
+ }
+ HDassert(link_exists >= 0);
+
+ memset(&lnk_info, 0, sizeof(lnk_info));
+ ret = H5Lget_info(gid, linkname, &lnk_info, H5P_DEFAULT);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_c: H5Lget_info() failed";
+ }
+ else if ( TRUE != lnk_info.corder_valid ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_c: TRUE != lnk_info.corder_valid";
+ }
+ else if ( u != lnk_info.corder ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_c: u != lnk_info.corder";
+ }
+ else if ( H5T_CSET_ASCII != lnk_info.cset ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_c: H5T_CSET_ASCII != lnk_info.cset";
+ }
+ HDassert(ret >= 0);
+ HDassert(true == lnk_info.corder_valid);
+ HDassert(u == lnk_info.corder);
+ HDassert(H5T_CSET_ASCII == lnk_info.cset);
+
+ if ( 0 == (u % 3) ) {
+
+ char *slinkval;
+
+ if ( H5L_TYPE_SOFT != lnk_info.type ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_c: H5L_TYPE_SOFT != lnk_info.type";
+ }
+ else if ( (strlen(group_name) + 1) != lnk_info.u.val_size ) {
+
+ pass = FALSE;
+ failure_mssg =
+ "vrfy_ns_grp_c: (strlen(group_name) + 1) != lnk_info.u.val_size";
+ }
+ HDassert(H5L_TYPE_SOFT == lnk_info.type);
+ HDassert((strlen(group_name) + 1) == lnk_info.u.val_size);
+
+ slinkval = (char *)malloc(lnk_info.u.val_size);
+
+ if ( ! slinkval ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_c: malloc of slinkval failed";
+ }
+ HDassert(slinkval);
+
+ ret = H5Lget_val(gid, linkname, slinkval, lnk_info.u.val_size,
+ H5P_DEFAULT);
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_c: H5Lget_val() failed";
+ }
+ else if ( 0 != strcmp(slinkval, group_name) ) {
+
+ pass = FALSE;
+ failure_mssg =
+ "vrfy_ns_grp_c: 0 != strcmp(slinkval, group_name)";
+ }
+ HDassert(ret >= 0);
+ HDassert(0 == strcmp(slinkval, group_name));
+
+ free(slinkval);
+ } /* end if */
+ else if(1 == (u % 3)) {
+
+ H5O_info_t root_oinfo;
+
+ if ( H5L_TYPE_HARD != lnk_info.type ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_c: H5L_TYPE_HARD != lnk_info.type";
+ }
+ HDassert(H5L_TYPE_HARD == lnk_info.type);
+
+ memset(&root_oinfo, 0, sizeof(root_oinfo));
+ ret = H5Oget_info(fid, &root_oinfo);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_c: H5Oget_info() failed.";
+ }
+ else if ( root_oinfo.addr != lnk_info.u.address ) {
+
+ pass = FALSE;
+ failure_mssg =
+ "vrfy_ns_grp_c: root_oinfo.addr != lnk_info.u.address";
+ }
+ HDassert(ret >= 0);
+ HDassert(root_oinfo.addr == lnk_info.u.address);
+ } /* end else-if */
+ else {
+ void *elinkval;
+ const char *file = NULL;
+ const char *path = NULL;
+
+ HDassert(2 == (u % 3));
+
+ if ( H5L_TYPE_EXTERNAL != lnk_info.type ) {
+
+ pass = FALSE;
+ failure_mssg =
+ "vrfy_ns_grp_c: H5L_TYPE_EXTERNAL != lnk_info.type";
+ }
+ HDassert(H5L_TYPE_EXTERNAL == lnk_info.type);
+
+ elinkval = malloc(lnk_info.u.val_size);
+
+ if ( ! elinkval ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_c: malloc of elinkval failed.";
+ }
+ HDassert(elinkval);
+
+ ret = H5Lget_val(gid, linkname, elinkval, lnk_info.u.val_size,
+ H5P_DEFAULT);
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_c: H5Lget_val() failed.";
+ }
+ HDassert(ret >= 0);
+
+ ret = H5Lunpack_elink_val(elinkval, lnk_info.u.val_size,
+ NULL, &file, &path);
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_c: H5Lunpack_elink_val() failed.";
+ }
+ else if ( 0 != strcmp(file, "external.h5") ) {
+
+ pass = FALSE;
+ failure_mssg =
+ "vrfy_ns_grp_c: 0 != strcmp(file, \"external.h5\")";
+ }
+ else if ( 0 != strcmp(path, "/ext") ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_c: 0 != strcmp(path, \"/ext\")";
+ }
+ HDassert(ret >= 0);
+ HDassert(0 == strcmp(file, "external.h5"));
+ HDassert(0 == strcmp(path, "/ext"));
+
+ free(elinkval);
+ } /* end else */
+
+ u++;
+
+ } /* end while */
+
+ if ( pass ) {
+
+ ret = H5Gclose(gid);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_c: H5Gclose() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ return;
+
+} /* vrfy_ns_grp_c() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: ns_grp_d
+ *
+ * Purpose: Create a dense "new style" group, with 'nlinks'
+ * (soft/hard/external) links in it in the specified file.
+ *
+ * If pass is false on entry, do nothing.
+ *
+ * If an error is detected, set pass to FALSE, and set
+ * failure_mssg to point to an appropriate error message.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 9/14/15
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+ns_grp_d(hid_t fid, const char *group_name, unsigned nlinks)
+{
+ hid_t gid = -1;
+ hid_t gcpl = -1;
+ unsigned max_compact;
+ unsigned u;
+ herr_t ret;
+
+ if ( pass ) {
+
+ gcpl = H5Pcreate(H5P_GROUP_CREATE);
+
+ if ( gcpl <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ns_grp_d: H5Pcreate() failed.";
+ }
+ HDassert(gcpl > 0);
+ }
+
+ if ( pass ) {
+
+ ret = H5Pset_link_creation_order(gcpl, H5P_CRT_ORDER_TRACKED);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ns_grp_d: H5Pset_link_creation_order() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ if ( pass ) {
+
+ gid = H5Gcreate2(fid, group_name, H5P_DEFAULT, gcpl, H5P_DEFAULT);
+
+ if ( gid <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ns_grp_d: H5Gcreate2() failed.";
+ }
+ HDassert(gid > 0);
+ }
+
+ if ( pass ) {
+
+ max_compact = 0;
+ ret = H5Pget_link_phase_change(gcpl, &max_compact, NULL);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ns_grp_d: H5Pget_link_phase_change() failed.";
+ }
+ else if ( nlinks <= max_compact ) {
+
+ pass = FALSE;
+ failure_mssg = "ns_grp_d: nlinks <= max_compact";
+ }
+ HDassert(ret >= 0);
+ HDassert(nlinks > max_compact);
+ }
+
+ u = 0;
+ while ( ( pass ) && ( u < nlinks ) ) {
+
+ char linkname[16];
+
+ sprintf(linkname, "%u", u);
+
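+ /* Cycle through link types by index: a soft link back to this group,
+ * a hard link to the root group, and an external link to "/ext" in
+ * external.h5.
+ */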
+ if(0 == (u % 3)) {
+
+ ret = H5Lcreate_soft(group_name, gid, linkname,
+ H5P_DEFAULT, H5P_DEFAULT);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ns_grp_d: H5Lcreate_soft() failed.";
+ }
+ HDassert(ret >= 0);
+ } /* end if */
+ else if(1 == (u % 3)) {
+
+ ret = H5Lcreate_hard(fid, "/", gid, linkname,
+ H5P_DEFAULT, H5P_DEFAULT);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ns_grp_d: H5Lcreate_hard() failed.";
+ }
+ HDassert(ret >= 0);
+ } /* end else-if */
+ else {
+
+ HDassert(2 == (u % 3));
+
+ ret = H5Lcreate_external("external.h5", "/ext", gid, linkname,
+ H5P_DEFAULT, H5P_DEFAULT);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ns_grp_d: H5Lcreate_external() failed.";
+ }
+ HDassert(ret >= 0);
+ } /* end else */
+
+ u++;
+
+ } /* end while */
+
+ if ( pass ) {
+
+ ret = H5Pclose(gcpl);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ns_grp_d: H5Pclose() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ if ( pass ) {
+
+ ret = H5Gclose(gid);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ns_grp_d: H5Gclose() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ return;
+
+} /* ns_grp_d() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: vrfy_ns_grp_d
+ *
+ * Purpose: Verify a dense "new style" group, with 'nlinks'
+ * soft/hard/external links in it in the specified file.
+ *
+ * If pass is false on entry, do nothing.
+ *
+ * If an error is detected, set pass to FALSE, and set
+ * failure_mssg to point to an appropriate error message.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 9/14/15
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+vrfy_ns_grp_d(hid_t fid, const char *group_name, unsigned nlinks)
+{
+ hid_t gid = -1;
+ hid_t gcpl = -1;
+ H5G_info_t grp_info;
+ unsigned crt_order_flags = 0;
+ unsigned u;
+ herr_t ret;
+
+ if ( pass ) {
+
+ gid = H5Gopen2(fid, group_name, H5P_DEFAULT);
+
+ if ( gid <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_d: H5Gopen2() failed.";
+ }
+ HDassert(gid > 0);
+ }
+
+ if ( pass ) {
+
+ gcpl = H5Gget_create_plist(gid);
+
+ if ( gcpl <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_d: H5Gget_create_plist() failed.";
+ }
+ HDassert(gcpl > 0);
+ }
+
+ if ( pass ) {
+
+ ret = H5Pget_link_creation_order(gcpl, &crt_order_flags);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg =
+ "vrfy_ns_grp_d: H5Pget_link_creation_order() failed.";
+ }
+ else if ( H5P_CRT_ORDER_TRACKED != crt_order_flags ) {
+
+ pass = FALSE;
+ failure_mssg =
+ "vrfy_ns_grp_d: H5P_CRT_ORDER_TRACKED != crt_order_flags";
+ }
+ HDassert(ret >= 0);
+ HDassert(H5P_CRT_ORDER_TRACKED == crt_order_flags);
+ }
+
+ if ( pass ) {
+
+ ret = H5Pclose(gcpl);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_d: H5Pclose() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ if ( pass ) {
+
+ memset(&grp_info, 0, sizeof(grp_info));
+ ret = H5Gget_info(gid, &grp_info);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_d: H5Gget_info() failed.";
+ }
+ else if ( H5G_STORAGE_TYPE_DENSE != grp_info.storage_type ) {
+
+ pass = FALSE;
+ failure_mssg =
+ "vrfy_ns_grp_d: H5G_STORAGE_TYPE_DENSE != grp_info.storage_type";
+ }
+ else if ( nlinks != grp_info.nlinks ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_d: nlinks != grp_info.nlinks";
+ }
+ else if ( nlinks != grp_info.max_corder ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_d: nlinks != grp_info.max_corder";
+ }
+ else if ( FALSE != grp_info.mounted ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_d: FALSE != grp_info.mounted";
+ }
+ HDassert(ret >= 0);
+ HDassert(H5G_STORAGE_TYPE_DENSE == grp_info.storage_type);
+ HDassert(nlinks == grp_info.nlinks);
+ HDassert(nlinks == grp_info.max_corder);
+ HDassert(false == grp_info.mounted);
+ }
+
+ u = 0;
+ while ( ( pass ) && ( u < nlinks ) ) {
+
+ H5L_info_t lnk_info;
+ char linkname[16];
+ htri_t link_exists;
+
+ sprintf(linkname, "%u", u);
+ link_exists = H5Lexists(gid, linkname, H5P_DEFAULT);
+
+ if ( link_exists < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_d: H5Lexists() failed.";
+ }
+ HDassert(link_exists >= 0);
+
+ memset(&lnk_info, 0, sizeof(lnk_info));
+ ret = H5Lget_info(gid, linkname, &lnk_info, H5P_DEFAULT);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_d: H5Lget_info() failed.";
+ }
+ else if ( TRUE != lnk_info.corder_valid ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_d: TRUE != lnk_info.corder_valid";
+ }
+ else if ( u != lnk_info.corder ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_d: u != lnk_info.corder";
+ }
+ else if ( H5T_CSET_ASCII != lnk_info.cset ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_d: H5T_CSET_ASCII != lnk_info.cset";
+ }
+ HDassert(ret >= 0);
+ HDassert(true == lnk_info.corder_valid);
+ HDassert(u == lnk_info.corder);
+ HDassert(H5T_CSET_ASCII == lnk_info.cset);
+
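+ /* Check the soft/hard/external link rotation used by ns_grp_d(). */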
+ if(0 == (u % 3)) {
+ char *slinkval;
+
+ if ( H5L_TYPE_SOFT != lnk_info.type ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_d: H5L_TYPE_SOFT != lnk_info.type";
+ }
+ else if ( (strlen(group_name) + 1) != lnk_info.u.val_size ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_d: H5L_TYPE_SOFT != lnk_info.type";
+ }
+ HDassert(H5L_TYPE_SOFT == lnk_info.type);
+ HDassert((strlen(group_name) + 1) == lnk_info.u.val_size);
+
+ slinkval = (char *)malloc(lnk_info.u.val_size);
+
+ if ( ! slinkval ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_d: malloc of slinkval failed";
+ }
+ HDassert(slinkval);
+
+ ret = H5Lget_val(gid, linkname, slinkval, lnk_info.u.val_size,
+ H5P_DEFAULT);
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_d: H5Lget_val() failed";
+ }
+ else if ( 0 != strcmp(slinkval, group_name) ) {
+
+ pass = FALSE;
+ failure_mssg =
+ "vrfy_ns_grp_d: 0 != strcmp(slinkval, group_name)";
+ }
+ HDassert(ret >= 0);
+ HDassert(0 == strcmp(slinkval, group_name));
+
+ free(slinkval);
+ } /* end if */
+ else if(1 == (u % 3)) {
+ H5O_info_t root_oinfo;
+
+ if ( H5L_TYPE_HARD != lnk_info.type ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_d: H5L_TYPE_HARD != lnk_info.type";
+ }
+ HDassert(H5L_TYPE_HARD == lnk_info.type);
+
+ memset(&root_oinfo, 0, sizeof(root_oinfo));
+ ret = H5Oget_info(fid, &root_oinfo);
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_d: H5Oget_info() failed.";
+ }
+ else if ( root_oinfo.addr != lnk_info.u.address ) {
+
+ pass = FALSE;
+ failure_mssg =
+ "vrfy_ns_grp_d: root_oinfo.addr != lnk_info.u.address";
+ }
+ HDassert(ret >= 0);
+ HDassert(root_oinfo.addr == lnk_info.u.address);
+ } /* end else-if */
+ else {
+ void *elinkval;
+ const char *file = NULL;
+ const char *path = NULL;
+
+ HDassert(2 == (u % 3));
+
+ if ( H5L_TYPE_EXTERNAL != lnk_info.type ) {
+
+ pass = FALSE;
+ failure_mssg =
+ "vrfy_ns_grp_d: H5L_TYPE_EXTERNAL != lnk_info.type";
+ }
+ HDassert(H5L_TYPE_EXTERNAL == lnk_info.type);
+
+ elinkval = malloc(lnk_info.u.val_size);
+
+ if ( ! elinkval ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_d: malloc of elinkval failed.";
+ }
+ HDassert(elinkval);
+
+ ret = H5Lget_val(gid, linkname, elinkval, lnk_info.u.val_size,
+ H5P_DEFAULT);
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_d: H5Lget_val failed.";
+ }
+ HDassert(ret >= 0);
+
+ ret = H5Lunpack_elink_val(elinkval, lnk_info.u.val_size, NULL,
+ &file, &path);
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_d: H5Lunpack_elink_val failed.";
+ }
+ else if ( 0 != strcmp(file, "external.h5") ) {
+
+ pass = FALSE;
+ failure_mssg =
+ "vrfy_ns_grp_d: 0 != strcmp(file, \"external.h5\").";
+ }
+ else if ( 0 != strcmp(path, "/ext") ) {
+
+ pass = FALSE;
+ failure_mssg =
+ "vrfy_ns_grp_d: 0 != strcmp(path, \"/ext\")";
+ }
+ HDassert(ret >= 0);
+ HDassert(0 == strcmp(file, "external.h5"));
+ HDassert(0 == strcmp(path, "/ext"));
+
+ free(elinkval);
+
+ } /* end else */
+
+ u++;
+
+ } /* end while() */
+
+ if ( pass ) {
+
+ ret = H5Gclose(gid);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ns_grp_d: H5Gclose() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ return;
+
+} /* vrfy_ns_grp_d() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: os_grp_0
+ *
+ * Purpose: Create an empty "old style" group.
+ *
+ * If pass is false on entry, do nothing.
+ *
+ * If an error is detected, set pass to FALSE, and set
+ * failure_mssg to point to an appropriate error message.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 9/14/15
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+os_grp_0(hid_t fid, const char *group_name)
+{
+ hid_t gid = -1;
+ herr_t ret;
+
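+ /* Disable the latest file format while the group is created so that
+ * it uses the original symbol table storage (an "old style" group),
+ * then restore the latest format setting afterwards.
+ */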
+ if ( pass ) { /* turn file format latest off */
+
+ ret = H5Fset_latest_format(fid, FALSE);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "os_grp_0: H5Fset_latest_format() failed(1).";
+ }
+ HDassert(ret >= 0);
+ }
+
+ if ( pass ) {
+
+ gid = H5Gcreate2(fid, group_name, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT);
+ if ( gid <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "os_grp_0: H5Gcreate2() failed.";
+ }
+ HDassert(gid > 0);
+ }
+
+ if ( pass ) {
+
+ ret = H5Gclose(gid);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "os_grp_0: H5Gclose() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ if ( pass ) { /* turn file format latest on */
+
+ ret = H5Fset_latest_format(fid, TRUE);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "os_grp_0: H5Fset_latest_format() failed(2).";
+ }
+ HDassert(ret >= 0);
+ }
+
+ return;
+
+} /* os_grp_0() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: vrfy_os_grp_0
+ *
+ * Purpose: Validate an empty "old style" group.
+ *
+ * If pass is false on entry, do nothing.
+ *
+ * If an error is detected, set pass to FALSE, and set
+ * failure_mssg to point to an appropriate error message.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 9/14/15
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+vrfy_os_grp_0(hid_t fid, const char *group_name)
+{
+ hid_t gid = -1;
+ hid_t gcpl = -1;
+ H5G_info_t grp_info;
+ unsigned crt_order_flags = 0;
+ herr_t ret;
+
+ if ( pass ) {
+
+ gid = H5Gopen2(fid, group_name, H5P_DEFAULT);
+
+ if ( gid <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_os_grp_0: H5Gopen2() failed.";
+ }
+ HDassert(gid > 0);
+ }
+
+ if ( pass ) {
+
+ gcpl = H5Gget_create_plist(gid);
+
+ if ( gcpl <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_os_grp_0: H5Gget_create_plist() failed.";
+ }
+ HDassert(gcpl > 0);
+ }
+
+ if ( pass ) {
+
+ ret = H5Pget_link_creation_order(gcpl, &crt_order_flags);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_os_grp_0: H5Pget_link_creation_order() failed";
+ }
+ else if ( 0 != crt_order_flags ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_os_grp_0: 0 != crt_order_flags";
+ }
+ HDassert(ret >= 0);
+ HDassert(0 == crt_order_flags);
+ }
+
+ if ( pass ) {
+
+ ret = H5Pclose(gcpl);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_os_grp_0: H5Pclose() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ if ( pass ) {
+
+ memset(&grp_info, 0, sizeof(grp_info));
+ ret = H5Gget_info(gid, &grp_info);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_os_grp_0: H5Gget_info() failed.";
+ }
+ else if ( H5G_STORAGE_TYPE_SYMBOL_TABLE != grp_info.storage_type ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_os_grp_0: H5G_STORAGE_TYPE_SYMBOL_TABLE != grp_info.storage_type";
+ }
+ else if ( 0 != grp_info.nlinks ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_os_grp_0: 0 != grp_info.nlinks";
+ }
+ else if ( 0 != grp_info.max_corder ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_os_grp_0: 0 != grp_info.max_corder";
+ }
+ else if ( FALSE != grp_info.mounted ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_os_grp_0: FALSE != grp_info.mounted";
+ }
+ HDassert(ret >= 0);
+ HDassert(H5G_STORAGE_TYPE_SYMBOL_TABLE == grp_info.storage_type);
+ HDassert(0 == grp_info.nlinks);
+ HDassert(0 == grp_info.max_corder);
+ HDassert(false == grp_info.mounted);
+ }
+
+ if ( pass ) {
+
+ ret = H5Gclose(gid);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_os_grp_0: H5Gclose() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ return;
+
+} /* vrfy_os_grp_0() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: os_grp_n
+ *
+ * Purpose: Create an "old style" group, with 'nlinks' soft/hard
+ * links in it.
+ *
+ * If pass is false on entry, do nothing.
+ *
+ * If an error is detected, set pass to FALSE, and set
+ * failure_mssg to point to an appropriate error message.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 9/14/15
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+os_grp_n(hid_t fid, const char *group_name, int proc_num, unsigned nlinks)
+{
+ hid_t gid = -1;
+ unsigned u;
+ herr_t ret;
+
+ if ( pass ) { /* turn file format latest off */
+
+ ret = H5Fset_latest_format(fid, FALSE);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "os_grp_n: H5Fset_latest_format() failed(1).";
+ }
+ HDassert(ret >= 0);
+ }
+
+ if ( pass ) {
+
+ gid = H5Gcreate2(fid, group_name, H5P_DEFAULT, H5P_DEFAULT,
+ H5P_DEFAULT);
+ if ( gid <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "os_grp_n: H5Gcreate2() failed.";
+ }
+ HDassert(gid > 0);
+ }
+
+ HDassert(nlinks > 0);
+
+ u = 0;
+ while ( ( pass ) && ( u < nlinks ) ) {
+
+ char linkname[32];
+
+ sprintf(linkname, "ln%d_%u", proc_num, u);
+
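+ /* Alternate between soft links back to this group (even u) and hard
+ * links to the root group (odd u).
+ */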
+ if(0 == (u % 2)) {
+
+ ret = H5Lcreate_soft(group_name, gid, linkname, H5P_DEFAULT,
+ H5P_DEFAULT);
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "os_grp_n: H5Lcreate_soft() failed.";
+ }
+ HDassert(ret >= 0);
+ } /* end if */
+ else {
+
+ HDassert(1 == (u % 2));
+
+ ret = H5Lcreate_hard(fid, "/", gid, linkname, H5P_DEFAULT,
+ H5P_DEFAULT);
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "os_grp_n: H5Lcreate_hard() failed.";
+ }
+ HDassert(ret >= 0);
+ } /* end else */
+
+ u++;
+
+ } /* end while */
+
+ if ( pass ) {
+
+ ret = H5Gclose(gid);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "os_grp_n: H5Gclose() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ if ( pass ) { /* turn file format latest on */
+
+ ret = H5Fset_latest_format(fid, TRUE);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "os_grp_n: H5Fset_latest_format() failed(2).";
+ }
+ HDassert(ret >= 0);
+ }
+
+ return;
+} /* os_grp_n() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: vrfy_os_grp_n
+ *
+ * Purpose: Validate an "old style" group with 'nlinks' soft/hard
+ * links in it.
+ *
+ * If pass is false on entry, do nothing.
+ *
+ * If an error is detected, set pass to FALSE, and set
+ * failure_mssg to point to an appropriate error message.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 9/14/15
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+vrfy_os_grp_n(hid_t fid, const char *group_name, int proc_num, unsigned nlinks)
+{
+ hid_t gid = -1;
+ hid_t gcpl = -1;
+ H5G_info_t grp_info;
+ unsigned crt_order_flags = 0;
+ unsigned u;
+ herr_t ret;
+
+ if ( pass ) {
+
+ gid = H5Gopen2(fid, group_name, H5P_DEFAULT);
+
+ if ( gid <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_os_grp_n: H5Gopen2() failed";
+ }
+ HDassert(gid > 0);
+ }
+
+ if ( pass ) {
+
+ gcpl = H5Gget_create_plist(gid);
+
+ if ( gcpl <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_os_grp_n: H5Gget_create_plist() failed";
+ }
+ HDassert(gcpl > 0);
+ }
+
+ if ( pass ) {
+
+ ret = H5Pget_link_creation_order(gcpl, &crt_order_flags);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_os_grp_n: H5Pget_link_creation_order";
+ }
+ else if ( 0 != crt_order_flags ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_os_grp_n: 0 != crt_order_flags";
+ }
+ HDassert(ret >= 0);
+ HDassert(0 == crt_order_flags);
+ }
+
+ if ( pass ) {
+
+ ret = H5Pclose(gcpl);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_os_grp_n: H5Pclose() failed";
+ }
+ HDassert(ret >= 0);
+ }
+
+ if ( pass ) {
+
+ memset(&grp_info, 0, sizeof(grp_info));
+
+ ret = H5Gget_info(gid, &grp_info);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_os_grp_n: H5Gget_info() failed";
+ }
+ else if ( H5G_STORAGE_TYPE_SYMBOL_TABLE != grp_info.storage_type ) {
+
+ pass = FALSE;
+ failure_mssg =
+ "vrfy_os_grp_n: H5G_STORAGE_TYPE_SYMBOL_TABLE != grp_info.storage_type";
+ }
+ else if ( nlinks != grp_info.nlinks ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_os_grp_n: nlinks != grp_info.nlinks";
+ }
+ else if ( 0 != grp_info.max_corder ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_os_grp_n: 0 != grp_info.max_corder";
+ }
+ else if ( FALSE != grp_info.mounted ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_os_grp_n: FALSE != grp_info.mounted";
+ }
+ HDassert(ret >= 0);
+ HDassert(H5G_STORAGE_TYPE_SYMBOL_TABLE == grp_info.storage_type);
+ HDassert(nlinks == grp_info.nlinks);
+ HDassert(0 == grp_info.max_corder);
+ HDassert(false == grp_info.mounted);
+ }
+
+ u = 0;
+ while ( ( pass ) && ( u < nlinks ) ) {
+
+ H5L_info_t lnk_info;
+ char linkname[32];
+ htri_t link_exists;
+
+ sprintf(linkname, "ln%d_%u", proc_num, u);
+ link_exists = H5Lexists(gid, linkname, H5P_DEFAULT);
+
+ if ( link_exists < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_os_grp_n: H5Lexists() failed";
+ }
+ HDassert(link_exists >= 0);
+
+ memset(&lnk_info, 0, sizeof(lnk_info));
+ ret = H5Lget_info(gid, linkname, &lnk_info, H5P_DEFAULT);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_os_grp_n: H5Lget_info() failed";
+ }
+ else if ( FALSE != lnk_info.corder_valid ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_os_grp_n: FALSE != lnk_info.corder_valid";
+ }
+ else if ( H5T_CSET_ASCII != lnk_info.cset ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_os_grp_n: H5T_CSET_ASCII != lnk_info.cset";
+ }
+ HDassert(ret >= 0);
+ HDassert(false == lnk_info.corder_valid);
+ HDassert(H5T_CSET_ASCII == lnk_info.cset);
+
+ if(0 == (u % 2)) {
+ char *slinkval;
+
+ if ( H5L_TYPE_SOFT != lnk_info.type ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_os_grp_n: H5L_TYPE_SOFT != lnk_info.type";
+ }
+ else if ( (strlen(group_name) + 1) != lnk_info.u.val_size ) {
+
+ pass = FALSE;
+ failure_mssg =
+ "vrfy_os_grp_n: (strlen(group_name) + 1) != lnk_info.u.val_size";
+ }
+ HDassert(H5L_TYPE_SOFT == lnk_info.type);
+ HDassert((strlen(group_name) + 1) == lnk_info.u.val_size);
+
+ slinkval = (char *)malloc(lnk_info.u.val_size);
+
+ if ( ! slinkval ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_os_grp_n: malloc of slinkval failed";
+ }
+ HDassert(slinkval);
+
+ ret = H5Lget_val(gid, linkname, slinkval, lnk_info.u.val_size,
+ H5P_DEFAULT);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_os_grp_n: H5Lget_val() failed";
+ }
+ else if ( 0 != strcmp(slinkval, group_name) ) {
+
+ pass = FALSE;
+ failure_mssg =
+ "vrfy_os_grp_n: 0 != strcmp(slinkval, group_name)";
+ }
+ HDassert(ret >= 0);
+ HDassert(0 == strcmp(slinkval, group_name));
+
+ free(slinkval);
+ } /* end if */
+ else {
+ H5O_info_t root_oinfo;
+
+ HDassert(1 == (u % 2));
+
+ if ( H5L_TYPE_HARD != lnk_info.type ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_os_grp_n: H5L_TYPE_HARD != lnk_info.type";
+ }
+ HDassert(H5L_TYPE_HARD == lnk_info.type);
+
+ memset(&root_oinfo, 0, sizeof(root_oinfo));
+ ret = H5Oget_info(fid, &root_oinfo);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_os_grp_n: H5Oget_info() failed.";
+ }
+ else if ( root_oinfo.addr != lnk_info.u.address ) {
+
+ pass = FALSE;
+ failure_mssg =
+ "vrfy_os_grp_n: root_oinfo.addr != lnk_info.u.address";
+ }
+ HDassert(ret >= 0);
+ HDassert(root_oinfo.addr == lnk_info.u.address);
+ } /* end else */
+
+ u++;
+
+ } /* end while */
+
+ if ( pass ) {
+
+ ret = H5Gclose(gid);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_os_grp_n: H5Gclose() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ return;
+
+} /* vrfy_os_grp_n() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: ds_ctg_i
+ *
+ * Purpose: Create a contiguous dataset w/int datatype. Write data
+ * to the data set or not as indicated by the write_data
+ * parameter.
+ *
+ * If pass is false on entry, do nothing.
+ *
+ * If an error is detected, set pass to FALSE, and set
+ * failure_mssg to point to an appropriate error message.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 9/14/15
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+ds_ctg_i(hid_t fid, const char *dset_name, hbool_t write_data)
+{
+ int *wdata = NULL;
+ unsigned u;
+ hid_t dsid = -1;
+ hid_t sid = -1;
+ hsize_t dims[1] = {DSET_DIMS};
+ herr_t ret;
+
+ if ( pass ) {
+
+ sid = H5Screate_simple(1, dims, NULL);
+
+ if ( sid <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ds_ctg_i: H5Screate_simple() failed";
+ }
+ HDassert(sid > 0);
+ }
+
+ if ( pass ) {
+
+ dsid = H5Dcreate2(fid, dset_name, H5T_NATIVE_INT, sid, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT);
+
+ if ( dsid <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ds_ctg_i: H5Dcreate2() failed";
+ }
+ HDassert(dsid > 0);
+ }
+
+ if ( pass ) {
+
+ ret = H5Sclose(sid);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ds_ctg_i: H5Sclose() failed";
+ }
+ HDassert(ret >= 0);
+ }
+
+ if ( ( pass ) && ( write_data ) ) {
+
+ wdata = (int *)malloc(sizeof(int) * DSET_DIMS);
+
+ if ( ! wdata ) {
+
+ pass = FALSE;
+ failure_mssg = "ds_ctg_i: malloc of wdata failed.";
+ }
+ HDassert(wdata);
+ }
+
+ if ( ( pass ) && ( write_data ) ) {
+
+ for(u = 0; u < DSET_DIMS; u++)
+ wdata[u] = (int)u;
+
+ ret = H5Dwrite(dsid, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
+ H5P_DEFAULT, wdata);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ds_ctg_i: H5Dwrite() failed.";
+ }
+ HDassert(ret >= 0);
+
+ free(wdata);
+ }
+
+ if ( pass ) {
+
+ ret = H5Dclose(dsid);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ds_ctg_i: H5Dclose() failed";
+ }
+ HDassert(ret >= 0);
+ }
+
+ return;
+
+} /* ds_ctg_i */
+
+
+/*-------------------------------------------------------------------------
+ * Function: vrfy_ds_ctg_i
+ *
+ * Purpose: Validate a contiguous dataset w/int datatype. Validate
+ * data if indicated via the write_data parameter.
+ *
+ * If pass is false on entry, do nothing.
+ *
+ * If an error is detected, set pass to FALSE, and set
+ * failure_mssg to point to an appropriate error message.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 9/14/15
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+vrfy_ds_ctg_i(hid_t fid, const char *dset_name, hbool_t write_data)
+{
+ int *rdata = NULL;
+ unsigned u;
+ hid_t dsid = -1;
+ hid_t sid = -1;
+ hid_t tid = -1;
+ hid_t dcpl = -1;
+ H5D_space_status_t allocation;
+ H5D_layout_t layout;
+ int ndims;
+ hsize_t dims[1], max_dims[1];
+ htri_t type_equal;
+ herr_t ret;
+
+ if ( pass ) {
+
+ dsid = H5Dopen2(fid, dset_name, H5P_DEFAULT);
+
+ if ( dsid <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_i: H5Dopen2() failed.";
+ }
+ HDassert(dsid > 0);
+ }
+
+ if ( pass ) {
+
+ sid = H5Dget_space(dsid);
+
+ if ( sid <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_i: H5Dget_space() failed.";
+ }
+ HDassert(sid > 0);
+ }
+
+ if ( pass ) {
+
+ ndims = H5Sget_simple_extent_ndims(sid);
+
+ if ( 1 != ndims ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_i: 1 != ndims";
+ }
+ HDassert(1 == ndims);
+ }
+
+ if ( pass ) {
+
+ ret = H5Sget_simple_extent_dims(sid, dims, max_dims);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_i: H5Sget_simple_extent_dims() failed";
+ }
+ else if ( DSET_DIMS != dims[0] ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_i: DSET_DIMS != dims[0]";
+ }
+ else if ( DSET_DIMS != max_dims[0] ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_i: DSET_DIMS != max_dims[0]";
+ }
+ HDassert(ret >= 0);
+ HDassert(DSET_DIMS == dims[0]);
+ HDassert(DSET_DIMS == max_dims[0]);
+ }
+
+ if ( pass ) {
+
+ ret = H5Sclose(sid);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_i: H5Sclose() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ if ( pass ) {
+
+ tid = H5Dget_type(dsid);
+
+ if ( tid <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_i: H5Dget_type() failed.";
+ }
+ HDassert(tid > 0);
+ }
+
+ if ( pass ) {
+
+ type_equal = H5Tequal(tid, H5T_NATIVE_INT);
+
+ if ( 1 != type_equal ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_i: type not H5T_NATIVE_INT";
+ }
+ HDassert(1 == type_equal);
+ }
+
+ if ( pass ) {
+
+ ret = H5Tclose(tid);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_i: H5Tclose() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
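+ /* The raw data space should be allocated if and only if data was
+ * written to the dataset.
+ */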
+ if ( pass ) {
+
+ ret = H5Dget_space_status(dsid, &allocation);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_i: H5Dget_space_status() failed.";
+ }
+ else if ( write_data && ( allocation != H5D_SPACE_STATUS_ALLOCATED ) ) {
+
+ pass = FALSE;
+ failure_mssg =
+ "vrfy_ds_ctg_i: write_data && allocation != H5D_SPACE_STATUS_ALLOCATED";
+ }
+ else if ( !write_data &&
+ ( allocation != H5D_SPACE_STATUS_NOT_ALLOCATED ) ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_i: !write_data && allocation != H5D_SPACE_STATUS_NOT_ALLOCATED";
+ }
+ HDassert(ret >= 0);
+ HDassert((write_data && allocation == H5D_SPACE_STATUS_ALLOCATED) ||
+ (!write_data && allocation == H5D_SPACE_STATUS_NOT_ALLOCATED));
+ }
+
+ if ( pass ) {
+
+ dcpl = H5Dget_create_plist(dsid);
+
+ if ( dcpl <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_i: H5Dget_create_plist() failed.";
+ }
+ HDassert(dcpl > 0);
+ }
+
+ if ( pass ) {
+
+ layout = H5Pget_layout(dcpl);
+
+ if ( H5D_CONTIGUOUS != layout ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_i: H5D_CONTIGUOUS != layout";
+ }
+ HDassert(H5D_CONTIGUOUS == layout);
+ }
+
+ if ( pass ) {
+
+ ret = H5Pclose(dcpl);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_i: H5Pclose() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ if ( ( pass ) && ( write_data ) ) {
+
+ rdata = (int *)malloc(sizeof(int) * DSET_DIMS);
+
+ if ( ! rdata ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_i: malloc of rdata failed.";
+ }
+ HDassert(rdata);
+ }
+
+ if ( ( pass ) && ( write_data ) ) {
+
+ ret = H5Dread(dsid, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
+ H5P_DEFAULT, rdata);
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_i: H5Dread() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ if ( ( pass ) && ( write_data ) ) {
+
+ for(u = 0; u < DSET_DIMS; u++) {
+
+ if ( (int)u != rdata[u] ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_i: u != rdata[u].";
+ break;
+ }
+ HDassert((int)u == rdata[u]);
+ }
+
+ free(rdata);
+ } /* end if */
+
+ if ( pass ) {
+
+ ret = H5Dclose(dsid);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_i: H5Dclose() failed";
+ }
+ HDassert(ret >= 0);
+ }
+
+ return;
+
+} /* vrfy_ds_ctg_i() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: ds_chk_i
+ *
+ * Purpose: Create a chunked dataset w/int datatype. Write data
+ * to the data set or not as indicated by the write_data
+ * parameter.
+ *
+ * If pass is false on entry, do nothing.
+ *
+ * If an error is detected, set pass to FALSE, and set
+ * failure_mssg to point to an appropriate error message.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 9/14/15
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+ds_chk_i(hid_t fid, const char *dset_name, hbool_t write_data)
+{
+ int *wdata = NULL;
+ unsigned u;
+ hid_t dsid = -1;
+ hid_t dcpl = -1;
+ hid_t sid = -1;
+ hsize_t dims[1] = {DSET_DIMS};
+ hsize_t chunk_dims[1] = {DSET_CHUNK_DIMS};
+ herr_t ret;
+
+ if ( pass ) {
+
+ sid = H5Screate_simple(1, dims, NULL);
+
+ if ( sid <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ds_chk_i: H5Screate_simple() failed.";
+ }
+ HDassert(sid > 0);
+ }
+
+ if ( pass ) {
+
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+
+ if ( dcpl <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ds_chk_i: H5Pcreate() failed.";
+ }
+ HDassert(dcpl > 0);
+ }
+
+ if ( pass ) {
+
+ ret = H5Pset_chunk(dcpl, 1, chunk_dims);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ds_chk_i: H5Pset_chunk() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ if ( pass ) {
+
+ dsid = H5Dcreate2(fid, dset_name, H5T_NATIVE_INT, sid,
+ H5P_DEFAULT, dcpl, H5P_DEFAULT);
+
+ if ( dsid <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ds_chk_i: H5Dcreate2() failed";
+ }
+ HDassert(dsid > 0);
+ }
+
+ if ( pass ) {
+
+ ret = H5Pclose(dcpl);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ds_chk_i: H5Pclose() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ if ( pass ) {
+
+ ret = H5Sclose(sid);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ds_chk_i: H5Sclose() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ if ( ( pass ) && ( write_data ) ) {
+
+ wdata = (int *)malloc(sizeof(int) * DSET_DIMS);
+
+ if ( ! wdata ) {
+
+ pass = FALSE;
+ failure_mssg = "ds_chk_i: malloc of wdata failed.";
+ }
+ HDassert(wdata);
+ }
+
+ if ( ( pass ) && ( write_data ) ) {
+
+ for(u = 0; u < DSET_DIMS; u++)
+ wdata[u] = (int)u;
+
+ ret = H5Dwrite(dsid, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
+ H5P_DEFAULT, wdata);
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ds_chk_i: H5Dwrite() failed.";
+ }
+ HDassert(ret >= 0);
+
+ free(wdata);
+ } /* end if */
+
+ if ( pass ) {
+
+ ret = H5Dclose(dsid);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ds_chk_i: H5Dclose() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ return;
+
+} /* ds_chk_i */
+
+
+/*-------------------------------------------------------------------------
+ * Function: vrfy_ds_chk_i
+ *
+ * Purpose: Validate a chunked dataset w/int datatype. Validate
+ * data if indicated via the write_data parameter.
+ *
+ * If pass is false on entry, do nothing.
+ *
+ * If an error is detected, set pass to FALSE, and set
+ * failure_mssg to point to an appropriate error message.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 9/14/15
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+vrfy_ds_chk_i(hid_t fid, const char *dset_name, hbool_t write_data)
+{
+ int *rdata = NULL;
+ unsigned u;
+ hid_t dsid = -1;
+ hid_t sid = -1;
+ hid_t tid = -1;
+ hid_t dcpl = -1;
+ H5D_space_status_t allocation;
+ H5D_layout_t layout;
+ int ndims;
+ hsize_t dims[1], max_dims[1], chunk_dims[1];
+ htri_t type_equal;
+ herr_t ret;
+
+ if ( pass ) {
+
+ dsid = H5Dopen2(fid, dset_name, H5P_DEFAULT);
+
+ if ( dsid <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_chk_i: H5Dopen2() failed.";
+ }
+ HDassert(dsid > 0);
+ }
+
+ if ( pass ) {
+
+ sid = H5Dget_space(dsid);
+
+ if ( sid <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_chk_i: H5Dget_space() failed.";
+ }
+ HDassert(sid > 0);
+ }
+
+ if ( pass ) {
+
+ ndims = H5Sget_simple_extent_ndims(sid);
+
+ if ( 1 != ndims ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_chk_i: 1 != ndims";
+ }
+ HDassert(1 == ndims);
+ }
+
+ if ( pass ) {
+
+ ret = H5Sget_simple_extent_dims(sid, dims, max_dims);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_chk_i: H5Sget_simple_extent_dims() failed";
+ }
+ else if ( DSET_DIMS != dims[0] ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_chk_i: DSET_DIMS != dims[0]";
+ }
+ else if ( DSET_DIMS != max_dims[0] ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_chk_i: DSET_DIMS != max_dims[0]";
+ }
+ HDassert(ret >= 0);
+ HDassert(DSET_DIMS == dims[0]);
+ HDassert(DSET_DIMS == max_dims[0]);
+ }
+
+ if ( pass ) {
+
+ ret = H5Sclose(sid);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_chk_i: H5Sclose() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ if ( pass ) {
+
+ tid = H5Dget_type(dsid);
+
+ if ( tid <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_chk_i: H5Dget_type() failed.";
+ }
+ HDassert(tid > 0);
+ }
+
+ if ( pass ) {
+
+ type_equal = H5Tequal(tid, H5T_NATIVE_INT);
+
+ if ( 1 != type_equal ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_chk_i: tid != H5T_NATIVE_INT";
+ }
+ HDassert(1 == type_equal);
+ }
+
+ if ( pass ) {
+
+ ret = H5Tclose(tid);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_chk_i: H5Tclose() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ if ( pass ) {
+
+ ret = H5Dget_space_status(dsid, &allocation);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_chk_i: H5Dget_space_status() failed.";
+ }
+ else if ( write_data && ( allocation != H5D_SPACE_STATUS_ALLOCATED ) ) {
+
+ pass = FALSE;
+ failure_mssg =
+ "vrfy_ds_chk_i: write_data && allocation != H5D_SPACE_STATUS_ALLOCATED";
+ }
+ else if ( !write_data &&
+ ( allocation != H5D_SPACE_STATUS_NOT_ALLOCATED ) ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_chk_i: !write_data && allocation != H5D_SPACE_STATUS_NOT_ALLOCATED";
+ }
+ HDassert(ret >= 0);
+ HDassert((write_data && allocation == H5D_SPACE_STATUS_ALLOCATED) ||
+ (!write_data && allocation == H5D_SPACE_STATUS_NOT_ALLOCATED));
+ }
+
+ if ( pass ) {
+
+ dcpl = H5Dget_create_plist(dsid);
+
+ if ( dcpl <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_chk_i: H5Dget_create_plist() failed.";
+ }
+ HDassert(dcpl > 0);
+ }
+
+ if ( pass ) {
+
+ layout = H5Pget_layout(dcpl);
+
+ if ( H5D_CHUNKED != layout ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_chk_i: H5D_CHUNKED != layout";
+ }
+ HDassert(H5D_CHUNKED == layout);
+ }
+
+ if ( pass ) {
+
+ ret = H5Pget_chunk(dcpl, 1, chunk_dims);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_chk_i: H5Pget_chunk";
+ }
+ else if ( DSET_CHUNK_DIMS != chunk_dims[0] ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_chk_i: ";
+ }
+ HDassert(ret >= 0);
+ HDassert(DSET_CHUNK_DIMS == chunk_dims[0]);
+ }
+
+ if ( pass ) {
+
+ ret = H5Pclose(dcpl);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_chk_i: H5Pclose() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ if ( ( pass ) && ( write_data ) ) {
+
+ rdata = (int *)malloc(sizeof(int) * DSET_DIMS);
+
+ if ( ! rdata ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_chk_i: malloc of rdata failed.";
+ }
+ HDassert(rdata);
+ }
+
+ if ( ( pass ) && ( write_data ) ) {
+
+ ret = H5Dread(dsid, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT,
+ rdata);
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_chk_i: H5Dread() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ if ( ( pass ) && ( write_data ) ) {
+
+ for(u = 0; u < DSET_DIMS; u++) {
+
+ if ( (int)u != rdata[u] ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_chk_i: u != rdata[u]";
+ break;
+ }
+ HDassert((int)u == rdata[u]);
+ }
+
+ free(rdata);
+ } /* end if */
+
+
+ if ( pass ) {
+
+ ret = H5Dclose(dsid);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_chk_i: H5Dclose() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ return;
+
+} /* vrfy_ds_chk_i() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: ds_cpt_i
+ *
+ * Purpose: Create a compact dataset w/int datatype. Write data
+ * to the data set or not as indicated by the write_data
+ * parameter.
+ *
+ * If pass is false on entry, do nothing.
+ *
+ * If an error is detected, set pass to FALSE, and set
+ * failure_mssg to point to an appropriate error message.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 9/14/15
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+ds_cpt_i(hid_t fid, const char *dset_name, hbool_t write_data)
+{
+ int *wdata = NULL;
+ unsigned u;
+ hid_t dsid = -1;
+ hid_t dcpl = -1;
+ hid_t sid = -1;
+ hsize_t dims[1] = {DSET_COMPACT_DIMS};
+ herr_t ret;
+
+ if ( pass ) {
+
+ sid = H5Screate_simple(1, dims, NULL);
+
+ if ( sid <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ds_cpt_i: H5Screate_simple() failed.";
+ }
+ HDassert(sid > 0);
+ }
+
+ if ( pass ) {
+
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+
+ if ( dcpl <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ds_cpt_i: H5Pcreate() failed.";
+ }
+ HDassert(dcpl > 0);
+ }
+
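+ /* Request compact layout, which stores the raw data directly in the
+ * dataset's object header rather than in a separate contiguous block
+ * or in chunks.
+ */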
+ if ( pass ) {
+
+ ret = H5Pset_layout(dcpl, H5D_COMPACT);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ds_cpt_i: H5Pset_layout() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ if ( pass ) {
+
+ dsid = H5Dcreate2(fid, dset_name, H5T_NATIVE_INT, sid,
+ H5P_DEFAULT, dcpl, H5P_DEFAULT);
+
+ if ( dsid <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ds_cpt_i: H5Dcreate2() failed.";
+ }
+ HDassert(dsid > 0);
+ }
+
+ if ( pass ) {
+
+ ret = H5Pclose(dcpl);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ds_cpt_i: H5Pclose() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ if ( pass ) {
+
+ ret = H5Sclose(sid);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ds_cpt_i: H5Sclose() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ if ( ( pass ) && ( write_data ) ) {
+
+ wdata = (int *)malloc(sizeof(int) * DSET_COMPACT_DIMS);
+
+ if ( ! wdata ) {
+
+ pass = FALSE;
+ failure_mssg = "ds_cpt_i: malloc of wdata failed.";
+ }
+ HDassert(wdata);
+ }
+
+ if ( ( pass ) && ( write_data ) ) {
+
+ for(u = 0; u < DSET_COMPACT_DIMS; u++)
+ wdata[u] = (int)u;
+
+ ret = H5Dwrite(dsid, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
+ H5P_DEFAULT, wdata);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ds_cpt_i: H5Dwrite() failed.";
+ }
+ HDassert(ret >= 0);
+
+ free(wdata);
+ } /* end if */
+
+ if ( pass ) {
+
+ ret = H5Dclose(dsid);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ds_cpt_i: H5Dclose() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ return;
+
+} /* ds_cpt_i() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: vrfy_ds_cpt_i
+ *
+ * Purpose: Validate a compact dataset w/int datatype. Validate
+ * data if indicated via the write_data parameter.
+ *
+ * If pass is false on entry, do nothing.
+ *
+ * If an error is detected, set pass to FALSE, and set
+ * failure_mssg to point to an appropriate error message.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 9/14/15
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+vrfy_ds_cpt_i(hid_t fid, const char *dset_name, hbool_t write_data)
+{
+ int *rdata = NULL;
+ unsigned u;
+ hid_t dsid = -1;
+ hid_t sid = -1;
+ hid_t tid = -1;
+ hid_t dcpl = -1;
+ H5D_space_status_t allocation;
+ H5D_layout_t layout;
+ int ndims;
+ hsize_t dims[1], max_dims[1];
+ htri_t type_equal;
+ herr_t ret;
+
+ if ( pass ) {
+
+ dsid = H5Dopen2(fid, dset_name, H5P_DEFAULT);
+
+ if ( dsid <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_cpt_i: H5Dopen2() failed.";
+ }
+ HDassert(dsid > 0);
+ }
+
+ if ( pass ) {
+
+ sid = H5Dget_space(dsid);
+
+ if ( sid <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_cpt_i: H5Dget_space() failed.";
+ }
+ HDassert(sid > 0);
+ }
+
+ if ( pass ) {
+
+ ndims = H5Sget_simple_extent_ndims(sid);
+
+ if ( 1 != ndims ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_cpt_i: 1 != ndims";
+ }
+ HDassert(1 == ndims);
+ }
+
+ if ( pass ) {
+
+ ret = H5Sget_simple_extent_dims(sid, dims, max_dims);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_cpt_i: H5Sget_simple_extent_dims() failed";
+ }
+ else if ( DSET_COMPACT_DIMS != dims[0] ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_cpt_i: DSET_COMPACT_DIMS != dims[0]";
+ }
+ else if ( DSET_COMPACT_DIMS != max_dims[0] ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_cpt_i: DSET_COMPACT_DIMS != max_dims[0]";
+ }
+ HDassert(ret >= 0);
+ HDassert(DSET_COMPACT_DIMS == dims[0]);
+ HDassert(DSET_COMPACT_DIMS == max_dims[0]);
+ }
+
+ if ( pass ) {
+
+ ret = H5Sclose(sid);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_cpt_i: H5Sclose() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ if ( pass ) {
+
+ tid = H5Dget_type(dsid);
+
+ if ( tid <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_cpt_i: H5Dget_type() failed.";
+ }
+ HDassert(tid > 0);
+ }
+
+ if ( pass ) {
+
+ type_equal = H5Tequal(tid, H5T_NATIVE_INT);
+
+ if ( 1 != type_equal ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_cpt_i: type != H5T_NATIVE_INT";
+ }
+ HDassert(1 == type_equal);
+ }
+
+ if ( pass ) {
+
+ ret = H5Tclose(tid);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_cpt_i: H5Tclose() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ if ( pass ) {
+
+ ret = H5Dget_space_status(dsid, &allocation);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_cpt_i: H5Dget_space_status() failed.";
+ }
+ else if ( H5D_SPACE_STATUS_ALLOCATED != allocation ) {
+
+ pass = FALSE;
+ failure_mssg =
+ "vrfy_ds_cpt_i: H5D_SPACE_STATUS_ALLOCATED != allocation";
+ }
+ HDassert(ret >= 0);
+ HDassert(H5D_SPACE_STATUS_ALLOCATED == allocation);
+ }
+
+ if ( pass ) {
+
+ dcpl = H5Dget_create_plist(dsid);
+
+ if ( dcpl <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_cpt_i: H5Dget_create_plist() failed.";
+ }
+ HDassert(dcpl > 0);
+ }
+
+ if ( pass ) {
+
+ layout = H5Pget_layout(dcpl);
+
+ if ( H5D_COMPACT != layout ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_cpt_i: H5D_COMPACT != layout";
+ }
+ HDassert(H5D_COMPACT == layout);
+ }
+
+ if ( pass ) {
+
+ ret = H5Pclose(dcpl);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_cpt_i: H5Pclose() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ if ( ( pass ) && ( write_data ) ) {
+
+ rdata = (int *)malloc(sizeof(int) * DSET_COMPACT_DIMS);
+
+ if ( ! rdata ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_cpt_i: malloc of rdata failed.";
+ }
+ HDassert(rdata);
+ }
+
+ if ( ( pass ) && ( write_data ) ) {
+
+ ret = H5Dread(dsid, H5T_NATIVE_INT, H5S_ALL, H5S_ALL,
+ H5P_DEFAULT, rdata);
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_cpt_i: H5Dread() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ if ( ( pass ) && ( write_data ) ) {
+
+ for(u = 0; u < DSET_COMPACT_DIMS; u++) {
+
+ if ( (int)u != rdata[u] ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_cpt_i: (int)u != rdata[u]";
+ break;
+ }
+ HDassert((int)u == rdata[u]);
+ }
+
+ free(rdata);
+ } /* end if */
+
+ if ( pass ) {
+
+ ret = H5Dclose(dsid);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_cpt_i: H5Dclose() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ return;
+
+} /* vrfy_ds_cpt_i() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: ds_ctg_v
+ *
+ * Purpose: Create a contiguous dataset w/variable-length datatype.
+ * Write data to the data set or not as indicated by the
+ * write_data parameter.
+ *
+ * If pass is false on entry, do nothing.
+ *
+ * If an error is detected, set pass to FALSE, and set
+ * failure_mssg to point to an appropriate error message.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 9/14/15
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+ds_ctg_v(hid_t fid, const char *dset_name, hbool_t write_data)
+{
+ hid_t dsid = -1;
+ hid_t sid = -1;
+ hid_t tid = -1;
+ hsize_t dims[1] = {DSET_SMALL_DIMS};
+ herr_t ret;
+ hvl_t *wdata = NULL;
+ unsigned u;
+
+ if ( pass ) {
+
+ sid = H5Screate_simple(1, dims, NULL);
+
+ if ( sid <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ds_ctg_v: H5Screate_simple";
+ }
+ HDassert(sid > 0);
+ }
+
+ if ( pass ) {
+
+ tid = H5Tvlen_create(H5T_NATIVE_INT);
+
+ if ( tid <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ds_ctg_v: H5Tvlen_create() failed.";
+ }
+ HDassert(tid > 0);
+ }
+
+ if ( pass ) {
+
+ dsid = H5Dcreate2(fid, dset_name, tid, sid, H5P_DEFAULT,
+ H5P_DEFAULT, H5P_DEFAULT);
+
+ if ( dsid <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ds_ctg_v: H5Dcreate2() failed.";
+ }
+ HDassert(dsid > 0);
+ }
+
+ if ( ( pass ) && ( write_data ) ) {
+
+ wdata = (hvl_t *)malloc(sizeof(hvl_t) * DSET_SMALL_DIMS);
+
+ if ( ! wdata ) {
+
+ pass = FALSE;
+ failure_mssg = "ds_ctg_v: malloc of wdata failed.";
+ }
+ HDassert(wdata);
+ }
+
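+ /* Give each variable-length element its own buffer of (u % 10) + 1
+ * integers so that element lengths vary across the dataset.
+ */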
+ if ( ( pass ) && ( write_data ) ) {
+
+ for(u = 0; u < DSET_SMALL_DIMS; u++) {
+
+ int *tdata;
+ unsigned len;
+ unsigned v;
+
+ len = (u % 10) + 1;
+ tdata = (int *)malloc(sizeof(int) * len);
+
+ if ( !tdata ) {
+
+ pass = FALSE;
+ failure_mssg = "ds_ctg_v: malloc of tdata failed.";
+ break;
+ }
+ HDassert(tdata);
+
+ for(v = 0; v < len; v++)
+ tdata[v] = (int)(u + v);
+
+ wdata[u].len = len;
+ wdata[u].p = tdata;
+ } /* end for */
+ }
+
+ if ( ( pass ) && ( write_data ) ) {
+
+ ret = H5Dwrite(dsid, tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, wdata);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ds_ctg_v: H5Dwrite() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ if ( ( pass ) && ( write_data ) ) {
+
+ ret = H5Dvlen_reclaim(tid, sid, H5P_DEFAULT, wdata);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ds_ctg_v: H5Dvlen_reclaim() failed.";
+ }
+ HDassert(ret >= 0);
+
+ free(wdata);
+
+ } /* end if */
+
+ if ( pass ) {
+
+ ret = H5Sclose(sid);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ds_ctg_v: H5Sclose() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ if ( pass ) {
+
+ ret = H5Tclose(tid);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ds_ctg_v: H5Tclose() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ if ( pass ) {
+
+ ret = H5Dclose(dsid);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "ds_ctg_v: H5Dclose() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ return;
+
+} /* ds_ctg_v() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: vrfy_ds_ctg_v
+ *
+ * Purpose: Validate a contiguous dataset w/variable-length datatype.
+ * Validate data if indicated via the write_data parameter.
+ *
+ * If pass is false on entry, do nothing.
+ *
+ * If an error is detected, set pass to FALSE, and set
+ * failure_mssg to point to an appropriate error message.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 9/14/15
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+vrfy_ds_ctg_v(hid_t fid, const char *dset_name, hbool_t write_data)
+{
+ hid_t dsid = -1;
+ hid_t sid = -1;
+ hid_t tid = -1;
+ hid_t tmp_tid = -1;
+ hid_t dcpl = -1;
+ H5D_space_status_t allocation;
+ H5D_layout_t layout;
+ int ndims;
+ hsize_t dims[1], max_dims[1];
+ htri_t type_equal;
+ hvl_t *rdata = NULL;
+ unsigned u;
+ herr_t ret;
+
+ if ( pass ) {
+
+ dsid = H5Dopen2(fid, dset_name, H5P_DEFAULT);
+
+ if ( dsid <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_v: H5Dopen2() failed.";
+ }
+ HDassert(dsid > 0);
+ }
+
+ if ( pass ) {
+
+ sid = H5Dget_space(dsid);
+
+ if ( sid <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_v: H5Dget_space() failed";
+ }
+ HDassert(sid > 0);
+ }
+
+ if ( pass ) {
+
+ ndims = H5Sget_simple_extent_ndims(sid);
+
+ if ( 1 != ndims ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_v: 1 != ndims";
+ }
+ HDassert(1 == ndims);
+ }
+
+ if ( pass ) {
+
+ ret = H5Sget_simple_extent_dims(sid, dims, max_dims);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_v: H5Sget_simple_extent_dims() failed.";
+ }
+ else if ( DSET_SMALL_DIMS != dims[0] ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_v: DSET_SMALL_DIMS != dims[0]";
+ }
+ else if ( DSET_SMALL_DIMS != max_dims[0] ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_v: DSET_SMALL_DIMS != max_dims[0]";
+ }
+ HDassert(ret >= 0);
+ HDassert(DSET_SMALL_DIMS == dims[0]);
+ HDassert(DSET_SMALL_DIMS == max_dims[0]);
+ }
+
+ if ( pass ) {
+
+ tid = H5Dget_type(dsid);
+
+ if ( tid <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_v: H5Dget_type() failed.";
+ }
+ HDassert(tid > 0);
+ }
+
+ if ( pass ) {
+
+ tmp_tid = H5Tvlen_create(H5T_NATIVE_INT);
+
+ if ( tmp_tid <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_v: H5Tvlen_create() failed.";
+ }
+ HDassert(tmp_tid > 0);
+ }
+
+ if ( pass ) {
+
+ type_equal = H5Tequal(tid, tmp_tid);
+
+ if ( 1 != type_equal ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_v: type != vlen H5T_NATIVE_INT";
+ }
+ HDassert(1 == type_equal);
+ }
+
+ if ( pass ) {
+
+ ret = H5Tclose(tmp_tid);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_v: H5Tclose() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ if ( pass ) {
+
+ ret = H5Dget_space_status(dsid, &allocation);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_v: H5Dget_space_status() failed";
+ }
+ else if ( write_data && (allocation != H5D_SPACE_STATUS_ALLOCATED) ) {
+
+ pass = FALSE;
+ failure_mssg =
+ "vrfy_ds_ctg_v: write_data && allocation != H5D_SPACE_STATUS_ALLOCATED";
+ }
+ else if ( !write_data &&
+ ( allocation != H5D_SPACE_STATUS_NOT_ALLOCATED ) ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_v: !write_data && allocation != H5D_SPACE_STATUS_NOT_ALLOCATED";
+ }
+ HDassert(ret >= 0);
+ HDassert((write_data && allocation == H5D_SPACE_STATUS_ALLOCATED) ||
+ (!write_data && allocation == H5D_SPACE_STATUS_NOT_ALLOCATED));
+ }
+
+ if ( pass ) {
+
+ dcpl = H5Dget_create_plist(dsid);
+
+ if ( dcpl <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_v: H5Dget_create_plist() failed.";
+ }
+ HDassert(dcpl > 0);
+ }
+
+ if ( pass ) {
+
+ layout = H5Pget_layout(dcpl);
+
+ if ( H5D_CONTIGUOUS != layout ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_v: H5D_CONTIGUOUS != layout";
+ }
+ HDassert(H5D_CONTIGUOUS == layout);
+ }
+
+ if ( pass ) {
+
+ ret = H5Pclose(dcpl);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_v: H5Pclose() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ if ( ( pass ) && ( write_data ) ) {
+
+ rdata = (hvl_t *)malloc(sizeof(hvl_t) * DSET_SMALL_DIMS);
+
+ if ( !rdata ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_v: malloc of rdata failed.";
+ }
+ HDassert(rdata);
+ }
+
+ if ( ( pass ) && ( write_data ) ) {
+
+ ret = H5Dread(dsid, tid, H5S_ALL, H5S_ALL, H5P_DEFAULT, rdata);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_v: H5Dread() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ if ( ( pass ) && ( write_data ) ) {
+
+ for(u = 0; u < DSET_SMALL_DIMS; u++) {
+ unsigned len;
+ unsigned v;
+
+ len = (unsigned)rdata[u].len;
+ for(v = 0; v < len; v++) {
+ int *tdata = (int *)rdata[u].p;
+
+ if ( !tdata ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_v: !tdata";
+ break;
+ }
+ else if ( (int)(u + v) != tdata[v] ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_v: (int)(u + v) != tdata[v]";
+ break;
+ }
+ HDassert(tdata);
+ HDassert((int)(u + v) == tdata[v]);
+ } /* end for */
+ } /* end for */
+ }
+
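+ /* Release the variable-length buffers that the library allocated for
+ * each element during the H5Dread() call above.
+ */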
+ if ( ( pass ) && ( write_data ) ) {
+
+ ret = H5Dvlen_reclaim(tid, sid, H5P_DEFAULT, rdata);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_v: H5Dvlen_reclaim() failed.";
+ }
+ HDassert(ret >= 0);
+
+ free(rdata);
+ } /* end if */
+
+ if ( pass ) {
+
+ ret = H5Sclose(sid);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_v: H5Sclose() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ if ( pass ) {
+
+ ret = H5Tclose(tid);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_v: H5Tclose() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ if ( pass ) {
+
+ ret = H5Dclose(dsid);
+
+ if ( ret < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "vrfy_ds_ctg_v: H5Dclose() failed.";
+ }
+ HDassert(ret >= 0);
+ }
+
+ return;
+
+} /* vrfy_ds_ctg_v() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: create_zoo
+ *
+ * Purpose: Given the path to a group, construct a variety of HDF5
+ * data sets, groups, and other objects selected so as to
+ * include instances of all on disk data structures used
+ * in the HDF5 library.
+ *
+ * If pass is false on entry, do nothing.
+ *
+ * If an error is detected, set pass to FALSE, and set
+ * failure_mssg to point to an appropriate error message.
+ *
+ * This function was initially created to assist in testing
+ * the cache image feature of the metadata cache. Thus, it
+ * only concerns itself with the version 2 superblock, and
+ * on disk structures that can occur with this version of
+ * the superblock.
+ *
+ * Note the associated validate_zoo() function.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 9/14/15
+ *
+ *-------------------------------------------------------------------------
+ */
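+
+ /* Typical usage (sketch only): call create_zoo() on a newly created
+ * file, then the companion validate_zoo() on the same or a reopened
+ * file. The group name below is hypothetical, and validate_zoo() is
+ * assumed to take the same arguments:
+ *
+ * create_zoo(fid, "/process_0", 0);
+ * ...
+ * validate_zoo(fid, "/process_0", 0);
+ */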
+
+void
+create_zoo(hid_t fid, const char *base_path, int proc_num)
+{
+ char full_path[1024];
+
+ HDassert(base_path);
+
+ /* Add & verify an empty "new style" group */
+ if ( pass ) {
+ sprintf(full_path, "%s/A", base_path);
+ HDassert(strlen(full_path) < 1024);
+ ns_grp_0(fid, full_path);
+ }
+
+ if ( pass ) {
+ sprintf(full_path, "%s/A", base_path);
+ HDassert(strlen(full_path) < 1024);
+ vrfy_ns_grp_0(fid, full_path);
+ }
+
+ /* Add & verify a compact "new style" group (3 link messages) */
+ if ( pass ) {
+ sprintf(full_path, "%s/B", base_path);
+ HDassert(strlen(full_path) < 1024);
+ ns_grp_c(fid, full_path, 3);
+ }
+
+ if ( pass ) {
+ sprintf(full_path, "%s/B", base_path);
+ HDassert(strlen(full_path) < 1024);
+ vrfy_ns_grp_c(fid, full_path, 3);
+ }
+
+ /* Add & verify a dense "new style" group (w/300 links, in v2 B-tree &
+ * fractal heap)
+ */
+ if ( pass ) {
+ sprintf(full_path, "%s/C", base_path);
+ HDassert(strlen(full_path) < 1024);
+ ns_grp_d(fid, full_path, 300);
+ }
+
+ if ( pass ) {
+ sprintf(full_path, "%s/C", base_path);
+ HDassert(strlen(full_path) < 1024);
+ vrfy_ns_grp_d(fid, full_path, 300);
+ }
+
+ /* Add & verify an empty "old style" group to file */
+ if ( pass ) {
+ sprintf(full_path, "%s/D", base_path);
+ HDassert(strlen(full_path) < 1024);
+ os_grp_0(fid, full_path);
+ }
+
+ if ( pass ) {
+ sprintf(full_path, "%s/D", base_path);
+ HDassert(strlen(full_path) < 1024);
+ vrfy_os_grp_0(fid, full_path);
+ }
+
+ /* Add & verify an "old style" group (w/300 links, in v1 B-tree &
+ * local heap) to file
+ */
+ if ( pass ) {
+ sprintf(full_path, "%s/E", base_path);
+ HDassert(strlen(full_path) < 1024);
+ os_grp_n(fid, full_path, proc_num, 300);
+ }
+
+ if ( pass ) {
+ sprintf(full_path, "%s/E", base_path);
+ HDassert(strlen(full_path) < 1024);
+ vrfy_os_grp_n(fid, full_path, proc_num, 300);
+ }
+
+ /* Add & verify a contiguous dataset w/integer datatype (but no data)
+ * to file
+ */
+ if ( pass ) {
+ sprintf(full_path, "%s/F", base_path);
+ HDassert(strlen(full_path) < 1024);
+ ds_ctg_i(fid, full_path, FALSE);
+ }
+
+ if ( pass ) {
+ sprintf(full_path, "%s/F", base_path);
+ HDassert(strlen(full_path) < 1024);
+ vrfy_ds_ctg_i(fid, full_path, FALSE);
+ }
+
+ /* Add & verify a contiguous dataset w/integer datatype (with data)
+ * to file
+ */
+ if ( pass ) {
+ sprintf(full_path, "%s/G", base_path);
+ HDassert(strlen(full_path) < 1024);
+ ds_ctg_i(fid, full_path, TRUE);
+ }
+
+ if ( pass ) {
+ sprintf(full_path, "%s/G", base_path);
+ HDassert(strlen(full_path) < 1024);
+ vrfy_ds_ctg_i(fid, full_path, TRUE);
+ }
+
+ /* Add & verify a chunked dataset w/integer datatype (but no data)
+ * to file
+ */
+ if ( pass ) {
+ sprintf(full_path, "%s/H", base_path);
+ HDassert(strlen(full_path) < 1024);
+ ds_chk_i(fid, full_path, FALSE);
+ }
+
+ if ( pass ) {
+ sprintf(full_path, "%s/H", base_path);
+ HDassert(strlen(full_path) < 1024);
+ vrfy_ds_chk_i(fid, full_path, FALSE);
+ }
+
+ /* Add & verify a chunked dataset w/integer datatype (and data)
+ * to file
+ */
+ if ( pass ) {
+ sprintf(full_path, "%s/I", base_path);
+ HDassert(strlen(full_path) < 1024);
+ ds_chk_i(fid, full_path, TRUE);
+ }
+
+ if ( pass ) {
+ sprintf(full_path, "%s/I", base_path);
+ HDassert(strlen(full_path) < 1024);
+ vrfy_ds_chk_i(fid, full_path, TRUE);
+ }
+
+ /* Add & verify a compact dataset w/integer datatype (but no data)
+ * to file
+ */
+ if ( pass ) {
+ sprintf(full_path, "%s/J", base_path);
+ HDassert(strlen(full_path) < 1024);
+ ds_cpt_i(fid, full_path, FALSE);
+ }
+
+ if ( pass ) {
+ sprintf(full_path, "%s/J", base_path);
+ HDassert(strlen(full_path) < 1024);
+ vrfy_ds_cpt_i(fid, full_path, FALSE);
+ }
+
+ /* Add & verify a compact dataset w/integer datatype (and data)
+ * to file
+ */
+ if ( pass ) {
+ sprintf(full_path, "%s/K", base_path);
+ HDassert(strlen(full_path) < 1024);
+ ds_cpt_i(fid, full_path, TRUE);
+ }
+
+ if ( pass ) {
+ sprintf(full_path, "%s/K", base_path);
+ HDassert(strlen(full_path) < 1024);
+ vrfy_ds_cpt_i(fid, full_path, TRUE);
+ }
+
+ /* Add & verify a contiguous dataset w/variable-length datatype
+ * (but no data) to file
+ */
+ if ( pass ) {
+ sprintf(full_path, "%s/L", base_path);
+ HDassert(strlen(full_path) < 1024);
+ ds_ctg_v(fid, full_path, FALSE);
+ }
+
+ if ( pass ) {
+ sprintf(full_path, "%s/L", base_path);
+ HDassert(strlen(full_path) < 1024);
+ vrfy_ds_ctg_v(fid, full_path, FALSE);
+ }
+
+ /* Add & verify a contiguous dataset w/variable-length datatype
+ * (and data) to file
+ */
+ if ( pass ) {
+ sprintf(full_path, "%s/M", base_path);
+ HDassert(strlen(full_path) < 1024);
+ ds_ctg_v(fid, full_path, TRUE);
+ }
+
+ if ( pass ) {
+ sprintf(full_path, "%s/M", base_path);
+ HDassert(strlen(full_path) < 1024);
+ vrfy_ds_ctg_v(fid, full_path, TRUE);
+ }
+
+ return;
+
+} /* create_zoo() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: validate_zoo
+ *
+ * Purpose: Given the path to a group in which a "zoo" has been
+ * constructed, validate the objects in the "zoo".
+ *
+ * If pass is false on entry, do nothing.
+ *
+ * If an error is detected, set pass to FALSE, and set
+ * failure_mssg to point to an appropriate error message.
+ *
+ * This function was initially created to assist in testing
+ * the cache image feature of the metadata cache. Thus, it
+ * only concerns itself with the version 2 superblock, and
+ * on disk structures that can occur with this version of
+ * the superblock.
+ *
+ * Note the associated create_zoo() function.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 9/14/15
+ *
+ *-------------------------------------------------------------------------
+ */
+
+void
+validate_zoo(hid_t fid, const char *base_path, int proc_num)
+{
+ char full_path[1024];
+
+ HDassert(base_path);
+
+ /* validate an empty "new style" group */
+ if ( pass ) {
+ sprintf(full_path, "%s/A", base_path);
+ HDassert(strlen(full_path) < 1024);
+ vrfy_ns_grp_0(fid, full_path);
+ }
+
+ /* validate a compact "new style" group (3 link messages) */
+ if ( pass ) {
+ sprintf(full_path, "%s/B", base_path);
+ HDassert(strlen(full_path) < 1024);
+ vrfy_ns_grp_c(fid, full_path, 3);
+ }
+
+ /* validate a dense "new style" group (w/300 links, in v2 B-tree &
+ * fractal heap)
+ */
+ if ( pass ) {
+ sprintf(full_path, "%s/C", base_path);
+ HDassert(strlen(full_path) < 1024);
+ vrfy_ns_grp_d(fid, full_path, 300);
+ }
+
+ /* validate an empty "old style" group in file */
+ if ( pass ) {
+ sprintf(full_path, "%s/D", base_path);
+ HDassert(strlen(full_path) < 1024);
+ vrfy_os_grp_0(fid, full_path);
+ }
+
+ /* validate an "old style" group (w/300 links, in v1 B-tree &
+ * local heap)
+ */
+ if ( pass ) {
+ sprintf(full_path, "%s/E", base_path);
+ HDassert(strlen(full_path) < 1024);
+ vrfy_os_grp_n(fid, full_path, proc_num, 300);
+ }
+
+ /* validate a contiguous dataset w/integer datatype (but no data)
+ * in file.
+ */
+ if ( pass ) {
+ sprintf(full_path, "%s/F", base_path);
+ HDassert(strlen(full_path) < 1024);
+ vrfy_ds_ctg_i(fid, full_path, FALSE);
+ }
+
+ /* validate a contiguous dataset w/integer datatype (with data)
+ * in file.
+ */
+ if ( pass ) {
+ sprintf(full_path, "%s/G", base_path);
+ HDassert(strlen(full_path) < 1024);
+ vrfy_ds_ctg_i(fid, full_path, TRUE);
+ }
+
+ /* validate a chunked dataset w/integer datatype (but no data)
+ * in file
+ */
+ if ( pass ) {
+ sprintf(full_path, "%s/H", base_path);
+ HDassert(strlen(full_path) < 1024);
+ vrfy_ds_chk_i(fid, full_path, FALSE);
+ }
+
+ /* validate a chunked dataset w/integer datatype (and data)
+ * in file
+ */
+ if ( pass ) {
+ sprintf(full_path, "%s/I", base_path);
+ HDassert(strlen(full_path) < 1024);
+ vrfy_ds_chk_i(fid, full_path, TRUE);
+ }
+
+ /* validate a compact dataset w/integer datatype (but no data)
+ * in file
+ */
+ if ( pass ) {
+ sprintf(full_path, "%s/J", base_path);
+ HDassert(strlen(full_path) < 1024);
+ vrfy_ds_cpt_i(fid, full_path, FALSE);
+ }
+
+ /* validate a compact dataset w/integer datatype (and data)
+ * in file
+ */
+ if ( pass ) {
+ sprintf(full_path, "%s/K", base_path);
+ HDassert(strlen(full_path) < 1024);
+ vrfy_ds_cpt_i(fid, full_path, TRUE);
+ }
+
+ /* validate a contiguous dataset w/variable-length datatype
+ * (but no data) in file
+ */
+ if ( pass ) {
+ sprintf(full_path, "%s/L", base_path);
+ HDassert(strlen(full_path) < 1024);
+ vrfy_ds_ctg_v(fid, full_path, FALSE);
+ }
+
+ /* validate a contiguous dataset w/variable-length datatype
+ * (and data) in file
+ */
+ if ( pass ) {
+ sprintf(full_path, "%s/M", base_path);
+ HDassert(strlen(full_path) < 1024);
+ vrfy_ds_ctg_v(fid, full_path, TRUE);
+ }
+
+ return;
+
+} /* validate_zoo() */
+
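
A minimal usage sketch of the two entry points above (hypothetical caller, not part of the patch): it assumes an already-open file fid whose fapl selects the latest library format (version 2 superblock), plus the framework globals pass and failure_mssg used throughout genall5.c. The base group is created first because create_zoo() builds its objects directly under the supplied path.

    hid_t gid = -1;

    pass = TRUE;

    /* create the group that will hold the "zoo" */
    if ( pass ) {
        gid = H5Gcreate2(fid, "/zoo_0", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
        if ( gid < 0 ) {
            pass = FALSE;
            failure_mssg = "H5Gcreate2() failed.";
        }
        else if ( H5Gclose(gid) < 0 ) {
            pass = FALSE;
            failure_mssg = "H5Gclose() failed.";
        }
    }

    /* populate the group, then read every object back */
    create_zoo(fid, "/zoo_0", 0);
    validate_zoo(fid, "/zoo_0", 0);

    if ( !pass )
        HDfprintf(stdout, "zoo round trip failed: %s\n", failure_mssg);

Because both functions do nothing when pass is already FALSE, the caller only needs to inspect pass (and failure_mssg) once, after both calls.
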
diff --git a/test/genall5.h b/test/genall5.h
new file mode 100644
index 0000000..1dce195
--- /dev/null
+++ b/test/genall5.h
@@ -0,0 +1,51 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/* Programmer: John Mainzer
+ * 9/4/15
+ *
+ * This file contains declarations of all functions defined
+ * in genall5.c
+ */
+
+void create_zoo(hid_t fid, const char *base_path, int proc_num);
+void validate_zoo(hid_t fid, const char *base_path, int proc_num);
+
+void ns_grp_0(hid_t fid, const char *group_name);
+void vrfy_ns_grp_0(hid_t fid, const char *group_name);
+
+void ns_grp_c(hid_t fid, const char *group_name, unsigned nlinks);
+void vrfy_ns_grp_c(hid_t fid, const char *group_name, unsigned nlinks);
+
+void ns_grp_d(hid_t fid, const char *group_name, unsigned nlinks);
+void vrfy_ns_grp_d(hid_t fid, const char *group_name, unsigned nlinks);
+
+void os_grp_0(hid_t fid, const char *group_name);
+void vrfy_os_grp_0(hid_t fid, const char *group_name);
+
+void os_grp_n(hid_t fid, const char *group_name, int proc_num, unsigned nlinks);
+void vrfy_os_grp_n(hid_t fid, const char *group_name, int proc_num,
+ unsigned nlinks);
+
+void ds_ctg_i(hid_t fid, const char *dset_name, hbool_t write_data);
+void vrfy_ds_ctg_i(hid_t fid, const char *dset_name, hbool_t write_data);
+
+void ds_chk_i(hid_t fid, const char *dset_name, hbool_t write_data);
+void vrfy_ds_chk_i(hid_t fid, const char *dset_name, hbool_t write_data);
+
+void ds_cpt_i(hid_t fid, const char *dset_name, hbool_t write_data);
+void vrfy_ds_cpt_i(hid_t fid, const char *dset_name, hbool_t write_data);
+
+void ds_ctg_v(hid_t fid, const char *dset_name, hbool_t write_data);
+void vrfy_ds_ctg_v(hid_t fid, const char *dset_name, hbool_t write_data);
+
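
Each creator declared above has a vrfy_ counterpart that re-opens the object and checks what was written, so a single on-disk structure can also be exercised in isolation. A tiny illustrative pairing (the file handle and dataset path are placeholders):

    /* create and populate one contiguous variable-length dataset,
     * then read it back and compare */
    ds_ctg_v(fid, "/vl_data", TRUE);
    vrfy_ds_ctg_v(fid, "/vl_data", TRUE);
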
diff --git a/test/getname.c b/test/getname.c
index e6d26ce..c3f3c54 100644
--- a/test/getname.c
+++ b/test/getname.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/gheap.c b/test/gheap.c
index eafc49d..7564afa 100644
--- a/test/gheap.c
+++ b/test/gheap.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/h5fc_ext1_f.h5 b/test/h5fc_ext1_f.h5
new file mode 100644
index 0000000..b5c5867
--- /dev/null
+++ b/test/h5fc_ext1_f.h5
Binary files differ
diff --git a/test/h5fc_ext1_i.h5 b/test/h5fc_ext1_i.h5
new file mode 100644
index 0000000..960a8d5
--- /dev/null
+++ b/test/h5fc_ext1_i.h5
Binary files differ
diff --git a/test/h5fc_ext2_if.h5 b/test/h5fc_ext2_if.h5
new file mode 100644
index 0000000..88e42e7
--- /dev/null
+++ b/test/h5fc_ext2_if.h5
Binary files differ
diff --git a/test/h5fc_ext2_sf.h5 b/test/h5fc_ext2_sf.h5
new file mode 100644
index 0000000..c59a3ca
--- /dev/null
+++ b/test/h5fc_ext2_sf.h5
Binary files differ
diff --git a/test/h5fc_ext3_isf.h5 b/test/h5fc_ext3_isf.h5
new file mode 100644
index 0000000..d00fc55
--- /dev/null
+++ b/test/h5fc_ext3_isf.h5
Binary files differ
diff --git a/test/h5fc_ext_none.h5 b/test/h5fc_ext_none.h5
new file mode 100644
index 0000000..b1b1553
--- /dev/null
+++ b/test/h5fc_ext_none.h5
Binary files differ
diff --git a/test/h5test.c b/test/h5test.c
index f85e96c..2d77dc8 100644
--- a/test/h5test.c
+++ b/test/h5test.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -93,6 +91,12 @@ static const char *multi_letters = "msbrglo";
/* Length of multi-file VFD filename buffers */
#define H5TEST_MULTI_FILENAME_LEN 1024
+/* Temporary file for sending signal messages */
+#define TMP_SIGNAL_FILE "tmp_signal_file"
+
+/* The # of seconds to wait for the message file--used by h5_wait_message() */
+#define MESSAGE_TIMEOUT 300 /* Timeout in seconds */
+
/* Previous error reporting function */
static H5E_auto2_t err_func = NULL;
@@ -1711,3 +1715,116 @@ error:
return -1;
}
+/*-------------------------------------------------------------------------
+ * Function: h5_send_message
+ *
+ * Purpose: Sends the specified signal.
+ *
+ * In terms of this test framework, a signal consists of a file
+ * on disk. Since there are multiple processes that need to
+ * communicate with each other, they do so by writing and
+ * reading signal files on disk, the names and contents of
+ * which are used to inform a process about when it can
+ * proceed and what it should do next.
+ *
+ * This function writes a signal file. The first argument is
+ * the name of the signal file, and the second and third
+ * arguments are the contents of the first two lines of the
+ * signal file. The last two arguments may be NULL.
+ *
+ * Return: void
+ *
+ * Programmer: Mike McGreevy
+ * August 18, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+h5_send_message(const char *send, const char *arg1, const char *arg2)
+{
+ FILE *signalfile = NULL;
+
+ /* Create signal file (which will send signal to some other process) */
+ signalfile = HDfopen(TMP_SIGNAL_FILE, "w+");
+
+ /* Write messages to signal file, if provided */
+ if(arg2 != NULL) {
+ HDassert(arg1);
+ HDfprintf(signalfile, "%s\n%s\n", arg1, arg2);
+ } /* end if */
+ else if(arg1 != NULL) {
+ HDassert(arg2 == NULL);
+ HDfprintf(signalfile, "%s\n", arg1);
+ } /* end if */
+ else {
+ HDassert(arg1 == NULL);
+ HDassert(arg2 == NULL);
+ }/* end else */
+
+ HDfclose(signalfile);
+
+ HDrename(TMP_SIGNAL_FILE, send);
+} /* h5_send_message() */
+
+/*-------------------------------------------------------------------------
+ * Function: h5_wait_message
+ *
+ * Purpose: Waits for the specified signal.
+ *
+ * In terms of this test framework, a signal consists of a file
+ * on disk. Since there are multiple processes that need to
+ * communicate with each other, they do so by writing and
+ * reading signal files on disk, the names and contents of
+ * which are used to inform a process about when it can
+ * proceed and what it should do next.
+ *
+ * This function continuously attempts to read the specified
+ * signal file from disk, and only continues once it has
+ * successfully done so (i.e., only after another process has
+ * called the "h5_send_message" function to write the signal file).
+ * This function will then immediately remove the file (i.e.,
+ * to indicate that it has been received and can be reused),
+ * and then exit, allowing the calling function to continue.
+ *
+ * Return: SUCCEED on success, FAIL on timeout
+ *
+ * Programmer: Mike McGreevy
+ * August 18, 2010
+ *
+ *-------------------------------------------------------------------------
+ */
+herr_t
+h5_wait_message(const char *waitfor)
+{
+ FILE *returnfile;
+ time_t t0,t1;
+
+ /* Start timer. If this function runs for too long (i.e.,
+ expected signal is never received), it will
+ return failure */
+ HDtime(&t0);
+
+ /* Wait for return signal from some other process */
+ while ((returnfile = HDfopen(waitfor, "r")) == NULL) {
+
+ /* make note of current time. */
+ HDtime(&t1);
+
+ /* If we've been waiting for a signal for too long, then
+ it was likely never sent and we should fail rather
+ than loop infinitely */
+ if(HDdifftime(t1, t0) > MESSAGE_TIMEOUT) {
+ HDfprintf(stdout, "Error communicating between processes. Make sure test script is running.\n");
+ TEST_ERROR;
+ } /* end if */
+ } /* end while */
+
+ HDfclose(returnfile);
+ HDunlink(waitfor);
+
+ return SUCCEED;
+
+error:
+ return FAIL;
+} /* h5_wait_message() */
+
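
An illustrative handshake between two cooperating test processes built on the helpers above (the signal-file names and the division of work are hypothetical; real tests pick their own names and the peer is usually launched by the test script):

    /* Process A: announce that setup is complete, then block until the
     * peer reports back.  h5_wait_message() unlinks the signal file once
     * it has been seen, so the same name can be reused later.
     */
    h5_send_message("signal_a_to_b", "setup_done", NULL);
    if(h5_wait_message("signal_b_to_a") < 0)
        TEST_ERROR

    /* Process B, running concurrently:
     *     if(h5_wait_message("signal_a_to_b") < 0)
     *         TEST_ERROR
     *     ... work that must happen after A's setup ...
     *     h5_send_message("signal_b_to_a", "work_done", NULL);
     */

If the peer never sends its file, h5_wait_message() gives up after MESSAGE_TIMEOUT (300) seconds and returns FAIL.
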
diff --git a/test/h5test.h b/test/h5test.h
index 6806e8f..0e23255 100644
--- a/test/h5test.h
+++ b/test/h5test.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -197,6 +195,9 @@ H5TEST_DLL char* getenv_all(MPI_Comm comm, int root, const char* name);
/* Extern global variables */
H5TEST_DLLVAR int TestVerbosity;
+H5TEST_DLL void h5_send_message(const char *file, const char *arg1, const char *arg2);
+H5TEST_DLL herr_t h5_wait_message(const char *file);
+
#ifdef __cplusplus
}
#endif
diff --git a/test/hyperslab.c b/test/hyperslab.c
index 065d001..c28d872 100644
--- a/test/hyperslab.c
+++ b/test/hyperslab.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Robb Matzke <matzke@llnl.gov>
diff --git a/test/istore.c b/test/istore.c
index f6b83c9..8dc5efd 100644
--- a/test/istore.c
+++ b/test/istore.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Robb Matzke <matzke@llnl.gov>
diff --git a/test/lheap.c b/test/lheap.c
index 61323a9..f5b7f77 100644
--- a/test/lheap.c
+++ b/test/lheap.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/links.c b/test/links.c
index 182eb6c..2d1ef03 100644
--- a/test/links.c
+++ b/test/links.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -21,18 +19,22 @@
*/
/*
+ * This file needs to access private information from the H5FD package.
+ * This file also needs to access the file driver testing code.
+ */
+#define H5FD_FRIEND /*suppress error about including H5FDpkg */
+#define H5FD_TESTING
+
+/*
* This file needs to access private information from the H5G package.
* This file also needs to access the group testing code.
*/
#define H5G_FRIEND /*suppress error about including H5Gpkg */
#define H5G_TESTING
-#define H5FD_FRIEND /*suppress error about including H5FDpkg */
-#define H5FD_TESTING
-
#include "h5test.h"
#include "H5srcdir.h"
-#include "H5FDpkg.h" /* File drivers */
+#include "H5FDpkg.h" /* File drivers */
#include "H5Gpkg.h" /* Groups */
#include "H5Iprivate.h" /* IDs */
#include "H5Lprivate.h" /* Links */
@@ -820,7 +822,7 @@ long_links(hid_t fapl, hbool_t new_format)
static int
toomany(hid_t fapl, hbool_t new_format)
{
- hid_t fid = (-1); /* File ID */
+ hid_t fid = (-1); /* File ID */
hid_t gid = (-1), gid2 = (-1); /* Group IDs */
char objname[NAME_BUF_SIZE]; /* Object name */
char filename[NAME_BUF_SIZE];
@@ -2623,7 +2625,6 @@ external_link_toomany(hid_t fapl, hbool_t new_format)
/* Close first file */
if(H5Fclose(fid) < 0) TEST_ERROR
-
PASSED();
return 0;
@@ -11099,6 +11100,8 @@ corder_delete(hid_t fapl)
for(reopen_file = FALSE; reopen_file <= TRUE; reopen_file++) {
/* Create file */
h5_fixname(FILENAME[0], fapl, filename, sizeof filename);
+
+ /* Creating the file with the latest library format enables paged aggregation with persistent free space */
if((file_id = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) FAIL_STACK_ERROR
/* Close file */
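
The remark about paged aggregation corresponds, roughly, to creating the file with property lists like the following (a sketch reusing the calls that appear later in this patch in test/mf.c; fapl, filename, and file_id are placeholders, and persistent free space is requested explicitly here):

    hid_t fapl2 = H5Pcopy(fapl);              /* latest-format access plist  */
    hid_t fcpl  = H5Pcreate(H5P_FILE_CREATE); /* paged-strategy create plist */

    if(H5Pset_libver_bounds(fapl2, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
        FAIL_STACK_ERROR

    /* paged aggregation, free-space info persisted across open/close,
     * 1-byte free-space section threshold */
    if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, TRUE, (hsize_t)1) < 0)
        FAIL_STACK_ERROR

    if((file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl2)) < 0)
        FAIL_STACK_ERROR
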
diff --git a/test/links_env.c b/test/links_env.c
index efc7c1e..b32fb32 100644
--- a/test/links_env.c
+++ b/test/links_env.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/mf.c b/test/mf.c
index e3d1845..3197989 100644
--- a/test/mf.c
+++ b/test/mf.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -20,6 +18,7 @@
* test_mf_aggr_*() tests for file memory that interact with the aggregators
* test_mf_align_*() tests for file memory with alignment setting
* test_filespace_*() tests for file space management
+ * test_page_*() tests for file space paging
*/
#include "h5test.h"
@@ -38,41 +37,63 @@
#include "H5Iprivate.h"
#include "H5VMprivate.h"
-#define FILENAME_LEN 1024
-
-#define TEST_BLOCK_SIZE1 1
-#define TEST_BLOCK_SIZE2 2
-#define TEST_BLOCK_SIZE3 3
-#define TEST_BLOCK_SIZE4 4
-#define TEST_BLOCK_SIZE5 5
-#define TEST_BLOCK_SIZE6 6
-#define TEST_BLOCK_SIZE7 7
-#define TEST_BLOCK_SIZE8 8
-#define TEST_BLOCK_SIZE20 20
-#define TEST_BLOCK_SIZE30 30
-#define TEST_BLOCK_SIZE40 40
-#define TEST_BLOCK_SIZE50 50
-#define TEST_BLOCK_SIZE80 80
-#define TEST_BLOCK_SIZE200 200
-#define TEST_BLOCK_SIZE600 600
-#define TEST_BLOCK_SIZE700 700
-#define TEST_BLOCK_SIZE1034 1034
-#define TEST_BLOCK_SIZE1970 1970
-#define TEST_BLOCK_SIZE2058 2058
-#define TEST_BLOCK_SIZE8000 8000
-#define TEST_BLOCK_SIZE2048 2048
-
-#define TEST_BLOCK_ADDR70 70
-#define TEST_BLOCK_ADDR100 100
-
-#define TEST_ALIGN1024 1024
-#define TEST_ALIGN4096 4096
-
-#define TEST_THRESHOLD10 10
-#define TEST_THRESHOLD3 3
-
-#define CORE_INCREMENT 1024
-#define FAMILY_SIZE 1024
+#define FILENAME_LEN 1024
+
+#define TBLOCK_SIZE1 1
+#define TBLOCK_SIZE2 2
+#define TBLOCK_SIZE3 3
+#define TBLOCK_SIZE4 4
+#define TBLOCK_SIZE5 5
+#define TBLOCK_SIZE6 6
+#define TBLOCK_SIZE7 7
+#define TBLOCK_SIZE8 8
+#define TBLOCK_SIZE10 10
+#define TBLOCK_SIZE11 11
+#define TBLOCK_SIZE20 20
+#define TBLOCK_SIZE30 30
+#define TBLOCK_SIZE36 36
+#define TBLOCK_SIZE40 40
+#define TBLOCK_SIZE50 50
+#define TBLOCK_SIZE80 80
+#define TBLOCK_SIZE90 90
+#define TBLOCK_SIZE98 98
+#define TBLOCK_SIZE100 100
+#define TBLOCK_SIZE150 150
+#define TBLOCK_SIZE200 200
+#define TBLOCK_SIZE600 600
+#define TBLOCK_SIZE700 700
+#define TBLOCK_SIZE1034 1034
+#define TBLOCK_SIZE1970 1970
+#define TBLOCK_SIZE2048 2048
+#define TBLOCK_SIZE2058 2058
+#define TBLOCK_SIZE2192 2192
+#define TBLOCK_SIZE3080 3080
+#define TBLOCK_SIZE3088 3088
+#define TBLOCK_SIZE3198 3198
+#define TBLOCK_SIZE3286 3286
+#define TBLOCK_SIZE3248 3248
+#define TBLOCK_SIZE3900 3900
+#define TBLOCK_SIZE4020 4020
+#define TBLOCK_SIZE4086 4086
+#define TBLOCK_SIZE4096 4096
+#define TBLOCK_SIZE4106 4106
+#define TBLOCK_SIZE5000 5000
+#define TBLOCK_SIZE6000 6000
+#define TBLOCK_SIZE8000 8000
+#define TBLOCK_SIZE8100 8100
+#define TBLOCK_SIZE8192 8192
+#define TBLOCK_SIZE8190 8190
+#define TBLOCK_SIZE12000 12000
+
+#define TBLOCK_ADDR70 70
+#define TBLOCK_ADDR100 100
+
+#define TEST_ALIGN16 16
+#define TEST_ALIGN1024 1024
+#define TEST_ALIGN4096 4096
+
+#define TEST_THRESHOLD10 10
+#define TEST_THRESHOLD3 3
const char *FILENAME[] = {
"mf",
@@ -80,24 +101,17 @@ const char *FILENAME[] = {
};
typedef enum {
- TEST_NORMAL, /* size of aggregator is >= alignment size */
- TEST_AGGR_SMALL, /* size of aggregator is smaller than alignment size */
- TEST_NTESTS /* The number of test types, must be last */
+ TEST_NORMAL, /* size of aggregator is >= alignment size */
+ TEST_AGGR_SMALL, /* size of aggregator is smaller than alignment size */
+ TEST_NTESTS /* The number of test types, must be last */
} test_type_t;
-typedef struct frspace_state_t {
- hsize_t tot_space; /* Total amount of space tracked */
- hsize_t tot_sect_count; /* Total # of sections tracked */
- hsize_t serial_sect_count; /* # of serializable sections tracked */
- hsize_t ghost_sect_count; /* # of un-serializable sections tracked */
-} frspace_state_t;
+static int check_stats(const H5F_t *f, const H5FS_t *frsp, H5FS_stat_t *state);
-
-static int check_stats(const H5F_t *, const H5FS_t *, frspace_state_t *);
static unsigned test_mf_eoa(const char *env_h5_drvr, hid_t fapl);
static unsigned test_mf_eoa_shrink(const char *env_h5_drvr, hid_t fapl);
static unsigned test_mf_eoa_extend(const char *env_h5_drvr, hid_t fapl);
-static unsigned test_mf_tmp(const char *env_h5_drvr, hid_t fapl);
+static unsigned test_dichotomy(hid_t fapl);
static unsigned test_mf_fs_start(hid_t fapl);
static unsigned test_mf_fs_alloc_free(hid_t fapl);
static unsigned test_mf_fs_extend(hid_t fapl);
@@ -119,21 +133,27 @@ static unsigned test_mf_align_alloc3(const char *env_h5_drvr, hid_t fapl, hid_t
static unsigned test_mf_align_alloc4(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl);
static unsigned test_mf_align_alloc5(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl);
static unsigned test_mf_align_alloc6(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl);
-static unsigned test_mf_fs_persist(hid_t fapl_new, hid_t fcpl);
-static unsigned test_mf_fs_gone(hid_t fapl_new, hid_t fcpl);
-static unsigned test_mf_fs_split(hid_t fapl_new, hid_t fcpl);
-static unsigned test_mf_fs_multi(hid_t fapl, hid_t fcpl);
-static unsigned test_mf_fs_drivers(hid_t fapl);
+static unsigned test_mf_tmp(const char *env_h5_drvr, hid_t fapl, hbool_t new_format);
+static unsigned test_mf_fs_gone(const char *env_h5_drvr, hid_t fapl, hbool_t new_format);
+static unsigned test_mf_strat_thres_gone(const char *env_h5_drvr, hid_t fapl, hbool_t new_format);
+static unsigned test_mf_fs_persist(const char *env_h5_drvr, hid_t fapl, hbool_t new_format);
+static unsigned test_mf_strat_thres_persist(const char *env_h5_drvr, hid_t fapl, hbool_t new_format);
+static unsigned test_mf_fs_persist_split(void);
+static unsigned test_mf_fs_persist_multi(void);
+static unsigned test_page_alloc_xfree(const char *env_h5_drvr, hid_t fapl);
+static unsigned test_page_small(const char *env_h5_drvr, hid_t fapl);
+static unsigned test_page_large(const char *env_h5_drvr, hid_t fapl);
+static unsigned test_page_large_try_extend(const char *env_h5_drvr, hid_t fapl);
+static unsigned test_page_small_try_extend(const char *env_h5_drvr, hid_t fapl);
+static unsigned test_page_try_shrink(const char *env_h5_drvr, hid_t fapl);
+static unsigned test_page_alignment(const char *env_h5_drvr, hid_t fapl);
/*
* Verify statistics for the free-space manager
*
- * Modifications:
- * Vailin Choi; July 2012
- * To ensure "f" and "frsp" are valid pointers
*/
static int
-check_stats(const H5F_t *f, const H5FS_t *frsp, frspace_state_t *state)
+check_stats(const H5F_t *f, const H5FS_t *frsp, H5FS_stat_t *state)
{
H5FS_stat_t frspace_stats; /* Statistics about the heap */
@@ -146,22 +166,22 @@ check_stats(const H5F_t *f, const H5FS_t *frsp, frspace_state_t *state)
if(frspace_stats.tot_space != state->tot_space) {
HDfprintf(stdout, "frspace_stats.tot_space = %Hu, state->tot_space = %Zu\n",
- frspace_stats.tot_space, state->tot_space);
+ frspace_stats.tot_space, state->tot_space);
TEST_ERROR
} /* end if */
if(frspace_stats.tot_sect_count != state->tot_sect_count) {
HDfprintf(stdout, "frspace_stats.tot_sect_count = %Hu, state->tot_sect_count = %Hu\n",
- frspace_stats.tot_sect_count, state->tot_sect_count);
+ frspace_stats.tot_sect_count, state->tot_sect_count);
TEST_ERROR
} /* end if */
if(frspace_stats.serial_sect_count != state->serial_sect_count) {
HDfprintf(stdout, "frspace_stats.serial_sect_count = %Hu, state->serial_sect_count = %Hu\n",
- frspace_stats.serial_sect_count, state->serial_sect_count);
+ frspace_stats.serial_sect_count, state->serial_sect_count);
TEST_ERROR
} /* end if */
if(frspace_stats.ghost_sect_count != state->ghost_sect_count) {
HDfprintf(stdout, "frspace_stats.ghost_sect_count = %Hu, state->ghost_sect_count = %Hu\n",
- frspace_stats.ghost_sect_count, state->ghost_sect_count);
+ frspace_stats.ghost_sect_count, state->ghost_sect_count);
TEST_ERROR
} /* end if */
@@ -173,13 +193,15 @@ error:
} /* check_stats() */
/*
+ *-------------------------------------------------------------------------
* To verify that blocks are allocated from file allocation
*
* Set up:
- * Turn off using meta/small data aggregator
- * There is nothing in free-space manager
+ * Turn off using meta/small data aggregator
+ * There is nothing in free-space manager
*
* Allocate two blocks which should be from file allocation
+ *-------------------------------------------------------------------------
*/
static unsigned
test_mf_eoa(const char *env_h5_drvr, hid_t fapl)
@@ -234,7 +256,7 @@ test_mf_eoa(const char *env_h5_drvr, hid_t fapl)
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
type = H5FD_MEM_SUPER;
- addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
/* nothing should be changed in meta_aggr */
H5MF_aggr_query(f, &(f->shared->meta_aggr), &new_ma_addr, &ma_size);
@@ -244,7 +266,7 @@ test_mf_eoa(const char *env_h5_drvr, hid_t fapl)
if (addr1 < (haddr_t)file_size)
TEST_ERROR
- addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE50);
+ addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE50);
/* nothing should be changed in meta_aggr */
H5MF_aggr_query(f, &(f->shared->meta_aggr), &new_ma_addr, &ma_size);
@@ -262,7 +284,7 @@ test_mf_eoa(const char *env_h5_drvr, hid_t fapl)
TEST_ERROR
/* Verify the file is the correct size */
- if (new_file_size != (file_size+TEST_BLOCK_SIZE30+TEST_BLOCK_SIZE50))
+ if (new_file_size != (file_size+TBLOCK_SIZE30+TBLOCK_SIZE50))
TEST_ERROR
/* Re-open the file */
@@ -273,8 +295,8 @@ test_mf_eoa(const char *env_h5_drvr, hid_t fapl)
if(NULL == (f = (H5F_t *)H5I_object(file)))
FAIL_STACK_ERROR
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TEST_BLOCK_SIZE30);
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr2, (hsize_t)TEST_BLOCK_SIZE50);
+ H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TBLOCK_SIZE30);
+ H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr2, (hsize_t)TBLOCK_SIZE50);
if(H5Fclose(file) < 0)
FAIL_STACK_ERROR
@@ -308,6 +330,7 @@ error:
} /* test_mf_eoa() */
/*
+ *-------------------------------------------------------------------------
* To verify that an allocated block from file allocation is shrunk.
*
* Set up:
@@ -323,6 +346,7 @@ error:
* Test 4: Allocate a block of 30 from file allocation
* H5MF_try_shrink() the block by 20 from the end: succeed
*
+ *-------------------------------------------------------------------------
*/
static unsigned
test_mf_eoa_shrink(const char *env_h5_drvr, hid_t fapl)
@@ -377,7 +401,7 @@ test_mf_eoa_shrink(const char *env_h5_drvr, hid_t fapl)
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
type = H5FD_MEM_SUPER;
- addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
if (addr < (haddr_t)file_size)
TEST_ERROR
@@ -395,7 +419,7 @@ test_mf_eoa_shrink(const char *env_h5_drvr, hid_t fapl)
TEST_ERROR
/* Verify the file is the correct size */
- if (new_file_size != (file_size+TEST_BLOCK_SIZE30))
+ if (new_file_size != (file_size+TBLOCK_SIZE30))
TEST_ERROR
/* Re-open the file */
@@ -409,7 +433,7 @@ test_mf_eoa_shrink(const char *env_h5_drvr, hid_t fapl)
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
/* should succeed */
- if(H5MF_try_shrink(f, type, H5AC_ind_read_dxpl_id, addr, (hsize_t)TEST_BLOCK_SIZE30) <= 0)
+ if(H5MF_try_shrink(f, type, H5AC_ind_read_dxpl_id, addr, (hsize_t)TBLOCK_SIZE30) <= 0)
TEST_ERROR
/* nothing should be changed in meta_aggr */
@@ -453,13 +477,13 @@ test_mf_eoa_shrink(const char *env_h5_drvr, hid_t fapl)
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
if (addr < (haddr_t)file_size)
TEST_ERROR
/* should not succeed in shrinking */
- if(H5MF_try_shrink(f, type, H5AC_ind_read_dxpl_id, addr, (hsize_t)TEST_BLOCK_SIZE30 - 10) > 0)
+ if(H5MF_try_shrink(f, type, H5AC_ind_read_dxpl_id, addr, (hsize_t)TBLOCK_SIZE30 - 10) > 0)
TEST_ERROR
/* nothing should be changed in meta_aggr */
@@ -475,7 +499,7 @@ test_mf_eoa_shrink(const char *env_h5_drvr, hid_t fapl)
TEST_ERROR
/* Verify the file is the correct size */
- if(new_file_size != (file_size + TEST_BLOCK_SIZE30))
+ if(new_file_size != (file_size + TBLOCK_SIZE30))
TEST_ERROR
PASSED()
@@ -503,7 +527,7 @@ test_mf_eoa_shrink(const char *env_h5_drvr, hid_t fapl)
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
/* should not succeed in shrinking */
- if(H5MF_try_shrink(f, type, H5AC_ind_read_dxpl_id, addr, (hsize_t)TEST_BLOCK_SIZE30 + 10) > 0)
+ if(H5MF_try_shrink(f, type, H5AC_ind_read_dxpl_id, addr, (hsize_t)TBLOCK_SIZE30 + 10) > 0)
TEST_ERROR
/* nothing should be changed in meta_aggr */
@@ -519,7 +543,7 @@ test_mf_eoa_shrink(const char *env_h5_drvr, hid_t fapl)
TEST_ERROR
/* Verify the file is the correct size */
- if(new_file_size != (file_size + TEST_BLOCK_SIZE30))
+ if(new_file_size != (file_size + TBLOCK_SIZE30))
TEST_ERROR
PASSED()
@@ -546,7 +570,7 @@ test_mf_eoa_shrink(const char *env_h5_drvr, hid_t fapl)
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
/* should succeed in shrinking */
- if(H5MF_try_shrink(f, type, H5AC_ind_read_dxpl_id, addr+10, (hsize_t)(TEST_BLOCK_SIZE30 - 10)) <= 0)
+ if(H5MF_try_shrink(f, type, H5AC_ind_read_dxpl_id, addr+10, (hsize_t)(TBLOCK_SIZE30 - 10)) <= 0)
TEST_ERROR
/* nothing should be changed in meta_aggr */
@@ -588,6 +612,7 @@ error:
} /* test_mf_eoa_shrink() */
/*
+ *-------------------------------------------------------------------------
* To verify that an allocated block from file allocation is extended.
*
* Set up:
@@ -599,6 +624,7 @@ error:
*
* Test 2: Allocate a block of 30
* H5MF_try_extend() the block of size 20 by 50: fail
+ *-------------------------------------------------------------------------
*/
static unsigned
test_mf_eoa_extend(const char *env_h5_drvr, hid_t fapl)
@@ -656,7 +682,7 @@ test_mf_eoa_extend(const char *env_h5_drvr, hid_t fapl)
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
type = H5FD_MEM_SUPER;
- addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
if (addr < (haddr_t)file_size)
TEST_ERROR
@@ -673,7 +699,7 @@ test_mf_eoa_extend(const char *env_h5_drvr, hid_t fapl)
TEST_ERROR
/* Verify the file is the correct size */
- if(new_file_size != (file_size + TEST_BLOCK_SIZE30))
+ if(new_file_size != (file_size + TBLOCK_SIZE30))
TEST_ERROR
/* Re-open the file */
@@ -685,7 +711,7 @@ test_mf_eoa_extend(const char *env_h5_drvr, hid_t fapl)
FAIL_STACK_ERROR
/* should succeed */
- was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, type, (haddr_t)addr, (hsize_t)TEST_BLOCK_SIZE30, (hsize_t)TEST_BLOCK_SIZE50);
+ was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, type, (haddr_t)addr, (hsize_t)TBLOCK_SIZE30, (hsize_t)TBLOCK_SIZE50);
if(was_extended <= 0)
TEST_ERROR
@@ -703,7 +729,7 @@ test_mf_eoa_extend(const char *env_h5_drvr, hid_t fapl)
TEST_ERROR
/* Verify the file is the correct size */
- if(new_file_size != (file_size + TEST_BLOCK_SIZE30 + TEST_BLOCK_SIZE50))
+ if(new_file_size != (file_size + TBLOCK_SIZE30 + TBLOCK_SIZE50))
TEST_ERROR
PASSED()
@@ -734,7 +760,7 @@ test_mf_eoa_extend(const char *env_h5_drvr, hid_t fapl)
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
type = H5FD_MEM_SUPER;
- addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
if(addr < (haddr_t)file_size)
TEST_ERROR
@@ -744,7 +770,7 @@ test_mf_eoa_extend(const char *env_h5_drvr, hid_t fapl)
if(new_ma_addr != ma_addr)
TEST_ERROR
- was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, type, (haddr_t)addr, (hsize_t)(TEST_BLOCK_SIZE30-10), (hsize_t)(TEST_BLOCK_SIZE50));
+ was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, type, (haddr_t)addr, (hsize_t)(TBLOCK_SIZE30-10), (hsize_t)(TBLOCK_SIZE50));
/* should not succeed */
if(was_extended > 0)
@@ -763,7 +789,7 @@ test_mf_eoa_extend(const char *env_h5_drvr, hid_t fapl)
TEST_ERROR
/* Verify the file is the correct size */
- if(new_file_size != file_size + TEST_BLOCK_SIZE30)
+ if(new_file_size != file_size + TBLOCK_SIZE30)
TEST_ERROR
if(H5Pclose(fapl_new) < 0)
@@ -787,6 +813,7 @@ error:
} /* test_mf_eoa_extend() */
/*
+ *-------------------------------------------------------------------------
* To verify that temporary blocks are allocated correctly
*
* Set up:
@@ -806,13 +833,19 @@ error:
* space fails
* - Check that allocating another 1/2 of the file as normal address
* space fails
+ *-------------------------------------------------------------------------
*/
static unsigned
-test_mf_tmp(const char *env_h5_drvr, hid_t fapl)
+test_mf_tmp(const char *env_h5_drvr, hid_t fapl, hbool_t new_format)
{
- hid_t file = -1; /* File ID */
+ hid_t file = -1; /* File ID */
+ hid_t fapl2 = -1; /* File access property list */
+ hid_t fcpl = -1; /* File creation property list */
- TESTING("'temporary' file space allocation");
+ if(new_format)
+ TESTING("'temporary' file space allocation with new library format")
+ else
+ TESTING("'temporary' file space allocation with old library format")
/* Can't run this test with multi-file VFDs */
if(HDstrcmp(env_h5_drvr, "split") && HDstrcmp(env_h5_drvr, "multi") && HDstrcmp(env_h5_drvr, "family")) {
@@ -829,8 +862,22 @@ test_mf_tmp(const char *env_h5_drvr, hid_t fapl)
/* Set the filename to use for this test */
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ FAIL_STACK_ERROR
+
+ if(new_format) {
+ /* Copy the file access property list */
+ if((fapl2 = H5Pcopy(fapl)) < 0) FAIL_STACK_ERROR
+
+ /* Set the "use the latest version of the format" bounds for creating objects in the file */
+ if(H5Pset_libver_bounds(fapl2, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ FAIL_STACK_ERROR
+
+ H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, FALSE, (hsize_t)1);
+ } /* end if */
+
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, new_format?fapl2:fapl)) < 0)
FAIL_STACK_ERROR
/* Close file */
@@ -855,13 +902,13 @@ test_mf_tmp(const char *env_h5_drvr, hid_t fapl)
FAIL_STACK_ERROR
/* Allocate some temporary address space */
- if(HADDR_UNDEF == (tmp_addr = H5MF_alloc_tmp(f, (hsize_t)TEST_BLOCK_SIZE30)))
+ if(HADDR_UNDEF == (tmp_addr = H5MF_alloc_tmp(f, (hsize_t)TBLOCK_SIZE30)))
FAIL_STACK_ERROR
/* Check if temporary file address is valid */
if(!H5F_IS_TMP_ADDR(f, tmp_addr))
TEST_ERROR
- if(tmp_addr < (haddr_t)(maxaddr - TEST_BLOCK_SIZE30))
+ if(tmp_addr < (haddr_t)(maxaddr - TBLOCK_SIZE30))
TEST_ERROR
/* Reading & writing with a temporary address value should fail */
@@ -878,7 +925,7 @@ test_mf_tmp(const char *env_h5_drvr, hid_t fapl)
/* Freeing a temporary address value should fail */
H5E_BEGIN_TRY {
- status = H5MF_xfree(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, tmp_addr, (hsize_t)TEST_BLOCK_SIZE30);
+ status = H5MF_xfree(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, tmp_addr, (hsize_t)TBLOCK_SIZE30);
} H5E_END_TRY;
if(status >= 0)
TEST_ERROR
@@ -963,10 +1010,12 @@ error:
} /* test_mf_tmp() */
/*
+ *-------------------------------------------------------------------------
* To verify that the free-space manager is created or opened
*
* Set up:
* Turn off using meta/small data aggregator
+ *-------------------------------------------------------------------------
*/
static unsigned
test_mf_fs_start(hid_t fapl)
@@ -976,11 +1025,10 @@ test_mf_fs_start(hid_t fapl)
char filename[FILENAME_LEN]; /* Filename to use */
H5F_t *f = NULL; /* Internal file object pointer */
h5_stat_size_t file_size, new_file_size; /* file size */
- H5FD_mem_t type;
- frspace_state_t state;
+ H5FS_stat_t state;
- TESTING("H5MF_alloc_create()/H5MF_alloc_open() of free-space manager");
+ TESTING("H5MF_create_fstype()/H5MF_open_fstype() of free-space manager");
/* Set the filename to use for this test (dependent on fapl) */
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
@@ -1011,20 +1059,18 @@ test_mf_fs_start(hid_t fapl)
if(NULL == (f = (H5F_t *)H5I_object(file)))
FAIL_STACK_ERROR
- /* Start up free-space manager */
- type = H5FD_MEM_SUPER;
+ /* Start up H5FD_MEM_SUPER free-space manager */
+ if(H5MF_start_fstype(f, H5AC_ind_read_dxpl_id, (H5F_mem_page_t)H5FD_MEM_SUPER) < 0)
+ FAIL_STACK_ERROR
- if(H5MF_alloc_start(f, H5AC_ind_read_dxpl_id, type) < 0)
+ if(f->shared->fs_state[H5FD_MEM_SUPER] != H5F_FS_STATE_OPEN)
+ TEST_ERROR
+ if(f->shared->fs_man[H5FD_MEM_SUPER]->client != H5FS_CLIENT_FILE_ID)
TEST_ERROR
- if (f->shared->fs_state[type] != H5F_FS_STATE_OPEN)
- TEST_ERROR
- if (f->shared->fs_man[type]->client != H5FS_CLIENT_FILE_ID)
- TEST_ERROR
-
- HDmemset(&state, 0, sizeof(frspace_state_t));
+ HDmemset(&state, 0, sizeof(H5FS_stat_t));
- if(check_stats(f, f->shared->fs_man[type], &state))
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
if(H5Fclose(file) < 0)
@@ -1055,6 +1101,7 @@ error:
/*
+ *-------------------------------------------------------------------------
* To verify that a block is allocated/freed from/to the free-space manager
*
* Set up:
@@ -1078,10 +1125,7 @@ error:
* The block is allocated from file allocation
* Deallocate the block which will be returned to free-space manager
* (the space is shrunk and freed since it is at end of file)
- *
- * Modifications:
- * Vailin Choi; July 2012
- * Initialize the new field "allow_eoa_shrink_only" for user data.
+ *-------------------------------------------------------------------------
*/
static unsigned
test_mf_fs_alloc_free(hid_t fapl)
@@ -1091,12 +1135,10 @@ test_mf_fs_alloc_free(hid_t fapl)
char filename[FILENAME_LEN]; /* Filename to use */
H5F_t *f = NULL; /* Internal file object pointer */
h5_stat_size_t file_size, new_file_size; /* file size */
- H5FD_mem_t type;
H5MF_free_section_t *sect_node = NULL;
- haddr_t addr;
- frspace_state_t state;
- H5MF_sect_ud_t udata;
- H5FS_section_info_t *node;
+ haddr_t addr;
+ haddr_t tmp;
+ H5FS_stat_t state;
TESTING("H5MF_alloc()/H5MF_xfree() of free-space manager:test 1");
@@ -1129,69 +1171,59 @@ test_mf_fs_alloc_free(hid_t fapl)
if(NULL == (f = (H5F_t *)H5I_object(file)))
FAIL_STACK_ERROR
- type = H5FD_MEM_SUPER;
+ /* Start up H5FD_MEM_SUPER free-space manager */
+ if(H5MF_start_fstype(f, H5AC_ind_read_dxpl_id, (H5F_mem_page_t)H5FD_MEM_SUPER) < 0)
+ FAIL_STACK_ERROR
- if(H5MF_alloc_start(f, H5AC_ind_read_dxpl_id, type) < 0)
+ if(f->shared->fs_state[H5FD_MEM_SUPER] != H5F_FS_STATE_OPEN)
+ TEST_ERROR
+ if(f->shared->fs_man[H5FD_MEM_SUPER]->client != H5FS_CLIENT_FILE_ID)
TEST_ERROR
-
- if (f->shared->fs_state[type] != H5F_FS_STATE_OPEN)
- TEST_ERROR
- if (f->shared->fs_man[type]->client != H5FS_CLIENT_FILE_ID)
- TEST_ERROR
/* Create section A */
- sect_node = H5MF_sect_simple_new((haddr_t)TEST_BLOCK_ADDR70, (hsize_t)TEST_BLOCK_SIZE30);
-
- /* Construct user data for callbacks */
- udata.f = f;
- udata.dxpl_id = H5AC_ind_read_dxpl_id;
- udata.alloc_type = type;
- udata.allow_sect_absorb = TRUE;
- udata.allow_eoa_shrink_only = FALSE;
+ sect_node = H5MF_sect_new(H5MF_FSPACE_SECT_SIMPLE, (haddr_t)TBLOCK_ADDR70, (hsize_t)TBLOCK_SIZE30);
/* Add section A to free-space manager */
- if (H5FS_sect_add(f, H5AC_ind_read_dxpl_id, f->shared->fs_man[type], (H5FS_section_info_t *)sect_node, H5FS_ADD_RETURNED_SPACE, &udata))
- FAIL_STACK_ERROR
+ if(H5MF_add_sect(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, f->shared->fs_man[H5FD_MEM_SUPER], sect_node))
+ FAIL_STACK_ERROR
- HDmemset(&state, 0, sizeof(frspace_state_t));
- state.tot_space += TEST_BLOCK_SIZE30;
+ HDmemset(&state, 0, sizeof(H5FS_stat_t));
+ state.tot_space += TBLOCK_SIZE30;
state.tot_sect_count += 1;
state.serial_sect_count += 1;
- if(check_stats(f, f->shared->fs_man[type], &state))
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
/* Allocate a block of 30 */
- addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ addr = H5MF_alloc(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
/* Verify that the allocated block is section A in free-space */
- if (addr != TEST_BLOCK_ADDR70)
+ if(addr != TBLOCK_ADDR70)
TEST_ERROR
- state.tot_space -= TEST_BLOCK_SIZE30;
+ state.tot_space -= TBLOCK_SIZE30;
state.tot_sect_count -= 1;
state.serial_sect_count -= 1;
- if(check_stats(f, f->shared->fs_man[type], &state))
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
/* Free the block to free-space */
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr, (hsize_t)TEST_BLOCK_SIZE30);
+ H5MF_xfree(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, addr, (hsize_t)TBLOCK_SIZE30);
- state.tot_space += TEST_BLOCK_SIZE30;
+ state.tot_space += TBLOCK_SIZE30;
state.tot_sect_count += 1;
state.serial_sect_count += 1;
- if(check_stats(f, f->shared->fs_man[type], &state))
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
/* Remove section A from free-space */
- if(H5FS_sect_find(f, H5AC_ind_read_dxpl_id, f->shared->fs_man[type],
- (hsize_t)TEST_BLOCK_SIZE30, (H5FS_section_info_t **)&node) < 0)
- TEST_ERROR
+ if(H5MF_find_sect(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30, f->shared->fs_man[H5FD_MEM_SUPER], &tmp) != TRUE)
+ TEST_ERROR
- /* Free the free-space section node */
- if(H5MF_sect_simple_free((H5FS_section_info_t *)node) < 0)
- TEST_ERROR
+ if(tmp != TBLOCK_ADDR70)
+ TEST_ERROR
if(H5Fclose(file) < 0)
FAIL_STACK_ERROR
@@ -1216,67 +1248,57 @@ test_mf_fs_alloc_free(hid_t fapl)
if(NULL == (f = (H5F_t *)H5I_object(file)))
FAIL_STACK_ERROR
- type = H5FD_MEM_SUPER;
+ /* Start up H5FD_MEM_SUPER free-space manager */
+ if(H5MF_start_fstype(f, H5AC_ind_read_dxpl_id, (H5F_mem_page_t)H5FD_MEM_SUPER) < 0)
+ FAIL_STACK_ERROR
- if(H5MF_alloc_start(f, H5AC_ind_read_dxpl_id, type) < 0)
+ if(f->shared->fs_state[H5FD_MEM_SUPER] != H5F_FS_STATE_OPEN)
+ TEST_ERROR
+ if(f->shared->fs_man[H5FD_MEM_SUPER]->client != H5FS_CLIENT_FILE_ID)
TEST_ERROR
-
- if (f->shared->fs_state[type] != H5F_FS_STATE_OPEN)
- TEST_ERROR
- if (f->shared->fs_man[type]->client != H5FS_CLIENT_FILE_ID)
- TEST_ERROR
/* Create section A */
- sect_node = H5MF_sect_simple_new((haddr_t)TEST_BLOCK_ADDR70, (hsize_t)TEST_BLOCK_SIZE30);
-
- /* Construct user data for callbacks */
- udata.f = f;
- udata.dxpl_id = H5AC_ind_read_dxpl_id;
- udata.alloc_type = type;
- udata.allow_sect_absorb = TRUE;
- udata.allow_eoa_shrink_only = FALSE;
+ sect_node = H5MF_sect_new(H5MF_FSPACE_SECT_SIMPLE, (haddr_t)TBLOCK_ADDR70, (hsize_t)TBLOCK_SIZE30);
- /* Add section A to free-space manager */
- if (H5FS_sect_add(f, H5AC_ind_read_dxpl_id, f->shared->fs_man[type], (H5FS_section_info_t *)sect_node, H5FS_ADD_RETURNED_SPACE, &udata))
- FAIL_STACK_ERROR
+ /* Add section A to free-space manager */
+ if(H5MF_add_sect(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, f->shared->fs_man[H5FD_MEM_SUPER], sect_node))
+ FAIL_STACK_ERROR
- HDmemset(&state, 0, sizeof(frspace_state_t));
- state.tot_space += TEST_BLOCK_SIZE30;
+ HDmemset(&state, 0, sizeof(H5FS_stat_t));
+ state.tot_space += TBLOCK_SIZE30;
state.tot_sect_count += 1;
state.serial_sect_count += 1;
- if(check_stats(f, f->shared->fs_man[type], &state))
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
/* Allocate a block of 20 */
- addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)(TEST_BLOCK_SIZE20));
+ addr = H5MF_alloc(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, (hsize_t)(TBLOCK_SIZE20));
/* Verify that the allocated block is section A in free-space manager */
- if (addr != TEST_BLOCK_ADDR70)
+ if(addr != TBLOCK_ADDR70)
TEST_ERROR
/* should still have 1 section of size 10 left in free-space manager */
- state.tot_space -= (TEST_BLOCK_SIZE20);
+ state.tot_space -= (TBLOCK_SIZE20);
- if(check_stats(f, f->shared->fs_man[type], &state))
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
/* Free the block to free-space manager */
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr, (hsize_t)(TEST_BLOCK_SIZE20));
+ H5MF_xfree(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, addr, (hsize_t)(TBLOCK_SIZE20));
/* Still 1 section in free-space because of merging */
- state.tot_space += TEST_BLOCK_SIZE20;
- if(check_stats(f, f->shared->fs_man[type], &state))
+ state.tot_space += TBLOCK_SIZE20;
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
- /* Remove section A from free-space manager */
- if(H5FS_sect_find(f, H5AC_ind_read_dxpl_id, f->shared->fs_man[type],
- (hsize_t)TEST_BLOCK_SIZE30, (H5FS_section_info_t **)&node) < 0)
- FAIL_STACK_ERROR
+ /* Remove section A from free-space */
+ if(H5MF_find_sect(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30, f->shared->fs_man[H5FD_MEM_SUPER], &tmp) != TRUE)
+ TEST_ERROR
- /* Free the free-space section node */
- if(H5MF_sect_simple_free((H5FS_section_info_t *)node) < 0)
- TEST_ERROR
+ if(tmp != TBLOCK_ADDR70)
+ TEST_ERROR
if(H5Fclose(file) < 0)
FAIL_STACK_ERROR
@@ -1301,36 +1323,28 @@ test_mf_fs_alloc_free(hid_t fapl)
if(NULL == (f = (H5F_t *)H5I_object(file)))
FAIL_STACK_ERROR
- type = H5FD_MEM_SUPER;
+ /* Start up H5FD_MEM_SUPER free-space manager */
+ if(H5MF_start_fstype(f, H5AC_ind_read_dxpl_id, (H5F_mem_page_t)H5FD_MEM_SUPER) < 0)
+ FAIL_STACK_ERROR
- if(H5MF_alloc_start(f, H5AC_ind_read_dxpl_id, type) < 0)
+ if(f->shared->fs_state[H5FD_MEM_SUPER] != H5F_FS_STATE_OPEN)
+ TEST_ERROR
+ if(f->shared->fs_man[H5FD_MEM_SUPER]->client != H5FS_CLIENT_FILE_ID)
TEST_ERROR
-
- if (f->shared->fs_state[type] != H5F_FS_STATE_OPEN)
- TEST_ERROR
- if (f->shared->fs_man[type]->client != H5FS_CLIENT_FILE_ID)
- TEST_ERROR
/* Create section A */
- sect_node = H5MF_sect_simple_new((haddr_t)TEST_BLOCK_ADDR70, (hsize_t)TEST_BLOCK_SIZE30);
-
- /* Construct user data for callbacks */
- udata.f = f;
- udata.dxpl_id = H5AC_ind_read_dxpl_id;
- udata.alloc_type = type;
- udata.allow_sect_absorb = TRUE;
- udata.allow_eoa_shrink_only = FALSE;
+ sect_node = H5MF_sect_new(H5MF_FSPACE_SECT_SIMPLE, (haddr_t)TBLOCK_ADDR70, (hsize_t)TBLOCK_SIZE30);
/* Add section A to free-space manager */
- if (H5FS_sect_add(f, H5AC_ind_read_dxpl_id, f->shared->fs_man[type], (H5FS_section_info_t *)sect_node, H5FS_ADD_RETURNED_SPACE, &udata))
- FAIL_STACK_ERROR
+ if(H5MF_add_sect(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, f->shared->fs_man[H5FD_MEM_SUPER], sect_node))
+ FAIL_STACK_ERROR
- HDmemset(&state, 0, sizeof(frspace_state_t));
- state.tot_space += TEST_BLOCK_SIZE30;
+ HDmemset(&state, 0, sizeof(H5FS_stat_t));
+ state.tot_space += TBLOCK_SIZE30;
state.tot_sect_count += 1;
state.serial_sect_count += 1;
- if(check_stats(f, f->shared->fs_man[type], &state))
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
/*
@@ -1338,38 +1352,37 @@ test_mf_fs_alloc_free(hid_t fapl)
 * Since the free-space manager cannot fulfill the request,
* the block is obtained from file allocation
*/
- addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)(TEST_BLOCK_SIZE40));
+ addr = H5MF_alloc(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, (hsize_t)(TBLOCK_SIZE40));
/* Verify that the allocated block is not section A in free-space */
- if (addr == TEST_BLOCK_ADDR70)
- TEST_ERROR
+ if(addr == TBLOCK_ADDR70)
+ TEST_ERROR
/* free-space info should be the same */
- if(check_stats(f, f->shared->fs_man[type], &state))
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
/* Remove section A from free-space */
- if(H5FS_sect_find(f, H5AC_ind_read_dxpl_id, f->shared->fs_man[type],
- (hsize_t)TEST_BLOCK_SIZE30, (H5FS_section_info_t **)&node) < 0)
- FAIL_STACK_ERROR
+ if(H5MF_find_sect(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30, f->shared->fs_man[H5FD_MEM_SUPER], &tmp) != TRUE)
+ TEST_ERROR
- /* Free the free-space section node */
- if(H5MF_sect_simple_free((H5FS_section_info_t *)node) < 0)
- TEST_ERROR
+ /* Verify that the block is section A in free-space */
+ if(tmp != TBLOCK_ADDR70)
+ TEST_ERROR
- HDmemset(&state, 0, sizeof(frspace_state_t));
- if(check_stats(f, f->shared->fs_man[type], &state))
+ HDmemset(&state, 0, sizeof(H5FS_stat_t));
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
/* Free the block of size 40 to free-space */
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr, (hsize_t)(TEST_BLOCK_SIZE40));
+ H5MF_xfree(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, addr, (hsize_t)(TBLOCK_SIZE40));
/*
* Free-space info is the same.
* The block is returned to free-space.
 * It is shrunk and freed because it is at the end of the file.
*/
- if(check_stats(f, f->shared->fs_man[type], &state))
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
if(H5Fclose(file) < 0)
@@ -1400,6 +1413,7 @@ error:
/*
+ *-------------------------------------------------------------------------
* To verify that a block allocated from the free-space manager can be extended
*
* Set up:
@@ -1435,10 +1449,7 @@ error:
 * Try to extend the allocated block by 50 from the free-space manager:
 * Fail: section A does not adjoin section B (70+20 != address of section B) even though
 * the requested size (50) is equal to the size of section B (50)
- *
- * Modifications:
- * Vailin Choi; July 2012
- * Initialize the new field "allow_eoa_shrink_only" for user data.
+ *-------------------------------------------------------------------------
*/
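For orientation, the sequence these hunks exercise is sketched below, condensed from the calls that appear in the new test body (internal HDF5 helpers and the TBLOCK_* constants defined earlier in this test file; a sketch, not the exact, standalone test body):

/* Sketch: back a block with free-space section A, then try to grow it into
 * an adjoining section B (names and arguments as used in this patch) */
sect_node1 = H5MF_sect_new(H5MF_FSPACE_SECT_SIMPLE, (haddr_t)TBLOCK_ADDR70, (hsize_t)TBLOCK_SIZE30);
if(H5MF_add_sect(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, f->shared->fs_man[H5FD_MEM_SUPER], sect_node1))
    FAIL_STACK_ERROR
addr = H5MF_alloc(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);    /* returns TBLOCK_ADDR70 */
sect_node2 = H5MF_sect_new(H5MF_FSPACE_SECT_SIMPLE, (haddr_t)TBLOCK_ADDR100, (hsize_t)TBLOCK_SIZE50);
if(H5MF_add_sect(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, f->shared->fs_man[H5FD_MEM_SUPER], sect_node2))
    FAIL_STACK_ERROR
/* Succeeds only when the block ends where section B starts (70 + 30 == 100) */
was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, H5FD_MEM_SUPER, (haddr_t)TBLOCK_ADDR70, (hsize_t)TBLOCK_SIZE30, (hsize_t)TBLOCK_SIZE50);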
static unsigned
test_mf_fs_extend(hid_t fapl)
@@ -1448,13 +1459,11 @@ test_mf_fs_extend(hid_t fapl)
char filename[FILENAME_LEN]; /* Filename to use */
H5F_t *f = NULL; /* Internal file object pointer */
h5_stat_size_t file_size, new_file_size; /* file size */
- H5FD_mem_t type;
H5MF_free_section_t *sect_node1 = NULL, *sect_node2=NULL;
- haddr_t addr;
- frspace_state_t state; /* State of free space*/
- H5MF_sect_ud_t udata;
- H5FS_section_info_t *node;
- htri_t was_extended;
+ haddr_t addr;
+ haddr_t tmp;
+ H5FS_stat_t state; /* State of free space*/
+ htri_t was_extended;
TESTING("H5MF_try_extend() of free-space manager:test 1");
@@ -1487,101 +1496,91 @@ test_mf_fs_extend(hid_t fapl)
if(NULL == (f = (H5F_t *)H5I_object(file)))
FAIL_STACK_ERROR
- type = H5FD_MEM_SUPER;
+ /* Start up H5FD_MEM_SUPER free-space manager */
+ if(H5MF_start_fstype(f, H5AC_ind_read_dxpl_id, (H5F_mem_page_t)H5FD_MEM_SUPER) < 0)
+ FAIL_STACK_ERROR
- if(H5MF_alloc_start(f, H5AC_ind_read_dxpl_id, type) < 0)
+ if(f->shared->fs_state[H5FD_MEM_SUPER] != H5F_FS_STATE_OPEN)
+ TEST_ERROR
+ if(f->shared->fs_man[H5FD_MEM_SUPER]->client != H5FS_CLIENT_FILE_ID)
TEST_ERROR
-
- if (f->shared->fs_state[type] != H5F_FS_STATE_OPEN)
- TEST_ERROR
- if (f->shared->fs_man[type]->client != H5FS_CLIENT_FILE_ID)
- TEST_ERROR
/* Create section A */
- sect_node1 = H5MF_sect_simple_new((haddr_t)TEST_BLOCK_ADDR70, (hsize_t)TEST_BLOCK_SIZE30);
-
- /* Construct user data for callbacks */
- udata.f = f;
- udata.dxpl_id = H5AC_ind_read_dxpl_id;
- udata.alloc_type = type;
- udata.allow_sect_absorb = TRUE;
- udata.allow_eoa_shrink_only = FALSE;
+ sect_node1 = H5MF_sect_new(H5MF_FSPACE_SECT_SIMPLE, (haddr_t)TBLOCK_ADDR70, (hsize_t)TBLOCK_SIZE30);
/* Add section A to free-space manager */
- if (H5FS_sect_add(f, H5AC_ind_read_dxpl_id, f->shared->fs_man[type], (H5FS_section_info_t *)sect_node1, H5FS_ADD_RETURNED_SPACE, &udata))
- FAIL_STACK_ERROR
+ if(H5MF_add_sect(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, f->shared->fs_man[H5FD_MEM_SUPER], sect_node1))
+ FAIL_STACK_ERROR
- HDmemset(&state, 0, sizeof(frspace_state_t));
- state.tot_space += TEST_BLOCK_SIZE30;
+ HDmemset(&state, 0, sizeof(H5FS_stat_t));
+ state.tot_space += TBLOCK_SIZE30;
state.tot_sect_count += 1;
state.serial_sect_count += 1;
- if(check_stats(f, f->shared->fs_man[type], &state))
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
/* Allocate a block of 30 */
- addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ addr = H5MF_alloc(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
/* Verify that the allocated block is section A in free-space manager */
- if (addr != TEST_BLOCK_ADDR70)
- TEST_ERROR
+ if(addr != TBLOCK_ADDR70)
+ TEST_ERROR
- state.tot_space -= TEST_BLOCK_SIZE30;
+ state.tot_space -= TBLOCK_SIZE30;
state.tot_sect_count -= 1;
state.serial_sect_count -= 1;
- if(check_stats(f, f->shared->fs_man[type], &state))
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
/* Create section B */
- sect_node2 = H5MF_sect_simple_new((haddr_t)TEST_BLOCK_ADDR100, (hsize_t)TEST_BLOCK_SIZE50);
+ sect_node2 = H5MF_sect_new(H5MF_FSPACE_SECT_SIMPLE, (haddr_t)TBLOCK_ADDR100, (hsize_t)TBLOCK_SIZE50);
/* Add section B to free-space manager */
- if (H5FS_sect_add(f, H5AC_ind_read_dxpl_id, f->shared->fs_man[type], (H5FS_section_info_t *)sect_node2, H5FS_ADD_RETURNED_SPACE, &udata))
- FAIL_STACK_ERROR
+ if(H5MF_add_sect(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, f->shared->fs_man[H5FD_MEM_SUPER], sect_node2))
+ FAIL_STACK_ERROR
- state.tot_space += TEST_BLOCK_SIZE50;
+ state.tot_space += TBLOCK_SIZE50;
state.tot_sect_count += 1;
state.serial_sect_count += 1;
- if(check_stats(f, f->shared->fs_man[type], &state))
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
/* Try to extend the allocated block */
- was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, type, (haddr_t)TEST_BLOCK_ADDR70, (hsize_t)TEST_BLOCK_SIZE30, (hsize_t)TEST_BLOCK_SIZE50);
+ was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, H5FD_MEM_SUPER, (haddr_t)TBLOCK_ADDR70, (hsize_t)TBLOCK_SIZE30, (hsize_t)TBLOCK_SIZE50);
/* should succeed */
if(was_extended <= 0)
TEST_ERROR
/* Section B is removed from free-space manager */
- state.tot_space -= TEST_BLOCK_SIZE50;
+ state.tot_space -= TBLOCK_SIZE50;
state.tot_sect_count -= 1;
state.serial_sect_count -= 1;
- if(check_stats(f, f->shared->fs_man[type], &state))
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
/* Free the extended block to free-space manager */
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr, (hsize_t)(TEST_BLOCK_SIZE30+TEST_BLOCK_SIZE50));
+ H5MF_xfree(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, addr, (hsize_t)(TBLOCK_SIZE30+TBLOCK_SIZE50));
/* Verify that the extended block is back into free-space */
- state.tot_space += TEST_BLOCK_SIZE30+TEST_BLOCK_SIZE50;
+ state.tot_space += (TBLOCK_SIZE30+TBLOCK_SIZE50);
state.tot_sect_count = 1;
state.serial_sect_count = 1;
- if(check_stats(f, f->shared->fs_man[type], &state))
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
/* Remove the extended block */
- if(H5FS_sect_find(f, H5AC_ind_read_dxpl_id, f->shared->fs_man[type],
- (hsize_t)(TEST_BLOCK_SIZE30+TEST_BLOCK_SIZE50), (H5FS_section_info_t **)&node) < 0)
- TEST_ERROR
-
- /* Remove the free-space section node */
- if(H5MF_sect_simple_free((H5FS_section_info_t *)node) < 0)
- TEST_ERROR
+ if(H5MF_find_sect(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, (hsize_t)(TBLOCK_SIZE30+TBLOCK_SIZE50), f->shared->fs_man[H5FD_MEM_SUPER], &tmp) != TRUE)
+ TEST_ERROR
+ if(tmp != TBLOCK_ADDR70)
+ TEST_ERROR
+
if(H5Fclose(file) < 0)
FAIL_STACK_ERROR
@@ -1605,95 +1604,84 @@ test_mf_fs_extend(hid_t fapl)
if(NULL == (f = (H5F_t *)H5I_object(file)))
FAIL_STACK_ERROR
- type = H5FD_MEM_SUPER;
+ /* Start up H5FD_MEM_SUPER free-space manager */
+ if(H5MF_start_fstype(f, H5AC_ind_read_dxpl_id, (H5F_mem_page_t)H5FD_MEM_SUPER) < 0)
+ FAIL_STACK_ERROR
- if(H5MF_alloc_start(f, H5AC_ind_read_dxpl_id, type) < 0)
+ if(f->shared->fs_state[H5FD_MEM_SUPER] != H5F_FS_STATE_OPEN)
+ TEST_ERROR
+ if(f->shared->fs_man[H5FD_MEM_SUPER]->client != H5FS_CLIENT_FILE_ID)
TEST_ERROR
-
- if (f->shared->fs_state[type] != H5F_FS_STATE_OPEN)
- TEST_ERROR
- if (f->shared->fs_man[type]->client != H5FS_CLIENT_FILE_ID)
- TEST_ERROR
/* Create section A */
- sect_node1 = H5MF_sect_simple_new((haddr_t)TEST_BLOCK_ADDR70, (hsize_t)TEST_BLOCK_SIZE30);
-
- /* Construct user data for callbacks */
- udata.f = f;
- udata.dxpl_id = H5AC_ind_read_dxpl_id;
- udata.alloc_type = type;
- udata.allow_sect_absorb = TRUE;
- udata.allow_eoa_shrink_only = FALSE;
+ sect_node1 = H5MF_sect_new(H5MF_FSPACE_SECT_SIMPLE, (haddr_t)TBLOCK_ADDR70, (hsize_t)TBLOCK_SIZE30);
- /* Add section A to free-space */
- if (H5FS_sect_add(f, H5AC_ind_read_dxpl_id, f->shared->fs_man[type], (H5FS_section_info_t *)sect_node1, H5FS_ADD_RETURNED_SPACE, &udata))
- FAIL_STACK_ERROR
+ /* Add section A to free-space manager */
+ if(H5MF_add_sect(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, f->shared->fs_man[H5FD_MEM_SUPER], sect_node1))
+ FAIL_STACK_ERROR
- HDmemset(&state, 0, sizeof(frspace_state_t));
- state.tot_space += TEST_BLOCK_SIZE30;
+ HDmemset(&state, 0, sizeof(H5FS_stat_t));
+ state.tot_space += TBLOCK_SIZE30;
state.tot_sect_count += 1;
state.serial_sect_count += 1;
- if(check_stats(f, f->shared->fs_man[type], &state))
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
/* Allocate a block of 30 */
- addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ addr = H5MF_alloc(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
/* Verify that the allocated block is section A in free-space manager */
- if (addr != TEST_BLOCK_ADDR70)
- TEST_ERROR
+ if(addr != TBLOCK_ADDR70)
+ TEST_ERROR
- state.tot_space -= TEST_BLOCK_SIZE30;
+ state.tot_space -= TBLOCK_SIZE30;
state.tot_sect_count -= 1;
state.serial_sect_count -= 1;
- if(check_stats(f, f->shared->fs_man[type], &state))
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
/* Create section B */
- sect_node2 = H5MF_sect_simple_new((haddr_t)TEST_BLOCK_ADDR100, (hsize_t)TEST_BLOCK_SIZE50);
+ sect_node2 = H5MF_sect_new(H5MF_FSPACE_SECT_SIMPLE, (haddr_t)TBLOCK_ADDR100, (hsize_t)TBLOCK_SIZE50);
/* Add section B to free-space manager */
- if (H5FS_sect_add(f, H5AC_ind_read_dxpl_id, f->shared->fs_man[type], (H5FS_section_info_t *)sect_node2, H5FS_ADD_RETURNED_SPACE, &udata))
- FAIL_STACK_ERROR
+ if(H5MF_add_sect(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, f->shared->fs_man[H5FD_MEM_SUPER], sect_node2))
+ FAIL_STACK_ERROR
- state.tot_space += TEST_BLOCK_SIZE50;
+ state.tot_space += TBLOCK_SIZE50;
state.tot_sect_count += 1;
state.serial_sect_count += 1;
- if(check_stats(f, f->shared->fs_man[type], &state))
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
/* Try to extend the allocated block */
- was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, type, (haddr_t)TEST_BLOCK_ADDR70, (hsize_t)TEST_BLOCK_SIZE30, (hsize_t)(TEST_BLOCK_SIZE50+10));
+ was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, H5FD_MEM_SUPER, (haddr_t)TBLOCK_ADDR70, (hsize_t)TBLOCK_SIZE30, (hsize_t)(TBLOCK_SIZE50+10));
/* Should not be able to extend the allocated block */
if(was_extended)
TEST_ERROR
/* free-space info should remain the same */
- if(check_stats(f, f->shared->fs_man[type], &state))
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
/* Free the allocated block A to free-space */
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr, (hsize_t)TEST_BLOCK_SIZE30);
+ H5MF_xfree(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, addr, (hsize_t)TBLOCK_SIZE30);
/* the returned section A is merged with section B in free-space */
/* rest of the info remains the same */
- state.tot_space += TEST_BLOCK_SIZE30;
+ state.tot_space += TBLOCK_SIZE30;
- if(check_stats(f, f->shared->fs_man[type], &state))
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
/* Remove the merged sections A & B from free-space */
- if(H5FS_sect_find(f, H5AC_ind_read_dxpl_id, f->shared->fs_man[type],
- (hsize_t)(TEST_BLOCK_SIZE30+TEST_BLOCK_SIZE50), (H5FS_section_info_t **)&node) < 0)
- TEST_ERROR
+ if(H5MF_find_sect(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, (hsize_t)(TBLOCK_SIZE30+TBLOCK_SIZE50), f->shared->fs_man[H5FD_MEM_SUPER], &tmp) != TRUE)
+ TEST_ERROR
- /* Remove the free-space section node */
- if(H5MF_sect_simple_free((H5FS_section_info_t *)node) < 0)
- TEST_ERROR
+ if(tmp != addr) TEST_ERROR
if(H5Fclose(file) < 0)
FAIL_STACK_ERROR
@@ -1718,95 +1706,84 @@ test_mf_fs_extend(hid_t fapl)
if(NULL == (f = (H5F_t *)H5I_object(file)))
FAIL_STACK_ERROR
- type = H5FD_MEM_SUPER;
+ /* Start up H5FD_MEM_SUPER free-space manager */
+ if(H5MF_start_fstype(f, H5AC_ind_read_dxpl_id, (H5F_mem_page_t)H5FD_MEM_SUPER) < 0)
+ FAIL_STACK_ERROR
- if(H5MF_alloc_start(f, H5AC_ind_read_dxpl_id, type) < 0)
+ if(f->shared->fs_state[H5FD_MEM_SUPER] != H5F_FS_STATE_OPEN)
+ TEST_ERROR
+ if(f->shared->fs_man[H5FD_MEM_SUPER]->client != H5FS_CLIENT_FILE_ID)
TEST_ERROR
-
- if (f->shared->fs_state[type] != H5F_FS_STATE_OPEN)
- TEST_ERROR
- if (f->shared->fs_man[type]->client != H5FS_CLIENT_FILE_ID)
- TEST_ERROR
/* Create section A */
- sect_node1 = H5MF_sect_simple_new((haddr_t)TEST_BLOCK_ADDR70, (hsize_t)TEST_BLOCK_SIZE30);
+ sect_node1 = H5MF_sect_new(H5MF_FSPACE_SECT_SIMPLE, (haddr_t)TBLOCK_ADDR70, (hsize_t)TBLOCK_SIZE30);
- /* Construct user data for callbacks */
- udata.f = f;
- udata.dxpl_id = H5AC_ind_read_dxpl_id;
- udata.alloc_type = type;
- udata.allow_sect_absorb = TRUE;
- udata.allow_eoa_shrink_only = FALSE;
-
- /* Add section A to free-space */
- if (H5FS_sect_add(f, H5AC_ind_read_dxpl_id, f->shared->fs_man[type], (H5FS_section_info_t *)sect_node1, H5FS_ADD_RETURNED_SPACE, &udata))
- FAIL_STACK_ERROR
+ /* Add section A to free-space manager */
+ if(H5MF_add_sect(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, f->shared->fs_man[H5FD_MEM_SUPER], sect_node1))
+ FAIL_STACK_ERROR
- HDmemset(&state, 0, sizeof(frspace_state_t));
- state.tot_space += TEST_BLOCK_SIZE30;
+ HDmemset(&state, 0, sizeof(H5FS_stat_t));
+ state.tot_space += TBLOCK_SIZE30;
state.tot_sect_count += 1;
state.serial_sect_count += 1;
- if(check_stats(f, f->shared->fs_man[type], &state))
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
/* Allocate a block of 30 */
- addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ addr = H5MF_alloc(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
/* Verify that the allocated block is section A in free-space manager */
- if (addr != TEST_BLOCK_ADDR70)
- TEST_ERROR
+ if(addr != TBLOCK_ADDR70)
+ TEST_ERROR
- state.tot_space -= TEST_BLOCK_SIZE30;
+ state.tot_space -= TBLOCK_SIZE30;
state.tot_sect_count -= 1;
state.serial_sect_count -= 1;
- if(check_stats(f, f->shared->fs_man[type], &state))
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
/* Create section B */
- sect_node2 = H5MF_sect_simple_new((haddr_t)TEST_BLOCK_ADDR100, (hsize_t)TEST_BLOCK_SIZE50);
+ sect_node2 = H5MF_sect_new(H5MF_FSPACE_SECT_SIMPLE, (haddr_t)TBLOCK_ADDR100, (hsize_t)TBLOCK_SIZE50);
/* Add section B to free-space manager */
- if (H5FS_sect_add(f, H5AC_ind_read_dxpl_id, f->shared->fs_man[type], (H5FS_section_info_t *)sect_node2, H5FS_ADD_RETURNED_SPACE, &udata))
- FAIL_STACK_ERROR
+ if(H5MF_add_sect(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, f->shared->fs_man[H5FD_MEM_SUPER], sect_node2))
+ FAIL_STACK_ERROR
- state.tot_space += TEST_BLOCK_SIZE50;
+ state.tot_space += TBLOCK_SIZE50;
state.tot_sect_count += 1;
state.serial_sect_count += 1;
- if(check_stats(f, f->shared->fs_man[type], &state))
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
/* Try to extend the allocated block */
- was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, type, (haddr_t)TEST_BLOCK_ADDR70, (hsize_t)TEST_BLOCK_SIZE30, (hsize_t)(TEST_BLOCK_SIZE40));
+ was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, H5FD_MEM_SUPER, (haddr_t)TBLOCK_ADDR70, (hsize_t)TBLOCK_SIZE30, (hsize_t)(TBLOCK_SIZE40));
/* Should succeed in extending the allocated block */
if(was_extended <= 0)
TEST_ERROR
/* Should have 1 section of size=10 left in free-space manager */
- state.tot_space -= (TEST_BLOCK_SIZE40);
- if(check_stats(f, f->shared->fs_man[type], &state))
+ state.tot_space -= (TBLOCK_SIZE40);
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
/* Free the extended block */
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr, (hsize_t)(TEST_BLOCK_SIZE30+TEST_BLOCK_SIZE40));
+ H5MF_xfree(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, addr, (hsize_t)(TBLOCK_SIZE30+TBLOCK_SIZE40));
/* rest of the info is the same; the extended section returned is merged with the section in free-space */
- state.tot_space += (TEST_BLOCK_SIZE30+TEST_BLOCK_SIZE40);
+ state.tot_space += (TBLOCK_SIZE30+TBLOCK_SIZE40);
- if(check_stats(f, f->shared->fs_man[type], &state))
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
/* Remove the merged sections A & B from free-space */
- if(H5FS_sect_find(f, H5AC_ind_read_dxpl_id, f->shared->fs_man[type],
- (hsize_t)(TEST_BLOCK_SIZE30+TEST_BLOCK_SIZE50), (H5FS_section_info_t **)&node) < 0)
- TEST_ERROR
-
- /* Remove the free-space section node */
- if(H5MF_sect_simple_free((H5FS_section_info_t *)node) < 0)
- TEST_ERROR
+ if(H5MF_find_sect(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, (hsize_t)(TBLOCK_SIZE30+TBLOCK_SIZE50), f->shared->fs_man[H5FD_MEM_SUPER], &tmp) != TRUE)
+ TEST_ERROR
+
+ if(tmp != addr) TEST_ERROR
if(H5Fclose(file) < 0)
FAIL_STACK_ERROR
@@ -1831,104 +1808,88 @@ test_mf_fs_extend(hid_t fapl)
if(NULL == (f = (H5F_t *)H5I_object(file)))
FAIL_STACK_ERROR
- type = H5FD_MEM_SUPER;
+ /* Start up H5FD_MEM_SUPER free-space manager */
+ if(H5MF_start_fstype(f, H5AC_ind_read_dxpl_id, (H5F_mem_page_t)H5FD_MEM_SUPER) < 0)
+ FAIL_STACK_ERROR
- if(H5MF_alloc_start(f, H5AC_ind_read_dxpl_id, type) < 0)
+ if(f->shared->fs_state[H5FD_MEM_SUPER] != H5F_FS_STATE_OPEN)
+ TEST_ERROR
+ if(f->shared->fs_man[H5FD_MEM_SUPER]->client != H5FS_CLIENT_FILE_ID)
TEST_ERROR
-
- if (f->shared->fs_state[type] != H5F_FS_STATE_OPEN)
- TEST_ERROR
- if (f->shared->fs_man[type]->client != H5FS_CLIENT_FILE_ID)
- TEST_ERROR
/* Create section A */
- sect_node1 = H5MF_sect_simple_new((haddr_t)TEST_BLOCK_ADDR70, (hsize_t)(TEST_BLOCK_SIZE30-10));
-
- /* Construct user data for callbacks */
- udata.f = f;
- udata.dxpl_id = H5AC_ind_read_dxpl_id;
- udata.alloc_type = type;
- udata.allow_sect_absorb = TRUE;
- udata.allow_eoa_shrink_only = FALSE;
+ sect_node1 = H5MF_sect_new(H5MF_FSPACE_SECT_SIMPLE, (haddr_t)TBLOCK_ADDR70, (hsize_t)(TBLOCK_SIZE30-10));
/* Add section A of size=20 to free-space */
- if (H5FS_sect_add(f, H5AC_ind_read_dxpl_id, f->shared->fs_man[type], (H5FS_section_info_t *)sect_node1, H5FS_ADD_RETURNED_SPACE, &udata))
- FAIL_STACK_ERROR
+ if(H5MF_add_sect(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, f->shared->fs_man[H5FD_MEM_SUPER], sect_node1))
+ FAIL_STACK_ERROR
- HDmemset(&state, 0, sizeof(frspace_state_t));
- state.tot_space += (TEST_BLOCK_SIZE30-10);
+ HDmemset(&state, 0, sizeof(H5FS_stat_t));
+ state.tot_space += (TBLOCK_SIZE30-10);
state.tot_sect_count += 1;
state.serial_sect_count += 1;
- if(check_stats(f, f->shared->fs_man[type], &state))
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
/* Allocate a block of size=20 */
- addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)(TEST_BLOCK_SIZE30-10));
+ addr = H5MF_alloc(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, (hsize_t)(TBLOCK_SIZE30-10));
/* Verify that the allocated block is section A in free-space manager */
- if (addr != TEST_BLOCK_ADDR70)
- TEST_ERROR
+ if(addr != TBLOCK_ADDR70)
+ TEST_ERROR
- state.tot_space -= (TEST_BLOCK_SIZE30-10);
+ state.tot_space -= (TBLOCK_SIZE30-10);
state.tot_sect_count -= 1;
state.serial_sect_count -= 1;
- if(check_stats(f, f->shared->fs_man[type], &state))
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
/* Create section B */
- sect_node2 = H5MF_sect_simple_new((haddr_t)TEST_BLOCK_ADDR100, (hsize_t)TEST_BLOCK_SIZE50);
+ sect_node2 = H5MF_sect_new(H5MF_FSPACE_SECT_SIMPLE, (haddr_t)TBLOCK_ADDR100, (hsize_t)TBLOCK_SIZE50);
/* Add section B to free-space manager */
- if (H5FS_sect_add(f, H5AC_ind_read_dxpl_id, f->shared->fs_man[type], (H5FS_section_info_t *)sect_node2, H5FS_ADD_RETURNED_SPACE, &udata))
- FAIL_STACK_ERROR
+ if(H5MF_add_sect(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, f->shared->fs_man[H5FD_MEM_SUPER], sect_node2))
+ FAIL_STACK_ERROR
- state.tot_space += TEST_BLOCK_SIZE50;
+ state.tot_space += TBLOCK_SIZE50;
state.tot_sect_count += 1;
state.serial_sect_count += 1;
- if(check_stats(f, f->shared->fs_man[type], &state))
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
/* Try to extend the allocated block */
- was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, type, (haddr_t)TEST_BLOCK_ADDR70, (hsize_t)(TEST_BLOCK_SIZE30-10), (hsize_t)TEST_BLOCK_SIZE50);
+ was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, H5FD_MEM_SUPER, (haddr_t)TBLOCK_ADDR70, (hsize_t)(TBLOCK_SIZE30-10), (hsize_t)TBLOCK_SIZE50);
/* Should not succeed in extending the allocated block */
if(was_extended)
TEST_ERROR
/* Free-space info should be the same */
- if(check_stats(f, f->shared->fs_man[type], &state))
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
/* Free the allocated block */
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr, (hsize_t)(TEST_BLOCK_SIZE30-10));
+ H5MF_xfree(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, addr, (hsize_t)(TBLOCK_SIZE30-10));
- state.tot_space += (TEST_BLOCK_SIZE30-10);
+ state.tot_space += (TBLOCK_SIZE30-10);
state.tot_sect_count += 1;
state.serial_sect_count += 1;
- if(check_stats(f, f->shared->fs_man[type], &state))
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
/* Remove section A from free-space manager */
- if(H5FS_sect_find(f, H5AC_ind_read_dxpl_id, f->shared->fs_man[type],
- (hsize_t)(TEST_BLOCK_SIZE30-10), (H5FS_section_info_t **)&node) < 0)
- TEST_ERROR
+ if(H5MF_find_sect(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, (hsize_t)(TBLOCK_SIZE30-10), f->shared->fs_man[H5FD_MEM_SUPER], &tmp) != TRUE)
+ TEST_ERROR
- /* Remove the free-space section node */
- if(H5MF_sect_simple_free((H5FS_section_info_t *)node) < 0)
- TEST_ERROR
+ if(tmp != addr) TEST_ERROR
/* Remove section B from free-space manager */
- if(H5FS_sect_find(f, H5AC_ind_read_dxpl_id, f->shared->fs_man[type],
- (hsize_t)TEST_BLOCK_SIZE50, (H5FS_section_info_t **)&node) < 0)
- TEST_ERROR
-
- /* Remove the free-space section node */
- if(H5MF_sect_simple_free((H5FS_section_info_t *)node) < 0)
- TEST_ERROR
+ if(H5MF_find_sect(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE50, f->shared->fs_man[H5FD_MEM_SUPER], &tmp) != TRUE)
+ TEST_ERROR
if(H5Fclose(file) < 0)
FAIL_STACK_ERROR
@@ -1957,6 +1918,7 @@ error:
} /* test_mf_fs_extend() */
/*
+ *-------------------------------------------------------------------------
* To verify that an aggregator is absorbed into a section.
*
 * Test 1: To absorb the aggregator onto the beginning of the section
@@ -1979,25 +1941,20 @@ error:
 * which will absorb meta_aggr into the section:
* section size + remaining size of aggregator is > aggr->alloc_size,
* section is allowed to absorb an aggregator (allow_sect_absorb is true)
- *
- * Modifications:
- * Vailin Choi; July 2012
- * Initialize the new field "allow_eoa_shrink_only" for user data.
+ *-------------------------------------------------------------------------
*/
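To make the "section adjoins the end of the aggregator" case concrete, the check reduces to the sequence below, condensed from the calls in the hunks that follow (internal helpers from this test file; a sketch, not a standalone program):

/* Sketch (test 1): a section starting exactly at meta_aggr's end absorbs
 * the aggregator when it is added to the free-space manager */
addr = H5MF_alloc(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
sect_node = H5MF_sect_new(H5MF_FSPACE_SECT_SIMPLE, (haddr_t)(ma_addr + ma_size), (hsize_t)TBLOCK_SIZE2048);
if(H5MF_add_sect(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, f->shared->fs_man[H5FD_MEM_SUPER], sect_node))
    FAIL_STACK_ERROR
/* The merged section now starts at ma_addr, i.e. it has swallowed the aggregator */
if(H5MF_find_sect(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, (hsize_t)(ma_addr + ma_size), f->shared->fs_man[H5FD_MEM_SUPER], &tmp) != TRUE)
    TEST_ERROR
if(tmp != ma_addr) TEST_ERROR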
static unsigned
test_mf_fs_absorb(const char *env_h5_drvr, hid_t fapl)
{
- hid_t file = -1; /* File ID */
- char filename[FILENAME_LEN]; /* Filename to use */
- H5F_t *f = NULL; /* Internal file object pointer */
- H5FD_mem_t type, stype;
- haddr_t addr, saddr;
- haddr_t ma_addr=HADDR_UNDEF;
- hsize_t ma_size=0;
+ hid_t file = -1; /* File ID */
+ char filename[FILENAME_LEN]; /* Filename to use */
+ H5F_t *f = NULL; /* Internal file object pointer */
+ haddr_t addr, saddr;
+ haddr_t tmp;
+ haddr_t ma_addr=HADDR_UNDEF;
+ hsize_t ma_size=0;
H5MF_free_section_t *sect_node=NULL;
- H5MF_sect_ud_t udata;
- H5FS_section_info_t *node;
- hbool_t contig_addr_vfd; /* Whether VFD used has a contigous address space */
+ hbool_t contig_addr_vfd; /* Whether VFD used has a contiguous address space */
TESTING("A free-space section absorbs an aggregator: test 1");
@@ -2023,53 +1980,39 @@ test_mf_fs_absorb(const char *env_h5_drvr, hid_t fapl)
if(NULL == (f = (H5F_t *)H5I_object(file)))
FAIL_STACK_ERROR
- type = H5FD_MEM_SUPER;
-
- if(H5MF_alloc_start(f, H5AC_ind_read_dxpl_id, type) < 0)
- TEST_ERROR
+ /* Start up H5FD_MEM_SUPER free-space manager */
+ if(H5MF_start_fstype(f, H5AC_ind_read_dxpl_id, (H5F_mem_page_t)H5FD_MEM_SUPER) < 0)
+ FAIL_STACK_ERROR
- if (f->shared->fs_state[type] != H5F_FS_STATE_OPEN)
+ if(f->shared->fs_state[H5FD_MEM_SUPER] != H5F_FS_STATE_OPEN)
TEST_ERROR
- if (f->shared->fs_man[type]->client != H5FS_CLIENT_FILE_ID)
+ if(f->shared->fs_man[H5FD_MEM_SUPER]->client != H5FS_CLIENT_FILE_ID)
TEST_ERROR
/* Allocate a section from meta_aggr */
- addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ addr = H5MF_alloc(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- /* Add a section to free-space that adjoins end of the aggregator */
- sect_node = H5MF_sect_simple_new((haddr_t)(ma_addr+ma_size), (hsize_t)TEST_BLOCK_SIZE2048);
+ /* Create a section */
+ sect_node = H5MF_sect_new(H5MF_FSPACE_SECT_SIMPLE, (haddr_t)(ma_addr+ma_size), (hsize_t)TBLOCK_SIZE2048);
- /* Construct user data for callbacks */
- udata.f = f;
- udata.dxpl_id = H5AC_ind_read_dxpl_id;
- udata.alloc_type = type;
- udata.allow_sect_absorb = TRUE;
- udata.allow_eoa_shrink_only = FALSE;
-
- /* When adding, meta_aggr is absorbed onto the beginning of the section */
- if (H5FS_sect_add(f, H5AC_ind_read_dxpl_id, f->shared->fs_man[type], (H5FS_section_info_t *)sect_node, H5FS_ADD_RETURNED_SPACE, &udata))
- FAIL_STACK_ERROR
+ /* Add a section to free-space that adjoins end of the aggregator */
+ if(H5MF_add_sect(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, f->shared->fs_man[H5FD_MEM_SUPER], sect_node))
+ FAIL_STACK_ERROR
/* Verify that the section did absorb the aggregator */
- if(H5FS_sect_find(f, H5AC_ind_read_dxpl_id, f->shared->fs_man[type],
- (hsize_t)TEST_BLOCK_SIZE2048, (H5FS_section_info_t **)&node) < 0)
+ if(H5MF_find_sect(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, (hsize_t)(ma_addr+ma_size), f->shared->fs_man[H5FD_MEM_SUPER], &tmp) != TRUE)
TEST_ERROR
- if (node->addr != ma_addr) TEST_ERROR
- if (node->size != (ma_size + TEST_BLOCK_SIZE2048)) TEST_ERROR
-
- /* Remove the free-space section node */
- if(H5MF_sect_simple_free((H5FS_section_info_t *)node) < 0)
- TEST_ERROR
+ if(tmp != ma_addr) TEST_ERROR
/* Restore info for aggregator */
f->shared->meta_aggr.addr = ma_addr;
f->shared->meta_aggr.size = ma_size;
/* Remove section from meta_aggr */
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr, (hsize_t)TEST_BLOCK_SIZE30);
+ H5MF_xfree(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, addr, (hsize_t)TBLOCK_SIZE30);
if(H5Fclose(file) < 0)
FAIL_STACK_ERROR
@@ -2093,48 +2036,34 @@ test_mf_fs_absorb(const char *env_h5_drvr, hid_t fapl)
if(NULL == (f = (H5F_t *)H5I_object(file)))
FAIL_STACK_ERROR
- type = H5FD_MEM_SUPER;
-
- if(H5MF_alloc_start(f, H5AC_ind_read_dxpl_id, type) < 0)
- TEST_ERROR
+ /* Start up H5FD_MEM_SUPER free-space manager */
+ if(H5MF_start_fstype(f, H5AC_ind_read_dxpl_id, (H5F_mem_page_t)H5FD_MEM_SUPER) < 0)
+ FAIL_STACK_ERROR
- if (f->shared->fs_state[type] != H5F_FS_STATE_OPEN)
+ if(f->shared->fs_state[H5FD_MEM_SUPER] != H5F_FS_STATE_OPEN)
TEST_ERROR
- if (f->shared->fs_man[type]->client != H5FS_CLIENT_FILE_ID)
+ if(f->shared->fs_man[H5FD_MEM_SUPER]->client != H5FS_CLIENT_FILE_ID)
TEST_ERROR
/* Allocate a section from meta_aggr */
- addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ addr = H5MF_alloc(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
/* Allocate a section from sdata_aggr */
- stype = H5FD_MEM_DRAW;
- saddr = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE50);
+ saddr = H5MF_alloc(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE50);
/* Add a section to free-space that adjoins the beginning of meta_aggr */
- sect_node = H5MF_sect_simple_new((haddr_t)addr, (hsize_t)TEST_BLOCK_SIZE30);
-
- /* Construct user data for callbacks */
- udata.f = f;
- udata.dxpl_id = H5AC_ind_read_dxpl_id;
- udata.alloc_type = type;
- udata.allow_sect_absorb = TRUE;
- udata.allow_eoa_shrink_only = FALSE;
+ sect_node = H5MF_sect_new(H5MF_FSPACE_SECT_SIMPLE, (haddr_t)addr, (hsize_t)TBLOCK_SIZE30);
/* When adding, meta_aggr is absorbed onto the end of the section */
- if (H5FS_sect_add(f, H5AC_ind_read_dxpl_id, f->shared->fs_man[type], (H5FS_section_info_t *)sect_node, H5FS_ADD_RETURNED_SPACE, &udata))
- FAIL_STACK_ERROR
+ if(H5MF_add_sect(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, f->shared->fs_man[H5FD_MEM_SUPER], sect_node))
+ FAIL_STACK_ERROR
/* Verify that the section did absorb the aggregator */
- if(H5FS_sect_find(f, H5AC_ind_read_dxpl_id, f->shared->fs_man[type],
- (hsize_t)(ma_size+TEST_BLOCK_SIZE30), (H5FS_section_info_t **)&node) < 0)
+ if(H5MF_find_sect(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, (hsize_t)(ma_size+TBLOCK_SIZE30), f->shared->fs_man[H5FD_MEM_SUPER], &tmp) != TRUE)
TEST_ERROR
- if ((node->addr + TEST_BLOCK_SIZE30) != ma_addr) TEST_ERROR
- if (node->size != (ma_size + TEST_BLOCK_SIZE30)) TEST_ERROR
-
- /* free the free-space section node */
- if(H5MF_sect_simple_free((H5FS_section_info_t *)node) < 0)
+ if((tmp + TBLOCK_SIZE30) != ma_addr)
TEST_ERROR
/* restore info to meta_aggr */
@@ -2142,9 +2071,9 @@ test_mf_fs_absorb(const char *env_h5_drvr, hid_t fapl)
f->shared->meta_aggr.size = ma_size;
/* Remove section from meta_aggr */
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr, (hsize_t)TEST_BLOCK_SIZE30);
+ H5MF_xfree(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, addr, (hsize_t)TBLOCK_SIZE30);
/* Remove section from sdata_aggr */
- H5MF_xfree(f, stype, H5AC_ind_read_dxpl_id, saddr, (hsize_t)TEST_BLOCK_SIZE50);
+ H5MF_xfree(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, saddr, (hsize_t)TBLOCK_SIZE50);
if(H5Fclose(file) < 0)
FAIL_STACK_ERROR
@@ -2166,6 +2095,7 @@ error:
} /* test_mf_fs_absorb() */
/*
+ *-------------------------------------------------------------------------
* To verify that blocks are allocated from the aggregator
*
* Allocate first block (30) from meta_aggr: (nothing in the aggregator)
@@ -2180,6 +2110,7 @@ error:
* Result:
* The second block of 50 is allocated from meta_aggr
* There is space of 1968 left in meta_aggr
+ *-------------------------------------------------------------------------
*/
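The space accounting asserted in this test is plain arithmetic over the aggregator's allocation size (assumed to be 2048 here, matching the TBLOCK_SIZE2048 checks in the hunk below):

/* Worked accounting for test 1:
 *   after block 1 (30):  left in meta_aggr = 2048 - 30        = 2018
 *   after block 2 (50):  left in meta_aggr = 2048 - (30 + 50) = 1968
 * which is exactly the ma_size condition checked below. */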
static unsigned
test_mf_aggr_alloc1(const char *env_h5_drvr, hid_t fapl)
@@ -2187,6 +2118,7 @@ test_mf_aggr_alloc1(const char *env_h5_drvr, hid_t fapl)
hid_t file = -1; /* File ID */
char filename[FILENAME_LEN]; /* Filename to use */
H5F_t *f = NULL; /* Internal file object pointer */
+ hid_t fcpl; /* File creation property list */
h5_stat_size_t file_size, new_file_size; /* file size */
H5FD_mem_t type;
haddr_t addr1, addr2;
@@ -2202,13 +2134,21 @@ test_mf_aggr_alloc1(const char *env_h5_drvr, hid_t fapl)
/* Set the filename to use for this test (dependent on fapl) */
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
+ /* File creation property list template */
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ TEST_ERROR
+
+ /* Set to H5F_FSPACE_STRATEGY_AGGR strategy */
+ if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_AGGR, FALSE, (hsize_t)1) < 0)
+ TEST_ERROR
+
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl)) < 0)
FAIL_STACK_ERROR
/* Close file */
if(H5Fclose(file) < 0)
- FAIL_STACK_ERROR
+ TEST_ERROR
/* Get the size of a file */
if((file_size = h5_get_file_size(filename, fapl)) < 0)
@@ -2216,54 +2156,58 @@ test_mf_aggr_alloc1(const char *env_h5_drvr, hid_t fapl)
/* Re-open the file */
if((file = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0)
- FAIL_STACK_ERROR
+ TEST_ERROR
/* Get a pointer to the internal file object */
if(NULL == (f = (H5F_t *)H5I_object(file)))
- FAIL_STACK_ERROR
+ TEST_ERROR
/* Allocate first block from meta_aggr */
type = H5FD_MEM_SUPER;
- addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ if((addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30)) == HADDR_UNDEF)
+ TEST_ERROR
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- if ((addr1+TEST_BLOCK_SIZE30) != ma_addr)
+ if((addr1+TBLOCK_SIZE30) != ma_addr)
TEST_ERROR
/* Allocate second block from meta_aggr */
- addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE50);
+ if((addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE50)) == HADDR_UNDEF)
+ TEST_ERROR
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- if ((addr2+TEST_BLOCK_SIZE50) != ma_addr)
+ if((addr2+TBLOCK_SIZE50) != ma_addr)
TEST_ERROR
- if (ma_size != (TEST_BLOCK_SIZE2048 - (TEST_BLOCK_SIZE30 + TEST_BLOCK_SIZE50)))
+ if(ma_size != (TBLOCK_SIZE2048 - (TBLOCK_SIZE30 + TBLOCK_SIZE50)))
TEST_ERROR
if(H5Fclose(file) < 0)
- FAIL_STACK_ERROR
+ TEST_ERROR
/* Get the size of the file */
if((new_file_size = h5_get_file_size(filename, fapl)) < 0)
TEST_ERROR
/* Verify the file is the correct size */
- if (new_file_size != (file_size+TEST_BLOCK_SIZE30+TEST_BLOCK_SIZE50))
+ if(new_file_size != (file_size+TBLOCK_SIZE30+TBLOCK_SIZE50))
TEST_ERROR
/* Re-open the file */
if((file = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0)
- FAIL_STACK_ERROR
+ TEST_ERROR
/* Get a pointer to the internal file object */
if(NULL == (f = (H5F_t *)H5I_object(file)))
- FAIL_STACK_ERROR
+ TEST_ERROR
- /* Free the two blocks */
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TEST_BLOCK_SIZE30);
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr2, (hsize_t)TEST_BLOCK_SIZE50);
+ /* Free the two blocks: order matters because of H5F_FSPACE_STRATEGY_AGGR strategy */
+ if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr2, (hsize_t)TBLOCK_SIZE50) < 0)
+ TEST_ERROR
+ if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TBLOCK_SIZE30) < 0)
+ TEST_ERROR
if(H5Fclose(file) < 0)
- FAIL_STACK_ERROR
+ TEST_ERROR
/* Get the size of the file */
if((new_file_size = h5_get_file_size(filename, fapl)) < 0)
@@ -2273,6 +2217,9 @@ test_mf_aggr_alloc1(const char *env_h5_drvr, hid_t fapl)
if (new_file_size != file_size)
TEST_ERROR
+ if(H5Pclose(fcpl) < 0)
+ TEST_ERROR
+
PASSED()
} /* end if */
else {
@@ -2285,11 +2232,13 @@ test_mf_aggr_alloc1(const char *env_h5_drvr, hid_t fapl)
error:
H5E_BEGIN_TRY {
H5Fclose(file);
+ H5Pclose(fcpl);
} H5E_END_TRY;
return(1);
} /* test_mf_aggr_alloc1() */
/*
+ *-------------------------------------------------------------------------
* To verify that blocks are allocated from the aggregator
*
* Allocate first block (30) from meta_aggr: (nothing in the aggregator)
@@ -2311,6 +2260,7 @@ error:
* A block of request-size is extended via file allocation and is merged with meta_aggr
* The block of 2058 is allocated out of meta_aggr
* There is space of 1968 left in meta_aggr
+ *-------------------------------------------------------------------------
*/
static unsigned
test_mf_aggr_alloc2(const char *env_h5_drvr, hid_t fapl)
@@ -2354,29 +2304,29 @@ test_mf_aggr_alloc2(const char *env_h5_drvr, hid_t fapl)
FAIL_STACK_ERROR
type = H5FD_MEM_SUPER;
- addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- if ((addr1+TEST_BLOCK_SIZE30) != ma_addr)
+ if((addr1+TBLOCK_SIZE30) != ma_addr)
TEST_ERROR
- addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE50);
+ addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE50);
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- if ((addr2+TEST_BLOCK_SIZE50) != ma_addr)
+ if((addr2+TBLOCK_SIZE50) != ma_addr)
TEST_ERROR
- if (ma_size != (TEST_BLOCK_SIZE2048 - (TEST_BLOCK_SIZE30 + TEST_BLOCK_SIZE50)))
+ if (ma_size != (TBLOCK_SIZE2048 - (TBLOCK_SIZE30 + TBLOCK_SIZE50)))
TEST_ERROR
- addr3 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE2058);
+ addr3 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE2058);
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- if ((addr3+TEST_BLOCK_SIZE2058) != ma_addr)
+ if((addr3+TBLOCK_SIZE2058) != ma_addr)
TEST_ERROR
- if (ma_size != (TEST_BLOCK_SIZE2048 - (TEST_BLOCK_SIZE30 + TEST_BLOCK_SIZE50)))
+ if(ma_size != (TBLOCK_SIZE2048 - (TBLOCK_SIZE30 + TBLOCK_SIZE50)))
TEST_ERROR
if(H5Fclose(file) < 0)
@@ -2388,7 +2338,7 @@ test_mf_aggr_alloc2(const char *env_h5_drvr, hid_t fapl)
/* Verify the file is the correct size */
/* Unused space is freed from the end of the file */
- if (new_file_size != (file_size+TEST_BLOCK_SIZE30+TEST_BLOCK_SIZE50+TEST_BLOCK_SIZE2058))
+ if(new_file_size != (file_size+TBLOCK_SIZE30+TBLOCK_SIZE50+TBLOCK_SIZE2058))
TEST_ERROR
/* Re-open the file */
@@ -2399,7 +2349,7 @@ test_mf_aggr_alloc2(const char *env_h5_drvr, hid_t fapl)
if(NULL == (f = (H5F_t *)H5I_object(file)))
FAIL_STACK_ERROR
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TEST_BLOCK_SIZE30+TEST_BLOCK_SIZE50+TEST_BLOCK_SIZE2058);
+ H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TBLOCK_SIZE30+TBLOCK_SIZE50+TBLOCK_SIZE2058);
if(H5Fclose(file) < 0)
FAIL_STACK_ERROR
@@ -2429,6 +2379,7 @@ error:
} /* test_mf_aggr_alloc2() */
/*
+ *-------------------------------------------------------------------------
* To verify that blocks are allocated from the aggregator
*
* Allocate first block (30) from meta_aggr : (nothing in the aggregator)
@@ -2463,6 +2414,7 @@ error:
* Result:
* The fourth block of 50 is allocated from what is left in meta_aggr
* There is space of 1968 left in meta_aggr
+ *-------------------------------------------------------------------------
*/
static unsigned
test_mf_aggr_alloc3(const char *env_h5_drvr, hid_t fapl)
@@ -2509,55 +2461,55 @@ test_mf_aggr_alloc3(const char *env_h5_drvr, hid_t fapl)
/* Allocate first block from meta_aggr */
type = H5FD_MEM_SUPER;
- addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- if ((addr1+TEST_BLOCK_SIZE30) != ma_addr)
+ if ((addr1+TBLOCK_SIZE30) != ma_addr)
TEST_ERROR
/* Allocate second block from meta_aggr */
- addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE50);
+ addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE50);
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- if ((addr2+TEST_BLOCK_SIZE50) != ma_addr)
+ if((addr2+TBLOCK_SIZE50) != ma_addr)
TEST_ERROR
- if (ma_size != (TEST_BLOCK_SIZE2048 - (TEST_BLOCK_SIZE30 + TEST_BLOCK_SIZE50)))
+ if(ma_size != (TBLOCK_SIZE2048 - (TBLOCK_SIZE30 + TBLOCK_SIZE50)))
TEST_ERROR
/* Allocate first block from sdata_aggr */
stype = H5FD_MEM_DRAW;
- saddr1 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ saddr1 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
H5MF_aggr_query(f, &(f->shared->sdata_aggr), &sdata_addr, &sdata_size);
- if ((saddr1+TEST_BLOCK_SIZE30) != sdata_addr)
+ if((saddr1+TBLOCK_SIZE30) != sdata_addr)
TEST_ERROR
- if (sdata_size != (TEST_BLOCK_SIZE2048 - TEST_BLOCK_SIZE30)) TEST_ERROR
+ if(sdata_size != (TBLOCK_SIZE2048 - TBLOCK_SIZE30)) TEST_ERROR
/* Allocate third block, which is from file allocation not from meta_aggr */
- addr3 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)(TEST_BLOCK_SIZE2058));
+ addr3 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)(TBLOCK_SIZE2058));
H5MF_aggr_query(f, &(f->shared->meta_aggr), &new_ma_addr, &new_ma_size);
/* info for meta_aggr shouldn't be changed */
- if (addr3 != (sdata_addr+sdata_size)) TEST_ERROR
- if ((addr3+TEST_BLOCK_SIZE2058) == new_ma_addr) TEST_ERROR
- if ((new_ma_addr != ma_addr) || (new_ma_size != ma_size)) TEST_ERROR
+ if(addr3 != (sdata_addr+sdata_size)) TEST_ERROR
+ if((addr3+TBLOCK_SIZE2058) == new_ma_addr) TEST_ERROR
+ if((new_ma_addr != ma_addr) || (new_ma_size != ma_size)) TEST_ERROR
/* Allocate fourth block, which should be from meta_aggr */
- addr4 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE50);
+ addr4 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE50);
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- if ((addr4+TEST_BLOCK_SIZE50) != ma_addr)
+ if((addr4+TBLOCK_SIZE50) != ma_addr)
TEST_ERROR
- if (ma_size != (TEST_BLOCK_SIZE2048 - (TEST_BLOCK_SIZE30 + TEST_BLOCK_SIZE50 + TEST_BLOCK_SIZE50)))
+ if(ma_size != (TBLOCK_SIZE2048 - (TBLOCK_SIZE30 + TBLOCK_SIZE50 + TBLOCK_SIZE50)))
TEST_ERROR
/* Free all the allocated blocks */
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TEST_BLOCK_SIZE30);
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr2, (hsize_t)TEST_BLOCK_SIZE50);
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr3, (hsize_t)TEST_BLOCK_SIZE2058);
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr4, (hsize_t)TEST_BLOCK_SIZE50);
- H5MF_xfree(f, stype, H5AC_ind_read_dxpl_id, saddr1, (hsize_t)TEST_BLOCK_SIZE30);
+ H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TBLOCK_SIZE30);
+ H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr2, (hsize_t)TBLOCK_SIZE50);
+ H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr3, (hsize_t)TBLOCK_SIZE2058);
+ H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr4, (hsize_t)TBLOCK_SIZE50);
+ H5MF_xfree(f, stype, H5AC_ind_read_dxpl_id, saddr1, (hsize_t)TBLOCK_SIZE30);
if(H5Fclose(file) < 0)
FAIL_STACK_ERROR
@@ -2567,7 +2519,7 @@ test_mf_aggr_alloc3(const char *env_h5_drvr, hid_t fapl)
TEST_ERROR
/* Verify the file is the correct size */
- if (new_file_size != file_size)
+ if(new_file_size != file_size)
TEST_ERROR
PASSED()
@@ -2588,6 +2540,7 @@ error:
/*
+ *-------------------------------------------------------------------------
* To verify that blocks are allocated from the aggregator
*
* Allocate first block (30) from meta_aggr: (nothing in the aggregator)
@@ -2626,6 +2579,7 @@ error:
* The new block's address is returned
* The block does not adjoin meta_aggr
* meta_aggr's info is unchanged
+ *-------------------------------------------------------------------------
*/
static unsigned
test_mf_aggr_alloc4(const char *env_h5_drvr, hid_t fapl)
@@ -2670,59 +2624,59 @@ test_mf_aggr_alloc4(const char *env_h5_drvr, hid_t fapl)
/* Allocate first block from meta_aggr */
type = H5FD_MEM_SUPER;
- addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- if ((addr1+TEST_BLOCK_SIZE30) != ma_addr)
+ if((addr1+TBLOCK_SIZE30) != ma_addr)
TEST_ERROR
- if (ma_size != (TEST_BLOCK_SIZE2048 - TEST_BLOCK_SIZE30))
+ if(ma_size != (TBLOCK_SIZE2048 - TBLOCK_SIZE30))
TEST_ERROR
/* Allocate first block from sdata_aggr */
stype = H5FD_MEM_DRAW;
- saddr1 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ saddr1 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
H5MF_aggr_query(f, &(f->shared->sdata_aggr), &sdata_addr, &sdata_size);
- if ((saddr1+TEST_BLOCK_SIZE30) != sdata_addr)
+ if((saddr1+TBLOCK_SIZE30) != sdata_addr)
TEST_ERROR
/* Allocate second block from sdata_aggr */
- saddr2 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)(TEST_BLOCK_SIZE2048 - TEST_BLOCK_SIZE30));
+ saddr2 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)(TBLOCK_SIZE2048 - TBLOCK_SIZE30));
H5MF_aggr_query(f, &(f->shared->sdata_aggr), &sdata_addr, &sdata_size);
- if (saddr2+(TEST_BLOCK_SIZE2048 - TEST_BLOCK_SIZE30) != sdata_addr)
+ if(saddr2+(TBLOCK_SIZE2048 - TBLOCK_SIZE30) != sdata_addr)
TEST_ERROR
/* Allocate third block from sdata_aggr */
- saddr3 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE50);
+ saddr3 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE50);
H5MF_aggr_query(f, &(f->shared->sdata_aggr), &sdata_addr, &sdata_size);
- if ((saddr3+TEST_BLOCK_SIZE50) != sdata_addr)
+ if((saddr3+TBLOCK_SIZE50) != sdata_addr)
TEST_ERROR
- if(sdata_size != (TEST_BLOCK_SIZE2048 - TEST_BLOCK_SIZE50))
+ if(sdata_size != (TBLOCK_SIZE2048 - TBLOCK_SIZE50))
TEST_ERROR
/* Allocate second block of 2058, which is from file allocation, not from meta_aggr */
- addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE2058);
+ addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE2058);
- if (addr2 != sdata_addr)
+ if(addr2 != sdata_addr)
TEST_ERROR
/* sdata_aggr is reset to 0 */
H5MF_aggr_query(f, &(f->shared->sdata_aggr), &sdata_addr, &sdata_size);
- if ((sdata_addr != 0) && (sdata_size != 0))
+ if((sdata_addr != 0) && (sdata_size != 0))
TEST_ERROR
/* info is unchanged in meta_aggr */
H5MF_aggr_query(f, &(f->shared->meta_aggr), &new_ma_addr, &new_ma_size);
- if ((new_ma_addr != ma_addr) && (new_ma_size != ma_size))
+ if((new_ma_addr != ma_addr) && (new_ma_size != ma_size))
TEST_ERROR
/* Free all the allocated blocks */
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TEST_BLOCK_SIZE30);
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr2, (hsize_t)TEST_BLOCK_SIZE2058);
- H5MF_xfree(f, stype, H5AC_ind_read_dxpl_id, saddr1, (hsize_t)TEST_BLOCK_SIZE30);
- H5MF_xfree(f, stype, H5AC_ind_read_dxpl_id, saddr2, (hsize_t)TEST_BLOCK_SIZE2048 - TEST_BLOCK_SIZE30);
- H5MF_xfree(f, stype, H5AC_ind_read_dxpl_id, saddr3, (hsize_t)TEST_BLOCK_SIZE50);
+ H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TBLOCK_SIZE30);
+ H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr2, (hsize_t)TBLOCK_SIZE2058);
+ H5MF_xfree(f, stype, H5AC_ind_read_dxpl_id, saddr1, (hsize_t)TBLOCK_SIZE30);
+ H5MF_xfree(f, stype, H5AC_ind_read_dxpl_id, saddr2, (hsize_t)TBLOCK_SIZE2048 - TBLOCK_SIZE30);
+ H5MF_xfree(f, stype, H5AC_ind_read_dxpl_id, saddr3, (hsize_t)TBLOCK_SIZE50);
if(H5Fclose(file) < 0)
FAIL_STACK_ERROR
@@ -2732,7 +2686,7 @@ test_mf_aggr_alloc4(const char *env_h5_drvr, hid_t fapl)
TEST_ERROR
/* Verify the file is the correct size */
- if (new_file_size != file_size)
+ if(new_file_size != file_size)
TEST_ERROR
PASSED()
@@ -2752,6 +2706,7 @@ error:
} /* test_mf_aggr_alloc4() */
/*
+ *-------------------------------------------------------------------------
* To verify that blocks are allocated from the aggregator
*
* Allocate first block (30) from meta_aggr: (nothing in the aggregator)
@@ -2772,6 +2727,7 @@ error:
* The block of 1970 is allocated from there
* There is space of 2046 left in meta_aggr
*
+ *-------------------------------------------------------------------------
*/
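The third allocation is the interesting case here: the request no longer fits in the aggregator, but the freshly file-allocated space adjoins it and is merged. With the aggregator's allocation size assumed to be 2048 (as in the checks below), the accounting is:

/* Worked accounting for the third request (1970):
 *   left after blocks 1 and 2:  2048 - (30 + 50)   = 1968
 *   1970 > 1968, so another 2048 is obtained via file allocation and,
 *   because it adjoins the aggregator, is merged before 1970 is carved out:
 *   left in meta_aggr:          1968 + 2048 - 1970 = 2046
 * i.e. TBLOCK_SIZE2048 - (TBLOCK_SIZE1970 - ma_size), as asserted below. */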
static unsigned
test_mf_aggr_alloc5(const char *env_h5_drvr, hid_t fapl)
@@ -2816,34 +2772,34 @@ test_mf_aggr_alloc5(const char *env_h5_drvr, hid_t fapl)
/* Allocate first block from meta_aggr */
type = H5FD_MEM_SUPER;
- addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- if ((addr1+TEST_BLOCK_SIZE30) != ma_addr)
+ if((addr1+TBLOCK_SIZE30) != ma_addr)
TEST_ERROR
/* Allocate second block from meta_aggr */
- addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE50);
+ addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE50);
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- if (addr2+TEST_BLOCK_SIZE50 != ma_addr)
+ if(addr2+TBLOCK_SIZE50 != ma_addr)
TEST_ERROR
- if (ma_size != (TEST_BLOCK_SIZE2048 - (TEST_BLOCK_SIZE30+TEST_BLOCK_SIZE50)))
+ if(ma_size != (TBLOCK_SIZE2048 - (TBLOCK_SIZE30+TBLOCK_SIZE50)))
TEST_ERROR
/* Allocate third block from meta_aggr */
- addr3 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE1970);
+ addr3 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE1970);
H5MF_aggr_query(f, &(f->shared->meta_aggr), &new_ma_addr, &new_ma_size);
- if (addr3 != ma_addr) TEST_ERROR
- if ((addr3+TEST_BLOCK_SIZE1970) != new_ma_addr) TEST_ERROR
- if (new_ma_size != (TEST_BLOCK_SIZE2048 - (TEST_BLOCK_SIZE1970 - ma_size)))
+ if(addr3 != ma_addr) TEST_ERROR
+ if((addr3+TBLOCK_SIZE1970) != new_ma_addr) TEST_ERROR
+ if(new_ma_size != (TBLOCK_SIZE2048 - (TBLOCK_SIZE1970 - ma_size)))
TEST_ERROR
/* Free all the allocated blocks */
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TEST_BLOCK_SIZE30);
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr2, (hsize_t)TEST_BLOCK_SIZE50);
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr3, (hsize_t)TEST_BLOCK_SIZE1970);
+ H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TBLOCK_SIZE30);
+ H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr2, (hsize_t)TBLOCK_SIZE50);
+ H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr3, (hsize_t)TBLOCK_SIZE1970);
if(H5Fclose(file) < 0)
FAIL_STACK_ERROR
@@ -2853,7 +2809,7 @@ test_mf_aggr_alloc5(const char *env_h5_drvr, hid_t fapl)
TEST_ERROR
/* Verify the file is the correct size */
- if (new_file_size != file_size)
+ if(new_file_size != file_size)
TEST_ERROR
PASSED()
@@ -2873,6 +2829,7 @@ error:
} /* test_mf_aggr_alloc5() */
/*
+ *-------------------------------------------------------------------------
* To verify that blocks are allocated from the aggregator
*
* Allocate first block (30) from meta_aggr: (nothing in the aggregator)
@@ -2903,6 +2860,7 @@ error:
* sdata_aggr is untouched
* meta_aggr's unused space of [880, 1968] is freed to free-space
* meta_aggr is updated to point to the new block
+ *-------------------------------------------------------------------------
*/
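This test is the non-adjoining counterpart of the previous one: an sdata_aggr block now sits at the end of the file, so the freshly file-allocated space cannot merge with meta_aggr and the aggregator's leftover space is released to the free-space manager instead. With the allocation size assumed to be 2048 (matching the checks below):

/* Worked accounting for the third request (1970), non-adjoining case:
 *   left after blocks 1 and 2:  2048 - (30 + 50) = 1968
 *   1970 > 1968 and the new 2048 block does not adjoin meta_aggr, so the
 *   1968 is freed to the free-space manager (the check_stats state below)
 *   and the aggregator moves to the new block:
 *   left in meta_aggr:          2048 - 1970      = 78
 * i.e. TBLOCK_SIZE2048 - TBLOCK_SIZE1970, as asserted below. */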
static unsigned
test_mf_aggr_alloc6(const char *env_h5_drvr, hid_t fapl)
@@ -2915,7 +2873,7 @@ test_mf_aggr_alloc6(const char *env_h5_drvr, hid_t fapl)
haddr_t addr1, addr2, addr3, saddr1;
haddr_t ma_addr=HADDR_UNDEF, new_ma_addr=HADDR_UNDEF, sdata_addr=HADDR_UNDEF;
hsize_t ma_size=0, new_ma_size=0, sdata_size=0;
- frspace_state_t state;
+ H5FS_stat_t state;
hbool_t contig_addr_vfd; /* Whether VFD used has a contiguous address space */
TESTING("H5MF_alloc() of meta/sdata aggregator:test 6");
@@ -2949,44 +2907,44 @@ test_mf_aggr_alloc6(const char *env_h5_drvr, hid_t fapl)
type = H5FD_MEM_SUPER;
/* Allocate first block from meta_aggr */
- addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- if ((addr1+TEST_BLOCK_SIZE30) != ma_addr)
+ if((addr1+TBLOCK_SIZE30) != ma_addr)
TEST_ERROR
/* Allocate second block from meta_aggr */
- addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE50);
+ addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE50);
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- if (addr2+TEST_BLOCK_SIZE50 != ma_addr)
+ if(addr2+TBLOCK_SIZE50 != ma_addr)
TEST_ERROR
- if (ma_size != (TEST_BLOCK_SIZE2048 - (TEST_BLOCK_SIZE30 + TEST_BLOCK_SIZE50)))
+ if(ma_size != (TBLOCK_SIZE2048 - (TBLOCK_SIZE30 + TBLOCK_SIZE50)))
TEST_ERROR
/* Allocate first block from sdata_aggr */
stype = H5FD_MEM_DRAW;
- saddr1 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ saddr1 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
H5MF_aggr_query(f, &(f->shared->sdata_aggr), &sdata_addr, &sdata_size);
- if ((saddr1+TEST_BLOCK_SIZE30) != sdata_addr) TEST_ERROR
- if (sdata_size != (TEST_BLOCK_SIZE2048 - TEST_BLOCK_SIZE30)) TEST_ERROR
+ if((saddr1+TBLOCK_SIZE30) != sdata_addr) TEST_ERROR
+ if(sdata_size != (TBLOCK_SIZE2048 - TBLOCK_SIZE30)) TEST_ERROR
/* Allocate third block from meta_aggr */
- addr3 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE1970);
+ addr3 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE1970);
H5MF_aggr_query(f, &(f->shared->meta_aggr), &new_ma_addr, &new_ma_size);
- if ((addr3+TEST_BLOCK_SIZE1970) != new_ma_addr) TEST_ERROR
- if (addr3 != (sdata_addr+sdata_size)) TEST_ERROR
+ if((addr3+TBLOCK_SIZE1970) != new_ma_addr) TEST_ERROR
+ if(addr3 != (sdata_addr+sdata_size)) TEST_ERROR
- if ((ma_addr+TEST_BLOCK_SIZE1970) == new_ma_addr) TEST_ERROR
- if (new_ma_size != (TEST_BLOCK_SIZE2048 - TEST_BLOCK_SIZE1970))
+ if((ma_addr+TBLOCK_SIZE1970) == new_ma_addr) TEST_ERROR
+ if(new_ma_size != (TBLOCK_SIZE2048 - TBLOCK_SIZE1970))
TEST_ERROR
/* Verify that meta_aggr's unused space of 1968 is freed to free-space */
- HDmemset(&state, 0, sizeof(frspace_state_t));
- state.tot_space += (TEST_BLOCK_SIZE2048 - (TEST_BLOCK_SIZE30+TEST_BLOCK_SIZE50));
+ HDmemset(&state, 0, sizeof(H5FS_stat_t));
+ state.tot_space += (TBLOCK_SIZE2048 - (TBLOCK_SIZE30+TBLOCK_SIZE50));
state.tot_sect_count += 1;
state.serial_sect_count += 1;
@@ -2994,10 +2952,10 @@ test_mf_aggr_alloc6(const char *env_h5_drvr, hid_t fapl)
TEST_ERROR
/* Free all the allocated blocks */
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TEST_BLOCK_SIZE30);
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr2, (hsize_t)TEST_BLOCK_SIZE50);
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr3, (hsize_t)TEST_BLOCK_SIZE1970);
- H5MF_xfree(f, stype, H5AC_ind_read_dxpl_id, saddr1, (hsize_t)TEST_BLOCK_SIZE30);
+ H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TBLOCK_SIZE30);
+ H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr2, (hsize_t)TBLOCK_SIZE50);
+ H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr3, (hsize_t)TBLOCK_SIZE1970);
+ H5MF_xfree(f, stype, H5AC_ind_read_dxpl_id, saddr1, (hsize_t)TBLOCK_SIZE30);
if(H5Fclose(file) < 0)
FAIL_STACK_ERROR
@@ -3007,7 +2965,7 @@ test_mf_aggr_alloc6(const char *env_h5_drvr, hid_t fapl)
TEST_ERROR
/* Verify the file is the correct size */
- if (new_file_size != file_size)
+ if(new_file_size != file_size)
TEST_ERROR
PASSED()
@@ -3027,13 +2985,14 @@ error:
} /* test_mf_aggr_alloc6() */
/*
+ *-------------------------------------------------------------------------
* To verify that blocks are allocated from the aggregator
*
- * Allocate first block (30) from meta_aggr: (nothing in meta_aggr)
- * request-size is > what is left in meta_aggr and < meta_aggr->alloc_size
- * Result:
- * A block of meta_aggr->alloc_size is obtained via file allocation
- * The first block of 30 is allocated from there
+ * Allocate first block (30) from meta_aggr: (nothing in meta_aggr)
+ * request-size is > what is left in meta_aggr and < meta_aggr->alloc_size
+ * Result:
+ * A block of meta_aggr->alloc_size is obtained via file allocation
+ * The first block of 30 is allocated from there
*
* Allocate second block (50) from meta_aggr:
* request-size is <= what is left in meta_aggr
@@ -3068,6 +3027,7 @@ error:
* The block does not adjoin meta_aggr
* meta_aggr's unused space of [880, 1968] is freed to free-space
* meta_aggr is updated to point to the new block
+ *-------------------------------------------------------------------------
*/
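/*
 * Editor's sketch (not part of the patch): the core pattern the aggregator
 * tests above and below exercise, assuming this test file's context (an open
 * internal file pointer `f` and the TBLOCK_SIZE* macros defined earlier);
 * the helper name is illustrative only.
 */
static int
sketch_aggr_alloc(H5F_t *f)
{
    haddr_t blk, ma_addr = HADDR_UNDEF;
    hsize_t ma_size = 0;

    /* Request a small metadata block; meta_aggr grows by its alloc_size (2048)
     * via file allocation when it cannot satisfy the request directly */
    blk = H5MF_alloc(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);

    /* The aggregator should now start immediately after the returned block */
    H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
    if(blk + TBLOCK_SIZE30 != ma_addr)
        return -1;

    /* Hand the block back so the file can shrink to its original size */
    H5MF_xfree(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, blk, (hsize_t)TBLOCK_SIZE30);
    return 0;
}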
static unsigned
test_mf_aggr_alloc7(const char *env_h5_drvr, hid_t fapl)
@@ -3080,7 +3040,7 @@ test_mf_aggr_alloc7(const char *env_h5_drvr, hid_t fapl)
haddr_t addr1, addr2, addr3, saddr1, saddr2, saddr3;
haddr_t ma_addr=HADDR_UNDEF, sdata_addr=HADDR_UNDEF;
hsize_t ma_size=0, sdata_size=0;
- frspace_state_t state;
+ H5FS_stat_t state;
hbool_t contig_addr_vfd; /* Whether VFD used has a contiguous address space */
TESTING("H5MF_alloc() of meta/sdata aggregator:test 7");
@@ -3113,51 +3073,51 @@ test_mf_aggr_alloc7(const char *env_h5_drvr, hid_t fapl)
/* Allocate the first block from meta_aggr */
type = H5FD_MEM_SUPER;
- addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- if ((addr1+TEST_BLOCK_SIZE30) != ma_addr)
+ if ((addr1+TBLOCK_SIZE30) != ma_addr)
TEST_ERROR
/* Allocate the second block from meta_aggr */
- addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE50);
+ addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE50);
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- if (addr2+TEST_BLOCK_SIZE50 != ma_addr)
+ if (addr2+TBLOCK_SIZE50 != ma_addr)
TEST_ERROR
- if (ma_size != (TEST_BLOCK_SIZE2048 - (TEST_BLOCK_SIZE30 + TEST_BLOCK_SIZE50)))
+ if (ma_size != (TBLOCK_SIZE2048 - (TBLOCK_SIZE30 + TBLOCK_SIZE50)))
TEST_ERROR
/* Allocate the first block from sdata_aggr */
stype = H5FD_MEM_DRAW;
- saddr1 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ saddr1 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
H5MF_aggr_query(f, &(f->shared->sdata_aggr), &sdata_addr, &sdata_size);
- if ((saddr1+TEST_BLOCK_SIZE30) != sdata_addr)
+ if ((saddr1+TBLOCK_SIZE30) != sdata_addr)
TEST_ERROR
/* Allocate the second block from sdata_aggr */
- saddr2 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE2048 - TEST_BLOCK_SIZE30);
+ saddr2 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE2048 - TBLOCK_SIZE30);
H5MF_aggr_query(f, &(f->shared->sdata_aggr), &sdata_addr, &sdata_size);
- if ((saddr2+(TEST_BLOCK_SIZE2048 - TEST_BLOCK_SIZE30)) != sdata_addr)
+ if ((saddr2+(TBLOCK_SIZE2048 - TBLOCK_SIZE30)) != sdata_addr)
TEST_ERROR
if (sdata_size != 0) TEST_ERROR
/* Allocate the third block from sdata_aggr */
- saddr3 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE50);
+ saddr3 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE50);
H5MF_aggr_query(f, &(f->shared->sdata_aggr), &sdata_addr, &sdata_size);
- if ((saddr3+TEST_BLOCK_SIZE50) != sdata_addr)
+ if ((saddr3+TBLOCK_SIZE50) != sdata_addr)
TEST_ERROR
- if (sdata_size != (TEST_BLOCK_SIZE2048 - TEST_BLOCK_SIZE50))
+ if (sdata_size != (TBLOCK_SIZE2048 - TBLOCK_SIZE50))
TEST_ERROR
/* Allocate the third block from meta_aggr */
- addr3 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE1970);
+ addr3 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE1970);
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
if (addr3 != sdata_addr) TEST_ERROR
- if ((addr3 + TEST_BLOCK_SIZE1970) != ma_addr) TEST_ERROR
- if (ma_size != (TEST_BLOCK_SIZE2048 - TEST_BLOCK_SIZE1970)) TEST_ERROR
+ if ((addr3 + TBLOCK_SIZE1970) != ma_addr) TEST_ERROR
+ if (ma_size != (TBLOCK_SIZE2048 - TBLOCK_SIZE1970)) TEST_ERROR
/* sdata_aggr info is reset to 0 */
H5MF_aggr_query(f, &(f->shared->sdata_aggr), &sdata_addr, &sdata_size);
@@ -3165,8 +3125,8 @@ test_mf_aggr_alloc7(const char *env_h5_drvr, hid_t fapl)
if (sdata_size != 0) TEST_ERROR
/* Verify that meta_aggr's unused space of 1968 is freed to free-space */
- HDmemset(&state, 0, sizeof(frspace_state_t));
- state.tot_space += (TEST_BLOCK_SIZE2048 - (TEST_BLOCK_SIZE30 + TEST_BLOCK_SIZE50));
+ HDmemset(&state, 0, sizeof(H5FS_stat_t));
+ state.tot_space += (TBLOCK_SIZE2048 - (TBLOCK_SIZE30 + TBLOCK_SIZE50));
state.tot_sect_count += 1;
state.serial_sect_count += 1;
@@ -3174,12 +3134,12 @@ test_mf_aggr_alloc7(const char *env_h5_drvr, hid_t fapl)
TEST_ERROR
/* Free all the allocated blocks */
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TEST_BLOCK_SIZE30);
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr2, (hsize_t)TEST_BLOCK_SIZE50);
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr3, (hsize_t)TEST_BLOCK_SIZE1970);
- H5MF_xfree(f, stype, H5AC_ind_read_dxpl_id, saddr1, (hsize_t)TEST_BLOCK_SIZE30);
- H5MF_xfree(f, stype, H5AC_ind_read_dxpl_id, saddr2, (hsize_t)(TEST_BLOCK_SIZE2048 - TEST_BLOCK_SIZE30));
- H5MF_xfree(f, stype, H5AC_ind_read_dxpl_id, saddr3, (hsize_t)TEST_BLOCK_SIZE50);
+ H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TBLOCK_SIZE30);
+ H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr2, (hsize_t)TBLOCK_SIZE50);
+ H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr3, (hsize_t)TBLOCK_SIZE1970);
+ H5MF_xfree(f, stype, H5AC_ind_read_dxpl_id, saddr1, (hsize_t)TBLOCK_SIZE30);
+ H5MF_xfree(f, stype, H5AC_ind_read_dxpl_id, saddr2, (hsize_t)(TBLOCK_SIZE2048 - TBLOCK_SIZE30));
+ H5MF_xfree(f, stype, H5AC_ind_read_dxpl_id, saddr3, (hsize_t)TBLOCK_SIZE50);
if(H5Fclose(file) < 0)
FAIL_STACK_ERROR
@@ -3209,6 +3169,7 @@ error:
} /* test_mf_aggr_alloc7() */
/*
+ *-------------------------------------------------------------------------
* To verify that a block can be extended from the aggregator
*
* Test 1: Allocate block A from meta_aggr which is at end of file
@@ -3229,6 +3190,7 @@ error:
* Allocate block B from sdata_aggr so that meta_aggr is not at end of file
* Try to extend a block which adjoins meta_aggr but meta_aggr cannot fulfill the extended-request
* H5MF_try_extend() fails
+ *-------------------------------------------------------------------------
*/
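/*
 * Editor's sketch (not part of the patch): the H5MF_try_extend() pattern the
 * scenario above describes -- a block is extended in place while the space
 * right after it is still held by the metadata aggregator (or is at end of
 * file).  Assumes this test file's context; the helper name is illustrative.
 */
static int
sketch_aggr_extend(H5F_t *f)
{
    haddr_t addr;
    htri_t  was_extended;

    /* Block of 30 sits at the front of meta_aggr's space */
    addr = H5MF_alloc(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);

    /* Grow it from 30 to 30+50 bytes; this succeeds while the aggregator
     * directly adjoins the end of the block and can cover the request */
    was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, H5FD_MEM_SUPER,
                                   (haddr_t)addr, (hsize_t)TBLOCK_SIZE30, (hsize_t)TBLOCK_SIZE50);
    if(was_extended <= 0)
        return -1;      /* aggregator could not cover the extension */

    /* Release the now-80-byte block */
    H5MF_xfree(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, addr, (hsize_t)(TBLOCK_SIZE30 + TBLOCK_SIZE50));
    return 0;
}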
static unsigned
test_mf_aggr_extend(const char *env_h5_drvr, hid_t fapl)
@@ -3274,9 +3236,9 @@ test_mf_aggr_extend(const char *env_h5_drvr, hid_t fapl)
/* Allocate the first block from meta_aggr */
type = H5FD_MEM_SUPER;
- addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- if ((addr+TEST_BLOCK_SIZE30) != ma_addr)
+ if((addr+TBLOCK_SIZE30) != ma_addr)
TEST_ERROR
/* Adjust meta_aggr's info for testing */
@@ -3286,7 +3248,7 @@ test_mf_aggr_extend(const char *env_h5_drvr, hid_t fapl)
new_addr = addr - 10;
/* Try to extend the block by an amount < (% * aggr->alloc_size) */
- was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, type, (haddr_t)new_addr, (hsize_t)10, (hsize_t)(TEST_BLOCK_SIZE50));
+ was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, type, (haddr_t)new_addr, (hsize_t)10, (hsize_t)(TBLOCK_SIZE50));
/* should succeed */
if(!was_extended)
@@ -3294,15 +3256,15 @@ test_mf_aggr_extend(const char *env_h5_drvr, hid_t fapl)
H5MF_aggr_query(f, &(f->shared->meta_aggr), &new_ma_addr, &new_ma_size);
- if (new_ma_addr != (addr+TEST_BLOCK_SIZE50))
+ if(new_ma_addr != (addr+TBLOCK_SIZE50))
TEST_ERROR
- if (new_ma_size != (f->shared->meta_aggr.alloc_size - TEST_BLOCK_SIZE50)) TEST_ERROR
+ if(new_ma_size != (f->shared->meta_aggr.alloc_size - TBLOCK_SIZE50)) TEST_ERROR
/* Free the allocated blocks */
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr, (hsize_t)TEST_BLOCK_SIZE50);
+ H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr, (hsize_t)TBLOCK_SIZE50);
/* Try to extend the block by an amount > (% * aggr->alloc_size) but amount < aggr->alloc_size */
- was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, type, (haddr_t)new_addr, (hsize_t)10, (hsize_t)(TEST_BLOCK_SIZE700));
+ was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, type, (haddr_t)new_addr, (hsize_t)10, (hsize_t)(TBLOCK_SIZE700));
/* should succeed */
if(!was_extended)
@@ -3310,15 +3272,15 @@ test_mf_aggr_extend(const char *env_h5_drvr, hid_t fapl)
H5MF_aggr_query(f, &(f->shared->meta_aggr), &new_ma_addr, &new_ma_size);
- if (new_ma_addr != (addr + TEST_BLOCK_SIZE700))
+ if(new_ma_addr != (addr + TBLOCK_SIZE700))
TEST_ERROR
- if (new_ma_size != (f->shared->meta_aggr.alloc_size * 2 - TEST_BLOCK_SIZE700)) TEST_ERROR
+ if(new_ma_size != (f->shared->meta_aggr.alloc_size * 2 - TBLOCK_SIZE700)) TEST_ERROR
/* Free the allocated blocks */
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr, (hsize_t)TEST_BLOCK_SIZE700);
+ H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr, (hsize_t)TBLOCK_SIZE700);
/* Try to extend the block by an amount > (% * aggr->alloc_size) but amount > aggr->alloc_size */
- was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, type, (haddr_t)new_addr, (hsize_t)10, (hsize_t)(TEST_BLOCK_SIZE2058));
+ was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, type, (haddr_t)new_addr, (hsize_t)10, (hsize_t)(TBLOCK_SIZE2058));
/* should succeed */
if(!was_extended)
@@ -3326,12 +3288,12 @@ test_mf_aggr_extend(const char *env_h5_drvr, hid_t fapl)
H5MF_aggr_query(f, &(f->shared->meta_aggr), &new_ma_addr, &new_ma_size);
- if (new_ma_addr != (addr + TEST_BLOCK_SIZE2058))
+ if (new_ma_addr != (addr + TBLOCK_SIZE2058))
TEST_ERROR
if (new_ma_size != f->shared->meta_aggr.size) TEST_ERROR
/* Free the allocated blocks */
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr, (hsize_t)TEST_BLOCK_SIZE2058);
+ H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr, (hsize_t)TBLOCK_SIZE2058);
if(H5Fclose(file) < 0)
FAIL_STACK_ERROR
@@ -3365,16 +3327,16 @@ test_mf_aggr_extend(const char *env_h5_drvr, hid_t fapl)
/* Allocate the first block from meta_aggr */
type = H5FD_MEM_SUPER;
- addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- if ((addr+TEST_BLOCK_SIZE30) != ma_addr)
+ if((addr+TBLOCK_SIZE30) != ma_addr)
TEST_ERROR
/* Allocate the first block from sdata_aggr */
stype = H5FD_MEM_DRAW;
- saddr = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE50);
+ saddr = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE50);
H5MF_aggr_query(f, &(f->shared->sdata_aggr), &sdata_addr, &sdata_size);
- if ((saddr+TEST_BLOCK_SIZE50) != sdata_addr)
+ if((saddr+TBLOCK_SIZE50) != sdata_addr)
TEST_ERROR
/* Adjust meta_aggr's info for testing */
@@ -3384,24 +3346,24 @@ test_mf_aggr_extend(const char *env_h5_drvr, hid_t fapl)
new_addr = addr - 10;
/* should be able to fulfill request from the aggregator itself */
- was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, type, (haddr_t)new_addr, (hsize_t)10, (hsize_t)(TEST_BLOCK_SIZE50));
+ was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, type, (haddr_t)new_addr, (hsize_t)10, (hsize_t)(TBLOCK_SIZE50));
if(!was_extended)
TEST_ERROR
H5MF_aggr_query(f, &(f->shared->meta_aggr), &new_ma_addr, &new_ma_size);
- if (new_ma_addr != (addr+TEST_BLOCK_SIZE50))
+ if(new_ma_addr != (addr+TBLOCK_SIZE50))
TEST_ERROR
- if (new_ma_size != (f->shared->meta_aggr.alloc_size-TEST_BLOCK_SIZE50))
+ if(new_ma_size != (f->shared->meta_aggr.alloc_size-TBLOCK_SIZE50))
TEST_ERROR
/* Restore info for meta_aggr */
f->shared->meta_aggr.addr = ma_addr;
f->shared->meta_aggr.size = ma_size;
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr, (hsize_t)TEST_BLOCK_SIZE30);
- H5MF_xfree(f, stype, H5AC_ind_read_dxpl_id, saddr, (hsize_t)TEST_BLOCK_SIZE50);
+ H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr, (hsize_t)TBLOCK_SIZE30);
+ H5MF_xfree(f, stype, H5AC_ind_read_dxpl_id, saddr, (hsize_t)TBLOCK_SIZE50);
if(H5Fclose(file) < 0)
FAIL_STACK_ERROR
@@ -3411,7 +3373,7 @@ test_mf_aggr_extend(const char *env_h5_drvr, hid_t fapl)
TEST_ERROR
/* Verify the file is the correct size */
- if (file_size != empty_size)
+ if(file_size != empty_size)
TEST_ERROR
PASSED()
@@ -3435,16 +3397,16 @@ test_mf_aggr_extend(const char *env_h5_drvr, hid_t fapl)
/* Allocate first block from meta_aggr */
type = H5FD_MEM_SUPER;
- addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- if ((addr+TEST_BLOCK_SIZE30) != ma_addr)
+ if ((addr+TBLOCK_SIZE30) != ma_addr)
TEST_ERROR
/* Allocate first block from sdata_aggr */
stype = H5FD_MEM_DRAW;
- saddr = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE50);
+ saddr = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE50);
H5MF_aggr_query(f, &(f->shared->sdata_aggr), &sdata_addr, &sdata_size);
- if ((saddr+TEST_BLOCK_SIZE50) != sdata_addr)
+ if((saddr+TBLOCK_SIZE50) != sdata_addr)
TEST_ERROR
/* Adjust meta_aggr's info for testing */
@@ -3454,7 +3416,7 @@ test_mf_aggr_extend(const char *env_h5_drvr, hid_t fapl)
new_addr = addr - 10;
/* unable to fulfill request from the aggregator itself */
- was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, type, (haddr_t)new_addr, (hsize_t)10, (hsize_t)(TEST_BLOCK_SIZE50));
+ was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, type, (haddr_t)new_addr, (hsize_t)10, (hsize_t)(TBLOCK_SIZE50));
if(was_extended)
TEST_ERROR
@@ -3468,8 +3430,8 @@ test_mf_aggr_extend(const char *env_h5_drvr, hid_t fapl)
f->shared->meta_aggr.addr = ma_addr;
f->shared->meta_aggr.size = ma_size;
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr, (hsize_t)TEST_BLOCK_SIZE30);
- H5MF_xfree(f, stype, H5AC_ind_read_dxpl_id, saddr, (hsize_t)TEST_BLOCK_SIZE50);
+ H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr, (hsize_t)TBLOCK_SIZE30);
+ H5MF_xfree(f, stype, H5AC_ind_read_dxpl_id, saddr, (hsize_t)TBLOCK_SIZE50);
if(H5Fclose(file) < 0)
FAIL_STACK_ERROR
@@ -3479,7 +3441,7 @@ test_mf_aggr_extend(const char *env_h5_drvr, hid_t fapl)
TEST_ERROR
/* Verify the file is the correct size */
- if (file_size != empty_size)
+ if(file_size != empty_size)
TEST_ERROR
PASSED()
@@ -3499,6 +3461,7 @@ error:
} /* test_mf_aggr_extend() */
/*
+ *-------------------------------------------------------------------------
* To verify that a block is absorbed into an aggregator
*
* H5MF_try_shrink() only allows blocks to be absorbed into an aggregator
@@ -3517,10 +3480,7 @@ error:
* H5MF_alloc() block C from meta_aggr
* H5MF_try_shrink() block B should fail since it does not adjoin the
* beginning nor the end of meta_aggr
- *
- * Modifications:
- * Vailin Choi; July 2012
- * Changes due to the switch to H5FD_FLMAP_DICHOTOMY
+ *-------------------------------------------------------------------------
*/
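/*
 * Editor's sketch (not part of the patch): the H5MF_try_shrink() absorb
 * pattern described above -- a freed block that touches the front of an
 * aggregator is folded back into it instead of going to the free-space
 * manager.  Assumes this test file's context; the helper name is illustrative.
 */
static int
sketch_aggr_absorb(H5F_t *f)
{
    haddr_t blk, ma_addr = HADDR_UNDEF, new_ma_addr = HADDR_UNDEF;
    hsize_t ma_size = 0;

    /* Block A is carved off the front of meta_aggr */
    blk = H5MF_alloc(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
    H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);

    /* Shrinking block A absorbs it: it adjoins the aggregator's start */
    if(H5MF_try_shrink(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, blk, (hsize_t)TBLOCK_SIZE30) <= 0)
        return -1;

    /* The aggregator's start address moves back over the absorbed block */
    H5MF_aggr_query(f, &(f->shared->meta_aggr), &new_ma_addr, &ma_size);
    if(new_ma_addr != ma_addr - TBLOCK_SIZE30)
        return -1;
    return 0;
}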
static unsigned
test_mf_aggr_absorb(const char *env_h5_drvr, hid_t fapl)
@@ -3567,20 +3527,20 @@ test_mf_aggr_absorb(const char *env_h5_drvr, hid_t fapl)
/* Allocate block A from meta_aggr */
type = H5FD_MEM_SUPER;
- addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
H5MF_aggr_query(f, &(f->shared->meta_aggr), &new_ma_addr, &new_ma_size);
- ma_addr = new_ma_addr - TEST_BLOCK_SIZE30;
+ ma_addr = new_ma_addr - TBLOCK_SIZE30;
- if((addr1 + TEST_BLOCK_SIZE30) != new_ma_addr)
+ if((addr1 + TBLOCK_SIZE30) != new_ma_addr)
TEST_ERROR
/* should succeed */
- if(H5MF_try_shrink(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TEST_BLOCK_SIZE30) <= 0)
+ if(H5MF_try_shrink(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TBLOCK_SIZE30) <= 0)
TEST_ERROR
H5MF_aggr_query(f, &(f->shared->meta_aggr), &new_ma_addr, &new_ma_size);
- if (new_ma_addr != ma_addr) TEST_ERROR
+ if(new_ma_addr != ma_addr) TEST_ERROR
if(H5Fclose(file) < 0)
FAIL_STACK_ERROR
@@ -3590,7 +3550,7 @@ test_mf_aggr_absorb(const char *env_h5_drvr, hid_t fapl)
TEST_ERROR
/* Verify the file is the correct size */
- if (file_size != empty_size)
+ if(file_size != empty_size)
TEST_ERROR
PASSED()
@@ -3614,32 +3574,32 @@ test_mf_aggr_absorb(const char *env_h5_drvr, hid_t fapl)
/* Allocate block A from meta_aggr */
type = H5FD_MEM_SUPER;
- addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- if ((addr1+TEST_BLOCK_SIZE30) != ma_addr) TEST_ERROR
- if (ma_size != (TEST_BLOCK_SIZE2048 - TEST_BLOCK_SIZE30)) TEST_ERROR
+ if((addr1+TBLOCK_SIZE30) != ma_addr) TEST_ERROR
+ if(ma_size != (TBLOCK_SIZE2048 - TBLOCK_SIZE30)) TEST_ERROR
/* Allocate block B from sdata_aggr */
stype = H5FD_MEM_DRAW;
- saddr1 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE50);
+ saddr1 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE50);
H5MF_aggr_query(f, &(f->shared->sdata_aggr), NULL, &sdata_size);
/* should succeed */
- if(H5MF_try_shrink(f, stype, H5AC_ind_read_dxpl_id, saddr1, (hsize_t)TEST_BLOCK_SIZE50) <= 0)
+ if(H5MF_try_shrink(f, stype, H5AC_ind_read_dxpl_id, saddr1, (hsize_t)TBLOCK_SIZE50) <= 0)
TEST_ERROR
H5MF_aggr_query(f, &(f->shared->sdata_aggr), &new_sdata_addr, &new_sdata_size);
- if (new_sdata_addr != saddr1) TEST_ERROR
- if (new_sdata_size != sdata_size + TEST_BLOCK_SIZE50) TEST_ERROR
+ if(new_sdata_addr != saddr1) TEST_ERROR
+ if(new_sdata_size != sdata_size + TBLOCK_SIZE50) TEST_ERROR
/* meta_aggr info should be updated because the block is absorbed into the meta_aggr */
H5MF_aggr_query(f, &(f->shared->meta_aggr), &new_ma_addr, &new_ma_size);
if (new_ma_addr != ma_addr) TEST_ERROR
if (new_ma_size != (ma_size)) TEST_ERROR
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TEST_BLOCK_SIZE30);
+ H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TBLOCK_SIZE30);
if(H5Fclose(file) < 0)
FAIL_STACK_ERROR
@@ -3649,7 +3609,7 @@ test_mf_aggr_absorb(const char *env_h5_drvr, hid_t fapl)
TEST_ERROR
/* Verify the file is the correct size */
- if (file_size != empty_size)
+ if(file_size != empty_size)
TEST_ERROR
PASSED()
@@ -3673,35 +3633,35 @@ test_mf_aggr_absorb(const char *env_h5_drvr, hid_t fapl)
/* Allocate block A from meta_aggr */
type = H5FD_MEM_SUPER;
- addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- if ((addr1+TEST_BLOCK_SIZE30) != ma_addr)
+ if((addr1+TBLOCK_SIZE30) != ma_addr)
TEST_ERROR
/* Allocate block B from meta_aggr */
- addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE50);
+ addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE50);
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- if ((addr2+TEST_BLOCK_SIZE50) != ma_addr)
+ if((addr2+TBLOCK_SIZE50) != ma_addr)
TEST_ERROR
/* Allocate block C from meta_aggr */
- addr3 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)(TEST_BLOCK_SIZE30+TEST_BLOCK_SIZE50));
+ addr3 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)(TBLOCK_SIZE30+TBLOCK_SIZE50));
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- if ((addr3+TEST_BLOCK_SIZE30+TEST_BLOCK_SIZE50) != ma_addr)
+ if((addr3+TBLOCK_SIZE30+TBLOCK_SIZE50) != ma_addr)
TEST_ERROR
/* should not succeed */
- if(H5MF_try_shrink(f, type, H5AC_ind_read_dxpl_id, addr2, (hsize_t)TEST_BLOCK_SIZE50) > 0)
+ if(H5MF_try_shrink(f, type, H5AC_ind_read_dxpl_id, addr2, (hsize_t)TBLOCK_SIZE50) > 0)
TEST_ERROR
/* aggregator info should be the same as before */
H5MF_aggr_query(f, &(f->shared->meta_aggr), &new_ma_addr, &new_ma_size);
- if (new_ma_addr != ma_addr) TEST_ERROR
+ if(new_ma_addr != ma_addr) TEST_ERROR
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TEST_BLOCK_SIZE30);
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr2, (hsize_t)TEST_BLOCK_SIZE50);
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr3, (hsize_t)(TEST_BLOCK_SIZE30+TEST_BLOCK_SIZE50));
+ H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TBLOCK_SIZE30);
+ H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr2, (hsize_t)TBLOCK_SIZE50);
+ H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr3, (hsize_t)(TBLOCK_SIZE30+TBLOCK_SIZE50));
if(H5Fclose(file) < 0)
FAIL_STACK_ERROR
@@ -3711,7 +3671,7 @@ test_mf_aggr_absorb(const char *env_h5_drvr, hid_t fapl)
TEST_ERROR
/* Verify the file is the correct size */
- if (file_size != empty_size)
+ if(file_size != empty_size)
TEST_ERROR
PASSED()
@@ -3731,6 +3691,7 @@ error:
} /* test_mf_aggr_absorb() */
/*
+ *-------------------------------------------------------------------------
* To verify that a block allocated from file allocation is aligned, can be shrunk and extended
*
* Alignment = 1024 or 4096
@@ -3759,6 +3720,7 @@ error:
* Allocate a block which should be from file allocation
* The return address should be aligned
* H5MF_try_extend() the block with aligned address should succeed
+ *-------------------------------------------------------------------------
*/
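/*
 * Editor's sketch (not part of the patch): the alignment bookkeeping the test
 * above verifies.  The fragment expected in front of an aligned block is the
 * distance from the current end of allocated space to the next alignment
 * boundary, and that fragment ends up in the free-space manager.  Assumes
 * this test file's context; the helper name is illustrative.
 */
static int
sketch_aligned_eoa_alloc(H5F_t *f, h5_stat_size_t file_size, hsize_t alignment)
{
    H5FS_stat_t state;
    hsize_t     tmp, mis_align = 0;
    haddr_t     addr;

    /* Fragment needed so the new block starts on an alignment boundary */
    if((tmp = (hsize_t)file_size % alignment))
        mis_align = alignment - tmp;

    /* The block handed back must itself be aligned */
    addr = H5MF_alloc(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
    if(addr % alignment)
        return -1;

    /* The mis-alignment fragment (if any) was released to free space */
    HDmemset(&state, 0, sizeof(H5FS_stat_t));
    if(mis_align) {
        state.tot_space += mis_align;
        state.tot_sect_count += 1;
        state.serial_sect_count += 1;
        if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
            return -1;
    }

    H5MF_xfree(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, addr, (hsize_t)TBLOCK_SIZE30);
    return 0;
}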
static unsigned
test_mf_align_eoa(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
@@ -3773,7 +3735,7 @@ test_mf_align_eoa(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
haddr_t ma_addr=HADDR_UNDEF;
hsize_t ma_size=0;
htri_t was_extended;
- frspace_state_t state;
+ H5FS_stat_t state;
hsize_t alignment=0, mis_align=0, tmp=0, accum=0;
hbool_t have_alloc_vfd; /* Whether VFD used has an 'alloc' callback */
@@ -3819,23 +3781,23 @@ test_mf_align_eoa(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
FAIL_STACK_ERROR
/* calculate fragment for alignment of block 30 */
- if ((tmp = (hsize_t)file_size % alignment))
+ if((tmp = (hsize_t)file_size % alignment))
mis_align = alignment - tmp;
- accum = mis_align + TEST_BLOCK_SIZE30;
+ accum = mis_align + TBLOCK_SIZE30;
/* Allocate a block of 30 from file allocation */
type = H5FD_MEM_SUPER;
- addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
/* Verify that the allocated block is aligned */
- if (addr1 % alignment) TEST_ERROR
+ if(addr1 % alignment) TEST_ERROR
/* there should be nothing in the aggregator */
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- if (ma_addr || ma_size) TEST_ERROR
+ if(ma_addr || ma_size) TEST_ERROR
- HDmemset(&state, 0, sizeof(frspace_state_t));
+ HDmemset(&state, 0, sizeof(H5FS_stat_t));
if (mis_align) {
state.tot_space += mis_align;
state.tot_sect_count += 1;
@@ -3848,18 +3810,18 @@ test_mf_align_eoa(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
mis_align = 0;
if ((tmp = ((hsize_t)file_size + accum) % alignment))
mis_align = alignment - tmp;
- accum += (mis_align + TEST_BLOCK_SIZE50);
+ accum += (mis_align + TBLOCK_SIZE50);
- addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE50);
+ addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE50);
/* Verify that the allocated block is aligned */
if (addr2 % alignment) TEST_ERROR
/* there should be nothing in the aggregator */
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- if (ma_addr || ma_size) TEST_ERROR
+ if(ma_addr || ma_size) TEST_ERROR
- if (mis_align) {
+ if(mis_align) {
state.tot_space += mis_align;
state.tot_sect_count += 1;
state.serial_sect_count += 1;
@@ -3867,8 +3829,8 @@ test_mf_align_eoa(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
TEST_ERROR
}
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TEST_BLOCK_SIZE30);
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr2, (hsize_t)TEST_BLOCK_SIZE50);
+ H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TBLOCK_SIZE30);
+ H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr2, (hsize_t)TBLOCK_SIZE50);
if(H5Fclose(file) < 0)
FAIL_STACK_ERROR
@@ -3902,7 +3864,7 @@ test_mf_align_eoa(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
/* allocate a block of 50 from meta_aggr */
type = H5FD_MEM_SUPER;
- addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE50);
+ addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE50);
/* address should be aligned */
if (addr1 % alignment) TEST_ERROR
@@ -3923,7 +3885,7 @@ test_mf_align_eoa(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
FAIL_STACK_ERROR
/* shrink the block */
- if(H5MF_try_shrink(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TEST_BLOCK_SIZE50) <= 0)
+ if(H5MF_try_shrink(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TBLOCK_SIZE50) <= 0)
TEST_ERROR
if(H5Fclose(file) < 0)
@@ -3932,7 +3894,7 @@ test_mf_align_eoa(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
if((new_file_size = h5_get_file_size(filename, fapl1)) < 0)
TEST_ERROR
- if (new_file_size != (file_size-TEST_BLOCK_SIZE50)) TEST_ERROR
+ if (new_file_size != (file_size-TBLOCK_SIZE50)) TEST_ERROR
PASSED()
} /* end if */
@@ -3957,10 +3919,10 @@ test_mf_align_eoa(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
/* allocate a block of 50 */
type = H5FD_MEM_SUPER;
- addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE50);
+ addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE50);
/* address should be aligned */
- if (addr1 % alignment) TEST_ERROR
+ if(addr1 % alignment) TEST_ERROR
/* Close file */
if(H5Fclose(file) < 0)
@@ -3978,9 +3940,9 @@ test_mf_align_eoa(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
FAIL_STACK_ERROR
/* try to extend the block */
- was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, type, (haddr_t)addr1, (hsize_t)TEST_BLOCK_SIZE50, (hsize_t)TEST_BLOCK_SIZE30);
+ was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, type, (haddr_t)addr1, (hsize_t)TBLOCK_SIZE50, (hsize_t)TBLOCK_SIZE30);
- if (was_extended <=0) TEST_ERROR
+ if(was_extended <=0) TEST_ERROR
if(H5Fclose(file) < 0)
FAIL_STACK_ERROR
@@ -3988,7 +3950,7 @@ test_mf_align_eoa(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
if((new_file_size = h5_get_file_size(filename, fapl1)) < 0)
TEST_ERROR
- if (new_file_size != (file_size+TEST_BLOCK_SIZE30)) TEST_ERROR
+ if (new_file_size != (file_size+TBLOCK_SIZE30)) TEST_ERROR
PASSED()
} /* end if */
@@ -4007,6 +3969,7 @@ error:
} /* test_mf_align_eoa() */
/*
+ *-------------------------------------------------------------------------
* To verify that a block allocated from the free-space manager is aligned
*
* Alignment = 1024 or 4096
@@ -4035,10 +3998,7 @@ error:
* Allocate a block of size=40
* The free-space manager is unable to fulfill the request
* The block is allocated from file allocation and should be aligned
- *
- * Modifications:
- * Vailin Choi; July 2012
- * Initialize the new field "allow_eoa_shrink_only" for user data.
+ *-------------------------------------------------------------------------
*/
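/*
 * Editor's sketch (not part of the patch): the free-space-manager setup the
 * updated test uses -- H5MF_start_fstype()/H5MF_sect_new()/H5MF_add_sect()
 * replace the old H5MF_alloc_start()/H5MF_sect_simple_new()/H5FS_sect_add()
 * calls, so the test no longer builds an H5MF_sect_ud_t by hand.  Assumes
 * this test file's context; the helper name is illustrative.
 */
static int
sketch_fs_section_alloc(H5F_t *f, hsize_t alignment)
{
    H5MF_free_section_t *sect_node;
    haddr_t addr;

    /* Open the H5FD_MEM_SUPER free-space manager */
    if(H5MF_start_fstype(f, H5AC_ind_read_dxpl_id, (H5F_mem_page_t)H5FD_MEM_SUPER) < 0)
        return -1;

    /* Hand it one aligned 50-byte section starting at `alignment` */
    sect_node = H5MF_sect_new(H5MF_FSPACE_SECT_SIMPLE, (haddr_t)alignment, (hsize_t)TBLOCK_SIZE50);
    if(H5MF_add_sect(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, f->shared->fs_man[H5FD_MEM_SUPER], sect_node))
        return -1;

    /* A 50-byte request is now satisfied by that section, so the returned
     * address is section A's aligned start */
    addr = H5MF_alloc(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE50);
    if(addr != (haddr_t)alignment || (addr % alignment))
        return -1;

    H5MF_xfree(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, addr, (hsize_t)TBLOCK_SIZE50);
    return 0;
}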
static unsigned
test_mf_align_fs(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
@@ -4047,12 +4007,10 @@ test_mf_align_fs(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
char filename[FILENAME_LEN]; /* Filename to use */
h5_stat_size_t file_size;
H5F_t *f = NULL; /* Internal file object pointer */
- H5FD_mem_t type;
H5MF_free_section_t *sect_node = NULL;
haddr_t addr;
- frspace_state_t state;
- H5MF_sect_ud_t udata;
- htri_t was_extended;
+ H5FS_stat_t state;
+ htri_t was_extended;
hsize_t alignment=0, tmp=0, mis_align=0;
hbool_t have_alloc_vfd; /* Whether VFD used has an 'alloc' callback */
@@ -4081,58 +4039,51 @@ test_mf_align_fs(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
if(NULL == (f = (H5F_t *)H5I_object(file)))
FAIL_STACK_ERROR
- type = H5FD_MEM_SUPER;
-
- if(H5MF_alloc_start(f, H5AC_ind_read_dxpl_id, type) < 0)
- TEST_ERROR
-
- if (f->shared->fs_state[type] != H5F_FS_STATE_OPEN)
- TEST_ERROR
- if (f->shared->fs_man[type]->client != H5FS_CLIENT_FILE_ID)
- TEST_ERROR
+ /* Start up H5FD_MEM_SUPER free-space manager */
+ if(H5MF_start_fstype(f, H5AC_ind_read_dxpl_id, (H5F_mem_page_t)H5FD_MEM_SUPER) < 0)
+ FAIL_STACK_ERROR
- sect_node = H5MF_sect_simple_new((haddr_t)alignment, (hsize_t)TEST_BLOCK_SIZE50);
+ if(f->shared->fs_state[H5FD_MEM_SUPER] != H5F_FS_STATE_OPEN)
+ TEST_ERROR
+ if(f->shared->fs_man[H5FD_MEM_SUPER]->client != H5FS_CLIENT_FILE_ID)
+ TEST_ERROR
- /* Construct user data for callbacks */
- udata.f = f;
- udata.dxpl_id = H5AC_ind_read_dxpl_id;
- udata.alloc_type = type;
- udata.allow_sect_absorb = TRUE;
- udata.allow_eoa_shrink_only = FALSE;
+ /* Create section A */
+ sect_node = H5MF_sect_new(H5MF_FSPACE_SECT_SIMPLE, (haddr_t)alignment, (hsize_t)TBLOCK_SIZE50);
/* Add section A to free-space manager */
- if (H5FS_sect_add(f, H5AC_ind_read_dxpl_id, f->shared->fs_man[type], (H5FS_section_info_t *)sect_node, H5FS_ADD_RETURNED_SPACE, &udata))
- FAIL_STACK_ERROR
+ if(H5MF_add_sect(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, f->shared->fs_man[H5FD_MEM_SUPER], sect_node))
+ FAIL_STACK_ERROR
- HDmemset(&state, 0, sizeof(frspace_state_t));
- state.tot_space += TEST_BLOCK_SIZE50;
+ HDmemset(&state, 0, sizeof(H5FS_stat_t));
+ state.tot_space += TBLOCK_SIZE50;
state.tot_sect_count += 1;
state.serial_sect_count += 1;
- if(check_stats(f, f->shared->fs_man[type], &state))
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
/* Allocate a block of 50 */
- addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE50);
+ addr = H5MF_alloc(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE50);
/* Verify that the allocated block is section A in free-space */
- if (addr != (haddr_t)alignment) TEST_ERROR
- if (addr % alignment) TEST_ERROR
+ if(addr != (haddr_t)alignment) TEST_ERROR
+ if(addr % alignment) TEST_ERROR
- state.tot_space -= TEST_BLOCK_SIZE50;
+ state.tot_space -= TBLOCK_SIZE50;
state.tot_sect_count -= 1;
state.serial_sect_count -= 1;
- if(check_stats(f, f->shared->fs_man[type], &state))
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
/* Free the block to free-space */
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr, (hsize_t)TEST_BLOCK_SIZE50);
+ H5MF_xfree(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, addr, (hsize_t)TBLOCK_SIZE50);
- state.tot_space += TEST_BLOCK_SIZE50;
+ state.tot_space += TBLOCK_SIZE50;
state.tot_sect_count += 1;
state.serial_sect_count += 1;
- if(check_stats(f, f->shared->fs_man[type], &state))
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
if(H5Fclose(file) < 0)
@@ -4151,70 +4102,63 @@ test_mf_align_fs(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
if(NULL == (f = (H5F_t *)H5I_object(file)))
FAIL_STACK_ERROR
- type = H5FD_MEM_SUPER;
+ /* Start up H5FD_MEM_SUPER free-space manager */
+ if(H5MF_start_fstype(f, H5AC_ind_read_dxpl_id, (H5F_mem_page_t)H5FD_MEM_SUPER) < 0)
+ FAIL_STACK_ERROR
- if(H5MF_alloc_start(f, H5AC_ind_read_dxpl_id, type) < 0)
+ if(f->shared->fs_state[H5FD_MEM_SUPER] != H5F_FS_STATE_OPEN)
+ TEST_ERROR
+ if(f->shared->fs_man[H5FD_MEM_SUPER]->client != H5FS_CLIENT_FILE_ID)
TEST_ERROR
- if (f->shared->fs_state[type] != H5F_FS_STATE_OPEN)
- TEST_ERROR
- if (f->shared->fs_man[type]->client != H5FS_CLIENT_FILE_ID)
- TEST_ERROR
-
- sect_node = H5MF_sect_simple_new((haddr_t)TEST_BLOCK_ADDR70, (hsize_t)TEST_BLOCK_SIZE8000);
-
- /* Construct user data for callbacks */
- udata.f = f;
- udata.dxpl_id = H5AC_ind_read_dxpl_id;
- udata.alloc_type = type;
- udata.allow_sect_absorb = TRUE;
- udata.allow_eoa_shrink_only = FALSE;
+ /* Create section A */
+ sect_node = H5MF_sect_new(H5MF_FSPACE_SECT_SIMPLE, (haddr_t)TBLOCK_ADDR70, (hsize_t)TBLOCK_SIZE8000);
/* Add section A to free-space manager */
- if (H5FS_sect_add(f, H5AC_ind_read_dxpl_id, f->shared->fs_man[type], (H5FS_section_info_t *)sect_node, H5FS_ADD_RETURNED_SPACE, &udata))
- FAIL_STACK_ERROR
+ if(H5MF_add_sect(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, f->shared->fs_man[H5FD_MEM_SUPER], sect_node))
+ FAIL_STACK_ERROR
- HDmemset(&state, 0, sizeof(frspace_state_t));
- state.tot_space += TEST_BLOCK_SIZE8000;
+ HDmemset(&state, 0, sizeof(H5FS_stat_t));
+ state.tot_space += TBLOCK_SIZE8000;
state.tot_sect_count += 1;
state.serial_sect_count += 1;
- if(check_stats(f, f->shared->fs_man[type], &state))
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
/* Allocate a block of 600 */
- addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE600);
+ addr = H5MF_alloc(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE600);
/* Verify that the allocated block is aligned */
if (addr % alignment) TEST_ERROR
/* should have 1 more section in free-space */
- state.tot_space -= TEST_BLOCK_SIZE600;
+ state.tot_space -= TBLOCK_SIZE600;
state.tot_sect_count += 1;
state.serial_sect_count += 1;
- if(check_stats(f, f->shared->fs_man[type], &state))
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
/* try to extend the block */
- was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, type, (haddr_t)addr, (hsize_t)TEST_BLOCK_SIZE600, (hsize_t)TEST_BLOCK_SIZE200);
+ was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, H5FD_MEM_SUPER, (haddr_t)addr, (hsize_t)TBLOCK_SIZE600, (hsize_t)TBLOCK_SIZE200);
- if (was_extended <=0) TEST_ERROR
+ if(was_extended <=0) TEST_ERROR
/* space should be decreased by 200, # of sections remain the same */
- state.tot_space -= TEST_BLOCK_SIZE200;
+ state.tot_space -= TBLOCK_SIZE200;
- if(check_stats(f, f->shared->fs_man[type], &state))
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
/* Free the block to free-space manager */
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr, (hsize_t)(TEST_BLOCK_SIZE600+TEST_BLOCK_SIZE200));
+ H5MF_xfree(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, addr, (hsize_t)(TBLOCK_SIZE600+TBLOCK_SIZE200));
/* only 1 section in free-space because of merging */
- state.tot_space += (TEST_BLOCK_SIZE600+TEST_BLOCK_SIZE200);
+ state.tot_space += (TBLOCK_SIZE600+TBLOCK_SIZE200);
state.tot_sect_count = 1;
state.serial_sect_count = 1;
- if(check_stats(f, f->shared->fs_man[type], &state))
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
if(H5Fclose(file) < 0)
@@ -4241,45 +4185,38 @@ test_mf_align_fs(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
if(NULL == (f = (H5F_t *)H5I_object(file)))
FAIL_STACK_ERROR
- type = H5FD_MEM_SUPER;
-
- if(H5MF_alloc_start(f, H5AC_ind_read_dxpl_id, type) < 0)
- TEST_ERROR
+ /* Start up H5FD_MEM_SUPER free-space manager */
+ if(H5MF_start_fstype(f, H5AC_ind_read_dxpl_id, (H5F_mem_page_t)H5FD_MEM_SUPER) < 0)
+ FAIL_STACK_ERROR
- if (f->shared->fs_state[type] != H5F_FS_STATE_OPEN)
+ if(f->shared->fs_state[H5FD_MEM_SUPER] != H5F_FS_STATE_OPEN)
TEST_ERROR
- if (f->shared->fs_man[type]->client != H5FS_CLIENT_FILE_ID)
+ if(f->shared->fs_man[H5FD_MEM_SUPER]->client != H5FS_CLIENT_FILE_ID)
TEST_ERROR
- sect_node = H5MF_sect_simple_new((haddr_t)TEST_BLOCK_ADDR70, (hsize_t)TEST_BLOCK_SIZE700);
-
- /* Construct user data for callbacks */
- udata.f = f;
- udata.dxpl_id = H5AC_ind_read_dxpl_id;
- udata.alloc_type = type;
- udata.allow_sect_absorb = TRUE;
- udata.allow_eoa_shrink_only = FALSE;
+ /* Create section A */
+ sect_node = H5MF_sect_new(H5MF_FSPACE_SECT_SIMPLE, (haddr_t)TBLOCK_ADDR70, (hsize_t)TBLOCK_SIZE700);
/* Add section A to free-space manager */
- if (H5FS_sect_add(f, H5AC_ind_read_dxpl_id, f->shared->fs_man[type], (H5FS_section_info_t *)sect_node, H5FS_ADD_RETURNED_SPACE, &udata))
- FAIL_STACK_ERROR
+ if(H5MF_add_sect(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, f->shared->fs_man[H5FD_MEM_SUPER], sect_node))
+ FAIL_STACK_ERROR
- HDmemset(&state, 0, sizeof(frspace_state_t));
- state.tot_space += TEST_BLOCK_SIZE700;
+ HDmemset(&state, 0, sizeof(H5FS_stat_t));
+ state.tot_space += TBLOCK_SIZE700;
state.tot_sect_count += 1;
state.serial_sect_count += 1;
- if(check_stats(f, f->shared->fs_man[type], &state))
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
/*
* Allocate a block of 40
* Since free-space manager cannot fulfill the request because of alignment,
* the block is obtained from file allocation
*/
- addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)(TEST_BLOCK_SIZE40));
+ addr = H5MF_alloc(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, (hsize_t)(TBLOCK_SIZE40));
/* Verify that the allocated block is aligned */
- if (addr % alignment)
+ if(addr % alignment)
TEST_ERROR
/* verify that the allocated block is from file allocation, not section A in free-space */
@@ -4296,7 +4233,7 @@ test_mf_align_fs(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
}
/* free-space info should be the same */
- if(check_stats(f, f->shared->fs_man[type], &state))
+ if(check_stats(f, f->shared->fs_man[H5FD_MEM_SUPER], &state))
TEST_ERROR
if(H5Fclose(file) < 0)
@@ -4319,6 +4256,7 @@ error:
} /* test_mf_align_fs() */
/*
+ *-------------------------------------------------------------------------
* To verify that blocks allocated from the aggregator are aligned
*
* Alignment = 1024 aggr->alloc_size = 2048
@@ -4413,6 +4351,7 @@ error:
* Fragment from alignment of aggregator allocation is freed to free-space:[12368, 4016]
* There is space of 2018 left in meta_aggr
* EOA is at 20372
+ *-------------------------------------------------------------------------
*/
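/*
 * Editor's sketch (not part of the patch): the fragment sizes quoted in the
 * scenario above (e.g. [12368, 4016]) all come from the same mis-alignment
 * arithmetic the test performs before every allocation; a standalone version
 * with an illustrative name:
 */
static hsize_t
sketch_alignment_fragment(haddr_t eoa, hsize_t alignment)
{
    hsize_t tmp = (hsize_t)eoa % alignment;

    /* e.g. eoa = 12368, alignment = 4096: tmp = 80, fragment = 4016,
     * so the aligned block starts at 12368 + 4016 = 16384 */
    return tmp ? (alignment - tmp) : 0;
}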
static unsigned
test_mf_align_alloc1(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
@@ -4424,7 +4363,7 @@ test_mf_align_alloc1(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
H5FD_mem_t type;
haddr_t addr1, addr2, addr3, addr4;
- frspace_state_t state;
+ H5FS_stat_t state;
haddr_t ma_addr=HADDR_UNDEF;
hsize_t ma_size=0, mis_align=0;
hsize_t alignment=0, tmp=0;
@@ -4467,19 +4406,19 @@ test_mf_align_alloc1(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
FAIL_STACK_ERROR
/* calculate fragment for alignment of block 30 */
- if ((tmp = (hsize_t)file_size % alignment))
+ if((tmp = (hsize_t)file_size % alignment))
mis_align = alignment - tmp;
/* Allocate a block of 30 from meta_aggr */
type = H5FD_MEM_SUPER;
- addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
/* Verify that the allocated block is aligned */
- if (addr1 % alignment) TEST_ERROR
+ if(addr1 % alignment) TEST_ERROR
/* fragment for alignment of block 30 is freed to free-space */
- HDmemset(&state, 0, sizeof(frspace_state_t));
- if (mis_align) {
+ HDmemset(&state, 0, sizeof(H5FS_stat_t));
+ if(mis_align) {
state.tot_space += mis_align;
state.tot_sect_count += 1;
state.serial_sect_count += 1;
@@ -4487,22 +4426,22 @@ test_mf_align_alloc1(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- if ((addr1 + TEST_BLOCK_SIZE30) != ma_addr)
+ if ((addr1 + TBLOCK_SIZE30) != ma_addr)
TEST_ERROR
/* calculate fragment for alignment of block 50 */
mis_align = 0;
- if ((tmp = ma_addr % alignment))
+ if((tmp = ma_addr % alignment))
mis_align = alignment - tmp;
/* Allocate a block of 50 from meta_aggr */
- addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE50);
+ addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE50);
/* Verify that the allocated block is aligned */
- if (addr2 % alignment) TEST_ERROR
+ if(addr2 % alignment) TEST_ERROR
/* fragment for alignment of block 50 is freed to free-space */
- if (mis_align) {
+ if(mis_align) {
state.tot_space += mis_align;
state.tot_sect_count += 1;
state.serial_sect_count += 1;
@@ -4510,7 +4449,7 @@ test_mf_align_alloc1(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- if ((addr2 + TEST_BLOCK_SIZE50) != ma_addr)
+ if((addr2 + TBLOCK_SIZE50) != ma_addr)
TEST_ERROR
/* calculate fragment for alignment of block 80 */
@@ -4518,13 +4457,13 @@ test_mf_align_alloc1(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
if ((tmp = ma_addr % alignment))
mis_align = alignment - tmp;
- addr3 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE80);
+ addr3 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE80);
/* Verify that the allocated block is aligned */
- if (addr3 % alignment) TEST_ERROR
+ if(addr3 % alignment) TEST_ERROR
/* fragment for alignment of block 80 is freed to free-space */
- if (mis_align) {
+ if(mis_align) {
state.tot_space += mis_align;
state.tot_sect_count += 1;
state.serial_sect_count += 1;
@@ -4532,22 +4471,22 @@ test_mf_align_alloc1(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- if ((addr3 + TEST_BLOCK_SIZE80) != ma_addr)
+ if((addr3 + TBLOCK_SIZE80) != ma_addr)
TEST_ERROR
/* calculate fragment for alignment of block 1970 */
mis_align = 0;
- if ((tmp = ma_addr % alignment))
+ if((tmp = ma_addr % alignment))
mis_align = alignment - tmp;
/* Allocate a block of 1970 from meta_aggr */
- addr4 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE1970);
+ addr4 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE1970);
/* Verify that the allocated block is aligned */
- if (addr4 % alignment) TEST_ERROR
+ if(addr4 % alignment) TEST_ERROR
/* fragment for alignment of block 1970 is freed to free-space */
- if (mis_align) {
+ if(mis_align) {
state.tot_space += mis_align;
state.tot_sect_count += 1;
state.serial_sect_count += 1;
@@ -4555,17 +4494,17 @@ test_mf_align_alloc1(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- if ((addr4 + TEST_BLOCK_SIZE1970) != ma_addr)
+ if((addr4 + TBLOCK_SIZE1970) != ma_addr)
TEST_ERROR
/* Verify total size of free space after all the allocations */
if(check_stats(f, f->shared->fs_man[type], &state))
TEST_ERROR
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TEST_BLOCK_SIZE30);
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr2, (hsize_t)TEST_BLOCK_SIZE50);
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr3, (hsize_t)TEST_BLOCK_SIZE80);
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr3, (hsize_t)TEST_BLOCK_SIZE1970);
+ H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TBLOCK_SIZE30);
+ H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr2, (hsize_t)TBLOCK_SIZE50);
+ H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr3, (hsize_t)TBLOCK_SIZE80);
+ H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr3, (hsize_t)TBLOCK_SIZE1970);
if(H5Fclose(file) < 0)
FAIL_STACK_ERROR
@@ -4587,6 +4526,7 @@ error:
} /* test_mf_align_alloc1() */
/*
+ *-------------------------------------------------------------------------
* To verify that blocks allocated from the aggregator are aligned
*
* Alignment = 1024 aggr->alloc_size = 2048
@@ -4669,10 +4609,7 @@ error:
* The third block of 80 is allocated from the aggregator and should be aligned
* There is space of 1968 left in meta_aggr
* EOA is at 18432
- *
- * Modifications:
- * Vailin Choi; July 2012
- * Changes due to the switch to H5FD_FLMAP_DICHOTOMY
+ *-------------------------------------------------------------------------
*/
static unsigned
test_mf_align_alloc2(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
@@ -4683,7 +4620,7 @@ test_mf_align_alloc2(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
h5_stat_size_t file_size; /* File size */
H5FD_mem_t type, stype;
haddr_t addr1, addr2, addr3, saddr1;
- frspace_state_t state[H5FD_MEM_NTYPES];
+ H5FS_stat_t state[H5FD_MEM_NTYPES];
haddr_t ma_addr=HADDR_UNDEF, sdata_addr=HADDR_UNDEF;
hsize_t ma_size=0, sdata_size=0, mis_align=0;
hsize_t alignment=0, tmp=0;
@@ -4725,19 +4662,19 @@ test_mf_align_alloc2(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
FAIL_STACK_ERROR
/* calculate fragment for alignment of block 30 */
- if ((tmp = (hsize_t)file_size % alignment))
+ if((tmp = (hsize_t)file_size % alignment))
mis_align = alignment - tmp;
/* Allocate a block of 30 from meta_aggr */
type = H5FD_MEM_SUPER;
- addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
/* Verify that the allocated block is aligned */
- if (addr1 % alignment) TEST_ERROR
+ if(addr1 % alignment) TEST_ERROR
/* fragment for alignment of block 30 is freed to free-space */
- HDmemset(&state, 0, sizeof(frspace_state_t) * H5FD_MEM_NTYPES);
- if (mis_align) {
+ HDmemset(&state, 0, sizeof(H5FS_stat_t) * H5FD_MEM_NTYPES);
+ if(mis_align) {
state[type].tot_space += mis_align;
state[type].tot_sect_count += 1;
state[type].serial_sect_count += 1;
@@ -4745,21 +4682,21 @@ test_mf_align_alloc2(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- if ((addr1 + TEST_BLOCK_SIZE30) != ma_addr)
+ if((addr1 + TBLOCK_SIZE30) != ma_addr)
TEST_ERROR
/* fragment for alignment of block 50 is freed to free-space */
mis_align = 0;
- if ((tmp = ma_addr % alignment))
+ if((tmp = ma_addr % alignment))
mis_align = alignment - tmp;
- addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE50);
+ addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE50);
/* Verify that the allocated block is aligned */
- if (addr2 % alignment) TEST_ERROR
+ if(addr2 % alignment) TEST_ERROR
/* fragment for alignment of block 50 is freed to free-space */
- if (mis_align) {
+ if(mis_align) {
state[type].tot_space += mis_align;
state[type].tot_sect_count += 1;
state[type].serial_sect_count += 1;
@@ -4767,7 +4704,7 @@ test_mf_align_alloc2(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- if ((addr2 + TEST_BLOCK_SIZE50) != ma_addr)
+ if((addr2 + TBLOCK_SIZE50) != ma_addr)
TEST_ERROR
/*
@@ -4782,17 +4719,17 @@ test_mf_align_alloc2(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
* block 30 is allocated from ma_addr
*/
mis_align = 0;
- if ((alignment == TEST_ALIGN1024) && (tmp = ((ma_addr + ma_size) % alignment)))
+ if((alignment == TEST_ALIGN1024) && (tmp = ((ma_addr + ma_size) % alignment)))
mis_align = alignment - tmp;
else if ((alignment == TEST_ALIGN4096) && (tmp = (ma_addr % alignment)))
mis_align = alignment - tmp;
/* Allocate a block of 30 from sdata_aggr */
stype = H5FD_MEM_DRAW;
- saddr1 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ saddr1 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
/* fragment for alignment of block 30 for sdata_aggr is freed to free-space */
- if (mis_align) {
+ if(mis_align) {
state[stype].tot_space += mis_align;
state[stype].tot_sect_count += 1;
state[stype].serial_sect_count += 1;
@@ -4804,7 +4741,7 @@ test_mf_align_alloc2(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
H5MF_aggr_query(f, &(f->shared->sdata_aggr), &sdata_addr, &sdata_size);
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- if (sdata_addr != (saddr1 + TEST_BLOCK_SIZE30)) TEST_ERROR
+ if(sdata_addr != (saddr1 + TBLOCK_SIZE30)) TEST_ERROR
/*
* Calculate fragment for the allocation of block 80 from meta_aggr:
@@ -4816,19 +4753,19 @@ test_mf_align_alloc2(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
* block 30 is allocated from ma_addr
*/
mis_align = 0;
- if ((alignment == TEST_ALIGN1024) && (tmp = (ma_addr % alignment)))
+ if((alignment == TEST_ALIGN1024) && (tmp = (ma_addr % alignment)))
mis_align = alignment - tmp;
else if ((alignment == TEST_ALIGN4096) && (tmp = ((sdata_addr + sdata_size) % alignment)))
mis_align = alignment - tmp;
/* Allocate a block of 80 from meta_aggr */
- addr3 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE80);
+ addr3 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE80);
/* Verify that the allocated block is aligned */
if (addr3 % alignment) TEST_ERROR
/* fragment for alignment of block 80 is freed to free-space */
- if (mis_align) {
+ if(mis_align) {
state[type].tot_space += mis_align;
state[type].tot_sect_count += 1;
state[type].serial_sect_count += 1;
@@ -4836,7 +4773,7 @@ test_mf_align_alloc2(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- if ((addr3 + TEST_BLOCK_SIZE80) != ma_addr)
+ if((addr3 + TBLOCK_SIZE80) != ma_addr)
TEST_ERROR
/* Verify total size of free space after all the allocations */
@@ -4850,10 +4787,10 @@ test_mf_align_alloc2(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
TEST_ERROR
}
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TEST_BLOCK_SIZE30);
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr2, (hsize_t)TEST_BLOCK_SIZE50);
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr3, (hsize_t)TEST_BLOCK_SIZE80);
- H5MF_xfree(f, stype, H5AC_ind_read_dxpl_id, saddr1, (hsize_t)TEST_BLOCK_SIZE30);
+ H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TBLOCK_SIZE30);
+ H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr2, (hsize_t)TBLOCK_SIZE50);
+ H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr3, (hsize_t)TBLOCK_SIZE80);
+ H5MF_xfree(f, stype, H5AC_ind_read_dxpl_id, saddr1, (hsize_t)TBLOCK_SIZE30);
if(H5Fclose(file) < 0)
FAIL_STACK_ERROR
@@ -4875,6 +4812,7 @@ error:
} /* test_mf_align_alloc2() */
/*
+ *-------------------------------------------------------------------------
* To verify that blocks allocated from the aggregator are aligned
*
* Alignment = 1024 aggr->alloc_size = 2048
@@ -5008,11 +4946,7 @@ error:
* The meta_aggr is updated to point to the new space
* The block of 1034 is allocated from the new block and should be aligned
* There is space of 1014 left in meta_aggr
- *
- * Modifications:
- * Vailin Choi; July 2012
- * Changes due to the switch to H5FD_FLMAP_DICHOTOMY
- *
+ *-------------------------------------------------------------------------
*/
static unsigned
test_mf_align_alloc3(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
@@ -5024,7 +4958,7 @@ test_mf_align_alloc3(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
H5FD_mem_t type, stype;
haddr_t addr1, addr2, addr3;
haddr_t saddr1, saddr2, saddr3;
- frspace_state_t state[H5FD_MEM_NTYPES];
+ H5FS_stat_t state[H5FD_MEM_NTYPES];
haddr_t ma_addr=HADDR_UNDEF, sdata_addr=HADDR_UNDEF;
hsize_t ma_size=0, sdata_size=0, mis_align=0;
hsize_t alignment=0, tmp=0;
@@ -5067,48 +5001,48 @@ test_mf_align_alloc3(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
FAIL_STACK_ERROR
/* calculate fragment for alignment of block 30 */
- if ((tmp = (hsize_t)file_size % alignment))
+ if((tmp = (hsize_t)file_size % alignment))
mis_align = alignment - tmp;
/* Allocate a block of 30 from meta_aggr */
type = H5FD_MEM_SUPER;
- addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
/* Verify that the allocated block is aligned */
- if (addr1 % alignment) TEST_ERROR
+ if(addr1 % alignment) TEST_ERROR
/* fragment for alignment of block 30 is freed to free-space */
- HDmemset(&state, 0, sizeof(frspace_state_t) * H5FD_MEM_NTYPES);
- if (mis_align) {
+ HDmemset(&state, 0, sizeof(H5FS_stat_t) * H5FD_MEM_NTYPES);
+ if(mis_align) {
state[type].tot_space += mis_align;
state[type].tot_sect_count += 1;
state[type].serial_sect_count += 1;
}
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- if ((addr1 + TEST_BLOCK_SIZE30) != ma_addr)
+ if ((addr1 + TBLOCK_SIZE30) != ma_addr)
TEST_ERROR
/* calculate fragment for alignment of block 50 */
mis_align = 0;
- if ((tmp = ma_addr % alignment))
+ if((tmp = ma_addr % alignment))
mis_align = alignment - tmp;
/* Allocate a block of 50 from meta_aggr */
- addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE50);
+ addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE50);
/* Verify that the allocated block is aligned */
- if (addr2 % alignment) TEST_ERROR
+ if(addr2 % alignment) TEST_ERROR
/* fragment for alignment of block 50 is freed to free-space */
- if (mis_align) {
+ if(mis_align) {
state[type].tot_space += mis_align;
state[type].tot_sect_count += 1;
state[type].serial_sect_count += 1;
}
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- if ((addr2 + TEST_BLOCK_SIZE50) != ma_addr)
+ if((addr2 + TBLOCK_SIZE50) != ma_addr)
TEST_ERROR
/*
@@ -5123,84 +5057,84 @@ test_mf_align_alloc3(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
* block 30 is allocated from ma_addr
*/
mis_align = 0;
- if ((alignment == TEST_ALIGN1024) && (tmp = ((ma_addr + ma_size) % alignment)))
+ if((alignment == TEST_ALIGN1024) && (tmp = ((ma_addr + ma_size) % alignment)))
mis_align = alignment - tmp;
else if ((alignment == TEST_ALIGN4096) && (tmp = ma_addr % alignment))
mis_align = alignment - tmp;
/* Allocate a block of 30 from sdata_aggr */
stype = H5FD_MEM_DRAW;
- saddr1 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ saddr1 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
/* Verify that the allocated block is aligned */
- if (saddr1 % alignment) TEST_ERROR
+ if(saddr1 % alignment) TEST_ERROR
/* fragment for alignment of block 30 for sdata_aggr is freed to free-space */
- if (mis_align) {
+ if(mis_align) {
state[stype].tot_space += mis_align;
state[stype].tot_sect_count += 1;
state[stype].serial_sect_count += 1;
}
H5MF_aggr_query(f, &(f->shared->sdata_aggr), &sdata_addr, &sdata_size);
- if (sdata_addr != (saddr1+TEST_BLOCK_SIZE30)) TEST_ERROR
+ if(sdata_addr != (saddr1+TBLOCK_SIZE30)) TEST_ERROR
/* calculate fragment for alignment of block 50 in sdata_aggr */
mis_align = 0;
- if ((tmp = sdata_addr % alignment))
+ if((tmp = sdata_addr % alignment))
mis_align = alignment - tmp;
/* Allocate a block of 50 from sdata_aggr */
- saddr2 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE50);
+ saddr2 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE50);
/* Verify that the allocated block is aligned */
- if (saddr2 % alignment) TEST_ERROR
+ if(saddr2 % alignment) TEST_ERROR
/* fragment for alignment of block 50 for sdata_aggr is freed to free-space */
- if (mis_align) {
+ if(mis_align) {
state[stype].tot_space += mis_align;
state[stype].tot_sect_count += 1;
state[stype].serial_sect_count += 1;
}
H5MF_aggr_query(f, &(f->shared->sdata_aggr), &sdata_addr, &sdata_size);
- if (sdata_addr != (saddr2 + TEST_BLOCK_SIZE50)) TEST_ERROR
+ if(sdata_addr != (saddr2 + TBLOCK_SIZE50)) TEST_ERROR
/* calculate fragment for alignment of block 80 in sdata_aggr */
mis_align = 0;
- if ((tmp = sdata_addr % alignment))
+ if((tmp = sdata_addr % alignment))
mis_align = alignment - tmp;
/* Allocate a block of 80 from sdata_aggr */
- saddr3 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE80);
+ saddr3 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE80);
/* Verify that the allocated block is aligned */
- if (saddr3 % alignment) TEST_ERROR
+ if(saddr3 % alignment) TEST_ERROR
/* fragment for alignment of block 80 for sdata_aggr is freed to free-space */
- if (mis_align) {
+ if(mis_align) {
state[stype].tot_space += mis_align;
state[stype].tot_sect_count += 1;
state[stype].serial_sect_count += 1;
}
H5MF_aggr_query(f, &(f->shared->sdata_aggr), &sdata_addr, &sdata_size);
- if ((saddr3 + TEST_BLOCK_SIZE80) != sdata_addr)
+ if ((saddr3 + TBLOCK_SIZE80) != sdata_addr)
TEST_ERROR
/* calculate fragment for alignment of block 1034 */
mis_align = 0;
- if ((tmp = sdata_addr % alignment))
+ if((tmp = sdata_addr % alignment))
mis_align = alignment - tmp;
/* Allocate a block of 1034 for meta_aggr */
- addr3 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE1034);
+ addr3 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE1034);
/* Verify that the allocated block is aligned */
- if (addr3 % alignment) TEST_ERROR
+ if(addr3 % alignment) TEST_ERROR
/* fragment for alignment of block 1034 for meta_aggr is freed to free-space */
- if (mis_align) {
+ if(mis_align) {
state[type].tot_space += mis_align;
state[type].tot_sect_count += 1;
state[type].serial_sect_count += 1;
@@ -5208,11 +5142,11 @@ test_mf_align_alloc3(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
/* calculate unused space in meta_aggr that is freed to free-space after block 1034 */
mis_align = 0;
- if ((alignment == TEST_ALIGN1024) && (tmp = (ma_addr % alignment)))
+ if((alignment == TEST_ALIGN1024) && (tmp = (ma_addr % alignment)))
mis_align = alignment - tmp;
/* fragment for unused space in meta_aggr after block 1034 is freed to free-space */
- if (mis_align) {
+ if(mis_align) {
state[type].tot_space += mis_align;
state[type].tot_sect_count += 1;
state[type].serial_sect_count += 1;
@@ -5220,7 +5154,7 @@ test_mf_align_alloc3(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- if ((addr3 + TEST_BLOCK_SIZE1034) != ma_addr)
+ if((addr3 + TBLOCK_SIZE1034) != ma_addr)
TEST_ERROR
/* Verify total size of free space after all allocations */
@@ -5255,6 +5189,7 @@ error:
/*
+ *-------------------------------------------------------------------------
* To verify that blocks allocated from the aggregator are aligned
*
* Alignment = 4096 aggr->alloc_size = 2048
@@ -5317,6 +5252,7 @@ error:
* Fragment from alignment of aggregator allocation is freed to free-space:[10250, 2038]
* There is space of 2023 left in meta_aggr
*
+ *-------------------------------------------------------------------------
*/
static unsigned
test_mf_align_alloc4(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
@@ -5327,7 +5263,7 @@ test_mf_align_alloc4(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
h5_stat_size_t file_size;
H5FD_mem_t type;
haddr_t addr1, addr2, addr3;
- frspace_state_t state;
+ H5FS_stat_t state;
haddr_t ma_addr=HADDR_UNDEF;
hsize_t ma_size=0, saved_ma_size=0;
hsize_t alignment=0, mis_align=0, tmp=0;
@@ -5370,19 +5306,19 @@ test_mf_align_alloc4(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
TEST_ERROR
/* calculate fragment for alignment of block 30 */
- if ((tmp = (hsize_t)file_size % alignment))
+ if((tmp = (hsize_t)file_size % alignment))
mis_align = alignment - tmp;
/* Allocate a block of 30 from meta_aggr */
type = H5FD_MEM_SUPER;
- addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
/* Verify that the allocated block is aligned */
- if (addr1 % alignment) TEST_ERROR
+ if(addr1 % alignment) TEST_ERROR
/* fragment for alignment of block 30 is freed to free-space */
- HDmemset(&state, 0, sizeof(frspace_state_t));
- if (mis_align) {
+ HDmemset(&state, 0, sizeof(H5FS_stat_t));
+ if(mis_align) {
state.tot_space += mis_align;
state.tot_sect_count += 1;
state.serial_sect_count += 1;
@@ -5390,21 +5326,21 @@ test_mf_align_alloc4(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
saved_ma_size = ma_size;
- if ((addr1+TEST_BLOCK_SIZE30) != ma_addr) TEST_ERROR
+ if((addr1+TBLOCK_SIZE30) != ma_addr) TEST_ERROR
/* calculate fragment for alignment of block 2058 */
mis_align = 0;
- if ((tmp = ma_addr % alignment))
+ if((tmp = ma_addr % alignment))
mis_align = alignment - tmp;
/* Allocate a block of 2058 from meta_aggr */
- addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE2058);
+ addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE2058);
/* Verify that the allocated block is aligned */
- if (addr2 % alignment) TEST_ERROR
+ if(addr2 % alignment) TEST_ERROR
/* fragment for alignment of block 2058 is freed to free-space */
- if (mis_align) {
+ if(mis_align) {
state.tot_space += mis_align;
state.tot_sect_count += 1;
state.serial_sect_count += 1;
@@ -5412,28 +5348,28 @@ test_mf_align_alloc4(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- if ((addr2 + TEST_BLOCK_SIZE2058) != ma_addr) TEST_ERROR
+ if((addr2 + TBLOCK_SIZE2058) != ma_addr) TEST_ERROR
/* meta_aggr->size remains the same */
- if (ma_size != saved_ma_size) TEST_ERROR
+ if(ma_size != saved_ma_size) TEST_ERROR
/* calculate fragment for alignment of block 5 from meta_aggr */
mis_align = 0;
- if ((tmp = ma_addr % alignment))
+ if((tmp = ma_addr % alignment))
mis_align = alignment - tmp;
/* Allocate a block of 5 from meta_aggr */
- addr3 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE5);
+ addr3 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE5);
/* fragment for alignment of block 5 is freed to free-space */
- if (mis_align) {
+ if(mis_align) {
state.tot_space += mis_align;
state.tot_sect_count += 1;
state.serial_sect_count += 1;
}
/* Verify that the allocated block is aligned */
- if (addr3 % alignment) TEST_ERROR
+ if(addr3 % alignment) TEST_ERROR
/* Verify total size of free space after all allocations */
if(f->shared->fs_man[type]) {
@@ -5461,6 +5397,7 @@ error:
} /* test_mf_align_alloc4() */
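
The alignment checks in the test_mf_align_alloc* functions above all rely on the same arithmetic: when the current end of the aggregator (or of the file) is not a multiple of the requested alignment, the allocator skips forward by alignment - (addr % alignment) bytes, and that skipped fragment is expected to show up in the free-space statistics. A minimal standalone sketch of that bookkeeping, using plain C integer types instead of HDF5's haddr_t/hsize_t and a hypothetical struct that mirrors only the three H5FS_stat_t fields these tests compare:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical mirror of the three H5FS_stat_t fields the tests check */
struct fs_stats {
    uint64_t tot_space;         /* total free space tracked            */
    uint64_t tot_sect_count;    /* total number of free-space sections */
    uint64_t serial_sect_count; /* number of serializable sections     */
};

/* Return the aligned start address for the next block and record the
 * fragment, if any, that alignment leaves behind -- the same computation
 * the tests apply to file_size / ma_addr / sdata_addr before each
 * H5MF_alloc() call. */
static uint64_t align_and_track(uint64_t eoa, uint64_t alignment, struct fs_stats *st)
{
    uint64_t mis_align = 0, tmp;

    if((tmp = eoa % alignment))
        mis_align = alignment - tmp;
    if(mis_align) {                     /* fragment is freed to free space */
        st->tot_space += mis_align;
        st->tot_sect_count += 1;
        st->serial_sect_count += 1;
    }
    return eoa + mis_align;             /* aligned address of the new block */
}

int main(void)
{
    struct fs_stats st = {0, 0, 0};
    uint64_t addr = align_and_track(2571, 1024, &st); /* e.g. EOA 2571, alignment 1024 */

    /* Prints: aligned addr = 3072, fragment = 501 */
    printf("aligned addr = %llu, fragment = %llu\n",
           (unsigned long long)addr, (unsigned long long)st.tot_space);
    return 0;
}
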
/*
+ *-------------------------------------------------------------------------
* To verify that blocks allocated from the aggregator are aligned
*
* Alignment = 1024 aggr->alloc_size = 2048
@@ -5525,10 +5462,7 @@ error:
* sdata_aggr is reset to 0
* EOA is 14346
* meta_aggr and sdata_aggr are all 0
- *
- * Modifications:
- * Vailin Choi; July 2012
- * Changes due to the switch to H5FD_FLMAP_DICHOTOMY
+ *-------------------------------------------------------------------------
*/
static unsigned
test_mf_align_alloc5(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
@@ -5539,7 +5473,7 @@ test_mf_align_alloc5(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
h5_stat_size_t file_size;
H5FD_mem_t type, stype;
haddr_t addr1, addr2, saddr1;
- frspace_state_t state[H5FD_MEM_NTYPES];
+ H5FS_stat_t state[H5FD_MEM_NTYPES];
haddr_t ma_addr=HADDR_UNDEF, new_ma_addr=HADDR_UNDEF;
haddr_t sdata_addr=HADDR_UNDEF, new_sdata_addr=HADDR_UNDEF;
hsize_t ma_size=0, new_ma_size=0, sdata_size=0, new_sdata_size=0;
@@ -5583,23 +5517,23 @@ test_mf_align_alloc5(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
TEST_ERROR
/* calculate fragment for alignment of block 30 */
- if ((tmp = (hsize_t)file_size % alignment))
+ if((tmp = (hsize_t)file_size % alignment))
mis_align = alignment - tmp;
/* Allocate a block of 30 from meta_aggr */
type = H5FD_MEM_SUPER;
- addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
/* Verify that the allocated block is aligned */
- if (addr1 % alignment) TEST_ERROR
+ if(addr1 % alignment) TEST_ERROR
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- if ((addr1 + TEST_BLOCK_SIZE30) != ma_addr) TEST_ERROR
+ if((addr1 + TBLOCK_SIZE30) != ma_addr) TEST_ERROR
/* fragment for alignment of block 30 is freed to free-space */
- HDmemset(&state, 0, sizeof(frspace_state_t) * H5FD_MEM_NTYPES);
- if (mis_align) {
+ HDmemset(&state, 0, sizeof(H5FS_stat_t) * H5FD_MEM_NTYPES);
+ if(mis_align) {
state[type].tot_space += mis_align;
state[type].tot_sect_count += 1;
state[type].serial_sect_count += 1;
@@ -5607,39 +5541,39 @@ test_mf_align_alloc5(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
/* calculate fragment for alignment of block 30 from sdata_aggr */
mis_align = 0;
- if ((tmp = (ma_addr + ma_size) % alignment))
+ if((tmp = (ma_addr + ma_size) % alignment))
mis_align = alignment - tmp;
/* Allocate a block of 30 from sdata_aggr */
stype = H5FD_MEM_DRAW;
- saddr1 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ saddr1 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
/* Verify that the allocated block is aligned */
- if (saddr1 % alignment) TEST_ERROR
+ if(saddr1 % alignment) TEST_ERROR
/* fragment of alignment for block 30 in sdata_aggr is freed to free-space */
- if (mis_align) {
+ if(mis_align) {
state[stype].tot_space += mis_align;
state[stype].tot_sect_count += 1;
state[stype].serial_sect_count += 1;
}
H5MF_aggr_query(f, &(f->shared->sdata_aggr), &sdata_addr, &sdata_size);
- if ((saddr1+TEST_BLOCK_SIZE30) != sdata_addr) TEST_ERROR
+ if((saddr1+TBLOCK_SIZE30) != sdata_addr) TEST_ERROR
/* calculate fragment for alignment of block 2058 from meta_aggr */
mis_align = 0;
- if ((tmp = (sdata_addr + sdata_size) % alignment))
+ if((tmp = (sdata_addr + sdata_size) % alignment))
mis_align = alignment - tmp;
/* Allocate a block of 2058 from meta_aggr */
- addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE2058);
+ addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE2058);
/* Verify that the allocated block is aligned */
if (addr2 % alignment) TEST_ERROR
/* fragment for alignment of block 2058 is freed to free-space */
- if (mis_align) {
+ if(mis_align) {
state[type].tot_space += mis_align;
state[type].tot_sect_count += 1;
state[type].serial_sect_count += 1;
@@ -5687,6 +5621,7 @@ error:
/*
+ *-------------------------------------------------------------------------
* To verify that blocks allocated from the aggregator are aligned
*
* Alignment = 1024 aggr->alloc_size = 2048
@@ -5792,10 +5727,7 @@ error:
* Fragment from alignment of file allocation is freed to free-space:[16464, 4016]
* EOA is at 22538
* meta_aggr is unchanged
- *
- * Modifications:
- * Vailin Choi; July 2012
- * Changes due to the switch to H5FD_FLMAP_DICHOTOMY
+ *-------------------------------------------------------------------------
*/
static unsigned
test_mf_align_alloc6(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
@@ -5807,7 +5739,7 @@ test_mf_align_alloc6(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
H5FD_mem_t type, stype;
haddr_t addr1, addr2;
haddr_t saddr1, saddr2, saddr3;
- frspace_state_t state[H5FD_MEM_NTYPES];
+ H5FS_stat_t state[H5FD_MEM_NTYPES];
haddr_t ma_addr=HADDR_UNDEF, new_ma_addr=HADDR_UNDEF, sdata_addr=HADDR_UNDEF;
hsize_t ma_size=0, new_ma_size=0, sdata_size=0;
hsize_t alignment=0, mis_align=0, tmp=0;
@@ -5849,26 +5781,26 @@ test_mf_align_alloc6(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
TEST_ERROR
/* calculate fragment for alignment of block 30 */
- if ((tmp = (hsize_t)file_size % alignment))
+ if((tmp = (hsize_t)file_size % alignment))
mis_align = alignment - tmp;
/* Allocate a block of 30 from meta_aggr */
type = H5FD_MEM_SUPER;
- addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
/* Verify that the allocated block is aligned */
if (addr1 % alignment) TEST_ERROR
/* fragment for alignment of block 30 in meta_aggr is freed to free-space */
- HDmemset(&state, 0, sizeof(frspace_state_t) * H5FD_MEM_NTYPES);
- if (mis_align) {
+ HDmemset(&state, 0, sizeof(H5FS_stat_t) * H5FD_MEM_NTYPES);
+ if(mis_align) {
state[type].tot_space += mis_align;
state[type].tot_sect_count += 1;
state[type].serial_sect_count += 1;
}
H5MF_aggr_query(f, &(f->shared->meta_aggr), &ma_addr, &ma_size);
- if ((addr1+TEST_BLOCK_SIZE30) != ma_addr)
+ if ((addr1+TBLOCK_SIZE30) != ma_addr)
TEST_ERROR
/* calculate fragment for alignment of block 30 in sdata_aggr */
@@ -5878,7 +5810,7 @@ test_mf_align_alloc6(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
/* Allocate a block of 30 from sdata_aggr */
stype = H5FD_MEM_DRAW;
- saddr1 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ saddr1 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
/* Verify that the allocated block is aligned */
if (saddr1 % alignment) TEST_ERROR
@@ -5891,7 +5823,7 @@ test_mf_align_alloc6(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
}
H5MF_aggr_query(f, &(f->shared->sdata_aggr), &sdata_addr, &sdata_size);
- if (sdata_addr != (saddr1+TEST_BLOCK_SIZE30)) TEST_ERROR
+ if (sdata_addr != (saddr1+TBLOCK_SIZE30)) TEST_ERROR
/* calculate fragment for alignment of block 50 in sdata_aggr */
mis_align = 0;
@@ -5899,7 +5831,7 @@ test_mf_align_alloc6(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
mis_align = alignment - tmp;
/* Allocate a block of 50 from sdata_aggr */
- saddr2 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE50);
+ saddr2 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE50);
/* Verify that the allocated block is aligned */
if (saddr2 % alignment) TEST_ERROR
@@ -5912,7 +5844,7 @@ test_mf_align_alloc6(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
}
H5MF_aggr_query(f, &(f->shared->sdata_aggr), &sdata_addr, &sdata_size);
- if (sdata_addr != (saddr2+TEST_BLOCK_SIZE50)) TEST_ERROR
+ if (sdata_addr != (saddr2+TBLOCK_SIZE50)) TEST_ERROR
/* calculate fragment for alignment of block 80 in sdata_aggr */
mis_align = 0;
@@ -5920,7 +5852,7 @@ test_mf_align_alloc6(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
mis_align = alignment - tmp;
/* Allocate a block of 80 from sdata_aggr */
- saddr3 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE80);
+ saddr3 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE80);
/* Verify that the allocated block is aligned */
if (saddr3 % alignment) TEST_ERROR
@@ -5933,7 +5865,7 @@ test_mf_align_alloc6(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
}
H5MF_aggr_query(f, &(f->shared->sdata_aggr), &sdata_addr, &sdata_size);
- if (sdata_addr != (saddr3+TEST_BLOCK_SIZE80)) TEST_ERROR
+ if (sdata_addr != (saddr3+TBLOCK_SIZE80)) TEST_ERROR
/* calculate fragment for alignment of block 2058 */
/* remaining space in sdata_aggr is freed and shrunk */
@@ -5942,7 +5874,7 @@ test_mf_align_alloc6(const char *env_h5_drvr, hid_t fapl, hid_t new_fapl)
mis_align = alignment - tmp;
/* Allocate a block of 2058 from meta_aggr */
- addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE2058);
+ addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE2058);
/* Verify that the allocated block is aligned */
if (addr2 % alignment) TEST_ERROR
@@ -5993,7 +5925,6 @@ error:
return(1);
} /* test_mf_align_alloc6() */
-
/*
* Test a bug that occurs when an allocator with zero size left and an unaligned
* endpoint is extended to allocate an aligned object
@@ -6156,334 +6087,111 @@ error:
return(1);
} /* test_mf_bug1() */
-
/*
- * Verify that the file's free-space manager persists where there are free sections in the manager
+ * Verify that the file's free-space manager(s) are persistent for a split-file
+ *-------------------------------------------------------------------------
*/
static unsigned
-test_mf_fs_persist(hid_t fapl_new, hid_t fcpl)
+test_mf_fs_persist_split(void)
{
- hid_t file = -1; /* File ID */
- char filename[FILENAME_LEN]; /* Filename to use */
- H5F_t *f = NULL; /* Internal file object pointer */
- H5FD_mem_t type; /* File allocation type */
- H5FS_stat_t fs_stat; /* Information for free-space manager */
- haddr_t addr1, addr2, addr3, addr4, addr5, addr6; /* File address for H5FD_MEM_SUPER */
- haddr_t tmp_addr; /* Temporary variable for address */
-
- TESTING("file's free-space manager is persistent");
-
- /* Set the filename to use for this test (dependent on fapl) */
- h5_fixname(FILENAME[0], fapl_new, filename, sizeof(filename));
-
- /* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl_new)) < 0)
- FAIL_STACK_ERROR
-
- /* Get a pointer to the internal file object */
- if(NULL == (f = (H5F_t *)H5I_object(file)))
- FAIL_STACK_ERROR
-
- /* Allocate 6 blocks */
- type = H5FD_MEM_SUPER;
- if(HADDR_UNDEF == (addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE1)))
- FAIL_STACK_ERROR
- if(HADDR_UNDEF == (addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE2)))
- FAIL_STACK_ERROR
- if(HADDR_UNDEF == (addr3 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE3)))
- FAIL_STACK_ERROR
- if(HADDR_UNDEF == (addr4 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE4)))
- FAIL_STACK_ERROR
- if(HADDR_UNDEF == (addr5 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE5)))
- FAIL_STACK_ERROR
- if(HADDR_UNDEF == (addr6 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE6)))
- FAIL_STACK_ERROR
-
- /* Put block #1, #3, #5 to H5FD_MEM_SUPER free-space manager */
- if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TEST_BLOCK_SIZE1) < 0)
- FAIL_STACK_ERROR
- if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr3, (hsize_t)TEST_BLOCK_SIZE3) < 0)
- FAIL_STACK_ERROR
- if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr5, (hsize_t)TEST_BLOCK_SIZE5) < 0)
- FAIL_STACK_ERROR
-
- if(H5Fclose(file) < 0)
- FAIL_STACK_ERROR
-
- /* Re-open the file */
- if((file = H5Fopen(filename, H5F_ACC_RDWR, fapl_new)) < 0)
- FAIL_STACK_ERROR
-
- /* Get a pointer to the internal file object */
- if(NULL == (f = (H5F_t *)H5I_object(file)))
- FAIL_STACK_ERROR
-
- /* Verify that H5FD_MEM_SUPER free-space manager is there */
- if(!H5F_addr_defined(f->shared->fs_addr[type]))
- TEST_ERROR
-
- /* Start up H5FD_MEM_SUPER free-space manager */
- if(H5MF_alloc_open(f, H5AC_ind_read_dxpl_id, type) < 0)
- FAIL_STACK_ERROR
-
- /* Get info for free-space manager */
- if(H5FS_stat_info(f, f->shared->fs_man[type], &fs_stat) < 0)
- FAIL_STACK_ERROR
-
- /* Verify free-space info */
- if(fs_stat.tot_space < (TEST_BLOCK_SIZE1+TEST_BLOCK_SIZE3+TEST_BLOCK_SIZE5))
- TEST_ERROR
-
- if(fs_stat.serial_sect_count < 3)
- TEST_ERROR
-
- /* Retrieve block #3 from H5FD_MEM_SUPER free-space manager */
- if(HADDR_UNDEF == (tmp_addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE3)))
- FAIL_STACK_ERROR
- if(tmp_addr != addr3)
- TEST_ERROR
-
- /* Retrieve block #1 from H5FD_MEM_SUPER free-space manager */
- if(HADDR_UNDEF == (tmp_addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE1)))
- FAIL_STACK_ERROR
- if(tmp_addr != addr1)
- TEST_ERROR
-
- if(H5Fclose(file) < 0)
- FAIL_STACK_ERROR
-
- /* Re-open the file */
- if((file = H5Fopen(filename, H5F_ACC_RDWR, fapl_new)) < 0)
- FAIL_STACK_ERROR
-
- /* Get a pointer to the internal file object */
- if(NULL == (f = (H5F_t *)H5I_object(file)))
- FAIL_STACK_ERROR
+ hid_t file = -1; /* File ID */
+ hid_t fcpl = -1; /* File creation property list ID */
+ hid_t fapl = -1; /* File access property list ID */
+ char filename[FILENAME_LEN]; /* Filename to use */
+ H5F_t *f = NULL; /* Internal file object pointer */
+ H5FD_mem_t type, stype, btype; /* File allocation type */
+ H5FS_stat_t fs_stat; /* Information for free-space manager */
+ haddr_t addr1, addr2, addr3, addr4; /* File address for H5FD_MEM_SUPER */
+ haddr_t saddr1, saddr2, saddr3, saddr4; /* File address for H5FD_MEM_DRAW */
+ haddr_t baddr5, baddr6, baddr7, baddr8; /* File address for H5FD_MEM_BTREE */
+ haddr_t tmp_addr; /* temporary variable for address */
+
+ TESTING("File's free-space managers are persistent for split-file");
+
+ /* for now, we don't support persistent free space managers
+ * with the split file driver.
+ */
+ SKIPPED();
+ HDfprintf(stdout, " Persistent FSMs disabled in multi file driver.\n");
+ return 0; /* <========== note return */
- /* Verify that H5FD_MEM_SUPER free-space manager is there */
- if(!H5F_addr_defined(f->shared->fs_addr[type]))
- TEST_ERROR
- /* Retrieve block #5 from H5FD_MEM_SUPER free-space manager */
- if(HADDR_UNDEF == (tmp_addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE5)))
- FAIL_STACK_ERROR
- if(tmp_addr != addr5)
- TEST_ERROR
- if(H5Fclose(file) < 0)
- FAIL_STACK_ERROR
- PASSED()
+ if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ FAIL_STACK_ERROR
- return(0);
+ /* Set up split driver */
+ if(H5Pset_fapl_split(fapl, "-m.h5", H5P_DEFAULT, "-r.h5", H5P_DEFAULT)<0)
+ FAIL_STACK_ERROR
-error:
- H5E_BEGIN_TRY {
- H5Fclose(file);
- } H5E_END_TRY;
- return(1);
-} /* test_mf_fs_persist() */
-
-/*
- * Verify that the free-space manager goes away
- */
-static unsigned
-test_mf_fs_gone(hid_t fapl_new, hid_t fcpl)
-{
- hid_t file = -1; /* File ID */
- char filename[FILENAME_LEN]; /* Filename to use */
- H5F_t *f = NULL; /* Internal file object pointer */
- H5FD_mem_t type; /* File allocation type */
- H5FS_stat_t fs_stat; /* Information for free-space manager */
- haddr_t addr1, addr2, addr3, addr4; /* File address for H5FD_MEM_SUPER */
+ /* File creation property list template */
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ FAIL_STACK_ERROR
- TESTING("file's free-space manager is going away");
+ if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, TRUE, (hsize_t)1) < 0)
+ FAIL_STACK_ERROR
/* Set the filename to use for this test (dependent on fapl) */
- h5_fixname(FILENAME[0], fapl_new, filename, sizeof(filename));
+ h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl_new)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl)) < 0)
FAIL_STACK_ERROR
/* Get a pointer to the internal file object */
if(NULL == (f = (H5F_t *)H5I_object(file)))
FAIL_STACK_ERROR
- /* Allocate 4 blocks */
+ /* Allocate 4 blocks of type H5FD_MEM_SUPER */
type = H5FD_MEM_SUPER;
- if(HADDR_UNDEF == (addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE1)))
- FAIL_STACK_ERROR
- if(HADDR_UNDEF == (addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE2)))
- FAIL_STACK_ERROR
- if(HADDR_UNDEF == (addr3 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE3)))
- FAIL_STACK_ERROR
- if(HADDR_UNDEF == (addr4 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE4)))
- FAIL_STACK_ERROR
-
- /* Put block #1, #3 to H5FD_MEM_SUPER free-space manager */
- if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TEST_BLOCK_SIZE1) < 0)
- FAIL_STACK_ERROR
- if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr3, (hsize_t)TEST_BLOCK_SIZE3) < 0)
- FAIL_STACK_ERROR
-
- /* Retrieve block #1, #3 from H5FD_MEM_SUPER free-space manager */
- if(HADDR_UNDEF == (addr3 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE3)))
- FAIL_STACK_ERROR
- if(HADDR_UNDEF == (addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE1)))
- FAIL_STACK_ERROR
-
- if(H5Fclose(file) < 0)
- FAIL_STACK_ERROR
-
- /* Re-open the file */
- if((file = H5Fopen(filename, H5F_ACC_RDWR, fapl_new)) < 0)
+ if(HADDR_UNDEF == (addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE1)))
FAIL_STACK_ERROR
-
- /* Get a pointer to the internal file object */
- if(NULL == (f = (H5F_t *)H5I_object(file)))
- FAIL_STACK_ERROR
-
- /* Verify that the H5FD_MEM_SUPER free-space manager is not there */
- if(H5F_addr_defined(f->shared->fs_addr[type]))
- TEST_ERROR
-
- /* Put block #3 to H5FD_MEM_SUPER free-space manager */
- if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr3, (hsize_t)TEST_BLOCK_SIZE3) < 0)
- FAIL_STACK_ERROR
-
- if(H5Fclose(file) < 0)
- FAIL_STACK_ERROR
-
- /* Re-open the file */
- if((file = H5Fopen(filename, H5F_ACC_RDWR, fapl_new)) < 0)
+ if(HADDR_UNDEF == (addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE2)))
FAIL_STACK_ERROR
-
- /* Get a pointer to the internal file object */
- if(NULL == (f = (H5F_t *)H5I_object(file)))
+ if(HADDR_UNDEF == (addr3 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE3)))
FAIL_STACK_ERROR
-
- /* Verify that H5FD_MEM_SUPER free-space manager is there */
- if(!H5F_addr_defined(f->shared->fs_addr[type]))
- TEST_ERROR
-
- /* Start up H5FD_MEM_SUPER free-space manager */
- if(H5MF_alloc_open(f, H5AC_ind_read_dxpl_id, type) < 0)
+ if(HADDR_UNDEF == (addr4 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE4)))
FAIL_STACK_ERROR
- /* Get info for H5FD_MEM_SUPER free-space manager */
- if(H5FS_stat_info(f, f->shared->fs_man[type], &fs_stat) < 0)
+ /* Put block #1, #3 into H5FD_MEM_SUPER free-space manager */
+ if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TBLOCK_SIZE1) < 0)
FAIL_STACK_ERROR
-
- /* Verify free-space info */
- if(!H5F_addr_defined(fs_stat.addr) || !H5F_addr_defined(fs_stat.sect_addr))
- TEST_ERROR
- if(fs_stat.tot_space < TEST_BLOCK_SIZE3)
- TEST_ERROR
-
- /* Put block #4 to H5FD_MEM_SUPER free-space manager */
- if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr4, (hsize_t)TEST_BLOCK_SIZE4) < 0)
+ if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr3, (hsize_t)TBLOCK_SIZE3) < 0)
FAIL_STACK_ERROR
- /* The H5FD_MEM_SUPER free-space manager will go away at H5MF_close() */
- if(H5Fclose(file) < 0)
+ /* Allocate 4 blocks of type H5FD_MEM_DRAW */
+ stype = H5FD_MEM_DRAW;
+ if(HADDR_UNDEF == (saddr1 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE1)))
FAIL_STACK_ERROR
-
- /* Re-open the file */
- if((file = H5Fopen(filename, H5F_ACC_RDWR, fapl_new)) < 0)
+ if(HADDR_UNDEF == (saddr2 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE2)))
FAIL_STACK_ERROR
-
- /* Get a pointer to the internal file object */
- if(NULL == (f = (H5F_t *)H5I_object(file)))
+ if(HADDR_UNDEF == (saddr3 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE3)))
FAIL_STACK_ERROR
-
- /* Verify that the H5FD_MEM_SUPER free-space manager is not there */
- if(H5F_addr_defined(f->shared->fs_addr[type]))
- TEST_ERROR
-
- if(H5Fclose(file) < 0)
+ if(HADDR_UNDEF == (saddr4 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE4)))
FAIL_STACK_ERROR
- PASSED()
-
- return(0);
-
-error:
- H5E_BEGIN_TRY {
- H5Fclose(file);
- } H5E_END_TRY;
- return(1);
-} /* test_mf_fs_gone() */
-
-
-/*
- * Verify that the file's free-space manager(s) are persistent for a split-file
- */
-static unsigned
-test_mf_fs_split(hid_t fapl_new, hid_t fcpl)
-{
- hid_t file = -1; /* File ID */
- char filename[FILENAME_LEN]; /* Filename to use */
- H5F_t *f = NULL; /* Internal file object pointer */
- H5FD_mem_t type, stype, btype; /* File allocation type */
- H5FS_stat_t fs_stat; /* Information for free-space manager */
- haddr_t addr1, addr2, addr3, addr4; /* File address for H5FD_MEM_SUPER */
- haddr_t saddr1, saddr2, saddr3, saddr4; /* File address for H5FD_MEM_DRAW */
- haddr_t baddr5, baddr6, baddr7, baddr8; /* File address for H5FD_MEM_BTREE */
- haddr_t tmp_addr; /* temporary variable for address */
-
- TESTING("file's free-space managers are persistent for split-file");
-
- /* Set the filename to use for this test (dependent on fapl) */
- h5_fixname(FILENAME[0], fapl_new, filename, sizeof(filename));
-
- /* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl_new)) < 0)
+ /* Put block #1, #3 into H5FD_MEM_DRAW free-space manager */
+ if(H5MF_xfree(f, stype, H5AC_ind_read_dxpl_id, saddr1, (hsize_t)TBLOCK_SIZE1) < 0)
FAIL_STACK_ERROR
-
- /* Get a pointer to the internal file object */
- if(NULL == (f = (H5F_t *)H5I_object(file)))
+ if(H5MF_xfree(f, stype, H5AC_ind_read_dxpl_id, saddr3, (hsize_t)TBLOCK_SIZE3) < 0)
FAIL_STACK_ERROR
- /* Allocate 4 blocks of type H5FD_MEM_SUPER */
- type = H5FD_MEM_SUPER;
- if(HADDR_UNDEF == (addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE1)))
- FAIL_STACK_ERROR
- if(HADDR_UNDEF == (addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE2)))
- FAIL_STACK_ERROR
- if(HADDR_UNDEF == (addr3 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE3)))
- FAIL_STACK_ERROR
- if(HADDR_UNDEF == (addr4 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE4)))
- FAIL_STACK_ERROR
-
- /* Put block #1, #3 into H5FD_MEM_SUPER free-space manager */
- if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TEST_BLOCK_SIZE1) < 0)
- FAIL_STACK_ERROR
- if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr3, (hsize_t)TEST_BLOCK_SIZE3) < 0)
- FAIL_STACK_ERROR
-
- /* Allocate 4 blocks of type H5FD_MEM_DRAW */
- stype = H5FD_MEM_DRAW;
- if(HADDR_UNDEF == (saddr1 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE1)))
- FAIL_STACK_ERROR
- if(HADDR_UNDEF == (saddr2 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE2)))
- FAIL_STACK_ERROR
- if(HADDR_UNDEF == (saddr3 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE3)))
- FAIL_STACK_ERROR
- if(HADDR_UNDEF == (saddr4 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE4)))
- FAIL_STACK_ERROR
-
- /* Put block #1, #3 into H5FD_MEM_DRAW free-space manager */
- if(H5MF_xfree(f, stype, H5AC_ind_read_dxpl_id, saddr1, (hsize_t)TEST_BLOCK_SIZE1) < 0)
- FAIL_STACK_ERROR
- if(H5MF_xfree(f, stype, H5AC_ind_read_dxpl_id, saddr3, (hsize_t)TEST_BLOCK_SIZE3) < 0)
- FAIL_STACK_ERROR
-
if(H5Fclose(file) < 0)
FAIL_STACK_ERROR
/* Re-open the file */
- if((file = H5Fopen(filename, H5F_ACC_RDWR, fapl_new)) < 0)
+ if((file = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0)
FAIL_STACK_ERROR
/* Get a pointer to the internal file object */
@@ -6495,7 +6203,7 @@ test_mf_fs_split(hid_t fapl_new, hid_t fcpl)
TEST_ERROR
/* Start up H5FD_MEM_SUPER free-space manager */
- if(H5MF_alloc_open(f, H5AC_ind_read_dxpl_id, type) < 0)
+ if(H5MF_open_fstype(f, H5AC_ind_read_dxpl_id, (H5F_mem_page_t)type) < 0)
FAIL_STACK_ERROR
/* Get free-space info */
@@ -6503,23 +6211,24 @@ test_mf_fs_split(hid_t fapl_new, hid_t fcpl)
FAIL_STACK_ERROR
/* Verify free-space info */
- if(fs_stat.tot_space < (TEST_BLOCK_SIZE1+TEST_BLOCK_SIZE3))
- TEST_ERROR
+ if(fs_stat.tot_space < (TBLOCK_SIZE1+TBLOCK_SIZE3))
+ TEST_ERROR
if(fs_stat.serial_sect_count < 2)
- TEST_ERROR
+ TEST_ERROR
- /* Retrieve block #1 from H5FD_MEM_SUPER free-space manager; block #2 still in free-space */
- if(HADDR_UNDEF == (tmp_addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE1)))
- FAIL_STACK_ERROR
+ /* Retrieve block #1 from H5FD_MEM_SUPER free-space manager; block #3 still in free-space */
+ if(HADDR_UNDEF == (tmp_addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE1)))
+ FAIL_STACK_ERROR
if(tmp_addr != addr1)
- TEST_ERROR
+ TEST_ERROR
/* Verify that the free-space manager for H5FD_MEM_DRAW is there */
if(!H5F_addr_defined(f->shared->fs_addr[stype]))
TEST_ERROR
+
/* Start up H5FD_MEM_DRAW free-space manager */
- if(H5MF_alloc_open(f, H5AC_ind_read_dxpl_id, stype) < 0)
+ if(H5MF_open_fstype(f, H5AC_ind_read_dxpl_id, (H5F_mem_page_t)stype) < 0)
FAIL_STACK_ERROR
/* Get free-space info */
@@ -6527,47 +6236,47 @@ test_mf_fs_split(hid_t fapl_new, hid_t fcpl)
FAIL_STACK_ERROR
/* Verify free-space info */
- if(fs_stat.tot_space < (TEST_BLOCK_SIZE1+TEST_BLOCK_SIZE3))
- TEST_ERROR
+ if(fs_stat.tot_space < (TBLOCK_SIZE1+TBLOCK_SIZE3))
+ TEST_ERROR
if(fs_stat.serial_sect_count < 2)
- TEST_ERROR
+ TEST_ERROR
/* Retrieve blocks #1 from H5FD_MEM_DRAW free-space manager */
- if(HADDR_UNDEF == (tmp_addr = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE1)))
- FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (tmp_addr = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE1)))
+ FAIL_STACK_ERROR
if(tmp_addr != saddr1)
- TEST_ERROR
+ TEST_ERROR
/* Retrieve blocks #3 from H5FD_MEM_DRAW free-space manager */
- if(HADDR_UNDEF == (tmp_addr = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE3)))
- FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (tmp_addr = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE3)))
+ FAIL_STACK_ERROR
if(tmp_addr != saddr3)
- TEST_ERROR
+ TEST_ERROR
/* H5FD_MEM_DRAW free-space manager is going away at closing */
/* works for this one because the freeing of sect_addr is to H5FD_MEM_SUPER fs, not against itself */
/* Allocate 4 blocks of type H5FD_MEM_BTREE */
btype = H5FD_MEM_BTREE;
- if(HADDR_UNDEF == (baddr5 = H5MF_alloc(f, btype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE5)))
- FAIL_STACK_ERROR
- if(HADDR_UNDEF == (baddr6 = H5MF_alloc(f, btype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE6)))
- FAIL_STACK_ERROR
- if(HADDR_UNDEF == (baddr7 = H5MF_alloc(f, btype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE7)))
- FAIL_STACK_ERROR
- if(HADDR_UNDEF == (baddr8 = H5MF_alloc(f, btype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE8)))
- FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (baddr5 = H5MF_alloc(f, btype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE5)))
+ FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (baddr6 = H5MF_alloc(f, btype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE6)))
+ FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (baddr7 = H5MF_alloc(f, btype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE7)))
+ FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (baddr8 = H5MF_alloc(f, btype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE8)))
+ FAIL_STACK_ERROR
/* Put block #5 & #7 into H5FD_MEM_BTREE free-space manager */
- if(H5MF_xfree(f, btype, H5AC_ind_read_dxpl_id, baddr5, (hsize_t)TEST_BLOCK_SIZE5) < 0)
- FAIL_STACK_ERROR
- if(H5MF_xfree(f, btype, H5AC_ind_read_dxpl_id, baddr7, (hsize_t)TEST_BLOCK_SIZE7) < 0)
- FAIL_STACK_ERROR
+ if(H5MF_xfree(f, btype, H5AC_ind_read_dxpl_id, baddr5, (hsize_t)TBLOCK_SIZE5) < 0)
+ FAIL_STACK_ERROR
+ if(H5MF_xfree(f, btype, H5AC_ind_read_dxpl_id, baddr7, (hsize_t)TBLOCK_SIZE7) < 0)
+ FAIL_STACK_ERROR
if(H5Fclose(file) < 0)
FAIL_STACK_ERROR
/* Re-open the file */
- if((file = H5Fopen(filename, H5F_ACC_RDWR, fapl_new)) < 0)
+ if((file = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0)
FAIL_STACK_ERROR
/* Get a pointer to the internal file object */
@@ -6583,7 +6292,7 @@ test_mf_fs_split(hid_t fapl_new, hid_t fcpl)
TEST_ERROR
/* Start up H5FD_MEM_SUPER free-space manager */
- if(H5MF_alloc_open(f, H5AC_ind_read_dxpl_id, type) < 0)
+ if(H5MF_open_fstype(f, H5AC_ind_read_dxpl_id, (H5F_mem_page_t)type) < 0)
FAIL_STACK_ERROR
/* Get free-space info */
@@ -6591,27 +6300,27 @@ test_mf_fs_split(hid_t fapl_new, hid_t fcpl)
FAIL_STACK_ERROR
/* Verify free-space info */
- if(fs_stat.tot_space < (TEST_BLOCK_SIZE3+TEST_BLOCK_SIZE5+TEST_BLOCK_SIZE7))
- TEST_ERROR
+ if(fs_stat.tot_space < (TBLOCK_SIZE3+TBLOCK_SIZE5+TBLOCK_SIZE7))
+ TEST_ERROR
/* Retrieve block #3 from H5FD_MEM_SUPER free-space manager */
- if(HADDR_UNDEF == (tmp_addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE3)))
- FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (tmp_addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE3)))
+ FAIL_STACK_ERROR
if(tmp_addr != addr3)
- TEST_ERROR
+ TEST_ERROR
/* Retrieve block #7 from H5FD_MEM_BTREE free-space manager */
- if(HADDR_UNDEF == (tmp_addr = H5MF_alloc(f, btype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE7)))
- FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (tmp_addr = H5MF_alloc(f, btype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE7)))
+ FAIL_STACK_ERROR
if(tmp_addr != baddr7)
- TEST_ERROR
+ TEST_ERROR
/* There should still be block #5 of H5FD_MEM_BTREE in H5FD_MEM_BTREE free-space manager */
if(H5Fclose(file) < 0)
FAIL_STACK_ERROR
/* Re-open the file */
- if((file = H5Fopen(filename, H5F_ACC_RDWR, fapl_new)) < 0)
+ if((file = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0)
FAIL_STACK_ERROR
/* Get a pointer to the internal file object */
@@ -6620,10 +6329,10 @@ test_mf_fs_split(hid_t fapl_new, hid_t fcpl)
/* Verify that the H5FD_MEM_SUPER free-space manager is there */
if(!H5F_addr_defined(f->shared->fs_addr[type]))
- TEST_ERROR
+ TEST_ERROR
/* Start up H5FD_MEM_SUPER free-space manager */
- if(H5MF_alloc_open(f, H5AC_ind_read_dxpl_id, type) < 0)
+ if(H5MF_open_fstype(f, H5AC_ind_read_dxpl_id, (H5F_mem_page_t)type) < 0)
FAIL_STACK_ERROR
/* Get free-space info */
@@ -6631,11 +6340,16 @@ test_mf_fs_split(hid_t fapl_new, hid_t fcpl)
FAIL_STACK_ERROR
/* Verify free-space info */
- if(fs_stat.tot_space < TEST_BLOCK_SIZE5)
- TEST_ERROR
+ if(fs_stat.tot_space < TBLOCK_SIZE5)
+ TEST_ERROR
+ /* Closing */
if(H5Fclose(file) < 0)
FAIL_STACK_ERROR
+ if(H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR
+ if(H5Pclose(fcpl) < 0)
+ FAIL_STACK_ERROR
PASSED()
@@ -6643,37 +6357,113 @@ test_mf_fs_split(hid_t fapl_new, hid_t fcpl)
error:
H5E_BEGIN_TRY {
- H5Fclose(file);
+ H5Fclose(file);
+ H5Pclose(fcpl);
+ H5Pclose(fapl);
} H5E_END_TRY;
return(1);
-} /* test_mf_fs_split() */
+} /* test_mf_fs_persist_split() */
+
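
test_mf_fs_persist_split() above returns immediately because persistent free-space managers are not yet supported with the split driver, but the property-list setup it would exercise is plain public API. The following is a minimal sketch of that setup under the same parameters the test uses ("-m.h5"/"-r.h5" member extensions, FSM_AGGR strategy, persisting free space, 1-byte threshold); the wrapper function name and output filename are illustrative, not part of the test:

#include "hdf5.h"

/* Sketch: create a split-driver file whose creation property list requests
 * persistent free-space managers. */
static hid_t create_split_file_with_persistent_fsm(const char *name)
{
    hid_t fcpl = H5Pcreate(H5P_FILE_CREATE);
    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
    hid_t file = -1;

    if(fcpl < 0 || fapl < 0)
        goto done;
    /* Metadata goes to "<name>-m.h5", raw data to "<name>-r.h5" */
    if(H5Pset_fapl_split(fapl, "-m.h5", H5P_DEFAULT, "-r.h5", H5P_DEFAULT) < 0)
        goto done;
    /* Persistent free-space tracking, FSM + aggregators, 1-byte threshold */
    if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, 1, (hsize_t)1) < 0)
        goto done;
    file = H5Fcreate(name, H5F_ACC_TRUNC, fcpl, fapl);
done:
    if(fapl >= 0) H5Pclose(fapl);
    if(fcpl >= 0) H5Pclose(fcpl);
    return file;
}
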
+#define MULTI_SETUP(memb_map, memb_fapl, memb_name, memb_addr, sv) { \
+ H5FD_mem_t mt; \
+ HDmemset(memb_map, 0, sizeof memb_map); \
+ HDmemset(memb_fapl, 0, sizeof memb_fapl); \
+ HDmemset(memb_name, 0, sizeof memb_name); \
+ HDmemset(memb_addr, 0, sizeof memb_addr); \
+ HDmemset(sv, 0, sizeof sv); \
+ for(mt = H5FD_MEM_DEFAULT; mt < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, mt)) { \
+ memb_map[mt] = H5FD_MEM_SUPER; \
+ memb_fapl[mt] = H5P_DEFAULT; \
+ } \
+ memb_map[H5FD_MEM_BTREE] = H5FD_MEM_BTREE; \
+ memb_map[H5FD_MEM_DRAW] = H5FD_MEM_DRAW; \
+ memb_map[H5FD_MEM_GHEAP] = H5FD_MEM_GHEAP; \
+ memb_map[H5FD_MEM_LHEAP] = H5FD_MEM_LHEAP; \
+ sprintf(sv[H5FD_MEM_SUPER], "%%s-%c.h5", 's'); \
+ memb_name[H5FD_MEM_SUPER] = sv[H5FD_MEM_SUPER]; \
+ memb_addr[H5FD_MEM_SUPER] = 0; \
+ sprintf(sv[H5FD_MEM_BTREE], "%%s-%c.h5", 'b'); \
+ memb_name[H5FD_MEM_BTREE] = sv[H5FD_MEM_BTREE]; \
+ memb_addr[H5FD_MEM_BTREE] = HADDR_MAX/6; \
+ sprintf(sv[H5FD_MEM_DRAW], "%%s-%c.h5", 'r'); \
+ memb_name[H5FD_MEM_DRAW] = sv[H5FD_MEM_DRAW]; \
+ memb_addr[H5FD_MEM_DRAW] = HADDR_MAX/3; \
+ sprintf(sv[H5FD_MEM_GHEAP], "%%s-%c.h5", 'g'); \
+ memb_name[H5FD_MEM_GHEAP] = sv[H5FD_MEM_GHEAP]; \
+ memb_addr[H5FD_MEM_GHEAP] = HADDR_MAX/2; \
+ sprintf(sv[H5FD_MEM_LHEAP], "%%s-%c.h5", 'l'); \
+ memb_name[H5FD_MEM_LHEAP] = sv[H5FD_MEM_LHEAP]; \
+ memb_addr[H5FD_MEM_LHEAP] = HADDR_MAX*2/3; \
+ sprintf(sv[H5FD_MEM_OHDR], "%%s-%c.h5", 'o'); \
+ memb_name[H5FD_MEM_OHDR] = sv[H5FD_MEM_OHDR]; \
+ memb_addr[H5FD_MEM_OHDR] = HADDR_MAX*5/6; \
+}
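
MULTI_SETUP only fills in the four arrays H5Pset_fapl_multi() needs: every memory type is mapped to the superblock member file by default, and B-tree, raw-data, global-heap and local-heap allocations are redirected to their own member files at staggered starting addresses (HADDR_MAX/6, /3, /2, 2/3, 5/6). A short sketch of how those arrays are consumed, assuming the macro above and the test harness' FILENAME[]/h5_fixname() are in scope as they are in this file; the wrapper name is illustrative:

/* Sketch: build a multi-driver FAPL from MULTI_SETUP and create a file with
 * persistent free-space managers, i.e. the setup test_mf_fs_persist_multi()
 * would run if persistent FSMs were supported for the multi driver. */
static hid_t create_multi_file_sketch(char *filename, size_t len)
{
    H5FD_mem_t  memb_map[H5FD_MEM_NTYPES];     /* which member file each type uses */
    hid_t       memb_fapl[H5FD_MEM_NTYPES];    /* per-member access properties     */
    const char *memb_name[H5FD_MEM_NTYPES];    /* per-member name patterns         */
    haddr_t     memb_addr[H5FD_MEM_NTYPES];    /* per-member starting addresses    */
    char        sv[H5FD_MEM_NTYPES][64];
    hid_t       fapl = -1, fcpl = -1, file = -1;

    MULTI_SETUP(memb_map, memb_fapl, memb_name, memb_addr, sv)

    if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
        goto done;
    if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
        goto done;
    if(H5Pset_fapl_multi(fapl, memb_map, memb_fapl, memb_name, memb_addr, TRUE) < 0)
        goto done;
    if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, TRUE, (hsize_t)1) < 0)
        goto done;
    h5_fixname(FILENAME[0], fapl, filename, len);
    file = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl);
done:
    if(fapl >= 0) H5Pclose(fapl);
    if(fcpl >= 0) H5Pclose(fcpl);
    return file;
}
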
/*
+ *-------------------------------------------------------------------------
* Verify that the file's free-space manager(s) are persistent for a multi-file
+ *-------------------------------------------------------------------------
*/
static unsigned
-test_mf_fs_multi(hid_t fapl_new, hid_t fcpl)
+test_mf_fs_persist_multi(void)
{
- hid_t file = -1; /* File ID */
- char filename[FILENAME_LEN]; /* Filename to use */
- H5F_t *f = NULL; /* Internal file object pointer */
- H5FD_mem_t type, stype, btype, gtype; /* File allocation type */
- H5FS_stat_t fs_stat; /* Information for free-space manager */
- haddr_t addr1, addr2, addr3, addr4; /* File allocation type */
- haddr_t saddr1, saddr2, saddr3, saddr4; /* File address for H5FD_MEM_SUPER */
- haddr_t baddr1, baddr2, baddr3, baddr4; /* File address for H5FD_MEM_DRAW */
- haddr_t gaddr1, gaddr2; /* File address for H5FD_MEM_GHEAP */
- haddr_t tmp_addr; /* Temporary variable for address */
- H5FS_section_info_t *node; /* Free space section node */
- htri_t node_found = FALSE; /* Indicate section is in free-space */
-
- TESTING("file's free-space managers are persistent for multi-file");
+ hid_t file = -1; /* File ID */
+ hid_t fcpl = -1; /* File creation property list ID */
+ hid_t fapl = -1; /* File access property list ID */
+ char filename[FILENAME_LEN]; /* Filename to use */
+ H5F_t *f = NULL; /* Internal file object pointer */
+ H5FD_mem_t type, stype, btype, gtype; /* File allocation type */
+ H5FS_stat_t fs_stat; /* Information for free-space manager */
+ haddr_t addr1, addr2, addr3, addr4; /* File allocation type */
+ haddr_t saddr1, saddr2, saddr3, saddr4; /* File address for H5FD_MEM_SUPER */
+ haddr_t baddr1, baddr2, baddr3, baddr4; /* File address for H5FD_MEM_DRAW */
+ haddr_t gaddr1, gaddr2; /* File address for H5FD_MEM_GHEAP */
+ haddr_t tmp_addr; /* Temporary variable for address */
+ H5FS_section_info_t *node; /* Free space section node */
+ htri_t node_found = FALSE; /* Indicate section is in free-space */
+ H5FD_mem_t memb_map[H5FD_MEM_NTYPES]; /* Memory usage map */
+ hid_t memb_fapl[H5FD_MEM_NTYPES]; /* Member access properties */
+ char sv[H5FD_MEM_NTYPES][64]; /* Name generators */
+ const char *memb_name[H5FD_MEM_NTYPES]; /* Name generators */
+ haddr_t memb_addr[H5FD_MEM_NTYPES]; /* Member starting address */
+
+
+ TESTING("File's free-space managers are persistent for multi-file");
+
+ /* for now, we don't support persistent free space managers
+ * with the multi file driver.
+ */
+ SKIPPED();
+ HDfprintf(stdout, " Persistent FSMs disabled in multi file driver.\n");
+ return 0; /* <========== note return */
+
+ /* File creation property list template */
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ FAIL_STACK_ERROR
+
+ if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, TRUE, (hsize_t)1) < 0)
+ FAIL_STACK_ERROR
+
+ if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ FAIL_STACK_ERROR
+
+ MULTI_SETUP(memb_map, memb_fapl, memb_name, memb_addr, sv)
+
+ if(H5Pset_fapl_multi(fapl, memb_map, memb_fapl, memb_name, memb_addr, TRUE) < 0)
+ TEST_ERROR;
/* Set the filename to use for this test (dependent on fapl) */
- h5_fixname(FILENAME[0], fapl_new, filename, sizeof(filename));
+ h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl_new)) < 0)
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl)) < 0)
FAIL_STACK_ERROR
/* Get a pointer to the internal file object */
@@ -6682,43 +6472,43 @@ test_mf_fs_multi(hid_t fapl_new, hid_t fcpl)
/* Allocate 4 blocks of type H5FD_MEM_SUPER */
type = H5FD_MEM_SUPER;
- if(HADDR_UNDEF == (addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE1)))
- FAIL_STACK_ERROR
- if(HADDR_UNDEF == (addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE2)))
- FAIL_STACK_ERROR
- if(HADDR_UNDEF == (addr3 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE3)))
- FAIL_STACK_ERROR
- if(HADDR_UNDEF == (addr4 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE4)))
- FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE1)))
+ FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE2)))
+ FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (addr3 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE3)))
+ FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (addr4 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE4)))
+ FAIL_STACK_ERROR
/* Put block #1, #3 into H5FD_MEM_SUPER free-space manager */
- if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TEST_BLOCK_SIZE1) < 0)
- FAIL_STACK_ERROR
- if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr3, (hsize_t)TEST_BLOCK_SIZE3) < 0)
- FAIL_STACK_ERROR
+ if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TBLOCK_SIZE1) < 0)
+ FAIL_STACK_ERROR
+ if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr3, (hsize_t)TBLOCK_SIZE3) < 0)
+ FAIL_STACK_ERROR
/* Allocate 4 blocks of type H5FD_MEM_DRAW */
stype = H5FD_MEM_DRAW;
- if(HADDR_UNDEF == (saddr1 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE1)))
- FAIL_STACK_ERROR
- if(HADDR_UNDEF == (saddr2 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE2)))
- FAIL_STACK_ERROR
- if(HADDR_UNDEF == (saddr3 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE3)))
- FAIL_STACK_ERROR
- if(HADDR_UNDEF == (saddr4 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE4)))
- FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (saddr1 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE1)))
+ FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (saddr2 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE2)))
+ FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (saddr3 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE3)))
+ FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (saddr4 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE4)))
+ FAIL_STACK_ERROR
/* Put block #1, #3 into H5FD_MEM_DRAW free-space manager */
- if(H5MF_xfree(f, stype, H5AC_ind_read_dxpl_id, saddr1, (hsize_t)TEST_BLOCK_SIZE1) < 0)
- FAIL_STACK_ERROR
- if(H5MF_xfree(f, stype, H5AC_ind_read_dxpl_id, saddr3, (hsize_t)TEST_BLOCK_SIZE3) < 0)
- FAIL_STACK_ERROR
+ if(H5MF_xfree(f, stype, H5AC_ind_read_dxpl_id, saddr1, (hsize_t)TBLOCK_SIZE1) < 0)
+ FAIL_STACK_ERROR
+ if(H5MF_xfree(f, stype, H5AC_ind_read_dxpl_id, saddr3, (hsize_t)TBLOCK_SIZE3) < 0)
+ FAIL_STACK_ERROR
if(H5Fclose(file) < 0)
FAIL_STACK_ERROR
/* Re-open the file */
- if((file = H5Fopen(filename, H5F_ACC_RDWR, fapl_new)) < 0)
+ if((file = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0)
FAIL_STACK_ERROR
/* Get a pointer to the internal file object */
@@ -6730,7 +6520,7 @@ test_mf_fs_multi(hid_t fapl_new, hid_t fcpl)
TEST_ERROR
/* Start up H5FD_MEM_SUPER free-space manager */
- if(H5MF_alloc_open(f, H5AC_ind_read_dxpl_id, type) < 0)
+ if(H5MF_open_fstype(f, H5AC_ind_read_dxpl_id, (H5F_mem_page_t)type) < 0)
FAIL_STACK_ERROR
/* Get free-space info */
@@ -6738,23 +6528,23 @@ test_mf_fs_multi(hid_t fapl_new, hid_t fcpl)
FAIL_STACK_ERROR
/* Verify free-space info */
- if(fs_stat.tot_space < (TEST_BLOCK_SIZE1+TEST_BLOCK_SIZE3))
- TEST_ERROR
+ if(fs_stat.tot_space < (TBLOCK_SIZE1+TBLOCK_SIZE3))
+ TEST_ERROR
if(fs_stat.serial_sect_count < 2)
- TEST_ERROR
+ TEST_ERROR
- /* Retrieve block #1 from H5FD_MEM_SUPER free-space manager; block #2 still in free-space */
- if(HADDR_UNDEF == (tmp_addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE1)))
- FAIL_STACK_ERROR
+ /* Retrieve block #1 from H5FD_MEM_SUPER free-space manager; block #3 still in free-space */
+ if(HADDR_UNDEF == (tmp_addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE1)))
+ FAIL_STACK_ERROR
if(tmp_addr != addr1)
- TEST_ERROR
+ TEST_ERROR
/* Verify that the free-space manager for H5FD_MEM_DRAW is there */
if(!H5F_addr_defined(f->shared->fs_addr[stype]))
TEST_ERROR
/* Start up H5FD_MEM_DRAW free-space manager */
- if(H5MF_alloc_open(f, H5AC_ind_read_dxpl_id, stype) < 0)
+ if(H5MF_open_fstype(f, H5AC_ind_read_dxpl_id, (H5F_mem_page_t)stype) < 0)
FAIL_STACK_ERROR
/* Get free-space info */
@@ -6762,45 +6552,45 @@ test_mf_fs_multi(hid_t fapl_new, hid_t fcpl)
FAIL_STACK_ERROR
/* Verify free-space info */
- if(fs_stat.tot_space < (TEST_BLOCK_SIZE1+TEST_BLOCK_SIZE3))
- TEST_ERROR
+ if(fs_stat.tot_space < (TBLOCK_SIZE1+TBLOCK_SIZE3))
+ TEST_ERROR
if(fs_stat.serial_sect_count < 2)
- TEST_ERROR
+ TEST_ERROR
/* Retrieve blocks #1 from H5FD_MEM_DRAW free-space manager */
- if(HADDR_UNDEF == (tmp_addr = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE1)))
- FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (tmp_addr = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE1)))
+ FAIL_STACK_ERROR
if(tmp_addr != saddr1)
- TEST_ERROR
+ TEST_ERROR
/* Retrieve blocks #3 from H5FD_MEM_DRAW free-space manager */
- if(HADDR_UNDEF == (tmp_addr = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE3)))
- FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (tmp_addr = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE3)))
+ FAIL_STACK_ERROR
if(tmp_addr != saddr3)
- TEST_ERROR
+ TEST_ERROR
/* Allocate 4 blocks of type H5FD_MEM_BTREE */
btype = H5FD_MEM_BTREE;
- if(HADDR_UNDEF == (baddr1 = H5MF_alloc(f, btype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE1)))
- FAIL_STACK_ERROR
- if(HADDR_UNDEF == (baddr2 = H5MF_alloc(f, btype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE2)))
- FAIL_STACK_ERROR
- if(HADDR_UNDEF == (baddr3 = H5MF_alloc(f, btype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE3)))
- FAIL_STACK_ERROR
- if(HADDR_UNDEF == (baddr4 = H5MF_alloc(f, btype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE4)))
- FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (baddr1 = H5MF_alloc(f, btype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE1)))
+ FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (baddr2 = H5MF_alloc(f, btype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE2)))
+ FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (baddr3 = H5MF_alloc(f, btype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE3)))
+ FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (baddr4 = H5MF_alloc(f, btype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE4)))
+ FAIL_STACK_ERROR
/* Put block #1 & #3 into H5FD_MEM_BTREE free-space manager */
- if(H5MF_xfree(f, btype, H5AC_ind_read_dxpl_id, baddr1, (hsize_t)TEST_BLOCK_SIZE1) < 0)
- FAIL_STACK_ERROR
- if(H5MF_xfree(f, btype, H5AC_ind_read_dxpl_id, baddr3, (hsize_t)TEST_BLOCK_SIZE3) < 0)
- FAIL_STACK_ERROR
+ if(H5MF_xfree(f, btype, H5AC_ind_read_dxpl_id, baddr1, (hsize_t)TBLOCK_SIZE1) < 0)
+ FAIL_STACK_ERROR
+ if(H5MF_xfree(f, btype, H5AC_ind_read_dxpl_id, baddr3, (hsize_t)TBLOCK_SIZE3) < 0)
+ FAIL_STACK_ERROR
if(H5Fclose(file) < 0)
FAIL_STACK_ERROR
/* Re-open the file */
- if((file = H5Fopen(filename, H5F_ACC_RDWR, fapl_new)) < 0)
+ if((file = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0)
FAIL_STACK_ERROR
/* Get a pointer to the internal file object */
@@ -6812,7 +6602,7 @@ test_mf_fs_multi(hid_t fapl_new, hid_t fcpl)
TEST_ERROR
/* Start up H5FD_MEM_SUPER free-space manager */
- if(H5MF_alloc_open(f, H5AC_ind_read_dxpl_id, type) < 0)
+ if(H5MF_open_fstype(f, H5AC_ind_read_dxpl_id, (H5F_mem_page_t)type) < 0)
FAIL_STACK_ERROR
/* Get free-space info */
@@ -6820,14 +6610,14 @@ test_mf_fs_multi(hid_t fapl_new, hid_t fcpl)
FAIL_STACK_ERROR
/* Verify free-space info */
- if(fs_stat.tot_space < TEST_BLOCK_SIZE3)
- TEST_ERROR
+ if(fs_stat.tot_space < TBLOCK_SIZE3)
+ TEST_ERROR
/* Retrieve block #3 from H5FD_MEM_SUPER free-space manager */
- if(HADDR_UNDEF == (tmp_addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE3)))
- FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (tmp_addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE3)))
+ FAIL_STACK_ERROR
if(tmp_addr != addr3)
- TEST_ERROR
+ TEST_ERROR
/* Verify that the free-space manager for H5FD_MEM_DRAW is not there */
if(H5F_addr_defined(f->shared->fs_addr[stype]))
@@ -6838,7 +6628,7 @@ test_mf_fs_multi(hid_t fapl_new, hid_t fcpl)
TEST_ERROR
/* Start up H5FD_MEM_BTREE free-space manager */
- if(H5MF_alloc_open(f, H5AC_ind_read_dxpl_id, btype) < 0)
+ if(H5MF_open_fstype(f, H5AC_ind_read_dxpl_id, (H5F_mem_page_t)btype) < 0)
FAIL_STACK_ERROR
/* Get free-space info */
@@ -6846,27 +6636,27 @@ test_mf_fs_multi(hid_t fapl_new, hid_t fcpl)
FAIL_STACK_ERROR
/* Verify free-space info */
- if(fs_stat.tot_space < (TEST_BLOCK_SIZE1+TEST_BLOCK_SIZE3))
- TEST_ERROR
+ if(fs_stat.tot_space < (TBLOCK_SIZE1+TBLOCK_SIZE3))
+ TEST_ERROR
if(fs_stat.serial_sect_count < 2)
- TEST_ERROR
+ TEST_ERROR
/* Allocate 2 blocks of type H5FD_MEM_GHEAP */
gtype = H5FD_MEM_GHEAP;
- if(HADDR_UNDEF == (gaddr2 = H5MF_alloc(f, gtype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE2)))
- FAIL_STACK_ERROR
- if(HADDR_UNDEF == (gaddr1 = H5MF_alloc(f, gtype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE1)))
- FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (gaddr2 = H5MF_alloc(f, gtype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE2)))
+ FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (gaddr1 = H5MF_alloc(f, gtype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE1)))
+ FAIL_STACK_ERROR
/* Put block #2 into H5FD_MEM_GHEAP free-space manager */
- if(H5MF_xfree(f, gtype, H5AC_ind_read_dxpl_id, gaddr2, (hsize_t)TEST_BLOCK_SIZE2) < 0)
- FAIL_STACK_ERROR
+ if(H5MF_xfree(f, gtype, H5AC_ind_read_dxpl_id, gaddr2, (hsize_t)TBLOCK_SIZE2) < 0)
+ FAIL_STACK_ERROR
if(H5Fclose(file) < 0)
FAIL_STACK_ERROR
/* Re-open the file */
- if((file = H5Fopen(filename, H5F_ACC_RDWR, fapl_new)) < 0)
+ if((file = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0)
FAIL_STACK_ERROR
/* Get a pointer to the internal file object */
@@ -6875,39 +6665,44 @@ test_mf_fs_multi(hid_t fapl_new, hid_t fcpl)
/* If H5FD_MEM_SUPER is there, should not find block #1 & #3 */
if(H5F_addr_defined(f->shared->fs_addr[type])) {
- /* Start up H5FD_MEM_SUPER free-space manager */
- if(H5MF_alloc_open(f, H5AC_ind_read_dxpl_id, type) < 0)
- FAIL_STACK_ERROR
-
- if((node_found = H5FS_sect_find(f, H5AC_ind_read_dxpl_id, f->shared->fs_man[type],
- (hsize_t)TEST_BLOCK_SIZE1, (H5FS_section_info_t **)&node)) < 0)
- FAIL_STACK_ERROR
- if(node_found) TEST_ERROR
-
- if((node_found = H5FS_sect_find(f, H5AC_ind_read_dxpl_id, f->shared->fs_man[type],
- (hsize_t)TEST_BLOCK_SIZE3, (H5FS_section_info_t **)&node)) < 0)
- FAIL_STACK_ERROR
- if(node_found) TEST_ERROR
+ /* Start up H5FD_MEM_SUPER free-space manager */
+ if(H5MF_open_fstype(f, H5AC_ind_read_dxpl_id, (H5F_mem_page_t)type) < 0)
+ FAIL_STACK_ERROR
+
+ if((node_found = H5FS_sect_find(f, H5AC_ind_read_dxpl_id, f->shared->fs_man[type],
+ (hsize_t)TBLOCK_SIZE1, (H5FS_section_info_t **)&node)) < 0)
+ FAIL_STACK_ERROR
+ if(node_found) TEST_ERROR
+
+ if((node_found = H5FS_sect_find(f, H5AC_ind_read_dxpl_id, f->shared->fs_man[type],
+ (hsize_t)TBLOCK_SIZE3, (H5FS_section_info_t **)&node)) < 0)
+ FAIL_STACK_ERROR
+ if(node_found) TEST_ERROR
}
/* Verify that the H5FD_MEM_GHEAP free-space manager is there */
if(!H5F_addr_defined(f->shared->fs_addr[gtype]))
- TEST_ERROR
+ TEST_ERROR
/* Start up H5FD_MEM_GHEAP free-space manager */
- if(H5MF_alloc_open(f, H5AC_ind_read_dxpl_id, gtype) < 0)
- FAIL_STACK_ERROR
+ if(H5MF_open_fstype(f, H5AC_ind_read_dxpl_id, (H5F_mem_page_t)gtype) < 0)
+ FAIL_STACK_ERROR
/* Get free-space info */
if(H5FS_stat_info(f, f->shared->fs_man[gtype], &fs_stat) < 0)
FAIL_STACK_ERROR
/* Verify free-space info */
- if(fs_stat.tot_space < TEST_BLOCK_SIZE2)
- TEST_ERROR
+ if(fs_stat.tot_space < TBLOCK_SIZE2)
+ TEST_ERROR
+ /* Closing */
if(H5Fclose(file) < 0)
FAIL_STACK_ERROR
+ if(H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR
+ if(H5Pclose(fcpl) < 0)
+ FAIL_STACK_ERROR
PASSED()
@@ -6915,460 +6710,540 @@ test_mf_fs_multi(hid_t fapl_new, hid_t fcpl)
error:
H5E_BEGIN_TRY {
- H5Fclose(file);
+ H5Fclose(file);
+ H5Pclose(fcpl);
+ H5Pclose(fapl);
} H5E_END_TRY;
return(1);
-} /* test_mf_fs_multi() */
-
-#define MULTI_SETUP(memb_map, memb_fapl, memb_name, memb_addr, sv) { \
- H5FD_mem_t mt; \
- HDmemset(memb_map, 0, sizeof memb_map); \
- HDmemset(memb_fapl, 0, sizeof memb_fapl); \
- HDmemset(memb_name, 0, sizeof memb_name); \
- HDmemset(memb_addr, 0, sizeof memb_addr); \
- HDmemset(sv, 0, sizeof sv); \
- for(mt = H5FD_MEM_DEFAULT; mt < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, mt)) { \
- memb_map[mt] = H5FD_MEM_SUPER; \
- memb_fapl[mt] = H5P_DEFAULT; \
- } \
- memb_map[H5FD_MEM_BTREE] = H5FD_MEM_BTREE; \
- memb_map[H5FD_MEM_DRAW] = H5FD_MEM_DRAW; \
- memb_map[H5FD_MEM_GHEAP] = H5FD_MEM_GHEAP; \
- memb_map[H5FD_MEM_LHEAP] = H5FD_MEM_LHEAP; \
- sprintf(sv[H5FD_MEM_SUPER], "%%s-%c.h5", 's'); \
- memb_name[H5FD_MEM_SUPER] = sv[H5FD_MEM_SUPER]; \
- memb_addr[H5FD_MEM_SUPER] = 0; \
- sprintf(sv[H5FD_MEM_BTREE], "%%s-%c.h5", 'b'); \
- memb_name[H5FD_MEM_BTREE] = sv[H5FD_MEM_BTREE]; \
- memb_addr[H5FD_MEM_BTREE] = HADDR_MAX/6; \
- sprintf(sv[H5FD_MEM_DRAW], "%%s-%c.h5", 'r'); \
- memb_name[H5FD_MEM_DRAW] = sv[H5FD_MEM_DRAW]; \
- memb_addr[H5FD_MEM_DRAW] = HADDR_MAX/3; \
- sprintf(sv[H5FD_MEM_GHEAP], "%%s-%c.h5", 'g'); \
- memb_name[H5FD_MEM_GHEAP] = sv[H5FD_MEM_GHEAP]; \
- memb_addr[H5FD_MEM_GHEAP] = HADDR_MAX/2; \
- sprintf(sv[H5FD_MEM_LHEAP], "%%s-%c.h5", 'l'); \
- memb_name[H5FD_MEM_LHEAP] = sv[H5FD_MEM_LHEAP]; \
- memb_addr[H5FD_MEM_LHEAP] = HADDR_MAX*2/3; \
- sprintf(sv[H5FD_MEM_OHDR], "%%s-%c.h5", 'o'); \
- memb_name[H5FD_MEM_OHDR] = sv[H5FD_MEM_OHDR]; \
- memb_addr[H5FD_MEM_OHDR] = HADDR_MAX*5/6; \
-}
+} /* test_mf_fs_persist_multi() */
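
test_mf_fs_persist() below drives persistence through the library-internal H5MF_/H5FS_ interfaces; the same behaviour can be observed from the public API by freeing space, closing, reopening, and asking the file how much free space it still tracks. A hedged sketch of that round trip (the filename and the "scratch" dataset are example names, and exact byte counts depend on what else the library allocates, so the printed number is observational rather than an exact expectation):

#include "hdf5.h"
#include <stdio.h>

/* Sketch: with persisting free space requested in the FCPL, space freed by
 * deleting an object should still be reported after close and reopen. */
int main(void)
{
    hid_t    fcpl = H5Pcreate(H5P_FILE_CREATE);
    hid_t    file, space, dset;
    hsize_t  dims[1] = {1024};
    int      buf[1024] = {0};
    hssize_t free_bytes;

    H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, 1, (hsize_t)1);
    file = H5Fcreate("fsm_persist_sketch.h5", H5F_ACC_TRUNC, fcpl, H5P_DEFAULT);

    /* Create, write and then delete a small dataset so its space is freed */
    space = H5Screate_simple(1, dims, NULL);
    dset  = H5Dcreate2(file, "scratch", H5T_NATIVE_INT, space,
                       H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
    H5Dwrite(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf);
    H5Dclose(dset);
    H5Sclose(space);
    H5Ldelete(file, "scratch", H5P_DEFAULT);
    H5Fclose(file);

    /* Reopen: the freed space should still be tracked by the persistent FSM */
    file = H5Fopen("fsm_persist_sketch.h5", H5F_ACC_RDWR, H5P_DEFAULT);
    free_bytes = H5Fget_freespace(file);
    printf("free space tracked after reopen: %lld bytes\n", (long long)free_bytes);

    H5Fclose(file);
    H5Pclose(fcpl);
    return 0;
}
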
/*
- * Tests to verify that file's free-space managers are persistent or going away
- * for different drivers.
+ *-------------------------------------------------------------------------
+ * Verify that the file's free-space persists where there are free sections in the manager
+ *-------------------------------------------------------------------------
*/
static unsigned
-test_mf_fs_drivers(hid_t fapl)
+test_mf_fs_persist(const char *env_h5_drvr, hid_t fapl, hbool_t new_format)
{
- hid_t fcpl = -1; /* file creation property list */
- hid_t fapl_new = -1; /* copy of file access property list */
- hid_t fapl2 = -1; /* copy of file access property list */
- unsigned new_format; /* To use new library format or not */
- unsigned ret = 0; /* return value */
-
- H5FD_mem_t memb_map[H5FD_MEM_NTYPES]; /* Memory usage map */
- hid_t memb_fapl[H5FD_MEM_NTYPES]; /* Member access properties */
- char sv[H5FD_MEM_NTYPES][64]; /* Name generators */
- const char *memb_name[H5FD_MEM_NTYPES]; /* Name generators */
- haddr_t memb_addr[H5FD_MEM_NTYPES]; /* Member starting address */
-
- /* Create a non-standard file-creation template */
- if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
- FAIL_STACK_ERROR
- if(H5Pset_file_space(fcpl, H5F_FILE_SPACE_ALL_PERSIST, (hsize_t)0) < 0)
- TEST_ERROR
-
- /* Copy the file access property list */
- if((fapl2 = H5Pcopy(fapl)) < 0) TEST_ERROR
-
- /* Set the "use the latest version of the format" bounds for creating objects in the file */
- if(H5Pset_libver_bounds(fapl2, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
- TEST_ERROR
-
- /* Test with old and new format */
- for(new_format = FALSE; new_format <= TRUE; new_format++) {
-
- if(new_format)
- HDputs("Testing the following tests for free-space managers with new library format...");
- else
- HDputs("Testing the following tests for free-space managers with old library format...");
-
- /* SEC2 */
- HDputs("Testing free-space manager(s) with sec2 driver");
-
- if((fapl_new = H5Pcopy(new_format ? fapl2 : fapl)) < 0) TEST_ERROR
-
- if(H5Pset_fapl_sec2(fapl_new) < 0)
- FAIL_STACK_ERROR
-
- ret += test_mf_fs_gone(fapl_new, fcpl);
- ret += test_mf_fs_persist(fapl_new, fcpl);
+ hid_t file = -1; /* File ID */
+ hid_t fcpl = -1; /* File creation property list ID */
+ hid_t fapl2 = -1; /* File access property list ID */
+ char filename[FILENAME_LEN]; /* Filename to use */
+ H5F_t *f = NULL; /* Internal file object pointer */
+ H5FD_mem_t type; /* File allocation type */
+ H5FD_mem_t tt; /* File allocation type */
+ H5FS_stat_t fs_stat; /* Information for free-space manager */
+ haddr_t addr1, addr2, addr3, addr4, addr5, addr6; /* File address for H5FD_MEM_SUPER */
+ haddr_t tmp_addr; /* Temporary variable for address */
+
+ if(new_format)
+ TESTING("File's free-space is persistent with new library format")
+ else
+ TESTING("File's free-space is persistent with old library format")
+
+ if(HDstrcmp(env_h5_drvr, "split") && HDstrcmp(env_h5_drvr, "multi")) {
+
+ /* File creation property list template */
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Copy the file access property list */
+ if((fapl2 = H5Pcopy(fapl)) < 0) FAIL_STACK_ERROR
+
+ if(new_format) {
+ /* Latest format */
+ if(H5Pset_libver_bounds(fapl2, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ FAIL_STACK_ERROR
+ /* Set to paged aggregation and persisting free-space */
+ if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, TRUE, (hsize_t)1) < 0)
+ TEST_ERROR
+ } else {
+ /* Setting: aggregation with persisting free-space */
+ if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, TRUE, (hsize_t)1) < 0)
+ TEST_ERROR
+ }
- h5_clean_files(FILENAME, fapl_new);
+ /* Set the filename to use for this test (dependent on fapl) */
+ h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
+ /* Create the file to work on */
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl2)) < 0)
+ FAIL_STACK_ERROR
- /* STDIO */
- HDputs("Testing free-space managers with stdio driver");
+ /* Get a pointer to the internal file object */
+ if(NULL == (f = (H5F_t *)H5I_object(file)))
+ FAIL_STACK_ERROR
- if((fapl_new = H5Pcopy(new_format?fapl2:fapl)) < 0)
- FAIL_STACK_ERROR
- if(H5Pset_fapl_stdio(fapl_new) < 0)
- FAIL_STACK_ERROR
+ /* Allocate 6 blocks */
+ type = H5FD_MEM_SUPER;
+ if(HADDR_UNDEF == (addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE1)))
+ FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE2)))
+ FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (addr3 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE3)))
+ FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (addr4 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE4)))
+ FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (addr5 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE5)))
+ FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (addr6 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE6)))
+ FAIL_STACK_ERROR
- ret += test_mf_fs_gone(fapl_new, fcpl);
- ret += test_mf_fs_persist(fapl_new, fcpl);
+ /* Put block #1, #3, #5 to H5FD_MEM_SUPER free-space manager */
+ if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TBLOCK_SIZE1) < 0)
+ FAIL_STACK_ERROR
+ if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr3, (hsize_t)TBLOCK_SIZE3) < 0)
+ FAIL_STACK_ERROR
+ if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr5, (hsize_t)TBLOCK_SIZE5) < 0)
+ FAIL_STACK_ERROR
- h5_clean_files(FILENAME, fapl_new);
+ if(H5Fclose(file) < 0)
+ FAIL_STACK_ERROR
- /* CORE */
- HDputs("Testing free-space managers with core driver");
+ /* Re-open the file */
+ if((file = H5Fopen(filename, H5F_ACC_RDWR, fapl2)) < 0)
+ FAIL_STACK_ERROR
- /* create fapl to be a "core" file */
- if((fapl_new = H5Pcopy(new_format?fapl2:fapl)) < 0)
- FAIL_STACK_ERROR
- if(H5Pset_fapl_core(fapl_new, (size_t)CORE_INCREMENT, TRUE) < 0)
- FAIL_STACK_ERROR
+ /* Get a pointer to the internal file object */
+ if(NULL == (f = (H5F_t *)H5I_object(file)))
+ FAIL_STACK_ERROR
- ret += test_mf_fs_gone(fapl_new, fcpl);
- ret += test_mf_fs_persist(fapl_new, fcpl);
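+        /* Map the (allocation type, size) pair to the free-space manager type; "tt" is then used to index f->shared->fs_addr[] and fs_man[] below */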
+ H5MF_alloc_to_fs_type(f, type, TBLOCK_SIZE6, (H5F_mem_page_t *)&tt);
- h5_clean_files(FILENAME, fapl_new);
+ /* Verify that H5FD_MEM_SUPER free-space manager is there */
+ if(!H5F_addr_defined(f->shared->fs_addr[tt]))
+ TEST_ERROR
- /* FAMILY */
- HDputs("Testing free-space managers with family driver");
+ /* Since we are about to open a self referential free space
+ * manager prior to the first file space allocation / deallocation
+ * call H5MF_tidy_self_referential_fsm_hack() first so as to avoid
+ * assertion failures on the first file space alloc / dealloc.
+ */
+ if((f->shared->first_alloc_dealloc) &&
+ (SUCCEED !=
+ H5MF_tidy_self_referential_fsm_hack(f, H5AC_ind_read_dxpl_id)))
+ FAIL_STACK_ERROR
- if((fapl_new = H5Pcopy(new_format?fapl2:fapl)) < 0)
- FAIL_STACK_ERROR
- if(H5Pset_fapl_family(fapl_new, (hsize_t)FAMILY_SIZE, H5P_DEFAULT) < 0)
- FAIL_STACK_ERROR
+ /* Start up H5FD_MEM_SUPER free-space manager */
+ if(!(f->shared->fs_man[tt]))
+ if(H5MF_open_fstype(f, H5AC_ind_read_dxpl_id, (H5F_mem_page_t)tt) < 0)
+ FAIL_STACK_ERROR
- ret += test_mf_fs_persist(fapl_new, fcpl);
+ /* Get info for free-space manager */
+ if(H5FS_stat_info(f, f->shared->fs_man[tt], &fs_stat) < 0)
+ FAIL_STACK_ERROR
- h5_clean_files(FILENAME, fapl_new);
+ /* Verify free-space info */
+ if(fs_stat.tot_space < (TBLOCK_SIZE1+TBLOCK_SIZE3+TBLOCK_SIZE5))
+ TEST_ERROR
- /* SPLIT */
- HDputs("Testing free-space managers with split driver");
+ if(fs_stat.serial_sect_count < 3)
+ TEST_ERROR
- if((fapl_new = H5Pcopy(new_format?fapl2:fapl)) < 0)
- FAIL_STACK_ERROR
- if(H5Pset_fapl_split(fapl_new, "-m.h5", H5P_DEFAULT, "-r.h5", H5P_DEFAULT)<0)
- FAIL_STACK_ERROR
+ /* Retrieve block #3 from H5FD_MEM_SUPER free-space manager */
+ if(HADDR_UNDEF == (tmp_addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE3)))
+ FAIL_STACK_ERROR
+ if(tmp_addr != addr3)
+ TEST_ERROR
- ret += test_mf_fs_persist(fapl_new, fcpl);
- ret += test_mf_fs_split(fapl_new, fcpl);
+ /* Retrieve block #1 from H5FD_MEM_SUPER free-space manager */
+ if(HADDR_UNDEF == (tmp_addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE1)))
+ FAIL_STACK_ERROR
+ if(tmp_addr != addr1)
+ TEST_ERROR
- h5_clean_files(FILENAME, fapl_new);
+ if(H5Fclose(file) < 0)
+ FAIL_STACK_ERROR
- /* MULTI */
- HDputs("Testing free-space managers with multi driver");
+ /* Re-open the file */
+ if((file = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0)
+ FAIL_STACK_ERROR
- MULTI_SETUP(memb_map, memb_fapl, memb_name, memb_addr, sv)
+ /* Get a pointer to the internal file object */
+ if(NULL == (f = (H5F_t *)H5I_object(file)))
+ FAIL_STACK_ERROR
- if((fapl_new = H5Pcopy(new_format?fapl2:fapl)) < 0)
- FAIL_STACK_ERROR
- if(H5Pset_fapl_multi(fapl_new, memb_map, memb_fapl, memb_name, memb_addr, TRUE) < 0)
- TEST_ERROR;
+ /* Verify that H5FD_MEM_SUPER free-space manager is there */
+ if(!H5F_addr_defined(f->shared->fs_addr[tt]))
+ TEST_ERROR
- ret += test_mf_fs_multi(fapl_new, fcpl);
+ /* Retrieve block #5 from H5FD_MEM_SUPER free-space manager */
+ if(HADDR_UNDEF == (tmp_addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE5)))
+ FAIL_STACK_ERROR
+ if(tmp_addr != addr5)
+ TEST_ERROR
- h5_clean_files(FILENAME, fapl_new);
+ if(H5Fclose(file) < 0)
+ FAIL_STACK_ERROR
+ if(H5Pclose(fcpl) < 0)
+ FAIL_STACK_ERROR
+ if(H5Pclose(fapl2) < 0)
+ FAIL_STACK_ERROR
- } /* end for new_format */
+ PASSED()
- if(H5Pclose(fcpl) < 0)
- FAIL_STACK_ERROR
- if(H5Pclose(fapl2) < 0)
- FAIL_STACK_ERROR
+ } else {
+ SKIPPED();
+ puts(" Current VFD doesn't support persisting free-space or paged aggregation strategy");
+ }
- return(ret);
+ return(0);
error:
H5E_BEGIN_TRY {
+ H5Fclose(file);
H5Pclose(fcpl);
H5Pclose(fapl2);
- H5Pclose(fapl_new);
} H5E_END_TRY;
return(1);
-} /* test_mf_fs_drivers() */
-
+} /* test_mf_fs_persist() */
+
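+/*
+ * Illustrative sketch (not part of the test): from an application's point of
+ * view, the persistence exercised by test_mf_fs_persist() is requested
+ * through the file creation property list before the file is created.
+ * "example.h5" and the default FAPL below are placeholders.
+ *
+ *     hid_t fcpl = H5Pcreate(H5P_FILE_CREATE);
+ *     H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, TRUE, (hsize_t)1);
+ *     hid_t fid  = H5Fcreate("example.h5", H5F_ACC_TRUNC, fcpl, H5P_DEFAULT);
+ *     ... allocate objects, delete some, H5Fclose(fid) ...
+ *
+ * On re-open, space freed before the close is expected to be served again
+ * from the persisted free-space managers, which is what the address
+ * comparisons in the test verify.
+ */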
/*
- * Verify that file space management performs according to the
- * file space strategy and free space threshold as specified.
+ *-------------------------------------------------------------------------
+ * Verify that free-space sections are merged/shrunk away
+ *-------------------------------------------------------------------------
*/
static unsigned
-test_filespace_strategy_threshold(hid_t fapl_new)
+test_mf_fs_gone(const char *env_h5_drvr, hid_t fapl, hbool_t new_format)
{
- hid_t file = -1; /* File ID */
- hid_t fcpl = -1; /* File creation property list template */
- char filename[FILENAME_LEN]; /* Filename to use */
- H5F_t *f = NULL; /* Internal file object pointer */
- H5FD_mem_t type; /* File allocation type */
- haddr_t addr1, addr2, addr3, addr4, addr5, addr6; /* File address for H5FD_MEM_SUPER */
- haddr_t tmp_addr; /* Temporary variable for address */
- H5F_file_space_type_t fs_type; /* File space handling strategy */
- hsize_t fs_threshold; /* Free space section threshold */
- hsize_t tot_space, saved_tot_space; /* Total amount of free space */
- hsize_t tot_sect_count, saved_tot_sect_count; /* # of free-space sections */
-
- TESTING("file space strategy and threshold");
+ hid_t file = -1; /* File ID */
+ hid_t fcpl = -1; /* File creation property list */
+ hid_t fapl2 = -1; /* File access property list */
+ char filename[FILENAME_LEN]; /* Filename to use */
+ H5F_t *f = NULL; /* Internal file object pointer */
+ H5FD_mem_t type; /* File allocation type */
+ H5FS_stat_t fs_stat; /* Information for free-space manager */
+ haddr_t addr1, addr2, addr3, addr4; /* File address for H5FD_MEM_SUPER */
+    H5FD_mem_t fs_type;          /* Free-space manager type for the allocation */
+    hbool_t contig_addr_vfd;     /* Whether the VFD has a contiguous address space */
+    hbool_t ran_H5MF_tidy_self_referential_fsm_hack = FALSE;  /* Whether the self-referential FSM hack has been run */
+
+ if(new_format)
+ TESTING("File's free-space is going away with new library format")
+ else
+ TESTING("File's free-space is going away with old library format")
+
+    /* Whether the current VFD supports a contiguous address space (i.e., it is not the split or multi driver) */
+ contig_addr_vfd = (hbool_t)(HDstrcmp(env_h5_drvr, "split") && HDstrcmp(env_h5_drvr, "multi"));
- /* Set the filename to use for this test (dependent on fapl) */
- h5_fixname(FILENAME[0], fapl_new, filename, sizeof(filename));
-
- for(fs_threshold = 0; fs_threshold <= TEST_THRESHOLD10; fs_threshold++) {
-
- for(fs_type = H5F_FILE_SPACE_ALL_PERSIST; fs_type < H5F_FILE_SPACE_NTYPES; H5_INC_ENUM(H5F_file_space_type_t, fs_type)) {
-
- /* Create file-creation template */
- if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
- FAIL_STACK_ERROR
-
- /* Set default file space information */
- if(H5Pset_file_space(fcpl, fs_type, fs_threshold) < 0)
- FAIL_STACK_ERROR
-
- /* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl_new)) < 0)
- FAIL_STACK_ERROR
-
- /* Get a pointer to the internal file object */
- if(NULL == (f = (H5F_t *)H5I_object(file)))
- FAIL_STACK_ERROR
-
- /* Allocate 6 blocks */
- type = H5FD_MEM_SUPER;
- if(HADDR_UNDEF == (addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE1)))
- FAIL_STACK_ERROR
- if(HADDR_UNDEF == (addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE2)))
- FAIL_STACK_ERROR
- if(HADDR_UNDEF == (addr3 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE3)))
- FAIL_STACK_ERROR
- if(HADDR_UNDEF == (addr4 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE4)))
- FAIL_STACK_ERROR
- if(HADDR_UNDEF == (addr5 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE5)))
- FAIL_STACK_ERROR
- if(HADDR_UNDEF == (addr6 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE6)))
- FAIL_STACK_ERROR
-
- /* Put block #1, #3, #5 to H5FD_MEM_SUPER free-space manager */
- if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TEST_BLOCK_SIZE1) < 0)
- FAIL_STACK_ERROR
- if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr3, (hsize_t)TEST_BLOCK_SIZE3) < 0)
- FAIL_STACK_ERROR
- if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr5, (hsize_t)TEST_BLOCK_SIZE5) < 0)
- FAIL_STACK_ERROR
-
- /* Retrieve the total amount of free space and # of free-space sections */
- if(f->shared->fs_man[type] &&
- H5FS_sect_stats(f->shared->fs_man[type], &saved_tot_space, &saved_tot_sect_count) < 0)
- FAIL_STACK_ERROR
-
- /* H5F_FILE_SPACE_AGGR_VFD and H5F_FILE_SPACE_VFD: should not have free-space manager */
- if(fs_type > H5F_FILE_SPACE_ALL && f->shared->fs_man[type])
- TEST_ERROR
+ if(contig_addr_vfd) {
- /* Close the file */
- if(H5Fclose(file) < 0)
- FAIL_STACK_ERROR
-
- /* Re-open the file */
- if((file = H5Fopen(filename, H5F_ACC_RDWR, fapl_new)) < 0)
- FAIL_STACK_ERROR
-
- /* Get a pointer to the internal file object */
- if(NULL == (f = (H5F_t *)H5I_object(file)))
- FAIL_STACK_ERROR
-
- switch(fs_type) {
- case H5F_FILE_SPACE_ALL_PERSIST:
- if(fs_threshold <= TEST_BLOCK_SIZE5) {
- if(!H5F_addr_defined(f->shared->fs_addr[type]))
- TEST_ERROR
-
- /* Open the free-space manager */
- if(H5MF_alloc_open(f, H5AC_ind_read_dxpl_id, type) < 0)
- FAIL_STACK_ERROR
-
- /* Retrieve the total amount of free space and # of free-space sections */
- if(H5FS_sect_stats(f->shared->fs_man[type], &tot_space, &tot_sect_count) < 0)
- FAIL_STACK_ERROR
-
- /* Verify that tot_space should be >= saved_tot_space */
- /* Verify that tot_sect_count should be >= saved_tot_sect_count */
- if(tot_space < saved_tot_space || tot_sect_count < saved_tot_sect_count)
- TEST_ERROR
-
- /* Retrieve block #5 from H5FD_MEM_SUPER free-space manager */
- if(HADDR_UNDEF ==
- (tmp_addr = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE5)))
- FAIL_STACK_ERROR
-
- /* Should be the same as before */
- if(tmp_addr != addr5)
- TEST_ERROR
- } else if(H5F_addr_defined(f->shared->fs_addr[type]))
- TEST_ERROR
- break;
-
- case H5F_FILE_SPACE_ALL:
- case H5F_FILE_SPACE_AGGR_VFD:
- case H5F_FILE_SPACE_VFD:
- if(H5F_addr_defined(f->shared->fs_addr[type]))
- TEST_ERROR
- break;
-
- case H5F_FILE_SPACE_DEFAULT:
- case H5F_FILE_SPACE_NTYPES:
- default:
- TEST_ERROR
- break;
- } /* end switch */
+ /* File creation property list template */
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ FAIL_STACK_ERROR
- /* Closing */
- if(H5Fclose(file) < 0)
- FAIL_STACK_ERROR
- if(H5Pclose(fcpl) < 0)
- FAIL_STACK_ERROR
+ /* Copy the file access property list */
+ if((fapl2 = H5Pcopy(fapl)) < 0) FAIL_STACK_ERROR
- } /* end for fs_type */
- } /* end for fs_threshold */
+ if(new_format)
+ if(H5Pset_libver_bounds(fapl2, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ FAIL_STACK_ERROR
- PASSED()
+ /* Set to aggregation and persisting free-space */
+ if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, TRUE, (hsize_t)1) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set the filename to use for this test (dependent on fapl) */
+ h5_fixname(FILENAME[0], fapl2, filename, sizeof(filename));
+
+ /* Create the file to work on */
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl2)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get a pointer to the internal file object */
+ if(NULL == (f = (H5F_t *)H5I_object(file)))
+ FAIL_STACK_ERROR
+
+ /* Allocate 4 blocks */
+ type = H5FD_MEM_SUPER;
+ if(HADDR_UNDEF == (addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE1)))
+ FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE2)))
+ FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (addr3 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE3)))
+ FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (addr4 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE4)))
+ FAIL_STACK_ERROR
+
+ /* Put block #1, #3 to H5FD_MEM_SUPER free-space manager */
+ if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TBLOCK_SIZE1) < 0)
+ FAIL_STACK_ERROR
+ if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr3, (hsize_t)TBLOCK_SIZE3) < 0)
+ FAIL_STACK_ERROR
+
+ /* Retrieve block #1, #3 from H5FD_MEM_SUPER free-space manager */
+ if(HADDR_UNDEF == (addr3 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE3)))
+ FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE1)))
+ FAIL_STACK_ERROR
+
+ if(H5Fclose(file) < 0)
+ FAIL_STACK_ERROR
+
+ /* Re-open the file */
+ if((file = H5Fopen(filename, H5F_ACC_RDWR, fapl2)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get a pointer to the internal file object */
+ if(NULL == (f = (H5F_t *)H5I_object(file)))
+ FAIL_STACK_ERROR
+
+ H5MF_alloc_to_fs_type(f, type, TBLOCK_SIZE4, (H5F_mem_page_t *)&fs_type);
+
+ /* Verify that the H5FD_MEM_SUPER free-space manager is not there */
+ if(H5F_addr_defined(f->shared->fs_addr[fs_type]))
+ TEST_ERROR
+
+ /* Put block #3 to H5FD_MEM_SUPER free-space manager */
+ if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr3, (hsize_t)TBLOCK_SIZE3) < 0)
+ FAIL_STACK_ERROR
+
+ if(H5Fclose(file) < 0)
+ FAIL_STACK_ERROR
+
+ /* Re-open the file */
+ if((file = H5Fopen(filename, H5F_ACC_RDWR, fapl2)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get a pointer to the internal file object */
+ if(NULL == (f = (H5F_t *)H5I_object(file)))
+ FAIL_STACK_ERROR
+
+ /* Verify that H5FD_MEM_SUPER free-space manager is there */
+ if(!H5F_addr_defined(f->shared->fs_addr[fs_type]))
+ TEST_ERROR
+
+ /* Since we are about to open a self referential free space
+ * manager prior to the first file space allocation / deallocation
+ * call H5MF_tidy_self_referential_fsm_hack() first so as to avoid
+ * assertion failures on the first file space alloc / dealloc.
+ */
+ if(f->shared->first_alloc_dealloc){
+ if(SUCCEED!=H5MF_tidy_self_referential_fsm_hack(f,H5AC_ind_read_dxpl_id))
+ FAIL_STACK_ERROR
+ ran_H5MF_tidy_self_referential_fsm_hack = TRUE;
+ }
+
+ /* Start up H5FD_MEM_SUPER free-space manager */
+ if(!(f->shared->fs_man[fs_type]))
+ if(H5MF_open_fstype(f, H5AC_ind_read_dxpl_id, (H5F_mem_page_t)fs_type) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get info for H5FD_MEM_SUPER free-space manager */
+ if(H5FS_stat_info(f, f->shared->fs_man[fs_type], &fs_stat) < 0)
+ FAIL_STACK_ERROR
+
+ /* if we ran H5MF_tidy_self_referential_fsm_hack(), the
+ * H5FD_MEM_SUPER free space manager must be floating.
+ * Thus fs_stat.addr must be undefined.
+ */
+ if((!ran_H5MF_tidy_self_referential_fsm_hack) &&
+ (!H5F_addr_defined(fs_stat.addr)))
+ TEST_ERROR
+
+ if(fs_stat.tot_space < TBLOCK_SIZE3)
+ TEST_ERROR
+
+ /* Put block #4 to H5FD_MEM_SUPER free-space manager */
+ if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr4, (hsize_t)TBLOCK_SIZE4) < 0)
+ FAIL_STACK_ERROR
+
+ /* The H5FD_MEM_SUPER free-space manager will go away at H5MF_close() */
+ if(H5Fclose(file) < 0)
+ FAIL_STACK_ERROR
+
+ /* Re-open the file */
+ if((file = H5Fopen(filename, H5F_ACC_RDWR, fapl2)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get a pointer to the internal file object */
+ if(NULL == (f = (H5F_t *)H5I_object(file)))
+ FAIL_STACK_ERROR
+
+ /* Verify that the H5FD_MEM_SUPER free-space manager is not there */
+ if(H5F_addr_defined(f->shared->fs_addr[fs_type]))
+ TEST_ERROR
+
+ /* Closing */
+ if(H5Fclose(file) < 0)
+ FAIL_STACK_ERROR
+ if(H5Pclose(fcpl) < 0)
+ FAIL_STACK_ERROR
+ if(H5Pclose(fapl2) < 0)
+ FAIL_STACK_ERROR
+
+ PASSED()
+
+ } else {
+ SKIPPED();
+ puts(" Current VFD doesn't support persistent free-space manager");
+ }
return(0);
error:
H5E_BEGIN_TRY {
- H5Pclose(fcpl);
H5Fclose(file);
+ H5Pclose(fcpl);
+ H5Pclose(fapl2);
} H5E_END_TRY;
return(1);
-} /* test_filespace_strategy_threshold() */
+} /* test_mf_fs_gone() */
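+
+/*
+ * Reading note for the checks above: the "going away" behaviour is observed
+ * only through f->shared->fs_addr[].  When every remaining free section can
+ * be merged and shrunk away at file close, the re-opened file has no
+ * on-disk free-space manager and H5F_addr_defined(fs_addr[fs_type]) is
+ * FALSE; when a section survives (block #3 above), the manager is written
+ * out and fs_addr[fs_type] is defined on re-open.
+ */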
/*
- * Verify section is merged/shrunk away for
- * H5F_FILE_SPACE_ALL_PERSIST and H5F_FILE_SPACE_ALL strategy.
+ *-------------------------------------------------------------------------
+ * Verify that free-space persists with combinations of
+ * file space strategy and free-space threshold as specified.
+ *-------------------------------------------------------------------------
*/
static unsigned
-test_filespace_gone(hid_t fapl_new)
+test_mf_strat_thres_persist(const char *env_h5_drvr, hid_t fapl, hbool_t new_format)
{
- hid_t file = -1; /* File ID */
- hid_t fcpl = -1; /* File creation propertly list template */
- char filename[FILENAME_LEN]; /* Filename to use */
- H5F_t *f = NULL; /* Internal file object pointer */
- H5FD_mem_t type; /* File allocation type */
- haddr_t addr1, addr2, addr3, addr4, addr5, addr6; /* File address for H5FD_MEM_SUPER */
- H5F_file_space_type_t fs_type; /* File space handling strategy */
- hsize_t fs_threshold; /* Free space section threshold */
- frspace_state_t state; /* State of free space manager */
-
- TESTING("file space merge/shrink for section size < threshold");
+ hid_t file = -1; /* File ID */
+ hid_t fcpl = -1; /* File creation property list template */
+ hid_t fapl2 = -1; /* File access property list template */
+ char filename[FILENAME_LEN]; /* Filename to use */
+ H5F_t *f = NULL; /* Internal file object pointer */
+ H5FD_mem_t type; /* File allocation type */
+    H5FD_mem_t tt;               /* Free-space manager type for the allocation */
+ haddr_t addr1, addr2, addr3, addr4, addr5, addr6; /* File address for H5FD_MEM_SUPER */
+ H5F_fspace_strategy_t fs_type; /* File space handling strategy */
+ hsize_t fs_threshold; /* Free-space section threshold */
+ unsigned fs_persist; /* To persist free-space or not */
+    hbool_t contig_addr_vfd;     /* Whether the VFD has a contiguous address space */
+
+ if(new_format)
+ TESTING("File space strategy/persisting/threshold with new library format")
+ else
+ TESTING("File space strategy/persisting/threshold with old library format")
+
+    /* Whether the current VFD supports a contiguous address space (i.e., it is not the split or multi driver) */
+ contig_addr_vfd = (hbool_t)(HDstrcmp(env_h5_drvr, "split") && HDstrcmp(env_h5_drvr, "multi"));
+
/* Set the filename to use for this test (dependent on fapl) */
- h5_fixname(FILENAME[0], fapl_new, filename, sizeof(filename));
-
- /* Set free-space threshold */
- fs_threshold = TEST_THRESHOLD3;
-
- for(fs_type = H5F_FILE_SPACE_ALL_PERSIST; fs_type <= H5F_FILE_SPACE_ALL; H5_INC_ENUM(H5F_file_space_type_t, fs_type)) {
- /* Create file-creation template */
- if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
- FAIL_STACK_ERROR
-
- /* Set default file space information */
- if(H5Pset_file_space(fcpl, fs_type, fs_threshold) < 0)
- FAIL_STACK_ERROR
-
- /* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl_new)) < 0)
- FAIL_STACK_ERROR
-
- /* Get a pointer to the internal file object */
- if(NULL == (f = (H5F_t *)H5I_object(file)))
- FAIL_STACK_ERROR
-
- /* Allocate 6 blocks */
- type = H5FD_MEM_SUPER;
- if(HADDR_UNDEF == (addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE1)))
- FAIL_STACK_ERROR
- if(HADDR_UNDEF == (addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE2)))
- FAIL_STACK_ERROR
- if(HADDR_UNDEF == (addr3 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE3)))
- FAIL_STACK_ERROR
- if(HADDR_UNDEF == (addr4 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE4)))
- FAIL_STACK_ERROR
- if(HADDR_UNDEF == (addr5 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE5)))
- FAIL_STACK_ERROR
- if(HADDR_UNDEF == (addr6 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE6)))
- FAIL_STACK_ERROR
-
- /* Put block #3, #5 to H5FD_MEM_SUPER free-space manager */
- if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr3, (hsize_t)TEST_BLOCK_SIZE3) < 0)
- FAIL_STACK_ERROR
- if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr5, (hsize_t)TEST_BLOCK_SIZE5) < 0)
- FAIL_STACK_ERROR
-
- HDmemset(&state, 0, sizeof(frspace_state_t));
- state.tot_space += TEST_BLOCK_SIZE3 + TEST_BLOCK_SIZE5;
- state.tot_sect_count += 2;
- state.serial_sect_count += 2;
-
- if(check_stats(f, f->shared->fs_man[type], &state))
- TEST_ERROR
-
- /* section #2 is less than threshold but is merged into section #3 */
- if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr2, (hsize_t)TEST_BLOCK_SIZE2) < 0)
- FAIL_STACK_ERROR
-
- state.tot_space += TEST_BLOCK_SIZE2;
- if(check_stats(f, f->shared->fs_man[type], &state))
- TEST_ERROR
-
- if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr4, (hsize_t)TEST_BLOCK_SIZE4) < 0)
- FAIL_STACK_ERROR
-
- if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr6, (hsize_t)TEST_BLOCK_SIZE6) < 0)
- FAIL_STACK_ERROR
-
- /* all sections should be shrunk away except section #1 */
- HDmemset(&state, 0, sizeof(frspace_state_t));
- if(check_stats(f, f->shared->fs_man[type], &state))
- TEST_ERROR
-
- /* section #1 is less than threshold but is shrunk away */
- if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TEST_BLOCK_SIZE1) < 0)
- FAIL_STACK_ERROR
-
- /* free-space manager should be empty */
- HDmemset(&state, 0, sizeof(frspace_state_t));
- if(check_stats(f, f->shared->fs_man[type], &state))
- TEST_ERROR
-
- if(H5Fclose(file) < 0)
- FAIL_STACK_ERROR
-
- /* Re-open the file */
- if((file = H5Fopen(filename, H5F_ACC_RDWR, fapl_new)) < 0)
- FAIL_STACK_ERROR
-
- /* Get a pointer to the internal file object */
- if(NULL == (f = (H5F_t *)H5I_object(file)))
- FAIL_STACK_ERROR
-
- /* free-space manager should be empty */
- if(H5F_addr_defined(f->shared->fs_addr[type]))
- TEST_ERROR
-
- if(H5Fclose(file) < 0)
- FAIL_STACK_ERROR
-
- if(H5Pclose(fcpl) < 0)
- FAIL_STACK_ERROR
-
- } /* end for fs_type */
+ h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
+
+ /* Copy the file access property list */
+ if((fapl2 = H5Pcopy(fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ if(new_format)
+ if(H5Pset_libver_bounds(fapl2, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ FAIL_STACK_ERROR
+
+ /* Test with TRUE or FALSE for persisting free-space */
+ for(fs_persist = FALSE; fs_persist <= TRUE; fs_persist++) {
+
+ for(fs_threshold = 0; fs_threshold <= TEST_THRESHOLD10; fs_threshold++) {
+
+ /* Testing for H5F_FSPACE_STRATEGY_FSM_AGGR and H5F_FSPACE_STRATEGY_PAGE strategies only */
+ for(fs_type = H5F_FSPACE_STRATEGY_FSM_AGGR; fs_type < H5F_FSPACE_STRATEGY_AGGR; H5_INC_ENUM(H5F_fspace_strategy_t, fs_type)) {
+
+ if(!contig_addr_vfd && (fs_persist || fs_type == H5F_FSPACE_STRATEGY_PAGE))
+ continue;
+
+ /* Create file-creation template */
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set default file space information */
+ if(H5Pset_file_space_strategy(fcpl, fs_type, (hbool_t)fs_persist, fs_threshold) < 0)
+ FAIL_STACK_ERROR
+
+ /* Create the file to work on */
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl2)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get a pointer to the internal file object */
+ if(NULL == (f = (H5F_t *)H5I_object(file)))
+ FAIL_STACK_ERROR
+
+ /* Allocate 6 blocks */
+ type = H5FD_MEM_SUPER;
+ if(HADDR_UNDEF == (addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE1)))
+ FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE2)))
+ FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (addr3 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE3)))
+ FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (addr4 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE4)))
+ FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (addr5 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE5)))
+ FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (addr6 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE6)))
+ FAIL_STACK_ERROR
+
+ /* Put block #1, #3, #5 to H5FD_MEM_SUPER free-space manager */
+ if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TBLOCK_SIZE1) < 0)
+ FAIL_STACK_ERROR
+ if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr3, (hsize_t)TBLOCK_SIZE3) < 0)
+ FAIL_STACK_ERROR
+ if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr5, (hsize_t)TBLOCK_SIZE5) < 0)
+ FAIL_STACK_ERROR
+
+
+ /* Close the file */
+ if(H5Fclose(file) < 0)
+ FAIL_STACK_ERROR
+
+ /* Re-open the file */
+ if((file = H5Fopen(filename, H5F_ACC_RDWR, fapl2)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get a pointer to the internal file object */
+ if(NULL == (f = (H5F_t *)H5I_object(file)))
+ FAIL_STACK_ERROR
+
+ H5MF_alloc_to_fs_type(f, type, TBLOCK_SIZE6, (H5F_mem_page_t *)&tt);
+
+ /* Get a pointer to the internal file object */
+ if(NULL == (f = (H5F_t *)H5I_object(file)))
+ FAIL_STACK_ERROR
+
+ if(f->shared->fs_persist) {
+ hssize_t nsects; /* # of free-space sections */
+ int i; /* local index variable */
+ H5F_sect_info_t *sect_info; /* array to hold the free-space information */
+
+ /* Get the # of free-space sections in the file */
+ if((nsects = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)0, NULL)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Verify no free-space sections */
+                /* When the threshold is above TBLOCK_SIZE5, no free-space sections should
+                 * have been persisted for the FSM_AGGR strategy (paged aggregation keeps
+                 * one section for the last small-data page) */
+ TEST_ERROR
+
+ if(nsects) {
+ /* Allocate storage for the free space section information */
+ sect_info = (H5F_sect_info_t *)HDcalloc((size_t)nsects, sizeof(H5F_sect_info_t));
+
+ H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)nsects, sect_info);
+
+ /* Verify the size of free-space sections */
+ for(i = 0; i < nsects; i++)
+ if(sect_info[i].size < fs_threshold)
+ TEST_ERROR
+ if(sect_info)
+ HDfree(sect_info);
+ }
+ } else {
+ if(H5F_addr_defined(f->shared->fs_addr[tt]))
+ TEST_ERROR
+ }
+
+ /* Closing */
+ if(H5Fclose(file) < 0)
+ FAIL_STACK_ERROR
+ if(H5Pclose(fcpl) < 0)
+ FAIL_STACK_ERROR
+ } /* end for fs_type */
+ } /* end for fs_threshold */
+ } /* end for fs_persist */
+
+ if(H5Pclose(fapl2) < 0)
+ FAIL_STACK_ERROR
PASSED()
@@ -7377,141 +7252,200 @@ test_filespace_gone(hid_t fapl_new)
error:
H5E_BEGIN_TRY {
H5Pclose(fcpl);
+ H5Pclose(fapl2);
H5Fclose(file);
} H5E_END_TRY;
return(1);
-} /* test_filespace_gone() */
+} /* test_mf_strat_thres_persist() */
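+
+/*
+ * Illustrative sketch (assumes an open file "fid"; any HDF5 file will do):
+ * the free-section query used above follows the usual two-call idiom of
+ * H5Fget_free_sections() -- first call with a NULL buffer to obtain the
+ * count, then fetch the section descriptions into a caller-allocated array.
+ *
+ *     hssize_t nsects = H5Fget_free_sections(fid, H5FD_MEM_DEFAULT, (size_t)0, NULL);
+ *     if(nsects > 0) {
+ *         H5F_sect_info_t *info = (H5F_sect_info_t *)HDcalloc((size_t)nsects, sizeof(H5F_sect_info_t));
+ *         H5Fget_free_sections(fid, H5FD_MEM_DEFAULT, (size_t)nsects, info);
+ *         ... inspect info[i].addr and info[i].size ...
+ *         HDfree(info);
+ *     }
+ */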
/*
- * Tests to verify file space management for different drivers.
+ *-------------------------------------------------------------------------
+ * Verify that free-space sections are merged/shrunk away under the file space
+ * settings: strategy, and persisting/not persisting free-space
+ *-------------------------------------------------------------------------
*/
static unsigned
-test_filespace_drivers(hid_t fapl)
+test_mf_strat_thres_gone(const char *env_h5_drvr, hid_t fapl, hbool_t new_format)
{
- hid_t fapl_new = -1; /* copy of file access property list */
- hid_t fapl2 = -1; /* copy of file access property list */
- unsigned new_format; /* Using library new format or not */
- unsigned ret = 0; /* return value */
+ hid_t file = -1; /* File ID */
+ hid_t fcpl = -1; /* File creation property list template */
+ hid_t fapl2 = -1; /* File access property list template */
+ char filename[FILENAME_LEN]; /* Filename to use */
+ H5F_t *f = NULL; /* Internal file object pointer */
+ H5FD_mem_t type; /* File allocation type */
+    H5FD_mem_t tt;               /* Free-space manager type for the allocation */
+ haddr_t addr1, addr2, addr3, addr4, addr5, addr6; /* File address for H5FD_MEM_SUPER */
+ H5F_fspace_strategy_t fs_type; /* File space handling strategy */
+ unsigned fs_persist; /* To persist free-space or not */
+ H5FS_stat_t fs_state; /* Information for free-space manager */
+    H5FS_stat_t fs_state_zero;   /* Zeroed-out statistics used as the expected empty state */
+    hbool_t contig_addr_vfd;     /* Whether the VFD has a contiguous address space */
+
+ if(new_format)
+ TESTING("File space merge/shrink for section size < threshold with new library format")
+ else
+ TESTING("File space merge/shrink for section size < threshold with old library format")
+
+    /* Whether the current VFD supports a contiguous address space (i.e., it is not the split or multi driver) */
+ contig_addr_vfd = (hbool_t)(HDstrcmp(env_h5_drvr, "split") && HDstrcmp(env_h5_drvr, "multi"));
- H5FD_mem_t memb_map[H5FD_MEM_NTYPES]; /* Memory usage map */
- hid_t memb_fapl[H5FD_MEM_NTYPES]; /* Member access properties */
- char sv[H5FD_MEM_NTYPES][64]; /* Name generators */
- const char *memb_name[H5FD_MEM_NTYPES]; /* Name generators */
- haddr_t memb_addr[H5FD_MEM_NTYPES]; /* Member starting address */
- /* Copy the file access property list */
- if((fapl2 = H5Pcopy(fapl)) < 0) TEST_ERROR
+ /* Set the filename to use for this test (dependent on fapl) */
+ h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
- /* Set the "use the latest version of the format" bounds for creating objects in the file */
- if(H5Pset_libver_bounds(fapl2, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
- TEST_ERROR
+ HDmemset(&fs_state_zero, 0, sizeof(H5FS_stat_t));
- /* Test with old and new format */
- for(new_format = FALSE; new_format <= TRUE; new_format++) {
+ /* Copy the file access property list */
+ if((fapl2 = H5Pcopy(fapl)) < 0)
+ FAIL_STACK_ERROR
- if(new_format)
- HDputs("Testing the following tests for file space management with new library format...");
- else
- HDputs("Testing the following tests for file space management with old library format...");
+ if(new_format)
+ if(H5Pset_libver_bounds(fapl2, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ FAIL_STACK_ERROR
- /* SEC2 */
- HDputs("Testing file space management with sec2 driver");
+ /* Test with TRUE or FALSE for persisting free-space */
+ for(fs_persist = FALSE; fs_persist <= TRUE; fs_persist++) {
+ /* Testing for H5F_FSPACE_STRATEGY_FSM_AGGR and H5F_FSPACE_STRATEGY_PAGE strategies only */
+ for(fs_type = H5F_FSPACE_STRATEGY_FSM_AGGR; fs_type < H5F_FSPACE_STRATEGY_AGGR; H5_INC_ENUM(H5F_fspace_strategy_t, fs_type)) {
- if((fapl_new = H5Pcopy(new_format?fapl2:fapl)) < 0)
- FAIL_STACK_ERROR
- if(H5Pset_fapl_sec2(fapl_new) < 0)
- FAIL_STACK_ERROR
+ /* Skip for multi/split driver: persisting free-space or paged aggregation strategy */
+ if(!contig_addr_vfd && (fs_persist || fs_type == H5F_FSPACE_STRATEGY_PAGE))
+ continue;
- ret += test_filespace_strategy_threshold(fapl_new);
- ret += test_filespace_gone(fapl_new);
+ /* Clear out free-space statistics */
+ HDmemset(&fs_state, 0, sizeof(H5FS_stat_t));
- h5_clean_files(FILENAME, fapl_new);
+ /* Create file-creation template */
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ FAIL_STACK_ERROR
- /* STDIO */
- HDputs("Testing file space management with stdio driver");
+ /* Set default file space information */
+ if(H5Pset_file_space_strategy(fcpl, fs_type, fs_persist, (hsize_t)TEST_THRESHOLD3) < 0)
+ FAIL_STACK_ERROR
- if((fapl_new = H5Pcopy(new_format?fapl2:fapl)) < 0)
- FAIL_STACK_ERROR
- if(H5Pset_fapl_stdio(fapl_new) < 0)
- FAIL_STACK_ERROR
+ /* Create the file to work on */
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl2)) < 0)
+ FAIL_STACK_ERROR
- ret += test_filespace_strategy_threshold(fapl_new);
- ret += test_filespace_gone(fapl_new);
+ /* Get a pointer to the internal file object */
+ if(NULL == (f = (H5F_t *)H5I_object(file)))
+ FAIL_STACK_ERROR
- h5_clean_files(FILENAME, fapl_new);
+ /* Allocate 6 blocks */
+ type = H5FD_MEM_SUPER;
+ if(HADDR_UNDEF == (addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE1)))
+ FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (addr2 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE2)))
+ FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (addr3 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE3)))
+ FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (addr4 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE4)))
+ FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (addr5 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE5)))
+ FAIL_STACK_ERROR
+ if(HADDR_UNDEF == (addr6 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE6)))
+ FAIL_STACK_ERROR
- /* CORE */
- HDputs("Testing file space management with core driver");
+ H5MF_alloc_to_fs_type(f, type, TBLOCK_SIZE6, (H5F_mem_page_t *)&tt);
- /* create fapl to be a "core" file */
- if((fapl_new = H5Pcopy(new_format?fapl2:fapl)) < 0)
- FAIL_STACK_ERROR
- if(H5Pset_fapl_core(fapl_new, (size_t)CORE_INCREMENT, TRUE) < 0)
- FAIL_STACK_ERROR
+ /* For paged aggregation, the section in the page at EOF for small meta fs is not shrunk away */
+ if(fs_type == H5F_FSPACE_STRATEGY_PAGE) {
+ if(H5FS_stat_info(f, f->shared->fs_man[tt], &fs_state) < 0)
+ FAIL_STACK_ERROR
+ }
- ret += test_filespace_strategy_threshold(fapl_new);
- ret += test_filespace_gone(fapl_new);
+ /* Put block #3, #5 to H5FD_MEM_SUPER free-space manager */
+ if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr3, (hsize_t)TBLOCK_SIZE3) < 0)
+ FAIL_STACK_ERROR
+ if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr5, (hsize_t)TBLOCK_SIZE5) < 0)
+ FAIL_STACK_ERROR
- h5_clean_files(FILENAME, fapl_new);
+ fs_state.tot_space += TBLOCK_SIZE3 + TBLOCK_SIZE5;
+ fs_state.tot_sect_count += 2;
+ fs_state.serial_sect_count += 2;
- /* FAMILY */
- HDputs("Testing file space managers with family driver");
+ if(check_stats(f, f->shared->fs_man[tt], &fs_state))
+ TEST_ERROR
- if((fapl_new = H5Pcopy(new_format?fapl2:fapl)) < 0)
- FAIL_STACK_ERROR
- if(H5Pset_fapl_family(fapl_new, (hsize_t)FAMILY_SIZE, H5P_DEFAULT) < 0)
- FAIL_STACK_ERROR
+ /* section #2 is less than threshold but is merged into section #3 */
+ if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr2, (hsize_t)TBLOCK_SIZE2) < 0)
+ FAIL_STACK_ERROR
- ret += test_filespace_strategy_threshold(fapl_new);
- ret += test_filespace_gone(fapl_new);
+ fs_state.tot_space += TBLOCK_SIZE2;
+ if(check_stats(f, f->shared->fs_man[tt], &fs_state))
+ TEST_ERROR
- h5_clean_files(FILENAME, fapl_new);
+ if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr4, (hsize_t)TBLOCK_SIZE4) < 0)
+ FAIL_STACK_ERROR
+ if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr6, (hsize_t)TBLOCK_SIZE6) < 0)
+ FAIL_STACK_ERROR
- /* SPLIT */
- HDputs("Testing file space managers with split driver");
+ /* For paged aggregation, the sections in the page at EOF for small meta fs are merged but are not shrunk away */
+ if(fs_type == H5F_FSPACE_STRATEGY_PAGE) {
+ fs_state.tot_sect_count = fs_state.serial_sect_count = 1;
+ fs_state.tot_space += (TBLOCK_SIZE4 + TBLOCK_SIZE6);
+ }
- if((fapl_new = H5Pcopy(new_format?fapl2:fapl)) < 0)
- FAIL_STACK_ERROR
- if(H5Pset_fapl_split(fapl_new, "-m.h5", H5P_DEFAULT, "-r.h5", H5P_DEFAULT)<0)
- FAIL_STACK_ERROR
+            /* For the non-paged (FSM_AGGR) strategy: the sections at EOF are shrunk away */
+ if(check_stats(f, f->shared->fs_man[tt], (fs_type == H5F_FSPACE_STRATEGY_PAGE) ? &fs_state:&fs_state_zero))
+ TEST_ERROR
- ret += test_filespace_strategy_threshold(fapl_new);
- ret += test_filespace_gone(fapl_new);
+ /* section #1 is less than threshold but is shrunk away */
+ if(H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TBLOCK_SIZE1) < 0)
+ FAIL_STACK_ERROR
- h5_clean_files(FILENAME, fapl_new);
+ /* For paged aggregation, the section in the page at EOF for small meta fs is not shrunk away */
+ if(fs_type == H5F_FSPACE_STRATEGY_PAGE)
+ fs_state.tot_space += TBLOCK_SIZE1;
- /* MULTI */
- HDputs("Testing file space managers with multi driver");
+            /* For the non-paged (FSM_AGGR) strategy: the sections at EOF are shrunk away */
+ if(check_stats(f, f->shared->fs_man[tt], (fs_type == H5F_FSPACE_STRATEGY_PAGE) ? &fs_state : &fs_state_zero))
+ TEST_ERROR
- MULTI_SETUP(memb_map, memb_fapl, memb_name, memb_addr, sv)
+ if(H5Fclose(file) < 0)
+ FAIL_STACK_ERROR
- if((fapl_new = H5Pcopy(new_format?fapl2:fapl)) < 0)
- TEST_ERROR
- if(H5Pset_fapl_multi(fapl_new, memb_map, memb_fapl, memb_name, memb_addr, TRUE) < 0)
- TEST_ERROR;
+ /* Re-open the file */
+ if((file = H5Fopen(filename, H5F_ACC_RDWR, fapl2)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get a pointer to the internal file object */
+ if(NULL == (f = (H5F_t *)H5I_object(file)))
+ FAIL_STACK_ERROR
- ret += test_filespace_strategy_threshold(fapl_new);
- ret += test_filespace_gone(fapl_new);
+ /* Free-space manager should be empty */
+ if(!(fs_type == H5F_FSPACE_STRATEGY_PAGE && fs_persist))
+ if(H5F_addr_defined(f->shared->fs_addr[tt]))
+ TEST_ERROR
- h5_clean_files(FILENAME, fapl_new);
+ /* Closing */
+ if(H5Fclose(file) < 0)
+ FAIL_STACK_ERROR
- } /* end for new_format */
+ if(H5Pclose(fcpl) < 0)
+ FAIL_STACK_ERROR
+ } /* end for fs_type */
+ } /* end for fs_persist */
- if (H5Pclose(fapl2) < 0)
+ if(H5Pclose(fapl2) < 0)
FAIL_STACK_ERROR
- return(ret);
+ PASSED()
+
+ return(0);
error:
H5E_BEGIN_TRY {
- H5Pclose(fapl_new);
+ H5Pclose(fcpl);
H5Pclose(fapl2);
+ H5Fclose(file);
} H5E_END_TRY;
return(1);
-} /* test_filespace_drivers() */
+} /* test_mf_strat_thres_gone() */
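+
+/*
+ * Worked example of the expected-state bookkeeping above (FSM_AGGR case):
+ * freeing blocks #3 and #5 creates two sections; freeing #2 merges into
+ * #3's section, so tot_space grows while the section count stays at 2;
+ * freeing #4 and #6 joins the free space up to EOF, so the merged section
+ * is shrunk away and the expected statistics fall back to zero.  Under the
+ * PAGE strategy the small meta data section at the end of the page is kept,
+ * which is why fs_state rather than fs_state_zero is used for comparison.
+ */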
/*
+ *-------------------------------------------------------------------------
* To verify that file space is allocated from the corresponding free-space manager
* because H5FD_FLMAP_DICHOTOMY is used as the default free-list mapping.
*
@@ -7530,80 +7464,1388 @@ error:
*
* (9) Allocate the third block (size 30) of type H5FD_MEM_SUPER
 * (10) Verify that this third block is not the freed block from (8)
+ *-------------------------------------------------------------------------
*/
static unsigned
-test_dichotomy(const char *env_h5_drvr, hid_t fapl)
+test_dichotomy(hid_t fapl)
{
hid_t file = -1; /* File ID */
char filename[FILENAME_LEN]; /* Filename to use */
H5F_t *f = NULL; /* Internal file object pointer */
H5FD_mem_t type, stype;
haddr_t addr1, addr3, saddr1, saddr2;
- hbool_t contig_addr_vfd; /* Whether VFD used has a contigous address space */
TESTING("Allocation from raw or metadata free-space manager");
- /* Skip test when using VFDs that don't use the metadata aggregator */
+ /* Set the filename to use for this test (dependent on fapl) */
+ h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
+
+ /* Create the file to work on */
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get a pointer to the internal file object */
+ if(NULL == (f = (H5F_t *)H5I_object(file)))
+ FAIL_STACK_ERROR
+
+ /* Allocate the first block of type H5FD_MEM_SUPER */
+ type = H5FD_MEM_SUPER;
+ addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
+
+ /* Allocate the second block of type H5FD_MEM_SUPER */
+ H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE50);
+
+ /* Allocate the first block of type H5FD_MEM_DRAW */
+ stype = H5FD_MEM_DRAW;
+ saddr1 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
+
+ /* Free the first block of type H5FD_MEM_SUPER */
+ H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TBLOCK_SIZE30);
+
+ /* Allocate the second block of type H5FD_MEM_DRAW */
+ saddr2 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
+
+    /* Verify that saddr2 is not the freed addr1 */
+ if(saddr2 == addr1) TEST_ERROR
+
+ /* Free the first block of type H5FD_MEM_DRAW */
+ H5MF_xfree(f, stype, H5AC_ind_read_dxpl_id, saddr1, (hsize_t)TBLOCK_SIZE30);
+
+ /* Allocate the third block of type H5FD_MEM_SUPER */
+ addr3 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
+
+ /* Verify that addr3 is not saddr1 */
+ if(addr3 == saddr1) TEST_ERROR
+
+ if(H5Fclose(file) < 0)
+ FAIL_STACK_ERROR
+
+ PASSED()
+
+ return(0);
+
+error:
+ H5E_BEGIN_TRY {
+ H5Fclose(file);
+ } H5E_END_TRY;
+ return(1);
+} /* test_dichotomy() */
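+
+/*
+ * Background note on the mapping the test relies on: H5FD_FLMAP_DICHOTOMY
+ * splits allocations into two free lists, one for raw data (H5FD_MEM_DRAW)
+ * and one for metadata types such as H5FD_MEM_SUPER, so a block freed under
+ * one class is not recycled to satisfy a request of the other class.  That
+ * is exactly what the two address comparisons above check.
+ */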
+
+/*
+ *-------------------------------------------------------------------------
+ * set_multi_split():
+ * Internal routine to set up page-aligned address space for multi/split driver
+ * when testing paged aggregation.
+ *-------------------------------------------------------------------------
+ */
+static int
+set_multi_split(hid_t fapl, hsize_t pagesize, hbool_t multi, hbool_t split)
+{
+ H5FD_mem_t memb_map[H5FD_MEM_NTYPES];
+ hid_t memb_fapl_arr[H5FD_MEM_NTYPES];
+ char *memb_name[H5FD_MEM_NTYPES];
+ haddr_t memb_addr[H5FD_MEM_NTYPES];
+ hbool_t relax;
+ H5FD_mem_t mt;
+
+ HDassert(multi || split);
+
+ HDmemset(memb_name, 0, sizeof memb_name);
+
+ /* Get current split settings */
+ if(H5Pget_fapl_multi(fapl, memb_map, memb_fapl_arr, memb_name, memb_addr, &relax) < 0)
+ TEST_ERROR
+
+ if(split) {
+ /* Set memb_addr aligned */
+ memb_addr[H5FD_MEM_SUPER] = ((memb_addr[H5FD_MEM_SUPER] + pagesize - 1) / pagesize) * pagesize;
+ memb_addr[H5FD_MEM_DRAW] = ((memb_addr[H5FD_MEM_DRAW] + pagesize - 1) / pagesize) * pagesize;
+ } else {
+ /* Set memb_addr aligned */
+ for(mt = H5FD_MEM_DEFAULT; mt < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, mt))
+ memb_addr[mt] = ((memb_addr[mt] + pagesize - 1) / pagesize) * pagesize;
+ } /* end else */
+
+ /* Set multi driver with new FAPLs */
+ if(H5Pset_fapl_multi(fapl, memb_map, memb_fapl_arr, (const char * const *)memb_name, memb_addr, relax) < 0)
+ TEST_ERROR
+
+ /* Free memb_name */
+ for(mt = H5FD_MEM_DEFAULT; mt < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, mt))
+ free(memb_name[mt]);
+
+ return 0;
+
+error:
+ return(-1);
+} /* set_multi_split() */
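+
+/*
+ * The rounding expression used above,
+ *     ((addr + pagesize - 1) / pagesize) * pagesize,
+ * rounds a member starting address up to the next page multiple.  For
+ * example, with pagesize = 4096: addr = 5000 -> 8192, addr = 4096 -> 4096,
+ * and addr = 0 -> 0.
+ */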
+
+/*-------------------------------------------------------------------------
+ * Function: test_page_alloc_xfree
+ *
+ * Purpose: To verify allocations and de-allocations for large/small
+ * sections are done properly when paged aggregation is enabled.
+ *
+ * Return: Success: 0
+ * Failure: number of errors
+ *
+ * Programmer: Vailin Choi; Jan 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+static unsigned
+test_page_alloc_xfree(const char *env_h5_drvr, hid_t fapl)
+{
+
+ hid_t fid = -1; /* File ID */
+ hid_t fcpl = -1; /* File creation property list */
+ hid_t fapl_new = -1; /* File access property list ID */
+ H5F_t *f = NULL; /* Internal file object pointer */
+ haddr_t addr2, addr3; /* Addresses for small meta data blocks */
+ haddr_t saddr1; /* Addresses for small raw data blocks */
+ haddr_t gaddr1; /* Addresses for large data blocks */
+ hbool_t split = FALSE, multi = FALSE;
+ char filename[FILENAME_LEN]; /* Filename to use */
+ haddr_t found_addr; /* Address of the found section */
+ unsigned fs_persist; /* To persist free-space or not */
+
+ TESTING("Paged aggregation for file space: H5MF_alloc/H5MF_xfree");
+
+ /* Check for split or multi driver */
+ if(!HDstrcmp(env_h5_drvr, "split"))
+ split = TRUE;
+ else if(!HDstrcmp(env_h5_drvr, "multi"))
+ multi = TRUE;
+
+ if(!multi && !split) {
+
+ /* Set the filename to use for this test (dependent on fapl) */
+ h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
+
+ if((fapl_new = H5Pcopy(fapl)) < 0) TEST_ERROR
+
+ if(multi || split)
+ if(set_multi_split(fapl_new, 4096, multi, split) < 0)
+ TEST_ERROR;
+
+ /* Test with TRUE or FALSE for persisting free-space */
+ for(fs_persist = FALSE; fs_persist <= TRUE; fs_persist++) {
+ H5F_mem_page_t fs_type;
+
+ /* File creation property list */
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ TEST_ERROR
+
+ /* Set the strategy to paged aggregation */
+ if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, fs_persist, (hsize_t)1) < 0)
+ TEST_ERROR
+
+ /* Create the file to work on */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl_new)) < 0)
+ TEST_ERROR
+
+ /* Get a pointer to the internal file object */
+ if(NULL == (f = (H5F_t *)H5I_object(fid)))
+ TEST_ERROR
+
+            /* Allocate 3 small meta data blocks; only the 2nd and 3rd addresses (addr2, addr3) are kept */
+ H5MF_alloc(f, H5FD_MEM_OHDR, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
+ addr2 = H5MF_alloc(f, H5FD_MEM_OHDR, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE1034);
+ addr3 = H5MF_alloc(f, H5FD_MEM_OHDR, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE50);
+
+ /* Free the block with addr2 */
+ H5MF_xfree(f, H5FD_MEM_OHDR, H5AC_ind_read_dxpl_id, addr2, (hsize_t)TBLOCK_SIZE1034);
+
+ if(!fs_persist) {
+
+ H5MF_alloc_to_fs_type(f, H5FD_MEM_OHDR, TBLOCK_SIZE1034, (H5F_mem_page_t *)&fs_type);
+
+ /* Verify that the freed block with addr2 is found from the small meta data manager */
+ if(H5MF_find_sect(f, H5FD_MEM_OHDR, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE1034, f->shared->fs_man[fs_type], &found_addr) < 0)
+ TEST_ERROR
+ if(found_addr != addr2)
+ TEST_ERROR
+ } /* end if */
+
+            /* Allocate 2 small raw data blocks; only the first address (saddr1) is kept */
+ saddr1 = H5MF_alloc(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
+ H5MF_alloc(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE1034);
+
+ /* Free the block with saddr1 */
+ H5MF_xfree(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, saddr1, (hsize_t)TBLOCK_SIZE30);
+
+ if(!fs_persist) {
+ /* Verify that the freed block with saddr1 is found from the small raw data manager */
+ if(H5MF_find_sect(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30, f->shared->fs_man[H5F_MEM_PAGE_DRAW], &found_addr) < 0)
+ TEST_ERROR
+ if(found_addr != saddr1)
+ TEST_ERROR
+ } /* end if */
+
+            /* Allocate 2 large data blocks; only the first address (gaddr1) is kept */
+ gaddr1 = H5MF_alloc(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE5000);
+ H5MF_alloc(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE8000);
+
+ /* Free the block with gaddr1 */
+ H5MF_xfree(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, gaddr1, (hsize_t)TBLOCK_SIZE5000);
+
+ if(!fs_persist) {
+
+ H5MF_alloc_to_fs_type(f, H5FD_MEM_DRAW, TBLOCK_SIZE5000, (H5F_mem_page_t *)&fs_type);
+
+ /* Verify that the freed block with gaddr1 is found from the large data manager */
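+                /* (The lookup uses TBLOCK_SIZE8192 rather than TBLOCK_SIZE5000: with a 4096-byte page size the 5000-byte large block presumably occupies two whole pages, so a two-page section is expected back.) */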
+ if(H5MF_find_sect(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE8192, f->shared->fs_man[fs_type], &found_addr) < 0)
+ TEST_ERROR
+ if(found_addr != gaddr1)
+ TEST_ERROR
+ } /* end if */
+
+ /* Close file */
+ if(H5Fclose(fid) < 0)
+ TEST_ERROR
+
+ /* Close the property list */
+ if(H5Pclose(fcpl) < 0)
+ TEST_ERROR
+
+ if(fs_persist) {
+ /* Re-open the file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl_new)) < 0)
+ TEST_ERROR
+
+ /* Get a pointer to the internal file object */
+ if(NULL == (f = (H5F_t *)H5I_object(fid)))
+ TEST_ERROR
+
+ /* Verify that the large generic manager is there */
+ H5MF_alloc_to_fs_type(f, H5FD_MEM_DRAW, TBLOCK_SIZE5000, (H5F_mem_page_t *)&fs_type);
+ if(!H5F_addr_defined(f->shared->fs_addr[fs_type]))
+ TEST_ERROR
+
+ /* Verify that the small meta data manager is there */
+ H5MF_alloc_to_fs_type(f, H5FD_MEM_OHDR, f->shared->fs_page_size - 1, (H5F_mem_page_t *)&fs_type);
+ if(!H5F_addr_defined(f->shared->fs_addr[fs_type]))
+ TEST_ERROR
+
+ /* Since we are about to open a self referential free space
+ * manager prior to the first file space allocation / deallocation
+ * call H5MF_tidy_self_referential_fsm_hack() first so as to avoid
+ * assertion failures on the first file space alloc / dealloc.
+ */
+ if(f->shared->first_alloc_dealloc){
+ if(SUCCEED!=H5MF_tidy_self_referential_fsm_hack(f,H5AC_ind_read_dxpl_id))
+ FAIL_STACK_ERROR
+ }
+
+ /* Set up to use the small meta data manager */
+ if(!(f->shared->fs_man[fs_type]))
+ if(H5MF_open_fstype(f, H5AC_ind_read_dxpl_id, fs_type) < 0)
+ TEST_ERROR
+
+            /* Verify that the free section at the tail of the small meta data page (just past the addr3 block) is found in the small meta data manager */
+ if(H5MF_find_sect(f, H5FD_MEM_OHDR, H5AC_ind_read_dxpl_id, (hsize_t)(f->shared->fs_page_size-(addr3+TBLOCK_SIZE50)), f->shared->fs_man[fs_type], &found_addr) < 0)
+ TEST_ERROR
+
+ if(found_addr != (addr3+TBLOCK_SIZE50))
+ TEST_ERROR
+
+ /* Verify that the small raw data manager is there */
+ if(!H5F_addr_defined(f->shared->fs_addr[H5F_MEM_PAGE_DRAW]))
+ TEST_ERROR
+
+ /* Set up to use the small raw data manager */
+ if(!(f->shared->fs_man[H5F_MEM_PAGE_DRAW]))
+ if(H5MF_open_fstype(f, H5AC_ind_read_dxpl_id, H5F_MEM_PAGE_DRAW) < 0)
+ TEST_ERROR
+
+ /* Verify that the freed block with saddr1 is found from the small raw data manager */
+ if(H5MF_find_sect(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30, f->shared->fs_man[H5F_MEM_PAGE_DRAW], &found_addr) < 0)
+ TEST_ERROR
+ if(found_addr != saddr1)
+ TEST_ERROR
+
+ H5MF_alloc_to_fs_type(f, H5FD_MEM_DRAW, TBLOCK_SIZE5000, (H5F_mem_page_t *)&fs_type);
+
+ if(!(f->shared->fs_man[fs_type]))
+ /* Set up to use the large data manager */
+ if(H5MF_open_fstype(f, H5AC_ind_read_dxpl_id, fs_type) < 0)
+ TEST_ERROR
+
+ /* Verify that the freed block with gaddr1 is found from the large data manager */
+ if(H5MF_find_sect(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE8192, f->shared->fs_man[fs_type], &found_addr) < 0)
+ TEST_ERROR
+ if(found_addr != gaddr1)
+ TEST_ERROR
+
+ /* Close file */
+ if(H5Fclose(fid) < 0)
+ TEST_ERROR
+ } /* end if fs_persist */
+ } /* end for */
+
+ if(H5Pclose(fapl_new) < 0)
+ TEST_ERROR
+
+ PASSED()
+
+ } else {
+ SKIPPED();
+ puts(" Current VFD doesn't support persisting free-space or paged aggregation strategy");
+ }
+
+ return(0);
+
+error:
+ H5E_BEGIN_TRY {
+ H5Fclose(fid);
+ H5Pclose(fcpl);
+ H5Pclose(fapl_new);
+ } H5E_END_TRY;
+ return(1);
+
+} /* test_page_alloc_xfree() */
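+
+/*
+ * Reading note for the allocations above: the sub-page requests (sizes 30,
+ * 50 and 1034) land in the small meta data or small raw data managers,
+ * depending on the H5FD_MEM_* type, while the page-or-larger requests
+ * (sizes 5000 and 8000) land in the large data manager;
+ * H5MF_alloc_to_fs_type() is what resolves a (type, size) request to the
+ * matching manager.
+ */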
+
+/*-------------------------------------------------------------------------
+ * Function: test_page_try_shrink
+ *
+ * Purpose:     To verify that shrinking via H5MF_try_shrink() works properly
+ * when paged aggregation is enabled.
+ *
+ * Return: Success: 0
+ * Failure: number of errors
+ *
+ * Programmer: Vailin Choi; Jan 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+static unsigned
+test_page_try_shrink(const char *env_h5_drvr, hid_t fapl)
+{
+
+ hid_t fid = -1; /* File ID */
+ hid_t fcpl = -1; /* File creation property list */
+ H5F_t *f = NULL; /* Internal file object pointer */
+ haddr_t addr1; /* Address for small meta data block */
+ haddr_t saddr1; /* Address for small raw data block */
+ haddr_t gaddr1; /* Address for large data block */
+    hbool_t contig_addr_vfd;    /* Whether VFD used has a contiguous address space */
+ htri_t status; /* status from shrinking */
+ h5_stat_size_t file_size; /* File size */
+ char filename[FILENAME_LEN]; /* Filename to use */
+
+ TESTING("Paged aggregation for file space: H5MF_try_shrink()");
+
+    /* Whether the current VFD supports a contiguous address space (i.e., it is not the split or multi driver) */
+ contig_addr_vfd = (hbool_t)(HDstrcmp(env_h5_drvr, "split") && HDstrcmp(env_h5_drvr, "multi") );
+
+ if(contig_addr_vfd) {
+
+ /* Set the filename to use for this test (dependent on fapl) */
+ h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
+
+ /* File creation property list */
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set the strategy to paged aggregation */
+ if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, FALSE, (hsize_t)1) < 0)
+ FAIL_STACK_ERROR
+
+ /* Create the file to work on */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get a pointer to the internal file object */
+ if(NULL == (f = (H5F_t *)H5I_object(fid)))
+ FAIL_STACK_ERROR
+
+ /* Allocate a small meta data block with addr1 */
+ addr1 = H5MF_alloc(f, H5FD_MEM_OHDR, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE50);
+
+ /* Try to shrink the block with addr1 */
+ if((status = H5MF_try_shrink(f, H5FD_MEM_OHDR, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TBLOCK_SIZE50)) < 0)
+ FAIL_STACK_ERROR
+
+        /* Couldn't shrink because the section (the remaining space in the page) is in the small meta data free-space manager */
+ if(status == TRUE) TEST_ERROR
+
+ /* Allocate a small raw data block with saddr1 */
+ saddr1 = H5MF_alloc(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE50);
+
+ /* Try to shrink the block with saddr1 */
+ if((status = H5MF_try_shrink(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, saddr1, (hsize_t)TBLOCK_SIZE50)) < 0)
+ FAIL_STACK_ERROR
+
+        /* Couldn't shrink because the section (the remaining space in the page) is in the small raw data free-space manager */
+ if(status == TRUE) TEST_ERROR
+
+ /* Allocate a large data block with gaddr1 */
+ gaddr1 = H5MF_alloc(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE5000);
+
+ /* Try to shrink the block with gaddr1 */
+ if((status = H5MF_try_shrink(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, gaddr1, (hsize_t)TBLOCK_SIZE5000)) < 0)
+ FAIL_STACK_ERROR
+
+        /* Couldn't shrink because the section (the remaining space in the page) is in the large-sized free-space manager */
+ if(status == TRUE) TEST_ERROR
+
+ /* Free the block with saddr1--merge to become 1 page, then return to the large manager */
+ H5MF_xfree(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, saddr1, (hsize_t)TBLOCK_SIZE50);
+
+        /* Free the block with gaddr1--all 3 free sections are merged and then shrunk away */
+ H5MF_xfree(f, H5FD_MEM_OHDR, H5AC_ind_read_dxpl_id, gaddr1, (hsize_t)TBLOCK_SIZE5000);
+
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get the size of the file */
+ if((file_size = h5_get_file_size(filename, fapl)) < 0)
+ TEST_ERROR
+
+ /* Should be on page boundary */
+ if(file_size % TBLOCK_SIZE4096)
+ TEST_ERROR
+
+ /* Close the property list */
+ if(H5Pclose(fcpl) < 0)
+ FAIL_STACK_ERROR
+
+ PASSED()
+
+ } else {
+ SKIPPED();
+ puts(" Current VFD doesn't support paged aggregation");
+ }
+
+ return(0);
+
+error:
+ H5E_BEGIN_TRY {
+ H5Fclose(fid);
+ H5Pclose(fcpl);
+ } H5E_END_TRY;
+ return(1);
+
+} /* test_page_try_shrink() */
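+
+/*
+ * Note on the final size check above: once the three free sections have
+ * been merged and shrunk away, the end of file is expected to land exactly
+ * on a page boundary, hence the "file_size % TBLOCK_SIZE4096" test.
+ */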
+
+/*-------------------------------------------------------------------------
+ * Function: test_page_small_try_extend
+ *
+ * Purpose: To verify that extending a small block via H5MF_try_extend() works
+ * properly when paged aggregation is enabled.
+ *
+ * Return: Success: 0
+ * Failure: number of errors
+ *
+ * Programmer: Vailin Choi; Jan 2013
+ *
+ *-------------------------------------------------------------------------
+ */
+static unsigned
+test_page_small_try_extend(const char *env_h5_drvr, hid_t fapl)
+{
+
+ hid_t fid = -1; /* File ID */
+ hid_t fcpl = -1; /* File creation property list */
+ H5F_t *f = NULL; /* Internal file object pointer */
+ haddr_t addr1, addr2, addr3; /* Addresses for small meta data blocks */
+ haddr_t saddr1; /* Address for small raw data block */
+    hbool_t contig_addr_vfd;    /* Whether VFD used has a contiguous address space */
+ htri_t was_extended; /* Whether the block can be extended or not */
+ char filename[FILENAME_LEN]; /* Filename to use */
+
+ TESTING("Paged aggregation for file space: H5MF_try_extend() a small block");
+
+ /* Check whether the current VFD provides a contiguous address space (i.e., is not the split/multi/family driver) */
+ contig_addr_vfd = (hbool_t)(HDstrcmp(env_h5_drvr, "split") && HDstrcmp(env_h5_drvr, "multi") && HDstrcmp(env_h5_drvr, "family"));
+
+ if(contig_addr_vfd) {
+
+ /* Set the filename to use for this test (dependent on fapl) */
+ h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
+
+ /* File creation property list */
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set the strategy to paged aggregation */
+ if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, FALSE, (hsize_t)1) < 0)
+ FAIL_STACK_ERROR
+
+ /* Create the file to work on */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get a pointer to the internal file object */
+ if(NULL == (f = (H5F_t *)H5I_object(fid)))
+ FAIL_STACK_ERROR
+
+ /* Allocate a small meta data block with addr1 */
+ addr1 = H5MF_alloc(f, H5FD_MEM_OHDR, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE98);
+
+ /* Try extending the block with addr1 at EOF not crossing page boundary */
+ was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, H5FD_MEM_OHDR, (haddr_t)addr1, (hsize_t)TBLOCK_SIZE98, (hsize_t)3100);
+ /* Should succeed */
+ if(was_extended != TRUE) TEST_ERROR
+
+ /* Allocate 2 small meta data blocks with addr2 and addr3--will be on another meta data page */
+ addr2 = H5MF_alloc(f, H5FD_MEM_OHDR, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE100);
+ addr3 = H5MF_alloc(f, H5FD_MEM_OHDR, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE150);
+
+ /* The block with addr2 should be page aligned */
+ /* The block with addr3 resides right next to the block with addr2 */
+ if(addr2 % TBLOCK_SIZE4096)
+ TEST_ERROR
+ if(addr3 != (addr2 + TBLOCK_SIZE100))
+ TEST_ERROR
+
+ /* Free the block with addr2 */
+ H5MF_xfree(f, H5FD_MEM_OHDR, H5AC_ind_read_dxpl_id, addr2, (hsize_t)TBLOCK_SIZE100);
+
+ /* Try extending the block with addr1 that will cross to the next page where the freed block with addr2 resides */
+ was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, H5FD_MEM_OHDR, (haddr_t)addr1, (hsize_t)TBLOCK_SIZE3198, (hsize_t)TBLOCK_SIZE100);
+ /* Shouldn't succeed--should not cross page boundary */
+ if(was_extended == TRUE) TEST_ERROR
+
+ /* Try extending the block with addr1 into the free-space section that is big enough to fulfill the request */
+ was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, H5FD_MEM_OHDR, (haddr_t)addr1, (hsize_t)TBLOCK_SIZE3198, (hsize_t)TBLOCK_SIZE50);
+ /* Should succeed */
+ if(was_extended != TRUE) TEST_ERROR
+
+ /* Free the block with addr1 */
+ H5MF_xfree(f, H5FD_MEM_OHDR, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TBLOCK_SIZE3248);
+
+ /* Allocate a new meta data block with addr1 */
+ /* A page-end threshold of size H5F_FILE_SPACE_PGEND_META_THRES lies at the end of the page */
+ /* The block ends right next to the threshold */
+ addr1 = H5MF_alloc(f, H5FD_MEM_OHDR, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE3286);
+
+ /* Try extending the block into the threshold with size > H5F_FILE_SPACE_PGEND_META_THRES */
+ was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, H5FD_MEM_OHDR, (haddr_t)addr1, (hsize_t)TBLOCK_SIZE3286, (hsize_t)TBLOCK_SIZE11);
+ /* Shouldn't succeed */
+ if(was_extended == TRUE) TEST_ERROR
+
+ /* Try extending the block into the threshold with size < H5F_FILE_SPACE_PGEND_META_THRES */
+ was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, H5FD_MEM_OHDR, (haddr_t)addr1, (hsize_t)TBLOCK_SIZE3286, (hsize_t)TBLOCK_SIZE2);
+ /* Should succeed */
+ if(was_extended != TRUE) TEST_ERROR
+
+ /* Free the block with addr3--will merge with the remaining sections to become a page and then free the page */
+ H5MF_xfree(f, H5FD_MEM_OHDR, H5AC_ind_read_dxpl_id, addr3, (hsize_t)TBLOCK_SIZE150);
+
+ /* Allocate a small raw data block with saddr1 */
+ saddr1 = H5MF_alloc(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE4086);
+
+ /* Try extending the block crossing the page boundary */
+ was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, H5FD_MEM_DRAW, (haddr_t)saddr1, (hsize_t)TBLOCK_SIZE4086, (hsize_t)TBLOCK_SIZE11);
+ /* Shouldn't succeed */
+ if(was_extended == TRUE) TEST_ERROR
+
+ /* Try extending the block not crossing page boundary */
+ was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, H5FD_MEM_DRAW, (haddr_t)saddr1, (hsize_t)TBLOCK_SIZE4086, (hsize_t)TBLOCK_SIZE10);
+ /* Should succeed */
+ if(was_extended != TRUE) TEST_ERROR
+
+ /* The extended block is now "large" in size */
+ /* Try extending the block */
+ was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, H5FD_MEM_DRAW, (haddr_t)saddr1, (hsize_t)TBLOCK_SIZE4096, (hsize_t)TBLOCK_SIZE10);
+ /* Should succeed */
+ if(was_extended != TRUE) TEST_ERROR
+
+ /* Try extending the large-sized block */
+ was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, H5FD_MEM_DRAW, (haddr_t)saddr1, (hsize_t)TBLOCK_SIZE4106, (hsize_t)TBLOCK_SIZE5000);
+ /* Should not succeed because the mis-aligned fragment in the page is in the large-sized free-space manager */
+ if(was_extended == TRUE) TEST_ERROR
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close the property list */
+ if(H5Pclose(fcpl) < 0)
+ FAIL_STACK_ERROR
+
+ PASSED()
+
+ } else {
+ SKIPPED();
+ puts(" Current VFD doesn't support paged aggregation");
+ }
+
+ return(0);
+
+error:
+ H5E_BEGIN_TRY {
+ H5Fclose(fid);
+ H5Pclose(fcpl);
+ } H5E_END_TRY;
+ return(1);
+
+} /* test_page_small_try_extend() */
+
+/*-------------------------------------------------------------------------
+ * Function: test_page_large_try_extend
+ *
+ * Purpose: To verify that extending a large block via H5MF_try_extend()
+ * is done properly when paged aggregation is enabled.
+ *
+ * Return: Success: 0
+ * Failure: number of errors
+ *
+ * Programmer: Vailin Choi; Jan 2013
+ *
+ *-------------------------------------------------------------------------
+ */
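+/*
+ * In brief (as exercised below): a large, page-aligned block is expected to
+ * extend only when there is an adjoining free-space section in the generic
+ * manager big enough to satisfy the request; otherwise H5MF_try_extend()
+ * should report that the block was not extended.
+ */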
+static unsigned
+test_page_large_try_extend(const char *env_h5_drvr, hid_t fapl)
+{
+
+ hid_t fid = -1; /* File ID */
+ hid_t fcpl = -1; /* File creation property list */
+ H5F_t *f = NULL; /* Internal file object pointer */
+ haddr_t gaddr1, gaddr2, gaddr3, gaddr4; /* Addresses for large data blocks */
+ hbool_t contig_addr_vfd; /* Whether VFD used has a contiguous address space */
+ htri_t was_extended; /* Whether the block can be extended or not */
+ char filename[FILENAME_LEN]; /* Filename to use */
+
+ TESTING("Paged aggregation for file space: H5MF_try_extend() a large block");
+
+ /* Check whether the current VFD provides a contiguous address space (i.e., is not the split/multi driver) */
contig_addr_vfd = (hbool_t)(HDstrcmp(env_h5_drvr, "split") && HDstrcmp(env_h5_drvr, "multi"));
+
if(contig_addr_vfd) {
+
/* Set the filename to use for this test (dependent on fapl) */
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
+ /* File creation property list */
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set the strategy to paged aggregation */
+ if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, FALSE, (hsize_t)1) < 0)
+ FAIL_STACK_ERROR
+
/* Create the file to work on */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl)) < 0)
FAIL_STACK_ERROR
/* Get a pointer to the internal file object */
- if(NULL == (f = (H5F_t *)H5I_object(file)))
+ if(NULL == (f = (H5F_t *)H5I_object(fid)))
FAIL_STACK_ERROR
- /* Allocate the first block of type H5FD_MEM_SUPER */
- type = H5FD_MEM_SUPER;
- addr1 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ /* Allocate a large data block with gaddr1 */
+ gaddr1 = H5MF_alloc(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE6000);
- /* Allocate the second block of type H5FD_MEM_SUPER */
- H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE50);
+ /* Should be page aligned */
+ if(gaddr1 % TBLOCK_SIZE4096)
+ TEST_ERROR
- /* Allocate the first block of type H5FD_MEM_DRAW */
- stype = H5FD_MEM_DRAW;
- saddr1 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ /* Extending the block with gaddr1 at EOF to become 2 pages */
+ was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, H5FD_MEM_DRAW, (haddr_t)gaddr1, (hsize_t)TBLOCK_SIZE6000, (hsize_t)TBLOCK_SIZE2192);
+ /* Should succeed */
+ if(was_extended != TRUE) TEST_ERROR
+
+ /* Allocate a large data block with gaddr2 */
+ gaddr2 = H5MF_alloc(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE8000);
+ /* Should be page aligned */
+ if(gaddr2 % TBLOCK_SIZE4096)
+ TEST_ERROR
- /* Free the first block of type H5FD_MEM_SUPER */
- H5MF_xfree(f, type, H5AC_ind_read_dxpl_id, addr1, (hsize_t)TEST_BLOCK_SIZE30);
+ /* Try extending the block with gaddr1 */
+ was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, H5FD_MEM_DRAW, (haddr_t)gaddr1, (hsize_t)TBLOCK_SIZE8192, (hsize_t)TBLOCK_SIZE50);
+ /* Should not succeed */
+ if(was_extended == TRUE) TEST_ERROR
- /* Allocate the second block of type H5FD_MEM_DRAW */
- saddr2 = H5MF_alloc(f, stype, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ /* Allocate a large data block with gaddr3 */
+ gaddr3 = H5MF_alloc(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE8000);
+ /* Should be page aligned */
+ if(gaddr3 % TBLOCK_SIZE4096)
+ TEST_ERROR
- /* Verify that saddr1 is not addr1 */
- if(saddr2 == addr1) TEST_ERROR
+ /* Try extending the block with gaddr2--there is a free-space section big enough to fulfill the request */
+ was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, H5FD_MEM_DRAW, (haddr_t)gaddr2, (hsize_t)TBLOCK_SIZE8000, (hsize_t)TBLOCK_SIZE100);
+ /* Should succeed */
+ if(was_extended == FALSE) TEST_ERROR
- /* Free the first block of type H5FD_MEM_DRAW */
- H5MF_xfree(f, stype, H5AC_ind_read_dxpl_id, saddr1, (hsize_t)TEST_BLOCK_SIZE30);
+ /* Try extending the block with gaddr2--there is no free-space section big enough to fulfill the request */
+ was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, H5FD_MEM_DRAW, (haddr_t)gaddr2, (hsize_t)TBLOCK_SIZE8100, (hsize_t)TBLOCK_SIZE100);
+ /* Should not succeed */
+ if(was_extended == TRUE) TEST_ERROR
- /* Allocate the third block of type H5FD_MEM_SUPER */
- addr3 = H5MF_alloc(f, type, H5AC_ind_read_dxpl_id, (hsize_t)TEST_BLOCK_SIZE30);
+ /* Try extending the block with gaddr2--there is a free-space section big enough to fulfill the request */
+ was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, H5FD_MEM_DRAW, (haddr_t)gaddr2, (hsize_t)TBLOCK_SIZE8100, (hsize_t)TBLOCK_SIZE90);
+ /* Should succeed */
+ if(was_extended == FALSE) TEST_ERROR
- /* Verify that addr3 is not saddr1 */
- if(addr3 == saddr1) TEST_ERROR
+ /* Try extending the block with gaddr2 */
+ /* There is no free-space section big enough to fulfill the request (request is < H5F_FILE_SPACE_PGEND_META_THRES) */
+ was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, H5FD_MEM_DRAW, (haddr_t)gaddr2, (hsize_t)TBLOCK_SIZE8190, (hsize_t)TBLOCK_SIZE5);
+ /* Should not succeed */
+ if(was_extended == TRUE) TEST_ERROR
- if(H5Fclose(file) < 0)
+ /* Allocate a large data block with gaddr4 */
+ gaddr4 = H5MF_alloc(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE5000);
+ /* Should be page aligned */
+ if(gaddr4 % TBLOCK_SIZE4096)
+ TEST_ERROR
+
+ /* Free the block with gaddr3--will merge with remaining free space to become 2 pages + section (size 2) in previous page */
+ H5MF_xfree(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, gaddr3, (hsize_t)TBLOCK_SIZE8000);
+
+ /* Try extending the block with gaddr2 crossing page boundary--there is free-space section big enough to fulfill the request */
+ was_extended = H5MF_try_extend(f, H5AC_ind_read_dxpl_id, H5FD_MEM_DRAW, (haddr_t)gaddr2, (hsize_t)TBLOCK_SIZE8190, (hsize_t)TBLOCK_SIZE5);
+ /* Should succeed */
+ if(was_extended == FALSE) TEST_ERROR
+
+ /* Close file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close the property list */
+ if(H5Pclose(fcpl) < 0)
FAIL_STACK_ERROR
PASSED()
- } /* end if */
- else {
- SKIPPED();
- puts(" Current VFD doesn't support metadata aggregator");
- } /* end else */
+
+ } else {
+ SKIPPED();
+ puts(" Current VFD doesn't support paged aggregation strategy");
+ }
return(0);
error:
H5E_BEGIN_TRY {
- H5Fclose(file);
+ H5Fclose(fid);
+ H5Pclose(fcpl);
} H5E_END_TRY;
return(1);
-} /* test_dichotomy() */
+
+} /* test_page_large_try_extend() */
+
+/*-------------------------------------------------------------------------
+ * Function: test_page_large
+ *
+ * Purpose: To verify that allocations and de-allocations for large data
+ * are done properly when paged aggregation is enabled.
+ *
+ * Return: Success: 0
+ * Failure: number of errors
+ *
+ * Programmer: Vailin Choi; Jan 2013
+ *
+ *-------------------------------------------------------------------------
+ */
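+/*
+ * Size bookkeeping used below (assuming the default 4096-byte page):
+ * a 6000-byte request occupies 2 pages (8192 bytes), leaving 8192 - 6000 = 2192
+ * bytes with the generic free-space manager; an 8000-byte request leaves
+ * 8192 - 8000 = 192 bytes; a 12000-byte request occupies 3 pages, leaving
+ * 12288 - 12000 = 288 bytes.
+ */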
+static unsigned
+test_page_large(const char *env_h5_drvr, hid_t fapl)
+{
+
+ hid_t fid = -1; /* File ID */
+ hid_t fcpl = -1; /* File creation property list ID */
+ H5F_t *f = NULL; /* Internal file object pointer */
+ haddr_t gaddr1, gaddr2, gaddr3, gaddr4; /* Addresses for blocks */
+ hbool_t contig_addr_vfd; /* Whether VFD used has a contiguous address space */
+ H5FS_stat_t fs_stat; /* Information for free-space manager */
+ h5_stat_size_t file_size; /* File size */
+ char filename[FILENAME_LEN]; /* Filename to use */
+
+ TESTING("Paged aggregation for file space: large allocations and de-allocations");
+
+ /* Check whether the current VFD provides a contiguous address space (i.e., is not the split/multi driver) */
+ contig_addr_vfd = (hbool_t)(HDstrcmp(env_h5_drvr, "split") && HDstrcmp(env_h5_drvr, "multi"));
+
+ if(contig_addr_vfd) {
+
+ /* Set the filename to use for this test (dependent on fapl) */
+ h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
+
+ /* File creation property list */
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set the strategy to paged aggregation */
+ if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, FALSE, (hsize_t)1) < 0)
+ FAIL_STACK_ERROR
+
+ /* Create the file to work on */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get a pointer to the internal file object */
+ if(NULL == (f = (H5F_t *)H5I_object(fid)))
+ FAIL_STACK_ERROR
+
+ /* Allocate a large data block with gaddr1 */
+ /* 1 page + 1904 bytes; 2192 bytes in free-space manager */
+ gaddr1 = H5MF_alloc(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE6000);
+
+ /* Should be page aligned */
+ if(gaddr1 % TBLOCK_SIZE4096)
+ TEST_ERROR
+
+ /* Allocate a large data block with gaddr2--should be on another page */
+ /* Allocate 1 page + 3904 bytes; 192 bytes in free-space manager */
+ gaddr2 = H5MF_alloc(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE8000);
+ /* Should be page aligned */
+ if(gaddr2 % TBLOCK_SIZE4096)
+ TEST_ERROR
+
+ /* Allocate a large data block with gaddr3--should be on another page */
+ /* Allocate 2 pages + 3808 bytes; 288 bytes in free-space manager */
+ gaddr3 = H5MF_alloc(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE12000);
+ if(!H5F_addr_defined(gaddr3))
+ TEST_ERROR
+
+ /* Free the block with gaddr2 */
+ /* Merged sections: 2192 + 8000 + 192 = 10384 */
+ H5MF_xfree(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, gaddr2, (hsize_t)TBLOCK_SIZE8000);
+
+ /* Get free-space info */
+ if(H5FS_stat_info(f, f->shared->fs_man[H5F_MEM_PAGE_GENERIC], &fs_stat) < 0)
+ FAIL_STACK_ERROR
+
+ /* Verify that the manager contains 2 free-space sections: 10384 and 288 */
+ if(fs_stat.tot_sect_count != 2)
+ TEST_ERROR
+ if(fs_stat.tot_space != (10384+288))
+ TEST_ERROR
+
+ /* Allocate a large data block with gaddr4--there is a free-space section able to fulfill the request */
+ /* Free-space sections: 2192 + 3192 + 288 = 5672 bytes */
+ gaddr4 = H5MF_alloc(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE5000);
+
+ /* Should be page aligned */
+ if(gaddr4 % TBLOCK_SIZE4096)
+ TEST_ERROR
+ if(gaddr4 != gaddr2)
+ TEST_ERROR
+
+ /* Get free-space info */
+ if(H5FS_stat_info(f, f->shared->fs_man[H5F_MEM_PAGE_GENERIC], &fs_stat) < 0)
+ FAIL_STACK_ERROR
+ /* Verify that there are 3 free-space sections */
+ if(fs_stat.tot_sect_count != 3)
+ TEST_ERROR
+ if(fs_stat.tot_space != (2192+3192+288))
+ TEST_ERROR
+
+ /* Free the two blocks with gaddr1 and gaddr4 */
+ H5MF_xfree(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, gaddr1, (hsize_t)TBLOCK_SIZE6000);
+ H5MF_xfree(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, gaddr4, (hsize_t)TBLOCK_SIZE5000);
+
+ /* Get free-space info */
+ if(H5FS_stat_info(f, f->shared->fs_man[H5F_MEM_PAGE_GENERIC], &fs_stat) < 0)
+ FAIL_STACK_ERROR
+ /* Verify that there are 2 free-space sections: 16384 (4 pages) + 288 */
+ if(fs_stat.tot_sect_count != 2)
+ TEST_ERROR
+ if(fs_stat.tot_space != (16384+288))
+ TEST_ERROR
+
+ /* Close file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close the property list */
+ if(H5Pclose(fcpl) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get the size of the file */
+ if((file_size = h5_get_file_size(filename, fapl)) < 0)
+ TEST_ERROR
+
+ /* Verify that the file size ends on a page boundary */
+ if(file_size % TBLOCK_SIZE4096)
+ TEST_ERROR
+
+ PASSED()
+
+ } else {
+ SKIPPED();
+ puts(" Current VFD doesn't support paged aggregation strategy");
+ }
+
+ return(0);
+
+error:
+ H5E_BEGIN_TRY {
+ H5Fclose(fid);
+ H5Pclose(fcpl);
+ } H5E_END_TRY;
+ return(1);
+
+} /* test_page_large() */
+
+/*-------------------------------------------------------------------------
+ * Function: test_page_small
+ *
+ * Purpose: To verify allocations and de-allocations for small meta/raw data
+ * are done properly when paged aggregation is enabled.
+ *
+ * Return: Success: 0
+ * Failure: number of errors
+ *
+ * Programmer: Vailin Choi; Jan 2013
+ *
+ *-------------------------------------------------------------------------
+ */
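+/*
+ * In brief (as exercised below): small (less than one page) meta data and raw
+ * data requests are sub-allocated from separate pages by their respective
+ * small free-space managers, so the first small raw data block lands
+ * page-aligned on a new page instead of sharing the meta data page.
+ */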
+static unsigned
+test_page_small(const char *env_h5_drvr, hid_t fapl)
+{
+ hid_t fid = -1; /* File ID */
+ hid_t fcpl = -1; /* File creation property list */
+ H5F_t *f = NULL; /* Internal file object pointer */
+ haddr_t addr2, addr3, addr4, addr5; /* Addresses for blocks */
+ haddr_t addr9, addr10, addr11; /* Address for small meta data blocks */
+ haddr_t saddr1, saddr2; /* Addresses for small raw data blocks */
+ H5FS_stat_t fs_stat; /* Information for free-space manager */
+ char filename[FILENAME_LEN]; /* Filename to use */
+ hbool_t multi = FALSE, split = FALSE, family = FALSE;
+
+ TESTING("Paged aggregation for file space: small allocations and de-allocations");
+
+ if(!HDstrcmp(env_h5_drvr, "split"))
+ split = TRUE;
+ else if(!HDstrcmp(env_h5_drvr, "multi"))
+ multi = TRUE;
+ else if(!HDstrcmp(env_h5_drvr, "family"))
+ family = TRUE;
+
+ if(!multi && !split) {
+
+ /* Set the filename to use for this test (dependent on fapl) */
+ h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
+
+ /* File creation property list */
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set the strategy to paged aggregation */
+ if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, FALSE, (hsize_t)1) < 0)
+ FAIL_STACK_ERROR
+
+ /* Create the file to work on */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ /* Get a pointer to the internal file object */
+ if(NULL == (f = (H5F_t *)H5I_object(fid)))
+ FAIL_STACK_ERROR
+
+ /* Allocate 2 small meta data blocks: addr1, addr2 */
+ H5MF_alloc(f, H5FD_MEM_OHDR, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
+ addr2 = H5MF_alloc(f, H5FD_MEM_OHDR, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE50);
+
+ /* Allocate a small raw data block with saddr1 */
+ saddr1 = H5MF_alloc(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
+ /* Should be on the second page and page aligned */
+ if(saddr1 % TBLOCK_SIZE4096)
+ TEST_ERROR
+
+ /* Allocate a small raw data block with saddr2 */
+ saddr2 = H5MF_alloc(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE50);
+ /* Should not be page aligned */
+ if(!(saddr2 % TBLOCK_SIZE4096))
+ TEST_ERROR
+ /* Should be next to the block with saddr1 */
+ if(saddr2 != (saddr1 + TBLOCK_SIZE30))
+ TEST_ERROR
+
+ /* Allocate a small meta data block with addr3--there is no free-space section big enough to fulfill the request */
+ addr3 = H5MF_alloc(f, H5FD_MEM_OHDR, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE4020);
+ /* Should be on the third page and page aligned */
+ if(addr3 % TBLOCK_SIZE4096)
+ TEST_ERROR
+
+ /* Allocate a small meta data block with addr4--there is a free-space section big enough to fulfill the request */
+ addr4 = H5MF_alloc(f, H5FD_MEM_OHDR, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE80);
+ /* Should not be page aligned */
+ if(!(addr4 % TBLOCK_SIZE4096))
+ TEST_ERROR
+ /* Should be next to the block with addr2 */
+ if(addr4 != (addr2 + TBLOCK_SIZE50))
+ TEST_ERROR
+
+ /* Allocate a small meta data block with addr5--there is a free-space section big enough to fulfill the request */
+ addr5 = H5MF_alloc(f, H5FD_MEM_OHDR, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE40);
+ /* Should not be page aligned */
+ if(!(addr5 % TBLOCK_SIZE4096))
+ TEST_ERROR
+
+ /* Should be next to the block with addr3 */
+ if(addr5 != (addr3 + TBLOCK_SIZE4020))
+ TEST_ERROR
+
+ /* Allocate a small meta data block with addr6--taking up the remaining space in the first page */
+ if(family)
+ H5MF_alloc(f, H5FD_MEM_OHDR, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE3080);
+ else
+ H5MF_alloc(f, H5FD_MEM_OHDR, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE3088);
+
+ /* Allocate a small meta data block with addr7--taking up the remaining space in the third page */
+ H5MF_alloc(f, H5FD_MEM_OHDR, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE36);
+
+ /* Allocate 2 small meta data blocks: addr8, addr9--there is no free-space to fulfill the request */
+ H5MF_alloc(f, H5FD_MEM_OHDR, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE50);
+ addr9 = H5MF_alloc(f, H5FD_MEM_OHDR, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE80);
+
+ /* Free the block with saddr1 and saddr2--merge with remaining section to become a page which will be returned to the large manager */
+ H5MF_xfree(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, saddr1, (hsize_t)TBLOCK_SIZE30);
+ H5MF_xfree(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, saddr2, (hsize_t)TBLOCK_SIZE50);
+
+ /* Verify that the large manager does contain a section with file space page size (default is 4096) */
+ if(!f->shared->fs_man[H5F_MEM_PAGE_GENERIC])
+ TEST_ERROR
+ if(H5FS_stat_info(f, f->shared->fs_man[H5F_MEM_PAGE_GENERIC], &fs_stat) < 0)
+ FAIL_STACK_ERROR
+ if(fs_stat.tot_space != TBLOCK_SIZE4096)
+ TEST_ERROR
+
+ /* Allocate a small meta data block with addr10--there is a free-space section big enough to fulfill the request */
+ addr10 = H5MF_alloc(f, H5FD_MEM_OHDR, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE3900);
+ /* The block should be next to the block with addr9 */
+ if(addr10 != (addr9 + TBLOCK_SIZE80))
+ TEST_ERROR
+
+ /* Allocate a small meta data block with addr11 */
+ /* The current free-space section is unable to fulfill the request; obtain a page from the large manager */
+ addr11 = H5MF_alloc(f, H5FD_MEM_OHDR, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE80);
+ /* The address of the block should be the same as that of the freed block with saddr1 */
+ if(addr11 != saddr1)
+ TEST_ERROR
+
+ /* Close file */
+ if(H5Fclose(fid) < 0)
+ FAIL_STACK_ERROR
+
+ /* Close the property list */
+ if(H5Pclose(fcpl) < 0)
+ FAIL_STACK_ERROR
+
+ PASSED()
+
+ } else {
+ SKIPPED();
+ puts(" Current VFD doesn't support paged aggregation strategy");
+ }
+
+ return(0);
+
+error:
+ H5E_BEGIN_TRY {
+ H5Fclose(fid);
+ H5Pclose(fcpl);
+ } H5E_END_TRY;
+ return(1);
+
+} /* test_page_small() */
+
+/*-------------------------------------------------------------------------
+ * Function: test_page_alignment
+ *
+ * Purpose: To verify the proper alignment is used when H5Pset_alignment()
+ * is set and paged aggregation is enabled.
+ *
+ * Return: Success: 0
+ * Failure: number of errors
+ *
+ * Programmer: Vailin Choi; Jan 2013
+ *
+ *-------------------------------------------------------------------------
+ */
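+/*
+ * In brief (as exercised below): with paged aggregation enabled the file
+ * space page size (default 4096) is used as the alignment, overriding the
+ * value set with H5Pset_alignment; without paged aggregation the
+ * H5Pset_alignment value (16 in this test) is honored.
+ */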
+static unsigned
+test_page_alignment(const char *env_h5_drvr, hid_t fapl)
+{
+
+ hid_t fid = -1; /* File ID */
+ hid_t fcpl = -1; /* File creation property list ID */
+ hid_t fcpl2 = -1; /* File creation property list ID */
+ hid_t fapl_new = -1; /* File access property list ID */
+ H5F_t *f = NULL; /* Internal file object pointer */
+ haddr_t addr1, addr2; /* Addresses for small meta data blocks */
+ haddr_t saddr1, saddr2; /* Addresses for small raw data blocks */
+ haddr_t gaddr1, gaddr2; /* Addresses for blocks */
+ char filename[FILENAME_LEN]; /* Filename to use */
+ hbool_t split = FALSE, multi = FALSE;
+
+ TESTING("Paged aggregation and H5Pset_alignment: verify proper alignment is used");
+
+ /* Check for split or multi driver */
+ if(!HDstrcmp(env_h5_drvr, "split"))
+ split = TRUE;
+ else if(!HDstrcmp(env_h5_drvr, "multi"))
+ multi = TRUE;
+
+ if(!multi && !split) {
+
+ /* Set the filename to use for this test (dependent on fapl) */
+ h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
+
+ /*
+ * Case 1: Verify that the alignment in use is the default file space
+ * page size when paged aggregation is enabled.
+ */
+
+ if((fapl_new = H5Pcopy(fapl)) < 0) TEST_ERROR
+
+ /* The alignment to use will be the library's default file space page size */
+ if(H5Pset_libver_bounds(fapl_new, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ FAIL_STACK_ERROR
+
+ /* Set alignment value to 16 */
+ if(H5Pset_alignment(fapl_new, (hsize_t)0, (hsize_t)TEST_ALIGN16) < 0)
+ TEST_ERROR
+
+ if(split || multi) {
+ hid_t memb_fapl;
+ H5FD_mem_t memb_map[H5FD_MEM_NTYPES];
+ hid_t memb_fapl_arr[H5FD_MEM_NTYPES];
+ char *memb_name[H5FD_MEM_NTYPES];
+ haddr_t memb_addr[H5FD_MEM_NTYPES];
+ hbool_t relax;
+ H5FD_mem_t mt;
+
+ /* Create fapl */
+ if((memb_fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ TEST_ERROR
+
+ /* Set alignment */
+ if(H5Pset_alignment(memb_fapl, 0, (hsize_t)TEST_ALIGN16) < 0)
+ TEST_ERROR
+
+ HDmemset(memb_name, 0, sizeof memb_name);
+
+ if(split) {
+ /* Set split driver with new FAPLs */
+ if(H5Pset_fapl_split(fapl_new, "-m.h5", memb_fapl, "-r.h5", memb_fapl) < 0)
+ TEST_ERROR
+
+ /* Get current multi settings */
+ if(H5Pget_fapl_multi(fapl_new, memb_map, memb_fapl_arr, memb_name, memb_addr, &relax) < 0)
+ TEST_ERROR
+
+ /* Set memb_addr aligned */
+ memb_addr[H5FD_MEM_SUPER] = ((memb_addr[H5FD_MEM_SUPER] + TBLOCK_SIZE4096 - 1) / TBLOCK_SIZE4096) * TBLOCK_SIZE4096;
+ memb_addr[H5FD_MEM_DRAW] = ((memb_addr[H5FD_MEM_DRAW] + TBLOCK_SIZE4096 - 1) / TBLOCK_SIZE4096) * TBLOCK_SIZE4096;
+
+ /* Set the aligned member addresses (the split driver is configured through the multi interface) */
+ if(H5Pset_fapl_multi(fapl_new, memb_map, memb_fapl_arr, (const char * const *)memb_name, memb_addr, relax) < 0)
+ TEST_ERROR
+
+ } else {
+ /* Get current multi settings */
+ if(H5Pget_fapl_multi(fapl_new, memb_map, NULL, memb_name, memb_addr, &relax) < 0)
+ TEST_ERROR
+
+ /* Populate memb_fapl_arr */
+ /* Set memb_addr aligned */
+ for(mt = H5FD_MEM_DEFAULT; mt < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, mt)) {
+ memb_fapl_arr[mt] = memb_fapl;
+ memb_addr[mt] = ((memb_addr[mt] + TBLOCK_SIZE4096 - 1) / TBLOCK_SIZE4096) * TBLOCK_SIZE4096;
+ }
+
+ /* Set multi driver with new FAPLs */
+ if(H5Pset_fapl_multi(fapl_new, memb_map, memb_fapl_arr, (const char * const *)memb_name, memb_addr, relax) < 0)
+ TEST_ERROR
+
+ } /* end else */
+
+ /* Free memb_name */
+ for(mt = H5FD_MEM_DEFAULT; mt < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, mt))
+ free(memb_name[mt]);
+
+ /* Close memb_fapl */
+ if(H5Pclose(memb_fapl) < 0)
+ TEST_ERROR
+ } /* end if */
+
+ /* File creation property list */
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ TEST_ERROR
+
+ /* Set the strategy to paged aggregation and persisting free space */
+ /* The alignment to use will be the library's default file space page size */
+ if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, TRUE, (hsize_t)1) < 0)
+ TEST_ERROR
+
+ /* Create the file to work on */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl_new)) < 0)
+ TEST_ERROR
+
+ /* Get a pointer to the internal file object */
+ if(NULL == (f = (H5F_t *)H5I_object(fid)))
+ TEST_ERROR
+
+ /* Allocate 2 small raw data blocks */
+ saddr1 = H5MF_alloc(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
+ saddr2 = H5MF_alloc(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE50);
+
+ /* Should be on the second page and page aligned on 4096 (default file space page size) */
+ if(saddr1 % TBLOCK_SIZE4096)
+ TEST_ERROR
+
+ /* Should be next to the block with saddr1 */
+ if(saddr2 != (saddr1 + TBLOCK_SIZE30))
+ TEST_ERROR
+
+ /* Allocate 2 large raw data blocks */
+ gaddr1 = H5MF_alloc(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE5000);
+ gaddr2 = H5MF_alloc(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE8000);
+
+ /* Should be on the 3rd page and page aligned */
+ if(gaddr1 % TBLOCK_SIZE4096)
+ TEST_ERROR
+
+ /* Should be on the 4th page and page aligned */
+ if(gaddr2 % TBLOCK_SIZE4096)
+ TEST_ERROR
+
+ /* Close the file creation property list */
+ if(H5Pclose(fcpl) < 0)
+ TEST_ERROR
+
+ /* Close file */
+ if(H5Fclose(fid) < 0)
+ TEST_ERROR
+
+ /*
+ * Case 2: Verify that the alignment in use is the alignment set
+ * via H5Pset_alignment when paged aggregation is not enabled.
+ */
+ /* fapl_new has latest format and H5Pset_alignment set */
+ /* Disable small data block mechanism */
+ if(H5Pset_small_data_block_size(fapl_new, (hsize_t)0) < 0)
+ TEST_ERROR
+ /* Disable meta data block mechanism */
+ if(H5Pset_meta_block_size(fapl_new, (hsize_t)0) < 0)
+ TEST_ERROR
+
+ /* Create the file to work on */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl_new)) < 0)
+ TEST_ERROR
+
+ /* Get a pointer to the internal file object */
+ if(NULL == (f = (H5F_t *)H5I_object(fid)))
+ TEST_ERROR
+
+ /* Allocate 2 small meta data blocks */
+ addr1 = H5MF_alloc(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
+ addr2 = H5MF_alloc(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE50);
+
+ /* Should be aligned on 16 */
+ if(addr1 % TEST_ALIGN16 || addr2 % TEST_ALIGN16)
+ TEST_ERROR
+
+ /* addr2 should be right next to the block with addr1 */
+ if((addr1 + TBLOCK_SIZE30) % TEST_ALIGN16)
+ if(addr2 != (((addr1 + TBLOCK_SIZE30) / TEST_ALIGN16) + 1) * TEST_ALIGN16)
+ TEST_ERROR
+
+ /* Allocate 2 small raw data blocks */
+ saddr1 = H5MF_alloc(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE80);
+ saddr2 = H5MF_alloc(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE100);
+
+ /* Should be aligned on 16 */
+ if(saddr1 % TEST_ALIGN16 || saddr2 % TEST_ALIGN16)
+ TEST_ERROR
+
+ if(!multi && !split) {
+ /* saddr1 should be right next to the block with addr2 */
+ if((addr2 + TBLOCK_SIZE50) % TEST_ALIGN16)
+ if(saddr1 != (((addr2 + TBLOCK_SIZE50) / TEST_ALIGN16) + 1) * TEST_ALIGN16)
+ TEST_ERROR
+ }
+
+ /* saddr2 should be right next to the block with saddr1 */
+ if((saddr1 + TBLOCK_SIZE80) % TEST_ALIGN16)
+ if(saddr2 != (((saddr1 + TBLOCK_SIZE80) / TEST_ALIGN16) + 1) * TEST_ALIGN16)
+ TEST_ERROR
+
+ /* Close file */
+ if(H5Fclose(fid) < 0)
+ TEST_ERROR
+
+ /*
+ * Case 3: Verify that the alignment set via H5Pset_alignment is used
+ * when the strategy is H5F_FSPACE_STRATEGY_AGGR (paged aggregation is not
+ * enabled), even with a non-default file space page size.
+ */
+ /* File creation property list */
+ if((fcpl2 = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ TEST_ERROR
+
+ /* Set file space page size */
+ if(H5Pset_file_space_page_size(fcpl2, (hsize_t)TBLOCK_SIZE8192) < 0)
+ TEST_ERROR
+
+ /* Set the strategy to H5F_FSPACE_STRATEGY_AGGR; the meta/raw data block sizes are 0 as set in fapl_new */
+ if(H5Pset_file_space_strategy(fcpl2, H5F_FSPACE_STRATEGY_AGGR, FALSE, (hsize_t)1) < 0)
+ TEST_ERROR
+
+ /* fapl_new has the latest format, H5Pset_alignment set, and the meta/raw data blocks disabled */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl2, fapl_new)) < 0)
+ TEST_ERROR
+
+ /* Get a pointer to the internal file object */
+ if(NULL == (f = (H5F_t *)H5I_object(fid)))
+ TEST_ERROR
+
+ /* Allocate 2 small raw data blocks */
+ saddr1 = H5MF_alloc(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE30);
+ saddr2 = H5MF_alloc(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE50);
+
+ /* Should be aligned on 16 */
+ if(saddr1 % TEST_ALIGN16)
+ TEST_ERROR
+ if(saddr2 % TEST_ALIGN16)
+ TEST_ERROR
+
+ /* saddr2 should be right next to the block with saddr1 */
+ if((saddr1 + TBLOCK_SIZE30) % TEST_ALIGN16)
+ if(saddr2 != (((saddr1 + TBLOCK_SIZE30) / TEST_ALIGN16) + 1) * TEST_ALIGN16)
+ TEST_ERROR
+
+ /* Allocate a large raw data block */
+ gaddr1 = H5MF_alloc(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, (hsize_t)TBLOCK_SIZE5000);
+
+ /* Should be aligned on 16 */
+ if(gaddr1 % TEST_ALIGN16)
+ TEST_ERROR
+
+ /* gaddr1 is right next to the block with saddr2 */
+ if((saddr2 + TBLOCK_SIZE50) % TEST_ALIGN16)
+ if(gaddr1 != (((saddr2 + TBLOCK_SIZE50) / TEST_ALIGN16) + 1) * TEST_ALIGN16)
+ TEST_ERROR
+
+ /* There is no free-space manager involved for H5F_FSPACE_STRATEGY_AGGR strategy */
+ if(f->shared->fs_man[H5FD_MEM_DRAW] || f->shared->fs_man[H5FD_MEM_SUPER])
+ TEST_ERROR
+
+ /* Closing */
+ if(H5Fclose(fid) < 0)
+ TEST_ERROR
+ if(H5Pclose(fcpl2) < 0)
+ TEST_ERROR
+ if(H5Pclose(fapl_new) < 0)
+ TEST_ERROR
+
+ PASSED()
+
+ } else {
+ SKIPPED();
+ puts(" Current VFD doesn't support persisting free-space or paged aggregation strategy");
+ }
+
+ return(0);
+
+error:
+ H5E_BEGIN_TRY {
+ H5Fclose(fid);
+ H5Pclose(fcpl);
+ H5Pclose(fapl_new);
+ } H5E_END_TRY;
+ return(1);
+
+} /* test_page_alignment() */
int
main(void)
@@ -7625,33 +8867,21 @@ main(void)
/* Make a copy of the FAPL before adjusting the alignment */
if((new_fapl = H5Pcopy(fapl)) < 0) TEST_ERROR
-
- /* alignment is not set for the following tests */
- if(H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)1) < 0)
- TEST_ERROR
-
- /* meta/small data is set to 2048 for the following tests */
- if(H5Pset_meta_block_size(fapl, (hsize_t)TEST_BLOCK_SIZE2048) < 0)
- TEST_ERROR
- if(H5Pset_small_data_block_size(fapl, (hsize_t)TEST_BLOCK_SIZE2048) < 0)
- TEST_ERROR
-
- /* interaction with file allocation */
+ /* For old library format--interaction with file allocation */
nerrors += test_mf_eoa(env_h5_drvr, fapl);
nerrors += test_mf_eoa_shrink(env_h5_drvr, fapl);
nerrors += test_mf_eoa_extend(env_h5_drvr, fapl);
- /* interaction with temporary file space allocation */
- nerrors += test_mf_tmp(env_h5_drvr, fapl);
+ /* For old library format */
+ nerrors += test_dichotomy(new_fapl);
- /* interaction with free-space manager */
+ /* For old library format--interaction with free-space manager */
nerrors += test_mf_fs_start(fapl);
nerrors += test_mf_fs_alloc_free(fapl);
nerrors += test_mf_fs_extend(fapl);
nerrors += test_mf_fs_absorb(env_h5_drvr, fapl);
- nerrors += test_dichotomy(env_h5_drvr, new_fapl);
- /* interaction with meta/sdata aggregator */
+ /* For old library format--interaction with meta/sdata aggregator */
nerrors += test_mf_aggr_alloc1(env_h5_drvr, fapl);
nerrors += test_mf_aggr_alloc2(env_h5_drvr, fapl);
nerrors += test_mf_aggr_alloc3(env_h5_drvr, fapl);
@@ -7662,7 +8892,7 @@ main(void)
nerrors += test_mf_aggr_extend(env_h5_drvr, fapl);
nerrors += test_mf_aggr_absorb(env_h5_drvr, fapl);
- /* Tests for alignment */
+ /* For old library format--tests for alignment */
for(curr_test = TEST_NORMAL; curr_test < TEST_NTESTS; H5_INC_ENUM(test_type_t, curr_test)) {
switch(curr_test) {
@@ -7692,11 +8922,54 @@ main(void)
nerrors += test_mf_align_alloc6(env_h5_drvr, fapl, new_fapl);
} /* end for */
- /* tests to verify that file's free-space managers are persistent */
- nerrors += test_mf_fs_drivers(fapl);
+ /* For old and new format--interaction with temporary file space allocation */
+ nerrors += test_mf_tmp(env_h5_drvr, fapl, FALSE);
+ nerrors += test_mf_tmp(env_h5_drvr, fapl, TRUE);
+
+ /* For old and new format--free-space merge/shrunk away */
+
+ /* Temporary: modified to skip the multi/split drivers, for which file creation
+ fails when persisting free-space or using the paged aggregation strategy */
+ nerrors += test_mf_fs_gone(env_h5_drvr, fapl, FALSE);
+ nerrors += test_mf_fs_gone(env_h5_drvr, fapl, TRUE);
+
+ /* Temporary: modified to skip the multi/split drivers, for which file creation
+ fails when persisting free-space or using the paged aggregation strategy */
+ nerrors += test_mf_strat_thres_gone(env_h5_drvr, fapl, FALSE);
+ nerrors += test_mf_strat_thres_gone(env_h5_drvr, fapl, TRUE);
- /* tests for file space management */
- nerrors += test_filespace_drivers(fapl);
+ /* For old and new format--persisting free-space */
+
+ /* Temporary: modified to skip the multi/split drivers, for which file creation
+ fails when persisting free-space or using the paged aggregation strategy */
+ nerrors += test_mf_fs_persist(env_h5_drvr, fapl, FALSE);
+ nerrors += test_mf_fs_persist(env_h5_drvr, fapl, TRUE);
+
+ /* Temporary: modified to skip the multi/split drivers, for which file creation
+ fails when persisting free-space or using the paged aggregation strategy */
+ nerrors += test_mf_strat_thres_persist(env_h5_drvr, fapl, FALSE);
+ nerrors += test_mf_strat_thres_persist(env_h5_drvr, fapl, TRUE);
+
+ /* Temporarily skipped for the multi/split drivers, for which file creation
+ fails when persisting free-space or using the paged aggregation strategy */
+#ifdef PB_OUT
+ /* Tests specific for multi and split files--persisting free-space */
+ nerrors += test_mf_fs_persist_split();
+ nerrors += test_mf_fs_persist_multi();
+#endif
+
+ /*
+ * Tests specific for file space paging
+ */
+ /* Temporary: the following 7 tests skip the multi/split drivers, for which
+ file creation fails when persisting free-space or using the paged aggregation strategy */
+ nerrors += test_page_small(env_h5_drvr, fapl);
+ nerrors += test_page_large(env_h5_drvr, fapl);
+ nerrors += test_page_large_try_extend(env_h5_drvr, fapl);
+ nerrors += test_page_small_try_extend(env_h5_drvr, fapl);
+ nerrors += test_page_try_shrink(env_h5_drvr, fapl);
+ nerrors += test_page_alloc_xfree(env_h5_drvr, fapl); /* can handle multi/split */
+ nerrors += test_page_alignment(env_h5_drvr, fapl); /* can handle multi/split */
/* tests for specific bugs */
nerrors += test_mf_bug1(env_h5_drvr, fapl);
diff --git a/test/mount.c b/test/mount.c
index 2502bbe..d2c0d21 100644
--- a/test/mount.c
+++ b/test/mount.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/mtime.c b/test/mtime.c
index 88ed31c..38e3960 100644
--- a/test/mtime.c
+++ b/test/mtime.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/none.h5 b/test/none.h5
new file mode 100644
index 0000000..bb6ff56
--- /dev/null
+++ b/test/none.h5
Binary files differ
diff --git a/test/ntypes.c b/test/ntypes.c
index bb6c973..b6f4de7 100644
--- a/test/ntypes.c
+++ b/test/ntypes.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/objcopy.c b/test/objcopy.c
index 4166284..b7f5673 100644
--- a/test/objcopy.c
+++ b/test/objcopy.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -7200,7 +7198,7 @@ test_copy_ext_link(hid_t fcpl_src, hid_t fcpl_dst, hid_t src_fapl, hid_t dst_fap
if(H5Gclose(gid) < 0) TEST_ERROR
/* create file to hold external links to the src file */
- if((fid_ext = H5Fcreate(ext_filename, H5F_ACC_TRUNC, H5P_DEFAULT, src_fapl)) < 0) TEST_ERROR
+ if((fid_ext = H5Fcreate(ext_filename, H5F_ACC_TRUNC, fcpl_src, src_fapl)) < 0) TEST_ERROR
/* create group in the file that will hold the external link */
if((gid = H5Gcreate2(fid_ext, NAME_GROUP_LINK, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR
diff --git a/test/ohdr.c b/test/ohdr.c
index 2ac9866..faec835 100644
--- a/test/ohdr.c
+++ b/test/ohdr.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Robb Matzke <matzke@llnl.gov>
diff --git a/test/page_buffer.c b/test/page_buffer.c
new file mode 100644
index 0000000..853ef93
--- /dev/null
+++ b/test/page_buffer.c
@@ -0,0 +1,2206 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/***********************************************************
+*
+* Test program: cache_page_buffer
+*
+* Tests the Page Buffer Feature.
+*
+*************************************************************/
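+
+/*
+ * A minimal usage sketch of enabling page buffering from the public API
+ * (the sizes and filename here are illustrative only; the tests below use
+ * their own values):
+ *
+ *     hid_t fcpl = H5Pcreate(H5P_FILE_CREATE);
+ *     hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
+ *     H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 0, (hsize_t)1);
+ *     H5Pset_file_space_page_size(fcpl, 4096);
+ *     H5Pset_page_buffer_size(fapl, 4 * 4096, 0, 0);  (buffer must be >= one page)
+ *     hid_t fid = H5Fcreate("example.h5", H5F_ACC_TRUNC, fcpl, fapl);
+ */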
+
+#include "h5test.h"
+
+#include "H5PBprivate.h"
+#include "H5Iprivate.h"
+
+/*
+ * This file needs to access private information from the H5F package.
+ */
+#define H5MF_FRIEND /*suppress error about including H5MFpkg */
+#include "H5MFpkg.h"
+
+#define H5F_FRIEND /*suppress error about including H5Fpkg */
+#define H5F_TESTING
+#include "H5Fpkg.h"
+
+
+#define FILENAME_LEN 1024
+#define NUM_DSETS 5
+#define NX 100
+#define NY 50
+
+/* helper routines */
+static unsigned create_file(char *filename, hid_t fcpl, hid_t fapl);
+static unsigned open_file(char *filename, hid_t fapl, hsize_t page_size, size_t page_buffer_size);
+
+/* test routines */
+static unsigned test_args(hid_t fapl, const char *env_h5_drvr);
+static unsigned test_raw_data_handling(hid_t orig_fapl, const char *env_h5_drvr);
+static unsigned test_lru_processing(hid_t orig_fapl, const char *env_h5_drvr);
+static unsigned test_min_threshold(hid_t orig_fapl, const char *env_h5_drvr);
+static unsigned test_stats_collection(hid_t orig_fapl, const char *env_h5_drvr);
+#ifdef H5_HAVE_PARALLEL
+static unsigned verify_page_buffering_disabled(hid_t orig_fapl,
+ const char *env_h5_drvr);
+#endif /* H5_HAVE_PARALLEL */
+
+const char *FILENAME[] = {
+ "filepaged",
+ NULL
+};
+
+
+/*-------------------------------------------------------------------------
+ * Function: create_file()
+ *
+ * Purpose: The purpose of this function appears to be a smoke check
+ * intended to exercise the page buffer.
+ *
+ * Specifically, the function creates a file, and then goes
+ * through a loop in which it creates four data sets, writes
+ * data to one of them, verifies the data written, and then
+ * deletes the three that it didn't write to.
+ *
+ * Any data mis-matches or failures reported by the HDF5
+ * library result in test failure.
+ *
+ * Return: 0 if test is successful
+ * 1 if test fails
+ *
+ * Programmer: unknown
+ * ?? / ?? / ??
+ *
+ *-------------------------------------------------------------------------
+ */
+static unsigned
+create_file(char *filename, hid_t fcpl, hid_t fapl)
+{
+ hid_t file_id = -1;
+ hid_t dset_id = -1;
+ hid_t grp_id = -1;
+ hid_t filespace = -1;
+ hsize_t dimsf[2] = {NX, NY}; /* dataset dimensions */
+ int *data = NULL; /* pointer to data buffer to write */
+ hid_t dcpl = -1;
+ int i;
+ int num_elements;
+ int j;
+ char dset_name[10];
+
+ if((file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl)) < 0)
+ FAIL_STACK_ERROR;
+
+ if((grp_id = H5Gcreate2(file_id, "GROUP", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR;
+
+ num_elements = NX * NY;
+ if((data = (int *)HDcalloc((size_t)num_elements, sizeof(int))) == NULL)
+ TEST_ERROR
+ for (i=0; i < (int)num_elements; i++)
+ data[i] = i;
+
+ if((filespace = H5Screate_simple(2, dimsf, NULL)) < 0)
+ FAIL_STACK_ERROR;
+
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY) < 0)
+ FAIL_STACK_ERROR;
+
+ for(i=0 ; i<NUM_DSETS; i++) {
+
+ HDsprintf(dset_name, "D1dset%d", i);
+ if((dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, filespace,
+ H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Dclose(dset_id) < 0)
+ FAIL_STACK_ERROR;
+
+ HDsprintf(dset_name, "D2dset%d", i);
+ if((dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, filespace,
+ H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Dclose(dset_id) < 0)
+ FAIL_STACK_ERROR;
+
+ HDsprintf(dset_name, "D3dset%d", i);
+ if((dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, filespace,
+ H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Dclose(dset_id) < 0)
+ FAIL_STACK_ERROR;
+
+ HDsprintf(dset_name, "dset%d", i);
+ if((dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, filespace,
+ H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5Dwrite(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Dclose(dset_id) < 0)
+ FAIL_STACK_ERROR;
+
+ HDmemset(data, 0, (size_t)num_elements * sizeof(int));
+ if((dset_id = H5Dopen2(grp_id, dset_name, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Dclose(dset_id) < 0)
+ FAIL_STACK_ERROR;
+
+ for (j=0; j < num_elements; j++) {
+ if(data[j] != j) {
+ HDfprintf(stderr, "Read different values than written\n");
+ FAIL_STACK_ERROR;
+ }
+ }
+
+ HDsprintf(dset_name, "D1dset%d", i);
+ if(H5Ldelete(grp_id, dset_name, H5P_DEFAULT) < 0)
+ FAIL_STACK_ERROR;
+ HDsprintf(dset_name, "D2dset%d", i);
+ if(H5Ldelete(grp_id, dset_name, H5P_DEFAULT) < 0)
+ FAIL_STACK_ERROR;
+ HDsprintf(dset_name, "D3dset%d", i);
+ if(H5Ldelete(grp_id, dset_name, H5P_DEFAULT) < 0)
+ FAIL_STACK_ERROR;
+ }
+
+ if(H5Gclose(grp_id) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Fclose(file_id) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(dcpl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Sclose(filespace) < 0)
+ FAIL_STACK_ERROR;
+
+ HDfree(data);
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Pclose(dcpl);
+ H5Sclose(filespace);
+ H5Gclose(grp_id);
+ H5Fclose(file_id);
+ if(data)
+ HDfree(data);
+ } H5E_END_TRY;
+ return(1);
+} /* create_file */
+
+
+/*-------------------------------------------------------------------------
+ * Function: open_file()
+ *
+ * Purpose: The purpose of this function appears to be a smoke check
+ * intended to exercise the page buffer.
+ *
+ * Specifically, the function opens a file (created by
+ * create_file()?), and verifies the contents of its datasets.
+ *
+ * Any data mis-matches or failures reported by the HDF5
+ * library result in test failure.
+ *
+ * Return: 0 if test is successful
+ * 1 if test fails
+ *
+ * Programmer: unknown
+ * ?? / ?? / ??
+ *
+ *-------------------------------------------------------------------------
+ */
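+/*
+ * Note: besides checking dataset contents, open_file() also verifies that the
+ * page buffer was set up with the expected page size and maximum size by
+ * inspecting f->shared->page_buf directly.
+ */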
+static unsigned
+open_file(char *filename, hid_t fapl, hsize_t page_size,
+ size_t page_buffer_size)
+{
+ hid_t file_id = -1;
+ hid_t dset_id = -1;
+ hid_t grp_id = -1;
+ int *data = NULL; /* pointer to data buffer to write */
+ int i;
+ int j;
+ int num_elements;
+ char dset_name[10];
+ H5F_t *f = NULL;
+
+ if((file_id = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Get a pointer to the internal file object */
+ if(NULL == (f = (H5F_t *)H5I_object(file_id)))
+ FAIL_STACK_ERROR;
+
+ if(f->shared->page_buf == NULL)
+ FAIL_STACK_ERROR;
+ if(f->shared->page_buf->page_size != page_size)
+ FAIL_STACK_ERROR;
+ if(f->shared->page_buf->max_size != page_buffer_size)
+ FAIL_STACK_ERROR;
+
+ if((grp_id = H5Gopen2(file_id, "GROUP", H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR;
+
+ num_elements = NX * NY;
+ if((data = (int *)HDcalloc((size_t)num_elements, sizeof(int))) == NULL)
+ TEST_ERROR
+
+ for(i=0 ; i<NUM_DSETS; i++) {
+
+ HDsprintf(dset_name, "dset%d", i);
+ if((dset_id = H5Dopen2(grp_id, dset_name, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5Dread(dset_id, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5Dclose(dset_id) < 0)
+ FAIL_STACK_ERROR;
+
+ for (j=0; j < num_elements; j++) {
+ if(data[j] != j) {
+ HDfprintf(stderr, "Read different values than written\n");
+ FAIL_STACK_ERROR;
+ }
+ }
+ }
+
+ if(H5Gclose(grp_id) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Fclose(file_id) < 0)
+ FAIL_STACK_ERROR;
+ HDfree(data);
+
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Gclose(grp_id);
+ H5Fclose(file_id);
+ if(data)
+ HDfree(data);
+ } H5E_END_TRY;
+ return 1;
+}
+
+/*
+ *
+ * set_multi_split():
+ * Internal routine to set up page-aligned address space for multi/split driver
+ * when testing paged aggregation.
+ *
+ */
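+/*
+ * Each member address below is rounded up to the next multiple of the page
+ * size with ((addr + pagesize - 1) / pagesize) * pagesize; for example, with
+ * a 4096-byte page an address of 5000 becomes 8192.
+ */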
+static unsigned
+set_multi_split(const char *env_h5_drvr, hid_t fapl, hsize_t pagesize)
+{
+ hbool_t split = FALSE;
+ hbool_t multi = FALSE;
+ H5FD_mem_t memb_map[H5FD_MEM_NTYPES];
+ hid_t memb_fapl_arr[H5FD_MEM_NTYPES];
+ char *memb_name[H5FD_MEM_NTYPES];
+ haddr_t memb_addr[H5FD_MEM_NTYPES];
+ hbool_t relax;
+ H5FD_mem_t mt;
+
+ /* Check for split or multi driver */
+ if(!HDstrcmp(env_h5_drvr, "split"))
+ split = TRUE;
+ else if(!HDstrcmp(env_h5_drvr, "multi"))
+ multi = TRUE;
+
+ if(split || multi) {
+
+ HDmemset(memb_name, 0, sizeof memb_name);
+
+ /* Get current split settings */
+ if(H5Pget_fapl_multi(fapl, memb_map, memb_fapl_arr, memb_name, memb_addr, &relax) < 0)
+ TEST_ERROR
+
+ if(split) {
+ /* Set memb_addr aligned */
+ memb_addr[H5FD_MEM_SUPER] = ((memb_addr[H5FD_MEM_SUPER] + pagesize - 1) / pagesize) * pagesize;
+ memb_addr[H5FD_MEM_DRAW] = ((memb_addr[H5FD_MEM_DRAW] + pagesize - 1) / pagesize) * pagesize;
+ } else {
+ /* Set memb_addr aligned */
+ for(mt = H5FD_MEM_DEFAULT; mt < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, mt))
+ memb_addr[mt] = ((memb_addr[mt] + pagesize - 1) / pagesize) * pagesize;
+ } /* end else */
+
+ /* Set multi driver with new FAPLs */
+ if(H5Pset_fapl_multi(fapl, memb_map, memb_fapl_arr, (const char * const *)memb_name, memb_addr, relax) < 0)
+ TEST_ERROR
+
+ /* Free memb_name */
+ for(mt = H5FD_MEM_DEFAULT; mt < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, mt))
+ free(memb_name[mt]);
+
+ } /* end if */
+
+ return 0;
+
+error:
+ return 1;
+
+} /* set_multi_split() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_args()
+ *
+ * Purpose: This test appears to be a quick smoke check directed at:
+ *
+ * 1) verifying that API errors are caught.
+ *
+ * 2) verifying that the page buffer behaves more or less
+ * as advertised.
+ *
+ * Any data mis-matches or unexpected failures or successes
+ * reported by the HDF5 library result in test failure.
+ *
+ * Return: 0 if test is successful
+ * 1 if test fails
+ *
+ * Programmer: unknown
+ * ?? / ?? / ??
+ *
+ *-------------------------------------------------------------------------
+ */
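+/*
+ * The invalid cases exercised below: enabling a page buffer without the paged
+ * file space strategy, setting a page buffer smaller than one page, and
+ * setting minimum meta/raw percentages that sum to more than 100.
+ */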
+static unsigned
+test_args(hid_t orig_fapl, const char *env_h5_drvr)
+{
+ char filename[FILENAME_LEN]; /* Filename to use */
+ hid_t file_id = -1; /* File ID */
+ hid_t fcpl = -1;
+ hid_t fapl = -1;
+ herr_t ret;
+
+ TESTING("Settings for Page Buffering");
+
+ h5_fixname(FILENAME[0], orig_fapl, filename, sizeof(filename));
+
+ if((fapl = H5Pcopy(orig_fapl)) < 0) TEST_ERROR
+
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ TEST_ERROR;
+
+
+ /* Test setting a page buffer without Paged Aggregation enabled -
+ * should fail
+ */
+ if(H5Pset_page_buffer_size(fapl, 512, 0, 0) < 0)
+ TEST_ERROR;
+
+ H5E_BEGIN_TRY {
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl);
+ } H5E_END_TRY;
+
+ if(file_id >= 0)
+ TEST_ERROR;
+
+
+ /* Test setting a page buffer with a size smaller than a single
+ * page size - should fail
+ */
+ if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 0, (hsize_t)1) < 0)
+ TEST_ERROR;
+
+ if(H5Pset_file_space_page_size(fcpl, 512) < 0)
+ TEST_ERROR;
+
+ if(H5Pset_page_buffer_size(fapl, 511, 0, 0) < 0)
+ TEST_ERROR;
+
+ H5E_BEGIN_TRY {
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl);
+ } H5E_END_TRY;
+
+ if(file_id >= 0)
+ TEST_ERROR;
+
+
+ /* Test setting a page buffer with sum of min meta and raw
+ * data percentage > 100 - should fail
+ */
+ H5E_BEGIN_TRY {
+ ret = H5Pset_page_buffer_size(fapl, 512, 50, 51);
+ } H5E_END_TRY;
+
+ if(ret >= 0)
+ TEST_ERROR;
+
+ if(set_multi_split(env_h5_drvr, fapl, 512) != 0)
+ TEST_ERROR;
+
+ /* Test setting a page buffer with a size equal to a single page size */
+ if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 0, (hsize_t)1) < 0)
+ TEST_ERROR;
+
+ if(H5Pset_file_space_page_size(fcpl, 512) < 0)
+ TEST_ERROR;
+
+ if(H5Pset_page_buffer_size(fapl, 512, 0, 0) < 0)
+ TEST_ERROR;
+
+ if(create_file(filename, fcpl, fapl) != 0)
+ TEST_ERROR;
+
+ if(open_file(filename, fapl, 512, 512) != 0)
+ TEST_ERROR;
+
+
+ /* Test setting a page buffer with a size slightly larger than a
+ * single page size
+ */
+ if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 0, (hsize_t)1) < 0)
+ TEST_ERROR;
+
+ if(H5Pset_file_space_page_size(fcpl, 512) < 0)
+ TEST_ERROR;
+
+ if(H5Pset_page_buffer_size(fapl, 513, 0, 0) < 0)
+ TEST_ERROR;
+
+ if(create_file(filename, fcpl, fapl) != 0)
+ TEST_ERROR;
+
+ if(open_file(filename, fapl, 512, 512) != 0)
+ TEST_ERROR;
+
+ if(set_multi_split(env_h5_drvr, fapl, 4194304) != 0)
+ TEST_ERROR;
+
+
+ /* Test setting a large page buffer size and page size */
+ if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 0, (hsize_t)1) < 0)
+ TEST_ERROR;
+
+ if(H5Pset_file_space_page_size(fcpl, 4194304) < 0)
+ TEST_ERROR;
+
+ if(H5Pset_page_buffer_size(fapl, 16777216, 0, 0) < 0)
+ TEST_ERROR;
+
+ if(create_file(filename, fcpl, fapl) != 0)
+ TEST_ERROR;
+
+ if(open_file(filename, fapl, 4194304, 16777216) != 0)
+ TEST_ERROR;
+
+ if(set_multi_split(env_h5_drvr, fapl, 1) != 0)
+ TEST_ERROR;
+
+
+ /* Test setting a 512 byte page buffer size and page size */
+ if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 0, (hsize_t)1) < 0)
+ TEST_ERROR;
+
+ if(H5Pset_file_space_page_size(fcpl, 512) < 0)
+ TEST_ERROR;
+ if(H5Pset_page_buffer_size(fapl, 512, 0, 0) < 0)
+ TEST_ERROR;
+ if(create_file(filename, fcpl, fapl) != 0)
+ TEST_ERROR;
+ if(open_file(filename, fapl, 512, 512) != 0)
+ TEST_ERROR;
+
+
+ if(H5Pclose(fcpl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR;
+
+ PASSED()
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Pclose(fapl);
+ H5Pclose(fcpl);
+ } H5E_END_TRY;
+ return 1;
+} /* test_args */
+
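+/* Minimal sketch (for reference only, not called by the tests) of the
+ * set-up sequence that test_args() above exercises piecemeal: page
+ * buffering, requested via the fapl, is only accepted when the file is
+ * created with the paged file space strategy and a page size no larger
+ * than the page buffer size.  The helper name and parameters are
+ * illustrative.
+ */
+static int
+create_paged_file_sketch(const char *name, hsize_t page_size, size_t buf_size)
+{
+    hid_t fcpl = -1, fapl = -1, fid = -1;
+    int   ret = 1;
+
+    if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0) goto done;
+    if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0) goto done;
+
+    /* enable paged aggregation and set the page size on the fcpl */
+    if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 0, (hsize_t)1) < 0) goto done;
+    if(H5Pset_file_space_page_size(fcpl, page_size) < 0) goto done;
+
+    /* enable the page buffer on the fapl (no minimum type thresholds) */
+    if(H5Pset_page_buffer_size(fapl, buf_size, 0, 0) < 0) goto done;
+
+    if((fid = H5Fcreate(name, H5F_ACC_TRUNC, fcpl, fapl)) < 0) goto done;
+    ret = 0;
+
+done:
+    if(fid >= 0)  H5Fclose(fid);
+    if(fapl >= 0) H5Pclose(fapl);
+    if(fcpl >= 0) H5Pclose(fcpl);
+    return ret;
+} /* create_paged_file_sketch() */
+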
+
+/*-------------------------------------------------------------------------
+ * Function: test_raw_data_handling()
+ *
+ * Purpose: The purpose of this function appears to be a smoke check
+ * of raw data reads and writes via the page buffer.
+ *
+ *              Any data mismatches or failures reported by the HDF5
+ *              library result in test failure.
+ *
+ * Return:      0 if test is successful
+ * 1 if test fails
+ *
+ * Programmer: unknown
+ * ?? / ?? / ??
+ *
+ * Changes: Added base_page_cnt field as supporting code. This allows
+ * the test to adjust to the number of page buffer pages
+ * accessed during file open / create.
+ *
+ * The test for the value of base_page_cnt just after file
+ *              open exists to detect changes in library behavior.  Assuming
+ * any such change is not indicative of other issues, these
+ * tests can be modified to reflect the change.
+ *
+ * JRM -- 2/23/17
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/* Changes made because the file space page size has a minimum size of 512 bytes */
+static unsigned
+test_raw_data_handling(hid_t orig_fapl, const char *env_h5_drvr)
+{
+ char filename[FILENAME_LEN]; /* Filename to use */
+ hid_t file_id = -1; /* File ID */
+ hid_t fcpl = -1;
+ hid_t fapl = -1;
+ H5FD_io_info_t fdio_info;
+ H5P_genplist_t *meta_plist = (H5P_genplist_t *)H5I_object(H5AC_ind_read_dxpl_id);
+ H5P_genplist_t *raw_plist = (H5P_genplist_t *)H5I_object(H5AC_rawdata_dxpl_id);
+ size_t base_page_cnt;
+ size_t page_count = 0;
+ int i, num_elements = 2000;
+ haddr_t addr = HADDR_UNDEF;
+ int *data = NULL;
+ H5F_t *f = NULL;
+
+ TESTING("Raw Data Handling");
+
+ h5_fixname(FILENAME[0], orig_fapl, filename, sizeof(filename));
+
+ if((fapl = H5Pcopy(orig_fapl)) < 0)
+ TEST_ERROR
+
+ if(set_multi_split(env_h5_drvr, fapl, sizeof(int)*200) != 0)
+ TEST_ERROR;
+
+ if((data = (int *)HDcalloc((size_t)num_elements, sizeof(int))) == NULL)
+ TEST_ERROR;
+
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ TEST_ERROR;
+ if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 0, (hsize_t)1) < 0)
+ TEST_ERROR;
+ if(H5Pset_file_space_page_size(fcpl, sizeof(int)*200) < 0)
+ TEST_ERROR;
+ if(H5Pset_page_buffer_size(fapl, sizeof(int)*2000, 0, 0) < 0)
+ TEST_ERROR;
+
+ if((file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Get a pointer to the internal file object */
+ if(NULL == (f = (H5F_t *)H5I_object(file_id)))
+ FAIL_STACK_ERROR;
+
+ /* opening the file inserts one or more pages into the page buffer.
+ * Get the number of pages inserted, and verify that it is the
+     * expected value.
+ */
+ base_page_cnt = H5SL_count(f->shared->page_buf->slist_ptr);
+ if(base_page_cnt != 1)
+ TEST_ERROR;
+
+    /* allocate space for 2000 elements */
+ if(HADDR_UNDEF == (addr = H5MF_alloc(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, sizeof(int)*(size_t)num_elements)))
+ FAIL_STACK_ERROR;
+
+    /* initialize all the elements to have a value of -1 */
+ for(i=0 ; i<num_elements ; i++)
+ data[i] = -1;
+ if(H5F_block_write(f, H5FD_MEM_DRAW, addr, sizeof(int)*(size_t)num_elements, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+    /* update the first 100 elements to have values 0-99 - this will
+       result in one page being inserted into the page buffer. */
+ for(i=0 ; i<100 ; i++)
+ data[i] = i;
+
+ if(H5F_block_write(f, H5FD_MEM_DRAW, addr, sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ page_count ++;
+
+ if(H5SL_count(f->shared->page_buf->slist_ptr) != page_count + base_page_cnt)
+ FAIL_STACK_ERROR;
+
+    /* update elements 300 - 450, with values 300 - 449 - this will
+ bring two more pages into the page buffer. */
+ for(i=0 ; i<150 ; i++)
+ data[i] = i+300;
+ if(H5F_block_write(f, H5FD_MEM_DRAW, addr+(sizeof(int)*300), sizeof(int)*150, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+ page_count += 2;
+ if(H5SL_count(f->shared->page_buf->slist_ptr) != page_count + base_page_cnt)
+ FAIL_STACK_ERROR;
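+
+    /* For reference: with a page size of sizeof(int) * 200, element i maps
+     * to the page beginning at element (i / 200) * 200.  The write above
+     * covers elements 300 - 449, which spans the pages beginning at elements
+     * 200 and 400 -- hence two new pages enter the page buffer.
+     */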
+
+ /* update elements 100 - 300, this will go to disk but also update
+ existing pages in the page buffer. */
+ for(i=0 ; i<200 ; i++)
+ data[i] = i+100;
+ if(H5F_block_write(f, H5FD_MEM_DRAW, addr+(sizeof(int)*100), sizeof(int)*200, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+ if(H5SL_count(f->shared->page_buf->slist_ptr) != page_count + base_page_cnt)
+ FAIL_STACK_ERROR;
+
+    /* Update elements 450 - 600 - this will update an existing page in the PB */
+ for(i=0 ; i<150 ; i++)
+ data[i] = i+450;
+ if(H5F_block_write(f, H5FD_MEM_DRAW, addr+(sizeof(int)*450), sizeof(int)*150, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+ if(H5SL_count(f->shared->page_buf->slist_ptr) != page_count + base_page_cnt)
+ FAIL_STACK_ERROR;
+
+ /* Do a full page write to block 600-800 - should bypass the PB */
+ for(i=0 ; i<200 ; i++)
+ data[i] = i+600;
+ if(H5F_block_write(f, H5FD_MEM_DRAW, addr+(sizeof(int)*600), sizeof(int)*200, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+ if(H5SL_count(f->shared->page_buf->slist_ptr) != page_count + base_page_cnt)
+ FAIL_STACK_ERROR;
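+
+    /* For reference: writes that exactly cover one or more full pages appear
+     * to bypass the page buffer entirely (the page count above is unchanged),
+     * while partial page writes are routed through it.
+     */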
+
+ /* read elements 800 - 1200, this should not affect the PB, and should read -1s */
+ if(H5F_block_read(f, H5FD_MEM_DRAW, addr+(sizeof(int)*800), sizeof(int)*400, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+ for (i=0; i < 400; i++) {
+ if(data[i] != -1) {
+ HDfprintf(stderr, "Read different values than written\n");
+ FAIL_STACK_ERROR;
+ }
+ }
+ if(H5SL_count(f->shared->page_buf->slist_ptr) != page_count + base_page_cnt)
+ FAIL_STACK_ERROR;
+
+ /* read elements 1200 - 1201, this should read -1 and bring in an
+ * entire page of addr 1200
+ */
+ if(H5F_block_read(f, H5FD_MEM_DRAW, addr+(sizeof(int)*1200), sizeof(int)*1, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+ for (i=0; i < 1; i++) {
+ if(data[i] != -1) {
+ HDfprintf(stderr, "Read different values than written\n");
+ TEST_ERROR;
+ }
+ }
+ page_count ++;
+ if(H5SL_count(f->shared->page_buf->slist_ptr) != page_count + base_page_cnt)
+ TEST_ERROR;
+
+    /* read elements 350 - 450, this should use the existing pages in the PB */
+ if(H5F_block_read(f, H5FD_MEM_DRAW, addr+(sizeof(int)*350), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+ for (i=0; i < 100; i++) {
+ if(data[i] != i+350) {
+ HDfprintf(stderr, "Read different values than written\n");
+ TEST_ERROR;
+ }
+ }
+ if(H5SL_count(f->shared->page_buf->slist_ptr) != page_count + base_page_cnt)
+ TEST_ERROR;
+
+    /* read elements 0 - 800 using the VFD... this should result in -1s
+       except for the data that has already reached disk (100-300 & 600-800) */
+ fdio_info.file = f->shared->lf;
+ fdio_info.meta_dxpl = meta_plist;
+ fdio_info.raw_dxpl = raw_plist;
+ if(H5FD_read(&fdio_info, H5FD_MEM_DRAW, addr, sizeof(int)*800, data) < 0)
+ FAIL_STACK_ERROR;
+ i = 0;
+ while (i < 800) {
+ if((i>=100 && i<300) || (i>=600)) {
+ if(data[i] != i) {
+ HDfprintf(stderr, "Read different values than written\n");
+ TEST_ERROR;
+ }
+ }
+ else {
+ if(data[i] != -1) {
+ HDfprintf(stderr, "Read different values than written\n");
+ TEST_ERROR;
+ }
+ }
+ i++;
+ }
+
+    /* read elements 0 - 800 using the PB... this should return everything
+     * written so far, including the updates held in the PB
+ */
+ if(H5F_block_read(f, H5FD_MEM_DRAW, addr, sizeof(int)*800, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+ if(H5SL_count(f->shared->page_buf->slist_ptr) != page_count + base_page_cnt)
+ TEST_ERROR;
+ for (i=0; i < 800; i++) {
+ if(data[i] != i) {
+ HDfprintf(stderr, "Read different values than written\n");
+ TEST_ERROR;
+ }
+ }
+
+    /* update elements 400 - 1400 to the value 0, this will go to disk but
+     * will also evict the existing pages at element offsets 400 and 1200
+     * from the PB.
+ */
+ for(i=0 ; i<1000 ; i++)
+ data[i] = 0;
+ if(H5F_block_write(f, H5FD_MEM_DRAW, addr+(sizeof(int)*400), sizeof(int)*1000, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+ page_count -= 2;
+ if(H5SL_count(f->shared->page_buf->slist_ptr) != page_count + base_page_cnt)
+ TEST_ERROR;
+
+    /* read elements 0 - 1000... this should go to disk, with the result
+     * for elements 200 - 400 then patched from the existing pages in the PB
+ */
+ if(H5F_block_read(f, H5FD_MEM_DRAW, addr, sizeof(int)*1000, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+ i=0;
+ while (i < 1000) {
+ if(i<400) {
+ if(data[i] != i) {
+ HDfprintf(stderr, "Read different values than written\n");
+ TEST_ERROR;
+ }
+ }
+ else {
+ if(data[i] != 0) {
+ HDfprintf(stderr, "Read different values than written\n");
+ TEST_ERROR;
+ }
+ }
+ i++;
+ }
+ if(H5SL_count(f->shared->page_buf->slist_ptr) != page_count + base_page_cnt)
+ TEST_ERROR;
+
+ if(H5Fclose(file_id) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(fcpl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR;
+ HDfree(data);
+
+ PASSED()
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Pclose(fapl);
+ H5Pclose(fcpl);
+ H5Fclose(file_id);
+ if(data)
+ HDfree(data);
+ } H5E_END_TRY;
+ return 1;
+} /* test_raw_data_handling */
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_lru_processing()
+ *
+ * Purpose: Basic set of tests verifying expected page buffer LRU
+ * management.
+ *
+ *              Any data mismatches or failures reported by the HDF5
+ *              library result in test failure.
+ *
+ * Return:      0 if test is successful
+ * 1 if test fails
+ *
+ * Programmer: unknown
+ * ?? / ?? / ??
+ *
+ * Changes: Added base_page_cnt field as supporting code. This allows
+ * the test to adjust to the number of page buffer pages
+ * accessed during file open / create.
+ *
+ * The test for the value of base_page_cnt just after file
+ *              open exists to detect changes in library behavior.  Assuming
+ * any such change is not indicative of other issues, these
+ * tests can be modified to reflect the change.
+ *
+ * JRM -- 2/23/17
+ *
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+test_lru_processing(hid_t orig_fapl, const char *env_h5_drvr)
+{
+ char filename[FILENAME_LEN]; /* Filename to use */
+ hid_t file_id = -1; /* File ID */
+ hid_t fcpl = -1;
+ hid_t fapl = -1;
+ size_t base_page_cnt;
+ size_t page_count = 0;
+ int i;
+ int num_elements = 2000;
+ haddr_t addr = HADDR_UNDEF;
+ haddr_t search_addr = HADDR_UNDEF;
+ int *data = NULL;
+ H5F_t *f = NULL;
+
+ TESTING("LRU Processing");
+
+ h5_fixname(FILENAME[0], orig_fapl, filename, sizeof(filename));
+
+ if((fapl = H5Pcopy(orig_fapl)) < 0)
+ FAIL_STACK_ERROR
+
+ if(set_multi_split(env_h5_drvr, fapl, sizeof(int)*200) != 0)
+ TEST_ERROR;
+
+ if((data = (int *)HDcalloc((size_t)num_elements, sizeof(int))) == NULL)
+ TEST_ERROR;
+
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 0, (hsize_t)1) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5Pset_file_space_page_size(fcpl, sizeof(int)*200) < 0)
+ FAIL_STACK_ERROR;
+
+ /* keep 2 pages at max in the page buffer */
+ if(H5Pset_page_buffer_size(fapl, sizeof(int)*400, 20, 0) < 0)
+ FAIL_STACK_ERROR;
+
+ if((file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Get a pointer to the internal file object */
+ if(NULL == (f = (H5F_t *)H5I_object(file_id)))
+ FAIL_STACK_ERROR;
+
+ /* opening the file inserts one or more pages into the page buffer.
+ * Get the number of pages inserted, and verify that it is the
+     * expected value.
+ */
+ base_page_cnt = H5SL_count(f->shared->page_buf->slist_ptr);
+ if(base_page_cnt != 1)
+ TEST_ERROR;
+
+    /* allocate space for 2000 elements */
+ if(HADDR_UNDEF == (addr = H5MF_alloc(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, sizeof(int)*(size_t)num_elements)))
+ FAIL_STACK_ERROR;
+
+    /* initialize all the elements to have a value of -1 */
+ for(i=0 ; i<num_elements ; i++)
+ data[i] = -1;
+
+ if(H5F_block_write(f, H5FD_MEM_DRAW, addr, sizeof(int)*(size_t)num_elements, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+    /* update the first 100 elements to have values 0-99 - this will
+     * result in one page being inserted into the page buffer.
+ */
+ for(i=0 ; i<100 ; i++)
+ data[i] = i;
+
+ if(H5F_block_write(f, H5FD_MEM_DRAW, addr, sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ page_count ++;
+
+ if(H5SL_count(f->shared->page_buf->slist_ptr) != page_count + base_page_cnt)
+ TEST_ERROR;
+
+ /* update elements 300 - 450, with values 300 - 449 - this will
+     * bring two pages into the page buffer and evict page 0.
+ */
+ for(i=0 ; i<150 ; i++)
+ data[i] = i+300;
+
+ if(H5F_block_write(f, H5FD_MEM_DRAW, addr+(sizeof(int)*300), sizeof(int)*150, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ page_count = 2;
+
+ /* at this point, the page buffer entry created at file open should
+ * have been evicted -- thus no further need to consider base_page_cnt.
+ */
+ if(H5SL_count(f->shared->page_buf->slist_ptr) != page_count)
+ FAIL_STACK_ERROR;
+
+    /* The two pages should be the ones at element offsets 200 and 400;
+       page 0 should have been evicted */
+ search_addr = addr;
+ if(NULL != H5SL_search(f->shared->page_buf->slist_ptr, &(search_addr)))
+ FAIL_STACK_ERROR;
+ search_addr = addr + sizeof(int)*200;
+ if(NULL == H5SL_search(f->shared->page_buf->slist_ptr, &(search_addr)))
+ FAIL_STACK_ERROR;
+ search_addr = addr + sizeof(int)*400;
+ if(NULL == H5SL_search(f->shared->page_buf->slist_ptr, &(search_addr)))
+ FAIL_STACK_ERROR;
+
+    /* update element 300, this will update an existing page in the
+       page buffer and move it to the top of the LRU. */
+ for(i=0 ; i<1 ; i++)
+ data[i] = i+300;
+ if(H5F_block_write(f, H5FD_MEM_DRAW, addr+(sizeof(int)*300), sizeof(int)*1, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+ if(H5SL_count(f->shared->page_buf->slist_ptr) != page_count)
+ FAIL_STACK_ERROR;
+
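+    /* For reference: at this point the LRU (most recent first) should be
+     * {page 200, page 400}, since the single element write above touched
+     * page 200 last.  The read below misses on page 1200 and therefore
+     * evicts page 400 from the tail of the LRU.
+     */
+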
+    /* read element 1200, this should read -1, bring in the entire page
+       at element offset 1200, and evict page 400 */
+ if(H5F_block_read(f, H5FD_MEM_DRAW, addr+(sizeof(int)*1200), sizeof(int)*1, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+ for (i=0; i < 1; i++) {
+ if(data[i] != -1) {
+ HDfprintf(stderr, "Read different values than written\n");
+ TEST_ERROR;
+ } /* end if */
+ } /* end for */
+ if(H5SL_count(f->shared->page_buf->slist_ptr) != page_count)
+ FAIL_STACK_ERROR;
+
+ /* Changes: 400 */
+ search_addr = addr + sizeof(int)*400;
+ if(NULL != H5SL_search(f->shared->page_buf->slist_ptr, &(search_addr)))
+ FAIL_STACK_ERROR;
+
+ /* Changes: 200 */
+ search_addr = addr + sizeof(int)*200;
+ if(NULL == H5SL_search(f->shared->page_buf->slist_ptr, &(search_addr)))
+ FAIL_STACK_ERROR;
+
+ /* Changes: 1200 */
+ search_addr = addr + sizeof(int)*1200;
+ if(NULL == H5SL_search(f->shared->page_buf->slist_ptr, &(search_addr)))
+ FAIL_STACK_ERROR;
+    /* read elements 350 - 450, this should move page 200 to the top of
+       the LRU, evict page 1200, and bring in page 400 */
+ if(H5F_block_read(f, H5FD_MEM_DRAW, addr+(sizeof(int)*350), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+ for (i=0; i < 100; i++) {
+ if(data[i] != i+350) {
+ HDfprintf(stderr, "Read different values than written\n");
+ TEST_ERROR;
+ } /* end if */
+ } /* end for */
+ if(H5SL_count(f->shared->page_buf->slist_ptr) != page_count)
+ FAIL_STACK_ERROR;
+
+ /* Changes: 1200 */
+ search_addr = addr + sizeof(int)*1200;
+ if(NULL != H5SL_search(f->shared->page_buf->slist_ptr, &(search_addr)))
+ FAIL_STACK_ERROR;
+
+ /* Changes: 200 */
+ search_addr = addr + sizeof(int)*200;
+ if(NULL == H5SL_search(f->shared->page_buf->slist_ptr, &(search_addr)))
+ FAIL_STACK_ERROR;
+
+ /* Changes: 400 */
+ search_addr = addr + sizeof(int)*400;
+ if(NULL == H5SL_search(f->shared->page_buf->slist_ptr, &(search_addr)))
+ FAIL_STACK_ERROR;
+
+    /* update elements 400 - 1400 to the value 0, this will go to disk but
+       will also discard the existing page at element offset 400 from the PB. */
+ for(i=0 ; i<1000 ; i++)
+ data[i] = 0;
+ if(H5F_block_write(f, H5FD_MEM_DRAW, addr+(sizeof(int)*400), sizeof(int)*1000, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+ page_count -= 1;
+ if(H5SL_count(f->shared->page_buf->slist_ptr) != page_count)
+ FAIL_STACK_ERROR;
+
+ /* Changes: 200 */
+ search_addr = addr + sizeof(int)*200;
+ if(NULL == H5SL_search(f->shared->page_buf->slist_ptr, &(search_addr)))
+ FAIL_STACK_ERROR;
+
+ /* Changes: 400 */
+ search_addr = addr + sizeof(int)*400;
+ if(NULL != H5SL_search(f->shared->page_buf->slist_ptr, &(search_addr)))
+ FAIL_STACK_ERROR;
+
+ if(H5Fclose(file_id) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(fcpl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR;
+ HDfree(data);
+
+ PASSED()
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Pclose(fapl);
+ H5Pclose(fcpl);
+ H5Fclose(file_id);
+ if(data)
+ HDfree(data);
+ } H5E_END_TRY;
+ return 1;
+} /* test_lru_processing */
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_min_threshold()
+ *
+ * Purpose: Tests verifying observation of minimum and maximum
+ * raw and metadata page counts in the page buffer.
+ *
+ *              Any data mismatches or failures reported by the HDF5
+ *              library result in test failure.
+ *
+ * Return:      0 if test is successful
+ * 1 if test fails
+ *
+ * Programmer: unknown
+ * ?? / ?? / ??
+ *
+ * Changes: Added the base_raw_cnt and base_meta_cnt fields and
+ * supporting code. This allows the test to adjust to the
+ * number of page buffer pages accessed during file open /
+ * create.
+ *
+ * The tests for the values of base_raw_cnt and base_meta_cnt
+ *              just after file open exist to detect changes in library
+ * behavior. Assuming any such change is not indicative of
+ * other issues, these tests can be modified to reflect the
+ * change.
+ *
+ * JRM -- 2/23/17
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+test_min_threshold(hid_t orig_fapl, const char *env_h5_drvr)
+{
+ char filename[FILENAME_LEN]; /* Filename to use */
+ hid_t file_id = -1; /* File ID */
+ hid_t fcpl = -1;
+ hid_t fapl = -1;
+ size_t base_raw_cnt = 0;
+ size_t base_meta_cnt = 0;
+ size_t page_count = 0;
+ int i;
+ int num_elements = 1000;
+ H5PB_t *page_buf;
+ haddr_t meta_addr = HADDR_UNDEF;
+ haddr_t raw_addr = HADDR_UNDEF;
+ int *data = NULL;
+ H5F_t *f = NULL;
+
+ TESTING("Minimum Metadata threshold Processing");
+ HDprintf("\n");
+ h5_fixname(FILENAME[0], orig_fapl, filename, sizeof(filename));
+
+ if((fapl = H5Pcopy(orig_fapl)) < 0)
+ TEST_ERROR
+
+ if(set_multi_split(env_h5_drvr, fapl, sizeof(int)*200) != 0)
+ TEST_ERROR;
+
+ if((data = (int *)HDcalloc((size_t)num_elements, sizeof(int))) == NULL)
+ TEST_ERROR;
+
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 0, (hsize_t)1) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5Pset_file_space_page_size(fcpl, sizeof(int)*200) < 0)
+ FAIL_STACK_ERROR;
+
+ HDprintf("\tMinimum metadata threshold = 100%%\n");
+
+ /* keep 5 pages at max in the page buffer and 5 meta page minimum */
+ if(H5Pset_page_buffer_size(fapl, sizeof(int)*1000, 100, 0) < 0)
+ FAIL_STACK_ERROR;
+
+ /* create the file */
+ if((file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Get a pointer to the internal file object */
+ if(NULL == (f = (H5F_t *)H5I_object(file_id)))
+ FAIL_STACK_ERROR;
+
+ /* opening the file inserts one or more pages into the page buffer.
+ * Get the raw and meta counts now, so we can adjust tests accordingly.
+ */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(f->shared->page_buf);
+
+ base_raw_cnt = f->shared->page_buf->raw_count;
+ base_meta_cnt = f->shared->page_buf->meta_count;
+
+ if(base_raw_cnt != 0)
+ TEST_ERROR;
+
+ if(base_meta_cnt != 1)
+ TEST_ERROR;
+
+ page_buf = f->shared->page_buf;
+
+ if(page_buf->min_meta_count != 5)
+ TEST_ERROR;
+
+ if(page_buf->min_raw_count != 0)
+ TEST_ERROR;
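+
+    /* For reference: the minimum counts appear to be derived from the
+     * percentages by integer arithmetic on the number of pages the buffer
+     * can hold.  Here that is (sizeof(int) * 1000) / (sizeof(int) * 200) = 5
+     * pages, so a 100% metadata minimum yields min_meta_count = 5; the later
+     * settings of 40% and 39% yield 2 and 1 respectively, matching the
+     * checks in this test.
+     */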
+
+ if(HADDR_UNDEF == (meta_addr = H5MF_alloc(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, sizeof(int)*(size_t)num_elements)))
+ FAIL_STACK_ERROR;
+
+ if(HADDR_UNDEF == (raw_addr = H5MF_alloc(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, sizeof(int)*(size_t)num_elements)))
+ FAIL_STACK_ERROR;
+
+ /* write all raw data, this would end up in page buffer since there
+ * is no metadata yet
+ *
+     * Not necessarily -- opening the file may load a metadata page.
+ */
+ for(i=0 ; i<100 ; i++)
+ data[i] = i;
+
+ if(H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*200), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*400), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*600), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*800), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ page_count += 5;
+
+ if(H5SL_count(f->shared->page_buf->slist_ptr) != page_count)
+ FAIL_STACK_ERROR;
+
+ if(page_buf->raw_count != 5 - base_meta_cnt)
+ TEST_ERROR;
+
+ /* write all meta data, this would end up in page buffer */
+ if(H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*100, H5AC_ind_read_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*200), sizeof(int)*50, H5AC_ind_read_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*400), sizeof(int)*50, H5AC_ind_read_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_read(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*600), sizeof(int)*50, H5AC_ind_read_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_read(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*800), sizeof(int)*50, H5AC_ind_read_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5SL_count(f->shared->page_buf->slist_ptr) != page_count)
+ FAIL_STACK_ERROR;
+
+ if(page_buf->meta_count != 5)
+ TEST_ERROR;
+
+ if(page_buf->raw_count != 0)
+ TEST_ERROR;
+
+ /* write and read more raw data and make sure that they don't end up in
+ * page buffer since the minimum metadata is actually the entire
+ * page buffer
+ */
+ if(H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*200), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*350), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_read(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*500), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_read(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*750), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_read(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*900), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5SL_count(f->shared->page_buf->slist_ptr) != page_count)
+ FAIL_STACK_ERROR;
+
+ if(page_buf->meta_count != 5)
+ TEST_ERROR;
+
+ if(page_buf->raw_count != 0)
+ TEST_ERROR;
+
+ if(H5Fclose(file_id) < 0)
+ FAIL_STACK_ERROR;
+
+
+ HDprintf("\tMinimum raw data threshold = 100%%\n");
+
+ page_count = 0;
+
+ /* keep 5 pages at max in the page buffer and 5 raw page minimum */
+ /* Changes: 1000 */
+ if(H5Pset_page_buffer_size(fapl, sizeof(int)*1000, 0, 100) < 0)
+ TEST_ERROR;
+
+ /* create the file */
+ if((file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Get a pointer to the internal file object */
+ if(NULL == (f = (H5F_t *)H5I_object(file_id)))
+ FAIL_STACK_ERROR;
+
+ /* opening the file inserts one or more pages into the page buffer.
+ * Get the raw and meta counts now, so we can adjust tests accordingly.
+ */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(f->shared->page_buf);
+
+ base_raw_cnt = f->shared->page_buf->raw_count;
+ base_meta_cnt = f->shared->page_buf->meta_count;
+
+ if(base_raw_cnt != 0)
+ TEST_ERROR;
+
+ if(base_meta_cnt != 1)
+ TEST_ERROR;
+
+ page_buf = f->shared->page_buf;
+
+ if(page_buf->min_meta_count != 0)
+ TEST_ERROR;
+ if(page_buf->min_raw_count != 5)
+ FAIL_STACK_ERROR;
+
+ if(HADDR_UNDEF == (meta_addr = H5MF_alloc(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, sizeof(int)*(size_t)num_elements)))
+ FAIL_STACK_ERROR;
+
+ if(HADDR_UNDEF == (raw_addr = H5MF_alloc(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, sizeof(int)*(size_t)num_elements)))
+ TEST_ERROR;
+
+ /* write all meta data, this would end up in page buffer since there
+ * is no raw data yet
+ */
+ for(i=0 ; i<100 ; i++)
+ data[i] = i;
+
+ if(H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*100, H5AC_ind_read_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*200), sizeof(int)*100, H5AC_ind_read_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*400), sizeof(int)*100, H5AC_ind_read_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*600), sizeof(int)*100, H5AC_ind_read_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*800), sizeof(int)*100, H5AC_ind_read_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ page_count += 5;
+
+ if(H5SL_count(f->shared->page_buf->slist_ptr) != page_count)
+ FAIL_STACK_ERROR;
+ if(page_buf->meta_count != 5 - base_raw_cnt)
+ TEST_ERROR;
+
+ /* write/read all raw data, this would end up in page buffer */
+ if(H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*200), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*400), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_read(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*600), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_read(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*800), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5SL_count(f->shared->page_buf->slist_ptr) != page_count)
+ FAIL_STACK_ERROR;
+
+ if(page_buf->raw_count != 5)
+ TEST_ERROR;
+
+ if(page_buf->meta_count != 0)
+ TEST_ERROR;
+
+ /* write and read more meta data and make sure that they don't end up in
+     * page buffer since the minimum raw data count is actually the entire
+ * page buffer
+ */
+ if(H5F_block_write(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*100), sizeof(int)*50, H5AC_ind_read_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*350), sizeof(int)*50, H5AC_ind_read_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_read(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*500), sizeof(int)*50, H5AC_ind_read_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_read(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*750), sizeof(int)*50, H5AC_ind_read_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_read(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*900), sizeof(int)*50, H5AC_ind_read_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5SL_count(f->shared->page_buf->slist_ptr) != page_count)
+ FAIL_STACK_ERROR;
+
+ if(page_buf->raw_count != 5)
+ TEST_ERROR;
+
+ if(page_buf->meta_count != 0)
+ TEST_ERROR;
+
+ if(H5Fclose(file_id) < 0)
+ FAIL_STACK_ERROR;
+
+
+ HDprintf("\tMinimum metadata threshold = 40%%, Minimum rawdata threshold = 40%%\n");
+ page_count = 0;
+    /* keep 5 pages at max in the page buffer, with a minimum of 2 meta
+     * pages and 2 raw pages
+ */
+ if(H5Pset_page_buffer_size(fapl, sizeof(int)*1000, 40, 40) < 0)
+ TEST_ERROR;
+
+ /* create the file */
+ if((file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Get a pointer to the internal file object */
+ if(NULL == (f = (H5F_t *)H5I_object(file_id)))
+ FAIL_STACK_ERROR;
+
+ /* opening the file inserts one or more pages into the page buffer.
+ *
+     * However, with the current 1 metadata page inserted into the
+     * page buffer, it is not necessary to track the base raw and
+ * metadata entry counts.
+ */
+
+ if(base_raw_cnt != 0)
+ TEST_ERROR;
+
+ if(base_meta_cnt != 1)
+ TEST_ERROR;
+ page_buf = f->shared->page_buf;
+
+ if(page_buf->min_meta_count != 2)
+ TEST_ERROR;
+
+ if(page_buf->min_raw_count != 2)
+ TEST_ERROR;
+
+ if(HADDR_UNDEF == (meta_addr = H5MF_alloc(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, sizeof(int)*(size_t)num_elements)))
+ FAIL_STACK_ERROR;
+
+ if(HADDR_UNDEF == (raw_addr = H5MF_alloc(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, sizeof(int)*(size_t)num_elements)))
+ FAIL_STACK_ERROR;
+
+    /* initialize all the elements to have a value of -1 */
+ for(i=0 ; i<num_elements ; i++)
+ data[i] = -1;
+
+ if(H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*(size_t)num_elements, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*(size_t)num_elements, H5AC_ind_read_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ /* fill the page buffer with raw data */
+ for(i=0 ; i<100 ; i++)
+ data[i] = i;
+
+ if(H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*200), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*400), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*600), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*800), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ page_count += 5;
+
+ if(H5SL_count(f->shared->page_buf->slist_ptr) != page_count)
+ TEST_ERROR;
+
+ if(f->shared->page_buf->raw_count != 5 - base_meta_cnt)
+ TEST_ERROR;
+
+ /* add 3 meta entries evicting 3 raw entries */
+ if(H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*100, H5AC_ind_read_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*200), sizeof(int)*100, H5AC_ind_read_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*400), sizeof(int)*100, H5AC_ind_read_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5SL_count(f->shared->page_buf->slist_ptr) != page_count)
+ FAIL_STACK_ERROR;
+
+ if(f->shared->page_buf->meta_count != 3)
+ TEST_ERROR;
+
+ if(f->shared->page_buf->raw_count != 2)
+ TEST_ERROR;
+
+    /* adding more meta entries should replace meta entries since raw data
+ * is at its minimum
+ */
+ if(H5F_block_write(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*600), sizeof(int)*100, H5AC_ind_read_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*800), sizeof(int)*100, H5AC_ind_read_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(f->shared->page_buf->meta_count != 3)
+ TEST_ERROR;
+
+ if(f->shared->page_buf->raw_count != 2)
+ TEST_ERROR;
+
+    /* bring existing raw entries up the LRU */
+ if(H5F_block_read(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*750), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ /* adding 2 raw entries (even with 1 call) should only evict 1 meta
+ * entry and another raw entry
+ */
+ if(H5F_block_read(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*350), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(f->shared->page_buf->meta_count != 2)
+ TEST_ERROR;
+
+ if(f->shared->page_buf->raw_count != 3)
+ TEST_ERROR;
+
+    /* adding 2 meta entries should replace 2 entries at the bottom of the LRU */
+ if(H5F_block_read(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*98), sizeof(int)*100, H5AC_ind_read_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_read(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*242), sizeof(int)*100, H5AC_ind_read_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(f->shared->page_buf->meta_count != 2)
+ TEST_ERROR;
+
+ if(f->shared->page_buf->raw_count != 3)
+ TEST_ERROR;
+
+ if(H5Fclose(file_id) < 0)
+ FAIL_STACK_ERROR;
+
+ HDprintf("\tMinimum metadata threshold = 20%%\n");
+ page_count = 0;
+ /* keep 5 pages at max in the page buffer and 1 meta page minimum */
+ if(H5Pset_page_buffer_size(fapl, sizeof(int)*1000, 39, 0) < 0)
+ TEST_ERROR;
+ /* create the file */
+ if((file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl)) < 0)
+ FAIL_STACK_ERROR;
+ /* Get a pointer to the internal file object */
+ if(NULL == (f = (H5F_t *)H5I_object(file_id)))
+ FAIL_STACK_ERROR;
+ page_buf = f->shared->page_buf;
+
+ if(page_buf->min_meta_count != 1)
+ TEST_ERROR;
+ if(page_buf->min_raw_count != 0)
+ TEST_ERROR;
+
+ if(HADDR_UNDEF == (meta_addr = H5MF_alloc(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, sizeof(int)*(size_t)num_elements)))
+ FAIL_STACK_ERROR;
+
+ if(HADDR_UNDEF == (raw_addr = H5MF_alloc(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, sizeof(int)*(size_t)num_elements)))
+ FAIL_STACK_ERROR;
+
+    /* initialize all the elements to have a value of -1 */
+ for(i=0 ; i<num_elements ; i++)
+ data[i] = -1;
+
+ if(H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*(size_t)num_elements, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*(size_t)num_elements, H5AC_ind_read_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ /* fill the page buffer with raw data */
+ for(i=0 ; i<100 ; i++)
+ data[i] = i;
+
+ if(H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*200), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*400), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*600), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*800), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ page_count += 5;
+
+ if(H5SL_count(f->shared->page_buf->slist_ptr) != page_count)
+ FAIL_STACK_ERROR;
+
+ /* add 2 meta entries evicting 2 raw entries */
+ if(H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*100, H5AC_ind_read_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*200), sizeof(int)*100, H5AC_ind_read_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5SL_count(f->shared->page_buf->slist_ptr) != page_count)
+ FAIL_STACK_ERROR;
+
+ if(f->shared->page_buf->meta_count != 2)
+ TEST_ERROR;
+
+ if(f->shared->page_buf->raw_count != 3)
+ TEST_ERROR;
+
+ /* bring the rest of the raw entries up the LRU */
+ if(H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*500), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*700), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*900), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+    /* write one more raw entry which replaces one meta entry */
+ if(H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*100), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5SL_count(f->shared->page_buf->slist_ptr) != page_count)
+ FAIL_STACK_ERROR;
+
+ if(f->shared->page_buf->meta_count != 1)
+ TEST_ERROR;
+
+ if(f->shared->page_buf->raw_count != 4)
+ TEST_ERROR;
+
+    /* write one more raw entry which should replace another raw entry,
+     * keeping the minimum threshold of meta entries
+ */
+ if(H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*300), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5SL_count(f->shared->page_buf->slist_ptr) != page_count)
+ FAIL_STACK_ERROR;
+
+ if(f->shared->page_buf->meta_count != 1)
+ TEST_ERROR;
+
+ if(f->shared->page_buf->raw_count != 4)
+ TEST_ERROR;
+
+ /* write a metadata entry that should replace the metadata entry
+ * at the bottom of the LRU
+ */
+ if(H5F_block_write(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*500), sizeof(int)*100, H5AC_ind_read_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5SL_count(f->shared->page_buf->slist_ptr) != page_count)
+ FAIL_STACK_ERROR;
+
+ if(f->shared->page_buf->meta_count != 1)
+ TEST_ERROR;
+
+ if(f->shared->page_buf->raw_count != 4)
+ TEST_ERROR;
+
+ if(H5Fclose(file_id) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5Pclose(fcpl) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR;
+
+ HDfree(data);
+
+ PASSED()
+
+ return 0;
+
+error:
+
+ H5E_BEGIN_TRY {
+ H5Pclose(fapl);
+ H5Pclose(fcpl);
+ H5Fclose(file_id);
+ if(data)
+ HDfree(data);
+ } H5E_END_TRY;
+
+ return 1;
+
+} /* test_min_threshold */
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_stats_collection()
+ *
+ * Purpose: Tests verifying correct collection of statistics
+ * by the page buffer.
+ *
+ *              Any data mismatches or failures reported by the HDF5
+ *              library result in test failure.
+ *
+ * Return:      0 if test is successful
+ * 1 if test fails
+ *
+ * Programmer: unknown
+ * ?? / ?? / ??
+ *
+ * Changes: Added the base_raw_cnt and base_meta_cnt fields and
+ * supporting code. This allows the test to adjust to the
+ * number of page buffer pages accessed during file open /
+ * create.
+ *
+ * The tests for the values of base_raw_cnt and base_meta_cnt
+ *              just after file open exist to detect changes in library
+ * behavior. Assuming any such change is not indicative of
+ * other issues, these tests can be modified to reflect the
+ * change.
+ *
+ * JRM -- 2/23/17
+ *
+ *-------------------------------------------------------------------------
+ */
+static unsigned
+test_stats_collection(hid_t orig_fapl, const char *env_h5_drvr)
+{
+ char filename[FILENAME_LEN]; /* Filename to use */
+ hid_t file_id = -1; /* File ID */
+ hid_t fcpl = -1;
+ hid_t fapl = -1;
+ int i;
+ int num_elements = 1000;
+ size_t base_raw_cnt = 0;
+ size_t base_meta_cnt = 0;
+ haddr_t meta_addr = HADDR_UNDEF;
+ haddr_t raw_addr = HADDR_UNDEF;
+ int *data = NULL;
+ H5F_t *f = NULL;
+
+ TESTING("Statistics Collection");
+
+ h5_fixname(FILENAME[0], orig_fapl, filename, sizeof(filename));
+
+ if((fapl = H5Pcopy(orig_fapl)) < 0)
+ TEST_ERROR
+
+ if(set_multi_split(env_h5_drvr, fapl, sizeof(int)*200) != 0)
+ TEST_ERROR;
+
+ if((data = (int *)HDcalloc((size_t)num_elements, sizeof(int))) == NULL)
+ TEST_ERROR
+
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ TEST_ERROR;
+
+ if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 0, (hsize_t)1) < 0)
+ TEST_ERROR;
+
+ if(H5Pset_file_space_page_size(fcpl, sizeof(int)*200) < 0)
+ TEST_ERROR;
+
+ /* keep 5 pages at max in the page buffer */
+ if(H5Pset_page_buffer_size(fapl, sizeof(int)*1000, 20, 0) < 0)
+ TEST_ERROR;
+
+ if((file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* Get a pointer to the internal file object */
+ if(NULL == (f = (H5F_t *)H5I_object(file_id)))
+ FAIL_STACK_ERROR;
+
+ /* opening the file inserts one or more pages into the page buffer.
+ * Get the raw and meta counts now, so we can adjust the expected
+ * statistics accordingly.
+ */
+ HDassert(f);
+ HDassert(f->shared);
+ HDassert(f->shared->page_buf);
+
+ base_raw_cnt = f->shared->page_buf->raw_count;
+ base_meta_cnt = f->shared->page_buf->meta_count;
+
+ if(base_raw_cnt != 0)
+ TEST_ERROR;
+
+ if(base_meta_cnt != 1)
+ TEST_ERROR;
+
+ /* reset statistics before we begin the tests */
+ if(H5Freset_page_buffering_stats(file_id) < 0)
+ FAIL_STACK_ERROR;
+
+ if(HADDR_UNDEF == (meta_addr = H5MF_alloc(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, sizeof(int)*(size_t)num_elements)))
+ FAIL_STACK_ERROR;
+
+ if(HADDR_UNDEF == (raw_addr = H5MF_alloc(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, sizeof(int)*(size_t)num_elements)))
+ FAIL_STACK_ERROR;
+
+
+    /* initialize all the elements to have a value of -1 */
+ for(i=0 ; i<num_elements ; i++)
+ data[i] = -1;
+
+ if(H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*(size_t)num_elements, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*(size_t)num_elements, H5AC_ind_read_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ for(i=0 ; i<200 ; i++)
+ data[i] = i;
+
+ if(H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*200), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*400), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*100, H5AC_ind_read_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*200), sizeof(int)*100, H5AC_ind_read_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*600), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*800), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*600), sizeof(int)*100, H5AC_ind_read_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*500), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*700), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*900), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*400), sizeof(int)*200, H5AC_ind_read_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*100), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*300), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_write(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*800), sizeof(int)*182, H5AC_ind_read_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_read(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*200), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_read(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*400), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*100, H5AC_ind_read_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_read(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*200), sizeof(int)*100, H5AC_ind_read_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_read(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*600), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_read(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*800), sizeof(int)*100, H5AC_rawdata_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_read(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*400), sizeof(int)*100, H5AC_ind_read_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_read(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*600), sizeof(int)*200, H5AC_ind_read_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5F_block_read(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*800), sizeof(int)*100, H5AC_ind_read_dxpl_id, data) < 0)
+ FAIL_STACK_ERROR;
+
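+    /* For reference: each statistics array below is indexed by page type.
+     * Judging from the expected values (evictions[0] is offset by
+     * base_meta_cnt and evictions[1] by base_raw_cnt), index 0 appears to
+     * track metadata pages and index 1 raw data pages.
+     */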
+ if(f->shared->page_buf->accesses[0] != 8)
+ TEST_ERROR;
+ if(f->shared->page_buf->accesses[1] != 16)
+ TEST_ERROR;
+
+ if(f->shared->page_buf->bypasses[0] != 3)
+ TEST_ERROR;
+ if(f->shared->page_buf->bypasses[1] != 1)
+ TEST_ERROR;
+
+ if(f->shared->page_buf->hits[0] != 0)
+ TEST_ERROR;
+ if(f->shared->page_buf->hits[1] != 4)
+ TEST_ERROR;
+
+ if(f->shared->page_buf->misses[0] != 8)
+ TEST_ERROR;
+ if(f->shared->page_buf->misses[1] != 11)
+ TEST_ERROR;
+
+ if(f->shared->page_buf->evictions[0] != 5 + base_meta_cnt)
+ TEST_ERROR;
+ if(f->shared->page_buf->evictions[1] != 9 + base_raw_cnt)
+ TEST_ERROR;
+
+ {
+ unsigned accesses[2];
+ unsigned hits[2];
+ unsigned misses[2];
+ unsigned evictions[2];
+ unsigned bypasses[2];
+
+ if(H5Fget_page_buffering_stats(file_id, accesses, hits, misses, evictions, bypasses) < 0)
+ FAIL_STACK_ERROR;
+
+ if(accesses[0] != 8)
+ TEST_ERROR;
+ if(accesses[1] != 16)
+ TEST_ERROR;
+ if(bypasses[0] != 3)
+ TEST_ERROR;
+ if(bypasses[1] != 1)
+ TEST_ERROR;
+ if(hits[0] != 0)
+ TEST_ERROR;
+ if(hits[1] != 4)
+ TEST_ERROR;
+ if(misses[0] != 8)
+ TEST_ERROR;
+ if(misses[1] != 11)
+ TEST_ERROR;
+ if(evictions[0] != 5 + base_meta_cnt)
+ TEST_ERROR;
+ if(evictions[1] != 9 + base_raw_cnt)
+ TEST_ERROR;
+
+ if(H5Freset_page_buffering_stats(file_id) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Fget_page_buffering_stats(file_id, accesses, hits, misses, evictions, bypasses) < 0)
+ FAIL_STACK_ERROR;
+
+ if(accesses[0] != 0)
+ TEST_ERROR;
+ if(accesses[1] != 0)
+ TEST_ERROR;
+ if(bypasses[0] != 0)
+ TEST_ERROR;
+ if(bypasses[1] != 0)
+ TEST_ERROR;
+ if(hits[0] != 0)
+ TEST_ERROR;
+ if(hits[1] != 0)
+ TEST_ERROR;
+ if(misses[0] != 0)
+ TEST_ERROR;
+ if(misses[1] != 0)
+ TEST_ERROR;
+ if(evictions[0] != 0)
+ TEST_ERROR;
+ if(evictions[1] != 0)
+ TEST_ERROR;
+ } /* end block */
+
+ if(H5Fclose(file_id) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(fcpl) < 0)
+ FAIL_STACK_ERROR;
+ if(H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR;
+ HDfree(data);
+
+
+ PASSED()
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Pclose(fapl);
+ H5Pclose(fcpl);
+ H5Fclose(file_id);
+ if(data)
+ HDfree(data);
+ } H5E_END_TRY;
+
+ return 1;
+} /* test_stats_collection */
+
+
+/*-------------------------------------------------------------------------
+ * Function: verify_page_buffering_disabled()
+ *
+ * Purpose: This function should only be called in parallel
+ * builds.
+ *
+ * At present, page buffering should be disabled in parallel
+ * builds. Verify this.
+ *
+ * Return:      0 if test is successful
+ * 1 if test fails
+ *
+ * Programmer: John Mainzer
+ * 03/21/17
+ *
+ * Changes: None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+#ifdef H5_HAVE_PARALLEL
+static unsigned
+verify_page_buffering_disabled(hid_t orig_fapl, const char *env_h5_drvr)
+{
+ char filename[FILENAME_LEN]; /* Filename to use */
+ hid_t file_id = -1; /* File ID */
+ hid_t fcpl = -1;
+ hid_t fapl = -1;
+
+ TESTING("Page Buffering Disabled");
+ h5_fixname(FILENAME[0], orig_fapl, filename, sizeof(filename));
+
+
+ /* first, try to create a file with page buffering enabled */
+
+ if((fapl = H5Pcopy(orig_fapl)) < 0)
+ TEST_ERROR
+
+ if(set_multi_split(env_h5_drvr, fapl, 4096) != 0)
+ TEST_ERROR;
+
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 0, (hsize_t)1) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5Pset_file_space_page_size(fcpl, 4096) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5Pset_page_buffer_size(fapl, 4096*8, 0, 0) < 0)
+ FAIL_STACK_ERROR;
+
+ /* try to create the file -- should fail */
+ H5E_BEGIN_TRY {
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl);
+ } H5E_END_TRY;
+
+ if(file_id >= 0)
+ TEST_ERROR;
+
+ /* now, create a file, close it, and then try to open it with page
+ * buffering enabled.
+ */
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 0, (hsize_t)1) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5Pset_file_space_page_size(fcpl, 4096) < 0)
+ FAIL_STACK_ERROR;
+
+ /* create the file */
+ if((file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, H5P_DEFAULT)) < 0)
+ FAIL_STACK_ERROR;
+
+ /* close the file */
+ if(H5Fclose(file_id) < 0)
+ FAIL_STACK_ERROR;
+
+ /* try to open the file using the fapl prepared above which enables
+ * page buffering. Should fail.
+ */
+ H5E_BEGIN_TRY {
+ file_id = H5Fopen(filename, H5F_ACC_RDWR, fapl);
+ } H5E_END_TRY;
+
+ if(file_id >= 0)
+ TEST_ERROR;
+
+ if(H5Pclose(fcpl) < 0)
+ FAIL_STACK_ERROR;
+
+ if(H5Pclose(fapl) < 0)
+ FAIL_STACK_ERROR;
+
+ PASSED()
+
+ return 0;
+
+error:
+
+ H5E_BEGIN_TRY {
+ H5Pclose(fapl);
+ H5Pclose(fcpl);
+ H5Fclose(file_id);
+ } H5E_END_TRY;
+
+ return 1;
+
+} /* verify_page_buffering_disabled() */
+#endif /* H5_HAVE_PARALLEL */
+
+
+/*-------------------------------------------------------------------------
+ * Function: main()
+ *
+ * Purpose: Main function for the page buffer tests.
+ *
+ * Return:      0 if test is successful
+ * 1 if test fails
+ *
+ * Programmer: unknown
+ * ?? / ?? / ??
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+main(void)
+{
+ hid_t fapl = -1; /* File access property list for data files */
+ unsigned nerrors = 0; /* Cumulative error count */
+ const char *env_h5_drvr = NULL; /* File Driver value from environment */
+
+ h5_reset();
+
+ /* Get the VFD to use */
+ env_h5_drvr = HDgetenv("HDF5_DRIVER");
+ if(env_h5_drvr == NULL)
+ env_h5_drvr = "nomatch";
+
+    /* Temporarily skip testing with multi/split drivers:
+ * Page buffering depends on paged aggregation which is
+ * currently disabled for multi/split drivers.
+ */
+ if((0 == HDstrcmp(env_h5_drvr, "multi")) ||
+ (0 == HDstrcmp(env_h5_drvr, "split"))) {
+
+ SKIPPED()
+ HDputs("Skip page buffering test because paged aggregation is disabled for multi/split drivers");
+ HDexit(EXIT_SUCCESS);
+ } /* end if */
+
+ if((fapl = h5_fileaccess()) < 0) {
+ nerrors++;
+ PUTS_ERROR("Can't get VFD-dependent fapl")
+ } /* end if */
+
+#ifdef H5_HAVE_PARALLEL
+
+ HDputs("Page Buffering is disabled for parallel.");
+ nerrors += verify_page_buffering_disabled(fapl, env_h5_drvr);
+
+#else /* H5_HAVE_PARALLEL */
+
+ nerrors += test_args(fapl, env_h5_drvr);
+ nerrors += test_raw_data_handling(fapl, env_h5_drvr);
+ nerrors += test_lru_processing(fapl, env_h5_drvr);
+ nerrors += test_min_threshold(fapl, env_h5_drvr);
+ nerrors += test_stats_collection(fapl, env_h5_drvr);
+
+#endif /* H5_HAVE_PARALLEL */
+
+ h5_clean_files(FILENAME, fapl);
+
+ if(nerrors)
+ goto error;
+
+ HDputs("All Page Buffering tests passed.");
+
+ HDexit(EXIT_SUCCESS);
+
+error:
+
+ HDprintf("***** %d Page Buffering TEST%s FAILED! *****\n",
+ nerrors, nerrors > 1 ? "S" : "");
+
+ H5E_BEGIN_TRY {
+ H5Pclose(fapl);
+ } H5E_END_TRY;
+
+ HDexit(EXIT_FAILURE);
+
+} /* main() */
+
diff --git a/tools/test/h5diff/testfiles/h5diff_dset_idx1.h5 b/test/paged_nopersist.h5
index 3252303..b6c945a 100644
--- a/tools/test/h5diff/testfiles/h5diff_dset_idx1.h5
+++ b/test/paged_nopersist.h5
Binary files differ
diff --git a/test/paged_persist.h5 b/test/paged_persist.h5
new file mode 100644
index 0000000..f0ae836
--- /dev/null
+++ b/test/paged_persist.h5
Binary files differ
diff --git a/test/plugin.c b/test/plugin.c
index a70e885..8b4324d 100644
--- a/test/plugin.c
+++ b/test/plugin.c
@@ -4,32 +4,32 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic document set and is *
- * linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have access *
- * to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
- * Programmer: Raymond Lu
- * 13 February 2013
+ * Programmer: Raymond Lu
+ * 13 February 2013
*
- * Purpose: Tests the plugin module (H5PL)
+ * Purpose: Tests the plugin module (H5PL)
*/
#include "h5test.h"
#include "H5srcdir.h"
/*
- * This file needs to access private datatypes from the H5Z package.
+ * This file needs to access private datatypes from the H5Z and H5PL package.
*/
+#define H5PL_FRIEND
+#include "H5PLpkg.h"
#define H5Z_FRIEND
#include "H5Zpkg.h"
/* Filters for HDF5 internal test */
#define H5Z_FILTER_DYNLIB1 257
-#define H5Z_FILTER_DYNLIB2 258
+#define H5Z_FILTER_DYNLIB2 258
#define H5Z_FILTER_DYNLIB3 259
#define H5Z_FILTER_DYNLIB4 260
@@ -40,10 +40,10 @@ const char *FILENAME[] = {
#define FILENAME_BUF_SIZE 1024
/* Dataset names for testing filters */
-#define DSET_DEFLATE_NAME "deflate"
-#define DSET_DYNLIB1_NAME "dynlib1"
-#define DSET_DYNLIB2_NAME "dynlib2"
-#define DSET_DYNLIB4_NAME "dynlib4"
+#define DSET_DEFLATE_NAME "deflate"
+#define DSET_DYNLIB1_NAME "dynlib1"
+#define DSET_DYNLIB2_NAME "dynlib2"
+#define DSET_DYNLIB4_NAME "dynlib4"
/* Parameters for internal filter test */
#define FILTER_CHUNK_DIM1 2
@@ -62,65 +62,61 @@ const char *FILENAME[] = {
#define GROUP_ITERATION 1000
-int points_deflate[DSET_DIM1][DSET_DIM2],
- points_dynlib1[DSET_DIM1][DSET_DIM2],
- points_dynlib2[DSET_DIM1][DSET_DIM2],
- points_dynlib4[DSET_DIM1][DSET_DIM2],
- points_bzip2[DSET_DIM1][DSET_DIM2];
+int points_deflate[DSET_DIM1][DSET_DIM2],
+ points_dynlib1[DSET_DIM1][DSET_DIM2],
+ points_dynlib2[DSET_DIM1][DSET_DIM2],
+ points_dynlib4[DSET_DIM1][DSET_DIM2],
+ points_bzip2[DSET_DIM1][DSET_DIM2];
/*-------------------------------------------------------------------------
- * Function: test_filter_internal
+ * Function: test_filter_internal
*
- * Purpose: Tests writing entire data and partial data with filters
- *
- * Return: Success: 0
- * Failure: -1
- *
- * Programmer: Raymond Lu
- * 27 February 2013
+ * Purpose: Tests writing entire data and partial data with filters
*
+ * Return: Success: 0
+ * Failure: -1
*-------------------------------------------------------------------------
*/
static herr_t
test_filter_internal(hid_t fid, const char *name, hid_t dcpl)
{
- hid_t dataset; /* Dataset ID */
- hid_t dxpl; /* Dataset xfer property list ID */
- hid_t write_dxpl; /* Dataset xfer property list ID for writing */
- hid_t sid; /* Dataspace ID */
- const hsize_t size[2] = {DSET_DIM1, DSET_DIM2}; /* Dataspace dimensions */
- const hsize_t hs_offset[2] = {FILTER_HS_OFFSET1, FILTER_HS_OFFSET2}; /* Hyperslab offset */
- const hsize_t hs_size[2] = {FILTER_HS_SIZE1, FILTER_HS_SIZE2}; /* Hyperslab size */
- void *tconv_buf = NULL; /* Temporary conversion buffer */
- int points[DSET_DIM1][DSET_DIM2], check[DSET_DIM1][DSET_DIM2];
- size_t i, j; /* Local index variables */
- int n = 0;
+ herr_t ret_value = -1;
+ hid_t dataset = -1; /* Dataset ID */
+ hid_t dxpl = -1; /* Dataset xfer property list ID */
+ hid_t write_dxpl = -1; /* Dataset xfer property list ID for writing */
+ hid_t sid = -1; /* Dataspace ID */
+ const hsize_t size[2] = {DSET_DIM1, DSET_DIM2}; /* Dataspace dimensions */
+ const hsize_t hs_offset[2] = {FILTER_HS_OFFSET1, FILTER_HS_OFFSET2}; /* Hyperslab offset */
+ const hsize_t hs_size[2] = {FILTER_HS_SIZE1, FILTER_HS_SIZE2}; /* Hyperslab size */
+ void *tconv_buf = NULL; /* Temporary conversion buffer */
+ int points[DSET_DIM1][DSET_DIM2], check[DSET_DIM1][DSET_DIM2];
+ size_t i, j; /* Local index variables */
+ int n = 0;
/* Create the data space */
- if((sid = H5Screate_simple(2, size, NULL)) < 0) goto error;
+ if((sid = H5Screate_simple(2, size, NULL)) < 0) TEST_ERROR
/*
* Create a small conversion buffer to test strip mining. We
* might as well test all we can!
*/
- if((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) goto error;
+ if((dxpl = H5Pcreate(H5P_DATASET_XFER)) < 0) TEST_ERROR
tconv_buf = HDmalloc((size_t)1000);
- if(H5Pset_buffer(dxpl, (size_t)1000, tconv_buf, NULL) < 0) goto error;
+ if(H5Pset_buffer(dxpl, (size_t)1000, tconv_buf, NULL) < 0) TEST_ERROR
if((write_dxpl = H5Pcopy(dxpl)) < 0) TEST_ERROR;
TESTING(" filters (setup)");
/* Check if all the filters are available */
- if(H5Pall_filters_avail(dcpl)!=TRUE) {
+ if(H5Pall_filters_avail(dcpl) != TRUE) {
H5_FAILED();
- printf(" Line %d: Incorrect filter availability\n",__LINE__);
- goto error;
+ HDprintf(" Line %d: Incorrect filter availability\n", __LINE__);
+ TEST_ERROR
} /* end if */
/* Create the dataset */
- if((dataset = H5Dcreate2(fid, name, H5T_NATIVE_INT, sid, H5P_DEFAULT,
- dcpl, H5P_DEFAULT)) < 0) goto error;
+ if((dataset = H5Dcreate2(fid, name, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0) TEST_ERROR
PASSED();
@@ -130,20 +126,16 @@ test_filter_internal(hid_t fid, const char *name, hid_t dcpl)
*/
TESTING(" filters (uninitialized read)");
- if(H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check) < 0)
- TEST_ERROR;
-
- for(i=0; i<(size_t)size[0]; i++) {
- for(j=0; j<(size_t)size[1]; j++) {
- if(0!=check[i][j]) {
- H5_FAILED();
- printf(" Read a non-zero value.\n");
- printf(" At index %lu,%lu\n",
- (unsigned long)i, (unsigned long)j);
- goto error;
- }
- }
- }
+ if(H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check) < 0) TEST_ERROR;
+
+ for(i=0; i<(size_t)size[0]; i++)
+ for(j=0; j<(size_t)size[1]; j++)
+ if(0 != check[i][j]) {
+ H5_FAILED();
+ HDprintf(" Read a non-zero value.\n");
+ HDprintf(" At index %lu,%lu\n", (unsigned long)i, (unsigned long)j);
+ TEST_ERROR
+ } /* end if */
PASSED();
/*----------------------------------------------------------------------
@@ -154,14 +146,11 @@ test_filter_internal(hid_t fid, const char *name, hid_t dcpl)
TESTING(" filters (write)");
n = 0;
- for(i=0; i<size[0]; i++) {
- for(j=0; j<size[1]; j++) {
- points[i][j] = (int)(n++);
- }
- }
+ for(i=0; i<size[0]; i++)
+ for(j=0; j<size[1]; j++)
+ points[i][j] = (int)(n++);
- if(H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, write_dxpl, points) < 0)
- TEST_ERROR;
+ if(H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, write_dxpl, points) < 0) TEST_ERROR;
PASSED();
@@ -172,22 +161,19 @@ test_filter_internal(hid_t fid, const char *name, hid_t dcpl)
TESTING(" filters (read)");
/* Read the dataset back */
- if(H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check) < 0)
- TEST_ERROR;
-
- /* Check that the values read are the same as the values written */
- for(i=0; i<size[0]; i++) {
- for(j=0; j<size[1]; j++) {
- if(points[i][j] != check[i][j]) {
- H5_FAILED();
- fprintf(stderr," Read different values than written.\n");
- fprintf(stderr," At index %lu,%lu\n", (unsigned long)i, (unsigned long)j);
- fprintf(stderr," At original: %d\n", (int)points[i][j]);
- fprintf(stderr," At returned: %d\n", (int)check[i][j]);
- goto error;
- }
- }
- }
+ if(H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check) < 0) TEST_ERROR;
+
+ /* Check that the values read are the same as the values written */
+ for(i=0; i<size[0]; i++)
+ for(j=0; j<size[1]; j++)
+ if(points[i][j] != check[i][j]) {
+ H5_FAILED();
+ HDfprintf(stderr," Read different values than written.\n");
+ HDfprintf(stderr," At index %lu,%lu\n", (unsigned long)i, (unsigned long)j);
+ HDfprintf(stderr," At original: %d\n", (int)points[i][j]);
+ HDfprintf(stderr," At returned: %d\n", (int)check[i][j]);
+ TEST_ERROR
+ } /* end if */
PASSED();
@@ -200,30 +186,24 @@ test_filter_internal(hid_t fid, const char *name, hid_t dcpl)
*/
TESTING(" filters (modify)");
- for(i=0; i<size[0]; i++) {
- for(j=0; j<size[1]/2; j++) {
- points[i][j] = (int)HDrandom () % RANDOM_LIMIT;
- }
- }
- if(H5Dwrite (dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, write_dxpl, points) < 0)
- TEST_ERROR;
-
- /* Read the dataset back and check it */
- if(H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check) < 0)
- TEST_ERROR;
-
- /* Check that the values read are the same as the values written */
- for(i=0; i<size[0]; i++) {
- for(j=0; j<size[1]; j++) {
- if(points[i][j] != check[i][j]) {
- H5_FAILED();
- printf(" Read different values than written.\n");
- printf(" At index %lu,%lu\n",
- (unsigned long)i, (unsigned long)j);
- goto error;
- }
- }
- }
+ for(i=0; i<size[0]; i++)
+ for(j=0; j<size[1]/2; j++)
+ points[i][j] = (int)HDrandom () % RANDOM_LIMIT;
+
+ if(H5Dwrite(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, write_dxpl, points) < 0) TEST_ERROR;
+
+ /* Read the dataset back and check it */
+ if(H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check) < 0) TEST_ERROR;
+
+ /* Check that the values read are the same as the values written */
+ for(i=0; i<size[0]; i++)
+ for(j=0; j<size[1]; j++)
+ if(points[i][j] != check[i][j]) {
+ H5_FAILED();
+ HDprintf(" Read different values than written.\n");
+ HDprintf(" At index %lu,%lu\n", (unsigned long)i, (unsigned long)j);
+ TEST_ERROR
+ } /* end if */
PASSED();
@@ -238,19 +218,17 @@ test_filter_internal(hid_t fid, const char *name, hid_t dcpl)
if(H5Dclose(dataset) < 0) TEST_ERROR;
if((dataset = H5Dopen2(fid, name, H5P_DEFAULT)) < 0) TEST_ERROR;
- if(H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check) < 0)
- TEST_ERROR;
+ if(H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, dxpl, check) < 0) TEST_ERROR;
- /* Check that the values read are the same as the values written */
- for(i = 0; i < size[0]; i++)
- for(j = 0; j < size[1]; j++)
- if(points[i][j] != check[i][j]) {
- H5_FAILED();
- printf(" Read different values than written.\n");
- printf(" At index %lu,%lu\n",
- (unsigned long)i, (unsigned long)j);
- goto error;
- } /* end if */
+ /* Check that the values read are the same as the values written */
+ for(i = 0; i < size[0]; i++)
+ for(j = 0; j < size[1]; j++)
+ if(points[i][j] != check[i][j]) {
+ H5_FAILED();
+ HDprintf(" Read different values than written.\n");
+ HDprintf(" At index %lu,%lu\n", (unsigned long)i, (unsigned long)j);
+ TEST_ERROR
+ } /* end if */
PASSED();
@@ -262,249 +240,228 @@ test_filter_internal(hid_t fid, const char *name, hid_t dcpl)
*/
TESTING(" filters (partial I/O)");
- for(i=0; i<(size_t)hs_size[0]; i++) {
- for(j=0; j<(size_t)hs_size[1]; j++) {
- points[(size_t)hs_offset[0]+i][(size_t)hs_offset[1]+j] = (int)HDrandom() % RANDOM_LIMIT;
- }
- }
- if(H5Sselect_hyperslab(sid, H5S_SELECT_SET, hs_offset, NULL, hs_size,
- NULL) < 0) TEST_ERROR;
+ for(i=0; i<(size_t)hs_size[0]; i++)
+ for(j=0; j<(size_t)hs_size[1]; j++)
+ points[(size_t)hs_offset[0]+i][(size_t)hs_offset[1]+j] = (int)HDrandom() % RANDOM_LIMIT;
+
+ if(H5Sselect_hyperslab(sid, H5S_SELECT_SET, hs_offset, NULL, hs_size, NULL) < 0) TEST_ERROR;
/* (Use the "read" DXPL because partial I/O on corrupted data test needs to ignore errors during writing) */
- if(H5Dwrite (dataset, H5T_NATIVE_INT, sid, sid, dxpl, points) < 0)
- TEST_ERROR;
-
- if(H5Dread (dataset, H5T_NATIVE_INT, sid, sid, dxpl, check) < 0)
- TEST_ERROR;
-
- /* Check that the values read are the same as the values written */
- for(i=0; i<(size_t)hs_size[0]; i++) {
- for(j=0; j<(size_t)hs_size[1]; j++) {
- if(points[(size_t)hs_offset[0]+i][(size_t)hs_offset[1]+j] !=
- check[(size_t)hs_offset[0]+i][(size_t)hs_offset[1]+j]) {
- H5_FAILED();
- fprintf(stderr," Read different values than written.\n");
- fprintf(stderr," At index %lu,%lu\n",
- (unsigned long)((size_t)hs_offset[0]+i),
- (unsigned long)((size_t)hs_offset[1]+j));
- fprintf(stderr," At original: %d\n",
- (int)points[(size_t)hs_offset[0]+i][(size_t)hs_offset[1]+j]);
- fprintf(stderr," At returned: %d\n",
- (int)check[(size_t)hs_offset[0]+i][(size_t)hs_offset[1]+j]);
- goto error;
- }
- }
- }
+ if(H5Dwrite(dataset, H5T_NATIVE_INT, sid, sid, dxpl, points) < 0) TEST_ERROR;
+
+ if(H5Dread(dataset, H5T_NATIVE_INT, sid, sid, dxpl, check) < 0) TEST_ERROR;
+
+ /* Check that the values read are the same as the values written */
+ for(i=0; i<(size_t)hs_size[0]; i++)
+ for(j=0; j<(size_t)hs_size[1]; j++)
+ if(points[(size_t)hs_offset[0]+i][(size_t)hs_offset[1]+j] != check[(size_t)hs_offset[0]+i][(size_t)hs_offset[1]+j]) {
+ H5_FAILED();
+ HDfprintf(stderr," Read different values than written.\n");
+ HDfprintf(stderr," At index %lu,%lu\n", (unsigned long)((size_t)hs_offset[0]+i), (unsigned long)((size_t)hs_offset[1]+j));
+ HDfprintf(stderr," At original: %d\n", (int)points[(size_t)hs_offset[0]+i][(size_t)hs_offset[1]+j]);
+ HDfprintf(stderr," At returned: %d\n", (int)check[(size_t)hs_offset[0]+i][(size_t)hs_offset[1]+j]);
+ TEST_ERROR
+ } /* end if */
PASSED();
- /* Save the data written to the file for later comparison when the file
+ /* Save the data written to the file for later comparison when the file
* is reopened for read test */
- for(i=0; i<size[0]; i++) {
- for(j=0; j<size[1]; j++) {
- if(!HDstrcmp(name, DSET_DEFLATE_NAME)) {
- points_deflate[i][j] = points[i][j];
- } else if(!HDstrcmp(name, DSET_DYNLIB1_NAME)) {
- points_dynlib1[i][j] = points[i][j];
- } else if(!HDstrcmp(name, DSET_DYNLIB2_NAME)) {
- points_dynlib2[i][j] = points[i][j];
- } else if(!HDstrcmp(name, DSET_DYNLIB4_NAME)) {
- points_dynlib4[i][j] = points[i][j];
- }
- }
- }
+ for(i=0; i<size[0]; i++)
+ for(j=0; j<size[1]; j++)
+ if(!HDstrcmp(name, DSET_DEFLATE_NAME))
+ points_deflate[i][j] = points[i][j];
+ else if(!HDstrcmp(name, DSET_DYNLIB1_NAME))
+ points_dynlib1[i][j] = points[i][j];
+ else if(!HDstrcmp(name, DSET_DYNLIB2_NAME))
+ points_dynlib2[i][j] = points[i][j];
+ else if(!HDstrcmp(name, DSET_DYNLIB4_NAME))
+ points_dynlib4[i][j] = points[i][j];
+
+ ret_value = 0;
+error:
/* Clean up objects used for this test */
- if(H5Dclose (dataset) < 0) goto error;
- if(H5Sclose (sid) < 0) goto error;
- if(H5Pclose (dxpl) < 0) goto error;
- free (tconv_buf);
-
- return(0);
+ H5E_BEGIN_TRY {
+ H5Dclose(dataset);
+ H5Sclose(sid);
+ H5Pclose(dxpl);
+ } H5E_END_TRY
-error:
if(tconv_buf)
- free (tconv_buf);
- return -1;
+ HDfree(tconv_buf);
+ return ret_value;
}
/*-------------------------------------------------------------------------
- * Function: test_filters_for_datasets
- *
- * Purpose: Tests creating datasets and writing data with dynamically
- * loaded filters
+ * Function: test_filters_for_datasets
*
- * Return: Success: 0
- * Failure: -1
- *
- * Programmer: Raymond Lu
- * 14 March 2013
+ * Purpose: Tests creating datasets and writing data with dynamically loaded filters
*
+ * Return: Success: 0
+ * Failure: -1
*-------------------------------------------------------------------------
*/
static herr_t
test_filters_for_datasets(hid_t file)
{
- hid_t dc; /* Dataset creation property list ID */
- const hsize_t chunk_size[2] = {FILTER_CHUNK_DIM1, FILTER_CHUNK_DIM2}; /* Chunk dimensions */
- unsigned int compress_level = 9;
- unsigned int dynlib4_values[4];
+ herr_t ret_value = -1;
+ hid_t dc = -1; /* Dataset creation property list ID */
+ const hsize_t chunk_size[2] = {FILTER_CHUNK_DIM1, FILTER_CHUNK_DIM2}; /* Chunk dimensions */
+ unsigned int compress_level = 9;
+ unsigned int dynlib4_values[4];
/*----------------------------------------------------------
* STEP 1: Test deflation by itself.
*----------------------------------------------------------
*/
+ HDputs("Testing deflate filter");
#ifdef H5_HAVE_FILTER_DEFLATE
- puts("Testing deflate filter");
- if((dc = H5Pcreate(H5P_DATASET_CREATE)) < 0) goto error;
- if(H5Pset_chunk (dc, 2, chunk_size) < 0) goto error;
- if(H5Pset_deflate (dc, 6) < 0) goto error;
+ if((dc = H5Pcreate(H5P_DATASET_CREATE)) < 0) TEST_ERROR
+ if(H5Pset_chunk(dc, 2, chunk_size) < 0) TEST_ERROR
+ if(H5Pset_deflate(dc, 6) < 0) TEST_ERROR
- if(test_filter_internal(file,DSET_DEFLATE_NAME,dc) < 0) goto error;
+ if(test_filter_internal(file, DSET_DEFLATE_NAME, dc) < 0) TEST_ERROR
/* Clean up objects used for this test */
- if(H5Pclose (dc) < 0) goto error;
+ if(H5Pclose(dc) < 0) TEST_ERROR
#else /* H5_HAVE_FILTER_DEFLATE */
- TESTING("deflate filter");
SKIPPED();
- puts(" Deflate filter not enabled");
+ HDputs(" Deflate filter not enabled");
#endif /* H5_HAVE_FILTER_DEFLATE */
/*----------------------------------------------------------
* STEP 2: Test DYNLIB1 by itself.
*----------------------------------------------------------
*/
- puts("Testing DYNLIB1 filter");
- if((dc = H5Pcreate(H5P_DATASET_CREATE)) < 0) goto error;
- if(H5Pset_chunk (dc, 2, chunk_size) < 0) goto error;
- if(H5Pset_filter (dc, H5Z_FILTER_DYNLIB1, H5Z_FLAG_MANDATORY, (size_t)1, &compress_level) < 0) goto error;
+ HDputs(" DYNLIB1 filter");
+ if((dc = H5Pcreate(H5P_DATASET_CREATE)) < 0) TEST_ERROR
+ if(H5Pset_chunk(dc, 2, chunk_size) < 0) TEST_ERROR
+ if(H5Pset_filter(dc, H5Z_FILTER_DYNLIB1, H5Z_FLAG_MANDATORY, (size_t)1, &compress_level) < 0) TEST_ERROR
- if(test_filter_internal(file,DSET_DYNLIB1_NAME,dc) < 0) goto error;
+ if(test_filter_internal(file, DSET_DYNLIB1_NAME, dc) < 0) TEST_ERROR
/* Clean up objects used for this test */
- if(H5Pclose (dc) < 0) goto error;
+ if(H5Pclose(dc) < 0) TEST_ERROR
- /* Unregister the dynamic filter DYNLIB1 for testing purpose. The next time when this test is run for
+ /* Unregister the dynamic filter DYNLIB1 for testing purpose. The next time when this test is run for
* the new file format, the library's H5PL code has to search in the table of loaded plugin libraries
* for this filter. */
- if(H5Zunregister(H5Z_FILTER_DYNLIB1) < 0) goto error;
+ if(H5Zunregister(H5Z_FILTER_DYNLIB1) < 0) TEST_ERROR
/*----------------------------------------------------------
* STEP 3: Test DYNLIB2 by itself.
*----------------------------------------------------------
*/
- puts("Testing DYNLIB2 filter");
- if((dc = H5Pcreate(H5P_DATASET_CREATE)) < 0) goto error;
- if(H5Pset_chunk (dc, 2, chunk_size) < 0) goto error;
- if(H5Pset_filter (dc, H5Z_FILTER_DYNLIB2, H5Z_FLAG_MANDATORY, 0, NULL) < 0) goto error;
+ HDputs(" DYNLIB2 filter");
+ if((dc = H5Pcreate(H5P_DATASET_CREATE)) < 0) TEST_ERROR
+ if(H5Pset_chunk(dc, 2, chunk_size) < 0) TEST_ERROR
+ if(H5Pset_filter(dc, H5Z_FILTER_DYNLIB2, H5Z_FLAG_MANDATORY, 0, NULL) < 0) TEST_ERROR
- if(test_filter_internal(file,DSET_DYNLIB2_NAME,dc) < 0) goto error;
+ if(test_filter_internal(file,DSET_DYNLIB2_NAME,dc) < 0) TEST_ERROR
/* Clean up objects used for this test */
- if(H5Pclose (dc) < 0) goto error;
+ if(H5Pclose(dc) < 0) TEST_ERROR
- /* Unregister the dynamic filter DYNLIB2 for testing purpose. The next time when this test is run for
+ /* Unregister the dynamic filter DYNLIB2 for testing purpose. The next time when this test is run for
* the new file format, the library's H5PL code has to search in the table of loaded plugin libraries
* for this filter. */
- if(H5Zunregister(H5Z_FILTER_DYNLIB2) < 0) goto error;
+ if(H5Zunregister(H5Z_FILTER_DYNLIB2) < 0) TEST_ERROR
/*----------------------------------------------------------
* STEP 4: Test DYNLIB4 by itself.
*----------------------------------------------------------
*/
- puts("Testing DYNLIB4 filter");
- if((dc = H5Pcreate(H5P_DATASET_CREATE)) < 0) goto error;
- if(H5Pset_chunk (dc, 2, chunk_size) < 0) goto error;
+ HDputs(" DYNLIB4 filter");
+ if((dc = H5Pcreate(H5P_DATASET_CREATE)) < 0) TEST_ERROR
+ if(H5Pset_chunk(dc, 2, chunk_size) < 0) TEST_ERROR
dynlib4_values[0] = 9;
- if(H5get_libversion(&dynlib4_values[1], &dynlib4_values[2], &dynlib4_values[3]) < 0) goto error;
- if(H5Pset_filter (dc, H5Z_FILTER_DYNLIB4, H5Z_FLAG_MANDATORY, (size_t)4, dynlib4_values) < 0) goto error;
+ if(H5get_libversion(&dynlib4_values[1], &dynlib4_values[2], &dynlib4_values[3]) < 0) TEST_ERROR
+ if(H5Pset_filter(dc, H5Z_FILTER_DYNLIB4, H5Z_FLAG_MANDATORY, (size_t)4, dynlib4_values) < 0) TEST_ERROR
- if(test_filter_internal(file,DSET_DYNLIB4_NAME,dc) < 0) goto error;
+ if(test_filter_internal(file, DSET_DYNLIB4_NAME, dc) < 0) TEST_ERROR
/* Clean up objects used for this test */
- if(H5Pclose (dc) < 0) goto error;
+ if(H5Pclose(dc) < 0) TEST_ERROR
/* Unregister the dynamic filter DYNLIB4 for testing purpose. The next time when this test is run for
* the new file format, the library's H5PL code has to search in the table of loaded plugin libraries
* for this filter. */
- if(H5Zunregister(H5Z_FILTER_DYNLIB4) < 0) goto error;
+ if(H5Zunregister(H5Z_FILTER_DYNLIB4) < 0) TEST_ERROR
- return 0;
+ ret_value = 0;
error:
- return -1;
+ /* Clean up objects used for this test */
+ H5E_BEGIN_TRY {
+ H5Pclose(dc);
+ } H5E_END_TRY
+
+ return ret_value;
}
/*-------------------------------------------------------------------------
- * Function: test_read_data
- *
- * Purpose: Tests reading data and compares values
- *
- * Return: Success: 0
- * Failure: -1
+ * Function: test_read_data
*
- * Programmer: Raymond Lu
- * 14 March 2013
+ * Purpose: Tests reading data and compares values
*
+ * Return: Success: 0
+ * Failure: -1
*-------------------------------------------------------------------------
*/
static herr_t
test_read_data(hid_t dataset, int *origin_data)
{
- int check[DSET_DIM1][DSET_DIM2];
- const hsize_t size[2] = {DSET_DIM1, DSET_DIM2}; /* Dataspace dimensions */
- int *data_p = origin_data;
- size_t i, j; /* Local index variables */
+ herr_t ret_value = -1;
+ int check[DSET_DIM1][DSET_DIM2];
+ const hsize_t size[2] = {DSET_DIM1, DSET_DIM2}; /* Dataspace dimensions */
+ int *data_p = origin_data;
+ size_t i, j; /* Local index variables */
/* Read the dataset back */
- if(H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, check) < 0)
- TEST_ERROR;
+ if(H5Dread(dataset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, check) < 0) TEST_ERROR;
/* Check that the values read are the same as the values written */
- for(i=0; i<size[0]; i++) {
+ for(i=0; i<size[0]; i++)
for(j=0; j<size[1]; j++) {
- if(*data_p != check[i][j]) {
- H5_FAILED();
- fprintf(stderr," Read different values than written.\n");
- fprintf(stderr," At index %lu,%lu\n", (unsigned long)i, (unsigned long)j);
- fprintf(stderr," At original: %d\n", *data_p);
- fprintf(stderr," At returned: %d\n", (int)check[i][j]);
- goto error;
- }
+ if(*data_p != check[i][j]) {
+ H5_FAILED();
+ HDfprintf(stderr," Read different values than written.\n");
+ HDfprintf(stderr," At index %lu,%lu\n", (unsigned long)i, (unsigned long)j);
+ HDfprintf(stderr," At original: %d\n", *data_p);
+ HDfprintf(stderr," At returned: %d\n", (int)check[i][j]);
+ TEST_ERROR
+ } /* end if */
data_p++;
- }
- }
+ }
PASSED();
- return 0;
+ ret_value = 0;
error:
- return -1;
+ return ret_value;
}
/*-------------------------------------------------------------------------
- * Function: test_read_with_filters
+ * Function: test_read_with_filters
*
- * Purpose: Tests reading dataset created with dynamically loaded filters
- *
- * Return: Success: 0
- * Failure: -1
- *
- * Programmer: Raymond Lu
- * 14 March 2013
+ * Purpose: Tests reading dataset created with dynamically loaded filters
*
+ * Return: Success: 0
+ * Failure: -1
*-------------------------------------------------------------------------
*/
static herr_t
test_read_with_filters(hid_t file)
{
- hid_t dset; /* Dataset ID */
+ herr_t ret_value = -1;
+ hid_t dset = -1; /* Dataset ID */
/*----------------------------------------------------------
* STEP 1: Test deflation by itself.
*----------------------------------------------------------
*/
-#ifdef H5_HAVE_FILTER_DEFLATE
- TESTING("Testing deflate filter");
+ TESTING("deflate filter");
+#ifdef H5_HAVE_FILTER_DEFLATE
if(H5Zfilter_avail(H5Z_FILTER_DEFLATE) != TRUE) TEST_ERROR
if((dset = H5Dopen2(file,DSET_DEFLATE_NAME,H5P_DEFAULT)) < 0) TEST_ERROR
@@ -515,18 +472,17 @@ test_read_with_filters(hid_t file)
/* Clean up objects used for this test */
#else /* H5_HAVE_FILTER_DEFLATE */
- TESTING("deflate filter");
SKIPPED();
- puts(" Deflate filter not enabled");
+ HDputs(" Deflate filter not enabled");
#endif /* H5_HAVE_FILTER_DEFLATE */
/*----------------------------------------------------------
* STEP 2: Test DYNLIB1 by itself.
*----------------------------------------------------------
*/
- TESTING("Testing DYNLIB1 filter");
+ TESTING(" DYNLIB1 filter");
- if((dset = H5Dopen2(file,DSET_DYNLIB1_NAME,H5P_DEFAULT)) < 0) TEST_ERROR
+ if((dset = H5Dopen2(file, DSET_DYNLIB1_NAME, H5P_DEFAULT)) < 0) TEST_ERROR
if(test_read_data(dset, (int *)points_dynlib1) < 0) TEST_ERROR
@@ -536,9 +492,9 @@ test_read_with_filters(hid_t file)
* STEP 3: Test Bogus2 by itself.
*----------------------------------------------------------
*/
- TESTING("Testing DYNLIB2 filter");
+ TESTING(" DYNLIB2 filter");
- if((dset = H5Dopen2(file,DSET_DYNLIB2_NAME,H5P_DEFAULT)) < 0) TEST_ERROR
+ if((dset = H5Dopen2(file, DSET_DYNLIB2_NAME, H5P_DEFAULT)) < 0) TEST_ERROR
if(test_read_data(dset, (int *)points_dynlib2) < 0) TEST_ERROR
@@ -548,7 +504,7 @@ test_read_with_filters(hid_t file)
* STEP 4: Test DYNLIB4 by itself.
*----------------------------------------------------------
*/
- TESTING("Testing DYNLIB4 filter");
+ TESTING(" DYNLIB4 filter");
if((dset = H5Dopen2(file,DSET_DYNLIB4_NAME,H5P_DEFAULT)) < 0) TEST_ERROR
@@ -556,27 +512,33 @@ test_read_with_filters(hid_t file)
if(H5Dclose(dset) < 0) TEST_ERROR
- return 0;
+ ret_value = 0;
error:
- return -1;
+ /* Clean up objects used for this test */
+ H5E_BEGIN_TRY {
+ H5Dclose(dset);
+ } H5E_END_TRY
+
+ return ret_value;
}
/*-------------------------------------------------------------------------
- * Function: test_noread_data
+ * Function: test_noread_data
*
- * Purpose: Tests not reading data
+ * Purpose: Tests not reading data
*
- * Return: Success: 0
- * Failure: -1
+ * Return: Success: 0
+ * Failure: -1
*
*-------------------------------------------------------------------------
*/
static herr_t
test_noread_data(hid_t dataset)
{
+ herr_t ret_value = -1;
int check[DSET_DIM1][DSET_DIM2];
- herr_t ret;
+ herr_t ret = -1;
/* Read the dataset back */
H5E_BEGIN_TRY {
@@ -586,113 +548,122 @@ test_noread_data(hid_t dataset)
TEST_ERROR
PASSED();
- return 0;
+ ret_value = 0;
error:
- return -1;
+ return ret_value;
}
/*-------------------------------------------------------------------------
- * Function: test_noread_with_filters
- *
- * Purpose: Tests reading dataset created with dynamically loaded filters disabled
+ * Function: test_noread_with_filters
*
- * Return: Success: 0
- * Failure: -1
+ * Purpose:      Tests reading a dataset created with dynamically loaded filters while the filter plugins are disabled
*
+ * Return: Success: 0
+ * Failure: -1
*-------------------------------------------------------------------------
*/
static herr_t
test_noread_with_filters(hid_t file)
{
- hid_t dset; /* Dataset ID */
- unsigned plugin_state; /* status of plugins */
- TESTING("Testing DYNLIB1 filter with plugins disabled");
+ herr_t ret_value = -1;
+ hid_t dset = -1; /* Dataset ID */
+ unsigned plugin_state; /* status of plugins */
+
+ TESTING("DYNLIB1 filter with plugins disabled");
/* disable filter plugin */
if(H5PLget_loading_state(&plugin_state) < 0) TEST_ERROR
plugin_state = plugin_state & ~H5PL_FILTER_PLUGIN;
if(H5PLset_loading_state(plugin_state) < 0) TEST_ERROR
- if((dset = H5Dopen2(file,DSET_DYNLIB1_NAME,H5P_DEFAULT)) < 0) TEST_ERROR
+ if((dset = H5Dopen2(file, DSET_DYNLIB1_NAME, H5P_DEFAULT)) < 0) TEST_ERROR
if(test_noread_data(dset) < 0) TEST_ERROR
if(H5Dclose(dset) < 0) TEST_ERROR
- /* re-enable filter plugin */
- plugin_state = plugin_state | H5PL_FILTER_PLUGIN;
- if(H5PLset_loading_state(plugin_state) < 0) TEST_ERROR
-
- return 0;
+ ret_value = 0;
error:
/* re-enable filter plugin */
plugin_state = plugin_state | H5PL_FILTER_PLUGIN;
- if(H5PLset_loading_state(plugin_state) < 0) TEST_ERROR
- return -1;
+ H5PLset_loading_state(plugin_state);
+
+ /* Clean up objects used for this test */
+ H5E_BEGIN_TRY {
+ H5Dclose(dset);
+ } H5E_END_TRY
+
+ return ret_value;
}
/*-------------------------------------------------------------------------
- * Function: test_filters_for_groups
- *
- * Purpose: Tests creating group with dynamically loaded filters
+ * Function: test_filters_for_groups
*
- * Return: Success: 0
- * Failure: -1
- *
- * Programmer: Raymond Lu
- * 1 April 2013
+ * Purpose:      Tests creating a group with dynamically loaded filters
*
+ * Return: Success: 0
+ * Failure: -1
*-------------------------------------------------------------------------
*/
static herr_t
test_filters_for_groups(hid_t file)
{
- hid_t gcpl, gid, group;
- int i;
- char gname[256];
+ herr_t ret_value = -1;
+ hid_t gcpl = -1;
+ hid_t gid = -1;
+ hid_t group = -1;
+ int i;
+ char gname[256];
+
+ TESTING("DYNLIB3 filter for group");
- TESTING("Testing DYNLIB3 filter for group");
+ if((gcpl = H5Pcreate(H5P_GROUP_CREATE)) < 0) TEST_ERROR
- if((gcpl = H5Pcreate(H5P_GROUP_CREATE)) < 0) goto error;
-
- /* Use DYNLIB3 for creating groups */
- if(H5Pset_filter (gcpl, H5Z_FILTER_DYNLIB3, H5Z_FLAG_MANDATORY, (size_t)0, NULL) < 0) goto error;
+ /* Use DYNLIB3 for creating groups */
+ if(H5Pset_filter (gcpl, H5Z_FILTER_DYNLIB3, H5Z_FLAG_MANDATORY, (size_t)0, NULL) < 0) TEST_ERROR
/* Create a group using this filter */
- if((gid = H5Gcreate2(file, "group1", H5P_DEFAULT, gcpl, H5P_DEFAULT)) < 0) goto error;
+ if((gid = H5Gcreate2(file, "group1", H5P_DEFAULT, gcpl, H5P_DEFAULT)) < 0) TEST_ERROR
/* Create multiple groups under "group1" */
- for (i=0; i < GROUP_ITERATION; i++) {
- sprintf(gname, "group_%d", i);
- if((group = H5Gcreate2(gid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) goto error;
- if(H5Gclose(group) < 0) goto error;
+ for(i=0; i < GROUP_ITERATION; i++) {
+ HDsprintf(gname, "group_%d", i);
+ if((group = H5Gcreate2(gid, gname, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR
+ if(H5Gclose(group) < 0) TEST_ERROR
}
/* Close the group */
- if(H5Gclose(gid) < 0) goto error;
+ if(H5Gclose(gid) < 0) TEST_ERROR
/* Clean up objects used for this test */
- if(H5Pclose (gcpl) < 0) goto error;
+ if(H5Pclose(gcpl) < 0) TEST_ERROR
PASSED();
- return 0;
+ ret_value = 0;
error:
- return -1;
+ /* Clean up objects used for this test */
+ H5E_BEGIN_TRY {
+ H5Gclose(group);
+ H5Gclose(gid);
+ H5Pclose(gcpl);
+ } H5E_END_TRY
+
+ return ret_value;
}
/*-------------------------------------------------------------------------
- * Function: test_groups_with_filters
+ * Function: test_groups_with_filters
*
- * Purpose: Tests opening group with dynamically loaded filters
+ * Purpose:      Tests opening a group with dynamically loaded filters
*
- * Return: Success: 0
- * Failure: -1
+ * Return: Success: 0
+ * Failure: -1
*
- * Programmer: Raymond Lu
+ * Programmer: Raymond Lu
* 1 April 2013
*
*-------------------------------------------------------------------------
@@ -700,70 +671,323 @@ error:
static herr_t
test_groups_with_filters(hid_t file)
{
- hid_t gid, group;
+ herr_t ret_value = -1;
+ hid_t gid;
+ hid_t group;
int i;
char gname[256];
- TESTING("Testing opening groups with DYNLIB3 filter");
+ TESTING("opening groups with DYNLIB3 filter");
/* Open the top group */
- if((gid = H5Gopen2(file, "group1", H5P_DEFAULT)) < 0) goto error;
+ if((gid = H5Gopen2(file, "group1", H5P_DEFAULT)) < 0) TEST_ERROR
/* Create multiple groups under "group1" */
- for (i=0; i < GROUP_ITERATION; i++) {
- sprintf(gname, "group_%d", i);
- if((group = H5Gopen2(gid, gname, H5P_DEFAULT)) < 0) goto error;
- if(H5Gclose(group) < 0) goto error;
+ for(i=0; i < GROUP_ITERATION; i++) {
+ HDsprintf(gname, "group_%d", i);
+ if((group = H5Gopen2(gid, gname, H5P_DEFAULT)) < 0) TEST_ERROR
+ if(H5Gclose(group) < 0) TEST_ERROR
}
/* Close the group */
- if(H5Gclose(gid) < 0) goto error;
+ if(H5Gclose(gid) < 0) TEST_ERROR
PASSED();
- return 0;
+ ret_value = 0;
+
+error:
+ /* Clean up objects used for this test */
+ H5E_BEGIN_TRY {
+ H5Gclose(group);
+ H5Gclose(gid);
+ } H5E_END_TRY
+
+ return ret_value;
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: test_filter_path_apis
+ *
+ * Purpose: Tests accessing the path table for dynamically loaded filters
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *-------------------------------------------------------------------------
+ */
+static herr_t
+test_filter_path_apis(void)
+{
+ herr_t ret_value = -1;
+ unsigned int i;
+ unsigned int ndx;
+ herr_t ret;
+ ssize_t pathlen = -1;
+ char pathname[256];
+ char tempname[256];
+
+ HDputs("Testing access to the filter path table");
+
+ if(H5Zfilter_avail(H5Z_FILTER_DYNLIB1) != TRUE) TEST_ERROR
+
+ H5PLsize(&ndx);
+
+ TESTING(" remove");
+ /* Remove all existing paths*/
+ for(i=ndx; i > 0; i--)
+ if(H5PLremove(i-1) < 0) {
+ HDfprintf(stderr," at %d: %s\n", i, pathname);
+ TEST_ERROR
+ } /* end if */
+ /* Verify the table is empty */
+ H5PLsize(&ndx);
+ if(ndx > 0) TEST_ERROR
+ PASSED();
+
+ TESTING(" remove (exceed min)");
+ /* Exceed the min path removal */
+ H5E_BEGIN_TRY {
+ ret = H5PLremove(0);
+ } H5E_END_TRY
+ if(ret >= 0) TEST_ERROR
+ PASSED();
+
+ TESTING(" append");
+ /* Create multiple paths to fill table */
+ for(i=0; i < H5PL_MAX_PATH_NUM; i++) {
+ HDsprintf(pathname, "a_path_%d", i);
+ if(H5PLappend(pathname) < 0) {
+ HDfprintf(stderr," at %d: %s\n", i, pathname);
+ TEST_ERROR
+ }
+ }
+ /* Verify the table is full */
+ H5PLsize(&ndx);
+ if(ndx != H5PL_MAX_PATH_NUM) TEST_ERROR
+ PASSED();
+
+ TESTING(" append (exceed)");
+ /* Exceed the max path append */
+ H5E_BEGIN_TRY {
+ HDsprintf(pathname, "a_path_%d", H5PL_MAX_PATH_NUM);
+ ret = H5PLappend(pathname);
+ } H5E_END_TRY
+ if(ret >= 0) TEST_ERROR
+ PASSED();
+
+ TESTING(" remove (exceed max)");
+ /* Exceed the max path removal */
+ H5E_BEGIN_TRY {
+ ret = H5PLremove(H5PL_MAX_PATH_NUM);
+ } H5E_END_TRY
+ if(ret >= 0) TEST_ERROR
+ PASSED();
+
+ TESTING(" get (path name)");
+ if((pathlen = H5PLget(0, NULL, 0)) <= 0) {
+ HDfprintf(stderr," get path 0 length failed\n");
+ TEST_ERROR
+ }
+ if(pathlen != 8) TEST_ERROR
+
+ if((pathlen = H5PLget(0, pathname, 256)) <= 0) {
+ HDfprintf(stderr," get 0 len: %d : %s\n", pathlen, pathname);
+ TEST_ERROR
+ }
+ if(HDstrcmp(pathname, "a_path_0") != 0) {
+ HDfprintf(stderr," get 0: %s\n", pathname);
+ TEST_ERROR
+ }
+ PASSED();
+
+ TESTING(" get (bounds)");
+ if((pathlen = H5PLget(1, pathname, 256)) <= 0) TEST_ERROR
+ if(HDstrcmp(pathname, "a_path_1") != 0) {
+ HDfprintf(stderr," get 1: %s\n", pathname);
+ TEST_ERROR
+ }
+ if((pathlen = H5PLget(H5PL_MAX_PATH_NUM - 1, pathname, 256)) <= 0) TEST_ERROR
+ HDsprintf(tempname, "a_path_%d", H5PL_MAX_PATH_NUM - 1);
+ if(HDstrcmp(pathname, tempname) != 0) {
+ HDfprintf(stderr," get %d: %s\n", H5PL_MAX_PATH_NUM - 1, pathname);
+ TEST_ERROR
+ }
+ PASSED();
+
+ TESTING(" get (bounds exceed)");
+ H5E_BEGIN_TRY {
+ pathlen = H5PLget(H5PL_MAX_PATH_NUM, NULL, 0);
+ } H5E_END_TRY
+ if(pathlen > 0) TEST_ERROR
+ PASSED();
+
+ TESTING(" remove (verify for prepend)");
+ /* Remove one path*/
+ if(H5PLremove(8) < 0) TEST_ERROR
+
+ /* Verify that the entries were moved */
+ if((pathlen = H5PLget(8, pathname, 256)) <= 0) TEST_ERROR
+ if(HDstrcmp(pathname, "a_path_9") != 0) {
+ HDfprintf(stderr," get 8: %s\n", pathname);
+ TEST_ERROR
+ }
+ PASSED();
+
+ /* Verify the table is not full */
+ H5PLsize(&ndx);
+ if (ndx != H5PL_MAX_PATH_NUM - 1) TEST_ERROR
+
+ TESTING(" prepend");
+ /* Prepend one path*/
+ HDsprintf(pathname, "a_path_%d", H5PL_MAX_PATH_NUM + 1);
+ if(H5PLprepend(pathname) < 0) {
+ HDfprintf(stderr," prepend %d: %s\n", H5PL_MAX_PATH_NUM + 1, pathname);
+ TEST_ERROR
+ }
+
+ /* Verify the table is full */
+ H5PLsize(&ndx);
+ if(ndx != H5PL_MAX_PATH_NUM) TEST_ERROR
+
+ /* Verify that the entries were moved */
+ if(H5PLget(8, pathname, 256) <= 0) TEST_ERROR
+ if(HDstrcmp(pathname, "a_path_7") != 0) {
+ HDfprintf(stderr," get 8: %s\n", pathname);
+ TEST_ERROR
+ }
+ if(H5PLget(0, pathname, 256) <= 0) TEST_ERROR
+ HDsprintf(tempname, "a_path_%d", H5PL_MAX_PATH_NUM + 1);
+ if(HDstrcmp(pathname, tempname) != 0) {
+ HDfprintf(stderr," get 0: %s\n", pathname);
+ TEST_ERROR
+ }
+ PASSED();
+
+ TESTING(" prepend (exceed)");
+ /* Exceed the max path prepend */
+ H5E_BEGIN_TRY {
+ HDsprintf(pathname, "a_path_%d", H5PL_MAX_PATH_NUM + 2);
+ ret = H5PLprepend(pathname);
+ } H5E_END_TRY
+ if(ret >= 0) TEST_ERROR
+ PASSED();
+
+ TESTING(" replace");
+ /* Replace one path*/
+ HDsprintf(pathname, "a_path_%d", H5PL_MAX_PATH_NUM + 4);
+ if(H5PLreplace(pathname, 1) < 0) {
+ HDfprintf(stderr," replace 1: %s\n", pathname);
+ TEST_ERROR
+ }
+
+ /* Verify the table is full */
+ H5PLsize(&ndx);
+ if(ndx != H5PL_MAX_PATH_NUM) TEST_ERROR
+
+ /* Verify that the entries were not moved */
+ if(H5PLget(0, pathname, 256) <= 0) TEST_ERROR
+ HDsprintf(tempname, "a_path_%d", H5PL_MAX_PATH_NUM + 1);
+ if(HDstrcmp(pathname, tempname) != 0) {
+ HDfprintf(stderr," get 0: %s\n", pathname);
+ TEST_ERROR
+ }
+ if(H5PLget(2, pathname, 256) <= 0) TEST_ERROR
+ if(HDstrcmp(pathname, "a_path_1") != 0) {
+ HDfprintf(stderr," get 2: %s\n", pathname);
+ TEST_ERROR
+ }
+ PASSED();
+
+ TESTING(" remove (verify for insert)");
+ /* Remove one path*/
+ if(H5PLremove(4) < 0) TEST_ERROR
+
+ /* Verify that the entries were moved */
+ if(H5PLget(4, pathname, 256) <= 0) TEST_ERROR
+ if(HDstrcmp(pathname, "a_path_4") != 0) {
+ HDfprintf(stderr," get 4: %s\n", pathname);
+ TEST_ERROR
+ }
+ PASSED();
+
+ /* Verify the table is not full */
+ H5PLsize(&ndx);
+ if(ndx != 15) TEST_ERROR
+
+ TESTING(" insert");
+ /* Insert one path*/
+ HDsprintf(pathname, "a_path_%d", H5PL_MAX_PATH_NUM + 5);
+ if(H5PLinsert(pathname, 3) < 0) {
+ HDfprintf(stderr," insert 3: %s\n", pathname);
+ TEST_ERROR
+ }
+
+ /* Verify that the entries were moved */
+ if(H5PLget(4, pathname, 256) <= 0) TEST_ERROR
+ if(HDstrcmp(pathname, "a_path_2") != 0) {
+ HDfprintf(stderr," get 4: %s\n", pathname);
+ TEST_ERROR
+ }
+ PASSED();
+
+ /* Verify the table is full */
+ H5PLsize(&ndx);
+ if(ndx != H5PL_MAX_PATH_NUM) TEST_ERROR
+
+ TESTING(" insert (exceed)");
+ /* Exceed the max path insert */
+ H5E_BEGIN_TRY {
+ HDsprintf(pathname, "a_path_%d", H5PL_MAX_PATH_NUM + 6);
+ ret = H5PLinsert(pathname, 12);
+ } H5E_END_TRY
+ if(ret >= 0) TEST_ERROR
+
+ PASSED();
+
+ ret_value = 0;
error:
- return -1;
+ return ret_value;
}
/*-------------------------------------------------------------------------
- * Function: main
+ * Function: main
*
- * Purpose: Tests the plugin module (H5PL)
+ * Purpose: Tests the plugin module (H5PL)
*
- * Return: Success: exit(EXIT_SUCCESS)
+ * Return: Success: exit(EXIT_SUCCESS)
*
- * Failure: exit(EXIT_FAILURE)
+ * Failure: exit(EXIT_FAILURE)
*
- * Programmer: Raymond Lu
- * 14 March 2013
+ * Programmer: Raymond Lu
+ * 14 March 2013
*
*-------------------------------------------------------------------------
*/
int
main(void)
{
- char filename[FILENAME_BUF_SIZE];
- hid_t file, fapl, fapl2;
- unsigned new_format;
- int mdc_nelmts;
- size_t rdcc_nelmts;
- size_t rdcc_nbytes;
- double rdcc_w0;
- int nerrors = 0;
+ char filename[FILENAME_BUF_SIZE];
+ hid_t file = -1;
+ hid_t fapl = -1;
+ hid_t fapl2 = -1;
+ unsigned new_format;
+ int mdc_nelmts;
+ size_t rdcc_nelmts;
+ size_t rdcc_nbytes;
+ double rdcc_w0;
+ int nerrors = 0;
/* Testing setup */
h5_reset();
fapl = h5_fileaccess();
/* Turn off the chunk cache, so all the chunks are immediately written to disk */
- if(H5Pget_cache(fapl, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0) < 0)
- TEST_ERROR
+ if(H5Pget_cache(fapl, &mdc_nelmts, &rdcc_nelmts, &rdcc_nbytes, &rdcc_w0) < 0) TEST_ERROR
rdcc_nbytes = 0;
- if(H5Pset_cache(fapl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0) < 0)
- TEST_ERROR
+ if(H5Pset_cache(fapl, mdc_nelmts, rdcc_nelmts, rdcc_nbytes, rdcc_w0) < 0) TEST_ERROR
/* Copy the file access property list */
if((fapl2 = H5Pcopy(fapl)) < 0) TEST_ERROR
@@ -779,50 +1003,47 @@ main(void)
/* Set the FAPL for the type of format */
if(new_format) {
- puts("\nTesting with new file format:");
+ HDputs("\nTesting with new file format:");
my_fapl = fapl2;
} /* end if */
else {
- puts("Testing with old file format:");
+ HDputs("Testing with old file format:");
my_fapl = fapl;
} /* end else */
/* Create the file for this test */
- if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, my_fapl)) < 0)
- TEST_ERROR
+ if((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, my_fapl)) < 0) TEST_ERROR
/* Test dynamically loaded filters for chunked dataset */
- nerrors += (test_filters_for_datasets(file) < 0 ? 1 : 0);
+ nerrors += (test_filters_for_datasets(file) < 0 ? 1 : 0);
/* Test dynamically loaded filters for groups */
nerrors += (test_filters_for_groups(file) < 0 ? 1 : 0);
- if(H5Fclose(file) < 0)
- TEST_ERROR
+ if(H5Fclose(file) < 0) TEST_ERROR
} /* end for */
/* Close FAPL */
if(H5Pclose(fapl2) < 0) TEST_ERROR
if(H5Pclose(fapl) < 0) TEST_ERROR
-
+
/* Restore the default error handler (set in h5_reset()) */
h5_restore_err();
- puts("\nTesting reading data with with dynamic plugin filters:");
+    HDputs("\nTesting reading data with dynamic plugin filters:");
/* Close the library so that all loaded plugin libraries are unloaded */
h5_reset();
fapl = h5_fileaccess();
/* Reopen the file for testing data reading */
- if((file = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0)
- TEST_ERROR
+ if((file = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0) TEST_ERROR
/* Read the data with filters */
- nerrors += (test_read_with_filters(file) < 0 ? 1 : 0);
+ nerrors += (test_read_with_filters(file) < 0 ? 1 : 0);
/* Open the groups with filters */
- nerrors += (test_groups_with_filters(file) < 0 ? 1 : 0);
+ nerrors += (test_groups_with_filters(file) < 0 ? 1 : 0);
/* Restore the default error handler (set in h5_reset()) */
h5_restore_err();
@@ -832,26 +1053,26 @@ main(void)
fapl = h5_fileaccess();
/* Reopen the file for testing data reading */
- if((file = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0)
- TEST_ERROR
+ if((file = H5Fopen(filename, H5F_ACC_RDONLY, fapl)) < 0) TEST_ERROR
/* Read the data with disabled filters */
nerrors += (test_noread_with_filters(file) < 0 ? 1 : 0);
- if(H5Fclose(file) < 0)
- TEST_ERROR
+ if(H5Fclose(file) < 0) TEST_ERROR
+
+ /* Test the APIs for access to the filter plugin path table */
+ nerrors += (test_filter_path_apis() < 0 ? 1 : 0);
+
+ if(nerrors) TEST_ERROR
- if(nerrors)
- TEST_ERROR
- printf("All plugin tests passed.\n");
+ HDprintf("All plugin tests passed.\n");
h5_cleanup(FILENAME, fapl);
return 0;
error:
nerrors = MAX(1, nerrors);
- printf("***** %d PLUGIN TEST%s FAILED! *****\n",
- nerrors, 1 == nerrors ? "" : "S");
+ HDprintf("***** %d PLUGIN TEST%s FAILED! *****\n", nerrors, 1 == nerrors ? "" : "S");
return 1;
}
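
For orientation, the path-table calls exercised by the new test_filter_path_apis() above are the public H5PL search-path routines. A minimal stand-alone sketch of how an application would use them is given below; it mirrors only the calls and argument shapes seen in the test, and the plugin directories are placeholder names, not paths the test itself uses.

#include <stdio.h>
#include "hdf5.h"

int
main(void)
{
    unsigned int npaths = 0;    /* number of entries in the plugin search-path table */
    char         buf[256];      /* buffer for reading a path back */
    ssize_t      len;

    if(H5PLsize(&npaths) < 0)                           /* query current table size */
        return 1;
    printf("table currently holds %u path(s)\n", npaths);

    if(H5PLappend("/usr/local/hdf5/plugins") < 0)       /* add a path at the end (placeholder) */
        return 1;
    if(H5PLprepend("/opt/hdf5/plugins") < 0)            /* add a path at the front (placeholder) */
        return 1;

    if((len = H5PLget(0, buf, sizeof(buf))) <= 0)       /* read back the first entry */
        return 1;
    printf("first search path (%ld chars): %s\n", (long)len, buf);

    if(H5PLremove(0) < 0)                               /* drop the entry just prepended */
        return 1;
    return 0;
}
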
diff --git a/test/pool.c b/test/pool.c
index 7d80096..1851d6e 100644
--- a/test/pool.c
+++ b/test/pool.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Programmer: Quincey Koziol <koziol@ncsa.uiuc.edu>
diff --git a/test/reserved.c b/test/reserved.c
index bb6d328..d8d0c59 100644
--- a/test/reserved.c
+++ b/test/reserved.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "h5test.h"
diff --git a/test/set_extent.c b/test/set_extent.c
index a992419..b9536e5 100644
--- a/test/set_extent.c
+++ b/test/set_extent.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -139,6 +137,15 @@ int main( void )
unsigned new_format; /* Whether to use the latest file format */
unsigned chunk_cache; /* Whether to enable chunk caching */
int nerrors = 0;
+ const char *env_h5_drvr; /* File Driver value from environment */
+    hbool_t     contig_addr_vfd;        /* Whether VFD used has a contiguous address space */
+
+
+ env_h5_drvr = HDgetenv("HDF5_DRIVER");
+ if(env_h5_drvr == NULL)
+ env_h5_drvr = "nomatch";
+    /* Whether the current VFD supports a contiguous address space (the split/multi VFDs do not) */
+ contig_addr_vfd = (hbool_t)(HDstrcmp(env_h5_drvr, "split") && HDstrcmp(env_h5_drvr, "multi"));
/* Initialize random number seed */
HDsrandom((unsigned)HDtime(NULL));
@@ -194,12 +201,15 @@ int main( void )
H5F_LIBVER_LATEST) < 0) TEST_ERROR
/* Tests which use chunked datasets */
- nerrors += do_ranks( my_fapl, new_format ) < 0 ? 1 : 0;
+ if(!new_format || (new_format && contig_addr_vfd))
+ nerrors += do_ranks( my_fapl, new_format ) < 0 ? 1 : 0;
} /* end for */
/* Tests which do not use chunked datasets */
- nerrors += test_external( fapl ) < 0 ? 1 : 0;
- nerrors += do_layouts( fapl ) < 0 ? 1 : 0;
+ if(!new_format || (new_format && contig_addr_vfd)) {
+ nerrors += test_external( fapl ) < 0 ? 1 : 0;
+ nerrors += do_layouts( fapl ) < 0 ? 1 : 0;
+ }
} /* end for */
/* Close 2nd FAPL */
@@ -2591,13 +2601,6 @@ static int test_random_rank4( hid_t fapl, hid_t dcpl, hbool_t do_fillvalue,
volatile unsigned i, j, k, l, m; /* Local indices */
char filename[NAME_BUF_SIZE];
- /*!FIXME Skip the test if a fixed array index is requested, as resizing
- * fixed arrays is broken now. Extensible arrays are also broken. Remove
- * these lines as appropriate when these problems are fixed. */
- /* Fixed Array index type is now fixed */
- if(index_type == RANK4_INDEX_EARRAY)
- return 0;
-
/* create a new file */
h5_fixname(FILENAME[4], fapl, filename, sizeof filename);
if ((file = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
@@ -2802,12 +2805,6 @@ static int test_random_rank4_vl( hid_t fapl, hid_t dcpl, hbool_t do_fillvalue,
volatile unsigned i, j, k, l, m; /* Local indices */
char filename[NAME_BUF_SIZE];
- /*!FIXME Skip the test if a fixed array index is requested, as resizing
- * fixed arrays is broken now. Extensible arrays are also broken. Remove
- * these lines as appropriate when these problems are fixed. */
- if(index_type == RANK4_INDEX_FARRAY || index_type == RANK4_INDEX_EARRAY)
- return 0;
-
/* Initialize fill value buffers so they aren't freed in case of an error */
fill_value.len = 0;
fill_value.p = NULL;
diff --git a/test/space_overflow.c b/test/space_overflow.c
index f0e5e1d..15be9ba 100644
--- a/test/space_overflow.c
+++ b/test/space_overflow.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/stab.c b/test/stab.c
index f81bb5f..97abc16 100644
--- a/test/stab.c
+++ b/test/stab.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -96,7 +94,7 @@ const char *FILENAME[] = {
*-------------------------------------------------------------------------
*/
static int
-test_misc(hid_t fapl, hbool_t new_format)
+test_misc(hid_t fcpl, hid_t fapl, hbool_t new_format)
{
hid_t fid = (-1); /* File ID */
hid_t g1 = (-1), g2 = (-1), g3 = (-1);
@@ -110,7 +108,7 @@ test_misc(hid_t fapl, hbool_t new_format)
/* Create file */
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
- if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) TEST_ERROR
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl)) < 0) TEST_ERROR
/* Create initial groups for testing, then close */
if((g1 = H5Gcreate2(fid, "test_1a", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) < 0) TEST_ERROR
@@ -177,7 +175,7 @@ test_misc(hid_t fapl, hbool_t new_format)
*-------------------------------------------------------------------------
*/
static int
-test_long(hid_t fapl, hbool_t new_format)
+test_long(hid_t fcpl, hid_t fapl, hbool_t new_format)
{
hid_t fid = (-1); /* File ID */
hid_t g1 = (-1), g2 = (-1);
@@ -192,7 +190,7 @@ test_long(hid_t fapl, hbool_t new_format)
/* Create file */
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
- if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) TEST_ERROR
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl)) < 0) TEST_ERROR
/* Group names */
name1 = (char *)HDmalloc((size_t)LONG_NAME_LEN);
@@ -252,7 +250,7 @@ error:
*-------------------------------------------------------------------------
*/
static int
-test_large(hid_t fapl, hbool_t new_format)
+test_large(hid_t fcpl, hid_t fapl, hbool_t new_format)
{
hid_t fid = (-1); /* File ID */
hid_t cwg = (-1), dir = (-1); /* Group IDs */
@@ -267,7 +265,7 @@ test_large(hid_t fapl, hbool_t new_format)
/* Create file */
h5_fixname(FILENAME[0], fapl, filename, sizeof(filename));
- if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0) TEST_ERROR
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl)) < 0) TEST_ERROR
/*
* Create a directory that has so many entries that the root
@@ -318,7 +316,7 @@ test_large(hid_t fapl, hbool_t new_format)
*-------------------------------------------------------------------------
*/
static int
-lifecycle(hid_t fapl2)
+lifecycle(hid_t fcpl, hid_t fapl2)
{
hid_t fid = (-1); /* File ID */
hid_t gid = (-1); /* Group ID */
@@ -341,7 +339,7 @@ lifecycle(hid_t fapl2)
/* Create file */
h5_fixname(FILENAME[0], fapl2, filename, sizeof(filename));
- if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl2)) < 0) TEST_ERROR
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl2)) < 0) TEST_ERROR
/* Close file */
if(H5Fclose(fid) < 0) TEST_ERROR
@@ -532,7 +530,7 @@ error:
*-------------------------------------------------------------------------
*/
static int
-long_compact(hid_t fapl2)
+long_compact(hid_t fcpl, hid_t fapl2)
{
hid_t fid = (-1); /* File ID */
hid_t gid = (-1); /* Group ID */
@@ -546,7 +544,7 @@ long_compact(hid_t fapl2)
/* Create file */
h5_fixname(FILENAME[0], fapl2, filename, sizeof(filename));
- if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl2)) < 0) TEST_ERROR
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl2)) < 0) TEST_ERROR
/* Close file */
if(H5Fclose(fid) < 0) TEST_ERROR
@@ -755,7 +753,7 @@ error:
*-------------------------------------------------------------------------
*/
static int
-no_compact(hid_t fapl2)
+no_compact(hid_t fcpl, hid_t fapl2)
{
hid_t fid = (-1); /* File ID */
hid_t gid = (-1); /* Group ID */
@@ -772,7 +770,7 @@ no_compact(hid_t fapl2)
/* Create file */
h5_fixname(FILENAME[0], fapl2, filename, sizeof(filename));
- if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl2)) < 0) TEST_ERROR
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl2)) < 0) TEST_ERROR
/* Close file */
if(H5Fclose(fid) < 0) TEST_ERROR
@@ -1165,9 +1163,20 @@ error:
int
main(void)
{
- hid_t fapl, fapl2; /* File access property list IDs */
- unsigned new_format; /* Whether to use the new format or not */
- int nerrors = 0;
+ hid_t fapl, fapl2; /* File access property list IDs */
+ hid_t fcpl, fcpl2; /* File creation property list ID */
+ unsigned new_format; /* Whether to use the new format or not */
+ const char *env_h5_drvr; /* File Driver value from environment */
+    hbool_t     contig_addr_vfd;        /* Whether VFD used has a contiguous address space */
+ int nerrors = 0;
+
+ /* Get the VFD to use */
+ env_h5_drvr = HDgetenv("HDF5_DRIVER");
+ if(env_h5_drvr == NULL)
+ env_h5_drvr = "nomatch";
+
+    /* Whether the current VFD supports a contiguous address space (the split/multi VFDs do not) */
+ contig_addr_vfd = (hbool_t)(HDstrcmp(env_h5_drvr, "split") && HDstrcmp(env_h5_drvr, "multi"));
/* Reset library */
h5_reset();
@@ -1179,20 +1188,42 @@ main(void)
/* Set the "use the latest version of the format" bounds for creating objects in the file */
if(H5Pset_libver_bounds(fapl2, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) TEST_ERROR
+ /* Set up file creation property list */
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0) TEST_ERROR
+ if((fcpl2 = H5Pcopy(fcpl)) < 0) TEST_ERROR
+
+ /* Set to use paged aggregation strategy and persisting free-space */
+ /* Skip testing for multi/split drivers */
+ if(H5Pset_file_space_strategy(fcpl2, H5F_FSPACE_STRATEGY_PAGE, 1, (hsize_t)1) < 0)
+ TEST_ERROR
+
/* Loop over using new group format */
for(new_format = FALSE; new_format <= TRUE; new_format++) {
+ hid_t my_fapl = fapl;
+ hid_t my_fcpl = fcpl;
+
+ if(!contig_addr_vfd)
+ continue;
+
+ if(new_format) {
+ my_fapl = fapl2;
+ my_fcpl = fcpl2; /* Set to use paged aggregation and persisting free-space */
+ }
+
/* Perform basic tests, with old & new style groups */
- nerrors += test_misc((new_format ? fapl2 : fapl), new_format);
- nerrors += test_long((new_format ? fapl2 : fapl), new_format);
- nerrors += test_large((new_format ? fapl2 : fapl), new_format);
+ nerrors += test_misc(my_fcpl, my_fapl, new_format);
+ nerrors += test_long(my_fcpl, my_fapl, new_format);
+ nerrors += test_large(my_fcpl, my_fapl, new_format);
} /* end for */
/* New format group specific tests (require new format features) */
- nerrors += lifecycle(fapl2);
- nerrors += long_compact(fapl2);
- nerrors += read_old();
- nerrors += no_compact(fapl2);
- nerrors += gcpl_on_root(fapl2);
+ if(contig_addr_vfd) {
+ nerrors += lifecycle(fcpl2, fapl2);
+ nerrors += long_compact(fcpl2, fapl2);
+ nerrors += read_old();
+ nerrors += no_compact(fcpl2, fapl2);
+ nerrors += gcpl_on_root(fapl2);
+ }
/* Old group API specific tests */
nerrors += old_api(fapl);
@@ -1200,6 +1231,8 @@ main(void)
/* Close 2nd FAPL */
H5Pclose(fapl2);
+ H5Pclose(fcpl);
+ H5Pclose(fcpl2);
/* Verify symbol table messages are cached */
nerrors += (h5_verify_cached_stabs(FILENAME, fapl) < 0 ? 1 : 0);
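
The stab.c changes above thread a file creation property list into every test so that, under the new group format, files are created with the paged aggregation strategy and persistent free-space tracking. A minimal sketch of that file-creation pattern, using only the H5Pset_file_space_strategy() call shown in the hunk (the output file name is a placeholder, not one used by the tests):

#include "hdf5.h"

int
main(void)
{
    hid_t fcpl = -1;
    hid_t fid  = -1;

    if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
        return 1;
    /* Paged aggregation, persist free space, free-space section threshold of 1 byte */
    if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 1, (hsize_t)1) < 0)
        return 1;
    if((fid = H5Fcreate("paged_fs_example.h5", H5F_ACC_TRUNC, fcpl, H5P_DEFAULT)) < 0)
        return 1;

    H5Fclose(fid);
    H5Pclose(fcpl);
    return 0;
}
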
diff --git a/test/swmr.c b/test/swmr.c
index 780eb0f..399a9ec 100644
--- a/test/swmr.c
+++ b/test/swmr.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/***********************************************************
diff --git a/test/swmr_addrem_writer.c b/test/swmr_addrem_writer.c
new file mode 100644
index 0000000..51caa3d
--- /dev/null
+++ b/test/swmr_addrem_writer.c
@@ -0,0 +1,441 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: swmr_addrem_writer.c
+ *
+ * Purpose:     Adds and removes data in a randomly selected subset of the
+ * datasets in the SWMR test file.
+ *
+ * This program is intended to run concurrently with the
+ * swmr_reader program. It is also run AFTER a sequential
+ * (not concurrent!) invoking of swmr_writer so the writer
+ *              (not concurrent!) invocation of swmr_writer so the writer
+ * there wouldn't be much to shrink :)
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/***********/
+/* Headers */
+/***********/
+
+#include "h5test.h"
+#include "swmr_common.h"
+
+/****************/
+/* Local Macros */
+/****************/
+
+/* The maximum # of records to add/remove from the dataset in one step */
+#define MAX_SIZE_CHANGE 10
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+static hid_t open_skeleton(const char *filename, unsigned verbose);
+static int addrem_records(hid_t fid, unsigned verbose, unsigned long nops,
+ unsigned long flush_count);
+static void usage(void);
+
+
+/*-------------------------------------------------------------------------
+ * Function: open_skeleton
+ *
+ * Purpose: Opens the SWMR HDF5 file and datasets.
+ *
+ * Parameters: const char *filename
+ * The filename of the SWMR HDF5 file to open
+ *
+ * unsigned verbose
+ * Whether or not to emit verbose console messages
+ *
+ * Return: Success: The file ID of the opened SWMR file
+ * The dataset IDs are stored in a global array
+ *
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static hid_t
+open_skeleton(const char *filename, unsigned verbose)
+{
+ hid_t fid; /* File ID for new HDF5 file */
+ hid_t fapl; /* File access property list */
+ hid_t sid; /* Dataspace ID */
+ hsize_t dim[2]; /* Dataspace dimension */
+ unsigned u, v; /* Local index variable */
+
+ HDassert(filename);
+
+ /* Create file access property list */
+ if((fapl = h5_fileaccess()) < 0)
+ return -1;
+
+ /* Set to use the latest library format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ return -1;
+
+#ifdef QAK
+ /* Increase the initial size of the metadata cache */
+ {
+ H5AC_cache_config_t mdc_config;
+
+ mdc_config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
+ H5Pget_mdc_config(fapl, &mdc_config);
+ HDfprintf(stderr, "mdc_config.initial_size = %lu\n", (unsigned long)mdc_config.initial_size);
+ HDfprintf(stderr, "mdc_config.epoch_length = %lu\n", (unsigned long)mdc_config.epoch_length);
+ mdc_config.set_initial_size = 1;
+ mdc_config.initial_size = 16 * 1024 * 1024;
+ /* mdc_config.epoch_length = 5000; */
+ H5Pset_mdc_config(fapl, &mdc_config);
+ }
+#endif /* QAK */
+
+#ifdef QAK
+ H5Pset_fapl_log(fapl, "append.log", H5FD_LOG_ALL, (size_t)(512 * 1024 * 1024));
+#endif /* QAK */
+
+ /* Open the file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE, fapl)) < 0)
+ return -1;
+
+ /* Close file access property list */
+ if(H5Pclose(fapl) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Opening datasets\n");
+
+ /* Open the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++) {
+ if((symbol_info[u][v].dsid = H5Dopen2(fid, symbol_info[u][v].name, H5P_DEFAULT)) < 0)
+ return -1;
+ if((sid = H5Dget_space(symbol_info[u][v].dsid)) < 0)
+ return -1;
+ if(2 != H5Sget_simple_extent_ndims(sid))
+ return -1;
+ if(H5Sget_simple_extent_dims(sid, dim, NULL) < 0)
+ return -1;
+ symbol_info[u][v].nrecords = dim[1];
+ } /* end for */
+
+ return fid;
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: addrem_records
+ *
+ * Purpose:     Adds/removes a specified number of records to/from random
+ *              datasets in the SWMR test file.
+ *
+ * Parameters: hid_t fid
+ * The file ID of the SWMR HDF5 file
+ *
+ * unsigned verbose
+ * Whether or not to emit verbose console messages
+ *
+ * unsigned long nops
+ * # of records to read/write in the datasets
+ *
+ * unsigned long flush_count
+ * # of records to write before flushing the file to disk
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+addrem_records(hid_t fid, unsigned verbose, unsigned long nops, unsigned long flush_count)
+{
+ hid_t tid; /* Datatype ID for records */
+ hid_t mem_sid; /* Memory dataspace ID */
+ hsize_t start[2] = {0, 0}, count[2] = {1, 1}; /* Hyperslab selection values */
+ hsize_t dim[2] = {1, 0}; /* Dataspace dimensions */
+ symbol_t buf[MAX_SIZE_CHANGE]; /* Write buffer */
+ unsigned long op_to_flush; /* # of operations before flush */
+ unsigned long u, v; /* Local index variables */
+
+ HDassert(fid > 0);
+
+ /* Reset the buffer */
+ HDmemset(&buf, 0, sizeof(buf));
+
+ /* Create a dataspace for the record to add */
+ if((mem_sid = H5Screate_simple(2, count, NULL)) < 0)
+ return -1;
+
+ /* Create datatype for appending records */
+ if((tid = create_symbol_datatype()) < 0)
+ return -1;
+
+    /* Add records to and remove records from random datasets, according to
+     * the frequency distribution */
+ op_to_flush = flush_count;
+ for(u=0; u<nops; u++) {
+ symbol_info_t *symbol; /* Symbol to write record to */
+ hid_t file_sid; /* Dataset's space ID */
+
+ /* Get a random dataset, according to the symbol distribution */
+ symbol = choose_dataset();
+
+ /* Decide whether to shrink or expand, and by how much */
+ count[1] = (hsize_t)HDrandom() % (MAX_SIZE_CHANGE * 2) + 1;
+
+ if(count[1] > MAX_SIZE_CHANGE) {
+ /* Add records */
+ count[1] -= MAX_SIZE_CHANGE;
+
+ /* Set the buffer's IDs (equal to its position) */
+ for(v=0; v<count[1]; v++)
+ buf[v].rec_id = (uint64_t)symbol->nrecords + (uint64_t)v;
+
+ /* Set the memory space to the correct size */
+ if(H5Sset_extent_simple(mem_sid, 2, count, NULL) < 0)
+ return -1;
+
+ /* Get the coordinates to write */
+ start[1] = symbol->nrecords;
+
+ /* Cork the metadata cache, to prevent the object header from being
+ * flushed before the data has been written */
+ if(H5Odisable_mdc_flushes(symbol->dsid) < 0)
+ return -1;
+
+ /* Extend the dataset's dataspace to hold the new record */
+ symbol->nrecords+= count[1];
+ dim[1] = symbol->nrecords;
+ if(H5Dset_extent(symbol->dsid, dim) < 0)
+ return -1;
+
+ /* Get the dataset's dataspace */
+ if((file_sid = H5Dget_space(symbol->dsid)) < 0)
+ return -1;
+
+ /* Choose the last record in the dataset */
+ if(H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0)
+ return -1;
+
+ /* Write record to the dataset */
+ if(H5Dwrite(symbol->dsid, tid, mem_sid, file_sid, H5P_DEFAULT, &buf) < 0)
+ return -1;
+
+ /* Uncork the metadata cache */
+ if(H5Oenable_mdc_flushes(symbol->dsid) < 0)
+ return -1;
+
+ /* Close the dataset's dataspace */
+ if(H5Sclose(file_sid) < 0)
+ return -1;
+ } /* end if */
+ else {
+ /* Shrink the dataset's dataspace */
+ if(count[1] > symbol->nrecords)
+ symbol->nrecords = 0;
+ else
+ symbol->nrecords -= count[1];
+ dim[1] = symbol->nrecords;
+ if(H5Dset_extent(symbol->dsid, dim) < 0)
+ return -1;
+ } /* end else */
+
+ /* Check for flushing file */
+ if(flush_count > 0) {
+ /* Decrement count of records to write before flushing */
+ op_to_flush--;
+
+ /* Check for counter being reached */
+ if(0 == op_to_flush) {
+ /* Flush contents of file */
+ if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
+ return -1;
+
+ /* Reset flush counter */
+ op_to_flush = flush_count;
+ } /* end if */
+ } /* end if */
+ } /* end for */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Closing datasets\n");
+
+ /* Close the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++)
+ if(H5Dclose(symbol_info[u][v].dsid) < 0)
+ return -1;
+
+ return 0;
+}
+
+static void
+usage(void)
+{
+ printf("\n");
+ printf("Usage error!\n");
+ printf("\n");
+ printf("Usage: swmr_addrem_writer [-q] [-f <# of operations between flushing\n");
+ printf(" file contents>] [-r <random seed>] <# of operations>\n");
+ printf("\n");
+ printf("<# of operations between flushing file contents> should be 0 (for\n");
+ printf("no flushing) or between 1 and (<# of operations> - 1).\n");
+ printf("\n");
+ printf("<# of operations> must be specified.\n");
+ printf("\n");
+ printf("Defaults to verbose (no '-q' given), flushing every 1000 operations\n");
+ printf("('-f 1000'), and will generate a random seed (no -r given).\n");
+ printf("\n");
+ HDexit(1);
+}
+
+int main(int argc, const char *argv[])
+{
+ hid_t fid; /* File ID for file opened */
+ long nops = 0; /* # of times to grow or shrink the dataset */
+ long flush_count = 1000; /* # of records to write between flushing file */
+ unsigned verbose = 1; /* Whether to emit some informational messages */
+ unsigned use_seed = 0; /* Set to 1 if a seed was set on the command line */
+ unsigned random_seed = 0; /* Random # seed */
+ unsigned u; /* Local index variable */
+ int temp;
+
+ /* Parse command line options */
+ if(argc < 2)
+ usage();
+ if(argc > 1) {
+ u = 1;
+ while(u < (unsigned)argc) {
+ if(argv[u][0] == '-') {
+ switch(argv[u][1]) {
+ /* # of records to write between flushing file */
+ case 'f':
+ flush_count = HDatol(argv[u + 1]);
+ if(flush_count < 0)
+ usage();
+ u += 2;
+ break;
+
+ /* Be quiet */
+ case 'q':
+ verbose = 0;
+ u++;
+ break;
+
+ /* Random # seed */
+ case 'r':
+ use_seed = 1;
+ temp = HDatoi(argv[u + 1]);
+ if(temp < 0)
+ usage();
+ else
+ random_seed = (unsigned)temp;
+ u += 2;
+ break;
+
+ default:
+ usage();
+ break;
+ } /* end switch */
+ } /* end if */
+ else {
+ /* Get the number of records to append */
+ nops = HDatol(argv[u]);
+ if(nops <= 0)
+ usage();
+
+ u++;
+ } /* end else */
+ } /* end while */
+ } /* end if */
+ if(nops <= 0)
+ usage();
+ if(flush_count >= nops)
+ usage();
+
+ /* Emit informational message */
+ if(verbose) {
+ HDfprintf(stderr, "Parameters:\n");
+ HDfprintf(stderr, "\t# of operations between flushes = %ld\n", flush_count);
+ HDfprintf(stderr, "\t# of operations = %ld\n", nops);
+ } /* end if */
+
+ /* Set the random seed */
+ if(0 == use_seed) {
+ struct timeval t;
+ HDgettimeofday(&t, NULL);
+ random_seed = (unsigned)(t.tv_usec);
+ } /* end if */
+ HDsrandom(random_seed);
+ /* ALWAYS emit the random seed for possible debugging */
+ HDfprintf(stderr, "Using writer random seed: %u\n", random_seed);
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Generating symbol names\n");
+
+ /* Generate dataset names */
+ if(generate_symbols() < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Opening skeleton file: %s\n", FILENAME);
+
+ /* Open file skeleton */
+ if((fid = open_skeleton(FILENAME, verbose)) < 0) {
+ HDfprintf(stderr, "Error opening skeleton file!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Send a message to indicate "H5Fopen" is complete--releasing the file lock */
+ h5_send_message(WRITER_MESSAGE, NULL, NULL);
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Adding and removing records\n");
+
+ /* Grow and shrink datasets */
+ if(addrem_records(fid, verbose, (unsigned long)nops, (unsigned long)flush_count) < 0) {
+ HDfprintf(stderr, "Error adding and removing records from datasets!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Releasing symbols\n");
+
+ /* Clean up the symbols */
+ if(shutdown_symbols() < 0) {
+ HDfprintf(stderr, "Error releasing symbols!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Closing objects\n");
+
+ /* Close objects opened */
+ if(H5Fclose(fid) < 0) {
+ HDfprintf(stderr, "Error closing file!\n");
+ HDexit(1);
+ } /* end if */
+
+ return 0;
+}
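
Note on the file above: the core of addrem_records() is the cork/extend/select/write/uncork sequence, which keeps a concurrent SWMR reader from seeing the enlarged extent before the new raw data is on disk. The sketch below condenses that path into one helper for reference only; it assumes the same headers as the file above and that dsid, tid, mem_sid, and a populated buf are set up exactly as in addrem_records(). It is an illustration, not code from this patch.

/* Condensed sketch of the SWMR append path in addrem_records() above.
 * nrecords tracks the dataset's current record count; nappend is the
 * number of records in buf to append.
 */
static int
append_records_sketch(hid_t dsid, hid_t tid, hid_t mem_sid,
    const symbol_t *buf, hsize_t *nrecords, hsize_t nappend)
{
    hsize_t start[2] = {0, 0};
    hsize_t count[2] = {1, 0};
    hsize_t dim[2] = {1, 0};
    hid_t file_sid;

    /* Size the memory dataspace to the number of records being appended */
    count[1] = nappend;
    if(H5Sset_extent_simple(mem_sid, 2, count, NULL) < 0)
        return -1;

    /* Cork the metadata cache for this object so the object header (with the
     * new, larger extent) cannot reach the file before the raw data does */
    if(H5Odisable_mdc_flushes(dsid) < 0)
        return -1;

    /* Extend the dataset, then write the new records at the old tail */
    start[1] = *nrecords;
    *nrecords += nappend;
    dim[1] = *nrecords;
    if(H5Dset_extent(dsid, dim) < 0)
        return -1;
    if((file_sid = H5Dget_space(dsid)) < 0)
        return -1;
    if(H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0)
        return -1;
    if(H5Dwrite(dsid, tid, mem_sid, file_sid, H5P_DEFAULT, buf) < 0)
        return -1;
    if(H5Sclose(file_sid) < 0)
        return -1;

    /* Uncork so the updated object header can be flushed normally again */
    return (H5Oenable_mdc_flushes(dsid) < 0) ? -1 : 0;
}
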
diff --git a/test/swmr_check_compat_vfd.c b/test/swmr_check_compat_vfd.c
new file mode 100644
index 0000000..1589f6e
--- /dev/null
+++ b/test/swmr_check_compat_vfd.c
@@ -0,0 +1,54 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/* Purpose: This is a small program that checks if the HDF5_DRIVER
+ * environment variable is set to a value that supports SWMR.
+ *
+ * It is intended for use in shell scripts.
+ */
+
+#include "h5test.h"
+
+/* This file needs to access the file driver testing code */
+#define H5FD_FRIEND /*suppress error about including H5FDpkg */
+#define H5FD_TESTING
+#include "H5FDpkg.h" /* File drivers */
+
+
+/*-------------------------------------------------------------------------
+ * Function: main
+ *
+ * Purpose: Inspects the HDF5_DRIVER environment variable, which
+ * determines the VFD that the test harness will use with
+ * the majority of the tests.
+ *
+ * Return: VFD supports SWMR: EXIT_SUCCESS
+ *
+ * VFD does not support SWMR
+ * or failure: EXIT_FAILURE
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+main(void)
+{
+ char *driver = NULL;
+
+ driver = HDgetenv("HDF5_DRIVER");
+
+ if(H5FD_supports_swmr_test(driver))
+ return EXIT_SUCCESS;
+ else
+ return EXIT_FAILURE;
+
+} /* end main() */
diff --git a/test/swmr_common.c b/test/swmr_common.c
new file mode 100644
index 0000000..a0d79e3
--- /dev/null
+++ b/test/swmr_common.c
@@ -0,0 +1,288 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: swmr_common.c
+ *
+ * Purpose: Utility functions for the SWMR test code.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/***********/
+/* Headers */
+/***********/
+
+#include "h5test.h"
+#include "swmr_common.h"
+
+/*******************/
+/* Local Variables */
+/*******************/
+
+/* The SWMR data arrays:
+ *
+ * The code uses a 2-D jagged array of datasets. The first dimension is called
+ * the 'level' and there are five of them.
+ *
+ * #define NLEVELS 5
+ *
+ * The second dimension is the 'count' and there are quite a few datasets per
+ * 'level'.
+ *
+ * unsigned symbol_count[NLEVELS] = {100, 200, 400, 800, 1600};
+ *
+ * These datasets are created when the skeleton is generated and are initially
+ * empty. Each dataset has no upper bound on size (H5S_UNLIMITED). They
+ * are of compound type, with two members: an integer ID and an opaque
+ * 'data part'. The data part is not used by the SWMR testing.
+ *
+ * The SWMR testing will then randomly add and/or remove entries
+ * from these datasets. The selection of the level is skewed by a mapping
+ * table which preferentially hammers on the lower levels with their smaller
+ * number of datasets.
+ *
+ * static unsigned symbol_mapping[NMAPPING] = {0, 0, 0, 0, 1, 1, 2, 3, 4};
+ *
+ * The information about each dataset (name, hid_t, etc.) is stored in a
+ * separate array.
+ *
+ * symbol_info_t *symbol_info[NLEVELS];
+ */
+
+/* An array of dataset levels, used to select the level for a SWMR operation
+ * Note that this preferentially selects the lower levels with their smaller
+ * number of datasets.
+ */
+static unsigned symbol_mapping[NMAPPING] = {0, 0, 0, 0, 1, 1, 2, 3, 4};
+
+/* The number of datasets at each level */
+unsigned symbol_count[NLEVELS] = {100, 200, 400, 800, 1600};
+
+/* Array of dataset information entries (1 per dataset) */
+symbol_info_t *symbol_info[NLEVELS];
+
+
+/*-------------------------------------------------------------------------
+ * Function: choose_dataset
+ *
+ * Purpose: Selects a random dataset in the SWMR file
+ *
+ * Parameters: N/A
+ *
+ * Return: Success: A pointer to information about a dataset.
+ * Failure: Can't fail
+ *
+ *-------------------------------------------------------------------------
+ */
+symbol_info_t *
+choose_dataset(void)
+{
+ unsigned level; /* The level of the dataset */
+ unsigned offset; /* The "offset" of the dataset at that level */
+
+ /* Determine level of dataset */
+ level = symbol_mapping[HDrandom() % NMAPPING];
+
+ /* Determine the offset of the level */
+ offset = (unsigned)(HDrandom() % (int)symbol_count[level]);
+
+ return &symbol_info[level][offset];
+} /* end choose_dataset() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: create_symbol_datatype
+ *
+ * Purpose:     Creates the HDF5 datatype used for elements in the SWMR
+ * testing datasets.
+ *
+ * Parameters: N/A
+ *
+ * Return: Success: An HDF5 type ID
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+hid_t
+create_symbol_datatype(void)
+{
+ hid_t sym_type_id; /* Datatype ID for symbol */
+ hid_t opaq_type_id; /* Datatype ID for opaque part of record */
+
+ /* Create opaque datatype to represent other information for this record */
+ if((opaq_type_id = H5Tcreate(H5T_OPAQUE, (size_t)DTYPE_SIZE)) < 0)
+ return -1;
+
+ /* Create compound datatype for symbol */
+ if((sym_type_id = H5Tcreate(H5T_COMPOUND, sizeof(symbol_t))) < 0)
+ return -1;
+
+ /* Insert fields in symbol datatype */
+ if(H5Tinsert(sym_type_id, "rec_id", HOFFSET(symbol_t, rec_id), H5T_NATIVE_UINT64) < 0)
+ return -1;
+ if(H5Tinsert(sym_type_id, "info", HOFFSET(symbol_t, info), opaq_type_id) < 0)
+ return -1;
+
+ /* Close opaque datatype */
+ if(H5Tclose(opaq_type_id) < 0)
+ return -1;
+
+ return sym_type_id;
+} /* end create_symbol_datatype() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: generate_name
+ *
+ * Purpose: Generates a SWMR testing dataset name given a level and
+ * count.
+ *              The name is in the format <level>-<count> (%u-%04u).
+ *
+ * Parameters: char *name_buf
+ * Buffer for the created name. Must be pre-allocated.
+ * Since the name is formulaic, this isn't considered an issue.
+ *
+ * unsigned level
+ * The dataset's level
+ *
+ * unsigned count
+ * The dataset's count
+ *
+ * Return: Success: 0
+ *
+ * Failure: Can't fail
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+generate_name(char *name_buf, unsigned level, unsigned count)
+{
+ HDassert(name_buf);
+
+ sprintf(name_buf, "%u-%04u", level, count);
+
+ return 0;
+} /* end generate_name() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: generate_symbols
+ *
+ * Purpose:     Initializes the global dataset information arrays.
+ *
+ * Parameters: N/A
+ *
+ * Return: Success: 0
+ * Failure: Can't fail
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+generate_symbols(void)
+{
+ unsigned u, v; /* Local index variables */
+
+ for(u = 0; u < NLEVELS; u++) {
+ symbol_info[u] = (symbol_info_t *)HDmalloc(symbol_count[u] * sizeof(symbol_info_t));
+ for(v = 0; v < symbol_count[u]; v++) {
+ char name_buf[64];
+
+ generate_name(name_buf, u, v);
+ symbol_info[u][v].name = (char *)HDmalloc(HDstrlen(name_buf) + 1);
+ HDstrcpy(symbol_info[u][v].name, name_buf);
+ symbol_info[u][v].dsid = -1;
+ symbol_info[u][v].nrecords = 0;
+ } /* end for */
+ } /* end for */
+
+ return 0;
+} /* end generate_symbols() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: shutdown_symbols
+ *
+ * Purpose: Cleans up the global dataset information arrays.
+ *
+ * Parameters: N/A
+ *
+ * Return: Success: 0
+ * Failure: Can't fail
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+shutdown_symbols(void)
+{
+ unsigned u, v; /* Local index variables */
+
+ /* Clean up the symbols */
+ for(u = 0; u < NLEVELS; u++) {
+ for(v = 0; v < symbol_count[u]; v++)
+ HDfree(symbol_info[u][v].name);
+ HDfree(symbol_info[u]);
+ } /* end for */
+
+ return 0;
+} /* end shutdown_symbols() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: print_metadata_retries_info
+ *
+ * Purpose:     Retrieves and prints the collection of metadata read retries for the file.
+ *
+ * Parameters: fid: the currently opened file identifier
+ *
+ * Return: Success: 0
+ * Failure: negative
+ *
+ *-------------------------------------------------------------------------
+ */
+int
+print_metadata_retries_info(hid_t fid)
+{
+ H5F_retry_info_t info;
+ unsigned i;
+
+ /* Retrieve the collection of retries */
+ if(H5Fget_metadata_read_retry_info(fid, &info) < 0)
+ return (-1);
+
+ /* Print information for each non-NULL retries[i] */
+ for(i = 0; i < H5F_NUM_METADATA_READ_RETRY_TYPES; i++) {
+ unsigned power;
+ unsigned j;
+
+ if(NULL == info.retries[i])
+ continue;
+
+ HDfprintf(stderr, "Metadata read retries for item %u:\n", i);
+ power = 1;
+ for(j = 0; j < info.nbins; j++) {
+ if(info.retries[i][j])
+ HDfprintf(stderr, "\t# of retries for %u - %u retries: %u\n",
+ power, (power * 10) - 1, info.retries[i][j]);
+ power *= 10;
+ } /* end for */
+ } /* end for */
+
+ /* Free memory for each non-NULL retries[i] */
+ for(i = 0; i < H5F_NUM_METADATA_READ_RETRY_TYPES; i++)
+ if(info.retries[i] != NULL)
+ H5free_memory(info.retries[i]);
+
+ return 0;
+} /* print_metadata_retries_info() */
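
The header comment in this file describes how symbol_mapping skews dataset selection toward the lower levels. The standalone tally below (re-declaring the table locally, so it does not depend on the test harness) makes the skew concrete: level 0 receives about 4/9 of the picks, level 1 about 2/9, and levels 2-4 about 1/9 each. Illustration only, not part of the patch.

#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
    /* Same values as symbol_mapping[NMAPPING] in swmr_common.c above */
    static const unsigned mapping[9] = {0, 0, 0, 0, 1, 1, 2, 3, 4};
    unsigned long tally[5] = {0, 0, 0, 0, 0};
    unsigned long n = 900000;
    unsigned long i;

    /* Pick a level through the mapping table many times and count the hits */
    for(i = 0; i < n; i++)
        tally[mapping[(unsigned)rand() % 9]]++;

    for(i = 0; i < 5; i++)
        printf("level %lu: %lu picks (~%.1f%%)\n",
               i, tally[i], 100.0 * (double)tally[i] / (double)n);

    return 0;
}
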
diff --git a/test/swmr_common.h b/test/swmr_common.h
new file mode 100644
index 0000000..99d1cb2
--- /dev/null
+++ b/test/swmr_common.h
@@ -0,0 +1,78 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef _SWMR_COMMON_H
+#define _SWMR_COMMON_H
+
+/***********/
+/* Headers */
+/***********/
+
+#include "h5test.h"
+
+/**********/
+/* Macros */
+/**********/
+
+#define NLEVELS 5 /* # of datasets in the SWMR test file */
+
+#define NMAPPING 9
+
+#define FILENAME "swmr_data.h5" /* SWMR test file name */
+#define DTYPE_SIZE 150 /* Data size in opaque type */
+
+/* The message sent by writer that the file open is done--releasing the file lock */
+#define WRITER_MESSAGE "SWMR_WRITER_MESSAGE"
+
+/************/
+/* Typedefs */
+/************/
+
+/* Information about a symbol/dataset */
+typedef struct {
+ char *name; /* Dataset name for symbol */
+ hid_t dsid; /* Dataset ID for symbol */
+ hsize_t nrecords; /* Number of records for the symbol */
+} symbol_info_t;
+
+/* A symbol's record */
+typedef struct {
+ uint64_t rec_id; /* ID for this record (unique in symbol) */
+ uint8_t info[DTYPE_SIZE]; /* "Other" information for this record */
+} symbol_t;
+
+/********************/
+/* Global Variables */
+/********************/
+H5TEST_DLLVAR symbol_info_t *symbol_info[NLEVELS];
+H5TEST_DLLVAR unsigned symbol_count[NLEVELS];
+
+/**************/
+/* Prototypes */
+/**************/
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+H5TEST_DLL symbol_info_t * choose_dataset(void);
+H5TEST_DLL hid_t create_symbol_datatype(void);
+H5TEST_DLL int generate_name(char *name_buf, unsigned level, unsigned count);
+H5TEST_DLL int generate_symbols(void);
+H5TEST_DLL int shutdown_symbols(void);
+H5TEST_DLL int print_metadata_retries_info(hid_t fid);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _SWMR_COMMON_H */
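
For orientation, here is a minimal sketch of how a test program strings these helpers together; the real writers and readers added below layer file I/O, timing, flushing, and error reporting on top of this skeleton. Illustration only, not part of the patch.

#include "h5test.h"
#include "swmr_common.h"

int
main(void)
{
    symbol_info_t *sym;
    hid_t tid;

    if(generate_symbols() < 0)              /* Build the name/ID tables */
        return 1;
    if((tid = create_symbol_datatype()) < 0)
        return 1;

    sym = choose_dataset();                 /* Skewed random pick (see swmr_common.c) */
    HDfprintf(stderr, "Would operate on dataset '%s'\n", sym->name);

    if(H5Tclose(tid) < 0)
        return 1;
    if(shutdown_symbols() < 0)              /* Release the tables */
        return 1;

    return 0;
}
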
diff --git a/test/swmr_generator.c b/test/swmr_generator.c
new file mode 100644
index 0000000..1c2ec04
--- /dev/null
+++ b/test/swmr_generator.c
@@ -0,0 +1,384 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: swmr_generator.c
+ *
+ * Purpose: Functions for building and setting up the SWMR test file
+ * and datasets.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/***********/
+/* Headers */
+/***********/
+
+#include "h5test.h"
+#include "swmr_common.h"
+
+/*
+ * This file needs to access testing code from the H5O package.
+ */
+#define H5O_FRIEND /*suppress error about including H5Opkg */
+#define H5O_TESTING
+#include "H5Opkg.h" /* Object headers */
+
+
+/****************/
+/* Local Macros */
+/****************/
+
+#define CHUNK_SIZE 50 /* Chunk size for created datasets */
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+static int gen_skeleton(const char *filename, hbool_t verbose,
+ hbool_t swmr_write, int comp_level, const char *index_type,
+ unsigned random_seed);
+static void usage(void);
+
+
+/*-------------------------------------------------------------------------
+ * Function: gen_skeleton
+ *
+ * Purpose: Creates the HDF5 file and datasets which will be used in
+ * the SWMR testing.
+ *
+ * Parameters: const char *filename
+ * The SWMR test file's name.
+ *
+ * hbool_t verbose
+ * Whether verbose console output is desired.
+ *
+ * hbool_t swmr_write
+ * Whether to create the file with SWMR writing enabled
+ *
+ * int comp_level
+ * The zlib compression level to use. -1 = no compression.
+ *
+ * const char *index_type
+ * The chunk index type (b1 | b2 | ea | fa)
+ *
+ * unsigned random_seed
+ * The random seed to store in the file. The sparse tests use
+ * this value.
+ *
+ * Return: Success: 0
+ *              Failure:    -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+gen_skeleton(const char *filename, hbool_t verbose, hbool_t swmr_write,
+ int comp_level, const char *index_type, unsigned random_seed)
+{
+ hid_t fid; /* File ID for new HDF5 file */
+ hid_t fcpl; /* File creation property list */
+ hid_t fapl; /* File access property list */
+ hid_t dcpl; /* Dataset creation property list */
+ hid_t tid; /* Datatype for dataset elements */
+ hid_t sid; /* Dataspace ID */
+ hid_t aid; /* Attribute ID */
+ hsize_t dims[2] = {1, 0}; /* Dataset starting dimensions */
+ hsize_t max_dims[2] = {1, H5S_UNLIMITED}; /* Dataset maximum dimensions */
+ hsize_t chunk_dims[2] = {1, CHUNK_SIZE}; /* Chunk dimensions */
+#ifdef FILLVAL_WORKS
+ symbol_t fillval; /* Dataset fill value */
+#endif /* FILLVAL_WORKS */
+ unsigned u, v; /* Local index variable */
+
+ HDassert(filename);
+ HDassert(index_type);
+
+ /* Create file access property list */
+ if((fapl = h5_fileaccess()) < 0)
+ return -1;
+
+ /* Can create a file for SWMR support with: (a) (write+latest-format) or (b) (SWMR write+non-latest-format) */
+ if(!swmr_write) {
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ return -1;
+ }
+
+ /* There are two chunk indexes tested here.
+ * With one unlimited dimension, we get the extensible array index
+     * type; with two unlimited dimensions, we get a v2 B-tree.
+ */
+ if(!HDstrcmp(index_type, "b2"))
+ max_dims[0] = H5S_UNLIMITED;
+
+#ifdef QAK
+ /* Increase the initial size of the metadata cache */
+ {
+ H5AC_cache_config_t mdc_config;
+
+ mdc_config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
+ H5Pget_mdc_config(fapl, &mdc_config);
+ HDfprintf(stderr, "mdc_config.initial_size = %lu\n", (unsigned long)mdc_config.initial_size);
+ HDfprintf(stderr, "mdc_config.epoch_length = %lu\n", (unsigned long)mdc_config.epoch_length);
+ mdc_config.set_initial_size = 1;
+ mdc_config.initial_size = 16 * 1024 * 1024;
+ /* mdc_config.epoch_length = 5000; */
+ H5Pset_mdc_config(fapl, &mdc_config);
+ }
+#endif /* QAK */
+
+#ifdef QAK
+ H5Pset_small_data_block_size(fapl, (hsize_t)(50 * CHUNK_SIZE * DTYPE_SIZE));
+#endif /* QAK */
+
+#ifdef QAK
+ H5Pset_fapl_log(fapl, "append.log", H5FD_LOG_ALL, (size_t)(512 * 1024 * 1024));
+#endif /* QAK */
+
+ /* Create file creation property list */
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ return -1;
+
+#ifdef QAK
+ H5Pset_link_phase_change(fcpl, 0, 0);
+#endif /* QAK */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Creating file\n");
+
+ /* Create the file */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC | (swmr_write ? H5F_ACC_SWMR_WRITE : 0), fcpl, fapl)) < 0)
+ return -1;
+
+ /* Close file creation property list */
+ if(H5Pclose(fcpl) < 0)
+ return -1;
+
+ /* Close file access property list */
+ if(H5Pclose(fapl) < 0)
+ return -1;
+
+ /* Create attribute with (shared) random number seed - for sparse test */
+ if((sid = H5Screate(H5S_SCALAR)) < 0)
+ return -1;
+ if((aid = H5Acreate2(fid, "seed", H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ return -1;
+ if(H5Awrite(aid, H5T_NATIVE_UINT, &random_seed) < 0)
+ return -1;
+ if(H5Sclose(sid) < 0)
+ return -1;
+
+ /* Create datatype for creating datasets */
+ if((tid = create_symbol_datatype()) < 0)
+ return -1;
+
+ /* Create dataspace for creating datasets */
+ if((sid = H5Screate_simple(2, dims, max_dims)) < 0)
+ return -1;
+
+ /* Create dataset creation property list */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ return -1;
+ if(H5Pset_chunk(dcpl, 2, chunk_dims) < 0)
+ return -1;
+ if(comp_level >= 0) {
+ if(H5Pset_deflate(dcpl, (unsigned)comp_level) < 0)
+ return -1;
+ } /* end if */
+#ifdef FILLVAL_WORKS
+ /* Currently fill values do not work because they can bump the dataspace
+ * message to the second object header chunk. We should enable the fillval
+ * here when this is fixed. -NAF 8/11/11 */
+ HDmemset(&fillval, 0, sizeof(fillval));
+ fillval.rec_id = (uint64_t)ULLONG_MAX;
+ if(H5Pset_fill_value(dcpl, tid, &fillval) < 0)
+ return -1;
+#endif /* FILLVAL_WORKS */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Creating datasets\n");
+
+ /* Create the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++) {
+ hid_t dsid; /* Dataset ID */
+ char name_buf[64];
+ hbool_t move_dataspace_message = FALSE; /* Whether to move the dataspace message out of object header chunk #0 */
+
+ generate_name(name_buf, u, v);
+ if((dsid = H5Dcreate2(fid, name_buf, tid, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ return -1;
+
+ /* Determine if the dataspace message for this dataset should be
+ * moved out of chunk #0 of the object header
+             * (Set to TRUE for roughly one in four datasets, at random)
+ */
+ move_dataspace_message = !(HDrandom() % 4);
+ if(move_dataspace_message) {
+ unsigned chunk_num; /* Object header chunk # for dataspace message */
+
+ /* Move the dataspace message to a new object header chunk */
+ if(H5O_msg_move_to_new_chunk_test(dsid, H5O_SDSPACE_ID) < 0)
+ return -1;
+
+ /* Retrieve the chunk # for the dataspace message */
+ chunk_num = UINT_MAX;
+ if(H5O_msg_get_chunkno_test(dsid, H5O_SDSPACE_ID, &chunk_num) < 0)
+ return -1;
+ /* Should not be in chunk #0 for now */
+ if(0 == chunk_num)
+ return -1;
+ } /* end if */
+
+ if(H5Dclose(dsid) < 0)
+ return -1;
+ } /* end for */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Closing objects\n");
+
+    /* Close everything */
+ if(H5Pclose(dcpl) < 0)
+ return -1;
+ if(H5Sclose(sid) < 0)
+ return -1;
+ if(H5Tclose(tid) < 0)
+ return -1;
+ if(H5Fclose(fid) < 0)
+ return -1;
+
+ return 0;
+} /* end gen_skeleton() */
+
+static void
+usage(void)
+{
+ printf("\n");
+ printf("Usage error!\n");
+ printf("\n");
+ printf("Usage: swmr_generator [-q] [-s] [-c <deflate compression level>]\n");
+ printf(" [-i <index type>] [-r <random seed>]\n");
+ printf("\n");
+ printf("NOTE: The random seed option is only used by the sparse test. Other\n");
+ printf(" tests specify the random seed as a reader/writer option.\n");
+ printf("\n");
+ printf("<deflate compression level> should be -1 (for no compression) or 0-9\n");
+ printf("\n");
+ printf("<index type> should be b2 or ea\n");
+ printf("\n");
+ printf("Defaults to verbose (no '-q' given), no SWMR_WRITE mode (no '-s' given) no\n");
+ printf("compression ('-c -1'), v1 b-tree indexing (-i b1), and will generate a random\n");
+ printf("seed (no -r given).\n");
+ printf("\n");
+ HDexit(1);
+} /* end usage() */
+
+int main(int argc, const char *argv[])
+{
+ int comp_level = -1; /* Compression level (-1 is no compression) */
+ hbool_t verbose = TRUE; /* Whether to emit some informational messages */
+ hbool_t swmr_write = FALSE; /* Whether to create file with SWMR_WRITE access */
+ const char *index_type = "b1"; /* Chunk index type */
+ hbool_t use_seed = FALSE; /* Set to TRUE if a seed was set on the command line */
+ unsigned random_seed = 0; /* Random # seed */
+ unsigned u; /* Local index variables */
+ int temp;
+
+ /* Parse command line options */
+ if(argc > 1) {
+ u = 1;
+ while(u < (unsigned)argc) {
+ if(argv[u][0] == '-') {
+ switch(argv[u][1]) {
+ /* Compress dataset chunks */
+ case 'c':
+ comp_level = HDatoi(argv[u + 1]);
+ if(comp_level < -1 || comp_level > 9)
+ usage();
+ u += 2;
+ break;
+
+ /* Chunk index type */
+ case 'i':
+ index_type = argv[u + 1];
+ if(HDstrcmp(index_type, "ea")
+ && HDstrcmp(index_type, "b2"))
+ usage();
+ u += 2;
+ break;
+
+ /* Random # seed */
+ case 'r':
+ use_seed = TRUE;
+ temp = HDatoi(argv[u + 1]);
+ if(temp < 0)
+ usage();
+ else
+ random_seed = (unsigned)temp;
+ u += 2;
+ break;
+
+ /* Be quiet */
+ case 'q':
+ verbose = FALSE;
+ u++;
+ break;
+
+ /* Run with SWMR_WRITE */
+ case 's':
+ swmr_write = TRUE;
+ u++;
+ break;
+
+ default:
+ usage();
+ break;
+ } /* end switch */
+ } /* end if */
+ } /* end while */
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose) {
+ HDfprintf(stderr, "Parameters:\n");
+ HDfprintf(stderr, "\tswmr writes %s\n", swmr_write ? "on" : "off");
+ HDfprintf(stderr, "\tcompression level = %d\n", comp_level);
+ HDfprintf(stderr, "\tindex type = %s\n", index_type);
+ } /* end if */
+
+ /* Set the random seed */
+ if(!use_seed) {
+ struct timeval t;
+
+ HDgettimeofday(&t, NULL);
+ random_seed = (unsigned)(t.tv_usec);
+ } /* end if */
+ HDsrandom(random_seed);
+ /* ALWAYS emit the random seed for possible debugging */
+ HDfprintf(stderr, "Using generator random seed (used in sparse test only): %u\n", random_seed);
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Generating skeleton file: %s\n", FILENAME);
+
+ /* Generate file skeleton */
+ if(gen_skeleton(FILENAME, verbose, swmr_write, comp_level, index_type, random_seed) < 0) {
+ HDfprintf(stderr, "Error generating skeleton file!\n");
+ HDexit(1);
+ } /* end if */
+
+ return 0;
+}
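
gen_skeleton() notes that a file can be made SWMR-capable either by pinning it to the latest file format or by requesting SWMR-write access at creation time. The helper below isolates just that choice, assuming the same headers as the file above; it is a sketch for reference, not code from this patch.

#include "h5test.h"
#include "swmr_common.h"

static hid_t
create_swmr_capable_file(const char *filename, hbool_t swmr_write)
{
    hid_t fapl;
    hid_t fid;

    if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
        return -1;

    if(swmr_write) {
        /* (b) Request SWMR-write access at creation time (the '-s' option) */
        fid = H5Fcreate(filename, H5F_ACC_TRUNC | H5F_ACC_SWMR_WRITE, H5P_DEFAULT, fapl);
    } /* end if */
    else {
        /* (a) Plain write access, pinned to the latest file format so a
         * writer can later reopen the file with H5F_ACC_SWMR_WRITE */
        if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0) {
            H5Pclose(fapl);
            return -1;
        } /* end if */
        fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
    } /* end else */

    if(H5Pclose(fapl) < 0) {
        if(fid >= 0)
            H5Fclose(fid);
        return -1;
    } /* end if */

    return fid;
}
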
diff --git a/test/swmr_reader.c b/test/swmr_reader.c
new file mode 100644
index 0000000..e181d3a
--- /dev/null
+++ b/test/swmr_reader.c
@@ -0,0 +1,546 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: swmr_reader.c
+ *
+ * Purpose: Reads data from a randomly selected subset of the datasets
+ * in the SWMR test file.
+ *
+ * This program is intended to run concurrently with the
+ * swmr_writer program.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/***********/
+/* Headers */
+/***********/
+
+#include "h5test.h"
+#include "swmr_common.h"
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+static int check_dataset(hid_t fid, hbool_t verbose, FILE *verbose_file,
+ const char *sym_name, symbol_t *record, hid_t rec_sid);
+static int read_records(const char *filename, hbool_t verbose, FILE *verbose_file,
+ unsigned random_seed, unsigned long nseconds, unsigned poll_time,
+ unsigned ncommon, unsigned nrandom);
+
+/*******************/
+/* Local Variables */
+/*******************/
+
+static hid_t symbol_tid = -1; /* The type ID for the SWMR datasets */
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_dataset
+ *
+ * Purpose: For a given dataset, checks to make sure that the stated
+ * and actual sizes are the same. If they are not, then
+ * we have an inconsistent dataset due to a SWMR error.
+ *
+ * Parameters: hid_t fid
+ * The SWMR test file's ID.
+ *
+ * hbool_t verbose
+ * Whether verbose console output is desired.
+ *
+ * FILE *verbose_file
+ * File handle for verbose output
+ *
+ * const char *sym_name
+ * The name of the dataset from which to read.
+ *
+ * symbol_t *record
+ * Memory for the record. Must be pre-allocated.
+ *
+ * hid_t rec_sid
+ * The memory dataspace for access. It's always the same so
+ * there is no need to re-create it every time this function
+ * is called.
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+check_dataset(hid_t fid, hbool_t verbose, FILE *verbose_file,
+ const char *sym_name, symbol_t *record, hid_t rec_sid)
+{
+ hid_t dsid; /* Dataset ID */
+ hid_t file_sid; /* Dataset's space ID */
+ hssize_t snpoints; /* Number of elements in dataset */
+ hsize_t start[2] = {0, 0}, count[2] = {1, 1}; /* Hyperslab selection values */
+
+ HDassert(fid >= 0);
+ HDassert(sym_name);
+ HDassert(record);
+ HDassert(rec_sid >= 0);
+
+ /* Open dataset for symbol */
+ if((dsid = H5Dopen2(fid, sym_name, H5P_DEFAULT)) < 0)
+ return -1;
+
+ /* Get the dataset's dataspace */
+ if((file_sid = H5Dget_space(dsid)) < 0)
+ return -1;
+
+ /* Get the number of elements (= records, for 1-D datasets) */
+ if((snpoints = H5Sget_simple_extent_npoints(file_sid)) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Symbol = '%s', # of records = %lld\n", sym_name, (long long)snpoints);
+
+ /* Check if there are records for symbol */
+ if(snpoints > 0) {
+ /* Choose the last record in the dataset */
+ start[1] = (hsize_t)(snpoints - 1);
+ if(H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0)
+ return -1;
+
+ /* Read record from dataset */
+ record->rec_id = (uint64_t)ULLONG_MAX;
+ if(H5Dread(dsid, symbol_tid, rec_sid, file_sid, H5P_DEFAULT, record) < 0)
+ return -1;
+
+ /* Verify record value */
+ if(record->rec_id != start[1]) {
+ struct timeval tv;
+
+ HDgettimeofday(&tv, NULL);
+
+ if(verbose) {
+ HDfprintf(verbose_file, "*** ERROR ***\n");
+ HDfprintf(verbose_file, "Incorrect record value!\n");
+ HDfprintf(verbose_file, "Time = %llu.%llu, Symbol = '%s', # of records = %lld, record->rec_id = %llu\n", (unsigned long long)tv.tv_sec, (unsigned long long)tv.tv_usec, sym_name, (long long)snpoints, (unsigned long long)record->rec_id);
+ } /* end if */
+ return -1;
+ } /* end if */
+ } /* end if */
+
+ /* Close the dataset's dataspace */
+ if(H5Sclose(file_sid) < 0)
+ return -1;
+
+ /* Close dataset for symbol */
+ if(H5Dclose(dsid) < 0)
+ return -1;
+
+ return 0;
+} /* end check_dataset() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: read_records
+ *
+ * Purpose:     Repeatedly opens the SWMR test file and verifies records in
+ *              a selection of its datasets, for a specified amount of
+ *              time.
+ *
+ * The "common" datasets are a random selection from among
+ * the level 0 datasets. The "random" datasets are a random
+ * selection from among all the file's datasets. This scheme
+ * ensures that the level 0 datasets are interrogated vigorously.
+ *
+ * Parameters: const char *filename
+ * The SWMR test file's name.
+ *
+ * hbool_t verbose
+ * Whether verbose console output is desired.
+ *
+ * FILE *verbose_file
+ * File handle for verbose output
+ *
+ * unsigned random_seed
+ * Random seed for the file (used for verbose logging)
+ *
+ * unsigned long nseconds
+ *              The amount of time to read records, in seconds.
+ *
+ * unsigned poll_time
+ * The amount of time to sleep (s).
+ *
+ * unsigned ncommon
+ * The number of common/non-random datasets that will be opened.
+ *
+ * unsigned nrandom
+ * The number of random datasets that will be opened.
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+read_records(const char *filename, hbool_t verbose, FILE *verbose_file,
+ unsigned random_seed, unsigned long nseconds, unsigned poll_time,
+ unsigned ncommon, unsigned nrandom)
+{
+ time_t start_time; /* Starting time */
+ time_t curr_time; /* Current time */
+ symbol_info_t **sym_com = NULL; /* Pointers to array of common dataset IDs */
+ symbol_info_t **sym_rand = NULL; /* Pointers to array of random dataset IDs */
+ hid_t mem_sid; /* Memory dataspace ID */
+ hid_t fid; /* SWMR test file ID */
+ hid_t fapl; /* file access property list */
+ symbol_t record; /* The record to read from the dataset */
+ unsigned v; /* Local index variable */
+ hbool_t use_log_vfd = FALSE; /* Use the log VFD (set this manually) */
+
+ HDassert(filename);
+ HDassert(nseconds != 0);
+ HDassert(poll_time != 0);
+
+ /* Reset the record */
+ /* (record's 'info' field might need to change for each record read, also) */
+ HDmemset(&record, 0, sizeof(record));
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Choosing datasets\n");
+
+ /* Allocate space for 'common' datasets, if any */
+ if(ncommon > 0) {
+ /* Allocate array to hold pointers to symbols for common datasets */
+ if(NULL == (sym_com = (symbol_info_t **)HDmalloc(sizeof(symbol_info_t *) * ncommon)))
+ return -1;
+
+ /* Open the common datasets */
+ for(v = 0; v < ncommon; v++) {
+ unsigned offset; /* Offset of symbol to use */
+
+ /* Determine the offset of the symbol, within level 0 symbols */
+ /* (level 0 symbols are the most common symbols) */
+ offset = (unsigned)((unsigned)HDrandom() % symbol_count[0]);
+ sym_com[v] = &symbol_info[0][offset];
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Common symbol #%u = '%s'\n", v, symbol_info[0][offset].name);
+ } /* end for */
+ } /* end if */
+
+ /* Allocate space for 'random' datasets, if any */
+ if(nrandom > 0) {
+ /* Allocate array to hold pointers to symbols for random datasets */
+ if(NULL == (sym_rand = (symbol_info_t **)HDmalloc(sizeof(symbol_info_t *) * nrandom)))
+ return -1;
+
+ /* Determine the random datasets */
+ for(v = 0; v < nrandom; v++) {
+ symbol_info_t *sym; /* Symbol to use */
+
+ /* Determine the symbol, within all symbols */
+ if(NULL == (sym = choose_dataset()))
+ return -1;
+ sym_rand[v] = sym;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Random symbol #%u = '%s'\n", v, sym->name);
+ } /* end for */
+ } /* end if */
+
+ /* Create a dataspace for the record to read */
+ if((mem_sid = H5Screate(H5S_SCALAR)) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Reading records\n");
+
+ /* Get the starting time */
+ start_time = HDtime(NULL);
+ curr_time = start_time;
+
+ /* Create file access property list */
+ if((fapl = h5_fileaccess()) < 0)
+ return -1;
+
+    /* Log I/O to a file when the log VFD is enabled */
+ if(use_log_vfd) {
+ char verbose_name[1024];
+
+ HDsnprintf(verbose_name, sizeof(verbose_name), "swmr_reader.log.%u", random_seed);
+
+ H5Pset_fapl_log(fapl, verbose_name, H5FD_LOG_ALL, (size_t)(512 * 1024 * 1024));
+ } /* end if */
+
+ /* Loop over reading records until [at least] the correct # of seconds have passed */
+ while(curr_time < (time_t)(start_time + (time_t)nseconds)) {
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Opening file: %s\n", filename);
+
+ /* Open the file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY | H5F_ACC_SWMR_READ, fapl)) < 0)
+ return -1;
+
+ /* Check 'common' datasets, if any */
+ if(ncommon > 0) {
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Checking common symbols\n");
+
+ /* Iterate over common datasets */
+ for(v = 0; v < ncommon; v++) {
+ /* Check common dataset */
+ if(check_dataset(fid, verbose, verbose_file, sym_com[v]->name, &record, mem_sid) < 0)
+ return -1;
+ HDmemset(&record, 0, sizeof(record));
+ } /* end for */
+ } /* end if */
+
+ /* Check 'random' datasets, if any */
+ if(nrandom > 0) {
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Checking random symbols\n");
+
+ /* Iterate over random datasets */
+ for(v = 0; v < nrandom; v++) {
+ /* Check random dataset */
+ if(check_dataset(fid, verbose, verbose_file, sym_rand[v]->name, &record, mem_sid) < 0)
+ return -1;
+ HDmemset(&record, 0, sizeof(record));
+ } /* end for */
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Closing file\n");
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ return -1;
+
+ /* Sleep for the appropriate # of seconds */
+ HDsleep(poll_time);
+
+ /* Retrieve the current time */
+ curr_time = HDtime(NULL);
+ } /* end while */
+
+ /* Close the memory dataspace */
+ if(H5Sclose(mem_sid) < 0)
+ return -1;
+
+ /* Close the fapl */
+ if(H5Pclose(fapl) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Closing datasets\n");
+
+ /* Close 'random' datasets, if any */
+ if(nrandom > 0) {
+ /* Release array holding dataset ID's for random datasets */
+ HDfree(sym_rand);
+ } /* end if */
+
+ /* Close 'common' datasets, if any */
+ if(ncommon > 0) {
+ /* Release array holding dataset ID's for common datasets */
+ HDfree(sym_com);
+ } /* end if */
+
+ return 0;
+} /* end read_records() */
+
+static void
+usage(void)
+{
+ printf("\n");
+ printf("Usage error!\n");
+ printf("\n");
+ printf("Usage: swmr_reader [-q] [-s <# of seconds to sleep between polling>]\n");
+ printf(" [-h <# of common symbols to poll>] [-l <# of random symbols to poll>]\n");
+ printf(" [-r <random seed>] <# of seconds to test>\n");
+ printf("\n");
+ printf("<# of seconds to test> must be specified.\n");
+ printf("\n");
+ printf("Defaults to verbose (no '-q' given), 1 second between polling ('-s 1'),\n");
+ printf("5 common symbols to poll ('-h 5'), 10 random symbols to poll ('-l 10'),\n");
+ printf("and will generate a random seed (no -r given).\n");
+ printf("\n");
+ HDexit(1);
+}
+
+int main(int argc, const char *argv[])
+{
+ long nseconds = 0; /* # of seconds to test */
+ int poll_time = 1; /* # of seconds between polling */
+ int ncommon = 5; /* # of common symbols to poll */
+ int nrandom = 10; /* # of random symbols to poll */
+ hbool_t verbose = TRUE; /* Whether to emit some informational messages */
+ FILE *verbose_file = NULL; /* File handle for verbose output */
+    hbool_t use_seed = FALSE;        /* Set to TRUE if a seed was set on the command line */
+ unsigned random_seed = 0; /* Random # seed */
+ unsigned u; /* Local index variables */
+ int temp;
+
+ /* Parse command line options */
+ if(argc < 2)
+ usage();
+ if(argc > 1) {
+ u = 1;
+ while(u < (unsigned)argc) {
+ if(argv[u][0] == '-') {
+ switch(argv[u][1]) {
+ /* # of common symbols to poll */
+ case 'h':
+ ncommon = HDatoi(argv[u + 1]);
+ if(ncommon < 0)
+ usage();
+ u += 2;
+ break;
+
+ /* # of random symbols to poll */
+ case 'l':
+ nrandom = HDatoi(argv[u + 1]);
+ if(nrandom < 0)
+ usage();
+ u += 2;
+ break;
+
+ /* Be quiet */
+ case 'q':
+ verbose = FALSE;
+ u++;
+ break;
+
+ /* Random # seed */
+ case 'r':
+ use_seed = TRUE;
+ temp = HDatoi(argv[u + 1]);
+ if(temp < 0)
+ usage();
+ else
+ random_seed = (unsigned)temp;
+ u += 2;
+ break;
+
+ /* # of seconds between polling */
+ case 's':
+ poll_time = HDatoi(argv[u + 1]);
+ if(poll_time < 0)
+ usage();
+ u += 2;
+ break;
+
+ default:
+ usage();
+ break;
+ } /* end switch */
+ } /* end if */
+ else {
+ /* Get the number of records to append */
+ nseconds = HDatol(argv[u]);
+ if(nseconds <= 0)
+ usage();
+
+ u++;
+ } /* end else */
+ } /* end while */
+ } /* end if */
+ if(nseconds <= 0)
+ usage();
+ if(poll_time >= nseconds)
+ usage();
+
+ /* Set the random seed */
+ if(!use_seed) {
+ struct timeval t;
+
+ HDgettimeofday(&t, NULL);
+ random_seed = (unsigned)(t.tv_usec);
+ } /* end if */
+ HDsrandom(random_seed);
+
+ /* Open output file */
+ if(verbose) {
+ char verbose_name[1024];
+
+ HDsnprintf(verbose_name, sizeof(verbose_name), "swmr_reader.out.%u", random_seed);
+ if(NULL == (verbose_file = HDfopen(verbose_name, "w"))) {
+ HDfprintf(stderr, "Can't open verbose output file!\n");
+ HDexit(1);
+ }
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose) {
+ HDfprintf(verbose_file, "Parameters:\n");
+ HDfprintf(verbose_file, "\t# of seconds between polling = %d\n", poll_time);
+ HDfprintf(verbose_file, "\t# of common symbols to poll = %d\n", ncommon);
+ HDfprintf(verbose_file, "\t# of random symbols to poll = %d\n", nrandom);
+ HDfprintf(verbose_file, "\t# of seconds to test = %ld\n", nseconds);
+ } /* end if */
+
+ /* ALWAYS emit the random seed for possible debugging */
+ HDfprintf(stdout, "Using reader random seed: %u\n", random_seed);
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Generating symbol names\n");
+
+ /* Generate dataset names */
+ if(generate_symbols() < 0) {
+ HDfprintf(stderr, "Error generating symbol names!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Create datatype for creating datasets */
+ if((symbol_tid = create_symbol_datatype()) < 0)
+ return -1;
+
+ /* Reading records from datasets */
+ if(read_records(FILENAME, verbose, verbose_file, random_seed, (unsigned long)nseconds, (unsigned)poll_time, (unsigned)ncommon, (unsigned)nrandom) < 0) {
+ HDfprintf(stderr, "Error reading records from datasets (random_seed = %u)!\n", random_seed);
+ HDexit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Releasing symbols\n");
+
+ /* Clean up the symbols */
+ if(shutdown_symbols() < 0) {
+ HDfprintf(stderr, "Error releasing symbols!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Closing objects\n");
+
+ /* Close objects created */
+ if(H5Tclose(symbol_tid) < 0) {
+ HDfprintf(stderr, "Error closing symbol datatype!\n");
+ HDexit(1);
+ } /* end if */
+
+ return 0;
+}
+
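
The reader's outer loop is worth seeing in isolation: it reopens the file with SWMR read access on every pass so each check starts from fresh metadata, then sleeps between passes. A stripped-down sketch follows (illustration only, with the per-dataset checks elided to a comment).

#include "h5test.h"

static int
poll_loop_sketch(const char *filename, unsigned long nseconds, unsigned poll_time)
{
    time_t start_time = HDtime(NULL);
    time_t curr_time = start_time;

    while(curr_time < (time_t)(start_time + (time_t)nseconds)) {
        hid_t fid;

        /* Reopen for each pass so the reader sees the writer's latest metadata */
        if((fid = H5Fopen(filename, H5F_ACC_RDONLY | H5F_ACC_SWMR_READ, H5P_DEFAULT)) < 0)
            return -1;

        /* ... verify the chosen 'common' and 'random' datasets here,
         *     as check_dataset() above does ... */

        if(H5Fclose(fid) < 0)
            return -1;

        /* Wait before polling again, then refresh the clock */
        HDsleep(poll_time);
        curr_time = HDtime(NULL);
    } /* end while */

    return 0;
}
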
diff --git a/test/swmr_remove_reader.c b/test/swmr_remove_reader.c
new file mode 100644
index 0000000..11649e3
--- /dev/null
+++ b/test/swmr_remove_reader.c
@@ -0,0 +1,517 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: swmr_remove_reader.c
+ *
+ * Purpose: Reads data from a randomly selected subset of the datasets
+ * in the SWMR test file. Unlike the regular reader, these
+ * datasets will be shrinking.
+ *
+ * This program is intended to run concurrently with the
+ * swmr_remove_writer program.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/***********/
+/* Headers */
+/***********/
+
+#include "h5test.h"
+#include "swmr_common.h"
+
+/*******************/
+/* Local Variables */
+/*******************/
+
+static hid_t symbol_tid = -1;
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+static int check_dataset(hid_t fid, unsigned verbose, const char *sym_name,
+ symbol_t *record, hid_t rec_sid);
+static int read_records(const char *filename, unsigned verbose, unsigned long nseconds,
+ unsigned poll_time, unsigned ncommon, unsigned nrandom);
+static void usage(void);
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_dataset
+ *
+ * Purpose: For a given dataset, checks to make sure that the stated
+ * and actual sizes are the same. If they are not, then
+ * we have an inconsistent dataset due to a SWMR error.
+ *
+ * Parameters: hid_t fid
+ * The SWMR test file's ID.
+ *
+ * unsigned verbose
+ * Whether verbose console output is desired.
+ *
+ * const char *sym_name
+ * The name of the dataset from which to read.
+ *
+ * symbol_t *record
+ * Memory for the record. Must be pre-allocated.
+ *
+ * hid_t rec_sid
+ * The memory dataspace for access. It's always the same so
+ * there is no need to re-create it every time this function
+ * is called.
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+check_dataset(hid_t fid, unsigned verbose, const char *sym_name, symbol_t *record,
+ hid_t rec_sid)
+{
+ hid_t dsid; /* Dataset ID */
+ hid_t file_sid; /* Dataset's space ID */
+ hssize_t snpoints; /* Number of elements in dataset */
+ hsize_t start[2] = {0, 0}, count[2] = {1, 1}; /* Hyperslab selection values */
+
+ HDassert(fid >= 0);
+ HDassert(sym_name);
+ HDassert(record);
+ HDassert(rec_sid >= 0);
+
+ /* Open dataset for symbol */
+ if((dsid = H5Dopen2(fid, sym_name, H5P_DEFAULT)) < 0)
+ return -1;
+
+ /* Get the dataset's dataspace */
+ if((file_sid = H5Dget_space(dsid)) < 0)
+ return -1;
+
+ /* Get the number of elements (= records, for 1-D datasets) */
+ if((snpoints = H5Sget_simple_extent_npoints(file_sid)) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Symbol = '%s', # of records = %lld\n", sym_name, (long long)snpoints);
+
+ /* Check if there are records for symbol */
+ if(snpoints > 0) {
+ /* Choose a random record in the dataset, choosing the last record half
+ * the time */
+ start[1] = (hsize_t)(HDrandom() % (snpoints * 2));
+ if(start[1] > (hsize_t)(snpoints - 1))
+ start[1] = (hsize_t)(snpoints - 1);
+ if(H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0)
+ return -1;
+
+ /* Read record from dataset */
+#ifdef FILLVAL_WORKS
+ /* When shrinking the dataset, we cannot guarantee that the buffer will
+ * even be touched, unless there is a fill value. Since fill values do
+ * not work with SWMR currently (see note in swmr_generator.c), we
+ * simply initialize rec_id to 0. */
+ record->rec_id = (uint64_t)ULLONG_MAX - 1;
+#else /* FILLVAL_WORKS */
+ record->rec_id = (uint64_t)0;
+#endif /* FILLVAL_WORKS */
+ if(H5Dread(dsid, symbol_tid, rec_sid, file_sid, H5P_DEFAULT, record) < 0)
+ return -1;
+
+ /* Verify record value - note that it may be the fill value, because the
+ * chunk may be deleted before the object header has the updated
+ * dimensions */
+ if(record->rec_id != start[1] && record->rec_id != (uint64_t)0) {
+ HDfprintf(stderr, "*** ERROR ***\n");
+ HDfprintf(stderr, "Incorrect record value!\n");
+ HDfprintf(stderr, "Symbol = '%s', # of records = %lld, record->rec_id = %llx\n", sym_name, (long long)snpoints, (unsigned long long)record->rec_id);
+ return -1;
+ } /* end if */
+ } /* end if */
+
+ /* Close the dataset's dataspace */
+ if(H5Sclose(file_sid) < 0)
+ return -1;
+
+ /* Close dataset for symbol */
+ if(H5Dclose(dsid) < 0)
+ return -1;
+
+ return 0;
+} /* end check_dataset() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: read_records
+ *
+ * Purpose: Repeatedly opens the SWMR test file over a given number of
+ * seconds and verifies records in a selection of its datasets,
+ * looking for inconsistencies caused by SWMR errors.
+ *
+ * The "common" datasets are a random selection from among
+ * the level 0 datasets. The "random" datasets are a random
+ * selection from among all the file's datasets. This scheme
+ * ensures that the level 0 datasets are interrogated vigorously.
+ *
+ * Parameters: const char *filename
+ * The SWMR test file's name.
+ *
+ * unsigned verbose
+ * Whether verbose console output is desired.
+ *
+ * unsigned long nseconds
+ * The amount of time to read records, in seconds.
+ *
+ * unsigned poll_time
+ * The amount of time to sleep (s).
+ *
+ * unsigned ncommon
+ * The number of common/non-random datasets that will be opened.
+ *
+ * unsigned nrandom
+ * The number of random datasets that will be opened.
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+read_records(const char *filename, unsigned verbose, unsigned long nseconds,
+ unsigned poll_time, unsigned ncommon, unsigned nrandom)
+{
+ time_t start_time; /* Starting time */
+ time_t curr_time; /* Current time */
+ symbol_info_t **sym_com = NULL; /* Pointers to array of common dataset IDs */
+ symbol_info_t **sym_rand = NULL; /* Pointers to array of random dataset IDs */
+ hid_t mem_sid; /* Memory dataspace ID */
+ hid_t fid; /* SWMR test file ID */
+ hid_t fapl; /* File access property list */
+ symbol_t record; /* The record to add to the dataset */
+ unsigned v; /* Local index variable */
+
+ HDassert(filename);
+ HDassert(nseconds != 0);
+ HDassert(poll_time != 0);
+
+ /* Reset the record */
+ /* (record's 'info' field might need to change for each record written, also) */
+ HDmemset(&record, 0, sizeof(record));
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Choosing datasets\n");
+
+ /* Allocate space for 'common' datasets, if any */
+ if(ncommon > 0) {
+ /* Allocate array to hold pointers to symbols for common datasets */
+ if(NULL == (sym_com = (symbol_info_t **)HDmalloc(sizeof(symbol_info_t *) * ncommon)))
+ return -1;
+
+ /* Open the common datasets */
+ for(v = 0; v < ncommon; v++) {
+ unsigned offset; /* Offset of symbol to use */
+
+ /* Determine the offset of the symbol, within level 0 symbols */
+ /* (level 0 symbols are the most common symbols) */
+ offset = (unsigned)(HDrandom() % symbol_count[0]);
+ sym_com[v] = &symbol_info[0][offset];
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Common symbol #%u = '%s'\n", v, symbol_info[0][offset].name);
+ } /* end for */
+ } /* end if */
+
+ /* Allocate space for 'random' datasets, if any */
+ if(nrandom > 0) {
+ /* Allocate array to hold pointers to symbols for random datasets */
+ if(NULL == (sym_rand = (symbol_info_t **)HDmalloc(sizeof(symbol_info_t *) * nrandom)))
+ return -1;
+
+ /* Determine the random datasets */
+ for(v = 0; v < nrandom; v++) {
+ symbol_info_t *sym; /* Symbol to use */
+
+ /* Determine the symbol, within all symbols */
+ if(NULL == (sym = choose_dataset()))
+ return -1;
+ sym_rand[v] = sym;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Random symbol #%u = '%s'\n", v, sym->name);
+ } /* end for */
+ } /* end if */
+
+ /* Create a dataspace for the record to read */
+ if((mem_sid = H5Screate(H5S_SCALAR)) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Reading records\n");
+
+ /* Get the starting time */
+ start_time = HDtime(NULL);
+ curr_time = start_time;
+
+ /* Create file access property list */
+ if((fapl = h5_fileaccess()) < 0)
+ return -1;
+
+ /* Loop over reading records until [at least] the correct # of seconds have passed */
+ while(curr_time < (time_t)(start_time + (time_t)nseconds)) {
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Opening file: %s\n", filename);
+
+ /* Open the file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY | H5F_ACC_SWMR_READ, fapl)) < 0)
+ return -1;
+
+ /* Check 'common' datasets, if any */
+ if(ncommon > 0) {
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Checking common symbols\n");
+
+ /* Iterate over common datasets */
+ for(v = 0; v < ncommon; v++) {
+ /* Check common dataset */
+ if(check_dataset(fid, verbose, sym_com[v]->name, &record, mem_sid) < 0)
+ return -1;
+ HDmemset(&record, 0, sizeof(record));
+ } /* end for */
+ } /* end if */
+
+ /* Check 'random' datasets, if any */
+ if(nrandom > 0) {
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Checking random symbols\n");
+
+ /* Iterate over random datasets */
+ for(v = 0; v < nrandom; v++) {
+ /* Check random dataset */
+ if(check_dataset(fid, verbose, sym_rand[v]->name, &record, mem_sid) < 0)
+ return -1;
+ HDmemset(&record, 0, sizeof(record));
+ } /* end for */
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Closing file\n");
+
+ /* Close the file */
+ if(H5Fclose(fid) < 0)
+ return -1;
+
+ /* Sleep for the appropriate # of seconds */
+ HDsleep(poll_time);
+
+ /* Retrieve the current time */
+ curr_time = HDtime(NULL);
+ } /* end while */
+
+ /* Close the fapl */
+ if(H5Pclose(fapl) < 0)
+ return -1;
+
+ /* Close the memory dataspace */
+ if(H5Sclose(mem_sid) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Closing datasets\n");
+
+ /* Close 'random' datasets, if any */
+ if(nrandom > 0) {
+ /* Release array holding dataset ID's for random datasets */
+ HDfree(sym_rand);
+ } /* end if */
+
+ /* Close 'common' datasets, if any */
+ if(ncommon > 0) {
+ /* Release array holding dataset ID's for common datasets */
+ HDfree(sym_com);
+ } /* end if */
+
+ return 0;
+} /* end read_records() */
+
+static void
+usage(void)
+{
+ printf("\n");
+ printf("Usage error!\n");
+ printf("\n");
+ printf("Usage: swmr_remove_reader [-q] [-s <# of seconds to sleep between\n");
+ printf(" polling>] [-h <# of common symbols to poll>] [-l <# of random symbols\n");
+ printf(" to poll>] [-r <random seed>] <# of seconds to test>\n");
+ printf("\n");
+ printf("Defaults to verbose (no '-q' given), 1 second between polling ('-s 1'),\n");
+ printf("5 common symbols to poll ('-h 5'), 10 random symbols to poll ('-l 10'),\n");
+ printf("and will generate a random seed (no -r given).\n");
+ printf("\n");
+ HDexit(1);
+}
+
+int main(int argc, const char *argv[])
+{
+ long nseconds = 0; /* # of seconds to test */
+ int poll_time = 1; /* # of seconds between polling */
+ int ncommon = 5; /* # of common symbols to poll */
+ int nrandom = 10; /* # of random symbols to poll */
+ unsigned verbose = 1; /* Whether to emit some informational messages */
+ unsigned use_seed = 0; /* Set to 1 if a seed was set on the command line */
+ unsigned random_seed = 0; /* Random # seed */
+ unsigned u; /* Local index variables */
+ int temp;
+
+ /* Parse command line options */
+ if(argc < 2)
+ usage();
+ if(argc > 1) {
+ u = 1;
+ while(u < (unsigned)argc) {
+ if(argv[u][0] == '-') {
+ switch(argv[u][1]) {
+ /* # of common symbols to poll */
+ case 'h':
+ ncommon = HDatoi(argv[u + 1]);
+ if(ncommon < 0)
+ usage();
+ u += 2;
+ break;
+
+ /* # of random symbols to poll */
+ case 'l':
+ nrandom = HDatoi(argv[u + 1]);
+ if(nrandom < 0)
+ usage();
+ u += 2;
+ break;
+
+ /* Be quiet */
+ case 'q':
+ verbose = 0;
+ u++;
+ break;
+
+ /* Random # seed */
+ case 'r':
+ use_seed = 1;
+ temp = HDatoi(argv[u + 1]);
+ if(temp < 0)
+ usage();
+ else
+ random_seed = (unsigned)temp;
+ u += 2;
+ break;
+
+ /* # of seconds between polling */
+ case 's':
+ poll_time = HDatoi(argv[u + 1]);
+ if(poll_time < 0)
+ usage();
+ u += 2;
+ break;
+
+ default:
+ usage();
+ break;
+ } /* end switch */
+ } /* end if */
+ else {
+ /* Get the number of seconds to test */
+ nseconds = HDatol(argv[u]);
+ if(nseconds <= 0)
+ usage();
+
+ u++;
+ } /* end else */
+ } /* end while */
+ } /* end if */
+ if(nseconds <= 0)
+ usage();
+ if(poll_time >= nseconds)
+ usage();
+
+ /* Emit informational message */
+ if(verbose) {
+ HDfprintf(stderr, "Parameters:\n");
+ HDfprintf(stderr, "\t# of seconds between polling = %d\n", poll_time);
+ HDfprintf(stderr, "\t# of common symbols to poll = %d\n", ncommon);
+ HDfprintf(stderr, "\t# of random symbols to poll = %d\n", nrandom);
+ HDfprintf(stderr, "\t# of seconds to test = %ld\n", nseconds);
+ } /* end if */
+
+ /* Set the random seed */
+ if(0 == use_seed) {
+ struct timeval t;
+ HDgettimeofday(&t, NULL);
+ random_seed = (unsigned)(t.tv_usec);
+ } /* end if */
+ HDsrandom(random_seed);
+ /* ALWAYS emit the random seed for possible debugging */
+ HDfprintf(stderr, "Using reader random seed: %u\n", random_seed);
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Generating symbol names\n");
+
+ /* Generate dataset names */
+ if(generate_symbols() < 0) {
+ HDfprintf(stderr, "Error generating symbol names!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Create datatype for creating datasets */
+ if((symbol_tid = create_symbol_datatype()) < 0)
+ return -1;
+
+ /* Reading records from datasets */
+ if(read_records(FILENAME, verbose, (unsigned long)nseconds, (unsigned)poll_time, (unsigned)ncommon, (unsigned)nrandom) < 0) {
+ HDfprintf(stderr, "Error reading records from datasets!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Releasing symbols\n");
+
+ /* Clean up the symbols */
+ if(shutdown_symbols() < 0) {
+ HDfprintf(stderr, "Error releasing symbols!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Closing objects\n");
+
+ /* Close objects created */
+ if(H5Tclose(symbol_tid) < 0) {
+ HDfprintf(stderr, "Error closing symbol datatype!\n");
+ HDexit(1);
+ } /* end if */
+
+ return 0;
+}
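The record-selection arithmetic in check_dataset() above deliberately biases reads toward the tail of each dataset: an index is drawn from [0, 2*snpoints) and anything past the last record is clamped back to it, so the final record, the one most likely to be affected by a concurrent shrink, is checked roughly half the time. A minimal standalone sketch of that selection logic (plain C, no HDF5; the N_POINTS and N_TRIALS constants are illustrative only):

    #include <stdio.h>
    #include <stdlib.h>

    #define N_POINTS 10       /* stands in for snpoints */
    #define N_TRIALS 100000

    int main(void)
    {
        long hits_last = 0;   /* how often the last record gets picked */
        long i;

        srand(42);
        for(i = 0; i < N_TRIALS; i++) {
            long idx = rand() % (N_POINTS * 2);   /* same draw as check_dataset() */
            if(idx > N_POINTS - 1)
                idx = N_POINTS - 1;               /* clamp to the last record */
            if(idx == N_POINTS - 1)
                hits_last++;
        }

        /* Expect about 55%: the clamp contributes 50%, direct hits another 1/(2*N_POINTS) */
        printf("last record selected %.1f%% of the time\n",
               100.0 * (double)hits_last / (double)N_TRIALS);
        return 0;
    }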
diff --git a/test/swmr_remove_writer.c b/test/swmr_remove_writer.c
new file mode 100644
index 0000000..82c2f8b
--- /dev/null
+++ b/test/swmr_remove_writer.c
@@ -0,0 +1,379 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: swmr_remove_writer.c
+ *
+ * Purpose: Removes data from a randomly selected subset of the datasets
+ * in the SWMR test file.
+ *
+ * This program is intended to run concurrently with the
+ * swmr_remove_reader program. It is also run AFTER a sequential
+ * (not concurrent!) invoking of swmr_writer so the writer
+ * can dump a bunch of data into the datasets. Otherwise,
+ * there wouldn't be much to shrink :)
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/***********/
+/* Headers */
+/***********/
+
+#include "h5test.h"
+#include "swmr_common.h"
+
+/****************/
+/* Local Macros */
+/****************/
+
+/* The maximum number of records to remove in one step */
+#define MAX_REMOVE_SIZE 10
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+static hid_t open_skeleton(const char *filename, unsigned verbose, unsigned old);
+static int remove_records(hid_t fid, unsigned verbose, unsigned long nshrinks,
+ unsigned long flush_count);
+static void usage(void);
+
+
+/*-------------------------------------------------------------------------
+ * Function: open_skeleton
+ *
+ * Purpose: Opens the SWMR HDF5 file and datasets.
+ *
+ * Parameters: const char *filename
+ * The filename of the SWMR HDF5 file to open
+ *
+ * unsigned verbose
+ * Whether or not to emit verbose console messages
+ *
+ * unsigned old
+ * Whether to open the file without the latest file format
+ *
+ * Return: Success: The file ID of the opened SWMR file
+ * The dataset IDs are stored in a global array
+ *
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static hid_t
+open_skeleton(const char *filename, unsigned verbose, unsigned old)
+{
+ hid_t fid; /* File ID for new HDF5 file */
+ hid_t fapl; /* File access property list */
+ hid_t sid; /* Dataspace ID */
+ hsize_t dim[2]; /* Dataspace dimensions */
+ unsigned u, v; /* Local index variable */
+
+ HDassert(filename);
+
+ /* Create file access property list */
+ if((fapl = h5_fileaccess()) < 0)
+ return -1;
+
+ if(!old) {
+ /* Set to use the latest library format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ return -1;
+ }
+
+#ifdef QAK
+/* Increase the initial size of the metadata cache */
+ {
+ H5AC_cache_config_t mdc_config;
+
+ mdc_config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
+ H5Pget_mdc_config(fapl, &mdc_config);
+ HDfprintf(stderr, "mdc_config.initial_size = %lu\n", (unsigned long)mdc_config.initial_size);
+ HDfprintf(stderr, "mdc_config.epoch_length = %lu\n", (unsigned long)mdc_config.epoch_length);
+ mdc_config.set_initial_size = 1;
+ mdc_config.initial_size = 16 * 1024 * 1024;
+ /* mdc_config.epoch_length = 5000; */
+ H5Pset_mdc_config(fapl, &mdc_config);
+ }
+#endif /* QAK */
+
+#ifdef QAK
+ H5Pset_fapl_log(fapl, "append.log", H5FD_LOG_ALL, (size_t)(512 * 1024 * 1024));
+#endif /* QAK */
+
+ /* Open the file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE, fapl)) < 0)
+ return -1;
+
+ /* Close file access property list */
+ if(H5Pclose(fapl) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Opening datasets\n");
+
+ /* Open the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++) {
+ if((symbol_info[u][v].dsid = H5Dopen2(fid, symbol_info[u][v].name, H5P_DEFAULT)) < 0)
+ return -1;
+ if((sid = H5Dget_space(symbol_info[u][v].dsid)) < 0)
+ return -1;
+ if(2 != H5Sget_simple_extent_ndims(sid))
+ return -1;
+ if(H5Sget_simple_extent_dims(sid, dim, NULL) < 0)
+ return -1;
+ symbol_info[u][v].nrecords = dim[1];
+ } /* end for */
+
+ return fid;
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: remove_records
+ *
+ * Purpose: Removes a specified number of records from random datasets in
+ * the SWMR test file.
+ *
+ * Parameters: hid_t fid
+ * The file ID of the SWMR HDF5 file
+ *
+ * unsigned verbose
+ * Whether or not to emit verbose console messages
+ *
+ * unsigned long nshrinks
+ * # of records to remove from the datasets
+ *
+ * unsigned long flush_count
+ * # of records to write before flushing the file to disk
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+remove_records(hid_t fid, unsigned verbose, unsigned long nshrinks, unsigned long flush_count)
+{
+ unsigned long shrink_to_flush; /* # of removals before flush */
+ hsize_t dim[2] = {1,0}; /* Dataspace dimensions */
+ unsigned long u, v; /* Local index variables */
+
+ HDassert(fid >= 0);
+
+ /* Remove records from random datasets, according to frequency distribution */
+ shrink_to_flush = flush_count;
+ for(u = 0; u < nshrinks; u++) {
+ symbol_info_t *symbol; /* Symbol to remove record from */
+ hsize_t remove_size; /* Size to reduce dataset dimension by */
+
+ /* Get a random dataset, according to the symbol distribution */
+ symbol = choose_dataset();
+
+ /* Shrink the dataset's dataspace */
+ remove_size = (hsize_t)HDrandom() % MAX_REMOVE_SIZE + 1;
+ if(remove_size > symbol->nrecords)
+ symbol->nrecords = 0;
+ else
+ symbol->nrecords -= remove_size;
+ dim[1] = symbol->nrecords;
+ if(H5Dset_extent(symbol->dsid, dim) < 0)
+ return -1;
+
+ /* Check for flushing file */
+ if(flush_count > 0) {
+ /* Decrement count of records to write before flushing */
+ shrink_to_flush--;
+
+ /* Check for counter being reached */
+ if(0 == shrink_to_flush) {
+ /* Flush contents of file */
+ if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
+ return -1;
+
+ /* Reset flush counter */
+ shrink_to_flush = flush_count;
+ } /* end if */
+ } /* end if */
+ } /* end for */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Closing datasets\n");
+
+ /* Close the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++)
+ if(H5Dclose(symbol_info[u][v].dsid) < 0)
+ return -1;
+
+ return 0;
+}
+
+static void
+usage(void)
+{
+ printf("\n");
+ printf("Usage error!\n");
+ printf("\n");
+ printf("Usage: swmr_remove_writer [-q] [-o] [-f <# of shrinks between flushing\n");
+ printf(" file contents>] [-r <random seed>] <# of shrinks>\n");
+ printf("\n");
+ printf("<# of shrinks between flushing file contents> should be 0 (for no\n");
+ printf("flushing) or between 1 and (<# of shrinks> - 1)\n");
+ printf("\n");
+ printf("Defaults to verbose (no '-q' given), latest format when opening file (no '-o' given),\n");
+ printf("flushing every 1000 shrinks ('-f 1000'), and will generate a random seed (no -r given).\n");
+ printf("\n");
+ HDexit(1);
+}
+
+int main(int argc, const char *argv[])
+{
+ hid_t fid; /* File ID for file opened */
+ long nshrinks = 0; /* # of times to shrink the dataset */
+ long flush_count = 1000; /* # of records to write between flushing file */
+ unsigned verbose = 1; /* Whether to emit some informational messages */
+ unsigned old = 0; /* Whether to use non-latest-format when opening file */
+ unsigned use_seed = 0; /* Set to 1 if a seed was set on the command line */
+ unsigned random_seed = 0; /* Random # seed */
+ unsigned u; /* Local index variable */
+ int temp;
+
+ /* Parse command line options */
+ if(argc < 2)
+ usage();
+ if(argc > 1) {
+ u = 1;
+ while(u < (unsigned)argc) {
+ if(argv[u][0] == '-') {
+ switch(argv[u][1]) {
+ /* # of records to write between flushing file */
+ case 'f':
+ flush_count = HDatol(argv[u + 1]);
+ if(flush_count < 0)
+ usage();
+ u += 2;
+ break;
+
+ /* Be quiet */
+ case 'q':
+ verbose = 0;
+ u++;
+ break;
+
+ /* Random # seed */
+ case 'r':
+ use_seed = 1;
+ temp = HDatoi(argv[u + 1]);
+ if(temp < 0)
+ usage();
+ else
+ random_seed = (unsigned)temp;
+ u += 2;
+ break;
+
+ /* Use non-latest-format when opening file */
+ case 'o':
+ old = 1;
+ u++;
+ break;
+
+ default:
+ usage();
+ break;
+ } /* end switch */
+ } /* end if */
+ else {
+ /* Get the number of shrinks to perform */
+ nshrinks = HDatol(argv[u]);
+ if(nshrinks <= 0)
+ usage();
+
+ u++;
+ } /* end else */
+ } /* end while */
+ } /* end if */
+ if(nshrinks <= 0)
+ usage();
+ if(flush_count >= nshrinks)
+ usage();
+
+ /* Emit informational message */
+ if(verbose) {
+ HDfprintf(stderr, "Parameters:\n");
+ HDfprintf(stderr, "\t# of shrinks between flushes = %ld\n", flush_count);
+ HDfprintf(stderr, "\t# of shrinks = %ld\n", nshrinks);
+ } /* end if */
+
+ /* Set the random seed */
+ if(0 == use_seed) {
+ struct timeval t;
+ HDgettimeofday(&t, NULL);
+ random_seed = (unsigned)(t.tv_usec);
+ } /* end if */
+ HDsrandom(random_seed);
+ /* ALWAYS emit the random seed for possible debugging */
+ HDfprintf(stderr, "Using writer random seed: %u\n", random_seed);
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Generating symbol names\n");
+
+ /* Generate dataset names */
+ if(generate_symbols() < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Opening skeleton file: %s\n", FILENAME);
+
+ /* Open file skeleton */
+ if((fid = open_skeleton(FILENAME, verbose, old)) < 0) {
+ HDfprintf(stderr, "Error opening skeleton file!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Send a message to indicate "H5Fopen" is complete--releasing the file lock */
+ h5_send_message(WRITER_MESSAGE, NULL, NULL);
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Removing records\n");
+
+ /* Remove records from datasets */
+ if(remove_records(fid, verbose, (unsigned long)nshrinks, (unsigned long)flush_count) < 0) {
+ HDfprintf(stderr, "Error removing records from datasets!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Releasing symbols\n");
+
+ /* Clean up the symbols */
+ if(shutdown_symbols() < 0) {
+ HDfprintf(stderr, "Error releasing symbols!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Closing objects\n");
+
+ /* Close objects opened */
+ if(H5Fclose(fid) < 0) {
+ HDfprintf(stderr, "Error closing file!\n");
+ HDexit(1);
+ } /* end if */
+
+ return 0;
+}
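The shrink step in remove_records() above draws a removal size between 1 and MAX_REMOVE_SIZE, clamps the record count at zero rather than letting the unsigned value wrap, and only then shrinks the dataset with H5Dset_extent(). A minimal sketch of just that bookkeeping (plain C, no HDF5 calls; the starting count and number of steps are illustrative only):

    #include <stdio.h>
    #include <stdlib.h>

    #define MAX_REMOVE_SIZE 10   /* same constant as swmr_remove_writer.c */

    int main(void)
    {
        unsigned long nrecords = 57;   /* illustrative starting record count */
        int step;

        srand(1);
        for(step = 0; step < 8 && nrecords > 0; step++) {
            unsigned long remove_size = (unsigned long)(rand() % MAX_REMOVE_SIZE) + 1;

            /* Clamp at zero instead of underflowing the (unsigned) count */
            if(remove_size > nrecords)
                nrecords = 0;
            else
                nrecords -= remove_size;

            /* In the test program this is where dim[1] = nrecords and
             * H5Dset_extent() would shrink the dataset to the new size. */
            printf("step %d: removed %lu, %lu records remain\n",
                   step, remove_size, nrecords);
        }
        return 0;
    }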
diff --git a/test/swmr_sparse_reader.c b/test/swmr_sparse_reader.c
new file mode 100644
index 0000000..3c98f48
--- /dev/null
+++ b/test/swmr_sparse_reader.c
@@ -0,0 +1,447 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: swmr_sparse_reader.c
+ *
+ * Purpose: Reads data from a randomly selected subset of the datasets
+ * in the SWMR test file. Unlike the regular reader, these
+ * datasets will be shrinking.
+ *
+ * This program is intended to run concurrently with the
+ * swmr_sparse_writer program.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/***********/
+/* Headers */
+/***********/
+
+#include "h5test.h"
+#include "swmr_common.h"
+
+/****************/
+/* Local Macros */
+/****************/
+
+#define TIMEOUT 300
+
+/*******************/
+/* Local Variables */
+/*******************/
+
+static hid_t symbol_tid = (-1);
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+static int check_dataset(hid_t fid, unsigned verbose, const symbol_info_t *symbol,
+ symbol_t *record, hid_t rec_sid);
+static int read_records(const char *filename, unsigned verbose, unsigned long nrecords,
+ unsigned poll_time, unsigned reopen_count);
+static void usage(void);
+
+
+/*-------------------------------------------------------------------------
+ * Function: check_dataset
+ *
+ * Purpose: For a given dataset, checks to make sure that the stated
+ * and actual sizes are the same. If they are not, then
+ * we have an inconsistent dataset due to a SWMR error.
+ *
+ * Parameters: hid_t fid
+ * The SWMR test file's ID.
+ *
+ * unsigned verbose
+ * Whether verbose console output is desired.
+ *
+ * const symbol_info_t *symbol
+ * The dataset from which to read (the ID is in the struct).
+ * Must be pre-allocated.
+ *
+ * symbol_t *record
+ * Memory for the record. Must be pre-allocated.
+ *
+ * hid_t rec_sid
+ * The memory dataspace for access. It's always the same so
+ * there is no need to re-create it every time this function
+ * is called.
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+check_dataset(hid_t fid, unsigned verbose, const symbol_info_t *symbol, symbol_t *record,
+ hid_t rec_sid)
+{
+ hid_t dsid; /* Dataset ID */
+ hid_t file_sid; /* Dataset's space ID */
+ hsize_t start[2] = {0, 0}; /* Hyperslab selection values */
+ hsize_t count[2] = {1, 1}; /* Hyperslab selection values */
+
+ HDassert(fid >= 0);
+ HDassert(symbol);
+ HDassert(record);
+ HDassert(rec_sid >= 0);
+
+ /* Open dataset for symbol */
+ if((dsid = H5Dopen2(fid, symbol->name, H5P_DEFAULT)) < 0)
+ return -1;
+
+ /* Get the dataset's dataspace */
+ if((file_sid = H5Dget_space(dsid)) < 0)
+ return -1;
+
+ /* Choose the random record in the dataset (will be the same as chosen by
+ * the writer) */
+ start[1] = (hsize_t)HDrandom() % symbol->nrecords;
+ if(H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Symbol = '%s', location = %lld\n", symbol->name, (long long)start);
+
+ /* Read record from dataset */
+ record->rec_id = (uint64_t)ULLONG_MAX;
+ if(H5Dread(dsid, symbol_tid, rec_sid, file_sid, H5P_DEFAULT, record) < 0)
+ return -1;
+
+ /* Verify record value */
+ if(record->rec_id != start[1]) {
+ HDfprintf(stderr, "*** ERROR ***\n");
+ HDfprintf(stderr, "Incorrect record value!\n");
+ HDfprintf(stderr, "Symbol = '%s', location = %lld, record->rec_id = %llu\n", symbol->name, (long long)start, (unsigned long long)record->rec_id);
+ return -1;
+ } /* end if */
+
+ /* Close the dataset's dataspace */
+ if(H5Sclose(file_sid) < 0)
+ return -1;
+
+ /* Close dataset for symbol */
+ if(H5Dclose(dsid) < 0)
+ return -1;
+
+ return 0;
+} /* end check_dataset() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: read_records
+ *
+ * Purpose: Reads a given number of records from randomly selected
+ * datasets in the SWMR test file, waiting on each dataset's
+ * sequence-number attribute to stay in step with the writer.
+ *
+ * Parameters: const char *filename
+ * The SWMR test file's name.
+ *
+ * unsigned verbose
+ * Whether verbose console output is desired.
+ *
+ * unsigned long nrecords
+ * The total number of records to read.
+ *
+ * unsigned poll_time
+ * The amount of time to sleep (s).
+ *
+ * unsigned reopen_count
+ * The number of records to read between reopening the file.
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+read_records(const char *filename, unsigned verbose, unsigned long nrecords,
+ unsigned poll_time, unsigned reopen_count)
+{
+ hid_t fid; /* File ID */
+ hid_t aid; /* Attribute ID */
+ time_t start_time; /* Starting time */
+ hid_t mem_sid; /* Memory dataspace ID */
+ symbol_t record; /* The record to add to the dataset */
+ unsigned seed; /* Seed for random number generator */
+ unsigned iter_to_reopen = reopen_count; /* # of iterations until reopen */
+ unsigned long u; /* Local index variable */
+ hid_t fapl;
+
+ HDassert(filename);
+ HDassert(poll_time != 0);
+
+ /* Create file access property list */
+ if((fapl = h5_fileaccess()) < 0)
+ return -1;
+
+ H5Pset_fclose_degree(fapl, H5F_CLOSE_SEMI);
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Opening file: %s\n", filename);
+
+ /* Open the file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY | H5F_ACC_SWMR_READ, fapl)) < 0)
+ return -1;
+
+ /* Seed the random number generator with the attribute in the file */
+ if((aid = H5Aopen(fid, "seed", H5P_DEFAULT)) < 0)
+ return -1;
+ if(H5Aread(aid, H5T_NATIVE_UINT, &seed) < 0)
+ return -1;
+ if(H5Aclose(aid) < 0)
+ return -1;
+ HDsrandom(seed);
+
+ /* Reset the record */
+ /* (record's 'info' field might need to change for each record written, also) */
+ HDmemset(&record, 0, sizeof(record));
+
+ /* Create a dataspace for the record to read */
+ if((mem_sid = H5Screate(H5S_SCALAR)) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Reading records\n");
+
+ /* Get the starting time */
+ start_time = HDtime(NULL);
+
+ /* Read records */
+ for(u = 0; u < nrecords; u++) {
+ symbol_info_t *symbol = NULL; /* Symbol (dataset) */
+ htri_t attr_exists; /* Whether the sequence number attribute exists */
+ unsigned long file_u; /* Attribute sequence number (writer's "u") */
+
+ /* Get a random dataset, according to the symbol distribution */
+ symbol = choose_dataset();
+
+ /* Fill in "nrecords" field. Note that this depends on the writer
+ * using the same algorithm and "nrecords" */
+ symbol->nrecords = nrecords / 5;
+
+ /* Wait until we can read the dataset */
+ do {
+ /* Check if sequence attribute exists */
+ if((attr_exists = H5Aexists_by_name(fid, symbol->name, "seq", H5P_DEFAULT)) < 0)
+ return -1;
+
+ if(attr_exists) {
+ /* Read sequence number attribute */
+ if((aid = H5Aopen_by_name(fid, symbol->name, "seq", H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ return -1;
+ if(H5Aread(aid, H5T_NATIVE_ULONG, &file_u) < 0)
+ return -1;
+ if(H5Aclose(aid) < 0)
+ return -1;
+
+ /* Check if sequence number is at least u - if so, this should
+ * guarantee that this record has been written */
+ if(file_u >= u)
+ break;
+ } /* end if */
+
+ /* Check for timeout */
+ if(HDtime(NULL) >= (time_t)(start_time + (time_t)TIMEOUT)) {
+ HDfprintf(stderr, "Reader timed out\n");
+ return -1;
+ } /* end if */
+
+ /* Pause */
+ HDsleep(poll_time);
+
+ /* Retrieve and print the collection of metadata read retries */
+ if(print_metadata_retries_info(fid) < 0)
+ HDfprintf(stderr, "Warning: could not obtain metadata retries info\n");
+
+ /* Reopen the file */
+ if(H5Fclose(fid) < 0)
+ return -1;
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY | H5F_ACC_SWMR_READ, fapl)) < 0)
+ return -1;
+ iter_to_reopen = reopen_count;
+ } while(1);
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Checking dataset %lu\n", u);
+
+ /* Check dataset */
+ if(check_dataset(fid, verbose, symbol, &record, mem_sid) < 0)
+ return -1;
+ HDmemset(&record, 0, sizeof(record));
+
+ /* Check for reopen */
+ iter_to_reopen--;
+ if(iter_to_reopen == 0) {
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Reopening file: %s\n", filename);
+
+ /* Retrieve and print the collection of metadata read retries */
+ if(print_metadata_retries_info(fid) < 0)
+ HDfprintf(stderr, "Warning: could not obtain metadata retries info\n");
+
+ /* Reopen the file */
+ if(H5Fclose(fid) < 0)
+ return -1;
+ if((fid = H5Fopen(filename, H5F_ACC_RDONLY | H5F_ACC_SWMR_READ, fapl)) < 0)
+ return -1;
+ iter_to_reopen = reopen_count;
+ } /* end if */
+ } /* end for */
+
+ /* Retrieve and print the collection of metadata read retries */
+ if(print_metadata_retries_info(fid) < 0)
+ HDfprintf(stderr, "Warning: could not obtain metadata retries info\n");
+
+ /* Close file */
+ if(H5Fclose(fid) < 0)
+ return -1;
+
+ /* Close the memory dataspace */
+ if(H5Sclose(mem_sid) < 0)
+ return -1;
+
+ return 0;
+} /* end read_records() */
+
+static void
+usage(void)
+{
+ printf("\n");
+ printf("Usage error!\n");
+ printf("\n");
+ printf("Usage: swmr_sparse_reader [-q] [-s <# of seconds to wait for writer>]\n");
+ printf(" [-n <# of reads between reopens>] <# of records>\n");
+ printf("\n");
+ printf("Defaults to verbose (no '-q' given), 1 second wait ('-s 1') and 1 read\n");
+ printf("between reopens ('-r 1')\n");
+ printf("\n");
+ printf("Note that the # of records *must* be the same as that supplied to\n");
+ printf("swmr_sparse_writer\n");
+ printf("\n");
+ HDexit(1);
+} /* end usage() */
+
+int main(int argc, const char *argv[])
+{
+ long nrecords = 0; /* # of records to read */
+ int poll_time = 1; /* # of seconds to sleep when waiting for writer */
+ int reopen_count = 1; /* # of reads between reopens */
+ unsigned verbose = 1; /* Whether to emit some informational messages */
+ unsigned u; /* Local index variables */
+
+ /* Parse command line options */
+ if(argc < 2)
+ usage();
+ if(argc > 1) {
+ u = 1;
+ while(u < (unsigned)argc) {
+ if(argv[u][0] == '-') {
+ switch(argv[u][1]) {
+ /* # of reads between reopens */
+ case 'n':
+ reopen_count = HDatoi(argv[u + 1]);
+ if(reopen_count < 0)
+ usage();
+ u += 2;
+ break;
+
+ /* Be quiet */
+ case 'q':
+ verbose = 0;
+ u++;
+ break;
+
+ /* # of seconds between polling */
+ case 's':
+ poll_time = HDatoi(argv[u + 1]);
+ if(poll_time < 0)
+ usage();
+ u += 2;
+ break;
+
+ default:
+ usage();
+ break;
+ } /* end switch */
+ } /* end if */
+ else {
+ /* Get the number of records to read */
+ nrecords = HDatol(argv[u]);
+ if(nrecords <= 0)
+ usage();
+
+ u++;
+ } /* end else */
+ } /* end while */
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose) {
+ HDfprintf(stderr, "Parameters:\n");
+ HDfprintf(stderr, "\t# of seconds between polling = %d\n", poll_time);
+ HDfprintf(stderr, "\t# of reads between reopens = %d\n", reopen_count);
+ HDfprintf(stderr, "\t# of records to read = %ld\n", nrecords);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Generating symbol names\n");
+
+ /* Generate dataset names */
+ if(generate_symbols() < 0) {
+ HDfprintf(stderr, "Error generating symbol names!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Create datatype for creating datasets */
+ if((symbol_tid = create_symbol_datatype()) < 0)
+ return -1;
+
+ /* Reading records from datasets */
+ if(read_records(FILENAME, verbose, (unsigned long) nrecords, (unsigned)poll_time, (unsigned)reopen_count) < 0) {
+ HDfprintf(stderr, "Error reading records from datasets!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Releasing symbols\n");
+
+ /* Clean up the symbols */
+ if(shutdown_symbols() < 0) {
+ HDfprintf(stderr, "Error releasing symbols!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Closing objects\n");
+
+ /* Close objects created */
+ if(H5Tclose(symbol_tid) < 0) {
+ HDfprintf(stderr, "Error closing symbol datatype!\n");
+ HDexit(1);
+ } /* end if */
+
+ return 0;
+}
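The wait loop in read_records() above is the reader half of a simple handshake: the writer bumps a per-dataset "seq" attribute after each record it writes, and the reader refuses to read record u until that attribute reaches u, sleeping, reopening the file and re-checking until a timeout expires. A stripped-down sketch of that polling pattern (plain C, no HDF5; the simulated sequence source and the timeout values are illustrative only):

    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    #define TIMEOUT   5   /* seconds; the test program uses 300 */
    #define POLL_TIME 1   /* seconds between polls */

    /* Stand-in for reading the "seq" attribute from the file; here it just
     * advances with wall-clock time so the example terminates. */
    static unsigned long current_seq(time_t start)
    {
        return (unsigned long)(time(NULL) - start) * 2;
    }

    int main(void)
    {
        time_t start = time(NULL);
        unsigned long u;

        for(u = 0; u < 5; u++) {
            /* Wait until the writer's sequence number catches up to u */
            while(current_seq(start) < u) {
                if(time(NULL) >= start + TIMEOUT) {
                    fprintf(stderr, "reader timed out waiting for record %lu\n", u);
                    return 1;
                }
                sleep(POLL_TIME);   /* the test program also reopens the file here */
            }
            printf("record %lu is now safe to read\n", u);
        }
        return 0;
    }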
diff --git a/test/swmr_sparse_writer.c b/test/swmr_sparse_writer.c
new file mode 100644
index 0000000..e46f54c
--- /dev/null
+++ b/test/swmr_sparse_writer.c
@@ -0,0 +1,452 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+ /*-------------------------------------------------------------------------
+ *
+ * Created: swmr_sparse_writer.c
+ *
+ * Purpose: Writes data to a randomly selected subset of the datasets
+ * in the SWMR test file.
+ *
+ * This program is intended to run concurrently with the
+ * swmr_sparse_reader program.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/***********/
+/* Headers */
+/***********/
+
+#include "h5test.h"
+#include "swmr_common.h"
+
+/****************/
+/* Local Macros */
+/****************/
+
+#ifdef OUT
+#define BUSY_WAIT 100000
+#endif /* OUT */
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+static hid_t open_skeleton(const char *filename, unsigned verbose);
+static int add_records(hid_t fid, unsigned verbose, unsigned long nrecords,
+ unsigned long flush_count);
+static void usage(void);
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: open_skeleton
+ *
+ * Purpose: Opens the SWMR HDF5 file and datasets.
+ *
+ * Parameters: const char *filename
+ * The filename of the SWMR HDF5 file to open
+ *
+ * unsigned verbose
+ * Whether or not to emit verbose console messages
+ *
+ * Return: Success: The file ID of the opened SWMR file
+ * The dataset IDs are stored in a global array
+ *
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static hid_t
+open_skeleton(const char *filename, unsigned verbose)
+{
+ hid_t fid; /* File ID for new HDF5 file */
+ hid_t fapl; /* File access property list */
+ hid_t aid; /* Attribute ID */
+ unsigned seed; /* Seed for random number generator */
+ unsigned u, v; /* Local index variable */
+
+ HDassert(filename);
+
+ /* Create file access property list */
+ if((fapl = h5_fileaccess()) < 0)
+ return -1;
+
+ /* Set to use the latest library format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ return -1;
+
+#ifdef QAK
+ /* Increase the initial size of the metadata cache */
+ {
+ H5AC_cache_config_t mdc_config;
+
+ mdc_config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
+ H5Pget_mdc_config(fapl, &mdc_config);
+ HDfprintf(stderr, "mdc_config.initial_size = %lu\n", (unsigned long)mdc_config.initial_size);
+ HDfprintf(stderr,"mdc_config.epoch_length = %lu\n", (unsigned long)mdc_config.epoch_length);
+ mdc_config.set_initial_size = 1;
+ mdc_config.initial_size = 16 * 1024 * 1024;
+ /* mdc_config.epoch_length = 5000; */
+ H5Pset_mdc_config(fapl, &mdc_config);
+ }
+#endif /* QAK */
+
+#ifdef QAK
+ H5Pset_fapl_log(fapl, "append.log", H5FD_LOG_ALL, (size_t)(512 * 1024 * 1024));
+#endif /* QAK */
+
+ /* Open the file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE, fapl)) < 0)
+ return -1;
+
+ /* Close file access property list */
+ if(H5Pclose(fapl) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Opening datasets\n");
+
+ /* Seed the random number generator with the attribute in the file */
+ if((aid = H5Aopen(fid, "seed", H5P_DEFAULT)) < 0)
+ return -1;
+ if(H5Aread(aid, H5T_NATIVE_UINT, &seed) < 0)
+ return -1;
+ if(H5Aclose(aid) < 0)
+ return -1;
+ HDsrandom(seed);
+
+ /* Open the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++) {
+ if((symbol_info[u][v].dsid = H5Dopen2(fid, symbol_info[u][v].name, H5P_DEFAULT)) < 0)
+ return(-1);
+ symbol_info[u][v].nrecords = 0;
+ } /* end for */
+
+ return fid;
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: add_records
+ *
+ * Purpose: Writes a specified number of records to random datasets in
+ * the SWMR test file.
+ *
+ * Parameters: hid_t fid
+ * The file ID of the SWMR HDF5 file
+ *
+ * unsigned verbose
+ * Whether or not to emit verbose console messages
+ *
+ * unsigned long nrecords
+ * # of records to write to the datasets
+ *
+ * unsigned long flush_count
+ * # of records to write before flushing the file to disk
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+add_records(hid_t fid, unsigned verbose, unsigned long nrecords, unsigned long flush_count)
+{
+ hid_t tid; /* Datatype ID for records */
+ hid_t mem_sid; /* Memory dataspace ID */
+ hsize_t start[2] = {0, 0}; /* Hyperslab selection values */
+ hsize_t count[2] = {1, 1}; /* Hyperslab selection values */
+ symbol_t record; /* The record to add to the dataset */
+ unsigned long rec_to_flush; /* # of records left to write before flush */
+#ifdef OUT
+ volatile int dummy; /* Dummy variable for busy wait */
+#endif /* OUT */
+ hsize_t dim[2] = {1,0}; /* Dataspace dimensions */
+ unsigned long u, v; /* Local index variables */
+
+ HDassert(fid >= 0);
+
+ /* Reset the record */
+ /* (record's 'info' field might need to change for each record written, also) */
+ HDmemset(&record, 0, sizeof(record));
+
+ /* Create a dataspace for the record to add */
+ if((mem_sid = H5Screate(H5S_SCALAR)) < 0)
+ return -1;
+
+ /* Create datatype for appending records */
+ if((tid = create_symbol_datatype()) < 0)
+ return -1;
+
+ /* Add records to random datasets, according to frequency distribution */
+ rec_to_flush = flush_count;
+ for(u = 0; u < nrecords; u++) {
+ symbol_info_t *symbol; /* Symbol to write record to */
+ hid_t file_sid; /* Dataset's space ID */
+ hid_t aid; /* Attribute ID */
+ hbool_t corked; /* Whether the dataset was corked */
+
+ /* Get a random dataset, according to the symbol distribution */
+ symbol = choose_dataset();
+
+ /* If this is the first time the dataset has been opened, extend it and
+ * add the sequence attribute */
+ if(symbol->nrecords == 0) {
+ symbol->nrecords = nrecords / 5;
+ dim[1] = symbol->nrecords;
+
+ /* Cork the metadata cache, to prevent the object header from being
+ * flushed before the data has been written */
+ if(H5Odisable_mdc_flushes(symbol->dsid) < 0)
+ return -1;
+ corked = TRUE;
+
+ if(H5Dset_extent(symbol->dsid, dim) < 0)
+ return -1;
+
+ if((file_sid = H5Screate(H5S_SCALAR)) < 0)
+ return -1;
+ if((aid = H5Acreate2(symbol->dsid, "seq", H5T_NATIVE_ULONG, file_sid, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ return -1;
+ if(H5Sclose(file_sid) < 0)
+ return -1;
+ } /* end if */
+ else {
+ if((aid = H5Aopen(symbol->dsid, "seq", H5P_DEFAULT)) < 0)
+ return -1;
+ corked = FALSE;
+ } /* end else */
+
+ /* Get the coordinate to write */
+ start[1] = (hsize_t)HDrandom() % symbol->nrecords;
+
+ /* Set the record's ID (equal to its position) */
+ record.rec_id = start[1];
+
+ /* Get the dataset's dataspace */
+ if((file_sid = H5Dget_space(symbol->dsid)) < 0)
+ return -1;
+
+ /* Choose a random record in the dataset */
+ if(H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0)
+ return -1;
+
+ /* Write record to the dataset */
+ if(H5Dwrite(symbol->dsid, tid, mem_sid, file_sid, H5P_DEFAULT, &record) < 0)
+ return -1;
+
+ /* Write the sequence number attribute. Since we synchronize the random
+ * number seed, the readers will always generate the same sequence of
+ * randomly chosen datasets and offsets. Therefore, and because of the
+ * flush dependencies on the object header, the reader will be
+ * guaranteed to see the written data if the sequence attribute is >=u.
+ */
+ if(H5Awrite(aid, H5T_NATIVE_ULONG, &u) < 0)
+ return -1;
+
+ /* Close the attribute */
+ if(H5Aclose(aid) < 0)
+ return -1;
+
+ /* Uncork the metadata cache, if it was corked above */
+ if(corked)
+ if(H5Oenable_mdc_flushes(symbol->dsid) < 0)
+ return -1;
+
+ /* Close the dataset's dataspace */
+ if(H5Sclose(file_sid) < 0)
+ return -1;
+
+ /* Check for flushing file */
+ if(flush_count > 0) {
+ /* Decrement count of records to write before flushing */
+ rec_to_flush--;
+
+ /* Check for counter being reached */
+ if(0 == rec_to_flush) {
+ /* Flush contents of file */
+ if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
+ return -1;
+
+ /* Reset flush counter */
+ rec_to_flush = flush_count;
+ } /* end if */
+ } /* end if */
+
+#ifdef OUT
+ /* Busy wait, to let readers catch up */
+ /* If this is removed, also remove the BUSY_WAIT symbol
+ * at the top of the file.
+ */
+ dummy = 0;
+ for(v=0; v<BUSY_WAIT; v++)
+ dummy++;
+ if((unsigned long)dummy != v)
+ return -1;
+#endif /* OUT */
+
+ } /* end for */
+
+ /* Close the memory dataspace */
+ if(H5Sclose(mem_sid) < 0)
+ return -1;
+
+ /* Close the datatype */
+ if(H5Tclose(tid) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ fprintf(stderr, "Closing datasets\n");
+
+ /* Close the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++)
+ if(H5Dclose(symbol_info[u][v].dsid) < 0)
+ return -1;
+
+ return 0;
+}
+
+static void
+usage(void)
+{
+ printf("\n");
+ printf("Usage error!\n");
+ printf("\n");
+ printf("Usage: swmr_sparse_writer [-q] [-f <# of records to write between\n");
+ printf(" flushing file contents>] <# of records>\n");
+ printf("\n");
+ printf("<# of records to write between flushing file contents> should be 0\n");
+ printf("(for no flushing) or between 1 and (<# of records> - 1)\n");
+ printf("\n");
+ printf("Defaults to verbose (no '-q' given) and flushing every 1000 records\n");
+ printf("('-f 1000')\n");
+ printf("\n");
+ HDexit(1);
+}
+
+int main(int argc, const char *argv[])
+{
+ hid_t fid; /* File ID for file opened */
+ long nrecords = 0; /* # of records to append */
+ long flush_count = 1000; /* # of records to write between flushing file */
+ unsigned verbose = 1; /* Whether to emit some informational messages */
+ unsigned u; /* Local index variable */
+
+ /* Parse command line options */
+ if(argc < 2)
+ usage();
+ if(argc > 1) {
+ u = 1;
+ while(u < (unsigned)argc) {
+ if(argv[u][0] == '-') {
+ switch(argv[u][1]) {
+ /* # of records to write between flushing file */
+ case 'f':
+ flush_count = HDatol(argv[u + 1]);
+ if(flush_count < 0)
+ usage();
+ u += 2;
+ break;
+
+ /* Be quiet */
+ case 'q':
+ verbose = 0;
+ u++;
+ break;
+
+ default:
+ usage();
+ break;
+ } /* end switch */
+ } /* end if */
+ else {
+ /* Get the number of records to append */
+ nrecords = HDatol(argv[u]);
+ if(nrecords <= 0)
+ usage();
+
+ u++;
+ } /* end else */
+ } /* end while */
+ } /* end if */
+ if(nrecords <= 0)
+ usage();
+ if(flush_count >= nrecords)
+ usage();
+
+ /* Emit informational message */
+ if(verbose) {
+ HDfprintf(stderr, "Parameters:\n");
+ HDfprintf(stderr, "\t# of records between flushes = %ld\n", flush_count);
+ HDfprintf(stderr, "\t# of records to write = %ld\n", nrecords);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Generating symbol names\n");
+
+ /* Generate dataset names */
+ if(generate_symbols() < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Opening skeleton file: %s\n", FILENAME);
+
+ /* Open file skeleton */
+ if((fid = open_skeleton(FILENAME, verbose)) < 0) {
+ HDfprintf(stderr, "Error opening skeleton file!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Send a message to indicate "H5Fopen" is complete--releasing the file lock */
+ h5_send_message(WRITER_MESSAGE, NULL, NULL);
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Adding records\n");
+
+ /* Append records to datasets */
+ if(add_records(fid, verbose, (unsigned long)nrecords, (unsigned long)flush_count) < 0) {
+ HDfprintf(stderr, "Error appending records to datasets!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Releasing symbols\n");
+
+ /* Clean up the symbols */
+ if(shutdown_symbols() < 0) {
+ HDfprintf(stderr, "Error releasing symbols!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(stderr, "Closing objects\n");
+
+ /* Close objects opened */
+ if(H5Fclose(fid) < 0) {
+ HDfprintf(stderr, "Error closing file!\n");
+ HDexit(1);
+ } /* end if */
+
+ return 0;
+}
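The per-record ordering inside add_records() above is what keeps a concurrent sparse reader consistent: the dataset's object header is corked before anything changes, the record and the "seq" attribute are written while it is corked, and the header is only uncorked afterwards, so a reader that observes seq >= u is guaranteed to also see record u's data. A condensed restatement of that sequence, using only API calls that appear in the program above (the helper name and signature are illustrative, error-path cleanup is omitted, and it assumes the "seq" attribute already exists on the dataset):

    #include <stdlib.h>
    #include "hdf5.h"

    /* Write record number `seq` at a random position of a 1 x nrecords dataset,
     * keeping the object header corked until both the data and the sequence
     * attribute have been written.  Sketch only: real code must also close IDs
     * on every failure path. */
    static int
    write_one_sparse_record(hid_t dsid, hid_t mem_sid, hid_t tid,
                            unsigned long seq, hsize_t nrecords, const void *record)
    {
        hsize_t start[2] = {0, 0}, count[2] = {1, 1};
        hid_t file_sid, aid;

        /* Cork the object header so its updated metadata cannot reach the
         * file before the raw data does */
        if(H5Odisable_mdc_flushes(dsid) < 0)
            return -1;

        /* Pick the element to overwrite and write the record there */
        start[1] = (hsize_t)rand() % nrecords;
        if((file_sid = H5Dget_space(dsid)) < 0)
            return -1;
        if(H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0)
            return -1;
        if(H5Dwrite(dsid, tid, mem_sid, file_sid, H5P_DEFAULT, record) < 0)
            return -1;
        if(H5Sclose(file_sid) < 0)
            return -1;

        /* Publish the sequence number only after the data has been written
         * (the test program creates "seq" on the dataset's first use) */
        if((aid = H5Aopen(dsid, "seq", H5P_DEFAULT)) < 0)
            return -1;
        if(H5Awrite(aid, H5T_NATIVE_ULONG, &seq) < 0)
            return -1;
        if(H5Aclose(aid) < 0)
            return -1;

        /* Uncork the object header, allowing the metadata to be flushed */
        return H5Oenable_mdc_flushes(dsid) < 0 ? -1 : 0;
    }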
diff --git a/test/swmr_start_write.c b/test/swmr_start_write.c
new file mode 100644
index 0000000..2d5c3f9
--- /dev/null
+++ b/test/swmr_start_write.c
@@ -0,0 +1,713 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: swmr_start_write.c
+ *
+ * Purpose: This program enables SWMR writing mode via H5Fstart_swmr_write().
+ * It writes data to a randomly selected subset of the datasets
+ * in the SWMR test file; and it is intended to run concurrently
+ * with the swmr_reader program.
+ *
+ * NOTE: The routines in this program are basically copied and modified from
+ * swmr*.c.
+ *-------------------------------------------------------------------------
+ */
+
+/***********/
+/* Headers */
+/***********/
+
+#include "h5test.h"
+#include "swmr_common.h"
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+static hid_t create_file(const char *filename, hbool_t verbose,
+ FILE *verbose_file, unsigned random_seed);
+static int create_datasets(hid_t fid, int comp_level, hbool_t verbose,
+ FILE *verbose_file, const char *index_type);
+static int create_close_datasets(hid_t fid, int comp_level, hbool_t verbose,
+ FILE *verbose_file);
+static int open_datasets(hid_t fid, hbool_t verbose, FILE *verbose_file);
+static hid_t open_file(const char *filename, hbool_t verbose,
+ FILE *verbose_file);
+static int add_records(hid_t fid, hbool_t verbose, FILE *verbose_file,
+ unsigned long nrecords, unsigned long flush_count);
+static void usage(void);
+
+#define CHUNK_SIZE 50 /* Chunk size for created datasets */
+
+
+/*-------------------------------------------------------------------------
+ * Function: create_file
+ *
+ * Purpose: Creates the HDF5 file (without SWMR access) which will be
+ * used for testing H5Fstart_swmr_write().
+ *
+ * Parameters:
+ * filename: The SWMR test file's name.
+ * verbose: whether verbose console output is desired.
+ * verbose_file: file pointer for verbose output
+ * random_seed: The random seed to store in the file.
+ * The sparse tests use this value.
+ *
+ * Return: Success: the file ID
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static hid_t
+create_file(const char *filename, hbool_t verbose, FILE *verbose_file,
+ unsigned random_seed)
+{
+ hid_t fid; /* File ID for new HDF5 file */
+ hid_t fcpl; /* File creation property list */
+ hid_t fapl; /* File access property list */
+ hid_t sid; /* Dataspace ID */
+ hid_t aid; /* Attribute ID */
+
+ HDassert(filename);
+
+ /* Create file access property list */
+ if((fapl = h5_fileaccess()) < 0)
+ return -1;
+
+ /* We ALWAYS select the latest file format for SWMR */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ return -1;
+
+#ifdef QAK
+ if(verbose) {
+ char verbose_name[1024];
+
+ HDsnprintf(verbose_name, sizeof(verbose_name), "swmr_start_write.log.%u", random_seed);
+
+ H5Pset_fapl_log(fapl, verbose_name, H5FD_LOG_ALL, (size_t)(512 * 1024 * 1024));
+ } /* end if */
+#endif /* QAK */
+
+ /* Create file creation property list */
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Creating file without SWMR access\n");
+
+ /* Create the file */
+ if((fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl)) < 0)
+ return -1;
+
+ /* Close file creation property list */
+ if(H5Pclose(fcpl) < 0)
+ return -1;
+
+ /* Close file access property list */
+ if(H5Pclose(fapl) < 0)
+ return -1;
+
+ /* Create attribute with (shared) random number seed - for sparse test */
+ if((sid = H5Screate(H5S_SCALAR)) < 0)
+ return -1;
+ if((aid = H5Acreate2(fid, "seed", H5T_NATIVE_UINT, sid, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ return -1;
+ if(H5Awrite(aid, H5T_NATIVE_UINT, &random_seed) < 0)
+ return -1;
+ if(H5Sclose(sid) < 0)
+ return -1;
+ if(H5Aclose(aid) < 0)
+ return -1;
+
+ return fid;
+} /* end create_file() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: create_datasets
+ *
+ * Purpose: Create datasets (and keep them opened) which will be used for testing
+ * H5Fstart_swmr_write().
+ *
+ * Parameters:
+ * fid: file ID for the SWMR test file
+ * comp_level: the compression level
+ * index_type: The chunk index type (b1 | b2 | ea | fa)
+ * verbose: whether verbose console output is desired.
+ * verbose_file: file pointer for verbose output
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+create_datasets(hid_t fid, int comp_level, hbool_t verbose, FILE *verbose_file,
+ const char *index_type)
+{
+ hid_t dcpl; /* Dataset creation property list */
+ hid_t tid; /* Datatype for dataset elements */
+ hid_t sid; /* Dataspace ID */
+ hsize_t dims[2] = {1, 0}; /* Dataset starting dimensions */
+ hsize_t max_dims[2] = {1, H5S_UNLIMITED}; /* Dataset maximum dimensions */
+ hsize_t chunk_dims[2] = {1, CHUNK_SIZE}; /* Chunk dimensions */
+ unsigned u, v; /* Local index variable */
+
+ HDassert(index_type);
+
+ /* Create datatype for creating datasets */
+ if((tid = create_symbol_datatype()) < 0)
+ return -1;
+
+ /* Two chunk index types are tested here: with one unlimited
+ * dimension the library uses the extensible array index, and with
+ * two unlimited dimensions it uses a version-2 B-tree.
+ */
+ if(!HDstrcmp(index_type, "b2"))
+ max_dims[0] = H5S_UNLIMITED;
+
+ /* Create dataspace for creating datasets */
+ if((sid = H5Screate_simple(2, dims, max_dims)) < 0)
+ return -1;
+
+ /* Create dataset creation property list */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ return -1;
+ if(H5Pset_chunk(dcpl, 2, chunk_dims) < 0)
+ return -1;
+ if(comp_level >= 0) {
+ if(H5Pset_deflate(dcpl, (unsigned)comp_level) < 0)
+ return -1;
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Creating datasets\n");
+
+ /* Create the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++) {
+
+ if((symbol_info[u][v].dsid = H5Dcreate2(fid, symbol_info[u][v].name, tid, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ return -1;
+ symbol_info[u][v].nrecords = 0;
+
+ } /* end for */
+
+ return 0;
+} /* create_datasets() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: create_close_datasets
+ *
+ * Purpose: Create and close datasets which will be used for testing
+ * H5Fstart_swmr_write().
+ *
+ * Parameters:
+ * fid: file ID for the SWMR test file
+ * comp_level: the compression level
+ * verbose: whether verbose console output is desired.
+ * verbose_file: file pointer for verbose output
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+create_close_datasets(hid_t fid, int comp_level, hbool_t verbose, FILE *verbose_file)
+{
+ hid_t dcpl; /* Dataset creation property list */
+ hid_t tid; /* Datatype for dataset elements */
+ hid_t sid; /* Dataspace ID */
+ hsize_t dims[2] = {1, 0}; /* Dataset starting dimensions */
+ hsize_t max_dims[2] = {1, H5S_UNLIMITED}; /* Dataset maximum dimensions */
+ hsize_t chunk_dims[2] = {1, CHUNK_SIZE}; /* Chunk dimensions */
+ unsigned u, v; /* Local index variable */
+
+ /* Create datatype for creating datasets */
+ if((tid = create_symbol_datatype()) < 0)
+ return -1;
+
+ /* Create dataspace for creating datasets */
+ if((sid = H5Screate_simple(2, dims, max_dims)) < 0)
+ return -1;
+
+ /* Create dataset creation property list */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ return -1;
+ if(H5Pset_chunk(dcpl, 2, chunk_dims) < 0)
+ return -1;
+ if(comp_level >= 0) {
+ if(H5Pset_deflate(dcpl, (unsigned)comp_level) < 0)
+ return -1;
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Creating datasets\n");
+
+ /* Create the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++) {
+ hid_t dsid; /* Dataset ID */
+ char name_buf[64];
+
+ generate_name(name_buf, u, v);
+ if((dsid = H5Dcreate2(fid, name_buf, tid, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ return -1;
+
+ if(H5Dclose(dsid) < 0)
+ return -1;
+ } /* end for */
+
+ /* Closing */
+ if(H5Pclose(dcpl) < 0)
+ return -1;
+ if(H5Sclose(sid) < 0)
+ return -1;
+ if(H5Tclose(tid) < 0)
+ return -1;
+
+ return 0;
+} /* create_close_datasets() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: open_file
+ *
+ * Purpose: Opens the HDF5 test file without SWMR access.
+ *
+ * Parameters:
+ * filename: The filename of the HDF5 file to open
+ * verbose: whether or not to emit verbose console messages
+ * verbose_file: file pointer for verbose output
+ *
+ * Return: Success: The file ID of the opened SWMR file
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static hid_t
+open_file(const char *filename, hbool_t verbose, FILE *verbose_file)
+{
+ hid_t fid; /* File ID for new HDF5 file */
+ hid_t fapl; /* File access property list */
+
+ HDassert(filename);
+
+ /* Create file access property list */
+ if((fapl = h5_fileaccess()) < 0)
+ return -1;
+
+ /* Set to use the latest library format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Opening the file without SWMR access: %s\n", filename);
+
+ /* Open the file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR, fapl)) < 0)
+ return -1;
+
+ /* Close file access property list */
+ if(H5Pclose(fapl) < 0)
+ return -1;
+
+ return fid;
+} /* open_file() */
+
+
+
+/*-------------------------------------------------------------------------
+ * Function: open_datasets
+ *
+ * Purpose: Opens the datasets.
+ *
+ * Parameters:
+ * fid: file ID of the SWMR HDF5 file
+ * verbose: whether or not to emit verbose console messages
+ * verbose_file: file pointer for verbose output
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+open_datasets(hid_t fid, hbool_t verbose, FILE *verbose_file)
+{
+ unsigned u, v; /* Local index variable */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Opening datasets\n");
+
+ /* Open the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++) {
+ if((symbol_info[u][v].dsid = H5Dopen2(fid, symbol_info[u][v].name, H5P_DEFAULT)) < 0)
+ return -1;
+ symbol_info[u][v].nrecords = 0;
+ } /* end for */
+
+ return 0;
+} /* open_datasets() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: add_records
+ *
+ * Purpose: Writes a specified number of records to random datasets in
+ * the SWMR test file.
+ *
+ * Parameters:
+ * fid: The file ID of the SWMR HDF5 file
+ * verbose: Whether or not to emit verbose console messages
+ * verbose_file: file pointer for verbose output
+ * nrecords: # of records to write to the datasets
+ * flush_count: # of records to write before flushing the file to disk
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+add_records(hid_t fid, hbool_t verbose, FILE *verbose_file,
+ unsigned long nrecords, unsigned long flush_count)
+{
+ hid_t tid; /* Datatype ID for records */
+ hid_t mem_sid; /* Memory dataspace ID */
+ hsize_t start[2] = {0, 0}, count[2] = {1, 1}; /* Hyperslab selection values */
+ hsize_t dim[2] = {1, 0}; /* Dataspace dimensions */
+ symbol_t record; /* The record to add to the dataset */
+ unsigned long rec_to_flush; /* # of records left to write before flush */
+ unsigned long u, v; /* Local index variables */
+
+ HDassert(fid >= 0);
+
+ /* Reset the record */
+ /* (record's 'info' field might need to change for each record written, also) */
+ HDmemset(&record, 0, sizeof(record));
+
+ /* Create a dataspace for the record to add */
+ if((mem_sid = H5Screate(H5S_SCALAR)) < 0)
+ return -1;
+
+ /* Create datatype for appending records */
+ if((tid = create_symbol_datatype()) < 0)
+ return -1;
+
+ /* Add records to random datasets, according to frequency distribution */
+ rec_to_flush = flush_count;
+ for(u = 0; u < nrecords; u++) {
+ symbol_info_t *symbol; /* Symbol to write record to */
+ hid_t file_sid; /* Dataset's space ID */
+
+ /* Get a random dataset, according to the symbol distribution */
+ symbol = choose_dataset();
+
+ /* Set the record's ID (equal to its position) */
+ record.rec_id = symbol->nrecords;
+
+ /* Get the coordinate to write */
+ start[1] = symbol->nrecords;
+
+ /* Cork the metadata cache, to prevent the object header from being
+ * flushed before the data has been written */
+ if(H5Odisable_mdc_flushes(symbol->dsid) < 0)
+ return -1;
+
+ /* Extend the dataset's dataspace to hold the new record */
+ symbol->nrecords++;
+ dim[1] = symbol->nrecords;
+ if(H5Dset_extent(symbol->dsid, dim) < 0)
+ return -1;
+
+ /* Get the dataset's dataspace */
+ if((file_sid = H5Dget_space(symbol->dsid)) < 0)
+ return -1;
+
+ /* Choose the last record in the dataset */
+ if(H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0)
+ return -1;
+
+ /* Write record to the dataset */
+ if(H5Dwrite(symbol->dsid, tid, mem_sid, file_sid, H5P_DEFAULT, &record) < 0)
+ return -1;
+
+ /* Uncork the metadata cache */
+ if(H5Oenable_mdc_flushes(symbol->dsid) < 0)
+ return -1;
+
+ /* Close the dataset's dataspace */
+ if(H5Sclose(file_sid) < 0)
+ return -1;
+
+ /* Check for flushing file */
+ if(flush_count > 0) {
+ /* Decrement count of records to write before flushing */
+ rec_to_flush--;
+
+ /* Check for counter being reached */
+ if(0 == rec_to_flush) {
+ /* Flush contents of file */
+ if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
+ return -1;
+
+ /* Reset flush counter */
+ rec_to_flush = flush_count;
+ } /* end if */
+ } /* end if */
+ } /* end for */
+
+ /* Close the memory dataspace */
+ if(H5Sclose(mem_sid) < 0)
+ return -1;
+
+ /* Close the datatype */
+ if(H5Tclose(tid) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Closing datasets\n");
+
+ /* Close the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++)
+ if(H5Dclose(symbol_info[u][v].dsid) < 0)
+ return -1;
+
+ return 0;
+} /* add_records() */
+
+static void
+usage(void)
+{
+ printf("\n");
+ printf("Usage error!\n");
+ printf("\n");
+ printf("Usage: swmr_start_write [-f <# of records to write between flushing file contents>]\n");
+ printf(" [-i <index type>] [-c <deflate compression level>]\n");
+ printf(" [-r <random seed>] [-q] <# of records>\n");
+ printf("\n");
+ printf("<# of records to write between flushing file contents> should be 0\n");
+ printf("(for no flushing) or between 1 and (<# of records> - 1).\n");
+ printf("\n");
+ printf("<index type> should be b2 or ea\n");
+ printf("\n");
+ printf("<deflate compression level> should be -1 (for no compression) or 0-9\n");
+ printf("\n");
+ printf("<# of records> must be specified.\n");
+ printf("\n");
+ printf("Defaults to flushing every 10000 records ('-f 10000'),\n");
+ printf("v1 b-tree indexing (-i b1), compression ('-c -1'),\n");
+ printf("will generate a random seed (no -r given), and verbose (no '-q' given)\n");
+ printf("\n");
+ HDexit(1);
+} /* usage() */
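+
+/*
+ * Illustrative example (not part of the original test source): given the
+ * options documented in usage() above, a hypothetical invocation that writes
+ * 20000 records, flushes every 1000 records, selects extensible-array chunk
+ * indexing, and disables compression might look like:
+ *
+ *     ./swmr_start_write -i ea -c -1 -f 1000 20000
+ */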
+
+/*
+ * Can test with different scenarios as below:
+ * 1) create_file(), create_datasets(), H5Fstart_swmr_write(), add_records(), H5Fclose().
+ * 2) create_file(), create_close_datasets(), open_datasets(), H5Fstart_swmr_write(), add_records(), H5Fclose().
+ * 3) create_file(), create_close_datasets(), H5Fclose(),
+ * open_file(), open_datasets(), H5Fstart_swmr_write(), add_records(), H5Fclose().
+ */
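+/*
+ * Illustrative sketch (not compiled into this test): scenario 3 above would
+ * correspond roughly to the following call sequence, with error checking and
+ * the surrounding setup (symbol generation, verbose handling, messaging)
+ * omitted:
+ *
+ *     fid = create_file(FILENAME, verbose, verbose_file, random_seed);
+ *     create_close_datasets(fid, comp_level, verbose, verbose_file);
+ *     H5Fclose(fid);
+ *     fid = open_file(FILENAME, verbose, verbose_file);
+ *     open_datasets(fid, verbose, verbose_file);
+ *     H5Fstart_swmr_write(fid);
+ *     add_records(fid, verbose, verbose_file, nrecords, flush_count);
+ *     H5Fclose(fid);
+ */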
+int main(int argc, const char *argv[])
+{
+ hid_t fid; /* File ID for file opened */
+ long nrecords = 0; /* # of records to append */
+ long flush_count = 10000; /* # of records to write between flushing file */
+ hbool_t verbose = TRUE; /* Whether to emit some informational messages */
+ FILE *verbose_file = NULL; /* File handle for verbose output */
+ hbool_t use_seed = FALSE; /* Set to TRUE if a seed was set on the command line */
+ unsigned random_seed = 0; /* Random # seed */
+ int comp_level = -1; /* Compression level (-1 is no compression) */
+ const char *index_type = "b1"; /* Chunk index type */
+ unsigned u; /* Local index variable */
+ int temp; /* Temporary variable */
+
+ /* Parse command line options */
+ if(argc < 2)
+ usage();
+ if(argc > 1) {
+ u = 1;
+ while(u < (unsigned)argc) {
+ if(argv[u][0] == '-') {
+ switch(argv[u][1]) {
+ /* Compress dataset chunks */
+ case 'c':
+ comp_level = HDatoi(argv[u + 1]);
+ if(comp_level < -1 || comp_level > 9)
+ usage();
+ u += 2;
+ break;
+
+ /* Chunk index type */
+ case 'i':
+ index_type = argv[u + 1];
+ if(HDstrcmp(index_type, "ea")
+ && HDstrcmp(index_type, "b2"))
+ usage();
+ u += 2;
+ break;
+
+ /* # of records to write between flushing file */
+ case 'f':
+ flush_count = HDatol(argv[u + 1]);
+ if(flush_count < 0)
+ usage();
+ u += 2;
+ break;
+
+ /* Be quiet */
+ case 'q':
+ verbose = FALSE;
+ u++;
+ break;
+
+ /* Random # seed */
+ case 'r':
+ use_seed = TRUE;
+ temp = HDatoi(argv[u + 1]);
+ if(temp < 0)
+ usage();
+ else
+ random_seed = (unsigned)temp;
+ u += 2;
+ break;
+
+ default:
+ usage();
+ break;
+ } /* end switch */
+ } /* end if */
+ else {
+ /* Get the number of records to append */
+ nrecords = HDatol(argv[u]);
+ if(nrecords <= 0)
+ usage();
+
+ u++;
+ } /* end else */
+ } /* end while */
+ } /* end if */
+
+ if(nrecords <= 0)
+ usage();
+ if(flush_count >= nrecords)
+ usage();
+
+ /* Set the random seed */
+ if(!use_seed) {
+ struct timeval t;
+
+ HDgettimeofday(&t, NULL);
+ random_seed = (unsigned)(t.tv_usec);
+ } /* end if */
+ HDsrandom(random_seed);
+
+ /* Open output file */
+ if(verbose) {
+ char verbose_name[1024];
+
+ HDsnprintf(verbose_name, sizeof(verbose_name), "swmr_writer.out.%u", random_seed);
+ if(NULL == (verbose_file = HDfopen(verbose_name, "w"))) {
+ HDfprintf(stderr, "Can't open verbose output file!\n");
+ HDexit(1);
+ }
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose) {
+ HDfprintf(verbose_file, "Parameters:\n");
+ HDfprintf(verbose_file, "\tindex type = %s\n", index_type);
+ HDfprintf(verbose_file, "\tcompression level = %d\n", comp_level);
+ HDfprintf(verbose_file, "\t# of records between flushes = %ld\n", flush_count);
+ HDfprintf(verbose_file, "\t# of records to write = %ld\n", nrecords);
+ } /* end if */
+
+ /* ALWAYS emit the random seed for possible debugging */
+ HDfprintf(stdout, "Using writer random seed: %u\n", random_seed);
+
+ /* Create the test file */
+ if((fid = create_file(FILENAME, verbose, verbose_file, random_seed)) < 0) {
+ HDfprintf(stderr, "Error creating the file...\n");
+ HDexit(1);
+ }
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Generating symbol names\n");
+
+ /* Generate dataset names */
+ if(generate_symbols() < 0)
+ return -1;
+
+ /* Create the datasets in the file */
+ if(create_datasets(fid, comp_level, verbose, verbose_file, index_type) < 0) {
+ HDfprintf(stderr, "Error creating datasets...\n");
+ HDexit(1);
+ }
+
+ /* Enable SWMR writing mode */
+ if(H5Fstart_swmr_write(fid) < 0) {
+ HDfprintf(stderr, "Error starting SWMR writing mode...\n");
+ HDexit(1);
+ }
+
+ /* Send a message to indicate that H5Fstart_swmr_write() is complete--releasing the file lock */
+ h5_send_message(WRITER_MESSAGE, NULL, NULL);
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Adding records\n");
+
+ /* Append records to datasets */
+ if(add_records(fid, verbose, verbose_file, (unsigned long)nrecords, (unsigned long)flush_count) < 0) {
+ HDfprintf(stderr, "Error appending records to datasets!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Releasing symbols\n");
+
+ /* Clean up the symbols */
+ if(shutdown_symbols() < 0) {
+ HDfprintf(stderr, "Error releasing symbols!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Closing the file\n");
+
+ /* Close objects opened */
+ if(H5Fclose(fid) < 0) {
+ HDfprintf(stderr, "Error closing file!\n");
+ HDexit(1);
+ } /* end if */
+
+ return 0;
+} /* main() */
+
diff --git a/test/swmr_writer.c b/test/swmr_writer.c
new file mode 100644
index 0000000..ee7e32e
--- /dev/null
+++ b/test/swmr_writer.c
@@ -0,0 +1,449 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*-------------------------------------------------------------------------
+ *
+ * Created: swmr_writer.c
+ *
+ * Purpose: Writes data to a randomly selected subset of the datasets
+ * in the SWMR test file.
+ *
+ * This program is intended to run concurrently with the
+ * swmr_reader program.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+/***********/
+/* Headers */
+/***********/
+
+#include "h5test.h"
+#include "swmr_common.h"
+
+/********************/
+/* Local Prototypes */
+/********************/
+
+static hid_t open_skeleton(const char *filename, hbool_t verbose, FILE *verbose_file,
+ unsigned random_seed, hbool_t old);
+static int add_records(hid_t fid, hbool_t verbose, FILE *verbose_file,
+ unsigned long nrecords, unsigned long flush_count);
+static void usage(void);
+
+
+/*-------------------------------------------------------------------------
+ * Function: open_skeleton
+ *
+ * Purpose: Opens the SWMR HDF5 file and datasets.
+ *
+ * Parameters: const char *filename
+ * The filename of the SWMR HDF5 file to open
+ *
+ * hbool_t verbose
+ * Whether or not to emit verbose console messages
+ *
+ * FILE *verbose_file
+ * File handle for verbose output
+ *
+ * unsigned random_seed
+ * Random seed for the file (used for verbose logging)
+ *
+ * hbool_t old
+ * Whether to write in "old" file format
+ *
+ * Return: Success: The file ID of the opened SWMR file
+ * The dataset IDs are stored in a global array
+ *
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static hid_t
+open_skeleton(const char *filename, hbool_t verbose, FILE *verbose_file,
+ unsigned random_seed, hbool_t old)
+{
+ hid_t fid; /* File ID for new HDF5 file */
+ hid_t fapl; /* File access property list */
+ unsigned u, v; /* Local index variable */
+ hbool_t use_log_vfd = FALSE; /* Use the log VFD (set this manually) */
+
+ HDassert(filename);
+
+ /* Create file access property list */
+ if((fapl = h5_fileaccess()) < 0)
+ return -1;
+
+ if(!old) {
+ /* Set to use the latest library format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ return -1;
+ }
+
+#ifdef QAK
+ /* Increase the initial size of the metadata cache */
+ {
+ H5AC_cache_config_t mdc_config;
+
+ mdc_config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
+ H5Pget_mdc_config(fapl, &mdc_config);
+ HDfprintf(stderr, "mdc_config.initial_size = %lu\n", (unsigned long)mdc_config.initial_size);
+ HDfprintf(stderr, "mdc_config.epoch_length = %lu\n", (unsigned long)mdc_config.epoch_length);
+ mdc_config.set_initial_size = 1;
+ mdc_config.initial_size = 16 * 1024 * 1024;
+ /* mdc_config.epoch_length = 5000; */
+ H5Pset_mdc_config(fapl, &mdc_config);
+ }
+#endif /* QAK */
+
+ if(use_log_vfd) {
+ char verbose_name[1024];
+
+ HDsnprintf(verbose_name, sizeof(verbose_name), "swmr_writer.log.%u", random_seed);
+
+ H5Pset_fapl_log(fapl, verbose_name, H5FD_LOG_ALL, (size_t)(512 * 1024 * 1024));
+ } /* end if */
+
+ /* Open the file */
+ if((fid = H5Fopen(filename, H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE, fapl)) < 0)
+ return -1;
+
+ /* Close file access property list */
+ if(H5Pclose(fapl) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Opening datasets\n");
+
+ /* Open the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++) {
+ if((symbol_info[u][v].dsid = H5Dopen2(fid, symbol_info[u][v].name, H5P_DEFAULT)) < 0)
+ return -1;
+ symbol_info[u][v].nrecords = 0;
+ } /* end for */
+
+ return fid;
+}
+
+
+/*-------------------------------------------------------------------------
+ * Function: add_records
+ *
+ * Purpose: Writes a specified number of records to random datasets in
+ * the SWMR test file.
+ *
+ * Parameters: hid_t fid
+ * The file ID of the SWMR HDF5 file
+ *
+ * hbool_t verbose
+ * Whether or not to emit verbose console messages
+ *
+ * FILE *verbose_file
+ * File handle for verbose output
+ *
+ * unsigned long nrecords
+ * # of records to write to the datasets
+ *
+ * unsigned long flush_count
+ * # of records to write before flushing the file to disk
+ *
+ * Return: Success: 0
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+add_records(hid_t fid, hbool_t verbose, FILE *verbose_file,
+ unsigned long nrecords, unsigned long flush_count)
+{
+ hid_t tid; /* Datatype ID for records */
+ hid_t mem_sid; /* Memory dataspace ID */
+ hsize_t start[2] = {0, 0}, count[2] = {1, 1}; /* Hyperslab selection values */
+ hsize_t dim[2] = {1, 0}; /* Dataspace dimensions */
+ symbol_t record; /* The record to add to the dataset */
+ unsigned long rec_to_flush; /* # of records left to write before flush */
+ unsigned long u, v; /* Local index variables */
+
+ HDassert(fid >= 0);
+
+ /* Reset the record */
+ /* (record's 'info' field might need to change for each record written, also) */
+ HDmemset(&record, 0, sizeof(record));
+
+ /* Create a dataspace for the record to add */
+ if((mem_sid = H5Screate(H5S_SCALAR)) < 0)
+ return -1;
+
+ /* Create datatype for appending records */
+ if((tid = create_symbol_datatype()) < 0)
+ return -1;
+
+ /* Add records to random datasets, according to frequency distribution */
+ rec_to_flush = flush_count;
+ for(u = 0; u < nrecords; u++) {
+ symbol_info_t *symbol; /* Symbol to write record to */
+ hid_t file_sid; /* Dataset's space ID */
+
+ /* Get a random dataset, according to the symbol distribution */
+ symbol = choose_dataset();
+
+ /* Set the record's ID (equal to its position) */
+ record.rec_id = symbol->nrecords;
+
+ /* Get the coordinate to write */
+ start[1] = symbol->nrecords;
+
+ /* Cork the metadata cache, to prevent the object header from being
+ * flushed before the data has been written */
+ if(H5Odisable_mdc_flushes(symbol->dsid) < 0)
+ return -1;
+
+ /* Extend the dataset's dataspace to hold the new record */
+ symbol->nrecords++;
+ dim[1] = symbol->nrecords;
+ if(H5Dset_extent(symbol->dsid, dim) < 0)
+ return -1;
+
+ /* Get the dataset's dataspace */
+ if((file_sid = H5Dget_space(symbol->dsid)) < 0)
+ return -1;
+
+ /* Choose the last record in the dataset */
+ if(H5Sselect_hyperslab(file_sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0)
+ return -1;
+
+ /* Write record to the dataset */
+ if(H5Dwrite(symbol->dsid, tid, mem_sid, file_sid, H5P_DEFAULT, &record) < 0)
+ return -1;
+
+ /* Uncork the metadata cache */
+ if(H5Oenable_mdc_flushes(symbol->dsid) < 0)
+ return -1;
+
+ /* Close the dataset's dataspace */
+ if(H5Sclose(file_sid) < 0)
+ return -1;
+
+ /* Check for flushing file */
+ if(flush_count > 0) {
+ /* Decrement count of records to write before flushing */
+ rec_to_flush--;
+
+ /* Check for counter being reached */
+ if(0 == rec_to_flush) {
+ /* Flush contents of file */
+ if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
+ return -1;
+
+ /* Reset flush counter */
+ rec_to_flush = flush_count;
+ } /* end if */
+ } /* end if */
+ } /* end for */
+
+ /* Close the memory dataspace */
+ if(H5Sclose(mem_sid) < 0)
+ return -1;
+
+ /* Close the datatype */
+ if(H5Tclose(tid) < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Closing datasets\n");
+
+ /* Close the datasets */
+ for(u = 0; u < NLEVELS; u++)
+ for(v = 0; v < symbol_count[u]; v++)
+ if(H5Dclose(symbol_info[u][v].dsid) < 0)
+ return -1;
+
+ return 0;
+}
+
+static void
+usage(void)
+{
+ printf("\n");
+ printf("Usage error!\n");
+ printf("\n");
+ printf("Usage: swmr_writer [-q] [-o] [-f <# of records to write between flushing\n");
+ printf(" file contents>] [-r <random seed>] <# of records>\n");
+ printf("\n");
+ printf("<# of records to write between flushing file contents> should be 0\n");
+ printf("(for no flushing) or between 1 and (<# of records> - 1).\n");
+ printf("\n");
+ printf("<# of records> must be specified.\n");
+ printf("\n");
+ printf("Defaults to verbose (no '-q' given), latest format when opening file (no '-o' given),\n");
+ printf("flushing every 10000 records ('-f 10000'), and will generate a random seed (no -r given).\n");
+ printf("\n");
+ HDexit(1);
+}
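+
+/*
+ * Illustrative example (not part of the original test source): given the
+ * options documented in usage() above, a hypothetical invocation that appends
+ * 20000 records to the existing SWMR test file, flushing every 1000 records,
+ * might look like:
+ *
+ *     ./swmr_writer -f 1000 20000
+ */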
+
+int main(int argc, const char *argv[])
+{
+ hid_t fid; /* File ID for file opened */
+ long nrecords = 0; /* # of records to append */
+ long flush_count = 10000; /* # of records to write between flushing file */
+ hbool_t verbose = TRUE; /* Whether to emit some informational messages */
+ FILE *verbose_file = NULL; /* File handle for verbose output */
+ hbool_t old = FALSE; /* Whether to use non-latest-format when opening file */
+ hbool_t use_seed = FALSE; /* Set to TRUE if a seed was set on the command line */
+ unsigned random_seed = 0; /* Random # seed */
+ unsigned u; /* Local index variable */
+ int temp;
+
+ /* Parse command line options */
+ if(argc < 2)
+ usage();
+ if(argc > 1) {
+ u = 1;
+ while(u < (unsigned)argc) {
+ if(argv[u][0] == '-') {
+ switch(argv[u][1]) {
+ /* # of records to write between flushing file */
+ case 'f':
+ flush_count = HDatol(argv[u + 1]);
+ if(flush_count < 0)
+ usage();
+ u += 2;
+ break;
+
+ /* Be quiet */
+ case 'q':
+ verbose = FALSE;
+ u++;
+ break;
+
+ /* Random # seed */
+ case 'r':
+ use_seed = TRUE;
+ temp = HDatoi(argv[u + 1]);
+ random_seed = (unsigned)temp;
+ u += 2;
+ break;
+
+ /* Use non-latest-format when opening file */
+ case 'o':
+ old = TRUE;
+ u++;
+ break;
+
+ default:
+ usage();
+ break;
+ } /* end switch */
+ } /* end if */
+ else {
+ /* Get the number of records to append */
+ nrecords = HDatol(argv[u]);
+ if(nrecords <= 0)
+ usage();
+
+ u++;
+ } /* end else */
+ } /* end while */
+ } /* end if */
+ if(nrecords <= 0)
+ usage();
+ if(flush_count >= nrecords)
+ usage();
+
+ /* Set the random seed */
+ if(!use_seed) {
+ struct timeval t;
+
+ HDgettimeofday(&t, NULL);
+ random_seed = (unsigned)(t.tv_usec);
+ } /* end if */
+ HDsrandom(random_seed);
+
+ /* Open output file */
+ if(verbose) {
+ char verbose_name[1024];
+
+ HDsnprintf(verbose_name, sizeof(verbose_name), "swmr_writer.out.%u", random_seed);
+ if(NULL == (verbose_file = HDfopen(verbose_name, "w"))) {
+ HDfprintf(stderr, "Can't open verbose output file!\n");
+ HDexit(1);
+ }
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose) {
+ HDfprintf(verbose_file, "Parameters:\n");
+ HDfprintf(verbose_file, "\t# of records between flushes = %ld\n", flush_count);
+ HDfprintf(verbose_file, "\t# of records to write = %ld\n", nrecords);
+ } /* end if */
+
+ /* ALWAYS emit the random seed for possible debugging */
+ HDfprintf(stdout, "Using writer random seed: %u\n", random_seed);
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Generating symbol names\n");
+
+ /* Generate dataset names */
+ if(generate_symbols() < 0)
+ return -1;
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Opening skeleton file: %s\n", FILENAME);
+
+ /* Open file skeleton */
+ if((fid = open_skeleton(FILENAME, verbose, verbose_file, random_seed, old)) < 0) {
+ HDfprintf(stderr, "Error opening skeleton file!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Send a message to indicate "H5Fopen" is complete--releasing the file lock */
+ h5_send_message(WRITER_MESSAGE, NULL, NULL);
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Adding records\n");
+
+ /* Append records to datasets */
+ if(add_records(fid, verbose, verbose_file, (unsigned long)nrecords, (unsigned long)flush_count) < 0) {
+ HDfprintf(stderr, "Error appending records to datasets!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Releasing symbols\n");
+
+ /* Clean up the symbols */
+ if(shutdown_symbols() < 0) {
+ HDfprintf(stderr, "Error releasing symbols!\n");
+ HDexit(1);
+ } /* end if */
+
+ /* Emit informational message */
+ if(verbose)
+ HDfprintf(verbose_file, "Closing objects\n");
+
+ /* Close objects opened */
+ if(H5Fclose(fid) < 0) {
+ HDfprintf(stderr, "Error closing file!\n");
+ HDexit(1);
+ } /* end if */
+
+ return 0;
+}
+
diff --git a/test/tarray.c b/test/tarray.c
index 4a87981..a35b8a3 100644
--- a/test/tarray.c
+++ b/test/tarray.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/***********************************************************
diff --git a/test/tattr.c b/test/tattr.c
index 6f55081..3b0c90e 100644
--- a/test/tattr.c
+++ b/test/tattr.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/***********************************************************
diff --git a/test/tcheck_version.c b/test/tcheck_version.c
index 5808680..574d7c9 100644
--- a/test/tcheck_version.c
+++ b/test/tcheck_version.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/tchecksum.c b/test/tchecksum.c
index cf519c0..febaacc 100644
--- a/test/tchecksum.c
+++ b/test/tchecksum.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/test/tconfig.c b/test/tconfig.c
index 1604c61..a9f3378 100644
--- a/test/tconfig.c
+++ b/test/tconfig.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/***********************************************************
diff --git a/test/tcoords.c b/test/tcoords.c
index 306c6b2..d729d4b 100644
--- a/test/tcoords.c
+++ b/test/tcoords.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/***********************************************************
diff --git a/test/test_plugin.sh.in b/test/test_plugin.sh.in
index 1cd87e3..c90a978 100644
--- a/test/test_plugin.sh.in
+++ b/test/test_plugin.sh.in
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic document set and is
-# linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have access
-# to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
srcdir=@srcdir@
TOP_BUILDDIR=@top_builddir@
@@ -40,7 +38,7 @@ case $(uname) in
esac
PLUGIN_LIBDIR1=testdir1
PLUGIN_LIBDIR2=testdir2
-CP="cp -p" # Use -p to preserve mode,ownership,timestamps
+CP="cp -p" # Use -p to preserve mode,ownership,timestamps
RM="rm -rf"
# Print a line-line message left justified in a field of 70 characters
@@ -87,6 +85,19 @@ if [ $? != 0 ]; then
nerrors=`expr $nerrors + 1`
fi
+############################################
+# HDFFV-9655 test for relative path disabled
+# setup plugin path relative to test
+# actual executable is in the .libs folder
+#ENVCMD="env HDF5_PLUGIN_PATH=@/../${PLUGIN_LIBDIR1}:@/../${PLUGIN_LIBDIR2}"
+#
+# Run the test
+#$ENVCMD $TEST_BIN
+#if [ $? != 0 ]; then
+# nerrors=`expr $nerrors + 1`
+#fi
+#############################################
+
# print results
if test $nerrors -ne 0 ; then
echo "$nerrors errors encountered"
diff --git a/test/test_usecases.sh.in b/test/test_usecases.sh.in
new file mode 100644
index 0000000..8bc2078
--- /dev/null
+++ b/test/test_usecases.sh.in
@@ -0,0 +1,199 @@
+#! /bin/bash
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
+# Tests the use cases of swmr features.
+#
+# Created:
+# Albert Cheng, 2013/06/01.
+# Modified:
+#
+
+# This is a work in progress.
+# For now, it shows how to run the use case programs and only verifies that
+# the exit codes are okay (0).
+
+srcdir=@srcdir@
+
+# Check to see if the VFD specified by the HDF5_DRIVER environment variable
+# supports SWMR.
+./swmr_check_compat_vfd
+rc=$?
+if [[ $rc != 0 ]] ; then
+ echo
+ echo "The VFD specified by the HDF5_DRIVER environment variable"
+ echo "does not support SWMR"
+ echo
+ echo "SWMR use case tests skipped"
+ echo
+ exit 0
+fi
+
+# Define symbols
+EXIT_SUCCESS=0
+EXIT_FAILURE=1
+EXIT_VALUE=$EXIT_SUCCESS # Default all tests succeed
+RESULT_PASSED=" PASSED"
+RESULT_FAILED="*FAILED*"
+RESULT_SKIP="-SKIP-"
+USECASES_PROGRAMS="use_append_chunk use_append_mchunks"
+TESTNAME="Use Case"
+
+# Define variables
+nerrors=0
+verbose=yes
+
+# Source in the output filter function definitions.
+. $srcdir/../bin/output_filter.sh
+
+# Define functions
+# Print a one-line message left justified in a field of 72 characters.
+# Results can be " PASSED", "*FAILED*", "-SKIP-", up to 8 characters
+# wide.
+# SPACES should be at least 71 spaces. ($* + ' ' + 71 + 8 >= 80)
+#
+TESTING() {
+ SPACES=" "
+ echo "$* $SPACES" | cut -c1-72 | tr -d '\012'
+}
+
+# Run a test and print PASS or *FAIL*. If a test fails, increment the
+# `nerrors' global variable. The test program is given as the first
+# argument and any remaining arguments are passed through to it; only the
+# program's exit code is checked. Its stdout and stderr are captured in
+# $program.out and $program.err and, if $verbose is set, displayed. The
+# captured output is not removed if $HDF5_NOCLEANUP has a non-zero value.
+TOOLTEST() {
+ program=$1
+ shift
+
+ actual="$program.out"
+ actual_err="$program.err"
+ actual_sav=${actual}-sav
+ actual_err_sav=${actual_err}-sav
+
+ # Run test.
+ TESTING $program $@
+ (
+ $RUNSERIAL ./$program "$@"
+ ) >$actual 2>$actual_err
+ exit_code=$?
+
+ # save actual and actual_err in case they are needed later.
+ cp $actual $actual_sav
+ STDOUT_FILTER $actual
+ cp $actual_err $actual_err_sav
+ STDERR_FILTER $actual_err
+ cat $actual_err >> $actual
+
+ if [ $exit_code -eq 0 ];then
+ echo "$RESULT_PASSED"
+ test yes = "$verbose" && sed 's/^/ /' < $actual
+ else
+ echo "$RESULT_FAILED"
+ nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && sed 's/^/ /' < $actual
+ fi
+
+ # Clean up output file
+ if test -z "$HDF5_NOCLEANUP"; then
+ rm -f $actual $actual_err $actual_sav $actual_err_sav $actual_ext
+ fi
+}
+
+# HDF5 has several tests that create and delete signal files to communicate
+# between processes, and it seems that even though the names of the files are
+# different, occasionally the wrong file is deleted, interrupting the flow of
+# the test. Running each of these tests in its own directory should eliminate
+# the problem.
+mkdir usecases_test
+cp twriteorder usecases_test
+for FILE in use_*; do
+ case "$FILE" in
+ *.o) continue ;; ## don't copy the .o files
+ esac
+ cp $FILE usecases_test
+done
+
+# With the --disable-shared option, the swmr programs are built in the test
+# directory; otherwise they are in test/.libs with a corresponding wrapper
+# script in the test directory. The programs or wrapper scripts in test should
+# always be copied; the swmr files in .libs should be copied only if they exist.
+if [ -f .libs/use_append_chunk ]; then
+ mkdir usecases_test/.libs
+ cp .libs/use_* usecases_test/.libs
+ cp .libs/twriteorder usecases_test/.libs
+fi
+
+cd usecases_test
+
+
+# run tests for H5Odisable_mdc_flushes/H5Oenable_mdc_flushes/H5Oare_mdc_flushes_disabled here temporarily
+USECORK=use_disable_mdc_flushes
+for p in $USECORK; do
+ TOOLTEST $p
+ TOOLTEST $p -y 3
+ TOOLTEST $p -n 3000
+ TOOLTEST $p -n 5000
+done
+
+# run the write order test here temporarily
+WRITEORDER=twriteorder
+for p in $WRITEORDER; do
+ TOOLTEST $p
+ TOOLTEST $p -b 1000
+ TOOLTEST $p -p 3000
+ TOOLTEST $p -n 2000
+ TOOLTEST $p -l w
+ TOOLTEST $p -l r
+done
+
+# Report test results
+if test $nerrors -eq 0 ; then
+ echo "$WRITEORDER test passed."
+else
+ echo "$WRITEORDER test failed with $nerrors errors."
+ EXIT_VALUE=$EXIT_FAILURE
+ nerrors=0 # reset nerrors for the regular tests below.
+fi
+
+# main body
+for p in $USECASES_PROGRAMS; do
+ TOOLTEST ./$p
+ TOOLTEST ./$p -z 256
+ tmpfile=/tmp/datatfile.$$
+ TOOLTEST ./$p -f $tmpfile; rm -f $tmpfile
+ TOOLTEST ./$p -l w
+ TOOLTEST ./$p -l r
+ # use case 1.9, testing with multi-plane chunks
+ TOOLTEST ./$p -z 256 -y 5 # 5-plane chunks
+ # cleanup temp datafile
+ if test -z "$HDF5_NOCLEANUP"; then
+ rm -f $p.h5
+ fi
+done
+
+cd ..
+# Report test results and exit
+if test $nerrors -eq 0 ; then
+ echo "All $TESTNAME tests passed."
+ if test -z "$HDF5_NOCLEANUP"; then
+ # delete the test directory
+ rm -rf usecases_test
+ fi
+else
+ echo "$TESTNAME tests failed with $nerrors errors."
+ EXIT_VALUE=$EXIT_FAILURE
+fi
+
+exit $EXIT_VALUE
diff --git a/test/testcheck_version.sh.in b/test/testcheck_version.sh.in
index ed9845e..a5641f5 100644
--- a/test/testcheck_version.sh.in
+++ b/test/testcheck_version.sh.in
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Tests for the H5check_version function.
diff --git a/test/testerror.sh.in b/test/testerror.sh.in
index 7c03414..734b051 100644
--- a/test/testerror.sh.in
+++ b/test/testerror.sh.in
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Tests for test_error and err_compat
diff --git a/test/testfiles/plist_files/dapl_32be b/test/testfiles/plist_files/dapl_32be
index 4df4e7f..4dedda2 100644
--- a/test/testfiles/plist_files/dapl_32be
+++ b/test/testfiles/plist_files/dapl_32be
Binary files differ
diff --git a/test/testfiles/plist_files/dapl_32le b/test/testfiles/plist_files/dapl_32le
index 4df4e7f..4dedda2 100644
--- a/test/testfiles/plist_files/dapl_32le
+++ b/test/testfiles/plist_files/dapl_32le
Binary files differ
diff --git a/test/testfiles/plist_files/dapl_64be b/test/testfiles/plist_files/dapl_64be
index 4df4e7f..4dedda2 100644
--- a/test/testfiles/plist_files/dapl_64be
+++ b/test/testfiles/plist_files/dapl_64be
Binary files differ
diff --git a/test/testfiles/plist_files/dapl_64le b/test/testfiles/plist_files/dapl_64le
index 4df4e7f..4dedda2 100644
--- a/test/testfiles/plist_files/dapl_64le
+++ b/test/testfiles/plist_files/dapl_64le
Binary files differ
diff --git a/test/testfiles/plist_files/def_dapl_32be b/test/testfiles/plist_files/def_dapl_32be
index c9b7ea9..3df7289 100644
--- a/test/testfiles/plist_files/def_dapl_32be
+++ b/test/testfiles/plist_files/def_dapl_32be
Binary files differ
diff --git a/test/testfiles/plist_files/def_dapl_32le b/test/testfiles/plist_files/def_dapl_32le
index c9b7ea9..3df7289 100644
--- a/test/testfiles/plist_files/def_dapl_32le
+++ b/test/testfiles/plist_files/def_dapl_32le
Binary files differ
diff --git a/test/testfiles/plist_files/def_dapl_64be b/test/testfiles/plist_files/def_dapl_64be
index c9b7ea9..3df7289 100644
--- a/test/testfiles/plist_files/def_dapl_64be
+++ b/test/testfiles/plist_files/def_dapl_64be
Binary files differ
diff --git a/test/testfiles/plist_files/def_dapl_64le b/test/testfiles/plist_files/def_dapl_64le
index c9b7ea9..3df7289 100644
--- a/test/testfiles/plist_files/def_dapl_64le
+++ b/test/testfiles/plist_files/def_dapl_64le
Binary files differ
diff --git a/test/testfiles/plist_files/def_dxpl_32be b/test/testfiles/plist_files/def_dxpl_32be
index b13f456..3b77a32 100644
--- a/test/testfiles/plist_files/def_dxpl_32be
+++ b/test/testfiles/plist_files/def_dxpl_32be
Binary files differ
diff --git a/test/testfiles/plist_files/def_dxpl_32le b/test/testfiles/plist_files/def_dxpl_32le
index b13f456..3b77a32 100644
--- a/test/testfiles/plist_files/def_dxpl_32le
+++ b/test/testfiles/plist_files/def_dxpl_32le
Binary files differ
diff --git a/test/testfiles/plist_files/def_dxpl_64be b/test/testfiles/plist_files/def_dxpl_64be
index b13f456..3b77a32 100644
--- a/test/testfiles/plist_files/def_dxpl_64be
+++ b/test/testfiles/plist_files/def_dxpl_64be
Binary files differ
diff --git a/test/testfiles/plist_files/def_dxpl_64le b/test/testfiles/plist_files/def_dxpl_64le
index b13f456..3b77a32 100644
--- a/test/testfiles/plist_files/def_dxpl_64le
+++ b/test/testfiles/plist_files/def_dxpl_64le
Binary files differ
diff --git a/test/testfiles/plist_files/def_fapl_32be b/test/testfiles/plist_files/def_fapl_32be
index 6b6baee..3b35501 100644
--- a/test/testfiles/plist_files/def_fapl_32be
+++ b/test/testfiles/plist_files/def_fapl_32be
Binary files differ
diff --git a/test/testfiles/plist_files/def_fapl_32le b/test/testfiles/plist_files/def_fapl_32le
index 6b6baee..3b35501 100644
--- a/test/testfiles/plist_files/def_fapl_32le
+++ b/test/testfiles/plist_files/def_fapl_32le
Binary files differ
diff --git a/test/testfiles/plist_files/def_fapl_64be b/test/testfiles/plist_files/def_fapl_64be
index 6b6baee..3b35501 100644
--- a/test/testfiles/plist_files/def_fapl_64be
+++ b/test/testfiles/plist_files/def_fapl_64be
Binary files differ
diff --git a/test/testfiles/plist_files/def_fapl_64le b/test/testfiles/plist_files/def_fapl_64le
index 6b6baee..3b35501 100644
--- a/test/testfiles/plist_files/def_fapl_64le
+++ b/test/testfiles/plist_files/def_fapl_64le
Binary files differ
diff --git a/test/testfiles/plist_files/def_fcpl_32be b/test/testfiles/plist_files/def_fcpl_32be
index 38dec23..9a67dd5 100644
--- a/test/testfiles/plist_files/def_fcpl_32be
+++ b/test/testfiles/plist_files/def_fcpl_32be
Binary files differ
diff --git a/test/testfiles/plist_files/def_fcpl_32le b/test/testfiles/plist_files/def_fcpl_32le
index 38dec23..9a67dd5 100644
--- a/test/testfiles/plist_files/def_fcpl_32le
+++ b/test/testfiles/plist_files/def_fcpl_32le
Binary files differ
diff --git a/test/testfiles/plist_files/def_fcpl_64be b/test/testfiles/plist_files/def_fcpl_64be
index 38dec23..9a67dd5 100644
--- a/test/testfiles/plist_files/def_fcpl_64be
+++ b/test/testfiles/plist_files/def_fcpl_64be
Binary files differ
diff --git a/test/testfiles/plist_files/def_fcpl_64le b/test/testfiles/plist_files/def_fcpl_64le
index 38dec23..9a67dd5 100644
--- a/test/testfiles/plist_files/def_fcpl_64le
+++ b/test/testfiles/plist_files/def_fcpl_64le
Binary files differ
diff --git a/test/testfiles/plist_files/dxpl_32be b/test/testfiles/plist_files/dxpl_32be
index 5ff2ea0..22fbdc8 100644
--- a/test/testfiles/plist_files/dxpl_32be
+++ b/test/testfiles/plist_files/dxpl_32be
Binary files differ
diff --git a/test/testfiles/plist_files/dxpl_32le b/test/testfiles/plist_files/dxpl_32le
index 5ff2ea0..22fbdc8 100644
--- a/test/testfiles/plist_files/dxpl_32le
+++ b/test/testfiles/plist_files/dxpl_32le
Binary files differ
diff --git a/test/testfiles/plist_files/dxpl_64be b/test/testfiles/plist_files/dxpl_64be
index 5ff2ea0..22fbdc8 100644
--- a/test/testfiles/plist_files/dxpl_64be
+++ b/test/testfiles/plist_files/dxpl_64be
Binary files differ
diff --git a/test/testfiles/plist_files/dxpl_64le b/test/testfiles/plist_files/dxpl_64le
index 5ff2ea0..22fbdc8 100644
--- a/test/testfiles/plist_files/dxpl_64le
+++ b/test/testfiles/plist_files/dxpl_64le
Binary files differ
diff --git a/test/testfiles/plist_files/fapl_32be b/test/testfiles/plist_files/fapl_32be
index 65e2070..43e5e67 100644
--- a/test/testfiles/plist_files/fapl_32be
+++ b/test/testfiles/plist_files/fapl_32be
Binary files differ
diff --git a/test/testfiles/plist_files/fapl_32le b/test/testfiles/plist_files/fapl_32le
index 65e2070..43e5e67 100644
--- a/test/testfiles/plist_files/fapl_32le
+++ b/test/testfiles/plist_files/fapl_32le
Binary files differ
diff --git a/test/testfiles/plist_files/fapl_64be b/test/testfiles/plist_files/fapl_64be
index 65e2070..43e5e67 100644
--- a/test/testfiles/plist_files/fapl_64be
+++ b/test/testfiles/plist_files/fapl_64be
Binary files differ
diff --git a/test/testfiles/plist_files/fapl_64le b/test/testfiles/plist_files/fapl_64le
index 65e2070..43e5e67 100644
--- a/test/testfiles/plist_files/fapl_64le
+++ b/test/testfiles/plist_files/fapl_64le
Binary files differ
diff --git a/test/testfiles/plist_files/fcpl_32be b/test/testfiles/plist_files/fcpl_32be
index ffa5242..4a8ac8a 100644
--- a/test/testfiles/plist_files/fcpl_32be
+++ b/test/testfiles/plist_files/fcpl_32be
Binary files differ
diff --git a/test/testfiles/plist_files/fcpl_32le b/test/testfiles/plist_files/fcpl_32le
index ffa5242..4a8ac8a 100644
--- a/test/testfiles/plist_files/fcpl_32le
+++ b/test/testfiles/plist_files/fcpl_32le
Binary files differ
diff --git a/test/testfiles/plist_files/fcpl_64be b/test/testfiles/plist_files/fcpl_64be
index ffa5242..4a8ac8a 100644
--- a/test/testfiles/plist_files/fcpl_64be
+++ b/test/testfiles/plist_files/fcpl_64be
Binary files differ
diff --git a/test/testfiles/plist_files/fcpl_64le b/test/testfiles/plist_files/fcpl_64le
index ffa5242..4a8ac8a 100644
--- a/test/testfiles/plist_files/fcpl_64le
+++ b/test/testfiles/plist_files/fcpl_64le
Binary files differ
diff --git a/test/testfiles/plist_files/lapl_32be b/test/testfiles/plist_files/lapl_32be
index e9f43e2..f3e9865 100644
--- a/test/testfiles/plist_files/lapl_32be
+++ b/test/testfiles/plist_files/lapl_32be
Binary files differ
diff --git a/test/testfiles/plist_files/lapl_32le b/test/testfiles/plist_files/lapl_32le
index e9f43e2..f3e9865 100644
--- a/test/testfiles/plist_files/lapl_32le
+++ b/test/testfiles/plist_files/lapl_32le
Binary files differ
diff --git a/test/testfiles/plist_files/lapl_64be b/test/testfiles/plist_files/lapl_64be
index e9f43e2..f3e9865 100644
--- a/test/testfiles/plist_files/lapl_64be
+++ b/test/testfiles/plist_files/lapl_64be
Binary files differ
diff --git a/test/testfiles/plist_files/lapl_64le b/test/testfiles/plist_files/lapl_64le
index e9f43e2..f3e9865 100644
--- a/test/testfiles/plist_files/lapl_64le
+++ b/test/testfiles/plist_files/lapl_64le
Binary files differ
diff --git a/test/testflushrefresh.sh.in b/test/testflushrefresh.sh.in
new file mode 100644
index 0000000..ca46dcb
--- /dev/null
+++ b/test/testflushrefresh.sh.in
@@ -0,0 +1,220 @@
+#! /bin/sh
+#
+# Copyright by The HDF Group.
+# Copyright by the Board of Trustees of the University of Illinois.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
+#
+# Test script for the flush/evict single objects feature.
+#
+# This script doesn't actually perform any tests itself; rather, it acts
+# as a process manager for the 'flushrefresh' test program, which is where
+# the tests are actually housed. This script exists because the
+# verification of this feature needs to occur in processes separate from
+# the one in which the file is being manipulated (i.e., we have a single
+# writer process, and various reader processes spawn off to verify that
+# individual objects are being correctly flushed).
+#
+# Programmer:
+# Mike McGreevy
+# Tuesday, July 20, 2010
+
+###############################################################################
+## test variables
+###############################################################################
+
+# Number of errors encountered during test run.
+nerrors=0
+
+# Set up a function to check the current time since the epoch - ideally, we'd
+# like to use Perl. If it wasn't detected by configure, then use date, though
+# this is less portable and might cause problems on machines that don't
+# recognize the +%s option (like Solaris).
+#
+# Note that PERL will resolve to true or false, not a path.
+PERL=@PERL@
+if test -n "$PERL"; then
+ TimeStamp()
+ {
+ time=`perl -e 'print int(time)'`
+ echo "$time"
+ }
+else
+ TimeStamp()
+ {
+ time=`date +%s`
+ echo "$time"
+ }
+fi
+
+###############################################################################
+## Main
+###############################################################################
+# The build (current) directory might be different than the source directory.
+if test -z "$srcdir"; then
+ srcdir=.
+fi
+
+# Check to see if the VFD specified by the HDF5_DRIVER environment variable
+# supports SWMR.
+./swmr_check_compat_vfd
+rc=$?
+if [ $rc -ne 0 ] ; then
+ echo
+ echo "The VFD specified by the HDF5_DRIVER environment variable"
+ echo "does not support SWMR."
+ echo
+ echo "flush/refresh tests skipped"
+ echo
+ exit 0
+fi
+
+# HDF5 has several tests that create and delete signal files to communicate
+# between processes, and it seems that even though the names of the files are
+# different, occasionally the wrong file is deleted, interrupting the flow of
+# the test. Running each of these tests in its own directory should eliminate
+# the problem.
+mkdir flushrefresh_test
+cp flushrefresh flushrefresh_test
+
+# With the --disable-shared option, flushrefresh is built in the test directory,
+# otherwise it is in test/.libs with a wrapper script named flushrefresh in
+# the test directory. test/flushrefresh should always be copied,
+# .libs/flushrefresh should be copied only if it exists.
+if [ -f .libs/flushrefresh ]; then
+ mkdir flushrefresh_test/.libs
+ cp .libs/flushrefresh flushrefresh_test/.libs
+fi
+cd flushrefresh_test
+
+# =================================================
+# Set up/initialize some variables to be used later
+# =================================================
+testfile=flushrefresh.h5
+startsignal=flushrefresh_VERIFICATION_START
+endsignal=flushrefresh_VERIFICATION_DONE
+timeout_length=300
+timedout=0
+verification_done=0
+if [ -e $testfile ]; then
+ rm $testfile
+fi
+
+# ========================
+# Launch the Test Program.
+# ========================
+./flushrefresh &
+pid_main=$!
+
+# =======================================
+# Run flush verification on test program.
+# =======================================
+
+until [ $verification_done -eq 1 ]; do
+
+ # Wait for signal from test program that verification routine can run.
+ before=`TimeStamp`
+ until [ -s $startsignal ]; do
+ after=`TimeStamp`
+ timediff=`expr $after - $before`
+ if [ $timediff -gt $timeout_length ]; then
+ nerrors=`expr $nerrors + 1`
+ timedout=1
+ break
+ fi
+ done
+
+ # Check to see if we timed out looking for the signal before continuing.
+ if [ $timedout -gt 0 ]; then
+ echo "timed out waiting for signal from test program (flush)."
+ break
+ fi
+
+ # Read in test routine parameters from signal file, then delete signal file.
+ param1=`head -1 $startsignal`
+ param2=`tail -1 $startsignal`
+ rm $startsignal
+
+ # Check if we're done with verifications, otherwise run the specified verification.
+ if [ "$param1" = "VERIFICATION_DONE" ]; then
+ verification_done=1
+ echo "all flush verification complete" > $endsignal
+ else
+ ./flushrefresh $param1 $param2
+ echo "verification flush process done" > $endsignal
+ fi
+
+done
+
+# =========================================
+# Run refresh verification on test program.
+# =========================================
+if [ $timedout -eq 0 ]; then
+ until [ $verification_done -eq 2 ]; do
+
+ # Wait for signal from test program that verification routine can run.
+ before=`TimeStamp`
+ until [ -s $startsignal ]; do
+ after=`TimeStamp`
+ timediff=`expr $after - $before`
+ if [ $timediff -gt $timeout_length ]; then
+ nerrors=`expr $nerrors + 1`
+ timedout=1
+ break
+ fi
+ done
+
+ # Check to see if we timed out looking for the signal before continuing.
+ if [ $timedout -gt 0 ]; then
+ echo "timed out waiting for signal from test program (refresh)."
+ break
+ fi
+
+ # Read in test routine parameter from signal file, then delete signal file.
+ param1=`head -n 1 $startsignal`
+ rm $startsignal
+
+ # Check if we're done with verifications, otherwise run the specified verification.
+ if [ "$param1" = "VERIFICATION_DONE" ]; then
+ verification_done=2
+ echo "all refresh verification complete" > $endsignal
+ else
+ ./flushrefresh $param1
+ echo "refresh verifiction process done" > $endsignal
+ fi
+
+ done
+fi
+
+# ============================================
+# Wait for main to finish up, and end testing.
+# ============================================
+wait $pid_main
+if test $? -ne 0; then
+ echo flushrefresh had an error
+ nerrors=`expr $nerrors + 1`
+fi
+
+###############################################################################
+## Report and exit
+###############################################################################
+
+if test $nerrors -eq 0 ; then
+ echo "flush/refresh objects tests passed."
+ if test -z "$HDF5_NOCLEANUP"; then
+ # delete the test directory
+ rm -rf flushrefresh_test
+ fi
+ exit 0
+else
+ echo "flush/refresh objects tests failed with $nerrors errors."
+ exit 1
+fi
diff --git a/test/testframe.c b/test/testframe.c
index c2db235..21f9ea3 100644
--- a/test/testframe.c
+++ b/test/testframe.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/testhdf5.c b/test/testhdf5.c
index 0e7303e..713cccf 100644
--- a/test/testhdf5.c
+++ b/test/testhdf5.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/testhdf5.h b/test/testhdf5.h
index 62dadde..2cbe6c6 100644
--- a/test/testhdf5.h
+++ b/test/testhdf5.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/testlibinfo.sh.in b/test/testlibinfo.sh.in
index b7aa6c4..1dd744b 100644
--- a/test/testlibinfo.sh.in
+++ b/test/testlibinfo.sh.in
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
diff --git a/test/testlinks_env.sh.in b/test/testlinks_env.sh.in
index 7120430..94e6c7e 100644
--- a/test/testlinks_env.sh.in
+++ b/test/testlinks_env.sh.in
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Test for external link with environment variable: HDF5_EXT_PREFIX
diff --git a/test/testmeta.c b/test/testmeta.c
index e1d12e6..b97eb68 100644
--- a/test/testmeta.c
+++ b/test/testmeta.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/testswmr.sh.in b/test/testswmr.sh.in
new file mode 100644
index 0000000..f81a7d7
--- /dev/null
+++ b/test/testswmr.sh.in
@@ -0,0 +1,562 @@
+#! /bin/bash
+#
+# Copyright by The HDF Group.
+# Copyright by the Board of Trustees of the University of Illinois.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
+# Tests for the swmr feature.
+#
+# Created:
+# Albert Cheng, 2009/07/22
+
+srcdir=@srcdir@
+
+###############################################################################
+## test parameters
+###############################################################################
+
+Nreaders=5 # number of readers to launch
+Nrdrs_spa=3 # number of sparse readers to launch
+Nrecords=200000 # number of records to write
+Nrecs_rem=40000 # number of times to shrink
+Nrecs_spa=20000 # number of records to write in the sparse test
+Nsecs_add=5 # number of seconds per read interval
+Nsecs_rem=3 # number of seconds per read interval
+Nsecs_addrem=8 # number of seconds per read interval
+nerrors=0
+
+###############################################################################
+## definitions for message file to coordinate test runs
+###############################################################################
+WRITER_MESSAGE=SWMR_WRITER_MESSAGE # The message file created by the writer to signal that the open is complete
+ # This should be the same as the define in "./swmr_common.h"
+MESSAGE_TIMEOUT=300 # Message timeout length in secs
+ # This should be the same as the define in "./h5test.h"
+
+###############################################################################
+## short hands and function definitions
+###############################################################################
+DPRINT=: # Set to "echo Debug:" for debugging printing,
+ # else ":" for noop.
+IFDEBUG=: # Set to null to turn on debugging, else ":" for noop.
+
+# Print a one-line message left-justified in a field of 70 characters
+# beginning with the word "Testing".
+#
+TESTING() {
+ SPACES=" "
+ echo "Testing $* $SPACES" | cut -c1-70 | tr -d '\012'
+}
+
+# Wait for the writer's message file or until the maximum number of seconds is reached
+# $1 is the message file to wait for
+# This performs a similar function to the routine h5_wait_message() in test/h5test.c
+WAIT_MESSAGE() {
+ message=$1 # Get the name of the message file to wait for
+ t0=`date +%s` # Get current time in seconds
+ difft=0 # Initialize the time difference
+ mexist=0 # Indicate whether the message file is found
+ while [ $difft -lt $MESSAGE_TIMEOUT ] ; # Loop till message times out
+ do
+ t1=`date +%s` # Get current time in seconds
+ difft=`expr $t1 - $t0` # Calculate the time difference
+ if [ -e $message ]; then # If message file is found:
+ mexist=1 # indicate the message file is found
+ rm $message # remove the message file
+ break # get out of the while loop
+ fi
+ done;
+ if test $mexist -eq 0; then
+        # Issue a warning that the writer message file is not found, then continue with launching the reader(s)
+ echo warning: $WRITER_MESSAGE is not found after waiting $MESSAGE_TIMEOUT seconds
+ else
+ echo $WRITER_MESSAGE is found
+ fi
+}
+
+###############################################################################
+## Main
+##
+## Modifications:
+## Vailin Choi; July 2013
+## Add waiting of message file before launching the reader(s).
+## Due to the implementation of file locking, coordination
+## is needed in file opening for the writer/reader tests
+## to proceed as expected.
+##
+###############################################################################
+# The build (current) directory might be different than the source directory.
+if test -z "$srcdir"; then
+ srcdir=.
+fi
+
+# Check to see if the VFD specified by the HDF5_DRIVER environment variable
+# supports SWMR.
+./swmr_check_compat_vfd
+rc=$?
+if [ $rc -ne 0 ] ; then
+ echo
+ echo "The VFD specified by the HDF5_DRIVER environment variable"
+ echo "does not support SWMR."
+ echo
+ echo "SWMR acceptance tests skipped"
+ echo
+ exit 0
+fi
+
+# Parse options (none accepted at this time)
+while [ $# -gt 0 ]; do
+ case "$1" in
+ *) # unknown option
+ echo "$0: Unknown option ($1)"
+ exit 1
+ ;;
+ esac
+done
+
+# HDF5 has several tests that create and delete signal files to communicate
+# between processes, and it seems that even though the names of the files are
+# different, occasionally the wrong file is deleted, interrupting the flow of
+# the test. Running each of these tests in its own directory should eliminate
+# the problem.
+mkdir swmr_test
+for FILE in swmr*; do
+ case "$FILE" in
+ *.o) continue ;; ## don't copy the .o files
+ esac
+ cp $FILE swmr_test
+done
+cp swmr* swmr_test
+
+# With the --disable-shared option, swmr program files are built in the test
+# directory, otherwise they are in test/.libs with a corresponding wrapper
+# script in the test directory. The programs or wrapper scripts in test should
+# always be copied; swmr files in .libs should be copied only if they exist.
+if [ -f .libs/swmr ]; then
+ mkdir swmr_test/.libs
+ for FILE in .libs/swmr*; do
+ case "$FILE" in
+ *.o) continue ;; ## don't copy the .o files
+ esac
+ cp $FILE swmr_test/.libs
+ done
+fi
+
+cd swmr_test
+
+
+# Loop over index types
+for index_type in "-i ea" "-i b2"
+do
+ # Try with and without compression
+ for compress in "" "-c 5"
+ do
+ echo
+ echo "*******************************************************************************"
+ echo "** Loop testing parameters: $index_type $compress"
+ echo "*******************************************************************************"
+ echo
+ echo
+ echo "###############################################################################"
+ echo "## Generator test"
+ echo "###############################################################################"
+ # Launch the Generator without SWMR_WRITE
+ echo launch the swmr_generator
+ ./swmr_generator $compress $index_type
+ if test $? -ne 0; then
+ echo generator had error
+ nerrors=`expr $nerrors + 1`
+ fi
+
+ # Launch the Generator with SWMR_WRITE
+ echo launch the swmr_generator with SWMR_WRITE
+ ./swmr_generator -s $compress $index_type
+ if test $? -ne 0; then
+ echo generator had error
+ nerrors=`expr $nerrors + 1`
+ fi
+
+        # Check for error and exit if one occurred
+ $DPRINT nerrors=$nerrors
+ if test $nerrors -ne 0 ; then
+ echo "SWMR tests failed with $nerrors errors."
+ exit 1
+ fi
+
+ echo
+ echo "###############################################################################"
+ echo "## Use H5Fstart_swmr_write() to enable SWMR writing mode"
+ echo "###############################################################################"
+
+ # Remove any possible writer message file before launching writer
+ rm -f $WRITER_MESSAGE
+ #
+ # Launch the Writer
+        echo launch the swmr_start_write
+ seed="" # Put -r <random seed> command here
+ ./swmr_start_write $compress $index_type $Nrecords $seed 2>&1 |tee swmr_writer.out &
+ pid_writer=$!
+ $DPRINT pid_writer=$pid_writer
+
+ # Wait for message from writer process before starting reader(s)
+ WAIT_MESSAGE $WRITER_MESSAGE
+
+ #
+ # Launch the Readers
+ #declare -a seeds=(<seed1> <seed2> <seed3> ... )
+ echo launch $Nreaders swmr_readers
+ pid_readers=""
+ n=0
+ while [ $n -lt $Nreaders ]; do
+ #seed="-r ${seeds[$n]}"
+ seed=""
+ ./swmr_reader $Nsecs_add $seed 2>&1 |tee swmr_reader.out.$n &
+ pid_readers="$pid_readers $!"
+ n=`expr $n + 1`
+ done
+ $DPRINT pid_readers=$pid_readers
+ $IFDEBUG ps
+
+ # Collect exit code of the readers first because they usually finish
+ # before the writer.
+ for xpid in $pid_readers; do
+ $DPRINT checked reader $xpid
+ wait $xpid
+ if test $? -ne 0; then
+ echo reader had error
+ nerrors=`expr $nerrors + 1`
+ fi
+ done
+
+ # Collect exit code of the writer
+ $DPRINT checked writer $pid_writer
+ wait $pid_writer
+ if test $? -ne 0; then
+ echo writer had error
+ nerrors=`expr $nerrors + 1`
+ fi
+
+        # Check for error and exit if one occurred
+ $DPRINT nerrors=$nerrors
+ if test $nerrors -ne 0 ; then
+ echo "SWMR tests failed with $nerrors errors."
+ echo "(Writer and reader output preserved)"
+ exit 1
+ fi
+
+ # Clean up output files
+ rm -f swmr_writer.out
+ rm -f swmr_reader.out.*
+
+ echo
+ echo "###############################################################################"
+ echo "## Writer test - test expanding the dataset"
+ echo "###############################################################################"
+
+ # Launch the Generator
+ echo launch the swmr_generator
+ ./swmr_generator -s $compress $index_type
+ if test $? -ne 0; then
+ echo generator had error
+ nerrors=`expr $nerrors + 1`
+ fi
+
+ # Remove any possible writer message file before launching writer
+ rm -f $WRITER_MESSAGE
+ #
+ # Launch the Writer
+ echo launch the swmr_writer
+ seed="" # Put -r <random seed> command here
+ ./swmr_writer -o $Nrecords $seed 2>&1 |tee swmr_writer.out &
+ pid_writer=$!
+ $DPRINT pid_writer=$pid_writer
+
+ # Wait for message from writer process before starting reader(s)
+ WAIT_MESSAGE $WRITER_MESSAGE
+ #
+ # Launch the Readers
+ #declare -a seeds=(<seed1> <seed2> <seed3> ... )
+ echo launch $Nreaders swmr_readers
+ pid_readers=""
+ n=0
+ while [ $n -lt $Nreaders ]; do
+ #seed="-r ${seeds[$n]}"
+ seed=""
+ ./swmr_reader $Nsecs_add $seed 2>&1 |tee swmr_reader.out.$n &
+ pid_readers="$pid_readers $!"
+ n=`expr $n + 1`
+ done
+ $DPRINT pid_readers=$pid_readers
+ $IFDEBUG ps
+
+ # Collect exit code of the readers first because they usually finish
+ # before the writer.
+ for xpid in $pid_readers; do
+ $DPRINT checked reader $xpid
+ wait $xpid
+ if test $? -ne 0; then
+ echo reader had error
+ nerrors=`expr $nerrors + 1`
+ fi
+ done
+
+ # Collect exit code of the writer
+ $DPRINT checked writer $pid_writer
+ wait $pid_writer
+ if test $? -ne 0; then
+ echo writer had error
+ nerrors=`expr $nerrors + 1`
+ fi
+
+        # Check for error and exit if one occurred
+ $DPRINT nerrors=$nerrors
+ if test $nerrors -ne 0 ; then
+ echo "SWMR tests failed with $nerrors errors."
+ echo "(Writer and reader output preserved)"
+ exit 1
+ fi
+
+ # Clean up output files
+ rm -f swmr_writer.out
+ rm -f swmr_reader.out.*
+
+ echo
+ echo "###############################################################################"
+ echo "## Remove test - test shrinking the dataset"
+ echo "###############################################################################"
+
+ # Remove any possible writer message file before launching writer
+ rm -f $WRITER_MESSAGE
+ # Launch the Remove Writer
+ echo launch the swmr_remove_writer
+ seed="" # Put -r <random seed> command here
+ ./swmr_remove_writer -o $Nrecs_rem $seed 2>&1 |tee swmr_writer.out &
+ pid_writer=$!
+ $DPRINT pid_writer=$pid_writer
+
+ # Wait for message from writer process before starting reader(s)
+ WAIT_MESSAGE $WRITER_MESSAGE
+ #
+ # Launch the Remove Readers
+ #declare -a seeds=(<seed1> <seed2> <seed3> ... )
+ n=0
+ pid_readers=""
+ echo launch $Nreaders swmr_remove_readers
+ while [ $n -lt $Nreaders ]; do
+ #seed="-r ${seeds[$n]}"
+ seed=""
+ ./swmr_remove_reader $Nsecs_rem $seed 2>&1 |tee swmr_reader.out.$n &
+ pid_readers="$pid_readers $!"
+ n=`expr $n + 1`
+ done
+ $DPRINT pid_readers=$pid_readers
+ $IFDEBUG ps
+
+ # Collect exit code of the readers first because they usually finish
+ # before the writer.
+ for xpid in $pid_readers; do
+ $DPRINT checked reader $xpid
+ wait $xpid
+ if test $? -ne 0; then
+ echo reader had error
+ nerrors=`expr $nerrors + 1`
+ fi
+ done
+
+ # Collect exit code of the writer
+ $DPRINT checked writer $pid_writer
+ wait $pid_writer
+ if test $? -ne 0; then
+ echo writer had error
+ nerrors=`expr $nerrors + 1`
+ fi
+
+        # Check for error and exit if one occurred
+ $DPRINT nerrors=$nerrors
+ if test $nerrors -ne 0 ; then
+ echo "SWMR tests failed with $nerrors errors."
+ echo "(Writer and reader output preserved)"
+ exit 1
+ fi
+
+ # Clean up output files
+ rm -f swmr_writer.out
+ rm -f swmr_reader.out.*
+
+ echo
+ echo "###############################################################################"
+ echo "## Add/remove test - randomly grow or shrink the dataset"
+ echo "###############################################################################"
+
+ # Launch the Generator
+ echo launch the swmr_generator
+ ./swmr_generator $compress $index_type
+ if test $? -ne 0; then
+ echo generator had error
+ nerrors=`expr $nerrors + 1`
+ fi
+
+ # Launch the Writer (not in parallel - just to rebuild the datasets)
+ echo launch the swmr_writer
+ seed="" # Put -r <random seed> command here
+ ./swmr_writer $Nrecords $seed
+ if test $? -ne 0; then
+ echo writer had error
+ nerrors=`expr $nerrors + 1`
+ fi
+
+ # Remove any possible writer message file before launching writer
+ rm -f $WRITER_MESSAGE
+ #
+ # Launch the Add/Remove Writer
+ echo launch the swmr_addrem_writer
+ seed="" # Put -r <random seed> command here
+ ./swmr_addrem_writer $Nrecords $seed 2>&1 |tee swmr_writer.out &
+ pid_writer=$!
+ $DPRINT pid_writer=$pid_writer
+
+ # Wait for message from writer process before starting reader(s)
+ WAIT_MESSAGE $WRITER_MESSAGE
+ #
+ # Launch the Add/Remove Readers
+ #declare -a seeds=(<seed1> <seed2> <seed3> ... )
+ n=0
+ pid_readers=""
+ echo launch $Nreaders swmr_remove_readers
+ while [ $n -lt $Nreaders ]; do
+ #seed="-r ${seeds[$n]}"
+ seed=""
+ ./swmr_remove_reader $Nsecs_addrem $seed 2>&1 |tee swmr_reader.out.$n &
+ pid_readers="$pid_readers $!"
+ n=`expr $n + 1`
+ done
+ $DPRINT pid_readers=$pid_readers
+ $IFDEBUG ps
+
+ # Collect exit code of the readers first because they usually finish
+ # before the writer.
+ for xpid in $pid_readers; do
+ $DPRINT checked reader $xpid
+ wait $xpid
+ if test $? -ne 0; then
+ echo reader had error
+ nerrors=`expr $nerrors + 1`
+ fi
+ done
+
+ # Collect exit code of the writer
+ $DPRINT checked writer $pid_writer
+ wait $pid_writer
+ if test $? -ne 0; then
+ echo writer had error
+ nerrors=`expr $nerrors + 1`
+ fi
+
+        # Check for error and exit if one occurred
+ $DPRINT nerrors=$nerrors
+ if test $nerrors -ne 0 ; then
+ echo "SWMR tests failed with $nerrors errors."
+ echo "(Writer and reader output preserved)"
+ exit 1
+ fi
+
+ # Clean up output files
+ rm -f swmr_writer.out
+ rm -f swmr_reader.out.*
+
+ echo
+ echo "###############################################################################"
+ echo "## Sparse writer test - test writing to random locations in the dataset"
+ echo "###############################################################################"
+
+ # Launch the Generator
+ # NOTE: Random seed is shared between readers and writers and is
+ # created by the generator.
+ echo launch the swmr_generator
+ seed="" # Put -r <random seed> command here
+ ./swmr_generator $compress $index_type $seed
+ if test $? -ne 0; then
+ echo generator had error
+ nerrors=`expr $nerrors + 1`
+ fi
+
+ # Remove any possible writer message file before launching writer
+ rm -f $WRITER_MESSAGE
+ # Launch the Sparse writer
+ echo launch the swmr_sparse_writer
+ nice -n 20 ./swmr_sparse_writer $Nrecs_spa 2>&1 |tee swmr_writer.out &
+ pid_writer=$!
+ $DPRINT pid_writer=$pid_writer
+
+ # Wait for message from writer process before starting reader(s)
+ WAIT_MESSAGE $WRITER_MESSAGE
+ #
+ # Launch the Sparse readers
+ n=0
+ pid_readers=""
+ echo launch $Nrdrs_spa swmr_sparse_readers
+ while [ $n -lt $Nrdrs_spa ]; do
+ # The sparse reader spits out a LOT of data so it's set to 'quiet'
+ ./swmr_sparse_reader -q $Nrecs_spa 2>&1 |tee swmr_reader.out.$n &
+ pid_readers="$pid_readers $!"
+ n=`expr $n + 1`
+ done
+ $DPRINT pid_readers=$pid_readers
+ $IFDEBUG ps
+
+ # Collect exit code of the writer
+ $DPRINT checked writer $pid_writer
+ wait $pid_writer
+ if test $? -ne 0; then
+ echo writer had error
+ nerrors=`expr $nerrors + 1`
+ fi
+
+ # Collect exit code of the readers
+ for xpid in $pid_readers; do
+ $DPRINT checked reader $xpid
+ wait $xpid
+ if test $? -ne 0; then
+ echo reader had error
+ nerrors=`expr $nerrors + 1`
+ fi
+ done
+
+        # Check for error and exit if one occurred
+ $DPRINT nerrors=$nerrors
+ if test $nerrors -ne 0 ; then
+ echo "SWMR tests failed with $nerrors errors."
+ echo "(Writer and reader output preserved)"
+ exit 1
+ fi
+
+ # Clean up output files
+ rm -f swmr_writer.out
+ rm -f swmr_reader.out.*
+ done
+done
+
+###############################################################################
+## Report and exit
+###############################################################################
+cd ..
+$DPRINT nerrors=$nerrors
+if test $nerrors -eq 0 ; then
+ echo "SWMR tests passed."
+ if test -z "$HDF5_NOCLEANUP"; then
+ # delete the test directory
+ rm -rf swmr_test
+ fi
+ exit 0
+else
+ echo "SWMR tests failed with $nerrors errors."
+ exit 1
+fi
+
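
Both SWMR scripts (this one and the VDS variant that follows) gate their readers on the WAIT_MESSAGE handshake: the writer is expected to create the WRITER_MESSAGE file once its file open has completed, presumably from the C side to match the define in "./swmr_common.h", and the script polls for that file for at most MESSAGE_TIMEOUT seconds. A self-contained sketch of the same pattern, with a toy stand-in for the writer (illustration only, not part of this patch):

    #!/bin/sh
    # Toy version of the writer/harness coordination used above.
    WRITER_MESSAGE=SWMR_WRITER_MESSAGE   # same file name the scripts use
    MESSAGE_TIMEOUT=300                  # seconds before giving up

    # Stand-in writer: pretend the file open takes a moment, then signal.
    ( sleep 2; : > $WRITER_MESSAGE ) &

    # Harness side: poll for the message file, as WAIT_MESSAGE does.
    t0=`date +%s`
    difft=0
    mexist=0
    while [ $difft -lt $MESSAGE_TIMEOUT ]; do
        t1=`date +%s`
        difft=`expr $t1 - $t0`
        if [ -e $WRITER_MESSAGE ]; then
            mexist=1                     # signal seen; readers could start now
            rm $WRITER_MESSAGE
            break
        fi
    done
    if [ $mexist -eq 0 ]; then
        echo "warning: $WRITER_MESSAGE not found after $MESSAGE_TIMEOUT seconds"
    else
        echo "$WRITER_MESSAGE found; safe to launch the readers"
    fi

The same polling-with-timeout idiom appears on the C side in h5_wait_message(), per the comment above WAIT_MESSAGE.
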
diff --git a/test/testvdsswmr.sh.in b/test/testvdsswmr.sh.in
new file mode 100644
index 0000000..32af072
--- /dev/null
+++ b/test/testvdsswmr.sh.in
@@ -0,0 +1,226 @@
+#! /bin/bash
+#
+# Copyright by The HDF Group.
+# Copyright by the Board of Trustees of the University of Illinois.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
+# Tests for the swmr feature using virtual datasets.
+#
+# Created:
+# Dana Robinson, November 2015
+
+srcdir=@srcdir@
+
+###############################################################################
+## test parameters
+###############################################################################
+
+Nwriters=6 # number of writers (1 per source dataset)
+Nreaders=5 # number of readers to launch
+nerrors=0
+
+###############################################################################
+## definitions for message file to coordinate test runs
+###############################################################################
+WRITER_MESSAGE=SWMR_WRITER_MESSAGE # The message file created by the writer to signal that the open is complete
+ # This should be the same as the define in "./swmr_common.h"
+MESSAGE_TIMEOUT=300 # Message timeout length in secs
+ # This should be the same as the define in "./h5test.h"
+
+###############################################################################
+## short hands and function definitions
+###############################################################################
+DPRINT=: # Set to "echo Debug:" for debugging printing,
+ # else ":" for noop.
+IFDEBUG=: # Set to null to turn on debugging, else ":" for noop.
+
+# Print a one-line message left-justified in a field of 70 characters
+# beginning with the word "Testing".
+#
+TESTING() {
+ SPACES=" "
+ echo "Testing $* $SPACES" | cut -c1-70 | tr -d '\012'
+}
+
+# Wait for the writer's message file or until the maximum number of seconds is reached
+# $1 is the message file to wait for
+# This performs a similar function to the routine h5_wait_message() in test/h5test.c
+WAIT_MESSAGE() {
+ message=$1 # Get the name of the message file to wait for
+ t0=`date +%s` # Get current time in seconds
+ difft=0 # Initialize the time difference
+ mexist=0 # Indicate whether the message file is found
+ while [ $difft -lt $MESSAGE_TIMEOUT ] ; # Loop till message times out
+ do
+ t1=`date +%s` # Get current time in seconds
+ difft=`expr $t1 - $t0` # Calculate the time difference
+ if [ -e $message ]; then # If message file is found:
+ mexist=1 # indicate the message file is found
+ rm $message # remove the message file
+ break # get out of the while loop
+ fi
+ done;
+ if test $mexist -eq 0; then
+        # Issue a warning that the writer message file is not found, then continue with launching the reader(s)
+ echo warning: $WRITER_MESSAGE is not found after waiting $MESSAGE_TIMEOUT seconds
+ else
+ echo $WRITER_MESSAGE is found
+ fi
+}
+
+###############################################################################
+## Main
+###############################################################################
+# The build (current) directory might be different than the source directory.
+if test -z "$srcdir"; then
+ srcdir=.
+fi
+
+# Check to see if the VFD specified by the HDF5_DRIVER environment variable
+# supports SWMR.
+./swmr_check_compat_vfd
+rc=$?
+if [ $rc -ne 0 ] ; then
+ echo
+ echo "The VFD specified by the HDF5_DRIVER environment variable"
+ echo "does not support SWMR."
+ echo
+ echo "SWMR acceptance tests skipped"
+ echo
+ exit 0
+fi
+
+# Parse options (none accepted at this time)
+while [ $# -gt 0 ]; do
+ case "$1" in
+ *) # unknown option
+ echo "$0: Unknown option ($1)"
+ exit 1
+ ;;
+ esac
+done
+
+# HDF5 has several tests that create and delete signal files to communicate
+# between processes, and it seems that even though the names of the files are
+# different, occasionally the wrong file is deleted, interrupting the flow of
+# the test. Running each of these tests in its own directory should eliminate
+# the problem.
+mkdir vds_swmr_test
+for FILE in vds_swmr*; do
+ case "$FILE" in
+ *.o) continue ;; ## don't copy the .o files
+ esac
+ cp $FILE vds_swmr_test
+done
+
+# With the --disable-shared option, swmr program files are built in the test
+# directory, otherwise they are in test/.libs with a corresponding wrapper
+# script in the test directory. The programs or wrapper scripts in test should
+# always be copied; swmr files in .libs should be copied only if they exist.
+if [ -f .libs/vds_swmr_writer ]; then
+ mkdir vds_swmr_test/.libs
+ cp .libs/vds_swmr* vds_swmr_test/.libs
+fi
+
+cd vds_swmr_test
+
+
+echo
+echo "###############################################################################"
+echo "## Basic VDS SWMR test - writing to a tiled plane"
+echo "###############################################################################"
+
+# Launch the file generator
+echo launch the generator
+./vds_swmr_gen
+if test $? -ne 0; then
+ echo generator had error
+ nerrors=`expr $nerrors + 1`
+fi
+
+# Check for error and exit if one occurred
+$DPRINT nerrors=$nerrors
+if test $nerrors -ne 0 ; then
+ echo "VDS SWMR tests failed with $nerrors errors."
+ exit 1
+fi
+
+# Launch the writers
+echo "launch the $Nwriters SWMR VDS writers (1 per source)"
+pid_writers=""
+n=0
+while [ $n -lt $Nwriters ]; do
+ ./vds_swmr_writer $n &
+ pid_writers="$pid_writers $!"
+ n=`expr $n + 1`
+done
+$DPRINT pid_writers=$pid_writers
+$IFDEBUG ps
+
+# Sleep to ensure that the writers have started
+sleep 3
+
+# Launch the readers
+echo launch $Nreaders SWMR readers
+pid_readers=""
+n=0
+while [ $n -lt $Nreaders ]; do
+ ./vds_swmr_reader &
+ pid_readers="$pid_readers $!"
+ n=`expr $n + 1`
+done
+$DPRINT pid_readers=$pid_readers
+$IFDEBUG ps
+
+# Collect exit code of the writers
+for xpid in $pid_writers; do
+ $DPRINT checked writer $xpid
+ wait $xpid
+ if test $? -ne 0; then
+ echo writer had error
+ nerrors=`expr $nerrors + 1`
+ fi
+done
+
+# Collect exit code of the readers
+# (they usually finish after the writers)
+for xpid in $pid_readers; do
+ $DPRINT checked reader $xpid
+ wait $xpid
+ if test $? -ne 0; then
+ echo reader had error
+ nerrors=`expr $nerrors + 1`
+ fi
+done
+
+# Check for error and exit if one occurred
+$DPRINT nerrors=$nerrors
+if test $nerrors -ne 0 ; then
+ echo "VDS SWMR tests failed with $nerrors errors."
+ exit 1
+fi
+
+###############################################################################
+## Report and exit
+###############################################################################
+cd ..
+$DPRINT nerrors=$nerrors
+if test $nerrors -eq 0 ; then
+ echo "VDS SWMR tests passed."
+ if test -z "$HDF5_NOCLEANUP"; then
+ # delete the test directory
+ rm -rf vds_swmr_test
+ fi
+ exit 0
+else
+ echo "VDS SWMR tests failed with $nerrors errors."
+ exit 1
+fi
+
diff --git a/test/tfile.c b/test/tfile.c
index 19a2df4..7274c82 100644
--- a/test/tfile.c
+++ b/test/tfile.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/***********************************************************
@@ -103,14 +101,14 @@
#define USERBLOCK_SIZE ((hsize_t) 512)
/* Declarations for test_filespace_*() */
-#define FILENAME_LEN 1024 /* length of file name */
-#define CORE_INCREMENT 1024 /* core file */
-#define FAMILY_SIZE 1024 /* family file */
-#define DSETNAME "dset" /* Name of dataset */
-#define NELMTS(X) (sizeof(X)/sizeof(X[0])) /* # of elements */
-#define READ_OLD_BUFSIZE 1024 /* Buffer for holding file data */
-#define FILE5 "tfile5.h5" /* Test file */
-#define TEST_THRESHOLD10 10 /* Free space section threshold */
+#define FILENAME_LEN 1024 /* length of file name */
+#define DSETNAME "dset" /* Name of dataset */
+#define NELMTS(X) (sizeof(X)/sizeof(X[0])) /* # of elements */
+#define READ_OLD_BUFSIZE 1024 /* Buffer for holding file data */
+#define FILE5 "tfile5.h5" /* Test file */
+#define TEST_THRESHOLD10 10 /* Free space section threshold */
+#define FSP_SIZE_DEF 4096 /* File space page size default */
+#define FSP_SIZE512 512 /* File space page size */
/* Declaration for test_libver_macros2() */
#define FILE6 "tfile6.h5" /* Test file */
@@ -120,21 +118,35 @@
#define NGROUPS 2
#define NDSETS 4
-const char *OLD_FILENAME[] = { /* Files created under 1.6 branch and 1.8 branch */
+/* Files created under 1.6 branch and 1.8 branch--used in test_filespace_compatible() */
+const char *OLD_FILENAME[] = {
"filespace_1_6.h5", /* 1.6 HDF5 file */
"filespace_1_8.h5" /* 1.8 HDF5 file */
};
-const char *FILESPACE_NAME[] = {
- "tfilespace",
- NULL
+
+/* Files created in 1.10.0 release --used in test_filespace_1.10.0_compatible() */
+/* These files are copied from release 1.10.0 tools/h5format_convert/testfiles */
+const char *OLD_1_10_0_FILENAME[] = {
+ "h5fc_ext1_i.h5", /* 0 */
+ "h5fc_ext1_f.h5", /* 1 */
+ "h5fc_ext2_if.h5", /* 2 */
+ "h5fc_ext2_sf.h5", /* 3 */
+ "h5fc_ext3_isf.h5", /* 4 */
+ "h5fc_ext_none.h5" /* 5 */
+};
+
+/* Files used in test_filespace_round_compatible() */
+const char *FSPACE_FILENAMES[] = {
+ "fsm_aggr_nopersist.h5", /* H5F_FILE_SPACE_AGGR, not persisting free-space */
+ "fsm_aggr_persist.h5", /* H5F_FILE_SPACE_AGGR, persisting free-space */
+ "paged_nopersist.h5", /* H5F_FILE_SPACE_PAGE, not persisting free-space */
+ "paged_persist.h5", /* H5F_FILE_SPACE_PAGE, persisting free-space */
+ "aggr.h5", /* H5F_FILE_SPACE_AGGR */
+ "none.h5" /* H5F_FILE_SPACE_NONE */
};
-const char *FILENAME[] = {
- "sec2_tfile",
- "split_tfile",
- "stdio_tfile",
- "core_tfile",
- "family_tfile",
+const char *FILESPACE_NAME[] = {
+ "tfilespace",
NULL
};
@@ -888,9 +900,8 @@ test_file_close(void)
ret = H5Gclose(group_id3);
CHECK(ret, FAIL, "H5Gclose");
break;
-
- case H5F_CLOSE_DEFAULT:
- default:
+ case H5F_CLOSE_DEFAULT:
+ default:
CHECK(fc_degree, H5F_CLOSE_DEFAULT, "H5Pget_fclose_degree");
break;
}
@@ -1538,112 +1549,7 @@ test_file_perm2(void)
CHECK(ret, FAIL, "H5Sclose");
} /* end test_file_perm2() */
-/****************************************************************
-**
-** test_file_freespace(): low-level file test routine.
-** This test checks the free space available in a file in various
-** situations.
-**
-** Modifications:
-** Vailin Choi; July 2012
-** Remove datasets in reverse order so that all file spaces are shrunk.
-** (A change due to H5FD_FLMAP_DICHOTOMY.)
-**
-*****************************************************************/
-static void
-test_file_freespace(void)
-{
- hid_t file; /* File opened with read-write permission */
- h5_stat_size_t empty_filesize; /* Size of file when empty */
- h5_stat_size_t mod_filesize; /* Size of file after being modified */
- hssize_t free_space; /* Amount of free space in file */
- hid_t dspace; /* Dataspace ID */
- hid_t dset; /* Dataset ID */
- hid_t dcpl; /* Dataset creation property list */
- int k; /* Local index variable */
- unsigned u; /* Local index variable */
- char name[32]; /* Dataset name */
- herr_t ret;
-
- /* Output message about test being performed */
- MESSAGE(5, ("Testing Low-Level File Free Space\n"));
-
- /* Create an "empty" file */
- file = H5Fcreate(FILE1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
- CHECK(file, FAIL, "H5Fcreate");
-
- ret = H5Fclose(file);
- CHECK_I(ret, "H5Fclose");
-
- /* Get the "empty" file size */
- empty_filesize = h5_get_file_size(FILE1, H5P_DEFAULT);
-
- /* Re-open the file (with read-write permission) */
- file = H5Fopen(FILE1, H5F_ACC_RDWR, H5P_DEFAULT);
- CHECK_I(file, "H5Fopen");
-
- /* Check that the free space is 0 */
- free_space = H5Fget_freespace(file);
- CHECK(free_space, FAIL, "H5Fget_freespace");
- VERIFY(free_space, 0, "H5Fget_freespace");
- /* Create dataspace for datasets */
- dspace = H5Screate(H5S_SCALAR);
- CHECK(dspace, FAIL, "H5Screate");
-
- /* Create a dataset creation property list */
- dcpl = H5Pcreate(H5P_DATASET_CREATE);
- CHECK(dcpl, FAIL, "H5Pcreate");
-
- /* Set the space allocation time to early */
- ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY);
- CHECK(ret, FAIL, "H5Pset_alloc_time");
-
- /* Create datasets in file */
- for(u = 0; u < 10; u++) {
- sprintf(name, "Dataset %u", u);
- dset = H5Dcreate2(file, name, H5T_STD_U32LE, dspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- CHECK(dset, FAIL, "H5Dcreate2");
-
- ret = H5Dclose(dset);
- CHECK(ret, FAIL, "H5Dclose");
- } /* end for */
-
- /* Close dataspace */
- ret = H5Sclose(dspace);
- CHECK(ret, FAIL, "H5Sclose");
-
- /* Close dataset creation property list */
- ret = H5Pclose(dcpl);
- CHECK(ret, FAIL, "H5Pclose");
-
- /* Check that there is the right amount of free space in the file */
- free_space = H5Fget_freespace(file);
- CHECK(free_space, FAIL, "H5Fget_freespace");
- VERIFY(free_space, 2360, "H5Fget_freespace");
-
- /* Delete datasets in file */
- for(k = 9; k >= 0; k--) {
- sprintf(name, "Dataset %u", (unsigned)k);
- ret = H5Ldelete(file, name, H5P_DEFAULT);
- CHECK(ret, FAIL, "H5Ldelete");
- } /* end for */
-
- /* Check that there is the right amount of free space in the file */
- free_space = H5Fget_freespace(file);
- CHECK(free_space, FAIL, "H5Fget_freespace");
- VERIFY(free_space, 0, "H5Fget_freespace");
-
- /* Close file */
- ret = H5Fclose(file);
- CHECK(ret, FAIL, "H5Fclose");
-
- /* Get the file size after modifications*/
- mod_filesize = h5_get_file_size(FILE1, H5P_DEFAULT);
-
- /* Check that the file reverted to empty size */
- VERIFY(mod_filesize, empty_filesize, "H5Fget_freespace");
-} /* end test_file_freespace() */
/****************************************************************
**
@@ -3185,419 +3091,1227 @@ test_userblock_alignment(void)
/****************************************************************
**
-** test_free_sections():
-** This routine does the actual work of checking information for
-** free space sections available in a file in various situations.
+** test_userblock_alignment_paged(): low-level file test routine.
+** This test checks that a file's userblock and alignment settings
+** interact properly:
+** -- alignment via H5Pset_alignment
+** -- alignment via paged aggregation
+**
+** Programmer: Vailin Choi; March 2013
**
*****************************************************************/
static void
-test_free_sections(hid_t fapl, char *fname)
+test_userblock_alignment_paged(void)
{
- hid_t file; /* File ID */
- hid_t fcpl; /* File creation property list template */
- hssize_t free_space; /* Amount of free space in file */
- hid_t dspace; /* Dataspace ID */
- hid_t dset; /* Dataset ID */
- hid_t dcpl; /* Dataset creation property list */
- unsigned u; /* Local index variable */
- char name[32]; /* Dataset name */
- hssize_t nsects; /* # of free-space sections */
- hssize_t saved_nsects; /* saved copy for the # of free-space sections */
- int i; /* local index variable */
- hsize_t total; /* sum of the free-space section sizes */
- hsize_t last_size; /* size of last free-space section */
- H5F_sect_info_t *sect_info; /* array to hold the free-space information */
- H5F_sect_info_t *saved_sect_info; /* array to hold the free-space information */
- herr_t ret; /* return value */
-
- /* Create file-creation template */
+ hid_t fid; /* File ID */
+ hid_t fcpl; /* File creation property list ID */
+ hid_t fapl; /* File access property list ID */
+ herr_t ret; /* Generic return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing interaction between userblock and alignment (via paged aggregation and H5Pset_alignment)\n"));
+
+ /*
+ * Case 1:
+ * Userblock size = 0
+ * Alignment in use = 4096
+ * Strategy = H5F_FILE_SPACE_PAGE; fsp_size = alignment = 4096
+ * Outcome:
+ * Should succeed:
+ * userblock is 0 and alignment != 0
+ */
+ /* Create file creation property list with user block */
fcpl = H5Pcreate(H5P_FILE_CREATE);
CHECK(fcpl, FAIL, "H5Pcreate");
+ ret = H5Pset_userblock(fcpl, (hsize_t)0);
+ CHECK(ret, FAIL, "H5Pset_userblock");
- /* Set file space strategy and free space section threshold */
- ret = H5Pset_file_space(fcpl, H5F_FILE_SPACE_ALL_PERSIST, (hsize_t)0);
- CHECK(ret, FAIL, "H5Pget_file_space");
+ /* Create file access property list */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
- /* Create the file */
- file = H5Fcreate(fname, H5F_ACC_TRUNC, fcpl, fapl);
- CHECK(file, FAIL, "H5Fcreate");
+ /* Set the "use the latest version of the format" bounds */
+ ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
+ CHECK(ret, FAIL, "H5Pset_libver_bounds");
- /* Close the FCPL */
+ ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, FALSE, (hsize_t)1);
+ CHECK(ret, FAIL, "H5Pset_file_space_strategy");
+
+ /* Call helper routines to perform file manipulations */
+ ret = test_userblock_alignment_helper1(fcpl, fapl);
+ CHECK(ret, FAIL, "test_userblock_alignment_helper1");
+ ret = test_userblock_alignment_helper2(fapl, TRUE);
+ CHECK(ret, FAIL, "test_userblock_alignment_helper2");
+
+ /* Release property lists */
ret = H5Pclose(fcpl);
CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
- /* Create dataspace for datasets */
- dspace = H5Screate(H5S_SCALAR);
- CHECK(dspace, FAIL, "H5Screate");
+ /*
+ * Case 2a:
+ * Userblock size = 1024
+ * Alignment in use = 512
+ * Strategy = H5F_FILE_SPACE_PAGE; fsp_size = alignment = 512
+ * H5Pset_alignment() is 3
+ * Outcome:
+ * Should succeed:
+ * userblock (1024) is integral mult. of alignment (512)
+ */
+ /* Create file creation property list with user block */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+ ret = H5Pset_userblock(fcpl, (hsize_t)1024);
+ CHECK(ret, FAIL, "H5Pset_userblock");
+ ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, FALSE, (hsize_t)1);
+ ret = H5Pset_file_space_page_size(fcpl, (hsize_t)512);
- /* Create a dataset creation property list */
- dcpl = H5Pcreate(H5P_DATASET_CREATE);
- CHECK(dcpl, FAIL, "H5Pcreate");
+ /* Create file access property list */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+ ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)3);
+ CHECK(ret, FAIL, "H5Pset_alignment");
- /* Set the space allocation time to early */
- ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY);
- CHECK(ret, FAIL, "H5Pset_alloc_time");
+ /* Call helper routines to perform file manipulations */
+ ret = test_userblock_alignment_helper1(fcpl, fapl);
+ CHECK(ret, FAIL, "test_userblock_alignment_helper1");
+ ret = test_userblock_alignment_helper2(fapl, TRUE);
+ CHECK(ret, FAIL, "test_userblock_alignment_helper2");
- /* Create datasets in file */
- for(u = 0; u < 10; u++) {
- sprintf(name, "Dataset %u", u);
- dset = H5Dcreate2(file, name, H5T_STD_U32LE, dspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
- CHECK(dset, FAIL, "H5Dcreate2");
+ /* Release property lists */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
- ret = H5Dclose(dset);
- CHECK(ret, FAIL, "H5Dclose");
- } /* end for */
+ /*
+ * Case 2b:
+ * Userblock size = 1024
+ * Alignment in use = 3
+ * Strategy = H5F_FILE_SPACE_AGGR; fsp_size = 512
+ * (via default file creation property)
+ * H5Pset_alignment() is 3
+ * Outcome:
+ * Should fail at file creation:
+ * userblock (1024) is non-integral mult. of alignment (3)
+ */
+ /* Create file creation property list with user block */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+ ret = H5Pset_userblock(fcpl, (hsize_t)1024);
+ CHECK(ret, FAIL, "H5Pset_userblock");
+ ret = H5Pset_file_space_page_size(fcpl, (hsize_t)512);
- /* Close dataspace */
- ret = H5Sclose(dspace);
- CHECK(ret, FAIL, "H5Sclose");
+ /* Create file access property list */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+ ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)3);
+ CHECK(ret, FAIL, "H5Pset_alignment");
- /* Close dataset creation property list */
- ret = H5Pclose(dcpl);
+ /* Create a file with FAPL & FCPL */
+ H5E_BEGIN_TRY {
+ fid = H5Fcreate(FILE1, H5F_ACC_TRUNC, fcpl, fapl);
+ } H5E_END_TRY;
+ VERIFY(fid, FAIL, "H5Fcreate");
+
+ /* Release property lists */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(fapl);
CHECK(ret, FAIL, "H5Pclose");
- /* Delete odd-numbered datasets in file */
- for(u = 0; u < 10; u++) {
- sprintf(name, "Dataset %u", u);
- if(u % 2) {
- ret = H5Ldelete(file, name, H5P_DEFAULT);
- CHECK(ret, FAIL, "H5Ldelete");
- } /* end if */
- } /* end for */
+ /*
+ * Case 3a:
+ * Userblock size = 512
+ * Alignment in use = 512
+ * Strategy is H5F_FILE_SPACE_PAGE; fsp_size = alignment = 512
+ * H5Pset_alignment() is 3
+ * Outcome:
+ * Should succeed:
+ * userblock (512) is equal to alignment (512)
+ */
+ /* Create file creation property list with user block */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+ ret = H5Pset_userblock(fcpl, (hsize_t)512);
+ CHECK(ret, FAIL, "H5Pset_userblock");
+ ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, TRUE, (hsize_t)1);
+ CHECK(ret, FAIL, "H5Pset_file_space_strategy");
+ ret = H5Pset_file_space_page_size(fcpl, (hsize_t)512);
+ CHECK(ret, FAIL, "H5Pset_file_space_page_size");
- /* Close file */
- ret = H5Fclose(file);
- CHECK(ret, FAIL, "H5Fclose");
+ /* Create file access property list with alignment */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+ ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)3);
+ CHECK(ret, FAIL, "H5Pset_alignment");
- /* Re-open the file with read-only permission */
- file = H5Fopen(fname, H5F_ACC_RDONLY, fapl);
- CHECK_I(file, "H5Fopen");
-
- /* Get the amount of free space in the file */
- free_space = H5Fget_freespace(file);
- CHECK(free_space, FAIL, "H5Fget_freespace");
-
- /* Get the # of free-space sections in the file */
- saved_nsects = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)0, NULL);
- CHECK(saved_nsects, FAIL, "H5Fget_free_sections");
-
- /* Allocate storage for the free space section information */
- saved_sect_info = (H5F_sect_info_t *)HDcalloc((size_t)saved_nsects, sizeof(H5F_sect_info_t));
- CHECK(saved_sect_info, NULL, "HDcalloc");
-
- /* Should return failure when nsects is 0 with a nonnull sect_info */
- nsects = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)0, saved_sect_info);
- VERIFY(nsects, FAIL, "H5Fget_free_sections");
-
- /* Verify the correct # of free-space sections */
- nsects = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)saved_nsects, saved_sect_info);
- VERIFY(nsects, saved_nsects, "H5Fget_free_sections");
-
- /* Verify the amount of free-space is correct */
- total = 0;
- for(i = 0; i < nsects; i++)
- total += saved_sect_info[i].size;
- VERIFY(free_space, total, "H5Fget_free_sections");
-
- /* save the last section's size */
- last_size = saved_sect_info[nsects-1].size;
-
- /* Allocate storage for -1 free space section information */
- sect_info = (H5F_sect_info_t *)HDcalloc((size_t)(saved_nsects - 1), sizeof(H5F_sect_info_t));
- CHECK(sect_info, NULL, "HDcalloc");
-
- /* Retrieve free space info for -1 sections */
- nsects = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)(saved_nsects - 1), sect_info);
- VERIFY(nsects, saved_nsects, "H5Fget_free_sections");
-
- /* Verify the amount of free-space is correct */
- total = 0;
- for(i = 0; i < (saved_nsects - 1); i++) {
- VERIFY(sect_info[i].addr, saved_sect_info[i].addr, "H5Fget_free_sections");
- VERIFY(sect_info[i].size, saved_sect_info[i].size, "H5Fget_free_sections");
- total += sect_info[i].size;
- }
+ /* Call helper routines to perform file manipulations */
+ ret = test_userblock_alignment_helper1(fcpl, fapl);
+ CHECK(ret, FAIL, "test_userblock_alignment_helper1");
+ ret = test_userblock_alignment_helper2(fapl, TRUE);
+ CHECK(ret, FAIL, "test_userblock_alignment_helper2");
- VERIFY(((hsize_t)free_space - last_size), total, "H5Fget_free_sections");
- HDfree(sect_info);
+ /* Release property lists */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /*
+ * Case 3b:
+ * Userblock size = 512
+ * Alignment in use = 3
+ * Strategy is H5F_FILE_SPACE_NONE; fsp_size = 512
+ * H5Pset_alignment() is 3
+ * Outcome:
+ * Should fail at file creation:
+ * userblock (512) is non-integral mult. of alignment (3)
+ */
+ /* Create file creation property list with user block */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+ ret = H5Pset_userblock(fcpl, (hsize_t)512);
+ CHECK(ret, FAIL, "H5Pset_userblock");
+ ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_NONE, FALSE, (hsize_t)1);
+ CHECK(ret, FAIL, "H5Pset_file_space_strategy");
+ ret = H5Pset_file_space_page_size(fcpl, (hsize_t)512);
+ CHECK(ret, FAIL, "H5Pset_file_space_page_size");
- /* Allocate storage for +1 free space section information */
- sect_info = (H5F_sect_info_t *)HDcalloc((size_t)(saved_nsects + 1), sizeof(H5F_sect_info_t));
- CHECK(sect_info, NULL, "HDcalloc");
+ /* Create file access property list with alignment */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+ ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)3);
+ CHECK(ret, FAIL, "H5Pset_alignment");
- /* Retrieve free-space info for +1 sections */
- nsects = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)(saved_nsects + 1), sect_info);
- VERIFY(nsects, saved_nsects, "H5Fget_free_sections");
+ /* Create a file with FAPL & FCPL */
+ H5E_BEGIN_TRY {
+ fid = H5Fcreate(FILE1, H5F_ACC_TRUNC, fcpl, fapl);
+ } H5E_END_TRY;
+ VERIFY(fid, FAIL, "H5Fcreate");
- /* Verify free-space info is correct */
- total = 0;
- for(i = 0; i < nsects; i++) {
- VERIFY(sect_info[i].addr, saved_sect_info[i].addr, "H5Fget_free_sections");
- VERIFY(sect_info[i].size, saved_sect_info[i].size, "H5Fget_free_sections");
- total += sect_info[i].size;
- }
+ /* Release property lists */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
- VERIFY(sect_info[nsects].addr, 0, "H5Fget_free_sections");
- VERIFY(sect_info[nsects].size, 0, "H5Fget_free_sections");
- VERIFY(free_space, total, "H5Fget_free_sections");
- HDfree(sect_info);
+ /*
+ * Case 4a:
+ * Userblock size = 1024
+ * Alignment in use = 1023
+ * Strategy is H5F_FILE_SPACE_PAGE; fsp_size = alignment = 1023
+ * H5Pset_alignment() is 16
+ * Outcome:
+ * Should fail at file creation:
+ * userblock (1024) is non-integral multiple of alignment (1023)
+ */
+ /* Create file creation property list with user block */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+ ret = H5Pset_userblock(fcpl, (hsize_t)1024);
+ CHECK(ret, FAIL, "H5Pset_userblock");
+ ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, TRUE, (hsize_t)1);
+ CHECK(ret, FAIL, "H5Pset_file_space_strategy");
+ ret = H5Pset_file_space_page_size(fcpl, (hsize_t)1023);
+ CHECK(ret, FAIL, "H5Pset_file_space_page_size");
- /* Verify that there is no free-space section for this type */
- nsects = H5Fget_free_sections(file, H5FD_MEM_BTREE, (size_t)0, NULL);
- VERIFY(nsects, 0, "H5Fget_free_sections");
+ /* Create file access property list with alignment */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+ ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)16);
+ CHECK(ret, FAIL, "H5Pset_alignment");
- /* Close file */
- ret = H5Fclose(file);
- CHECK(ret, FAIL, "H5Fclose");
+ /* Create a file with FAPL & FCPL */
+ H5E_BEGIN_TRY {
+ fid = H5Fcreate(FILE1, H5F_ACC_TRUNC, fcpl, fapl);
+ } H5E_END_TRY;
+ VERIFY(fid, FAIL, "H5Fcreate");
+
+ /* Release property lists */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /*
+ * Case 4b:
+ * Userblock size = 1024
+ * Alignment in use = 16
+ * Strategy is H5F_FILE_SPACE_FSM_AGGR; fsp_size = 1023
+ * H5Pset_alignment() is 16
+ * Outcome:
+ * Should succeed:
+ * userblock (1024) is integral multiple of alignment (16)
+ */
+ /* Create file creation property list with user block */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+ ret = H5Pset_userblock(fcpl, (hsize_t)1024);
+ CHECK(ret, FAIL, "H5Pset_userblock");
+ ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, FALSE, (hsize_t)1);
+ CHECK(ret, FAIL, "H5Pset_file_space_strategy");
+ ret = H5Pset_file_space_page_size(fcpl, (hsize_t)1023);
+ CHECK(ret, FAIL, "H5Pset_file_space_page_size");
+
+ /* Create file access property list with alignment */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+ ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)16);
+ CHECK(ret, FAIL, "H5Pset_alignment");
+
+ /* Call helper routines to perform file manipulations */
+ ret = test_userblock_alignment_helper1(fcpl, fapl);
+ CHECK(ret, FAIL, "test_userblock_alignment_helper1");
+ ret = test_userblock_alignment_helper2(fapl, TRUE);
+ CHECK(ret, FAIL, "test_userblock_alignment_helper2");
+
+ /* Release property lists */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /*
+ * Case 5a:
+ * Userblock size = 512
+ * Alignment in use = 1024
+ * Strategy is H5F_FILE_SPACE_PAGE; fsp_size = alignment = 1024
+ * H5Pset_alignment() is 16
+ * Outcome:
+ * Should fail at file creation:
+ * userblock (512) is less than alignment (1024)
+ */
+ /* Create file creation property list with user block */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+ ret = H5Pset_userblock(fcpl, (hsize_t)512);
+ CHECK(ret, FAIL, "H5Pset_userblock");
+ ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, FALSE, (hsize_t)1);
+ CHECK(ret, FAIL, "H5Pset_file_space_strategy");
+ ret = H5Pset_file_space_page_size(fcpl, (hsize_t)1024);
+ CHECK(ret, FAIL, "H5Pset_file_space_page_size");
+
+ /* Create file access property list with alignment */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+ ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)16);
+ CHECK(ret, FAIL, "H5Pset_alignment");
+
+ /* Create a file with FAPL & FCPL */
+ H5E_BEGIN_TRY {
+ fid = H5Fcreate(FILE1, H5F_ACC_TRUNC, fcpl, fapl);
+ } H5E_END_TRY;
+ VERIFY(fid, FAIL, "H5Fcreate");
+
+ /* Release property lists */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /*
+ * Case 5b:
+ * Userblock size = 512
+ * Alignment in use = 16
+ * Strategy is H5F_FILE_SPACE_NONE; fsp_size = 1024
+ * H5Pset_alignment() is 16
+ * Outcome:
+ * Should succeed:
+ * userblock (512) is integral multiple of alignment (16)
+ */
+ /* Create file creation property list with user block */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+ ret = H5Pset_userblock(fcpl, (hsize_t)512);
+ CHECK(ret, FAIL, "H5Pset_userblock");
+ ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_NONE, FALSE, (hsize_t)1);
+ CHECK(ret, FAIL, "H5Pset_file_space_strategy");
+ ret = H5Pset_file_space_page_size(fcpl, (hsize_t)1024);
+ CHECK(ret, FAIL, "H5Pset_file_space_page_size");
+
+ /* Create file access property list with alignment */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+ ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)16);
+ CHECK(ret, FAIL, "H5Pset_alignment");
+
+ /* Call helper routines to perform file manipulations */
+ ret = test_userblock_alignment_helper1(fcpl, fapl);
+ CHECK(ret, FAIL, "test_userblock_alignment_helper1");
+ ret = test_userblock_alignment_helper2(fapl, TRUE);
+ CHECK(ret, FAIL, "test_userblock_alignment_helper2");
+
+ /* Release property lists */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /*
+ * Case 6:
+ * Userblock size = 512
+ * Alignment in use = 512
+ * Strategy is H5F_FILE_SPACE_PAGE; fsp_size = alignment = 512
+ * H5Pset_alignment() is 3
+ * Reopen the file; H5Pset_alignment() is 1024
+ * Outcome:
+ * Should succeed:
+ * Userblock (512) is the same as alignment (512);
+ * The H5Pset_alignment() calls have no effect
+ */
+ /* Create file creation property list with user block */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+ ret = H5Pset_userblock(fcpl, (hsize_t)512);
+ CHECK(ret, FAIL, "H5Pset_userblock");
+ ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, FALSE, (hsize_t)1);
+ CHECK(ret, FAIL, "H5Pset_file_space_strategy");
+ ret = H5Pset_file_space_page_size(fcpl, (hsize_t)512);
+ CHECK(ret, FAIL, "H5Pset_file_space_page_size");
+
+ /* Create file access property list with alignment */
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+ ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)3);
+ CHECK(ret, FAIL, "H5Pset_alignment");
+
+ /* Call helper routines to perform file manipulations */
+ ret = test_userblock_alignment_helper1(fcpl, fapl);
+ CHECK(ret, FAIL, "test_userblock_alignment_helper1");
+
+ /* Change alignment in FAPL */
+ ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)1024);
+ CHECK(ret, FAIL, "H5Pset_alignment");
- HDfree(saved_sect_info);
+ /* Call helper routines to perform file manipulations */
+ ret = test_userblock_alignment_helper2(fapl, FALSE);
+ CHECK(ret, FAIL, "test_userblock_alignment_helper2");
+ ret = test_userblock_alignment_helper2(fapl, TRUE);
+ CHECK(ret, FAIL, "test_userblock_alignment_helper2");
-} /* end test_free_sections() */
+ /* Release property lists */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(fapl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+} /* end test_userblock_alignment_paged() */
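+
+/*
+ * Editorial note -- illustrative sketch only, not part of the test suite:
+ * the userblock/alignment combination exercised above. File creation
+ * succeeds only when the userblock size is an integral multiple of the
+ * alignment in use (with paged aggregation, the file space page size plays
+ * that role and the H5Pset_alignment() setting has no effect). The function
+ * name and the file name "ub_example.h5" are assumptions for illustration.
+ */
+#if 0
+#include "hdf5.h"
+
+static hid_t
+example_userblock_paged(void)
+{
+    hid_t fcpl = H5Pcreate(H5P_FILE_CREATE);
+    hid_t fapl = H5Pcreate(H5P_FILE_ACCESS);
+    hid_t fid;
+
+    /* Userblock of 512 bytes; page size of 512 bytes: 512 is a multiple of 512 */
+    H5Pset_userblock(fcpl, (hsize_t)512);
+    H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, FALSE, (hsize_t)1);
+    H5Pset_file_space_page_size(fcpl, (hsize_t)512);
+
+    /* With paged aggregation this alignment request is ignored */
+    H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)16);
+
+    fid = H5Fcreate("ub_example.h5", H5F_ACC_TRUNC, fcpl, fapl);
+
+    H5Pclose(fcpl);
+    H5Pclose(fapl);
+    return fid;
+}
+#endif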
/****************************************************************
**
-** test_filespace_sects():
-** This test checks free space section info for
-** files created with sec2 and split drivers.
+** test_filespace_info():
+** Verify the following public routines retrieve and set file space
+** information correctly:
+** (1) H5Pget/set_file_space_strategy():
+** Retrieve and set file space strategy, persisting free-space,
+** and free-space section threshold as specified
+** (2) H5Pget/set_file_space_page_size():
+** Retrieve and set the page size for paged aggregation
**
-*****************************************************************/
+****************************************************************/
static void
-test_filespace_sects(void)
+test_filespace_info(const char *env_h5_drvr)
{
- hid_t fapl_sec2; /* File access property id with sec2 driver */
- hid_t fapl_split; /* File access property id with split driver */
- hid_t fapl_core; /* File access property id with core driver */
- hid_t fapl_stdio; /* File access property id with stdio driver */
- hid_t fapl_family; /* File access property id with family driver */
- char filename[FILENAME_LEN]; /* Filename to use */
- herr_t ret; /* Return value */
+ hid_t fid; /* File IDs */
+ hid_t fapl, new_fapl; /* File access property lists */
+ hid_t fcpl, fcpl1, fcpl2; /* File creation property lists */
+ H5F_fspace_strategy_t strategy; /* File space strategy */
+ hbool_t persist; /* Persist free-space or not */
+ hsize_t threshold; /* Free-space section threshold */
+ unsigned new_format; /* New or old format */
+ H5F_fspace_strategy_t fs_strategy; /* File space strategy--iteration variable */
+ unsigned fs_persist; /* Persist free-space or not--iteration variable */
+ hsize_t fs_threshold; /* Free-space section threshold--iteration variable */
+ hsize_t fsp_size; /* File space page size */
+ char filename[FILENAME_LEN]; /* Filename to use */
+ hbool_t contig_addr_vfd; /* Whether VFD used has a contiguous address space */
+ herr_t ret; /* Return value */
- /* SEC2 */
- MESSAGE(5, ("Testing File free space information for a sec2 file\n"));
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing file creation public routines: H5Pget/set_file_space_strategy & H5Pget/set_file_space_page_size\n"));
- fapl_sec2 = H5Pcreate(H5P_FILE_ACCESS);
+ contig_addr_vfd = (hbool_t)(HDstrcmp(env_h5_drvr, "split") && HDstrcmp(env_h5_drvr, "multi"));
- ret = H5Pset_fapl_sec2(fapl_sec2);
- CHECK(ret, FAIL, "H5Pset_fapl_sec2");
+ fapl = h5_fileaccess();
+ h5_fixname(FILESPACE_NAME[0], fapl, filename, sizeof filename);
- /* Set the filename to use for this test (dependent on fapl) */
- h5_fixname(FILENAME[0], fapl_sec2, filename, sizeof(filename));
+ /* Get a copy of the file access property list */
+ new_fapl = H5Pcopy(fapl);
+ CHECK(new_fapl, FAIL, "H5Pcopy");
+
+ /* Set the "use the latest version of the format" bounds */
+ ret = H5Pset_libver_bounds(new_fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
+ CHECK(ret, FAIL, "H5Pset_libver_bounds");
+
+ /*
+ * Case (1)
+ * Check file space information from a default file creation property list.
+ * Values expected:
+ * strategy--H5F_FSPACE_STRATEGY_FSM_AGGR
+ * persist--FALSE
+ * threshold--1
+ * file space page size--4096
+ */
+ /* Create file creation property list template */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+
+ /* Retrieve file space information */
+ ret = H5Pget_file_space_strategy(fcpl, &strategy, &persist, &threshold);
+ CHECK(ret, FAIL, "H5Pget_file_space_strategy");
+
+ /* Verify file space information */
+ VERIFY(strategy, H5F_FSPACE_STRATEGY_FSM_AGGR, "H5Pget_file_space_strategy");
+ VERIFY(persist, FALSE, "H5Pget_file_space_strategy");
+ VERIFY(threshold, 1, "H5Pget_file_space_strategy");
+
+ /* Retrieve file space page size */
+ ret = H5Pget_file_space_page_size(fcpl, &fsp_size);
+ CHECK(ret, FAIL, "H5Pget_file_space_page_size");
+ VERIFY(fsp_size, FSP_SIZE_DEF, "H5Pget_file_space_page_size");
- /* perform free space information test for file with sec2 driver */
- test_free_sections(fapl_sec2, filename);
+ /* Close property list */
+ H5Pclose(fcpl);
+
+ /*
+ * Case (2)
+ * The file space page size has a minimum of 512 bytes.
+ * Setting a value less than 512 should fail:
+ * --setting file space page size to 0
+ * --setting file space page size to 511
+ */
+ /* Create file creation property list template */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
- /* close fapl_sec2 and remove the file */
- h5_clean_files(FILENAME, fapl_sec2);
+ /* Setting to 0: should fail */
+ H5E_BEGIN_TRY {
+ ret = H5Pset_file_space_page_size(fcpl, 0);
+ } H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Pset_file_space_page_size");
+ /* Setting to 511: should fail */
+ H5E_BEGIN_TRY {
+ ret = H5Pset_file_space_page_size(fcpl, 511);
+ } H5E_END_TRY;
+ VERIFY(ret, FAIL, "H5Pset_file_space_page_size");
- /* SPLIT */
- MESSAGE(5, ("Testing File free space information for a split file\n"));
+ /* Setting to 512: should succeed */
+ ret = H5Pset_file_space_page_size(fcpl, FSP_SIZE512);
+ CHECK(ret, FAIL, "H5Pset_file_space_page_size");
+ ret = H5Pget_file_space_page_size(fcpl, &fsp_size);
+ CHECK(ret, FAIL, "H5Pget_file_space_page_size");
+ VERIFY(fsp_size, FSP_SIZE512, "H5Pget_file_space_page_size");
- fapl_split = H5Pcreate(H5P_FILE_ACCESS);
- CHECK(fapl_split, FAIL, "h5_fileaccess");
+ /* Close property list */
+ H5Pclose(fcpl);
- ret = H5Pset_fapl_split(fapl_split, "-m.h5", H5P_DEFAULT, "-r.h5", H5P_DEFAULT);
- CHECK(ret, FAIL, "H5Pset_fapl_split");
+ /*
+ * Case (3)
+ * Check file space information when creating a file with default properties.
+ * Values expected:
+ * strategy--H5F_FSPACE_STRATEGY_FSM_AGGR
+ * persist--FALSE
+ * threshold--1
+ * file space page size--4096
+ */
+ /* Create a file with default file creation and access property lists */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
- /* Set the filename to use for this test (dependent on fapl) */
- h5_fixname(FILENAME[1], fapl_split, filename, sizeof(filename));
+ /* Get the file's creation property list */
+ fcpl1 = H5Fget_create_plist(fid);
+ CHECK(fcpl1, FAIL, "H5Fget_create_plist");
- /* perform free space information test for file with split driver */
- test_free_sections(fapl_split, filename);
+ /* Retrieve file space information */
+ ret = H5Pget_file_space_strategy(fcpl1, &strategy, &persist, &threshold);
+ CHECK(ret, FAIL, "H5Pget_file_space_strategy");
- /* close fapl and remove the file */
- h5_clean_files(FILENAME, fapl_split);
+ /* Verify file space information */
+ VERIFY(strategy, H5F_FSPACE_STRATEGY_FSM_AGGR, "H5Pget_file_space_strategy");
+ VERIFY(persist, FALSE, "H5Pget_file_space_strategy");
+ VERIFY(threshold, 1, "H5Pget_file_space_strategy");
+ /* Retrieve file space page size */
+ ret = H5Pget_file_space_page_size(fcpl1, &fsp_size);
+ CHECK(ret, FAIL, "H5Pget_file_space_page_size");
+ VERIFY(fsp_size, FSP_SIZE_DEF, "H5Pget_file_space_page_size");
- /* STDIO */
- MESSAGE(5, ("Testing File free space information for a stdio file\n"));
+ /* Close property lists */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+ ret = H5Pclose(fcpl1);
+ CHECK(ret, FAIL, "H5Pclose");
- fapl_stdio = H5Pcreate(H5P_FILE_ACCESS);
- CHECK(fapl_stdio, FAIL, "h5_fileaccess");
+ /*
+ * Case (4)
+ * Check file space information when creating a file with the
+ * latest library format and default properties.
+ * Values expected:
+ * strategy--H5F_FSPACE_STRATEGY_FSM_AGGR
+ * persist--FALSE
+ * threshold--1
+ * file space page size--4096
+ */
+ /* Create a file with the latest library format */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, H5P_DEFAULT, new_fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
- ret = H5Pset_fapl_stdio(fapl_stdio);
- CHECK(ret, FAIL, "H5Pset_fapl_split");
+ /* Get the file's creation property */
+ fcpl1 = H5Fget_create_plist(fid);
+ CHECK(fcpl1, FAIL, "H5Fget_create_plist");
- /* Set the filename to use for this test (dependent on fapl) */
- h5_fixname(FILENAME[2], fapl_stdio, filename, sizeof(filename));
+ /* Retrieve file space information */
+ ret = H5Pget_file_space_strategy(fcpl1, &strategy, &persist, &threshold);
+ CHECK(ret, FAIL, "H5Pget_file_space_strategy");
- /* perform free space information test for file with stdio driver */
- test_free_sections(fapl_stdio, filename);
+ /* Verify file space information */
+ VERIFY(strategy, H5F_FSPACE_STRATEGY_FSM_AGGR, "H5Pget_file_space_strategy");
+ VERIFY(persist, FALSE, "H5Pget_file_space_strategy");
+ VERIFY(threshold, 1, "H5Pget_file_space_strategy");
- /* close fapl and remove the file */
- h5_clean_files(FILENAME, fapl_stdio);
+ /* Retrieve file space page size */
+ ret = H5Pget_file_space_page_size(fcpl1, &fsp_size);
+ CHECK(ret, FAIL, "H5Pget_file_space_page_size");
+ VERIFY(fsp_size, FSP_SIZE_DEF, "H5Pget_file_space_page_size");
+ /* Close property lists */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+ ret = H5Pclose(fcpl1);
+ CHECK(ret, FAIL, "H5Pclose");
- /* CORE */
- MESSAGE(5, ("Testing File free space information for a core file\n"));
+ /*
+ * Case (5)
+ * Check file space information with the following combinations:
+ * Create file with --
+ * New or old format
+ * Persist or not persist free-space
+ * Different sizes for free-space section threshold (0 to 10)
+ * The four file space strategies:
+ * H5F_FSPACE_STRATEGY_FSM_AGGR, H5F_FSPACE_STRATEGY_PAGE,
+ * H5F_FSPACE_STRATEGY_AGGR, H5F_FSPACE_STRATEGY_NONE
+ * File space page size: set to 512
+ *
+ */
+ for(new_format = FALSE; new_format <= TRUE; new_format++) {
+ hid_t my_fapl;
- fapl_core = H5Pcreate(H5P_FILE_ACCESS);
- CHECK(fapl_core, FAIL, "h5_fileaccess");
+ /* Set the FAPL for the type of format */
+ if(new_format) {
+ MESSAGE(5, ("Testing with new group format\n"));
+ my_fapl = new_fapl;
+ } /* end if */
+ else {
+ MESSAGE(5, ("Testing with old group format\n"));
+ my_fapl = fapl;
+ } /* end else */
- ret = H5Pset_fapl_core(fapl_core, (size_t)CORE_INCREMENT, TRUE);
- CHECK(ret, FAIL, "H5Pset_fapl_core");
+ /* Test with TRUE or FALSE for persisting free-space */
+ for(fs_persist = FALSE; fs_persist <= TRUE; fs_persist++) {
+
+ /* Test with free-space section threshold size: 0 to 10 */
+ for(fs_threshold = 0; fs_threshold <= TEST_THRESHOLD10; fs_threshold++) {
+
+ /* Test with 4 file space strategies */
+ for(fs_strategy = H5F_FSPACE_STRATEGY_FSM_AGGR; fs_strategy < H5F_FSPACE_STRATEGY_NTYPES; H5_INC_ENUM(H5F_fspace_strategy_t, fs_strategy)) {
+
+ if(!contig_addr_vfd && (fs_strategy == H5F_FSPACE_STRATEGY_PAGE || fs_persist))
+ continue;
+
+ /* Create file creation property list template */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+
+ /* Set file space information */
+ ret = H5Pset_file_space_strategy(fcpl, fs_strategy, (hbool_t)fs_persist, fs_threshold);
+ CHECK(ret, FAIL, "H5Pset_file_space_strategy");
+
+ ret = H5Pset_file_space_page_size(fcpl, FSP_SIZE512);
+ CHECK(ret, FAIL, "H5Pset_file_space_strategy");
+
+ /* Retrieve file space information */
+ ret = H5Pget_file_space_strategy(fcpl, &strategy, &persist, &threshold);
+ CHECK(ret, FAIL, "H5Pget_file_space_strategy");
+
+ /* Verify file space information */
+ VERIFY(strategy, fs_strategy, "H5Pget_file_space_strategy");
+
+ if(fs_strategy < H5F_FSPACE_STRATEGY_AGGR) {
+ VERIFY(persist, (hbool_t)fs_persist, "H5Pget_file_space_strategy");
+ VERIFY(threshold, fs_threshold, "H5Pget_file_space_strategy");
+ } else {
+ VERIFY(persist, FALSE, "H5Pget_file_space_strategy");
+ VERIFY(threshold, 1, "H5Pget_file_space_strategy");
+ }
+
+ /* Retrieve and verify file space page size */
+ ret = H5Pget_file_space_page_size(fcpl, &fsp_size);
+ CHECK(ret, FAIL, "H5Pget_file_space_page_size");
+ VERIFY(fsp_size, FSP_SIZE512, "H5Pget_file_space_page_size");
+
+ /* Create the file with the specified file space info */
+ fid = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, my_fapl);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ /* Get the file's creation property */
+ fcpl1 = H5Fget_create_plist(fid);
+ CHECK(fcpl1, FAIL, "H5Fget_create_plist");
+
+ /* Retrieve file space information */
+ ret = H5Pget_file_space_strategy(fcpl1, &strategy, &persist, &threshold);
+ CHECK(ret, FAIL, "H5Pget_file_space_strategy");
+
+ /* Verify file space information */
+ VERIFY(strategy, fs_strategy, "H5Pget_file_space_strategy");
+
+ if(fs_strategy < H5F_FSPACE_STRATEGY_AGGR) {
+ VERIFY(persist, fs_persist, "H5Pget_file_space_strategy");
+ VERIFY(threshold, fs_threshold, "H5Pget_file_space_strategy");
+ } else {
+ VERIFY(persist, FALSE, "H5Pget_file_space_strategy");
+ VERIFY(threshold, 1, "H5Pget_file_space_strategy");
+ }
+
+ /* Retrieve and verify file space page size */
+ ret = H5Pget_file_space_page_size(fcpl1, &fsp_size);
+ CHECK(ret, FAIL, "H5Pget_file_space_page_size");
+ VERIFY(fsp_size, FSP_SIZE512, "H5Pget_file_space_page_size");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open the file */
+ fid = H5Fopen(filename, H5F_ACC_RDWR, my_fapl);
+ CHECK(ret, FAIL, "H5Fopen");
+
+ /* Get the file's creation property */
+ fcpl2 = H5Fget_create_plist(fid);
+ CHECK(fcpl2, FAIL, "H5Fget_create_plist");
+
+ /* Retrieve file space information */
+ ret = H5Pget_file_space_strategy(fcpl2, &strategy, &persist, &threshold);
+ CHECK(ret, FAIL, "H5Pget_file_space_strategy");
+
+ /* Verify file space information */
+ VERIFY(strategy, fs_strategy, "H5Pget_file_space_strategy");
+ if(fs_strategy < H5F_FSPACE_STRATEGY_AGGR) {
+ VERIFY(persist, fs_persist, "H5Pget_file_space_strategy");
+ VERIFY(threshold, fs_threshold, "H5Pget_file_space_strategy");
+ } else {
+ VERIFY(persist, FALSE, "H5Pget_file_space_strategy");
+ VERIFY(threshold, 1, "H5Pget_file_space_strategy");
+ }
+
+ /* Retrieve and verify file space page size */
+ ret = H5Pget_file_space_page_size(fcpl2, &fsp_size);
+ CHECK(ret, FAIL, "H5Pget_file_space_page_size");
+ VERIFY(fsp_size, FSP_SIZE512, "H5Pget_file_space_page_size");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Release file creation property lists */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(fcpl1);
+ CHECK(ret, FAIL, "H5Pclose");
+ ret = H5Pclose(fcpl2);
+ CHECK(ret, FAIL, "H5Pclose");
+ } /* end for file space strategy type */
+ } /* end for free-space section threshold */
+ } /* end for fs_persist */
+
+ /* Close the FAPL and remove the file */
+ h5_clean_files(FILESPACE_NAME, my_fapl);
+ } /* end for new_format */
- /* Set the filename to use for this test (dependent on fapl) */
- h5_fixname(FILENAME[3], fapl_core, filename, sizeof(filename));
+} /* test_filespace_info() */
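+
+/*
+ * Editorial note -- illustrative sketch only, not part of the test suite:
+ * a minimal example of the H5Pset/get_file_space_strategy() and
+ * H5Pset/get_file_space_page_size() calls exercised by test_filespace_info()
+ * above. The function name and the file name "fs_example.h5" are assumptions
+ * chosen for illustration.
+ */
+#if 0
+#include "hdf5.h"
+
+static int
+example_filespace_info(void)
+{
+    hid_t                 fcpl, fid;
+    H5F_fspace_strategy_t strategy;
+    hbool_t               persist;
+    hsize_t               threshold, fsp_size;
+
+    /* Request paged aggregation with persistent free-space managers */
+    fcpl = H5Pcreate(H5P_FILE_CREATE);
+    H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, TRUE, (hsize_t)1);
+    H5Pset_file_space_page_size(fcpl, (hsize_t)4096);    /* must be >= 512 */
+
+    fid = H5Fcreate("fs_example.h5", H5F_ACC_TRUNC, fcpl, H5P_DEFAULT);
+    H5Pclose(fcpl);
+
+    /* Read the settings back from the file's creation property list */
+    fcpl = H5Fget_create_plist(fid);
+    H5Pget_file_space_strategy(fcpl, &strategy, &persist, &threshold);
+    H5Pget_file_space_page_size(fcpl, &fsp_size);
+
+    H5Pclose(fcpl);
+    H5Fclose(fid);
+    return 0;
+}
+#endif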
- /* perform free space information test for file with core driver */
- test_free_sections(fapl_core, filename);
+/****************************************************************
+**
+** set_multi_split():
+** Internal routine to set up page-aligned address space for multi/split driver
+** when testing paged aggregation.
+** This is used by test_file_freespace() and test_sects_freespace().
+**
+*****************************************************************/
+static int
+set_multi_split(hid_t fapl, hsize_t pagesize, hbool_t multi, hbool_t split)
+{
+ H5FD_mem_t memb_map[H5FD_MEM_NTYPES];
+ hid_t memb_fapl_arr[H5FD_MEM_NTYPES];
+ char *memb_name[H5FD_MEM_NTYPES];
+ haddr_t memb_addr[H5FD_MEM_NTYPES];
+ hbool_t relax;
+ H5FD_mem_t mt;
- /* close fapl_ and remove the file */
- h5_clean_files(FILENAME, fapl_core);
+ HDassert(split || multi);
+ HDmemset(memb_name, 0, sizeof memb_name);
- /* FAMILY */
- MESSAGE(5, ("Testing File free space information for a family file\n"));
+ /* Get current split settings */
+ if(H5Pget_fapl_multi(fapl, memb_map, memb_fapl_arr, memb_name, memb_addr, &relax) < 0)
+ TEST_ERROR
- fapl_family = H5Pcreate(H5P_FILE_ACCESS);
- CHECK(fapl_family, FAIL, "h5_fileaccess");
+ if(split) {
+ /* Set memb_addr aligned */
+ memb_addr[H5FD_MEM_SUPER] = ((memb_addr[H5FD_MEM_SUPER] + pagesize - 1) / pagesize) * pagesize;
+ memb_addr[H5FD_MEM_DRAW] = ((memb_addr[H5FD_MEM_DRAW] + pagesize - 1) / pagesize) * pagesize;
+ } else {
+ /* Set memb_addr aligned */
+ for(mt = H5FD_MEM_DEFAULT; mt < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, mt))
+ memb_addr[mt] = ((memb_addr[mt] + pagesize - 1) / pagesize) * pagesize;
+ } /* end else */
- ret = H5Pset_fapl_family(fapl_family, (hsize_t)FAMILY_SIZE, H5P_DEFAULT);
- CHECK(ret, FAIL, "H5Pset_fapl_family");
+ /* Set multi driver with new FAPLs */
+ if(H5Pset_fapl_multi(fapl, memb_map, memb_fapl_arr, (const char * const *)memb_name, memb_addr, relax) < 0)
+ TEST_ERROR
- /* Set the filename to use for this test (dependent on fapl) */
- h5_fixname(FILENAME[4], fapl_family, filename, sizeof(filename));
+ /* Free memb_name */
+ for(mt = H5FD_MEM_DEFAULT; mt < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, mt))
+ HDfree(memb_name[mt]);
- /* perform free space information test for file with family driver */
- test_free_sections(fapl_family, filename);
+ return 0;
- /* close fapl and remove the file */
- h5_clean_files(FILENAME, fapl_family);
+error:
+ return(-1);
-} /* end test_filespace_sects() */
+} /* set_multi_split() */
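+
+/*
+ * Editorial note -- illustrative sketch only: the page-alignment arithmetic
+ * used by set_multi_split() above, which rounds a member address up to the
+ * next multiple of the page size. The function name and sample values in the
+ * comment are assumptions for illustration.
+ */
+#if 0
+#include "hdf5.h"
+
+static haddr_t
+example_round_to_page(haddr_t addr, hsize_t pagesize)
+{
+    /* e.g. addr = 1000, pagesize = 4096  ->  returns 4096 */
+    return ((addr + pagesize - 1) / pagesize) * pagesize;
+}
+#endif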
/****************************************************************
**
-** test_filespace_info():
-** Verify that the public routines H5Pget/set_file_space()
-** retrieve and set the file space strategy and free space
-** section threshold as specified.
+** test_file_freespace():
+** This routine checks the free space available in a file as
+** returned by the public routine H5Fget_freespace().
**
-****************************************************************/
+** Modifications:
+** Vailin Choi; July 2012
+** Remove datasets in reverse order so that all file spaces are shrunk.
+** (A change due to H5FD_FLMAP_DICHOTOMY.)
+**
+** Vailin Choi; Dec 2012
+** Add changes due to paged aggregation via new format:
+** the amount of freespace is different.
+**
+*****************************************************************/
static void
-test_filespace_info(void)
+test_file_freespace(const char *env_h5_drvr)
{
- hid_t fid1, fid2; /* HDF5 File IDs */
- hid_t fapl, new_fapl; /* File access property */
- hid_t fcpl, fcpl1, fcpl2; /* File creation property */
- char filename[FILENAME_LEN]; /* Filename to use */
- H5F_file_space_type_t strategy, fs_type, def_type; /* File space handling strategy */
- hsize_t threshold, fs_size, def_size; /* Free space section threshold */
- unsigned new_format; /* new format or old format */
- herr_t ret; /* return value */
+ hid_t file; /* File opened with read-write permission */
+ h5_stat_size_t empty_filesize; /* Size of file when empty */
+ h5_stat_size_t mod_filesize; /* Size of file after being modified */
+ hssize_t free_space; /* Amount of free space in file */
+ hid_t fcpl; /* File creation property list */
+ hid_t fapl, new_fapl; /* File access property list IDs */
+ hid_t dspace; /* Dataspace ID */
+ hid_t dset; /* Dataset ID */
+ hid_t dcpl; /* Dataset creation property list */
+ int k; /* Local index variable */
+ unsigned u; /* Local index variable */
+ char filename[FILENAME_LEN]; /* Filename to use */
+ char name[32]; /* Dataset name */
+ unsigned new_format; /* To use old or new format */
+ hbool_t split_vfd, multi_vfd; /* Indicate multi/split driver */
+ hsize_t expected_freespace; /* Freespace expected */
+ hsize_t expected_fs_del; /* Freespace expected after delete */
+ herr_t ret; /* Return value */
+
+ split_vfd = !HDstrcmp(env_h5_drvr, "split");
+ multi_vfd = !HDstrcmp(env_h5_drvr, "multi");
+
+ if(!split_vfd && !multi_vfd) {
+ fapl = h5_fileaccess();
+ h5_fixname(FILESPACE_NAME[0], fapl, filename, sizeof filename);
+
+ new_fapl = H5Pcopy(fapl);
+ CHECK(new_fapl, FAIL, "H5Pcopy");
+
+ /* Set the "use the latest version of the format" bounds */
+ ret = H5Pset_libver_bounds(new_fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
+ CHECK(ret, FAIL, "H5Pset_libver_bounds");
- /* Output message about test being performed */
- MESSAGE(5, ("Testing File Space Management public routines: H5Pget/set_file_space()\n"));
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+
+ /* Test with old & new format */
+ for(new_format = FALSE; new_format <= TRUE; new_format++) {
+ hid_t my_fapl;
+
+ /* Set the FAPL for the type of format */
+ if(new_format) {
+ MESSAGE(5, ("Testing with new group format\n"));
+
+ my_fapl = new_fapl;
+
+ if(multi_vfd || split_vfd) {
+ ret = set_multi_split(new_fapl, FSP_SIZE_DEF, multi_vfd, split_vfd);
+ CHECK(ret, FAIL, "set_multi_split");
+ }
+
+ ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, FALSE, (hsize_t)1);
+ CHECK(ret, FAIL, "H5P_set_file_space_strategy");
+
+ expected_freespace = 4534;
+ if(split_vfd) expected_freespace = 427;
+ if(multi_vfd) expected_freespace = 248;
+ expected_fs_del = 0;
+ } /* end if */
+ else {
+ MESSAGE(5, ("Testing with old group format\n"));
+ /* Default: non-paged aggregation, non-persistent free-space */
+ my_fapl = fapl;
+ expected_freespace = 2464;
+ if(split_vfd) expected_freespace = 264;
+ if(multi_vfd) expected_freespace = 0;
+ expected_fs_del = 4096;
+
+ } /* end else */
+
+ /* Create an "empty" file */
+ file = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, my_fapl);
+ CHECK(file, FAIL, "H5Fcreate");
+
+ ret = H5Fclose(file);
+ CHECK_I(ret, "H5Fclose");
+
+ /* Get the "empty" file size */
+ empty_filesize = h5_get_file_size(filename, H5P_DEFAULT);
+
+ /* Re-open the file (with read-write permission) */
+ file = H5Fopen(filename, H5F_ACC_RDWR, my_fapl);
+ CHECK_I(file, "H5Fopen");
+
+ /* Check that the free space is 0 */
+ free_space = H5Fget_freespace(file);
+ CHECK(free_space, FAIL, "H5Fget_freespace");
+ VERIFY(free_space, 0, "H5Fget_freespace");
+
+ /* Create dataspace for datasets */
+ dspace = H5Screate(H5S_SCALAR);
+ CHECK(dspace, FAIL, "H5Screate");
+
+ /* Create a dataset creation property list */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+
+ /* Set the space allocation time to early */
+ ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY);
+ CHECK(ret, FAIL, "H5Pset_alloc_time");
+
+ /* Create datasets in file */
+ for(u = 0; u < 10; u++) {
+ sprintf(name, "Dataset %u", u);
+ dset = H5Dcreate2(file, name, H5T_STD_U32LE, dspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dset, FAIL, "H5Dcreate2");
+
+ ret = H5Dclose(dset);
+ CHECK(ret, FAIL, "H5Dclose");
+ } /* end for */
- fapl = h5_fileaccess();
- h5_fixname(FILESPACE_NAME[0], fapl, filename, sizeof filename);
+ /* Close dataspace */
+ ret = H5Sclose(dspace);
+ CHECK(ret, FAIL, "H5Sclose");
- new_fapl = H5Pcopy(fapl);
- CHECK(new_fapl, FAIL, "H5Pcopy");
+ /* Close dataset creation property list */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
- /* Set the "use the latest version of the format" bounds */
- ret = H5Pset_libver_bounds(new_fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
- CHECK(ret, FAIL, "H5Pset_libver_bounds");
+ /* Check that there is the right amount of free space in the file */
+ free_space = H5Fget_freespace(file);
+ CHECK(free_space, FAIL, "H5Fget_freespace");
+ VERIFY(free_space, expected_freespace, "H5Fget_freespace");
- /* Create file-creation template */
- fcpl = H5Pcreate(H5P_FILE_CREATE);
- CHECK(fcpl, FAIL, "H5Pcreate");
+ /* Delete datasets in file */
+ for(k = 9; k >= 0; k--) {
+ sprintf(name, "Dataset %u", (unsigned)k);
+ ret = H5Ldelete(file, name, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+ } /* end for */
- /* Get default file space information */
- ret = H5Pget_file_space(fcpl, &def_type, &def_size);
- CHECK(ret, FAIL, "H5Pget_file_space");
+ /* Check that there is the right amount of free space in the file */
+ free_space = H5Fget_freespace(file);
+ CHECK(free_space, FAIL, "H5Fget_freespace");
+ VERIFY(free_space, expected_fs_del, "H5Fget_freespace");
- /* Test with old & new format groups */
- for(new_format = FALSE; new_format <= TRUE; new_format++) {
- hid_t my_fapl;
+ /* Close file */
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Get the file size after modifications */
+ mod_filesize = h5_get_file_size(filename, H5P_DEFAULT);
+
+ /* Check that the file reverted to the empty size */
+ VERIFY(mod_filesize, empty_filesize, "h5_get_file_size");
+
+ h5_clean_files(FILESPACE_NAME, my_fapl);
+
+ } /* end for */
+ }
+
+} /* end test_file_freespace() */
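+
+/*
+ * Editorial note -- illustrative sketch only: the basic H5Fget_freespace()
+ * query pattern exercised by test_file_freespace() above. The function name
+ * and the file-name parameter are assumptions for illustration.
+ */
+#if 0
+#include "hdf5.h"
+
+static hssize_t
+example_query_freespace(const char *name)
+{
+    hid_t    fid        = H5Fopen(name, H5F_ACC_RDONLY, H5P_DEFAULT);
+    hssize_t free_space = H5Fget_freespace(fid);  /* bytes tracked as free, negative on error */
+
+    H5Fclose(fid);
+    return free_space;
+}
+#endif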
+
+/****************************************************************
+**
+** test_sects_freespace():
+** This routine checks free-space section information for the
+** file as returned by the public routine H5Fget_free_sections().
+**
+*****************************************************************/
+static void
+test_sects_freespace(const char *env_h5_drvr, hbool_t new_format)
+{
+ char filename[FILENAME_LEN]; /* Filename to use */
+ hid_t file; /* File ID */
+ hid_t fcpl; /* File creation property list template */
+ hid_t fapl; /* File access property list template */
+ hssize_t free_space; /* Amount of free-space in the file */
+ hid_t dspace; /* Dataspace ID */
+ hid_t dset; /* Dataset ID */
+ hid_t dcpl; /* Dataset creation property list */
+ char name[32]; /* Dataset name */
+ hssize_t nsects = 0; /* # of free-space sections */
+ hssize_t nall; /* # of free-space sections for all types of data */
+ hssize_t nmeta = 0, nraw = 0; /* # of free-space sections for meta/raw/generic data */
+ H5F_sect_info_t sect_info[15]; /* Array to hold free-space information */
+ H5F_sect_info_t all_sect_info[15]; /* Array to hold free-space information for all types of data */
+ H5F_sect_info_t meta_sect_info[15]; /* Array to hold free-space information for metadata */
+ H5F_sect_info_t raw_sect_info[15]; /* Array to hold free-space information for raw data */
+ hsize_t total = 0; /* Sum of the free-space section sizes for all types */
+ hsize_t tmp_tot = 0; /* Sum of the metadata and raw data free-space section sizes */
+ hsize_t last_size; /* Size of last free-space section */
+ hsize_t dims[1]; /* Dimension sizes */
+ unsigned u; /* Local index variable */
+ H5FD_mem_t type;
+ hbool_t split_vfd = FALSE, multi_vfd = FALSE;
+ herr_t ret; /* Return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("Testing H5Fget_free_sections()--free-space section info in the file\n"));
+
+ split_vfd = !HDstrcmp(env_h5_drvr, "split");
+ multi_vfd = !HDstrcmp(env_h5_drvr, "multi");
+
+ if(!split_vfd && !multi_vfd) {
+
+ fapl = h5_fileaccess();
+ h5_fixname(FILESPACE_NAME[0], fapl, filename, sizeof filename);
+
+ /* Create file-creation template */
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
- /* Set the FAPL for the type of format */
if(new_format) {
- MESSAGE(5, ("Testing with new group format\n"));
- my_fapl = new_fapl;
- } /* end if */
- else {
- MESSAGE(5, ("Testing with old group format\n"));
- my_fapl = fapl;
- } /* end else */
+ ret = H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
+ CHECK(ret, FAIL, "H5Pset_libver_bounds");
+
+ /* Set to paged aggregation and persistent free-space */
+ ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, TRUE, (hsize_t)1);
+ CHECK(ret, FAIL, "H5Pget_file_space_strategy");
+
+ /* Set up paged aligned address space for multi/split driver */
+ if(multi_vfd || split_vfd) {
+ ret = set_multi_split(fapl, FSP_SIZE_DEF, multi_vfd, split_vfd);
+ CHECK(ret, FAIL, "set_multi_split");
+ }
+
+ } else {
+ ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, TRUE, (hsize_t)1);
+ CHECK(ret, FAIL, "H5Pget_file_space_strategy");
+ }
+
+ /* Create the file */
+ file = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl);
+ CHECK(file, FAIL, "H5Fcreate");
+
+ /* Create a dataset creation property list */
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ CHECK(dcpl, FAIL, "H5Pcreate");
+
+ /* Set the space allocation time to early */
+ ret = H5Pset_alloc_time(dcpl, H5D_ALLOC_TIME_EARLY);
+ CHECK(ret, FAIL, "H5Pset_alloc_time");
+
+ /* Create 1 large dataset */
+ dims[0] = 1200;
+ dspace = H5Screate_simple(1, dims, NULL);
+ dset = H5Dcreate2(file, "Dataset_large", H5T_STD_U32LE, dspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dset, FAIL, "H5Dcreate2");
- /* Test with different sized free space section threshold */
- for(fs_size = 0; fs_size <= TEST_THRESHOLD10; fs_size++) {
+ /* Close dataset */
+ ret = H5Dclose(dset);
+ CHECK(ret, FAIL, "H5Dclose");
- /* Test with different file space handling strategies */
- for(fs_type = H5F_FILE_SPACE_DEFAULT; fs_type < H5F_FILE_SPACE_NTYPES; H5_INC_ENUM(H5F_file_space_type_t, fs_type)) {
+ /* Close dataspace */
+ ret = H5Sclose(dspace);
+ CHECK(ret, FAIL, "H5Sclose");
- /* Get a copy of the default file creation property */
- fcpl1 = H5Pcopy(fcpl);
- CHECK(fcpl1, FAIL, "H5Pcopy");
+ /* Create dataspace for datasets */
+ dspace = H5Screate(H5S_SCALAR);
+ CHECK(dspace, FAIL, "H5Screate");
- /* Set file space strategy and free space section threshold */
- ret = H5Pset_file_space(fcpl1, fs_type, fs_size);
- CHECK(ret, FAIL, "H5Pget_file_space");
+ /* Create datasets in file */
+ for(u = 0; u < 10; u++) {
+ sprintf(name, "Dataset %u", u);
+ dset = H5Dcreate2(file, name, H5T_STD_U32LE, dspace, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ CHECK(dset, FAIL, "H5Dcreate2");
- /* Get the file space info from the creation property */
- ret = H5Pget_file_space(fcpl1, &strategy, &threshold);
- CHECK(ret, FAIL, "H5Pget_file_space");
+ ret = H5Dclose(dset);
+ CHECK(ret, FAIL, "H5Dclose");
+ } /* end for */
- /* A 0 value for strategy retains existing strategy in use */
- VERIFY(strategy, (H5F_file_space_type_t)(fs_type ? fs_type : def_type), "H5Pget_file_space");
- /* A 0 value for threshold retains existing threshold in use */
- VERIFY(threshold, (hsize_t)(fs_size ? fs_size : def_size), "H5Pget_file_space");
+ /* Close dataspace */
+ ret = H5Sclose(dspace);
+ CHECK(ret, FAIL, "H5Sclose");
- /* Create the file with the specified file space info */
- fid1 = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl1, my_fapl);
- CHECK(fid1, FAIL, "H5Fcreate");
+ /* Close dataset creation property list */
+ ret = H5Pclose(dcpl);
+ CHECK(ret, FAIL, "H5Pclose");
- /* Close the file */
- ret = H5Fclose(fid1);
- CHECK(ret, FAIL, "H5Fclose");
+ /* Delete odd-numbered datasets in file */
+ for(u = 0; u < 10; u++) {
+ sprintf(name, "Dataset %u", u);
+ if(u % 2) {
+ ret = H5Ldelete(file, name, H5P_DEFAULT);
+ CHECK(ret, FAIL, "H5Ldelete");
+ } /* end if */
+ } /* end for */
- /* Re-open the file */
- fid2 = H5Fopen(filename, H5F_ACC_RDWR, my_fapl);
- CHECK(fid2, FAIL, "H5Fopen");
+ /* Close file */
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-open the file with read-only permission */
+ file = H5Fopen(filename, H5F_ACC_RDONLY, fapl);
+ CHECK_I(file, "H5Fopen");
+
+ /* Get the amount of free space in the file */
+ free_space = H5Fget_freespace(file);
+ CHECK(free_space, FAIL, "H5Fget_freespace");
+
+ /* Get the total # of free-space sections in the file */
+ nall = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)0, NULL);
+ CHECK(nall, FAIL, "H5Fget_free_sections");
+
+ /* Should return failure when nsects is 0 with a nonnull sect_info */
+ nsects = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)0, all_sect_info);
+ VERIFY(nsects, FAIL, "H5Fget_free_sections");
+
+ /* Retrieve and verify free space info for all the sections */
+ HDmemset(all_sect_info, 0, sizeof(all_sect_info));
+ nsects = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)nall, all_sect_info);
+ VERIFY(nsects, nall, "H5Fget_free_sections");
+
+ /* Verify the amount of free-space is correct */
+ for(u = 0; u < nall; u++)
+ total += all_sect_info[u].size;
+ VERIFY(free_space, total, "H5Fget_free_sections");
+
+ /* Save the last section's size */
+ last_size = all_sect_info[nall-1].size;
+
+ /* Retrieve and verify free space info for -1 sections */
+ HDmemset(sect_info, 0, sizeof(sect_info));
+ nsects = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)(nall - 1), sect_info);
+ VERIFY(nsects, nall, "H5Fget_free_sections");
+
+ /* Verify the amount of free-space is correct */
+ total = 0;
+ for(u = 0; u < (nall - 1); u++) {
+ VERIFY(sect_info[u].addr, all_sect_info[u].addr, "H5Fget_free_sections");
+ VERIFY(sect_info[u].size, all_sect_info[u].size, "H5Fget_free_sections");
+ total += sect_info[u].size;
+ }
+ VERIFY(((hsize_t)free_space - last_size), total, "H5Fget_free_sections");
+
+ /* Retrieve and verify free-space info for +1 sections */
+ HDmemset(sect_info, 0, sizeof(sect_info));
+ nsects = H5Fget_free_sections(file, H5FD_MEM_DEFAULT, (size_t)(nall + 1), sect_info);
+ VERIFY(nsects, nall, "H5Fget_free_sections");
+
+ /* Verify amount of free-space is correct */
+ total = 0;
+ for(u = 0; u < nall; u++) {
+ VERIFY(sect_info[u].addr, all_sect_info[u].addr, "H5Fget_free_sections");
+ VERIFY(sect_info[u].size, all_sect_info[u].size, "H5Fget_free_sections");
+ total += sect_info[u].size;
+ }
+ VERIFY(sect_info[nall].addr, 0, "H5Fget_free_sections");
+ VERIFY(sect_info[nall].size, 0, "H5Fget_free_sections");
+ VERIFY(free_space, total, "H5Fget_free_sections");
+
+ HDmemset(meta_sect_info, 0, sizeof(meta_sect_info));
+ if(multi_vfd) {
+ hssize_t ntmp;
+
+ for(type = H5FD_MEM_SUPER; type < H5FD_MEM_NTYPES; H5_INC_ENUM(H5FD_mem_t, type)) {
+ if(type == H5FD_MEM_DRAW || type == H5FD_MEM_GHEAP)
+ continue;
+ /* Get the # of free-space sections in the file for metadata */
+ ntmp = H5Fget_free_sections(file, type, (size_t)0, NULL);
+ CHECK(ntmp, FAIL, "H5Fget_free_sections");
+
+ if(ntmp > 0) {
+ nsects = H5Fget_free_sections(file, type, (size_t)ntmp, &meta_sect_info[nmeta]);
+ VERIFY(nsects, ntmp, "H5Fget_free_sections");
+ nmeta += ntmp;
+ }
+ }
+ } else {
+ /* Get the # of free-space sections in the file for metadata */
+ nmeta = H5Fget_free_sections(file, H5FD_MEM_SUPER, (size_t)0, NULL);
+ CHECK(nmeta, FAIL, "H5Fget_free_sections");
+
+ /* Retrieve and verify free-space sections for metadata */
+ nsects = H5Fget_free_sections(file, H5FD_MEM_SUPER, (size_t)nmeta, meta_sect_info);
+ VERIFY(nsects, nmeta, "H5Fget_free_sections");
+ }
- /* Get the file's creation property */
- fcpl2 = H5Fget_create_plist(fid2);
- CHECK(fcpl2, FAIL, "H5Fget_create_plist");
+ /* Get the # of free-space sections in the file for raw data */
+ nraw = H5Fget_free_sections(file, H5FD_MEM_DRAW, (size_t)0, NULL);
+ CHECK(nraw, FAIL, "H5Fget_free_sections");
- strategy = H5F_FILE_SPACE_DEFAULT;
- threshold = 0;
+ /* Retrieve and verify free-space sections for raw data */
+ HDmemset(raw_sect_info, 0, sizeof(raw_sect_info));
+ nsects = H5Fget_free_sections(file, H5FD_MEM_DRAW, (size_t)nraw, raw_sect_info);
+ VERIFY(nsects, nraw, "H5Fget_free_sections");
- /* Get the file space info from the creation property list */
- ret = H5Pget_file_space(fcpl2, &strategy, &threshold);
- CHECK(ret, FAIL, "H5Pget_file_space");
+ /* Sum all the free-space sections */
+ for(u = 0; u < nmeta; u++)
+ tmp_tot += meta_sect_info[u].size;
- VERIFY(strategy, (H5F_file_space_type_t)(fs_type ? fs_type : def_type), "H5Pget_file_space");
- VERIFY(threshold, (hsize_t)(fs_size ? fs_size : def_size), "H5Pget_file_space");
+ for(u = 0; u < nraw; u++)
+ tmp_tot += raw_sect_info[u].size;
- /* Close the file */
- ret = H5Fclose(fid2);
- CHECK(ret, FAIL, "H5Fclose");
+ /* Verify free-space info */
+ VERIFY(nmeta+nraw, nall, "H5Fget_free_sections");
+ VERIFY(tmp_tot, total, "H5Fget_free_sections");
- /* Release file-creation template */
- ret = H5Pclose(fcpl1);
- CHECK(ret, FAIL, "H5Pclose");
- ret = H5Pclose(fcpl2);
- CHECK(ret, FAIL, "H5Pclose");
- } /* end for file space strategy type */
- } /* end for free space threshold */
+ /* Closing */
+ ret = H5Fclose(file);
+ CHECK(ret, FAIL, "H5Fclose");
+ ret = H5Pclose(fcpl);
+ CHECK(fcpl, FAIL, "H5Pclose");
- h5_clean_files(FILESPACE_NAME, my_fapl);
+ h5_clean_files(FILESPACE_NAME, fapl);
+ }
- } /* end for new/old format */
+} /* end test_sects_freespace() */
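+
+/*
+ * Editorial note -- illustrative sketch only: the two-call pattern for
+ * H5Fget_free_sections() used above -- first query the section count with a
+ * NULL buffer, then retrieve the section info. The function name and the
+ * 64-entry buffer size are assumptions; the test above uses fixed 15-entry
+ * arrays.
+ */
+#if 0
+#include "hdf5.h"
+
+static void
+example_free_sections(hid_t fid)
+{
+    H5F_sect_info_t sect_info[64];
+    hssize_t        nsects;
+
+    /* First call: a NULL buffer returns the number of free-space sections */
+    nsects = H5Fget_free_sections(fid, H5FD_MEM_DEFAULT, (size_t)0, NULL);
+
+    /* Second call: retrieve up to nsects sections (addr/size pairs) */
+    if(nsects > 0 && (size_t)nsects <= 64)
+        nsects = H5Fget_free_sections(fid, H5FD_MEM_DEFAULT, (size_t)nsects, sect_info);
+}
+#endif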
- /* Close the file creation property list */
- ret = H5Pclose(fcpl);
- CHECK(ret, FAIL, "H5Pclose");
-} /* test_filespace_info() */
/****************************************************************
**
** test_filespace_compatible():
-** Verify that the branch with file space management enhancement
+** Verify that the trunk with the latest file space management
** can open, read and modify 1.6 HDF5 file and 1.8 HDF5 file.
-** Also verify the correct file space strategy/threshold in use
+** Also verify the correct file space handling information
** and the amount of free space.
**
****************************************************************/
@@ -3605,36 +4319,37 @@ static void
test_filespace_compatible(void)
{
int fd_old = (-1), fd_new = (-1); /* File descriptors for copying data */
- hid_t fid; /* File id */
- hid_t fcpl; /* File creation property list template */
- hid_t did; /* Dataset id */
- int check[100]; /* Temporary buffer for verifying dataset data */
- int rdbuf[100]; /* Temporary buffer for reading in dataset data */
+ hid_t fid = -1; /* File id */
+ hid_t did = -1; /* Dataset id */
+ hid_t fcpl; /* File creation property list template */
+ int check[100]; /* Temporary buffer for verifying dataset data */
+ int rdbuf[100]; /* Temporary buffer for reading in dataset data */
uint8_t buf[READ_OLD_BUFSIZE]; /* temporary buffer for reading */
- ssize_t nread; /* Number of bytes read in */
- unsigned i, j; /* Local index variable */
- hssize_t free_space; /* Amount of free space in the file */
- hsize_t threshold; /* Free space section threshold */
- H5F_file_space_type_t strategy; /* File space handling strategy */
- herr_t ret; /* Return value */
+ ssize_t nread; /* Number of bytes read in */
+ unsigned i, j; /* Local index variable */
+ hssize_t free_space; /* Amount of free-space in the file */
+ hbool_t persist; /* Persist free-space or not */
+ hsize_t threshold; /* Free-space section threshold */
+ H5F_fspace_strategy_t strategy; /* File space handling strategy */
+ herr_t ret; /* Return value */
/* Output message about test being performed */
- MESSAGE(5, ("Testing File space compatibility for 1.6 and 1.8 files\n"));
+ MESSAGE(5, ("File space compatibility testing for 1.6 and 1.8 files\n"));
for(j = 0; j < NELMTS(OLD_FILENAME); j++) {
const char *filename = H5_get_srcdir_filename(OLD_FILENAME[j]); /* Corrected test file name */
- /* Copy old file into test file */
+ /* Open and copy the test file into a temporary file */
fd_old = HDopen(filename, O_RDONLY, 0666);
CHECK(fd_old, FAIL, "HDopen");
fd_new = HDopen(FILE5, O_RDWR|O_CREAT|O_TRUNC, 0666);
CHECK(fd_new, FAIL, "HDopen");
/* Copy data */
- while((nread = HDread(fd_old, buf, (size_t)READ_OLD_BUFSIZE)) > 0) {
- ssize_t write_err = HDwrite(fd_new, buf, (size_t)nread);
- CHECK(write_err, -1, "HDwrite");
- } /* end while */
+ while((nread = HDread(fd_old, buf, (size_t)READ_OLD_BUFSIZE)) > 0) {
+ ssize_t write_err = HDwrite(fd_new, buf, (size_t)nread);
+ CHECK(write_err, -1, "HDwrite");
+ } /* end while */
/* Close the files */
ret = HDclose(fd_old);
@@ -3642,7 +4357,7 @@ test_filespace_compatible(void)
ret = HDclose(fd_new);
CHECK(ret, FAIL, "HDclose");
- /* Open the test file */
+ /* Open the temporary test file */
fid = H5Fopen(FILE5, H5F_ACC_RDWR, H5P_DEFAULT);
CHECK(fid, FAIL, "H5Fopen");
@@ -3651,17 +4366,20 @@ test_filespace_compatible(void)
CHECK(free_space, FAIL, "H5Fget_freespace");
VERIFY(free_space, (hssize_t)0, "H5Fget_freespace");
- /* Get the file's file creation property list */
- /* Retrieve the file space handling strategy and threshold */
- fcpl = H5Fget_create_plist(fid);
- CHECK(fcpl, FAIL, "H5Fget_create_plist");
- ret = H5Pget_file_space(fcpl, &strategy, &threshold);
- CHECK(ret, FAIL, "H5Pget_file_space");
+ /* Get the file's file creation property list */
+ fcpl = H5Fget_create_plist(fid);
+ CHECK(fcpl, FAIL, "H5Fget_create_plist");
+
+ /* Retrieve the file space info */
+ ret = H5Pget_file_space_strategy(fcpl, &strategy, &persist, &threshold);
+ CHECK(ret, FAIL, "H5Pget_file_space_strategy");
- /* File space handling strategy should be H5F_FILE_SPACE_ALL = 2 */
- /* Free space section threshold should be 1 */
- VERIFY(strategy, 2, "H5Pget_file_space");
- VERIFY(threshold, 1, "H5Pget_file_space");
+ /* File space handling strategy should be H5F_FSPACE_STRATEGY_FSM_AGGR */
+ /* Persisting free-space should be FALSE */
+ /* Free-space section threshold should be 1 */
+ VERIFY(strategy, H5F_FSPACE_STRATEGY_FSM_AGGR, "H5Pget_file_space_strategy");
+ VERIFY(persist, FALSE, "H5Pget_file_space_strategy");
+ VERIFY(threshold, 1, "H5Pget_file_space_strategy");
/* Generate raw data */
for(i = 0; i < 100; i++)
@@ -3685,13 +4403,13 @@ test_filespace_compatible(void)
ret = H5Ldelete(fid, DSETNAME, H5P_DEFAULT);
CHECK(ret, FAIL, "H5Ldelete");
- /* Close the plist */
- ret = H5Pclose(fcpl);
- CHECK(ret, FAIL, "H5Pclose");
+ /* Close the plist */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
- /* Close the file */
- ret = H5Fclose(fid);
- CHECK(ret, FAIL, "H5Fclose");
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
/* Re-Open the file */
fid = H5Fopen(FILE5, H5F_ACC_RDONLY, H5P_DEFAULT);
@@ -3714,6 +4432,383 @@ test_filespace_compatible(void)
/****************************************************************
**
+** test_filespace_1.10.0_compatible():
+** Verify that the latest file space management can open, read and
+** modify 1.10.0 HDF5 files :
+** h5fc_ext1_i.h5: H5F_FILE_SPACE_ALL, default threshold; has superblock extension but no fsinfo message
+** h5fc_ext1_f.h5: H5F_FILE_SPACE_ALL_PERSIST, default threshold; has superblock extension with fsinfo message
+** h5fc_ext2_if.h5: H5F_FILE_SPACE_ALL, non-default threshold; has superblock extension with fsinfo message
+** h5fc_ext2_sf.h5: H5F_FILE_SPACE_VFD, default threshold; has superblock extension with fsinfo message
+** h5fc_ext3_isf.h5: H5F_FILE_SPACE_AGGR_VFD, default threshold; has superblock extension with fsinfo message
+** h5fc_ext_none.h5: H5F_FILE_SPACE_ALL, default threshold; without superblock extension
+** The above files are copied from release 1.10.0 tools/h5format_convert/testfiles.
+**
+****************************************************************/
+static void
+test_filespace_1_10_0_compatible(void)
+{
+ hid_t fid = -1; /* File id */
+ hid_t did = -1; /* Dataset id */
+ hid_t fcpl; /* File creation property list */
+ hbool_t persist; /* Persist free-space or not */
+ hsize_t threshold; /* Free-space section threshold */
+ H5F_fspace_strategy_t strategy; /* File space handling strategy */
+ int wbuf[24]; /* Buffer for dataset data */
+ int rdbuf[24]; /* Buffer for dataset data */
+ int status; /* Status from copying the existing file */
+ unsigned i, j; /* Local index variable */
+ herr_t ret; /* Return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("File space compatibility testing for 1.10.0 files\n"));
+
+ for(j = 0; j < NELMTS(OLD_1_10_0_FILENAME); j++) {
+ /* Make a copy of the test file */
+ status = h5_make_local_copy(OLD_1_10_0_FILENAME[j], FILE5);
+ CHECK(status, FAIL, "h5_make_local_copy");
+
+ /* Open the temporary test file */
+ fid = H5Fopen(FILE5, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Get the file's file creation property list */
+ fcpl = H5Fget_create_plist(fid);
+ CHECK(fcpl, FAIL, "H5Fget_create_plist");
+
+ /* Retrieve the file space info */
+ ret = H5Pget_file_space_strategy(fcpl, &strategy, &persist, &threshold);
+ CHECK(ret, FAIL, "H5Pget_file_space_strategy");
+
+ switch(j) {
+ case 0:
+ VERIFY(strategy, H5F_FILE_SPACE_STRATEGY_DEF, "H5Pget_file_space_strategy");
+ VERIFY(persist, H5F_FREE_SPACE_PERSIST_DEF, "H5Pget_file_space_strategy");
+ VERIFY(threshold, H5F_FREE_SPACE_THRESHOLD_DEF, "H5Pget_file_space_strategy");
+
+ /* Open the dataset */
+ did = H5Dopen2(fid, "/DSET_EA", H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen");
+
+ for(i = 0; i < 24; i++)
+ wbuf[i] = (int)j+1;
+
+ /* Write to the dataset */
+ ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close the dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+ break;
+
+ case 1:
+ VERIFY(strategy, H5F_FSPACE_STRATEGY_FSM_AGGR, "H5Pget_file_space_strategy");
+ VERIFY(persist, TRUE, "H5Pget_file_space_strategy");
+ VERIFY(threshold, H5F_FREE_SPACE_THRESHOLD_DEF, "H5Pget_file_space_strategy");
+
+ /* Open the dataset */
+ did = H5Dopen2(fid, "/DSET_NDATA_BT2", H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen");
+
+ for(i = 0; i < 24; i++)
+ wbuf[i] = (int)j+1;
+
+ /* Write to the dataset */
+ ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close the dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+ break;
+
+ case 2:
+ VERIFY(strategy, H5F_FSPACE_STRATEGY_FSM_AGGR, "H5Pget_file_space_strategy");
+ VERIFY(persist, H5F_FREE_SPACE_PERSIST_DEF, "H5Pget_file_space_strategy");
+ VERIFY(threshold, 2, "H5Pget_file_space_strategy");
+
+ /* Open the dataset */
+ did = H5Dopen2(fid, "/DSET_NONE", H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen");
+
+ for(i = 0; i < 24; i++)
+ wbuf[i] = (int)j+1;
+
+ /* Write to the dataset */
+ ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close the dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+ break;
+
+ case 3:
+ VERIFY(strategy, H5F_FSPACE_STRATEGY_NONE, "H5Pget_file_space_strategy");
+ VERIFY(persist, H5F_FREE_SPACE_PERSIST_DEF, "H5Pget_file_space_strategy");
+ VERIFY(threshold, H5F_FREE_SPACE_THRESHOLD_DEF, "H5Pget_file_space_strategy");
+
+ /* Open the dataset */
+ did = H5Dopen2(fid, "/GROUP/DSET_NDATA_EA", H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen");
+
+ for(i = 0; i < 24; i++)
+ wbuf[i] = (int)j+1;
+
+ /* Write to the dataset */
+ ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close the dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+ break;
+
+ case 4:
+ VERIFY(strategy, H5F_FSPACE_STRATEGY_AGGR, "H5Pget_file_space_strategy");
+ VERIFY(persist, H5F_FREE_SPACE_PERSIST_DEF, "H5Pget_file_space_strategy");
+ VERIFY(threshold, H5F_FREE_SPACE_THRESHOLD_DEF, "H5Pget_file_space_strategy");
+
+ /* Open the dataset */
+ did = H5Dopen2(fid, "/GROUP/DSET_NDATA_FA", H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen");
+
+ for(i = 0; i < 24; i++)
+ wbuf[i] = (int)j+1;
+
+ /* Write to the dataset */
+ ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close the dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+ break;
+ case 5:
+ VERIFY(strategy, H5F_FSPACE_STRATEGY_FSM_AGGR, "H5Pget_file_space_strategy");
+ VERIFY(persist, H5F_FREE_SPACE_PERSIST_DEF, "H5Pget_file_space_strategy");
+ VERIFY(threshold, H5F_FREE_SPACE_THRESHOLD_DEF, "H5Pget_file_space_strategy");
+
+ /* Open the dataset */
+ did = H5Dopen2(fid, "/GROUP/DSET_NDATA_NONE", H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen");
+
+ for(i = 0; i < 24; i++)
+ wbuf[i] = (int)j+1;
+
+ /* Write to the dataset */
+ ret = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, wbuf);
+ CHECK(ret, FAIL, "H5Dwrite");
+
+ /* Close the dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+ break;
+
+ default:
+ break;
+ }
+
+ /* Close the plist */
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ /* Re-Open the file */
+ fid = H5Fopen(FILE5, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ switch(j) {
+ case 0:
+ /* Open and read the dataset */
+ did = H5Dopen2(fid, "/DSET_EA", H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen");
+
+ ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Verify the data read is correct */
+ for(i = 0; i < 24; i++)
+ VERIFY(rdbuf[i], j+1, "test_compatible");
+
+ /* Close the dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+ break;
+
+ case 1:
+ /* Open and read the dataset */
+ did = H5Dopen2(fid, "/DSET_NDATA_BT2", H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen");
+
+ ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Verify the data read is correct */
+ for(i = 0; i < 24; i++)
+ VERIFY(rdbuf[i], j+1, "test_compatible");
+
+ /* Close the dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+ break;
+
+ case 2:
+ /* Open and read the dataset */
+ did = H5Dopen2(fid, "/DSET_NONE", H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen");
+
+ ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Verify the data read is correct */
+ for(i = 0; i < 24; i++)
+ VERIFY(rdbuf[i], j+1, "test_compatible");
+
+ /* Close the dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+ break;
+
+ case 3:
+ /* Open and read the dataset */
+ did = H5Dopen2(fid, "/GROUP/DSET_NDATA_EA", H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen");
+
+ ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Verify the data read is correct */
+ for(i = 0; i < 24; i++)
+ VERIFY(rdbuf[i], j+1, "test_compatible");
+
+ /* Close the dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+ break;
+
+ case 4:
+
+ /* Open and read the dataset */
+ did = H5Dopen2(fid, "/GROUP/DSET_NDATA_FA", H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen");
+
+ ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Verify the data read is correct */
+ for(i = 0; i < 24; i++)
+ VERIFY(rdbuf[i], j+1, "test_compatible");
+
+ /* Close the dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+ break;
+
+ case 5:
+
+ /* Open and read the dataset */
+ did = H5Dopen2(fid, "/GROUP/DSET_NDATA_NONE", H5P_DEFAULT);
+ CHECK(did, FAIL, "H5Dopen");
+
+ ret = H5Dread(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, &rdbuf);
+ CHECK(ret, FAIL, "H5Dread");
+
+ /* Verify the data read is correct */
+ for(i = 0; i < 24; i++)
+ VERIFY(rdbuf[i], j+1, "test_compatible");
+
+ /* Close the dataset */
+ ret = H5Dclose(did);
+ CHECK(ret, FAIL, "H5Dclose");
+ break;
+
+ default:
+ break;
+ }
+
+ /* Close the file */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+ } /* end for */
+
+} /* test_filespace_1_10_0_compatible */
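+
+/*
+ * Editorial note: as exercised by the cases above, the 1.10.0 file space
+ * strategies map onto the current ones roughly as follows --
+ *   H5F_FILE_SPACE_ALL          -> H5F_FSPACE_STRATEGY_FSM_AGGR (persist FALSE)
+ *   H5F_FILE_SPACE_ALL_PERSIST  -> H5F_FSPACE_STRATEGY_FSM_AGGR (persist TRUE)
+ *   H5F_FILE_SPACE_AGGR_VFD     -> H5F_FSPACE_STRATEGY_AGGR
+ *   H5F_FILE_SPACE_VFD          -> H5F_FSPACE_STRATEGY_NONE
+ */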
+
+/****************************************************************
+**
+** test_filespace_round_compatible():
+** Verify that the trunk can open, read and modify these files--
+** 1) They are initially created (via gen_filespace.c) in the trunk
+** with combinations of file space strategies, default/non-default
+** threshold, and file space paging enabled/disabled.
+** The library creates the file space info message with
+** "mark if unknown" in these files.
+** 2) They are copied to the 1.8 branch, and are opened/read/modified
+** there via test_filespace_compatible() in test/tfile.c.
+** The 1.8 library marks the file space info message as "unknown"
+** in these files.
+** 3) They are then copied back from the 1.8 branch to the trunk for
+** compatibility testing via this routine.
+** 4) Upon encountering the file space info message which is marked
+** as "unknown", the library will use the default file space management
+** from then on: non-persistent free-space managers, default threshold,
+** and non-paging file space.
+**
+****************************************************************/
+static void
+test_filespace_round_compatible(void)
+{
+ hid_t fid = -1; /* File id */
+ hid_t fcpl = -1; /* File creation property list ID */
+ unsigned j; /* Local index variable */
+ H5F_fspace_strategy_t strategy; /* File space strategy */
+ hbool_t persist; /* Persist free-space or not */
+ hsize_t threshold; /* Free-space section threshold */
+ hssize_t free_space; /* Amount of free space in the file */
+ int status; /* Status from copying the existing file */
+ herr_t ret; /* Return value */
+
+ /* Output message about test being performed */
+ MESSAGE(5, ("File space compatibility testing for files from trunk to 1_8 to trunk\n"));
+
+ for(j = 0; j < NELMTS(FSPACE_FILENAMES); j++) {
+ /* Make a copy of the test file */
+ status = h5_make_local_copy(FSPACE_FILENAMES[j], FILE5);
+ CHECK(status, FAIL, "h5_make_local_copy");
+
+ /* Open the temporary test file */
+ fid = H5Fopen(FILE5, H5F_ACC_RDWR, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ /* Get the file's creation property list */
+ fcpl = H5Fget_create_plist(fid);
+ CHECK(fcpl, FAIL, "H5Fget_create_plist");
+
+ ret = H5Pget_file_space_strategy(fcpl, &strategy, &persist, &threshold);
+ CHECK(ret, FAIL, "H5Pget_file_space_strategy");
+ VERIFY(strategy, H5F_FSPACE_STRATEGY_FSM_AGGR, "H5Pget_file_space_strategy");
+ VERIFY(persist, FALSE, "H5Pget_file_space_strategy");
+ VERIFY(threshold, 1, "H5Pget_file_space_strategy");
+
+ /* There should not be any free space in the file */
+ free_space = H5Fget_freespace(fid);
+ CHECK(free_space, FAIL, "H5Fget_freespace");
+ VERIFY(free_space, (hssize_t)0, "H5Fget_freespace");
+
+ /* Closing */
+ ret = H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+ } /* end for */
+
+} /* test_filespace_round_compatible */
+
+
+/****************************************************************
+**
** test_libver_bounds_real():
** Verify that a file created and modified with the
** specified libver bounds has the specified object header
@@ -3950,6 +5045,9 @@ test_deprec(void)
{
hid_t file; /* File IDs for old & new files */
hid_t fcpl; /* File creation property list */
+ hid_t fapl; /* File access property list */
+ hid_t new_fapl; /* File access property list of the created file */
+ hsize_t align; /* Alignment retrieved from the file's FAPL */
unsigned super; /* Superblock version # */
unsigned freelist; /* Free list version # */
unsigned stab; /* Symbol table entry version # */
@@ -4002,17 +5100,25 @@ test_deprec(void)
CHECK(fcpl, FAIL, "H5Pcreate");
/* Set a property in the FCPL that will push the superblock version up */
- ret = H5Pset_file_space(fcpl, H5F_FILE_SPACE_VFD, (hsize_t)0);
- CHECK(ret, FAIL, "H5Pset_file_space");
+ ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, TRUE, (hsize_t)0);
+ CHECK(ret, FAIL, "H5Pset_file_space_strategy");
+ ret = H5Pset_file_space_page_size(fcpl, (hsize_t)512);
+ CHECK(ret, FAIL, "H5Pset_file_space_page_size");
+
+ fapl = H5Pcreate(H5P_FILE_ACCESS);
+ CHECK(fapl, FAIL, "H5Pcreate");
+ ret = H5Pset_alignment(fapl, (hsize_t)1, (hsize_t)1024);
+ CHECK(ret, FAIL, "H5Pset_alignment");
/* Creating a file with the non-default file creation property list should
* create a version 2 superblock
*/
/* Create file with custom file creation property list */
- file= H5Fcreate(FILE1, H5F_ACC_TRUNC , fcpl, H5P_DEFAULT);
+ file= H5Fcreate(FILE1, H5F_ACC_TRUNC , fcpl, fapl);
CHECK(file, FAIL, "H5Fcreate");
+ new_fapl = H5Fget_access_plist(file);
+ CHECK(new_fapl, FAIL, "H5Fget_access_plist");
+ ret = H5Pget_alignment(new_fapl, NULL, &align);
+ CHECK(ret, FAIL, "H5Pget_alignment");
+
/* Close FCPL */
ret=H5Pclose(fcpl);
CHECK(ret, FAIL, "H5Pclose");
@@ -4020,7 +5126,7 @@ test_deprec(void)
/* Get the file's version information */
ret = H5Fget_info1(file, &finfo);
CHECK(ret, FAIL, "H5Fget_info1");
- VERIFY(finfo.super_ext_size, 40,"H5Fget_info1");
+ VERIFY(finfo.super_ext_size, 152,"H5Fget_info1");
VERIFY(finfo.sohm.hdr_size, 0,"H5Fget_info1");
VERIFY(finfo.sohm.msgs_info.index_size, 0,"H5Fget_info1");
VERIFY(finfo.sohm.msgs_info.heap_size, 0,"H5Fget_info1");
@@ -4052,7 +5158,7 @@ test_deprec(void)
/* Get the file's version information */
ret = H5Fget_info1(file, &finfo);
CHECK(ret, FAIL, "H5Fget_info1");
- VERIFY(finfo.super_ext_size, 40,"H5Fget_info1");
+ VERIFY(finfo.super_ext_size, 152,"H5Fget_info1");
VERIFY(finfo.sohm.hdr_size, 0,"H5Fget_info1");
VERIFY(finfo.sohm.msgs_info.index_size, 0,"H5Fget_info1");
VERIFY(finfo.sohm.msgs_info.heap_size, 0,"H5Fget_info1");
@@ -4076,6 +5182,81 @@ test_deprec(void)
/* Close file */
ret=H5Fclose(file);
CHECK(ret, FAIL, "H5Fclose");
+
+ { /* Test deprecated H5Pget/set_file_space() */
+
+ H5F_file_space_type_t old_strategy;
+ hsize_t old_threshold;
+ hid_t fid;
+ hid_t ffcpl;
+
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+
+ ret = H5Pget_file_space(fcpl, &old_strategy, &old_threshold);
+ CHECK(ret, FAIL, "H5Pget_file_space");
+ VERIFY(old_strategy, H5F_FILE_SPACE_ALL, "H5Pget_file_space");
+ VERIFY(old_threshold, H5F_FREE_SPACE_THRESHOLD_DEF, "H5Pget_file_space");
+
+ /* Set file space strategy and free space section threshold */
+ ret = H5Pset_file_space(fcpl, H5F_FILE_SPACE_ALL_PERSIST, (hsize_t)0);
+ CHECK(ret, FAIL, "H5Pget_file_space");
+
+ /* Get the file space info from the creation property */
+ ret = H5Pget_file_space(fcpl, &old_strategy, &old_threshold);
+ CHECK(ret, FAIL, "H5Pget_file_space");
+ VERIFY(old_strategy, H5F_FILE_SPACE_ALL_PERSIST, "H5Pget_file_space");
+ VERIFY(old_threshold, H5F_FREE_SPACE_THRESHOLD_DEF, "H5Pget_file_space");
+
+ ret = H5Pset_file_space(fcpl, H5F_FILE_SPACE_DEFAULT, (hsize_t)3);
+ CHECK(ret, FAIL, "H5Pset_file_space");
+
+ ret = H5Pget_file_space(fcpl, &old_strategy, &old_threshold);
+ CHECK(ret, FAIL, "H5Pget_file_space");
+ VERIFY(old_strategy, H5F_FILE_SPACE_ALL_PERSIST, "H5Pget_file_space");
+ VERIFY(old_threshold, 3, "H5Pget_file_space");
+
+ /* Create a file */
+ fid = H5Fcreate(FILE1, H5F_ACC_TRUNC , fcpl, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fcreate");
+
+ old_strategy = H5F_FILE_SPACE_DEFAULT;
+ old_threshold = 0;
+ ffcpl = H5Fget_create_plist(fid);
+ ret = H5Pget_file_space(ffcpl, &old_strategy, &old_threshold);
+ CHECK(ret, FAIL, "H5Pget_file_space");
+ VERIFY(old_strategy, H5F_FILE_SPACE_ALL_PERSIST, "H5Pget_file_space");
+ VERIFY(old_threshold, 3, "H5Pget_file_space");
+
+ /* Close file */
+ ret=H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+
+ ret = H5Pclose(ffcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ /* Reopen the file */
+ fid = H5Fopen(FILE1, H5F_ACC_RDONLY, H5P_DEFAULT);
+ CHECK(fid, FAIL, "H5Fopen");
+
+ old_strategy = H5F_FILE_SPACE_DEFAULT;
+ old_threshold = 0;
+ ffcpl = H5Fget_create_plist(fid);
+ ret = H5Pget_file_space(ffcpl, &old_strategy, &old_threshold);
+ CHECK(ret, FAIL, "H5Pget_file_space");
+ VERIFY(old_strategy, H5F_FILE_SPACE_ALL_PERSIST, "H5Pget_file_space");
+ VERIFY(old_threshold, 3, "H5Pget_file_space");
+
+ ret = H5Pclose(ffcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+
+ ret=H5Fclose(fid);
+ CHECK(ret, FAIL, "H5Fclose");
+ }
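+
+ /* For reference only (kept compiled out): a sketch of the non-deprecated way
+ * to request persistent free-space tracking, which is roughly what the
+ * deprecated H5F_FILE_SPACE_ALL_PERSIST setting exercised above maps to.
+ * The exact mapping is an assumption here, not something this test verifies.
+ */
+#if 0
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ CHECK(fcpl, FAIL, "H5Pcreate");
+ ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, TRUE, (hsize_t)1);
+ CHECK(ret, FAIL, "H5Pset_file_space_strategy");
+ ret = H5Pclose(fcpl);
+ CHECK(ret, FAIL, "H5Pclose");
+#endif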
+
} /* test_deprec */
#endif /* H5_NO_DEPRECATED_SYMBOLS */
@@ -4087,40 +5268,58 @@ test_deprec(void)
void
test_file(void)
{
+ const char *env_h5_drvr; /* File Driver value from environment */
+
/* Output message about test being performed */
MESSAGE(5, ("Testing Low-Level File I/O\n"));
- test_file_create(); /* Test file creation(also creation templates)*/
- test_file_open(); /* Test file opening */
- test_file_reopen(); /* Test file reopening */
- test_file_close(); /* Test file close behavior */
- test_get_file_id(); /* Test H5Iget_file_id */
- test_get_obj_ids(); /* Test H5Fget_obj_ids for Jira Issue 8528 */
- test_file_perm(); /* Test file access permissions */
- test_file_perm2(); /* Test file access permission again */
- test_file_freespace(); /* Test file free space information */
- test_file_ishdf5(); /* Test detecting HDF5 files correctly */
- test_file_open_dot(); /* Test opening objects with "." for a name */
- test_file_open_overlap(); /* Test opening files in an overlapping manner */
- test_file_getname(); /* Test basic H5Fget_name() functionality */
- test_file_double_root_open(); /* Test opening root group from two files works properly */
- test_file_double_group_open(); /* Test opening same group from two files works properly */
- test_file_double_dataset_open(); /* Test opening same dataset from two files works properly */
- test_file_double_datatype_open(); /* Test opening same named datatype from two files works properly */
+ /* Get the VFD to use */
+ env_h5_drvr = HDgetenv("HDF5_DRIVER");
+ if(env_h5_drvr == NULL)
+ env_h5_drvr = "nomatch";
+
+ test_file_create(); /* Test file creation(also creation templates)*/
+ test_file_open(); /* Test file opening */
+ test_file_reopen(); /* Test file reopening */
+ test_file_close(); /* Test file close behavior */
+ test_get_file_id(); /* Test H5Iget_file_id */
+ test_get_obj_ids(); /* Test H5Fget_obj_ids for Jira Issue 8528 */
+ test_file_perm(); /* Test file access permissions */
+ test_file_perm2(); /* Test file access permission again */
+ test_file_ishdf5(); /* Test detecting HDF5 files correctly */
+ test_file_open_dot(); /* Test opening objects with "." for a name */
+ test_file_open_overlap(); /* Test opening files in an overlapping manner */
+ test_file_getname(); /* Test basic H5Fget_name() functionality */
+ test_file_double_root_open(); /* Test opening root group from two files works properly */
+ test_file_double_group_open(); /* Test opening same group from two files works properly */
+ test_file_double_dataset_open(); /* Test opening same dataset from two files works properly */
+ test_file_double_datatype_open(); /* Test opening same named datatype from two files works properly */
test_file_double_file_dataset_open(TRUE);
test_file_double_file_dataset_open(FALSE);
- test_userblock_file_size(); /* Tests that files created with a userblock have the correct size */
- test_cached_stab_info(); /* Tests that files are created with cached stab info in the superblock */
- test_rw_noupdate(); /* Test to ensure that RW permissions don't write the file unless dirtied */
- test_userblock_alignment(); /* Tests that files created with a userblock and alignment interact properly */
- test_filespace_sects(); /* Test file free space section information */
- test_filespace_info(); /* Test file creation public routines:H5Pget/set_file_space */
- test_filespace_compatible();/* Test compatibility for file space management */
- test_libver_bounds(); /* Test compatibility for file space management */
- test_libver_macros(); /* Test the macros for library version comparison */
- test_libver_macros2(); /* Test the macros for library version comparison */
+ test_userblock_file_size(); /* Tests that files created with a userblock have the correct size */
+ test_cached_stab_info(); /* Tests that files are created with cached stab info in the superblock */
+ test_rw_noupdate(); /* Test to ensure that RW permissions don't write the file unless dirtied */
+ test_userblock_alignment(); /* Tests that files created with a userblock and alignment interact properly */
+ test_userblock_alignment_paged(); /* Tests files created with a userblock and alignment (via paged aggregation) interact properly */
+ test_filespace_info(env_h5_drvr); /* Test file creation public routines: */
+ /* H5Pget/set_file_space_strategy() & H5Pget/set_file_space_page_size() */
+ /* Skipped testing for multi/split drivers */
+ test_file_freespace(env_h5_drvr); /* Test file public routine H5Fget_freespace() */
+ /* Skipped testing for multi/split drivers */
+ /* Setup for multi/split drivers are there already */
+ test_sects_freespace(env_h5_drvr, TRUE); /* Test file public routine H5Fget_free_sections() for new format */
+ /* Skipped testing for multi/split drivers */
+ /* Setup for multi/split drivers are there already */
+ test_sects_freespace(env_h5_drvr, FALSE); /* Test file public routine H5Fget_free_sections() */
+ /* Skipped testing for multi/split drivers */
+ test_filespace_compatible(); /* Test compatibility for file space management */
+ test_filespace_round_compatible(); /* Test file space compatibility for files moved from trunk to 1.8 and back to trunk */
+ test_filespace_1_10_0_compatible(); /* Test file space compatibility for files created by release 1.10.0 */
+ test_libver_bounds(); /* Test setting library version bounds */
+ test_libver_macros(); /* Test the macros for library version comparison */
+ test_libver_macros2(); /* Test the macros for library version comparison */
#ifndef H5_NO_DEPRECATED_SYMBOLS
- test_deprec(); /* Test deprecated routines */
+ test_deprec(); /* Test deprecated routines */
#endif /* H5_NO_DEPRECATED_SYMBOLS */
} /* test_file() */
diff --git a/test/tgenprop.c b/test/tgenprop.c
index e6f9692..0e6b5c9 100644
--- a/test/tgenprop.c
+++ b/test/tgenprop.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/***********************************************************
diff --git a/test/th5o.c b/test/th5o.c
index c2c4034..4baac20 100644
--- a/test/th5o.c
+++ b/test/th5o.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/***********************************************************
diff --git a/test/th5s.c b/test/th5s.c
index 664c23a..c63320a 100644
--- a/test/th5s.c
+++ b/test/th5s.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/***********************************************************
diff --git a/test/theap.c b/test/theap.c
index 9c509a1..3c23025 100644
--- a/test/theap.c
+++ b/test/theap.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/tid.c b/test/tid.c
index 6570b1c..494ee60 100644
--- a/test/tid.c
+++ b/test/tid.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Test user-created identifiers (hid_t's) and identifier types. */
diff --git a/test/titerate.c b/test/titerate.c
index 3004d62..aad62c9 100644
--- a/test/titerate.c
+++ b/test/titerate.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/***********************************************************
diff --git a/test/tmeta.c b/test/tmeta.c
index 43fdeac..7eeb493 100644
--- a/test/tmeta.c
+++ b/test/tmeta.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/***********************************************************
diff --git a/test/tmisc.c b/test/tmisc.c
index f93500d..dc69e18 100644
--- a/test/tmisc.c
+++ b/test/tmisc.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/***********************************************************
@@ -1819,8 +1817,9 @@ test_misc11(void)
unsigned sym_lk; /* Symbol table B-tree leaf 'K' value */
unsigned nindexes; /* Shared message number of indexes */
H5F_info2_t finfo; /* global information about file */
- H5F_file_space_type_t strategy; /* File/free space strategy */
+ H5F_fspace_strategy_t strategy; /* File space strategy */
hsize_t threshold; /* Free-space section threshold */
+ hbool_t persist; /* To persist free-space or not */
herr_t ret; /* Generic return value */
/* Output message about test being performed */
@@ -1878,11 +1877,11 @@ test_misc11(void)
ret=H5Pset_shared_mesg_nindexes(fcpl,MISC11_NINDEXES);
CHECK(ret, FAIL, "H5Pset_shared_mesg");
- ret = H5Pset_file_space(fcpl, H5F_FILE_SPACE_VFD, (hsize_t)0);
+ ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_NONE, FALSE, (hsize_t)1);
CHECK(ret, FAIL, "H5Pset_file_space");
/* Creating a file with the non-default file creation property list should
- * create a version 1 superblock
+ * create a version 2 superblock
*/
/* Create file with custom file creation property list */
@@ -1942,10 +1941,11 @@ test_misc11(void)
CHECK(ret, FAIL, "H5Pget_shared_mesg_nindexes");
VERIFY(nindexes, MISC11_NINDEXES, "H5Pget_shared_mesg_nindexes");
- ret = H5Pget_file_space(fcpl, &strategy, &threshold);
- CHECK(ret, FAIL, "H5Pget_file_space");
- VERIFY(strategy, 4, "H5Pget_file_space");
- VERIFY(threshold, 1, "H5Pget_file_space");
+ ret = H5Pget_file_space_strategy(fcpl, &strategy, &persist, &threshold);
+ CHECK(ret, FAIL, "H5Pget_file_space_strategy");
+ VERIFY(strategy, 3, "H5Pget_file_space_strategy");
+ VERIFY(persist, FALSE, "H5Pget_file_space_strategy");
+ VERIFY(threshold, 1, "H5Pget_file_space_strategy");
/* Close file */
ret=H5Fclose(file);
diff --git a/test/trefer.c b/test/trefer.c
index 433239a..05c0130 100644
--- a/test/trefer.c
+++ b/test/trefer.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/***********************************************************
diff --git a/test/trefstr.c b/test/trefstr.c
index dd76cc1..40ce344 100644
--- a/test/trefstr.c
+++ b/test/trefstr.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/tselect.c b/test/tselect.c
index 9230d8b..85c21bb 100644
--- a/test/tselect.c
+++ b/test/tselect.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/***********************************************************
diff --git a/test/tskiplist.c b/test/tskiplist.c
index f30948e..b9b00df 100644
--- a/test/tskiplist.c
+++ b/test/tskiplist.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/tsohm.c b/test/tsohm.c
index 9d56fcc..133b196 100644
--- a/test/tsohm.c
+++ b/test/tsohm.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/***********************************************************
diff --git a/test/ttime.c b/test/ttime.c
index 16acd3e..2eb2117 100644
--- a/test/ttime.c
+++ b/test/ttime.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/***********************************************************
diff --git a/test/ttsafe.c b/test/ttsafe.c
index bfd24ba..ae4d7c5 100644
--- a/test/ttsafe.c
+++ b/test/ttsafe.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/ttsafe.h b/test/ttsafe.h
index f2e9e86..6cf449f 100644
--- a/test/ttsafe.h
+++ b/test/ttsafe.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/ttsafe_acreate.c b/test/ttsafe_acreate.c
index cc3f405..42d0851 100644
--- a/test/ttsafe_acreate.c
+++ b/test/ttsafe_acreate.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/********************************************************************
diff --git a/test/ttsafe_cancel.c b/test/ttsafe_cancel.c
index 7f8cd53..4bbb326 100644
--- a/test/ttsafe_cancel.c
+++ b/test/ttsafe_cancel.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/********************************************************************
diff --git a/test/ttsafe_dcreate.c b/test/ttsafe_dcreate.c
index df51f72..65051c7 100644
--- a/test/ttsafe_dcreate.c
+++ b/test/ttsafe_dcreate.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/********************************************************************
diff --git a/test/ttsafe_error.c b/test/ttsafe_error.c
index a2f25db..889d64b 100644
--- a/test/ttsafe_error.c
+++ b/test/ttsafe_error.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/********************************************************************
diff --git a/test/ttst.c b/test/ttst.c
index a7c43a2..b26d582 100644
--- a/test/ttst.c
+++ b/test/ttst.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/test/tunicode.c b/test/tunicode.c
index 2bba9ae..85f5af0 100644
--- a/test/tunicode.c
+++ b/test/tunicode.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* Unicode test */
diff --git a/test/tvlstr.c b/test/tvlstr.c
index dbc3083..7b520f2 100644
--- a/test/tvlstr.c
+++ b/test/tvlstr.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/***********************************************************
diff --git a/test/tvltypes.c b/test/tvltypes.c
index d00519d..5121a66 100644
--- a/test/tvltypes.c
+++ b/test/tvltypes.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/***********************************************************
diff --git a/test/twriteorder.c b/test/twriteorder.c
new file mode 100644
index 0000000..0e1d0d4
--- /dev/null
+++ b/test/twriteorder.c
@@ -0,0 +1,461 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/***********************************************************
+*
+* Test program: twriteorder
+*
+* Test to verify that the write order is strictly consistent.
+* The SWMR feature requires that the order of write is strictly consistent.
+* "Strict consistency in computer science is the most stringent consistency
+* model. It says that a read operation has to return the result of the
+* latest write operation which occurred on that data item."--
+* (http://en.wikipedia.org/wiki/Linearizability#Definition_of_linearizability).
+* This is also an alternative form of what POSIX write require that after a
+* write operation has returned success, all reads issued afterward should
+* get the same data the write has written.
+*
+* Created: Albert Cheng, 2013/8/28.
+* Modified:
+*************************************************************/
+
+/***********************************************************
+*
+* Algorithm
+*
+* The test simulates what SWMR does by writing chained blocks and seeing if
+* they can be read back correctly.
+* There is one writer process and multiple reader processes.
+* The file is divided into 2KB partitions. The writer then writes one chained
+* block, each 1KB in size, in every partition after the first partition.
+* Each chained block has this structure:
+* Bytes 0-3: offset address of its child block. The last child uses 0 as NULL.
+* Bytes 4-1023: some artificial data.
+* The child block address of Block 1 is NULL (0).
+* The child block address of Block 2 is the offset address of Block 1.
+* The child block address of Block n is the offset address of Block n-1.
+* After all n blocks are written, the offset address of Block n is written
+* to the offset 0 of the first partition.
+* Therefore, by the time the offset address of Block n is written to this
+* position, all n chain-linked blocks have been written.
+*
+* The other reader processes will try to read the address value at the
+* offset 0. The value is initially NULL(0). When it changes to non-zero,
+* it signifies the writer process has written all the chain-link blocks
+* and they are ready for the reader processes to access.
+*
+* If the system in which the writer and reader processes run preserves the
+* order of writes, the readers will always get all chain-linked blocks
+* correctly. If the order of write is not maintained, some reader processes
+* may find unexpected block data.
+*
+*************************************************************/
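+
+/* Illustrative sketch only (kept compiled out): it mirrors the block address
+ * computation used by write_wo_file() below for the default sizes, and adds
+ * no new test logic; example_block_offset is a hypothetical helper, not part
+ * of the test.
+ */
+#if 0
+static int
+example_block_offset(int i)
+{
+    /* 2048 matches PARTITION_DFT defined below; block i (1 <= i < n) is placed
+     * in partition i, and its first 4 bytes hold the offset of block i-1
+     * (block 1 stores 0, i.e. NULL, as its child address) */
+    return i * 2048 + i;
+}
+#endif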
+
+#include "h5test.h"
+
+/* This test uses many POSIX things that are not available on
+ * Windows. We're using a check for fork(2) here as a proxy for
+ * all POSIX/Unix/Linux things until this test can be made
+ * more platform-independent.
+ */
+#ifdef H5_HAVE_FORK
+
+#define DATAFILE "twriteorder.dat"
+/* #define READERS_MAX 10 */ /* max number of readers */
+#define BLOCKSIZE_DFT 1024 /* 1KB */
+#define PARTITION_DFT 2048 /* 2KB */
+#define NLINKEDBLOCKS_DFT 512 /* default 512 */
+#define SIZE_BLKADDR 4 /* expected sizeof blkaddr */
+#define Hgoto_error(val) {ret_value=val; goto done;}
+
+/* type declarations */
+typedef enum part_t {
+ UC_READWRITE =0, /* both writer and reader */
+ UC_WRITER, /* writer only */
+ UC_READER /* reader only */
+} part_t;
+
+/* prototypes */
+int create_wo_file(void);
+int write_wo_file(void);
+int read_wo_file(void);
+void usage(const char *prog);
+int setup_parameters(int argc, char * const argv[]);
+int parse_option(int argc, char * const argv[]);
+
+/* Global Variable definitions */
+const char *progname_g="twriteorder"; /* program name */
+int write_fd_g;
+int blocksize_g, part_size_g, nlinkedblock_g;
+part_t launch_g;
+
+/* Function definitions */
+
+/* Show help page */
+void
+usage(const char *prog)
+{
+ fprintf(stderr, "usage: %s [OPTIONS]\n", prog);
+ fprintf(stderr, " OPTIONS\n");
+ fprintf(stderr, " -h Print a usage message and exit\n");
+ fprintf(stderr, " -l w|r launch writer or reader only. [default: launch both]\n");
+ fprintf(stderr, " -b N Block size [default: %d]\n", BLOCKSIZE_DFT);
+ fprintf(stderr, " -p N Partition size [default: %d]\n", PARTITION_DFT);
+ fprintf(stderr, " -n N Number of linked blocks [default: %d]\n", NLINKEDBLOCKS_DFT);
+ fprintf(stderr, " where N is an integer value\n");
+ fprintf(stderr, "\n");
+}
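+
+/* Example invocations (illustrative):
+ *   ./twriteorder                 run both writer and reader with defaults
+ *   ./twriteorder -l w -n 512     run the writer only, with 512 linked blocks
+ *   ./twriteorder -l r            run the reader only against an existing data file
+ */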
+
+/* Setup test parameters by parsing command line options.
+ * Setup default values if not set by options. */
+int
+parse_option(int argc, char * const argv[])
+{
+ int ret_value=0;
+ int c;
+ /* command line options: See function usage for a description */
+ const char *cmd_options = "hb:l:n:p:";
+
+ /* suppress getopt from printing error */
+ opterr = 0;
+
+ while (1){
+ c = getopt (argc, argv, cmd_options);
+ if (-1 == c)
+ break;
+ switch (c) {
+ case 'h':
+ usage(progname_g);
+ exit(0);
+ break;
+ case 'b': /* block size */
+ if ((blocksize_g = atoi(optarg)) <= 0){
+ fprintf(stderr, "bad blocksize %s, must be a positive integer\n", optarg);
+ usage(progname_g);
+ Hgoto_error(-1);
+ };
+ break;
+ case 'n': /* number of linked blocks */
+ if ((nlinkedblock_g = atoi(optarg)) < 2){
+ fprintf(stderr, "bad number of linked blocks %s, must be greater than 1.\n", optarg);
+ usage(progname_g);
+ Hgoto_error(-1);
+ };
+ break;
+ case 'p': /* partition size */
+ if ((part_size_g = atoi(optarg)) <= 0){
+ fprintf(stderr, "bad partition size %s, must be a positive integer\n", optarg);
+ usage(progname_g);
+ Hgoto_error(-1);
+ };
+ break;
+ case 'l': /* launch reader or writer only */
+ switch (*optarg) {
+ case 'r': /* reader only */
+ launch_g = UC_READER;
+ break;
+ case 'w': /* writer only */
+ launch_g = UC_WRITER;
+ break;
+ default:
+ fprintf(stderr, "launch value(%c) should be w or r only.\n", *optarg);
+ usage(progname_g);
+ Hgoto_error(-1);
+ break;
+ }
+ printf("launch = %d\n", launch_g);
+ break;
+ case '?':
+ fprintf(stderr, "getopt returned '%c'.\n", c);
+ usage(progname_g);
+ Hgoto_error(-1);
+ default:
+ fprintf(stderr, "getopt returned unexpected value.\n");
+ fprintf(stderr, "Unexpected value is %d\n", c);
+ Hgoto_error(-1);
+ }
+ }
+
+ /* verify partition size must be >= blocksize */
+ if (part_size_g < blocksize_g ){
+ fprintf(stderr, "Blocksize %d should not be bigger than partition size %d\n",
+ blocksize_g, part_size_g);
+ Hgoto_error(-1);
+ }
+
+done:
+ /* All done. */
+ return(ret_value);
+}
+
+/* Setup parameters for the test case.
+ * Return: 0 succeed; -1 fail.
+ */
+int setup_parameters(int argc, char * const argv[])
+{
+ /* test case defaults */
+ blocksize_g = BLOCKSIZE_DFT;
+ part_size_g = PARTITION_DFT;
+ nlinkedblock_g = NLINKEDBLOCKS_DFT;
+ launch_g = UC_READWRITE;
+
+ /* parse options */
+ if (parse_option(argc, argv) < 0){
+ return(-1);
+ }
+
+ /* show parameters and return */
+ printf("blocksize = %ld\n", (long)blocksize_g);
+ printf("part_size = %ld\n", (long)part_size_g);
+ printf("nlinkedblock = %ld\n", (long)nlinkedblock_g);
+ printf("launch = %d\n", launch_g);
+ return(0);
+}
+
+/* Create the test file in an initial "empty" state, that is,
+ * partition 0 holds a null (0) child block address.
+ *
+ * Return: 0 succeed; -1 fail.
+ */
+int create_wo_file(void)
+{
+ int blkaddr=0; /* block address of the next linked block */
+ int ret_code;
+
+ /* Create the data file */
+ if ((write_fd_g = HDopen(DATAFILE, O_RDWR|O_TRUNC|O_CREAT, 0664)) < 0) {
+ printf("WRITER: error from open\n");
+ return -1;
+ }
+ blkaddr=0;
+ /* write it to partition 0 */
+ if ((ret_code=HDwrite(write_fd_g, &blkaddr, (size_t)SIZE_BLKADDR)) != SIZE_BLKADDR){
+ printf("blkaddr write failed\n");
+ return -1;
+ }
+
+ /* File initialized, return success */
+ return 0;
+}
+
+int write_wo_file(void)
+{
+ int blkaddr;
+ int blkaddr_old=0;
+ int i;
+ char buffer[BLOCKSIZE_DFT];
+ int ret_code;
+
+
+ /* write block 1, 2, ... */
+ for (i=1; i<nlinkedblock_g; i++){
+ /* calculate where to write this block */
+ blkaddr = i*part_size_g + i;
+ /* store old block address in byte 0-3 */
+ HDmemcpy(&buffer[0], &blkaddr_old, sizeof(blkaddr_old));
+ /* fill the rest with the lowest byte of i */
+ HDmemset(&buffer[4], i & 0xff, (size_t) (BLOCKSIZE_DFT-4));
+ /* write the block */
+#ifdef DEBUG
+ printf("writing block at %d\n", blkaddr);
+#endif
+ HDlseek(write_fd_g, (HDoff_t)blkaddr, SEEK_SET);
+ if ((ret_code=HDwrite(write_fd_g, buffer, (size_t)blocksize_g)) != blocksize_g){
+ printf("blkaddr write failed in partition %d\n", i);
+ return -1;
+ }
+ blkaddr_old = blkaddr;
+ }
+ /* write the last blkaddr in partition 0 */
+ HDlseek(write_fd_g, (HDoff_t)0, SEEK_SET);
+ if ((ret_code=HDwrite(write_fd_g, &blkaddr_old, (size_t)sizeof(blkaddr_old))) != sizeof(blkaddr_old)){
+ printf("blkaddr write failed in partition %d\n", 0);
+ return -1;
+ }
+
+ /* All writes done. Return success. */
+#ifdef DEBUG
+ printf("wrote %d blocks\n", nlinkedblock_g);
+#endif
+ return 0;
+}
+
+int read_wo_file(void)
+{
+ int read_fd;
+ int blkaddr=0;
+ int ret_code;
+ int linkedblocks_read=0;
+ char buffer[BLOCKSIZE_DFT];
+
+ /* Open the data file */
+ if ((read_fd = HDopen(DATAFILE, O_RDONLY, 0)) < 0) {
+ printf("READER: error from open\n");
+ return -1;
+ }
+ /* keep reading the initial block address until it is non-zero before proceeding. */
+ while (blkaddr == 0){
+ HDlseek(read_fd, (HDoff_t)0, SEEK_SET);
+ if ((ret_code=HDread(read_fd, &blkaddr, (size_t)sizeof(blkaddr))) != sizeof(blkaddr)){
+ printf("blkaddr read failed in partition %d\n", 0);
+ return -1;
+ }
+ }
+ linkedblocks_read++;
+
+ /* got a non-zero blkaddr. Proceed down the linked blocks. */
+#ifdef DEBUG
+ printf("got initial block address=%d\n", blkaddr);
+#endif
+ while (blkaddr != 0){
+ HDlseek(read_fd, (HDoff_t)blkaddr, SEEK_SET);
+ if ((ret_code=HDread(read_fd, buffer, (size_t)blocksize_g)) != blocksize_g){
+ printf("blkaddr read failed in partition %d\n", 0);
+ return -1;
+ }
+ linkedblocks_read++;
+ /* retrieve the block address in byte 0-3 */
+ HDmemcpy(&blkaddr, &buffer[0], sizeof(blkaddr));
+#ifdef DEBUG
+ printf("got next block address=%d\n", blkaddr);
+#endif
+ }
+
+#ifdef DEBUG
+ printf("read %d blocks\n", linkedblocks_read);
+#endif
+ return 0;
+}
+
+
+/* Overall Algorithm:
+ * Parse options from user;
+ * Generate/pre-create the test file needed and close it;
+ * fork: child processes become the reader processes;
+ * while parent process continues as the writer process;
+ * both run till ending conditions are met.
+ */
+int
+main(int argc, char *argv[])
+{
+ /*pid_t childpid[READERS_MAX];
+ int child_ret_value[READERS_MAX];*/
+ pid_t childpid=0;
+ int child_ret_value;
+ pid_t mypid, tmppid;
+ int child_status;
+ int child_wait_option=0;
+ int ret_value = 0;
+
+ /* initialization */
+ if (setup_parameters(argc, argv) < 0){
+ Hgoto_error(1);
+ }
+
+ /* ==============================================================*/
+ /* UC_READWRITE: create datafile, launch both reader and writer. */
+ /* UC_WRITER: create datafile, skip reader, launch writer. */
+ /* UC_READER: skip create, launch reader, exit. */
+ /* ==============================================================*/
+ /* ============*/
+ /* Create file */
+ /* ============*/
+ if (launch_g != UC_READER){
+ printf("Creating skeleton data file for test...\n");
+ if (create_wo_file() < 0){
+ fprintf(stderr, "***encounter error\n");
+ Hgoto_error(1);
+ }else
+ printf("File created.\n");
+ }
+ /* flush output before possible fork */
+ HDfflush(stdout);
+
+ if (launch_g==UC_READWRITE){
+ /* fork process */
+ if((childpid = fork()) < 0) {
+ perror("fork");
+ Hgoto_error(1);
+ };
+ };
+ mypid = getpid();
+
+ /* ============= */
+ /* launch reader */
+ /* ============= */
+ if (launch_g != UC_WRITER){
+ /* child process launch the reader */
+ if(0 == childpid) {
+ printf("%d: launch reader process\n", mypid);
+ if (read_wo_file() < 0){
+ fprintf(stderr, "read_wo_file encountered error\n");
+ exit(1);
+ }
+ /* Reader is done. Clean up by removing the data file */
+ HDremove(DATAFILE);
+ exit(0);
+ }
+ }
+
+ /* ============= */
+ /* launch writer */
+ /* ============= */
+ /* this process continues to launch the writer */
+#ifdef DEBUG
+ printf("%d: continue as the writer process\n", mypid);
+#endif
+ if (write_wo_file() < 0){
+ fprintf(stderr, "write_wo_file encountered error\n");
+ Hgoto_error(1);
+ }
+
+ /* ================================================ */
+ /* If readwrite, collect exit code of child process */
+ /* ================================================ */
+ if (launch_g == UC_READWRITE){
+ if ((tmppid = waitpid(childpid, &child_status, child_wait_option)) < 0){
+ perror("waitpid");
+ Hgoto_error(1);
+ }
+ if (WIFEXITED(child_status)){
+ if ((child_ret_value=WEXITSTATUS(child_status)) != 0){
+ printf("%d: child process exited with non-zero code (%d)\n",
+ mypid, child_ret_value);
+ Hgoto_error(2);
+ }
+ } else {
+ printf("%d: child process terminated abnormally\n", mypid);
+ Hgoto_error(2);
+ }
+ }
+
+done:
+ /* Print result and exit */
+ if (ret_value != 0){
+ printf("Error(s) encountered\n");
+ }else{
+ printf("All passed\n");
+ }
+
+ return(ret_value);
+}
+
+#else /* H5_HAVE_FORK */
+
+int
+main(void)
+{
+ HDfprintf(stderr, "Non-POSIX platform. Skipping.\n");
+ return EXIT_SUCCESS;
+} /* end main() */
+
+#endif /* H5_HAVE_FORK */
+
diff --git a/test/unlink.c b/test/unlink.c
index 660a155..f5754f8 100644
--- a/test/unlink.c
+++ b/test/unlink.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -2440,9 +2438,9 @@ int
main(void)
{
hid_t fapl, fapl2, file;
- int nerrors = 0;
+ int nerrors = 0;
char filename[1024];
- unsigned new_format;
+ unsigned new_format;
/* Metadata cache parameters */
int mdc_nelmts;
diff --git a/test/unregister.c b/test/unregister.c
index 878270f..a4bf24e 100644
--- a/test/unregister.c
+++ b/test/unregister.c
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic document set and is *
- * linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have access *
- * to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
* Programmer: Raymond Lu
diff --git a/test/use.h b/test/use.h
new file mode 100644
index 0000000..0885cc5
--- /dev/null
+++ b/test/use.h
@@ -0,0 +1,63 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * Use Case Header file: common definitions for use cases tests.
+ */
+#include "h5test.h"
+
+/* Macro definitions */
+#define Hgoto_error(val) {ret_value=val; goto done;}
+#define Hgoto_done {goto done;}
+#define Chunksize_DFT 256 /* chunksize default */
+#define ErrorReportMax 10 /* max number of errors reported */
+/* these two definitions must match each other */
+#define UC_DATATYPE H5T_NATIVE_SHORT /* use case HDF5 data type */
+#define UC_CTYPE short /* use case C data type */
+#define UC_RANK 3 /* use case dataset rank */
+
+/* Name of message file that is sent by the writer */
+#define WRITER_MESSAGE "USE_WRITER_MESSAGE"
+
+/* type declarations */
+typedef enum part_t {
+ UC_READWRITE =0, /* both writer and reader */
+ UC_WRITER, /* writer only */
+ UC_READER /* reader only */
+} part_t;
+
+typedef struct options_t {
+ hsize_t chunksize; /* chunks are chunksize^2 planes */
+ hsize_t chunkplanes; /* number of planes per chunk, default 1 */
+ hsize_t chunkdims[UC_RANK]; /* chunk dims are (chunkplanes, chunksize, chunksize) */
+ hsize_t dims[UC_RANK]; /* dataset initial dims */
+ hsize_t max_dims[UC_RANK]; /* dataset max dims */
+ hsize_t nplanes; /* number of planes to write, default proportional to chunksize */
+ char *filename; /* use case data filename */
+ part_t launch; /* launch writer, reader or both */
+ hbool_t use_swmr; /* use swmr open (1) or not */
+ int iterations; /* iterations, default 1 */
+} options_t;
+
+/* global variables declarations */
+extern options_t UC_opts; /* Use Case Options */
+extern const char *progname_g; /* Program name */
+
+/* prototype declarations */
+int parse_option(int argc, char * const argv[]);
+int setup_parameters(int argc, char * const argv[]);
+void show_parameters(void);
+void usage(const char *prog);
+int create_uc_file(void);
+int write_uc_file(hbool_t tosend);
+int read_uc_file(hbool_t towait);
+
diff --git a/test/use_append_chunk.c b/test/use_append_chunk.c
new file mode 100644
index 0000000..005eb51
--- /dev/null
+++ b/test/use_append_chunk.c
@@ -0,0 +1,231 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Use Case 1.7 Appending a single chunk
+ * Description:
+ * Appending a single chunk of raw data to a dataset along an unlimited
+ * dimension within a pre-created file and reading the new data back.
+ * Goal:
+ * Read data appended by the Writer to a pre-existing dataset in a
+ * file. The dataset has one or more unlimited dimensions. The data is
+ * appended by a hyperslab that is contained in one chunk (for example,
+ * appending 2-dim planes along the slowest changing dimension in the
+ * 3-dim dataset).
+ * Level:
+ * User Level
+ * Guarantees:
+ * o Readers will see the modified dimension sizes after the Writer
+ * finishes HDF5 metadata updates and issues H5Fflush or H5Oflush calls.
+ * o Readers will see newly appended data after the Writer finishes
+ * the flush operation.
+ *
+ * Preconditions:
+ *     o Readers are not allowed to modify the file.
+ *     o All datasets that are modified by the Writer exist when the Writer
+ *       opens the file.
+ *     o All datasets that are modified by the Writer exist when a Reader
+ *       opens the file.
+ *     o Data is written by a hyperslab contained in one chunk.
+ *
+ * Main Success Scenario:
+ * 1. An application creates a file with required objects (groups,
+ * datasets, and attributes).
+ * 2. The Writer application opens the file and datasets in the file
+ * and starts adding data along the unlimited dimension using a hyperslab
+ * selection that corresponds to an HDF5 chunk.
+ * 3. A Reader opens the file and a dataset in the file, and queries
+ * the sizes of the dataset; if the extent of the dataset has changed,
+ * reads the appended data back.
+ *
+ * Discussion points:
+ * 1. Since the new data is written to the file first, and the metadata
+ * update that adds a pointer to the newly written chunk is atomic and
+ * happens only after the chunk is on disk, only two things may happen
+ * to the Reader:
+ * o The Reader will not see new data.
+ * o The Reader will see all new data written by Writer.
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/* Created: Albert Cheng, 2013/5/28 */
+
+#include "h5test.h"
+
+/* This test uses many POSIX things that are not available on
+ * Windows. We're using a check for fork(2) here as a proxy for
+ * all POSIX/Unix/Linux things until this test can be made
+ * more platform-independent.
+ */
+#ifdef H5_HAVE_FORK
+
+#include "use.h"
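+
+/* A minimal sketch (illustration only, not called by this test) of the
+ * append step described in the use case above: grow the unlimited dimension
+ * by one plane, select that plane in the file dataspace, write it, and flush
+ * it so a concurrent SWMR reader can see it. The real loop lives in
+ * write_uc_file() in use_common.c; the function name and parameters here are
+ * hypothetical. Note that it updates the caller's extent[] array.
+ */
+static int
+append_one_plane_sketch(hid_t dsid, hid_t m_sid, hsize_t plane,
+    hsize_t extent[UC_RANK], const UC_CTYPE *buf)
+{
+    hid_t f_sid;            /* file dataspace of the dataset */
+    hsize_t start[UC_RANK]; /* offset of the plane to write */
+    hsize_t count[UC_RANK]; /* one full plane */
+
+    start[0] = plane; start[1] = 0; start[2] = 0;
+    count[0] = 1; count[1] = extent[1]; count[2] = extent[2];
+
+    /* grow the unlimited dimension to hold the new plane */
+    extent[0] = plane + 1;
+    if(H5Dset_extent(dsid, extent) < 0)
+        return -1;
+    /* re-get the (now larger) file dataspace and select the new plane */
+    if((f_sid = H5Dget_space(dsid)) < 0)
+        return -1;
+    if(H5Sselect_hyperslab(f_sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0)
+        return -1;
+    /* write the plane, then flush so a SWMR reader can see it */
+    if(H5Dwrite(dsid, UC_DATATYPE, m_sid, f_sid, H5P_DEFAULT, buf) < 0)
+        return -1;
+    if(H5Sclose(f_sid) < 0)
+        return -1;
+    return (H5Dflush(dsid) < 0) ? -1 : 0;
+}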
+
+/* Global Variable definitions */
+options_t UC_opts; /* Use Case Options */
+const char *progname_g="use_append_chunk"; /* program name */
+
+/* Setup parameters for the use case.
+ * Return: 0 succeed; -1 fail.
+ */
+int setup_parameters(int argc, char * const argv[])
+{
+ /* use case defaults */
+ HDmemset(&UC_opts, 0, sizeof(options_t));
+ UC_opts.chunksize = Chunksize_DFT;
+ UC_opts.use_swmr = TRUE; /* use swmr open */
+ UC_opts.iterations = 1;
+ UC_opts.chunkplanes = 1;
+
+ /* parse options */
+ if (parse_option(argc, argv) < 0)
+ return(-1);
+
+ /* set chunk dims */
+ UC_opts.chunkdims[0] = UC_opts.chunkplanes;
+ UC_opts.chunkdims[1] = UC_opts.chunkdims[2] = UC_opts.chunksize;
+
+ /* set dataset initial and max dims */
+ UC_opts.dims[0] = 0;
+ UC_opts.max_dims[0] = H5S_UNLIMITED;
+ UC_opts.dims[1] = UC_opts.dims[2] = UC_opts.max_dims[1] = UC_opts.max_dims[2] = UC_opts.chunksize;
+
+ /* set nplanes */
+ if (UC_opts.nplanes == 0)
+ UC_opts.nplanes = (hsize_t)UC_opts.chunksize;
+
+ /* show parameters and return */
+ show_parameters();
+ return(0);
+}
+
+
+/* Overall Algorithm:
+ * Parse options from user;
+ * Generate/pre-create the test file needed and close it;
+ * fork: the child process becomes the reader process,
+ * while the parent process continues as the writer process;
+ * both run until the ending conditions are met.
+ */
+int
+main(int argc, char *argv[])
+{
+ pid_t childpid=0;
+ pid_t mypid, tmppid;
+ int child_status;
+ int child_wait_option=0;
+ int ret_value = 0;
+ int child_ret_value;
+ hbool_t send_wait = FALSE;
+
+ /* initialization */
+ if (setup_parameters(argc, argv) < 0){
+ Hgoto_error(1);
+ }
+
+ /* Determine whether the writer message file needs to be sent/waited for */
+ if(UC_opts.launch == UC_READWRITE) {
+ HDunlink(WRITER_MESSAGE);
+ send_wait = TRUE;
+ }
+
+ /* ==============================================================*/
+ /* UC_READWRITE: create datafile, launch both reader and writer. */
+ /* UC_WRITER: create datafile, skip reader, launch writer. */
+ /* UC_READER: skip create, launch reader, exit. */
+ /* ==============================================================*/
+ /* ============*/
+ /* Create file */
+ /* ============*/
+ if (UC_opts.launch != UC_READER){
+ printf("Creating skeleton data file for test...\n");
+ if (create_uc_file() < 0){
+ fprintf(stderr, "***encountered error\n");
+ Hgoto_error(1);
+ }else
+ printf("File created.\n");
+ }
+
+ if (UC_opts.launch==UC_READWRITE){
+ /* fork process */
+ if((childpid = HDfork()) < 0) {
+ HDperror("fork");
+ Hgoto_error(1);
+ }
+ }
+ mypid = HDgetpid();
+
+ /* ============= */
+ /* launch reader */
+ /* ============= */
+ if (UC_opts.launch != UC_WRITER){
+ /* the child process launches the reader */
+ if(0 == childpid) {
+ printf("%d: launch reader process\n", mypid);
+ if (read_uc_file(send_wait) < 0){
+ fprintf(stderr, "read_uc_file encountered error\n");
+ exit(1);
+ }
+ exit(0);
+ }
+ }
+
+ /* ============= */
+ /* launch writer */
+ /* ============= */
+ /* this process continues to launch the writer */
+ printf("%d: continue as the writer process\n", mypid);
+ if (write_uc_file(send_wait) < 0){
+ fprintf(stderr, "write_uc_file encountered error\n");
+ Hgoto_error(1);
+ }
+
+ /* ================================================ */
+ /* If readwrite, collect exit code of child process */
+ /* ================================================ */
+ if (UC_opts.launch == UC_READWRITE){
+ if ((tmppid = HDwaitpid(childpid, &child_status, child_wait_option)) < 0){
+ HDperror("waitpid");
+ Hgoto_error(1);
+ }
+ if (WIFEXITED(child_status)){
+ if ((child_ret_value=WEXITSTATUS(child_status)) != 0){
+ printf("%d: child process exited with non-zero code (%d)\n",
+ mypid, child_ret_value);
+ Hgoto_error(2);
+ }
+ } else {
+ printf("%d: child process terminated abnormally\n", mypid);
+ Hgoto_error(2);
+ }
+ }
+
+done:
+ /* Print result and exit */
+ if (ret_value != 0){
+ printf("Error(s) encountered\n");
+ }else{
+ printf("All passed\n");
+ }
+
+ return(ret_value);
+}
+
+#else /* H5_HAVE_FORK */
+
+int
+main(void)
+{
+ HDfprintf(stderr, "Non-POSIX platform. Skipping.\n");
+ return EXIT_SUCCESS;
+} /* end main() */
+
+#endif /* H5_HAVE_FORK */
+
diff --git a/test/use_append_mchunks.c b/test/use_append_mchunks.c
new file mode 100644
index 0000000..9ee37cb
--- /dev/null
+++ b/test/use_append_mchunks.c
@@ -0,0 +1,224 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Use Case 1.8 Appending a hyperslab of multiple chunks.
+ * Description:
+ * Appending a hyperslab that spans several chunks of a dataset with
+ * unlimited dimensions within a pre-created file and reading the new
+ * data back.
+ * Goal:
+ * Read data appended by the Writer to a pre-existing dataset in a
+ * file. The dataset has one or more unlimited dimensions. The data
+ * is appended by a hyperslab that is contained in several chunks (for
+ * example, appending 2-dim planes along the slowest changing dimension
+ * in the 3-dim dataset and each plane is covered by 4 chunks).
+ * Level:
+ * User Level
+ * Guarantees:
+ * o Readers will see the modified dimension sizes after the Writer
+ * finishes HDF5 metadata updates and issues H5Fflush or H5Oflush calls.
+ * o Readers will see newly appended data after the Writer finishes
+ * the flush operation.
+ *
+ * Preconditions:
+ * o Readers are not allowed to modify the file.
+ * o All datasets that are modified by the Writer exist when the
+ * Writer opens the file.
+ * o All datasets that are modified by the Writer exist when a Reader
+ * opens the file.
+ *
+ * Main Success Scenario:
+ * 1. An application creates a file with required objects (groups,
+ * datasets, and attributes).
+ * 2. The Writer opens the file and datasets in the file and starts
+ * adding data using the H5Dwrite call with a hyperslab selection that
+ * spans several chunks.
+ * 3. A Reader opens the file and a dataset in the file; if the size of
+ * the unlimited dimension has changed, reads the appended data back.
+ *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
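+
+/* In this use case each appended plane is (1, 2*chunksize, 2*chunksize)
+ * while each chunk is (chunkplanes, chunksize, chunksize), so with the
+ * default chunkplanes=1 every plane spans 4 chunks (see setup_parameters()
+ * below).
+ */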
+
+/* Created: Albert Cheng, 2013/6/1 */
+
+#include "h5test.h"
+
+/* This test uses many POSIX things that are not available on
+ * Windows. We're using a check for fork(2) here as a proxy for
+ * all POSIX/Unix/Linux things until this test can be made
+ * more platform-independent.
+ */
+#ifdef H5_HAVE_FORK
+
+#include "use.h"
+
+/* Global Variable definitions */
+options_t UC_opts; /* Use Case Options */
+const char *progname_g="use_append_mchunks"; /* program name */
+
+/* Setup parameters for the use case.
+ * Return: 0 succeed; -1 fail.
+ */
+int setup_parameters(int argc, char * const argv[])
+{
+ /* use case defaults */
+ HDmemset(&UC_opts, 0, sizeof(options_t));
+ UC_opts.chunksize = Chunksize_DFT;
+ UC_opts.use_swmr = 1; /* use swmr open */
+ UC_opts.iterations = 1;
+ UC_opts.chunkplanes = 1;
+
+ /* parse options */
+ if (parse_option(argc, argv) < 0){
+ return(-1);
+ }
+ /* set chunk dims */
+ UC_opts.chunkdims[0] = (hsize_t)UC_opts.chunkplanes;
+ UC_opts.chunkdims[1] = UC_opts.chunkdims[2] = (hsize_t)UC_opts.chunksize;
+
+ /* set dataset initial and max dims */
+ UC_opts.dims[0] = 0;
+ UC_opts.max_dims[0] = H5S_UNLIMITED;
+ UC_opts.dims[1] = UC_opts.dims[2] = UC_opts.max_dims[1] = UC_opts.max_dims[2] = 2 * (hsize_t)UC_opts.chunksize;
+
+ /* set nplanes */
+ if (UC_opts.nplanes == 0)
+ UC_opts.nplanes = 2 * (hsize_t)UC_opts.chunksize;
+
+ /* show parameters and return */
+ show_parameters();
+ return(0);
+}
+
+
+/* Overall Algorithm:
+ * Parse options from user;
+ * Generate/pre-create the test file needed and close it;
+ * fork: the child process becomes the reader process,
+ * while the parent process continues as the writer process;
+ * both run until the ending conditions are met.
+ */
+int
+main(int argc, char *argv[])
+{
+ pid_t childpid=0;
+ pid_t mypid, tmppid;
+ int child_status;
+ int child_wait_option=0;
+ int ret_value = 0;
+ int child_ret_value;
+ hbool_t send_wait = 0;
+
+ /* initialization */
+ if (setup_parameters(argc, argv) < 0){
+ Hgoto_error(1);
+ }
+
+ /* Determine whether the writer message file needs to be sent/waited for */
+ if(UC_opts.launch == UC_READWRITE) {
+ HDunlink(WRITER_MESSAGE);
+ send_wait = 1;
+ }
+
+ /* ==============================================================*/
+ /* UC_READWRITE: create datafile, launch both reader and writer. */
+ /* UC_WRITER: create datafile, skip reader, launch writer. */
+ /* UC_READER: skip create, launch reader, exit. */
+ /* ==============================================================*/
+ /* ============*/
+ /* Create file */
+ /* ============*/
+ if (UC_opts.launch != UC_READER){
+ printf("Creating skeleton data file for test...\n");
+ if (create_uc_file() < 0){
+ fprintf(stderr, "***encountered error\n");
+ Hgoto_error(1);
+ }else
+ printf("File created.\n");
+ }
+
+ if (UC_opts.launch==UC_READWRITE){
+ /* fork process */
+ if((childpid = HDfork()) < 0) {
+ HDperror("fork");
+ Hgoto_error(1);
+ }
+ }
+ mypid = HDgetpid();
+
+ /* ============= */
+ /* launch reader */
+ /* ============= */
+ if (UC_opts.launch != UC_WRITER){
+ /* the child process launches the reader */
+ if(0 == childpid) {
+ printf("%d: launch reader process\n", mypid);
+ if (read_uc_file(send_wait) < 0){
+ fprintf(stderr, "read_uc_file encountered error\n");
+ exit(1);
+ }
+ exit(0);
+ }
+ }
+
+ /* ============= */
+ /* launch writer */
+ /* ============= */
+ /* this process continues to launch the writer */
+ printf("%d: continue as the writer process\n", mypid);
+ if (write_uc_file(send_wait) < 0){
+ fprintf(stderr, "write_uc_file encountered error\n");
+ Hgoto_error(1);
+ }
+
+ /* ================================================ */
+ /* If readwrite, collect exit code of child process */
+ /* ================================================ */
+ if (UC_opts.launch == UC_READWRITE){
+ if ((tmppid = HDwaitpid(childpid, &child_status, child_wait_option)) < 0){
+ HDperror("waitpid");
+ Hgoto_error(1);
+ }
+ if (WIFEXITED(child_status)){
+ if ((child_ret_value=WEXITSTATUS(child_status)) != 0){
+ printf("%d: child process exited with non-zero code (%d)\n",
+ mypid, child_ret_value);
+ Hgoto_error(2);
+ }
+ } else {
+ printf("%d: child process terminated abnormally\n", mypid);
+ Hgoto_error(2);
+ }
+ }
+
+done:
+ /* Print result and exit */
+ if (ret_value != 0){
+ printf("Error(s) encountered\n");
+ }else{
+ printf("All passed\n");
+ }
+
+ return(ret_value);
+}
+
+#else /* H5_HAVE_FORK */
+
+int
+main(void)
+{
+ HDfprintf(stderr, "Non-POSIX platform. Skipping.\n");
+ return EXIT_SUCCESS;
+} /* end main() */
+
+#endif /* H5_HAVE_FORK */
+
diff --git a/test/use_common.c b/test/use_common.c
new file mode 100644
index 0000000..908cac9
--- /dev/null
+++ b/test/use_common.c
@@ -0,0 +1,651 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "h5test.h"
+
+/* This test uses many POSIX things that are not available on
+ * Windows. We're using a check for fork(2) here as a proxy for
+ * all POSIX/Unix/Linux things until this test can be made
+ * more platform-independent.
+ */
+#ifdef H5_HAVE_FORK
+
+#include "use.h"
+
+#define H5D_FRIEND /*suppress error about including H5Dpkg */
+#define H5D_TESTING
+#include "H5Dpkg.h"
+
+void
+usage(const char *prog)
+{
+ HDfprintf(stderr, "usage: %s [OPTIONS]\n", prog);
+ HDfprintf(stderr, " OPTIONS\n");
+ HDfprintf(stderr, " -h, --help Print a usage message and exit\n");
+ HDfprintf(stderr, " -f FN Test file name [default: %s.h5]\n", prog);
+ HDfprintf(stderr, " -i N, --iteration=N Number of iterations to repeat the whole thing. [default: 1]\n");
+ HDfprintf(stderr, " -l w|r launch writer or reader only. [default: launch both]\n");
+ HDfprintf(stderr, " -n N, --nplanes=N Number of planes to write/read. [default: proportional to the chunk size]\n");
+ HDfprintf(stderr, " -s N, --swmr=N Use SWMR mode (0: no, non-0: yes) default is yes\n");
+ HDfprintf(stderr, " -z N, --chunksize=N Chunk size [default: %d]\n", Chunksize_DFT);
+ HDfprintf(stderr, " -y N, --chunkplanes=N Number of planes per chunk [default: 1]\n");
+ HDfprintf(stderr, "\n");
+} /* end usage() */
+
+/* Setup Use Case parameters by parsing command line options.
+ * Setup default values if not set by options. */
+int
+parse_option(int argc, char * const argv[])
+{
+ int ret_value=0;
+ int c;
+ /* command line options: See function usage for a description */
+ const char *nagg_options = "f:hi:l:n:s:y:z:";
+
+ /* suppress getopt from printing error */
+ opterr = 0;
+
+ while (1){
+ c = getopt (argc, argv, nagg_options);
+ if (-1 == c)
+ break;
+ switch (c) {
+ case 'h':
+ usage(progname_g);
+ exit(0);
+ break;
+ case 'f': /* usecase data file name */
+ UC_opts.filename = optarg;
+ break;
+ case 'i': /* iterations */
+ if ((UC_opts.iterations = HDatoi(optarg)) <= 0) {
+ fprintf(stderr, "bad iterations number %s, must be a positive integer\n", optarg);
+ usage(progname_g);
+ Hgoto_error(-1);
+ };
+ break;
+ case 'l': /* launch reader or writer only */
+ switch (*optarg) {
+ case 'r': /* reader only */
+ UC_opts.launch = UC_READER;
+ break;
+ case 'w': /* writer only */
+ UC_opts.launch = UC_WRITER;
+ break;
+ default:
+ fprintf(stderr, "launch value(%c) should be w or r only.\n", *optarg);
+ usage(progname_g);
+ Hgoto_error(-1);
+ break;
+ }
+ break;
+ case 'n': /* number of planes to write/read */
+ if ((UC_opts.nplanes = HDstrtoul(optarg, NULL, 0)) <= 0) {
+ fprintf(stderr, "bad number of planes %s, must be a positive integer\n", optarg);
+ usage(progname_g);
+ Hgoto_error(-1);
+ };
+ break;
+ case 's': /* use swmr file open mode */
+ if ((UC_opts.use_swmr = HDatoi(optarg)) < 0) {
+ fprintf(stderr, "swmr value should be 0(no) or 1(yes)\n");
+ usage(progname_g);
+ Hgoto_error(-1);
+ };
+ break;
+ case 'y': /* Number of planes per chunk */
+ if ((UC_opts.chunkplanes = HDstrtoul(optarg, NULL, 0)) <= 0) {
+ fprintf(stderr, "bad number of planes per chunk %s, must be a positive integer\n", optarg);
+ usage(progname_g);
+ Hgoto_error(-1);
+ };
+ break;
+ case 'z': /* size of chunk=(z,z) */
+ if ((UC_opts.chunksize = HDstrtoull(optarg, NULL, 0)) <= 0) {
+ fprintf(stderr, "bad chunksize %s, must be a positive integer\n", optarg);
+ usage(progname_g);
+ Hgoto_error(-1);
+ };
+ break;
+ case '?':
+ fprintf(stderr, "getopt returned '%c'.\n", c);
+ Hgoto_error(-1);
+ default:
+ fprintf(stderr, "getopt returned unexpected value.\n");
+ fprintf(stderr, "Unexpected value is %d\n", c);
+ Hgoto_error(-1);
+ }
+ }
+
+ /* set test file name if not given */
+ if (!UC_opts.filename){
+ /* default data file name is <progname>.h5 */
+ if ((UC_opts.filename=(char*)HDmalloc(HDstrlen(progname_g)+4))==NULL) {
+ fprintf(stderr, "malloc: failed\n");
+ Hgoto_error(-1);
+ };
+ HDstrcpy(UC_opts.filename, progname_g);
+ HDstrcat(UC_opts.filename, ".h5");
+ }
+
+done:
+ /* All done. */
+ return(ret_value);
+}
+
+/* Show parameters used for this use case */
+void show_parameters(void){
+ printf("===Parameters used:===\n");
+ printf("chunk dims=(%llu, %llu, %llu)\n", (unsigned long long)UC_opts.chunkdims[0],
+ (unsigned long long)UC_opts.chunkdims[1], (unsigned long long)UC_opts.chunkdims[2]);
+ printf("dataset max dims=(%llu, %llu, %llu)\n", (unsigned long long)UC_opts.max_dims[0],
+ (unsigned long long)UC_opts.max_dims[1], (unsigned long long)UC_opts.max_dims[2]);
+ printf("number of planes to write=%llu\n", (unsigned long long)UC_opts.nplanes);
+ printf("using SWMR mode=%s\n", UC_opts.use_swmr ? "yes(1)" : "no(0)");
+ printf("data filename=%s\n", UC_opts.filename);
+ printf("launch part=");
+ switch (UC_opts.launch){
+ case UC_READWRITE:
+ printf("Reader/Writer\n");
+ break;
+ case UC_WRITER:
+ printf("Writer\n");
+ break;
+ case UC_READER:
+ printf("Reader\n");
+ break;
+ default:
+ /* should not happen */
+ printf("Illegal part(%d)\n", UC_opts.launch);
+ };
+ printf("number of iterations=%d (not used yet)\n", UC_opts.iterations);
+ printf("===Parameters shown===\n");
+}
+
+/* Create the skeleton use case file for testing.
+ * It has one 3d dataset using chunked storage.
+ * The dataset is (unlimited, chunksize, chunksize).
+ * Dataset type is a 2-byte integer.
+ * It starts out "empty", i.e., first dimension is 0.
+ *
+ * Return: 0 succeed; -1 fail.
+ */
+int create_uc_file(void)
+{
+ hsize_t dims[3]; /* Dataset starting dimensions */
+ hid_t fid; /* File ID for new HDF5 file */
+ hid_t dcpl; /* Dataset creation property list */
+ hid_t sid; /* Dataspace ID */
+ hid_t dsid; /* Dataset ID */
+ hid_t fapl; /* File access property list */
+ H5D_chunk_index_t idx_type; /* Chunk index type */
+
+ /* Create the file */
+ if((fapl = h5_fileaccess()) < 0)
+ return -1;
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ return -1;
+ if((fid = H5Fcreate(UC_opts.filename, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ return -1;
+
+ /* Set up dimension sizes */
+ dims[0] = 0;
+ dims[1] = dims[2] = UC_opts.max_dims[1];
+
+ /* Create dataspace for creating datasets */
+ if((sid = H5Screate_simple(3, dims, UC_opts.max_dims)) < 0)
+ return -1;
+
+ /* Create dataset creation property list */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ return -1;
+ if(H5Pset_chunk(dcpl, 3, UC_opts.chunkdims) < 0)
+ return -1;
+
+ /* create dataset of progname */
+ if((dsid = H5Dcreate2(fid, progname_g, UC_DATATYPE, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ return -1;
+
+ /* Check that the chunk index type is not version 1 B-tree.
+ * Version 1 B-trees are not supported under SWMR.
+ */
+ if(H5D__layout_idx_type_test(dsid, &idx_type) < 0)
+ return -1;
+ if(idx_type == H5D_CHUNK_IDX_BTREE) {
+ fprintf(stderr, "ERROR: Chunk index is version 1 B-tree: aborting.\n");
+ return -1;
+ }
+
+ /* Close everything */
+ if(H5Dclose(dsid) < 0)
+ return -1;
+ if(H5Pclose(fapl) < 0)
+ return -1;
+ if(H5Pclose(dcpl) < 0)
+ return -1;
+ if(H5Sclose(sid) < 0)
+ return -1;
+ if(H5Fclose(fid) < 0)
+ return -1;
+
+ return 0;
+}
+
+/* Append planes, each of size (1, dims[1], dims[2]), to the dataset
+ * (e.g., (1, 2*chunksize, 2*chunksize), i.e. 4 chunks at a time, for the
+ * multi-chunk use case).
+ * Fill each plane with the plane number and then write it at the nth plane.
+ * Increase the plane number and repeat till nplanes planes have been written;
+ * with the default settings the end product is a cube (e.g. (2*chunksize)^3
+ * for the multi-chunk use case).
+ *
+ * Return: 0 succeed; -1 fail.
+ */
+int write_uc_file(hbool_t tosend)
+{
+ hid_t fid; /* File ID for new HDF5 file */
+ hid_t dsid; /* dataset ID */
+ hid_t fapl; /* File access property list */
+ hid_t dcpl; /* Dataset creation property list */
+ char *name;
+ UC_CTYPE *buffer, *bufptr; /* data buffer */
+ hsize_t cz=UC_opts.chunksize; /* Chunk size */
+ hid_t f_sid; /* dataset file space id */
+ hid_t m_sid; /* memory space id */
+ int rank; /* rank */
+ hsize_t chunk_dims[3]; /* Chunk dimensions */
+ hsize_t dims[3]; /* Dataspace dimensions */
+ hsize_t memdims[3]; /* Memory space dimensions */
+ hsize_t start[3] = {0,0,0}, count[3]; /* Hyperslab selection values */
+ hsize_t i, j, k;
+
+ name = UC_opts.filename;
+
+ /* Open the file */
+ if((fapl = h5_fileaccess()) < 0)
+ return -1;
+ if(UC_opts.use_swmr)
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ return -1;
+ if((fid = H5Fopen(name, H5F_ACC_RDWR | (UC_opts.use_swmr ? H5F_ACC_SWMR_WRITE : 0), fapl)) < 0){
+ fprintf(stderr, "H5Fopen failed\n");
+ return -1;
+ }
+
+ if(tosend)
+ /* Send a message that H5Fopen is complete--releasing the file lock */
+ h5_send_message(WRITER_MESSAGE, NULL, NULL);
+
+ /* Open the dataset of the program name */
+ if((dsid = H5Dopen2(fid, progname_g, H5P_DEFAULT)) < 0){
+ fprintf(stderr, "H5Dopen2 failed\n");
+ return -1;
+ }
+
+ /* Find chunksize used */
+ if ((dcpl = H5Dget_create_plist(dsid)) < 0){
+ fprintf(stderr, "H5Dget_create_plist failed\n");
+ return -1;
+ }
+ if (H5D_CHUNKED != H5Pget_layout(dcpl)){
+ fprintf(stderr, "storage layout is not chunked\n");
+ return -1;
+ }
+ if ((rank = H5Pget_chunk(dcpl, 3, chunk_dims)) != 3){
+ fprintf(stderr, "storage rank is not 3\n");
+ return -1;
+ }
+
+ /* verify chunk_dims against the set parameters */
+ if (chunk_dims[0]!=UC_opts.chunkdims[0] || chunk_dims[1] != cz || chunk_dims[2] != cz){
+ fprintf(stderr, "chunk size is not as expected. Got dims=(%llu,%llu,%llu)\n",
+ (unsigned long long)chunk_dims[0], (unsigned long long)chunk_dims[1],
+ (unsigned long long)chunk_dims[2]);
+ return -1;
+ }
+
+ /* allocate space for data buffer 1 X dims[1] X dims[2] of UC_CTYPE */
+ memdims[0]=1;
+ memdims[1] = UC_opts.dims[1];
+ memdims[2] = UC_opts.dims[2];
+ if ((buffer=(UC_CTYPE*)HDmalloc((size_t)memdims[1]*(size_t)memdims[2]*sizeof(UC_CTYPE)))==NULL) {
+ fprintf(stderr, "malloc: failed\n");
+ return -1;
+ };
+
+ /*
+ * Get dataset rank and dimension.
+ */
+ f_sid = H5Dget_space(dsid); /* Get filespace handle first. */
+ rank = H5Sget_simple_extent_ndims(f_sid);
+ if (rank != UC_RANK){
+ fprintf(stderr, "rank(%d) of dataset does not match\n", rank);
+ return -1;
+ }
+ if (H5Sget_simple_extent_dims(f_sid, dims, NULL) < 0){
+ fprintf(stderr, "H5Sget_simple_extent_dims got error\n");
+ return -1;
+ }
+ printf("dataset rank %d, dimensions %llu x %llu x %llu\n",
+ rank, (unsigned long long)(dims[0]), (unsigned long long)(dims[1]),
+ (unsigned long long)(dims[2]));
+ /* verify that file space dims are as expected and are consistent with memory space dims */
+ if (dims[0] != 0 || dims[1] != memdims[1] || dims[2] != memdims[2]){
+ fprintf(stderr, "dataset is not empty or its dimensions are unexpected. Got dims=(%llu,%llu,%llu)\n",
+ (unsigned long long)dims[0], (unsigned long long)dims[1],
+ (unsigned long long)dims[2]);
+ return -1;
+ }
+
+ /* setup mem-space for buffer */
+ if ((m_sid=H5Screate_simple(rank, memdims, NULL))<0){
+ fprintf(stderr, "H5Screate_simple for memory failed\n");
+ return -1;
+ };
+
+ /* write planes */
+ count[0]=1;
+ count[1]=dims[1];
+ count[2]=dims[2];
+ for (i=0; i<UC_opts.nplanes; i++){
+ /* fill buffer with value i */
+ bufptr = buffer;
+ for (j=0; j<dims[1]; j++)
+ for (k=0; k<dims[2]; k++)
+ *bufptr++ = (UC_CTYPE)i;
+
+ /* Cork the dataset's metadata in the cache, if SWMR is enabled */
+ if(UC_opts.use_swmr)
+ if(H5Odisable_mdc_flushes(dsid) < 0) {
+ fprintf(stderr, "H5Odisable_mdc_flushes failed\n");
+ return -1;
+ }
+
+ /* extend the dataset by one for new plane */
+ dims[0]=i+1;
+ if(H5Dset_extent(dsid, dims) < 0){
+ fprintf(stderr, "H5Dset_extent failed\n");
+ return -1;
+ }
+
+ /* Get the dataset's dataspace */
+ if((f_sid = H5Dget_space(dsid)) < 0){
+ fprintf(stderr, "H5Dget_space failed\n");
+ return -1;
+ }
+
+ start[0]=i;
+ /* Choose the next plane to write */
+ if(H5Sselect_hyperslab(f_sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0){
+ fprintf(stderr, "Failed H5Sselect_hyperslab\n");
+ return -1;
+ }
+
+ /* Write plane to the dataset */
+ if(H5Dwrite(dsid, UC_DATATYPE, m_sid, f_sid, H5P_DEFAULT, buffer) < 0){
+ fprintf(stderr, "Failed H5Dwrite\n");
+ return -1;
+ }
+
+ /* Uncork the dataset's metadata from the cache, if SWMR is enabled */
+ if(UC_opts.use_swmr)
+ if(H5Oenable_mdc_flushes(dsid) < 0) {
+ fprintf(stderr, "H5Oenable_mdc_flushes failed\n");
+ return -1;
+ }
+
+ /* flush the dataset to make the just-written plane available. */
+ if(H5Dflush(dsid) < 0)
+ {
+ fprintf(stderr, "Failed to H5Dflush dataset\n");
+ return -1;
+ }
+ }
+
+ /* Done writing. Free/Close all resources including data file */
+ HDfree(buffer);
+ if (H5Dclose(dsid) < 0){
+ fprintf(stderr, "Failed to close dataset\n");
+ return -1;
+ }
+ if (H5Sclose(m_sid) < 0){
+ fprintf(stderr, "Failed to close memory space\n");
+ return -1;
+ }
+ if (H5Sclose(f_sid) < 0){
+ fprintf(stderr, "Failed to close file space\n");
+ return -1;
+ }
+ if (H5Pclose(fapl) < 0){
+ fprintf(stderr, "Failed to close property list\n");
+ return -1;
+ }
+ if (H5Fclose(fid) < 0){
+ fprintf(stderr, "Failed to close file id\n");
+ return -1;
+ }
+
+ return 0;
+}
+
+
+/* Read planes from the dataset.
+ * It expects the dataset is being changed (growing).
+ * It checks the unlimited dimension (1st one). When it increases,
+ * it will read in the new planes, one by one, and verify the data correctness.
+ * (The nth plane should contain all "n".)
+ * When the unlimited dimension grows to nplanes (with the default settings
+ * the dataset becomes a cube), that is the expected end of the data and the
+ * reader exits.
+ *
+ * Return: 0 succeed; -1 fail.
+ */
+int read_uc_file(hbool_t towait)
+{
+ hid_t fapl; /* file access property list ID */
+ hid_t fid; /* File ID for new HDF5 file */
+ hid_t dsid; /* dataset ID */
+ char *name;
+ UC_CTYPE *buffer, *bufptr; /* read data buffer */
+ hid_t f_sid; /* dataset file space id */
+ hid_t m_sid; /* memory space id */
+ int rank; /* rank */
+ hsize_t dims[3]; /* Dataspace dimensions */
+ hsize_t memdims[3]; /* Memory space dimensions */
+ hsize_t nplane=0, nplane_old=0; /* nth plane, last nth plane */
+ hsize_t start[3] = {0,0,0}, count[3]; /* Hyperslab selection values */
+ hsize_t j, k;
+ int nreadererr=0;
+ int nerrs;
+ int nonewplane;
+
+ /* Before reading, wait for the message that H5Fopen is complete--file lock is released */
+ if(towait && h5_wait_message(WRITER_MESSAGE) < 0) {
+ fprintf(stderr, "Cannot find writer message file...failed\n");
+ return -1;
+ }
+
+ name = UC_opts.filename;
+
+ /* Open the file */
+ if((fapl = h5_fileaccess()) < 0)
+ return -1;
+ if((fid = H5Fopen(name, H5F_ACC_RDONLY | (UC_opts.use_swmr ? H5F_ACC_SWMR_READ : 0), fapl)) < 0){
+ fprintf(stderr, "H5Fopen failed\n");
+ return -1;
+ }
+ if (H5Pclose(fapl) < 0){
+ fprintf(stderr, "Failed to close property list\n");
+ return -1;
+ }
+
+
+ /* Open the dataset of the program name */
+ if((dsid = H5Dopen2(fid, progname_g, H5P_DEFAULT)) < 0){
+ fprintf(stderr, "H5Dopen2 failed\n");
+ return -1;
+ }
+
+ /* allocate space for data buffer 1 X dims[1] X dims[2] of UC_CTYPE */
+ memdims[0]=1;
+ memdims[1] = UC_opts.dims[1];
+ memdims[2] = UC_opts.dims[2];
+ if ((buffer=(UC_CTYPE*)HDmalloc((size_t)memdims[1]*(size_t)memdims[2]*sizeof(UC_CTYPE)))==NULL) {
+ fprintf(stderr, "malloc: failed\n");
+ return -1;
+ };
+
+ /*
+ * Get dataset rank and dimension.
+ * Verify dimension is as expected (unlimited,2*chunksize,2*chunksize).
+ */
+ f_sid = H5Dget_space(dsid); /* Get filespace handle first. */
+ rank = H5Sget_simple_extent_ndims(f_sid);
+ if (rank != UC_RANK){
+ fprintf(stderr, "rank(%d) of dataset does not match\n", rank);
+ return -1;
+ }
+ if (H5Sget_simple_extent_dims(f_sid, dims, NULL) < 0){
+ fprintf(stderr, "H5Sget_simple_extent_dims got error\n");
+ return -1;
+ }
+ printf("dataset rank %d, dimensions %llu x %llu x %llu\n",
+ rank, (unsigned long long)(dims[0]), (unsigned long long)(dims[1]),
+ (unsigned long long)(dims[2]));
+ /* verify that file space dims are as expected and are consistent with memory space dims */
+ if (dims[1] != memdims[1] || dims[2] != memdims[2]){
+ fprintf(stderr, "dataset dimension is not as expected. Got dims=(%llu,%llu,%llu)\n",
+ (unsigned long long)dims[0], (unsigned long long)dims[1],
+ (unsigned long long)dims[2]);
+ fprintf(stderr, "But memdims=(%llu,%llu,%llu)\n",
+ (unsigned long long)memdims[0], (unsigned long long)memdims[1],
+ (unsigned long long)memdims[2]);
+ return -1;
+ }
+
+ /* setup mem-space for buffer */
+ if ((m_sid=H5Screate_simple(rank, memdims, NULL))<0){
+ fprintf(stderr, "H5Screate_simple for memory failed\n");
+ return -1;
+ };
+
+ /* Read 1 plane at a time whenever the dataset grows larger
+ * (along dim[0]) */
+ count[0]=1;
+ count[1]=dims[1];
+ count[2]=dims[2];
+ /* quit when all nplanes have been read */
+ nonewplane=0;
+ while (nplane_old < UC_opts.nplanes ){
+ /* print a progress message according to whether new planes are available */
+ if (nplane_old < dims[0]) {
+ if (nonewplane){
+ /* end the previous message */
+ printf("\n");
+ nonewplane=0;
+ }
+ printf("reading planes %llu to %llu\n", (unsigned long long)nplane_old,
+ (unsigned long long)dims[0]);
+ }else{
+ if (nonewplane){
+ printf(".");
+ if (nonewplane>=30){
+ fprintf(stderr, "waited too long for new plane, quit.\n");
+ return -1;
+ }
+ }else{
+ /* print the message only the first time; print dots while there is still no new plane */
+ printf("no new planes to read ");
+ }
+ nonewplane++;
+ /* pause for a second */
+ HDsleep(1);
+ }
+ for (nplane=nplane_old; nplane < dims[0]; nplane++){
+ /* read planes between last old nplanes and current extent */
+ /* Get the dataset's dataspace */
+ if((f_sid = H5Dget_space(dsid)) < 0){
+ fprintf(stderr, "H5Dget_space failed\n");
+ return -1;
+ }
+
+ start[0]=nplane;
+ /* Choose the next plane to read */
+ if(H5Sselect_hyperslab(f_sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0){
+ fprintf(stderr, "H5Sselect_hyperslab failed\n");
+ return -1;
+ }
+
+ /* Read the plane from the dataset */
+ if(H5Dread(dsid, UC_DATATYPE, m_sid, f_sid, H5P_DEFAULT, buffer) < 0){
+ fprintf(stderr, "H5Dread failed\n");
+ return -1;
+ }
+
+ /* compare read data with expected data value which is nplane */
+ bufptr = buffer;
+ nerrs=0;
+ for (j=0; j<dims[1]; j++){
+ for (k=0; k<dims[2]; k++){
+ if ((hsize_t)*bufptr++ != nplane){
+ if (++nerrs < ErrorReportMax){
+ fprintf(stderr,
+ "found error in plane %llu at (%llu,%llu): expected %llu, got %d\n",
+ (unsigned long long)nplane, (unsigned long long)j,
+ (unsigned long long)k, (unsigned long long)nplane, (int)*(bufptr-1));
+ }
+ }
+ }
+ }
+ if (nerrs){
+ nreadererr++;
+ fprintf(stderr, "found %d unexpected values in plane %llu\n", nerrs,
+ (unsigned long long)nplane);
+ }
+ }
+ /* Have read all current planes */
+ nplane_old=dims[0];
+
+ /* check if dataset has grown since last time */
+#if 0
+ /* close dsid and file, then reopen them */
+ if (H5Dclose(dsid) < 0){
+ fprintf(stderr, "H5Dclose failed\n");
+ return -1;
+ }
+ if (H5Fclose(fid) < 0){
+ fprintf(stderr, "H5Fclose failed\n");
+ return -1;
+ }
+ if((fid = H5Fopen(name, H5F_ACC_RDONLY | (UC_opts.use_swmr ? H5F_ACC_SWMR_READ : 0), H5P_DEFAULT)) < 0){
+ fprintf(stderr, "H5Fopen failed\n");
+ return -1;
+ }
+ if((dsid = H5Dopen2(fid, progname_g, H5P_DEFAULT)) < 0){
+ fprintf(stderr, "H5Dopen2 failed\n");
+ return -1;
+ }
+#else
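+        /* H5Drefresh() re-reads the dataset's metadata from the file, so the
+         * reader picks up the writer's latest extent without the close/reopen
+         * sequence preserved under "#if 0" above.
+         */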
+ H5Drefresh(dsid);
+#endif
+ f_sid = H5Dget_space(dsid); /* Get filespace handle first. */
+ if (H5Sget_simple_extent_dims(f_sid, dims, NULL) < 0){
+ fprintf(stderr, "H5Sget_simple_extent_dims got error\n");
+ return -1;
+ }
+ }
+
+ if (nreadererr)
+ return -1;
+ else
+ return 0;
+}
+
+#endif /* H5_HAVE_FORK */
+
diff --git a/test/use_disable_mdc_flushes.c b/test/use_disable_mdc_flushes.c
new file mode 100644
index 0000000..340f578
--- /dev/null
+++ b/test/use_disable_mdc_flushes.c
@@ -0,0 +1,547 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/*
+ * This is copied from use_append_chunk.c with modifications to show
+ * the usage of H5Odisable_mdc_flushes/H5Oenable_mdc_flushes/H5Oare_mdc_flushes_disabled public routines.
+ */
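+
+/* The pattern exercised by write_file() below is, in outline:
+ *   H5Odisable_mdc_flushes(dsid);      -- "cork" the dataset's metadata
+ *   ...H5Dset_extent()/H5Dwrite()...   -- append planes
+ *   H5Dflush(dsid);                    -- flush periodically
+ *   H5Oenable_mdc_flushes(dsid);       -- "uncork" before closing
+ */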
+
+#include "h5test.h"
+
+/* This test uses many POSIX things that are not available on
+ * Windows. We're using a check for fork(2) here as a proxy for
+ * all POSIX/Unix/Linux things until this test can be made
+ * more platform-independent.
+ */
+#ifdef H5_HAVE_FORK
+
+#define H5D_FRIEND /*suppress error about including H5Dpkg */
+#define H5D_TESTING
+#include "H5Dpkg.h"
+
+/* Global Variable definitions */
+const char *progname_g="use_disable_mdc_flushes"; /* program name */
+
+/* these two definitions must match each other */
+#define UC_DATATYPE H5T_NATIVE_SHORT /* use case HDF5 data type */
+#define UC_CTYPE short /* use case C data type */
+#define UC_RANK 3 /* use case dataset rank */
+#define Chunksize_DFT 256 /* chunksize default */
+#define Hgoto_error(val) {ret_value=val; goto done;}
+
+
+char *filename_g;
+hsize_t nplanes_g;
+int use_swmr_g;
+int chunkplanes_g;
+int chunksize_g;
+hsize_t dims_g[UC_RANK];
+hsize_t max_dims_g[UC_RANK];
+hsize_t chunkdims_g[UC_RANK];
+
+static void usage(const char *prog);
+static int parse_option(int argc, char * const argv[]);
+static void show_parameters(void);
+static int create_file(void);
+static int setup_parameters(int argc, char * const argv[]);
+
+/*
+ * Note: Long options are not yet implemented.
+ *
+ * usage: use_disable_mdc_flushes [OPTIONS]
+ * OPTIONS
+ * -h, --help Print a usage message and exit
+ * -f FN Test file name [default: use_disable_mdc_flushes.h5]
+ * -n N, --nplanes=N Number of planes to write. [default: the chunk size]
+ * -s N, --swmr=N Use SWMR mode (0: no, non-0: yes) default is yes
+ * -z N, --chunksize=N Chunk size [default: 256]
+ * -y N, --chunkplanes=N Number of planes per chunk [default: 1]
+ */
+static void
+usage(const char *prog)
+{
+ fprintf(stderr, "usage: %s [OPTIONS]\n", prog);
+ fprintf(stderr, " OPTIONS\n");
+ fprintf(stderr, " -h Print a usage message and exit\n");
+ fprintf(stderr, " -f FN Test file name [default: %s.h5]\n", prog);
+ fprintf(stderr, " -n N Number of planes to write. [default: the chunk size]\n");
+ fprintf(stderr, " -s N Use SWMR mode (0: no, non-0: yes) default is yes\n");
+ fprintf(stderr, " -z N Chunk size [default: %d]\n", Chunksize_DFT);
+ fprintf(stderr, " -y N Number of planes per chunk [default: 1]\n");
+ fprintf(stderr, "\n");
+} /* usage() */
+
+
+/*
+ * Setup Use Case parameters by parsing command line options.
+ * Setup default values if not set by options. */
+static int
+parse_option(int argc, char * const argv[])
+{
+ int ret_value=0;
+ int c;
+ /* command line options: See function usage for a description */
+ const char *cmd_options = "f:hn:s:y:z:";
+
+ /* suppress getopt from printing error */
+ opterr = 0;
+
+ while (1){
+ c = getopt (argc, argv, cmd_options);
+ if (-1 == c)
+ break;
+ switch (c) {
+ case 'h':
+ usage(progname_g);
+ exit(0);
+ break;
+ case 'f': /* usecase data file name */
+ filename_g = optarg;
+ break;
+ case 'n': /* number of planes to write/read */
+ if ((nplanes_g = atoi(optarg)) <= 0){
+ fprintf(stderr, "bad number of planes %s, must be a positive integer\n", optarg);
+ usage(progname_g);
+ Hgoto_error(-1);
+ };
+ break;
+ case 's': /* use swmr file open mode */
+ if ((use_swmr_g = atoi(optarg)) < 0){
+ fprintf(stderr, "swmr value should be 0(no) or 1(yes)\n");
+ usage(progname_g);
+ Hgoto_error(-1);
+ };
+ break;
+ case 'y': /* Number of planes per chunk */
+ if ((chunkplanes_g = atoi(optarg)) <= 0){
+ fprintf(stderr, "bad number of planes per chunk %s, must be a positive integer\n", optarg);
+ usage(progname_g);
+ Hgoto_error(-1);
+ };
+ break;
+ case 'z': /* size of chunk=(z,z) */
+ if ((chunksize_g = atoi(optarg)) <= 0){
+ fprintf(stderr, "bad chunksize %s, must be a positive integer\n", optarg);
+ usage(progname_g);
+ Hgoto_error(-1);
+ };
+ break;
+ case '?':
+ fprintf(stderr, "getopt returned '%c'.\n", c);
+ Hgoto_error(-1);
+ default:
+ fprintf(stderr, "getopt returned unexpected value.\n");
+ fprintf(stderr, "Unexpected value is %d\n", c);
+ Hgoto_error(-1);
+ }
+ }
+
+ /* set test file name if not given */
+ if (!filename_g){
+ /* default data file name is <progname>.h5 */
+ if ((filename_g = (char*)HDmalloc(HDstrlen(progname_g)+4))==NULL) {
+ fprintf(stderr, "malloc: failed\n");
+ Hgoto_error(-1);
+ };
+ HDstrcpy(filename_g, progname_g);
+ HDstrcat(filename_g, ".h5");
+ }
+
+done:
+ /* All done. */
+ return(ret_value);
+} /* parse_option() */
+
+/* Show parameters used for this use case */
+static void
+show_parameters(void)
+{
+ printf("===Parameters used:===\n");
+ printf("chunk dims=(%llu, %llu, %llu)\n", (unsigned long long)chunkdims_g[0],
+ (unsigned long long)chunkdims_g[1], (unsigned long long)chunkdims_g[2]);
+ printf("dataset max dims=(%llu, %llu, %llu)\n", (unsigned long long)max_dims_g[0],
+ (unsigned long long)max_dims_g[1], (unsigned long long)max_dims_g[2]);
+ printf("number of planes to write=%llu\n", (unsigned long long)nplanes_g);
+ printf("using SWMR mode=%s\n", use_swmr_g ? "yes(1)" : "no(0)");
+ printf("data filename=%s\n", filename_g);
+ printf("===Parameters shown===\n");
+} /* show_parameters() */
+
+/*
+ * Setup parameters for the use case.
+ * Return: 0 succeed; -1 fail.
+ */
+static int
+setup_parameters(int argc, char * const argv[])
+{
+ /* use case defaults */
+ chunksize_g = Chunksize_DFT;
+ use_swmr_g = 1; /* use swmr open */
+ chunkplanes_g = 1;
+
+ /* parse options */
+ if (parse_option(argc, argv) < 0){
+ return(-1);
+ }
+ /* set chunk dims */
+ chunkdims_g[0] = chunkplanes_g;
+ chunkdims_g[1]= chunkdims_g[2] = chunksize_g;
+
+ /* set dataset initial and max dims */
+ dims_g[0] = 0;
+ max_dims_g[0] = H5S_UNLIMITED;
+ dims_g[1] = dims_g[2] = max_dims_g[1] = max_dims_g[2] = chunksize_g;
+
+ /* set nplanes */
+ if (nplanes_g == 0)
+ nplanes_g = chunksize_g;
+
+ /* show parameters and return */
+ show_parameters();
+ return(0);
+} /* setup_parameters() */
+
+/*
+ * Create the skeleton use case file for testing.
+ * It has one 3d dataset using chunked storage.
+ * The dataset is (unlimited, chunksize, chunksize).
+ * Dataset type is a 2-byte integer.
+ * It starts out "empty", i.e., first dimension is 0.
+ *
+ * Return: 0 succeed; -1 fail.
+ */
+static int
+create_file(void)
+{
+ hsize_t dims[3]; /* Dataset starting dimensions */
+ hid_t fid; /* File ID for new HDF5 file */
+ hid_t dcpl; /* Dataset creation property list */
+ hid_t sid; /* Dataspace ID */
+ hid_t dsid; /* Dataset ID */
+ hid_t fapl; /* File access property list */
+ H5D_chunk_index_t idx_type; /* Chunk index type */
+
+ /* Create the file */
+ if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ return -1;
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ return -1;
+ if((fid = H5Fcreate(filename_g, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ return -1;
+
+ /* Set up dimension sizes */
+ dims[0] = 0;
+ dims[1] = dims[2] = max_dims_g[1];
+
+ /* Create dataspace for creating datasets */
+ if((sid = H5Screate_simple(3, dims, max_dims_g)) < 0)
+ return -1;
+
+ /* Create dataset creation property list */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ return -1;
+ if(H5Pset_chunk(dcpl, 3, chunkdims_g) < 0)
+ return -1;
+
+ /* create dataset of progname */
+ if((dsid = H5Dcreate2(fid, progname_g, UC_DATATYPE, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ return -1;
+
+ /* Check that the chunk index type is not version 1 B-tree.
+ * Version 1 B-trees are not supported under SWMR.
+ */
+ if(H5D__layout_idx_type_test(dsid, &idx_type) < 0)
+ return -1;
+ if(idx_type == H5D_CHUNK_IDX_BTREE) {
+ fprintf(stderr, "ERROR: Chunk index is version 1 B-tree: aborting.\n");
+ return -1;
+ }
+
+ /* Close everything */
+ if(H5Dclose(dsid) < 0)
+ return -1;
+ if(H5Pclose(fapl) < 0)
+ return -1;
+ if(H5Pclose(dcpl) < 0)
+ return -1;
+ if(H5Sclose(sid) < 0)
+ return -1;
+ if(H5Fclose(fid) < 0)
+ return -1;
+
+ return 0;
+} /* create_file() */
+
+/*
+ * Append planes, each of (1, chunksize, chunksize), to the dataset.
+ * In other words, one chunk is appended to the dataset at a time.
+ * Fill each plane with the plane number and then write it at the nth plane.
+ * Increase the plane number and repeat till the end of the dataset, when it
+ * reaches chunksize planes long. End product is a chunksize^3 cube.
+ *
+ * Return: 0 succeed; -1 fail.
+ */
+static int
+write_file(void)
+{
+ hid_t fid; /* File ID for new HDF5 file */
+ hid_t dsid; /* dataset ID */
+ hid_t fapl; /* File access property list */
+ hid_t dcpl; /* Dataset creation property list */
+ char *name;
+ UC_CTYPE *buffer, *bufptr; /* data buffer */
+ hsize_t cz=chunksize_g; /* Chunk size */
+ hid_t f_sid; /* dataset file space id */
+ hid_t m_sid; /* memory space id */
+ int rank; /* rank */
+ hsize_t chunk_dims[3]; /* Chunk dimensions */
+ hsize_t dims[3]; /* Dataspace dimensions */
+ hsize_t memdims[3]; /* Memory space dimensions */
+ hsize_t start[3] = {0,0,0}, count[3]; /* Hyperslab selection values */
+ hbool_t disabled; /* Object's disabled status */
+ hsize_t i, j, k;
+
+ name = filename_g;
+
+ /* Open the file */
+ if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ return -1;
+ if(use_swmr_g)
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ return -1;
+ if((fid = H5Fopen(name, H5F_ACC_RDWR | (use_swmr_g ? H5F_ACC_SWMR_WRITE : 0), fapl)) < 0){
+ fprintf(stderr, "H5Fopen failed\n");
+ return -1;
+ }
+
+ /* Open the dataset of the program name */
+ if((dsid = H5Dopen2(fid, progname_g, H5P_DEFAULT)) < 0){
+ fprintf(stderr, "H5Dopen2 failed\n");
+ return -1;
+ }
+
+ /* Disable mdc flushes for the dataset */
+ if(H5Odisable_mdc_flushes(dsid) < 0) {
+ fprintf(stderr, "H5Odisable_mdc_flushes failed\n");
+ return -1;
+ }
+
+ /* Get mdc disabled status of the dataset */
+ if(H5Oare_mdc_flushes_disabled(dsid, &disabled) < 0) {
+ fprintf(stderr, "H5Oare_mdc_flushes_disabled failed\n");
+ return -1;
+ } else if(disabled)
+ printf("Dataset has disabled mdc flushes.\n");
+ else
+ printf("Dataset should have disabled its mdc flushes.\n");
+
+ /* Find chunksize used */
+ if ((dcpl = H5Dget_create_plist(dsid)) < 0){
+ fprintf(stderr, "H5Dget_create_plist failed\n");
+ return -1;
+ }
+ if (H5D_CHUNKED != H5Pget_layout(dcpl)){
+ fprintf(stderr, "storage layout is not chunked\n");
+ return -1;
+ }
+ if ((rank = H5Pget_chunk(dcpl, 3, chunk_dims)) != 3){
+ fprintf(stderr, "storage rank is not 3\n");
+ return -1;
+ }
+
+ /* verify chunk_dims against the set parameters */
+ if (chunk_dims[0]!= chunkdims_g[0] || chunk_dims[1] != cz || chunk_dims[2] != cz){
+ fprintf(stderr, "chunk size is not as expected. Got dims=(%llu,%llu,%llu)\n",
+ (unsigned long long)chunk_dims[0], (unsigned long long)chunk_dims[1],
+ (unsigned long long)chunk_dims[2]);
+ return -1;
+ }
+
+ /* allocate space for data buffer 1 X dims[1] X dims[2] of UC_CTYPE */
+ memdims[0]=1;
+ memdims[1] = dims_g[1];
+ memdims[2] = dims_g[2];
+ if ((buffer=(UC_CTYPE*)HDmalloc((size_t)memdims[1]*(size_t)memdims[2]*sizeof(UC_CTYPE)))==NULL) {
+ fprintf(stderr, "malloc: failed\n");
+ return -1;
+ };
+
+ /*
+ * Get dataset rank and dimension.
+ */
+ f_sid = H5Dget_space(dsid); /* Get filespace handle first. */
+ rank = H5Sget_simple_extent_ndims(f_sid);
+ if (rank != UC_RANK){
+ fprintf(stderr, "rank(%d) of dataset does not match\n", rank);
+ return -1;
+ }
+ if (H5Sget_simple_extent_dims(f_sid, dims, NULL) < 0){
+ fprintf(stderr, "H5Sget_simple_extent_dims got error\n");
+ return -1;
+ }
+ printf("dataset rank %d, dimensions %llu x %llu x %llu\n",
+ rank, (unsigned long long)(dims[0]), (unsigned long long)(dims[1]),
+ (unsigned long long)(dims[2]));
+ /* verify that file space dims are as expected and are consistent with memory space dims */
+ if (dims[0] != 0 || dims[1] != memdims[1] || dims[2] != memdims[2]){
+ fprintf(stderr, "dataset is not empty or its dimensions are unexpected. Got dims=(%llu,%llu,%llu)\n",
+ (unsigned long long)dims[0], (unsigned long long)dims[1],
+ (unsigned long long)dims[2]);
+ return -1;
+ }
+
+ /* setup mem-space for buffer */
+ if ((m_sid=H5Screate_simple(rank, memdims, NULL))<0){
+ fprintf(stderr, "H5Screate_simple for memory failed\n");
+ return -1;
+ };
+
+ /* write planes */
+ count[0]=1;
+ count[1]=dims[1];
+ count[2]=dims[2];
+ for (i=0; i<nplanes_g; i++){
+ /* fill buffer with value i */
+ bufptr = buffer;
+ for (j=0; j<dims[1]; j++)
+ for (k=0; k<dims[2]; k++)
+ *bufptr++ = (UC_CTYPE)i;
+
+ /* extend the dataset by one for new plane */
+ dims[0]=i+1;
+ if(H5Dset_extent(dsid, dims) < 0){
+ fprintf(stderr, "H5Dset_extent failed\n");
+ return -1;
+ }
+
+ /* Get the dataset's dataspace */
+ if((f_sid = H5Dget_space(dsid)) < 0){
+ fprintf(stderr, "H5Dget_space failed\n");
+ return -1;
+ }
+
+ start[0]=i;
+ /* Choose the next plane to write */
+ if(H5Sselect_hyperslab(f_sid, H5S_SELECT_SET, start, NULL, count, NULL) < 0){
+ fprintf(stderr, "Failed H5Sselect_hyperslab\n");
+ return -1;
+ }
+
+ /* Write plane to the dataset */
+ if(H5Dwrite(dsid, UC_DATATYPE, m_sid, f_sid, H5P_DEFAULT, buffer) < 0){
+ fprintf(stderr, "Failed H5Dwrite\n");
+ return -1;
+ }
+
+ /* Flush the dataset for every "chunkplanes_g" planes */
+ if(!((i + 1) % (hsize_t)chunkplanes_g)) {
+ if(H5Dflush(dsid) < 0) {
+ fprintf(stderr, "Failed to H5Dflush dataset\n");
+ return -1;
+ }
+ }
+ }
+
+ if(H5Dflush(dsid) < 0) {
+ fprintf(stderr, "Failed to H5Dflush dataset\n");
+ return -1;
+ }
+
+ /* Enable mdc flushes for the dataset */
+ /* Closing the dataset later will enable mdc flushes automatically if this is not done */
+ if(disabled)
+ if(H5Oenable_mdc_flushes(dsid) < 0) {
+ fprintf(stderr, "Failed to H5Oenable_mdc_flushes\n");
+ return -1;
+ }
+
+ /* Done writing. Free/Close all resources including data file */
+ HDfree(buffer);
+
+ if(H5Dclose(dsid) < 0){
+ fprintf(stderr, "Failed to close dataset\n");
+ return -1;
+ }
+ if(H5Sclose(m_sid) < 0){
+ fprintf(stderr, "Failed to close memory space\n");
+ return -1;
+ }
+ if(H5Sclose(f_sid) < 0){
+ fprintf(stderr, "Failed to close file space\n");
+ return -1;
+ }
+ if(H5Pclose(fapl) < 0){
+ fprintf(stderr, "Failed to close property list\n");
+ return -1;
+ }
+ if(H5Fclose(fid) < 0){
+ fprintf(stderr, "Failed to close file id\n");
+ return -1;
+ }
+
+ return 0;
+} /* write_file() */
+
+
+
+/* Overall Algorithm:
+ * Parse options from user;
+ * Generate/pre-create the test file needed and close it;
+ * Write to the file.
+ */
+int
+main(int argc, char *argv[])
+{
+ int ret_value = 0;
+
+ /* initialization */
+ if(setup_parameters(argc, argv) < 0)
+ Hgoto_error(1);
+
+ /* ============*/
+ /* Create file */
+ /* ============*/
+ printf("Creating skeleton data file for testing H5Odisable_mdc_flushes()...\n");
+ if(create_file() < 0) {
+ fprintf(stderr, "***encountered error\n");
+ Hgoto_error(1);
+ } /* end if */
+ else
+ printf("File created.\n");
+
+ printf("writing to the file\n");
+ if(write_file() < 0) {
+ fprintf(stderr, "write_file encountered error\n");
+ Hgoto_error(1);
+ }
+
+done:
+ /* Print result and exit */
+ if(ret_value != 0)
+ printf("Error(s) encountered\n");
+ else
+ printf("All passed\n");
+
+ return(ret_value);
+}
+
+#else /* H5_HAVE_FORK */
+
+int
+main(void)
+{
+ HDfprintf(stderr, "Non-POSIX platform. Skipping.\n");
+ return EXIT_SUCCESS;
+} /* end main() */
+
+#endif /* H5_HAVE_FORK */
+
diff --git a/test/vds.c b/test/vds.c
index 2fd492a..c9b7a1b 100644
--- a/test/vds.c
+++ b/test/vds.c
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -2812,7 +2810,7 @@ test_basic_io(unsigned config, hid_t fapl)
/* Write data directly to source dataset */
if(H5Dwrite(srcdset[0], H5T_NATIVE_INT, memspace, H5S_ALL, H5P_DEFAULT, buf[0]) < 0)
- TEST_ERROR
+ TEST_ERROR
/* Close srcdset and srcfile if config option specified */
if(config & TEST_IO_CLOSE_SRC) {
@@ -7265,6 +7263,12 @@ test_printf(unsigned config, hid_t fapl)
if((vdset = H5Dopen2(vfile, "v_dset", dapl)) < 0)
TEST_ERROR
+ /* Test H5Pget_virtual_printf_gap() */
+ if(H5Pget_virtual_printf_gap(dapl, &gap_size) < 0)
+ TEST_ERROR
+ if(gap_size != (hsize_t)2)
+ TEST_ERROR
+
/* Get VDS space */
if((filespace = H5Dget_space(vdset)) < 0)
TEST_ERROR
@@ -7329,6 +7333,12 @@ test_printf(unsigned config, hid_t fapl)
if((vdset = H5Dopen2(vfile, "v_dset", dapl)) < 0)
TEST_ERROR
+ /* Test H5Pget_virtual_printf_gap() */
+ if(H5Pget_virtual_printf_gap(dapl, &gap_size) < 0)
+ TEST_ERROR
+ if(gap_size != (hsize_t)3)
+ TEST_ERROR
+
/* Get VDS space */
if((filespace = H5Dget_space(vdset)) < 0)
TEST_ERROR
@@ -7393,6 +7403,12 @@ test_printf(unsigned config, hid_t fapl)
if((vdset = H5Dopen2(vfile, "v_dset", dapl)) < 0)
TEST_ERROR
+ /* Test H5Pget_virtual_printf_gap() */
+ if(H5Pget_virtual_printf_gap(dapl, &gap_size) < 0)
+ TEST_ERROR
+ if(gap_size != (hsize_t)4)
+ TEST_ERROR
+
/* Get VDS space */
if((filespace = H5Dget_space(vdset)) < 0)
TEST_ERROR
@@ -11121,23 +11137,23 @@ error:
static int
test_dapl_values(hid_t fapl_id)
{
- hid_t fid = -1; /* file to write to */
- hid_t dcpl_id = -1; /* dataset creation properties */
- hid_t dapl_id1 = -1; /* dataset access properties */
- hid_t dapl_id2 = -1; /* dataset access properties */
- hid_t vds_sid = -1; /* vds data space */
- hid_t src_sid = -1; /* source data space */
- hid_t did1 = -1; /* dataset */
- hid_t did2 = -1; /* dataset */
- hsize_t start; /* hyperslab start */
- hsize_t stride; /* hyperslab count */
- hsize_t count; /* hyperslab count */
- hsize_t block; /* hyperslab count */
- hsize_t dims; /* dataset size */
- hsize_t max_dims; /* dataset max size */
- H5D_vds_view_t view; /* view from dapl */
- hsize_t gap_size; /* gap size from dapl */
- char filename[1024]; /* file names */
+ hid_t fid = -1; /* file to write to */
+ hid_t dcpl_id = -1; /* dataset creation properties */
+ hid_t dapl_id1 = -1; /* dataset access properties */
+ hid_t dapl_id2 = -1; /* dataset access properties */
+ hid_t vds_sid = -1; /* vds data space */
+ hid_t src_sid = -1; /* source data space */
+ hid_t did1 = -1; /* dataset */
+ hid_t did2 = -1; /* dataset */
+ hsize_t start; /* hyperslab start */
+ hsize_t stride; /* hyperslab count */
+ hsize_t count; /* hyperslab count */
+ hsize_t block; /* hyperslab count */
+ hsize_t dims; /* dataset size */
+ hsize_t max_dims; /* dataset max size */
+ H5D_vds_view_t view; /* view from dapl */
+ hsize_t gap_size; /* gap size from dapl */
+ char filename[1024]; /* file names */
TESTING("H5Dget_access_plist() returns dapl w/ correct values");
@@ -11221,6 +11237,7 @@ test_dapl_values(hid_t fapl_id)
if(H5Pget_virtual_printf_gap(dapl_id2, &gap_size) < 0)
FAIL_STACK_ERROR
if(gap_size != 123)
+ TEST_ERROR
/* Close everything */
if(H5Sclose(vds_sid) < 0) FAIL_STACK_ERROR
diff --git a/test/vds_swmr.h b/test/vds_swmr.h
new file mode 100644
index 0000000..a8e4d50
--- /dev/null
+++ b/test/vds_swmr.h
@@ -0,0 +1,163 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#ifndef VDS_SWMR_H
+#define VDS_SWMR_H
+
+#include <hdf5.h>
+
+/* virtual dataset <---> source dataset mapping and sizes
+
+ ***************** --+
+ * A * K
+ ***************** --+
+ * * |
+ * B * N
+ * * |
+ ***************** --+
+ * C *
+ *****************
+ * *
+ * D *
+ * *
+ *****************
+ * E *
+ *****************
+ * *
+ * F *
+ * *
+ *****************
+
+ | |
+ +-------M-------+
+
+
+ dim[0]
+ /
+ /
+ /
+ -----> dim[2]
+ |
+ |
+ |
+ dim[1]
+
+
+ NOTE: This use case also checks for varying numbers of written planes.
+ Dataset A contains the full number of planes and each successive
+ dataset contains one fewer plane, down to the last dataset, which
+ contains zero planes. Each dataset is set to have an (unlimited
+ dimension) extent equal to the number of planes written, so the
+ "empty" regions will contain the VDS fill value.
+*/
+
+
+/* All datasets are 3D */
+#define RANK 3
+
+/* Lengths of string identifiers (file, dataset names, etc.) */
+#define NAME_LEN 32
+
+/* Compression level */
+#define COMPRESSION_LEVEL 7
+
+/* Number of source files */
+#define N_SOURCES 6
+
+/* Dataset dimensions */
+#define SM_HEIGHT 2 /* K */
+#define LG_HEIGHT 4 /* N */
+#define SM_LG_HEIGHT 6 /* SM_HEIGHT + LG_HEIGHT */
+#define FULL_HEIGHT 18 /* (3 * K) + (3 * N) */
+#define HALF_HEIGHT 9
+#define WIDTH 8 /* M */
+#define HALF_WIDTH 4
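+
+/* With these values the VDS stacks the six sources as 3 x SM_HEIGHT +
+ * 3 x LG_HEIGHT = FULL_HEIGHT (18) rows, each WIDTH (8) elements wide. */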
+
+/* Max number of planes in the dataset */
+#define N_MAX_PLANES H5S_UNLIMITED
+
+/* Number of planes each writer will write */
+#define N_PLANES_TO_WRITE 25
+
+/* Dataset datatypes */
+#define SOURCE_DATATYPE H5T_STD_I32LE
+#define VDS_DATATYPE H5T_STD_I32LE
+
+/* Starting size of datasets, both source and VDS */
+static hsize_t DIMS[N_SOURCES][RANK] = {
+ {0, SM_HEIGHT, WIDTH},
+ {0, LG_HEIGHT, WIDTH},
+ {0, SM_HEIGHT, WIDTH},
+ {0, LG_HEIGHT, WIDTH},
+ {0, SM_HEIGHT, WIDTH},
+ {0, LG_HEIGHT, WIDTH}
+};
+static hsize_t VDS_DIMS[RANK] = {0, FULL_HEIGHT, WIDTH};
+
+/* Maximum size of datasets, both source and VDS.
+ * NOTE: Theoretical (i.e.: H5S_UNLIMITED), not the actual max
+ * number of planes written out by the writers before they stop.
+ * That number is specified separately.
+ */
+static hsize_t MAX_DIMS[N_SOURCES][RANK] = {
+ {N_MAX_PLANES, SM_HEIGHT, WIDTH},
+ {N_MAX_PLANES, LG_HEIGHT, WIDTH},
+ {N_MAX_PLANES, SM_HEIGHT, WIDTH},
+ {N_MAX_PLANES, LG_HEIGHT, WIDTH},
+ {N_MAX_PLANES, SM_HEIGHT, WIDTH},
+ {N_MAX_PLANES, LG_HEIGHT, WIDTH}
+};
+static hsize_t VDS_MAX_DIMS[RANK] = {N_MAX_PLANES, FULL_HEIGHT, WIDTH};
+
+/* Planes */
+static hsize_t PLANES[N_SOURCES][RANK] = {
+ {1, SM_HEIGHT, WIDTH},
+ {1, LG_HEIGHT, WIDTH},
+ {1, SM_HEIGHT, WIDTH},
+ {1, LG_HEIGHT, WIDTH},
+ {1, SM_HEIGHT, WIDTH},
+ {1, LG_HEIGHT, WIDTH}
+};
+static hsize_t VDS_PLANE[RANK] = {1, FULL_HEIGHT, WIDTH};
+
+/* File names for source datasets */
+static char FILE_NAMES[N_SOURCES][NAME_LEN] = {
+ {"vds_swmr_src_a.h5"},
+ {"vds_swmr_src_b.h5"},
+ {"vds_swmr_src_c.h5"},
+ {"vds_swmr_src_d.h5"},
+ {"vds_swmr_src_e.h5"},
+ {"vds_swmr_src_f.h5"}
+};
+
+/* VDS file name */
+static char VDS_FILE_NAME[NAME_LEN] = "vds_swmr.h5";
+
+/* Dataset names */
+static char SOURCE_DSET_NAME[NAME_LEN] = "source_dset";
+static char SOURCE_DSET_PATH[NAME_LEN] = "/source_dset";
+static char VDS_DSET_NAME[NAME_LEN] = "vds_dset";
+
+/* Fill values */
+static int32_t FILL_VALUES[N_SOURCES] = {
+ -1,
+ -2,
+ -3,
+ -4,
+ -5,
+ -6
+};
+static int32_t VDS_FILL_VALUE = -9;
+
+#endif /* VDS_SWMR_H */
+
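For reference, a minimal sketch (not part of the patch itself) of how the layout diagram in vds_swmr.h translates into row offsets within the virtual dataset. It assumes only the constants defined above and mirrors the way vds_swmr_gen.c accumulates map_start:

    #include "vds_swmr.h"

    /* Source k's planes land in the VDS starting at the sum of the heights
     * of sources 0 .. k-1 (e.g. source 2 starts at row SM_HEIGHT + LG_HEIGHT).
     */
    static hsize_t
    vds_row_offset(int source)
    {
        hsize_t offset = 0;
        int     k;

        for(k = 0; k < source; k++)
            offset += DIMS[k][1];   /* SM_HEIGHT or LG_HEIGHT */

        return offset;
    }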
diff --git a/test/vds_swmr_gen.c b/test/vds_swmr_gen.c
new file mode 100644
index 0000000..24c6131
--- /dev/null
+++ b/test/vds_swmr_gen.c
@@ -0,0 +1,176 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "h5test.h"
+#include "vds_swmr.h"
+
+int
+main(void)
+{
+ hid_t faplid = -1; /* file access property list ID (all files) */
+
+ hid_t src_sid = -1; /* source dataset's dataspace ID */
+ hid_t src_dcplid = -1; /* source dataset property list ID */
+
+ hid_t vds_sid = -1; /* VDS dataspace ID */
+ hid_t vds_dcplid = -1; /* VDS dataset property list ID */
+
+ hid_t fid = -1; /* HDF5 file ID */
+ hid_t did = -1; /* dataset ID */
+
+ hsize_t start[RANK]; /* starting point for hyperslab */
+ int map_start = -1; /* starting point in the VDS map */
+
+ int i; /* iterator */
+
+
+ /* Start by creating the virtual dataset (VDS) dataspace and creation
+ * property list. The individual source datasets are then created
+ * and the VDS map (stored in the VDS property list) is updated.
+ */
+
+ /* Create VDS dcpl */
+ if((vds_dcplid = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR
+ if(H5Pset_fill_value(vds_dcplid, VDS_DATATYPE,
+ &VDS_FILL_VALUE) < 0)
+ TEST_ERROR
+
+ /* Create VDS dataspace */
+ if((vds_sid = H5Screate_simple(RANK, VDS_DIMS,
+ VDS_MAX_DIMS)) < 0)
+ TEST_ERROR
+
+ /************************************
+ * Create source files and datasets *
+ ************************************/
+
+ start[0] = 0;
+ start[1] = 0;
+ start[2] = 0;
+ map_start = 0;
+
+ /* All SWMR files need to use the latest file format */
+ if((faplid = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ TEST_ERROR
+ if(H5Pset_libver_bounds(faplid, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ TEST_ERROR
+
+ for(i = 0; i < N_SOURCES; i++) {
+
+ /* source dataset dcpl */
+ if((src_dcplid = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ TEST_ERROR
+ if(H5Pset_chunk(src_dcplid, RANK, PLANES[i]) < 0)
+ TEST_ERROR
+ if(H5Pset_fill_value(src_dcplid, SOURCE_DATATYPE,
+ &FILL_VALUES[i]) < 0)
+ TEST_ERROR
+
+ /* Use a mix of compressed and uncompressed datasets */
+ if(0 != i % 2)
+ if(H5Pset_deflate(src_dcplid, COMPRESSION_LEVEL) < 0)
+ TEST_ERROR
+
+ /* Create source file, dataspace, and dataset */
+ if((fid = H5Fcreate(FILE_NAMES[i], H5F_ACC_TRUNC,
+ H5P_DEFAULT, faplid)) < 0)
+ TEST_ERROR
+ if((src_sid = H5Screate_simple(RANK, DIMS[i],
+ MAX_DIMS[i])) < 0)
+ TEST_ERROR
+ if((did = H5Dcreate2(fid, SOURCE_DSET_NAME,
+ SOURCE_DATATYPE, src_sid,
+ H5P_DEFAULT, src_dcplid, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /* set up hyperslabs for source and destination datasets */
+ start[1] = 0;
+ if(H5Sselect_hyperslab(src_sid, H5S_SELECT_SET, start, NULL,
+ MAX_DIMS[i], NULL) < 0)
+ TEST_ERROR
+ start[1] = map_start;
+ if(H5Sselect_hyperslab(vds_sid, H5S_SELECT_SET, start, NULL,
+ MAX_DIMS[i], NULL) < 0)
+ TEST_ERROR
+ map_start += PLANES[i][1];
+
+ /* Add VDS mapping */
+ if(H5Pset_virtual(vds_dcplid, vds_sid, FILE_NAMES[i],
+ SOURCE_DSET_PATH, src_sid) < 0)
+ TEST_ERROR
+
+ /* close */
+ if(H5Sclose(src_sid) < 0)
+ TEST_ERROR
+ if(H5Pclose(src_dcplid) < 0)
+ TEST_ERROR
+ if(H5Dclose(did) < 0)
+ TEST_ERROR
+ if(H5Fclose(fid) < 0)
+ TEST_ERROR
+
+ } /* end for */
+
+
+ /*******************
+ * Create VDS file *
+ *******************/
+
+ /* file */
+ if((fid = H5Fcreate(VDS_FILE_NAME, H5F_ACC_TRUNC,
+ H5P_DEFAULT, faplid)) < 0)
+ TEST_ERROR
+
+ /* dataset */
+ if((did = H5Dcreate2(fid, VDS_DSET_NAME, VDS_DATATYPE, vds_sid,
+ H5P_DEFAULT, vds_dcplid, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /* close */
+ if(H5Pclose(faplid) < 0)
+ TEST_ERROR
+ if(H5Pclose(vds_dcplid) < 0)
+ TEST_ERROR
+ if(H5Sclose(vds_sid) < 0)
+ TEST_ERROR
+ if(H5Dclose(did) < 0)
+ TEST_ERROR
+ if(H5Fclose(fid) < 0)
+ TEST_ERROR
+
+ return EXIT_SUCCESS;
+
+error:
+
+ H5E_BEGIN_TRY {
+ if(faplid >= 0)
+ (void)H5Pclose(faplid);
+ if(src_sid >= 0)
+ (void)H5Sclose(src_sid);
+ if(src_dcplid >= 0)
+ (void)H5Pclose(src_dcplid);
+ if(vds_sid >= 0)
+ (void)H5Sclose(vds_sid);
+ if(vds_dcplid >= 0)
+ (void)H5Pclose(vds_dcplid);
+ if(fid >= 0)
+ (void)H5Fclose(fid);
+ if(did >= 0)
+ (void)H5Dclose(did);
+ } H5E_END_TRY
+
+ return EXIT_FAILURE;
+
+} /* end main */
+
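As a rough usage sketch (not part of the patch), the mapping that vds_swmr_gen.c stores in the VDS creation property list can be read back through the H5Pget_virtual_* queries; the file and dataset names below come from vds_swmr.h, and error-path cleanup is omitted for brevity:

    #include <stdio.h>
    #include "vds_swmr.h"

    static herr_t
    dump_vds_mappings(void)
    {
        hid_t  fid = -1, did = -1, dcpl = -1;
        size_t count = 0, u;
        char   src_file[NAME_LEN], src_dset[NAME_LEN];

        if((fid = H5Fopen(VDS_FILE_NAME, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0)
            return -1;
        if((did = H5Dopen2(fid, VDS_DSET_NAME, H5P_DEFAULT)) < 0)
            return -1;
        if((dcpl = H5Dget_create_plist(did)) < 0)
            return -1;

        /* One mapping per source file (N_SOURCES in total) is expected */
        if(H5Pget_virtual_count(dcpl, &count) < 0)
            return -1;

        for(u = 0; u < count; u++) {
            if(H5Pget_virtual_filename(dcpl, u, src_file, sizeof(src_file)) < 0)
                return -1;
            if(H5Pget_virtual_dsetname(dcpl, u, src_dset, sizeof(src_dset)) < 0)
                return -1;
            printf("mapping %u: %s:%s\n", (unsigned)u, src_file, src_dset);
        }

        H5Pclose(dcpl);
        H5Dclose(did);
        H5Fclose(fid);
        return 0;
    }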
diff --git a/test/vds_swmr_reader.c b/test/vds_swmr_reader.c
new file mode 100644
index 0000000..1de0cc5
--- /dev/null
+++ b/test/vds_swmr_reader.c
@@ -0,0 +1,140 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+#include "h5test.h"
+#include "vds_swmr.h"
+
+int
+main(void)
+{
+ hid_t fid = -1; /* HDF5 file ID */
+ hid_t did = -1; /* dataset ID */
+ hid_t msid = -1; /* memory dataspace ID */
+ hid_t fsid = -1; /* file dataspace ID */
+
+ hsize_t start[RANK]; /* hyperslab start point */
+
+ int n_elements = 0; /* size of buffer (elements) */
+ size_t size = 0; /* size of buffer (bytes) */
+ int *buffer = NULL; /* data buffer */
+
+ int n_dims = -1; /* # dimensions in dataset */
+ hsize_t dims[RANK]; /* current size of dataset */
+ hsize_t max_dims[RANK]; /* max size of dataset */
+
+ hbool_t has_errors = FALSE; /* if the read data contains errors */
+
+
+ /* Open the VDS file and dataset */
+ if((fid = H5Fopen(VDS_FILE_NAME, H5F_ACC_RDONLY | H5F_ACC_SWMR_READ, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+ if((did = H5Dopen2(fid, VDS_DSET_NAME, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+ /* Create the read buffer */
+ n_elements = VDS_PLANE[1] * VDS_PLANE[2];
+ size = n_elements * sizeof(int);
+ if(NULL == (buffer = (int *)HDmalloc(size)))
+ TEST_ERROR
+
+ /* Create memory dataspace */
+ if((msid = H5Screate_simple(RANK, VDS_PLANE, NULL)) < 0)
+ TEST_ERROR
+
+ /* Read data until the dataset is full (via the writer) */
+ do {
+
+ /* Refresh metadata */
+ if(H5Drefresh(did) < 0)
+ TEST_ERROR
+
+ /* Get the dataset dimensions */
+ if((fsid = H5Dget_space(did)) < 0)
+ TEST_ERROR
+ if(H5Sget_simple_extent_dims(fsid, dims, max_dims) < 0)
+ TEST_ERROR
+
+ /* Check the reported size of the VDS */
+ if((n_dims = H5Sget_simple_extent_ndims(fsid)) < 0)
+ TEST_ERROR
+ if(n_dims != RANK)
+ TEST_ERROR
+ if(H5Sget_simple_extent_dims(fsid, dims, max_dims) < 0)
+ TEST_ERROR
+ /* NOTE: Don't care what dims[0] is. */
+ if(dims[1] != FULL_HEIGHT)
+ TEST_ERROR
+ if(dims[2] != WIDTH)
+ TEST_ERROR
+ if(max_dims[0] != H5S_UNLIMITED)
+ TEST_ERROR
+ if(max_dims[1] != FULL_HEIGHT)
+ TEST_ERROR
+ if(max_dims[2] != WIDTH)
+ TEST_ERROR
+
+ /* Continue if there's nothing to read */
+ if(0 == dims[0]) {
+ if(H5Sclose(fsid) < 0)
+ TEST_ERROR
+ continue;
+ }
+
+ /* Read a plane from the VDS */
+ /* At this time, we just make sure we can read planes without errors. */
+ start[0] = dims[0] - 1;
+ start[1] = 0;
+ start[2] = 0;
+ if(H5Sselect_hyperslab(fsid, H5S_SELECT_SET, start, NULL, VDS_PLANE, NULL) < 0)
+ TEST_ERROR
+ if(H5Dread(did, H5T_NATIVE_INT, msid, fsid, H5P_DEFAULT, buffer) < 0)
+ TEST_ERROR
+
+ if(H5Sclose(fsid) < 0)
+ TEST_ERROR
+
+ } while (dims[0] < N_PLANES_TO_WRITE);
+
+ /* Close file and dataset */
+ if(H5Sclose(msid) < 0)
+ TEST_ERROR
+ if(H5Dclose(did) < 0)
+ TEST_ERROR
+ if(H5Fclose(fid) < 0)
+ TEST_ERROR
+
+ HDfree(buffer);
+
+ HDfprintf(stderr, "SWMR reader exited successfully\n");
+ return EXIT_SUCCESS;
+
+error:
+
+ H5E_BEGIN_TRY {
+ if(fid >= 0)
+ (void)H5Fclose(fid);
+ if(did >= 0)
+ (void)H5Dclose(did);
+ if(msid >= 0)
+ (void)H5Sclose(msid);
+ if(fsid >= 0)
+ (void)H5Sclose(fsid);
+ if(buffer != NULL)
+ HDfree(buffer);
+ } H5E_END_TRY
+
+ HDfprintf(stderr, "ERROR: SWMR reader exited with errors\n");
+ return EXIT_FAILURE;
+
+} /* end main */
+
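If actual value checking is ever added to this reader, a rough sketch (again, not part of the patch) could lean on the writer's fill pattern: plane i of every source is written with ((i + 1) * 10) + i, so a plane read through the VDS should contain only that value plus VDS fill values for sources that have not reached that plane yet:

    /* Count elements of a VDS plane that match neither the writer's value for
     * that plane nor the VDS fill value. Assumes the plane was read into
     * buffer exactly as in the loop above.
     */
    static int
    count_unexpected(const int *buffer, hsize_t plane)
    {
        int     expected = (((int)plane + 1) * 10) + (int)plane;
        hsize_t n = VDS_PLANE[1] * VDS_PLANE[2];
        hsize_t u;
        int     n_bad = 0;

        for(u = 0; u < n; u++)
            if(buffer[u] != expected && buffer[u] != VDS_FILL_VALUE)
                n_bad++;

        return n_bad;
    }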
diff --git a/test/vds_swmr_writer.c b/test/vds_swmr_writer.c
new file mode 100644
index 0000000..7824aa0
--- /dev/null
+++ b/test/vds_swmr_writer.c
@@ -0,0 +1,165 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+
+#include "h5test.h"
+#include "vds_swmr.h"
+
+int
+main(int argc, char *argv[])
+{
+ int file_number = -1; /* Source file number */
+
+ hid_t fid = -1; /* HDF5 file ID */
+ hid_t faplid = -1; /* file access property list ID */
+ hid_t did = -1; /* dataset ID */
+ hid_t msid = -1; /* memory dataspace ID */
+ hid_t fsid = -1; /* file dataspace ID */
+
+ hsize_t extent[RANK]; /* dataset extents */
+ hsize_t start[RANK]; /* hyperslab start point */
+
+ int *buffer = NULL; /* data buffer */
+ int value = -1; /* value written to datasets */
+
+ hsize_t n_elements = 0; /* number of elements in a plane */
+
+ hsize_t i; /* iterator */
+ hsize_t j; /* iterator */
+
+
+ /******************************
+ * Fill a source dataset file *
+ ******************************/
+
+ /* The file number is passed on the command line.
+ * This is an integer index into the FILE_NAMES array.
+ */
+ if(argc != 2) {
+ HDfprintf(stderr, "ERROR: Must pass the source file number on the command line.\n");
+ return EXIT_FAILURE;
+ }
+
+ file_number = HDatoi(argv[1]);
+ if(file_number < 0 || file_number >= N_SOURCES)
+ TEST_ERROR
+
+ /* Open the source file and dataset */
+ /* All SWMR files need to use the latest file format */
+ if((faplid = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ TEST_ERROR
+ if(H5Pset_libver_bounds(faplid, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ TEST_ERROR
+ if((fid = H5Fopen(FILE_NAMES[file_number], H5F_ACC_RDWR | H5F_ACC_SWMR_WRITE, faplid)) < 0)
+ TEST_ERROR
+ if((did = H5Dopen2(fid, SOURCE_DSET_PATH, H5P_DEFAULT)) < 0)
+ TEST_ERROR
+
+
+ /* Create a data buffer that represents a plane */
+ n_elements = PLANES[file_number][1] * PLANES[file_number][2];
+ if(NULL == (buffer = (int *)HDmalloc(n_elements * sizeof(int))))
+ TEST_ERROR
+
+ /* Create the memory dataspace */
+ if((msid = H5Screate_simple(RANK, PLANES[file_number], NULL)) < 0)
+ TEST_ERROR
+
+ /* Write planes to the dataset */
+ for(i = 0; i < N_PLANES_TO_WRITE; i++) {
+
+ time_t delay; /* Time after which the next plane may be written */
+
+ /* Cork the dataset's metadata in the cache */
+ if(H5Odisable_mdc_flushes(did) < 0)
+ TEST_ERROR
+
+ /* Set the dataset's extent. This is inefficient but that's ok here. */
+ extent[0] = i + 1;
+ extent[1] = PLANES[file_number][1];
+ extent[2] = PLANES[file_number][2];
+ if(H5Dset_extent(did, extent) < 0)
+ TEST_ERROR
+
+ /* Get the file dataspace */
+ if((fsid = H5Dget_space(did)) < 0)
+ TEST_ERROR
+
+ /* Each plane is filled with a single value derived from its plane number. */
+ value = (((int)i + 1) * 10) + (int)i;
+ for(j = 0; j < n_elements; j++)
+ buffer[j] = value;
+
+ /* Set up the hyperslab for writing. */
+ start[0] = i;
+ start[1] = 0;
+ start[2] = 0;
+ if(H5Sselect_hyperslab(fsid, H5S_SELECT_SET, start, NULL, PLANES[file_number], NULL) < 0)
+ TEST_ERROR
+
+ /* Write the plane to the dataset. */
+ if(H5Dwrite(did, H5T_NATIVE_INT, msid, fsid, H5P_DEFAULT, buffer) < 0)
+ TEST_ERROR
+
+ /* Uncork the dataset's metadata from the cache */
+ if(H5Oenable_mdc_flushes(did) < 0)
+ TEST_ERROR
+
+ /* Wait one second between writing planes */
+ delay = HDtime(0) + (time_t)1;
+ while(HDtime(0) < delay)
+ ;
+
+ /* Flush */
+ if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
+ TEST_ERROR
+
+ } /* end for */
+
+ if(H5Pclose(faplid) < 0)
+ TEST_ERROR
+ if(H5Sclose(msid) < 0)
+ TEST_ERROR
+ if(H5Sclose(fsid) < 0)
+ TEST_ERROR
+ if(H5Dclose(did) < 0)
+ TEST_ERROR
+ if(H5Fclose(fid) < 0)
+ TEST_ERROR
+ HDfree(buffer);
+
+ HDfprintf(stderr, "SWMR writer exited successfully\n");
+ return EXIT_SUCCESS;
+
+error:
+
+ H5E_BEGIN_TRY {
+ if(fid >= 0)
+ (void)H5Fclose(fid);
+ if(faplid >= 0)
+ (void)H5Pclose(faplid);
+ if(did >= 0)
+ (void)H5Dclose(did);
+ if(msid >= 0)
+ (void)H5Sclose(msid);
+ if(fsid >= 0)
+ (void)H5Sclose(fsid);
+ if(buffer != NULL)
+ HDfree(buffer);
+ } H5E_END_TRY
+
+ HDfprintf(stderr, "ERROR: SWMR writer exited with errors\n");
+ return EXIT_FAILURE;
+
+} /* end main */
+
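For orientation, here is a hypothetical driver (not part of the patch, and not necessarily how the test harness launches these programs) showing the run order implied by the three programs above: generate the files, start one writer per source file, then run the reader, which polls until N_PLANES_TO_WRITE planes are visible. The binary names and the use of system() are illustrative assumptions only:

    #include <stdio.h>
    #include <stdlib.h>

    int
    main(void)
    {
        char cmd[64];
        int  i;

        /* Create the six source files and the VDS file */
        if(system("./vds_swmr_gen") != 0)
            return EXIT_FAILURE;

        /* Launch one writer per source file, in the background */
        for(i = 0; i < 6; i++) {    /* 6 == N_SOURCES */
            snprintf(cmd, sizeof(cmd), "./vds_swmr_writer %d &", i);
            if(system(cmd) != 0)
                return EXIT_FAILURE;
        }

        /* The reader polls the VDS until all planes appear */
        return (system("./vds_swmr_reader") == 0) ? EXIT_SUCCESS : EXIT_FAILURE;
    }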
diff --git a/test/vfd.c b/test/vfd.c
index a215264..1932d2c 100644
--- a/test/vfd.c
+++ b/test/vfd.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/testpar/CMakeLists.txt b/testpar/CMakeLists.txt
index 58135b7..298d326 100644
--- a/testpar/CMakeLists.txt
+++ b/testpar/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_TEST_PAR)
#-----------------------------------------------------------------------------
@@ -54,6 +54,6 @@ set (H5P_TESTS
foreach (testp ${H5P_TESTS})
ADD_H5P_EXE(${testp})
-endforeach (testp ${H5P_TESTS})
+endforeach ()
include (CMakeTests.cmake)
diff --git a/testpar/CMakeTests.cmake b/testpar/CMakeTests.cmake
index 3716ee6..6e2b05e 100644
--- a/testpar/CMakeTests.cmake
+++ b/testpar/CMakeTests.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
##############################################################################
##############################################################################
@@ -9,7 +20,7 @@ add_test (NAME TEST_PAR_testphdf5 COMMAND ${MPIEXEC} ${MPIEXEC_PREFLAGS} ${MPIEX
foreach (testp ${H5P_TESTS})
add_test (NAME TEST_PAR_${testp} COMMAND ${MPIEXEC} ${MPIEXEC_PREFLAGS} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} ${MPIEXEC_POSTFLAGS} $<TARGET_FILE:${testp}>)
-endforeach (testp ${H5P_TESTS})
+endforeach ()
# The following will only be correct on windows shared
#set_tests_properties (TEST_PAR_t_pflush1 PROPERTIES WILL_FAIL "true")
@@ -31,16 +42,16 @@ if (HDF5_TEST_VFD)
t_pflush1
t_pflush2
)
-
+
if (DIRECT_VFD)
set (VFD_LIST ${VFD_LIST} direct)
- endif (DIRECT_VFD)
+ endif ()
- MACRO (ADD_VFD_TEST vfdname resultcode)
+ macro (ADD_VFD_TEST vfdname resultcode)
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
foreach (test ${H5P_VFD_TESTS})
add_test (
- NAME TEST_PAR_VFD-${vfdname}-${test}
+ NAME TEST_PAR_VFD-${vfdname}-${test}
COMMAND "${CMAKE_COMMAND}"
-D "TEST_PROGRAM=$<TARGET_FILE:${test}>"
-D "TEST_ARGS:STRING="
@@ -50,13 +61,13 @@ if (HDF5_TEST_VFD)
-D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
-P "${HDF_RESOURCES_DIR}/vfdTest.cmake"
)
- endforeach (test ${H5P_VFD_TESTS})
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO (ADD_VFD_TEST)
-
+ endforeach ()
+ endif ()
+ endmacro ()
+
# Run test with different Virtual File Driver
foreach (vfd ${VFD_LIST})
ADD_VFD_TEST (${vfd} 0)
- endforeach (vfd ${VFD_LIST})
+ endforeach ()
-endif (HDF5_TEST_VFD)
+endif ()
diff --git a/testpar/COPYING b/testpar/COPYING
index 6903daf..6497ace 100644
--- a/testpar/COPYING
+++ b/testpar/COPYING
@@ -5,12 +5,9 @@
The files and subdirectories in this directory are part of HDF5.
The full HDF5 copyright notice, including terms governing use,
- modification, and redistribution, is contained in the files COPYING
- and Copyright.html. COPYING can be found at the root of the source
- code distribution tree; Copyright.html can be found at the root
- level of an installed copy of the electronic HDF5 document set and
- is linked from the top-level documents page. It can also be found
- at http://www.hdfgroup.org/HDF5/doc/Copyright.html. If you do not
- have access to either file, you may request a copy from
+ modification, and redistribution, is contained in the COPYING file
+ which can be found at the root of the source code distribution tree
+ or in https://support.hdfgroup.org/ftp/HDF5/releases. If you do
+ not have access to either file, you may request a copy from
help@hdfgroup.org.
diff --git a/testpar/Makefile.am b/testpar/Makefile.am
index 4fe0ba8..b87c1df 100644
--- a/testpar/Makefile.am
+++ b/testpar/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
@@ -25,7 +23,7 @@ AM_CPPFLAGS+=-I$(top_srcdir)/src -I$(top_srcdir)/test
# Test programs. These are our main targets.
#
-TEST_PROG_PARA=t_mpi testphdf5 t_cache t_pflush1 t_pflush2 t_pshutdown t_prestart t_init_term t_shapesame
+TEST_PROG_PARA=t_mpi testphdf5 t_cache t_cache_image t_pflush1 t_pflush2 t_pshutdown t_prestart t_init_term t_shapesame
check_PROGRAMS = $(TEST_PROG_PARA)
diff --git a/testpar/t_cache.c b/testpar/t_cache.c
index 94a6e7c..700e993 100644
--- a/testpar/t_cache.c
+++ b/testpar/t_cache.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -333,7 +331,7 @@ struct mssg_t
haddr_t base_addr;
unsigned len;
int ver;
- int count;
+ unsigned count;
unsigned magic;
};
@@ -410,7 +408,8 @@ static herr_t datum_notify(H5C_notify_action_t action, void *thing);
static herr_t datum_free_icr(void * thing);
-#define DATUM_ENTRY_TYPE H5AC_TEST_ID
+/* Masquerade as object header entries to the cache */
+#define DATUM_ENTRY_TYPE H5AC_OHDR_ID
#define NUMBER_OF_ENTRY_TYPES 1
@@ -434,7 +433,7 @@ const H5C_class_t types[NUMBER_OF_ENTRY_TYPES] =
{
/* id */ DATUM_ENTRY_TYPE,
/* name */ "datum",
- /* mem_type */ H5FD_MEM_DEFAULT,
+ /* mem_type */ H5FD_MEM_OHDR,
/* flags */ H5AC__CLASS_SKIP_READS | H5AC__CLASS_SKIP_WRITES,
/* get_initial_load_size */ datum_get_initial_load_size,
/* get_final_load_size */ NULL,
@@ -484,8 +483,8 @@ static hbool_t take_down_cache(hid_t fid, H5C_t * cache_ptr);
static hbool_t verify_entry_reads(haddr_t addr, int expected_entry_reads);
static hbool_t verify_entry_writes(haddr_t addr, int expected_entry_writes);
static hbool_t verify_total_reads(int expected_total_reads);
-static hbool_t verify_total_writes(int expected_total_writes);
-static void verify_writes(int num_writes, haddr_t * written_entries_tbl);
+static hbool_t verify_total_writes(unsigned expected_total_writes);
+static void verify_writes(unsigned num_writes, haddr_t * written_entries_tbl);
static void unlock_entry(H5F_t * file_ptr, int32_t type, unsigned int flags);
static void unpin_entry(H5F_t * file_ptr, int32_t idx, hbool_t global,
hbool_t dirty, hbool_t via_unprotect);
@@ -1213,7 +1212,7 @@ setup_derived_types(void)
int result;
MPI_Datatype mpi_types[9] = {MPI_INT, MPI_INT, MPI_INT, MPI_LONG,
HADDR_AS_MPI_TYPE, MPI_INT, MPI_INT,
- MPI_INT, MPI_UNSIGNED};
+ MPI_UNSIGNED, MPI_UNSIGNED};
int block_len[9] = {1, 1, 1, 1, 1, 1, 1, 1, 1};
MPI_Aint displs[9];
struct mssg_t sample; /* used to compute displacements */
@@ -2927,6 +2926,30 @@ datum_notify(H5C_notify_action_t action, void *thing)
/* do nothing */
break;
+ case H5AC_NOTIFY_ACTION_CHILD_UNSERIALIZED:
+ if ( callbacks_verbose ) {
+
+ HDfprintf(stdout,
+ "%d: notify() action = child entry unserialized, idx = %d, addr = %ld.\n",
+ world_mpi_rank, idx, (long)entry_ptr->header.addr);
+ fflush(stdout);
+ }
+
+ /* do nothing */
+ break;
+
+ case H5AC_NOTIFY_ACTION_CHILD_SERIALIZED:
+ if ( callbacks_verbose ) {
+
+ HDfprintf(stdout,
+ "%d: notify() action = child entry serialized, idx = %d, addr = %ld.\n",
+ world_mpi_rank, idx, (long)entry_ptr->header.addr);
+ fflush(stdout);
+ }
+
+ /* do nothing */
+ break;
+
default:
nerrors++;
ret_value = FAIL;
@@ -3054,7 +3077,7 @@ expunge_entry(H5F_t * file_ptr,
HDassert( ! ((entry_ptr->header).is_dirty) );
result = H5C_get_entry_status(file_ptr, entry_ptr->base_addr,
- NULL, &in_cache, NULL, NULL, NULL, NULL, NULL, NULL);
+ NULL, &in_cache, NULL, NULL, NULL, NULL, NULL, NULL, NULL);
if ( result < 0 ) {
@@ -4309,15 +4332,13 @@ setup_cache_for_test(hid_t * fid_ptr,
*
*****************************************************************************/
static void
-verify_writes(int num_writes,
- haddr_t * written_entries_tbl)
+verify_writes(unsigned num_writes, haddr_t *written_entries_tbl)
{
const hbool_t report = FALSE;
hbool_t proceed = TRUE;
- int i = 0;
+ unsigned u = 0;
HDassert( world_mpi_rank != world_server_mpi_rank );
- HDassert( num_writes >= 0 );
HDassert( ( num_writes == 0 ) ||
( written_entries_tbl != NULL ) );
@@ -4337,15 +4358,12 @@ verify_writes(int num_writes,
}
}
- if ( proceed ) {
-
+ if(proceed)
proceed = verify_total_writes(num_writes);
- }
- while ( ( proceed ) && ( i < num_writes ) )
- {
- proceed = verify_entry_writes(written_entries_tbl[i], 1);
- i++;
+ while(proceed && u < num_writes) {
+ proceed = verify_entry_writes(written_entries_tbl[u], 1);
+ u++;
}
/* barrier to ensure that all other processes have finished verifying
@@ -4374,12 +4392,12 @@ verify_writes(int num_writes,
if ( proceed ) {
- HDfprintf(stdout, "%d:%s: verified %d writes.\n",
+ HDfprintf(stdout, "%d:%s: verified %u writes.\n",
world_mpi_rank, FUNC, num_writes);
} else {
- HDfprintf(stdout, "%d:%s: FAILED to verify %d writes.\n",
+ HDfprintf(stdout, "%d:%s: FAILED to verify %u writes.\n",
world_mpi_rank, FUNC, num_writes);
}
@@ -4904,10 +4922,10 @@ verify_total_reads(int expected_total_reads)
*
*****************************************************************************/
static hbool_t
-verify_total_writes(int expected_total_writes)
+verify_total_writes(unsigned expected_total_writes)
{
hbool_t success = TRUE; /* will set to FALSE if appropriate. */
- long reported_total_writes;
+ unsigned reported_total_writes;
struct mssg_t mssg;
if ( success ) {
@@ -4972,7 +4990,7 @@ verify_total_writes(int expected_total_writes)
success = FALSE;
if ( verbose ) {
HDfprintf(stdout,
- "%d:%s: reported/expected total writes mismatch (%ld/%ld).\n",
+ "%d:%s: reported/expected total writes mismatch (%u/%u).\n",
world_mpi_rank, FUNC,
reported_total_writes, expected_total_writes);
}
@@ -5294,7 +5312,7 @@ server_smoke_check(void)
if ( success ) {
- success = verify_total_writes(world_mpi_size - 1);
+ success = verify_total_writes((unsigned)(world_mpi_size - 1));
}
if ( success ) {
@@ -5398,7 +5416,7 @@ server_smoke_check(void)
if ( success ) {
- success = verify_total_writes(world_mpi_size - 1);
+ success = verify_total_writes((unsigned)(world_mpi_size - 1));
}
if ( success ) {
diff --git a/testpar/t_cache_image.c b/testpar/t_cache_image.c
new file mode 100644
index 0000000..524a63f
--- /dev/null
+++ b/testpar/t_cache_image.c
@@ -0,0 +1,4275 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * Copyright by the Board of Trustees of the University of Illinois. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+
+/* Programmer: John Mainzer
+ * 7/13/15
+ *
+ * This file contains tests specific to the cache image
+ * feature implemented in H5C.c
+ */
+#include "h5test.h"
+#include "testphdf5.h"
+#include "testpar.h"
+#include "cache_common.h"
+#include "genall5.h"
+
+#define TEST_FILES_TO_CONSTRUCT 2
+#define CHUNK_SIZE 10
+#define DSET_SIZE (40 * CHUNK_SIZE)
+#define MAX_NUM_DSETS 256
+#define PAR_NUM_DSETS 32
+#define PAGE_SIZE (4 * 1024)
+#define PB_SIZE (64 * PAGE_SIZE)
+
+/* global variable declarations: */
+
+
+const char *FILENAMES[] = {
+ "t_cache_image_00",
+ "t_cache_image_01",
+ "t_cache_image_02",
+ NULL
+};
+
+/* local utility function declarations */
+
+static void create_data_sets(hid_t file_id, int min_dset, int max_dset);
+#if 0 /* keep pending full parallel cache image */
+static void delete_data_sets(hid_t file_id, int min_dset, int max_dset);
+#endif
+
+static void open_hdf5_file(const hbool_t create_file,
+ const hbool_t mdci_sbem_expected,
+ const hbool_t read_only,
+ const hbool_t set_mdci_fapl,
+ const hbool_t config_fsm,
+ const hbool_t enable_page_buffer,
+ const char * hdf_file_name,
+ const unsigned cache_image_flags,
+ hid_t * file_id_ptr,
+ H5F_t ** file_ptr_ptr,
+ H5C_t ** cache_ptr_ptr,
+ MPI_Comm comm,
+ MPI_Info info,
+ int l_facc_type,
+ const hbool_t all_coll_metadata_ops,
+ const hbool_t coll_metadata_write,
+ const int md_write_strat);
+
+static void verify_data_sets(hid_t file_id, int min_dset, int max_dset);
+
+/* local test function declarations */
+
+static hbool_t parse_flags(int argc, char * argv[], hbool_t * setup_ptr,
+ hbool_t * ici_ptr, int * file_idx_ptr, int * mpi_size_ptr, hbool_t display);
+static void usage(void);
+static unsigned construct_test_file(int test_file_index);
+static void par_create_dataset(int dset_num, hid_t file_id, int mpi_rank,
+ int mpi_size);
+static void par_delete_dataset(int dset_num, hid_t file_id, int mpi_rank);
+static void par_verify_dataset(int dset_num, hid_t file_id, int mpi_rank);
+
+static hbool_t serial_insert_cache_image(int file_name_idx, int mpi_size);
+static void serial_verify_dataset(int dset_num, hid_t file_id, int mpi_size);
+
+/* top level test function declarations */
+static unsigned verify_cache_image_RO(int file_name_id,
+ int md_write_strat, int mpi_rank);
+static unsigned verify_cache_image_RW(int file_name_id,
+ int md_write_strat, int mpi_rank);
+
+static hbool_t smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info,
+ int mpi_rank, int mpi_size);
+
+
+/****************************************************************************/
+/***************************** Utility Functions ****************************/
+/****************************************************************************/
+
+/*-------------------------------------------------------------------------
+ * Function: construct_test_file()
+ *
+ * Purpose: This function attempts to mimic the typical "poor man's
+ * parallel" use case in which the file is passed between
+ * processes, each of which opens the file, writes some data,
+ * closes the file, and then passes control on to the next
+ * process.
+ *
+ * In this case, we create one group for each process, and
+ * populate it with a "zoo" of HDF5 objects selected to
+ * (ideally) exercise all HDF5 on disk data structures.
+ *
+ * The end result is a test file used to verify that PHDF5
+ * can open a file with a cache image.
+ *
+ * Cycle of operation
+ *
+ * 1) Create a HDF5 file with the cache image FAPL entry.
+ *
+ * Verify that the cache is informed of the cache image
+ * FAPL entry.
+ *
+ * Set all cache image flags, forcing full functionality.
+ *
+ * 2) Create a data set in the file.
+ *
+ * 3) Close the file.
+ *
+ * 4) Open the file.
+ *
+ * Verify that the metadata cache is instructed to load
+ * the metadata cache image.
+ *
+ * 5) Create a data set in the file.
+ *
+ * 6) Close the file. If enough datasets have been created
+ * goto 7. Otherwise return to 4.
+ *
+ * 7) Open the file R/O.
+ *
+ * Verify that the file contains a metadata cache image
+ * superblock extension message.
+ *
+ * 8) Verify all data sets.
+ *
+ * Verify that the cache image has been loaded.
+ *
+ * 9) Close the file.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 1/25/17
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+construct_test_file(int test_file_index)
+{
+ const char * fcn_name = "construct_test_file()";
+ char filename[512];
+ hbool_t show_progress = FALSE;
+ hid_t file_id = -1;
+ H5F_t *file_ptr = NULL;
+ H5C_t *cache_ptr = NULL;
+ int cp = 0;
+ int min_dset = 0;
+ int max_dset = 0;
+ MPI_Comm dummy_comm = MPI_COMM_WORLD;
+ MPI_Info dummy_info = MPI_INFO_NULL;
+
+ pass = TRUE;
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* setup the file name */
+ if ( pass ) {
+
+ HDassert(FILENAMES[test_file_index]);
+
+ if ( h5_fixname(FILENAMES[test_file_index], H5P_DEFAULT,
+ filename, sizeof(filename))
+ == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "h5_fixname() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 1) Create a HDF5 file with the cache image FAPL entry.
+ *
+ * Verify that the cache is informed of the cache image FAPL entry.
+ *
+ * Set flags forcing full function of the cache image feature.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ TRUE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ TRUE,
+ /* config_fsm */ TRUE,
+ /* enable_page_buffer */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr,
+ /* comm */ dummy_comm,
+ /* info */ dummy_info,
+ /* l_facc_type */ 0,
+ /* all_coll_metadata_ops */ FALSE,
+ /* coll_metadata_write */ FALSE,
+ /* md_write_strat */ 0);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 2) Create a data set in the file. */
+
+ if ( pass ) {
+
+ create_data_sets(file_id, min_dset++, max_dset++);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded != 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block loaded(1).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 3) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ while ( ( pass ) && ( max_dset < MAX_NUM_DSETS ) )
+ {
+
+ /* 4) Open the file.
+ *
+ * Verify that the metadata cache is instructed to load the
+ * metadata cache image.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ TRUE,
+ /* config_fsm */ FALSE,
+ /* enable_page_buffer */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr,
+ /* comm */ dummy_comm,
+ /* info */ dummy_info,
+ /* l_facc_type */ 0,
+ /* all_coll_metadata_ops */ FALSE,
+ /* coll_metadata_write */ FALSE,
+ /* md_write_strat */ 0);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s:L1 cp = %d, max_dset = %d, pass = %d.\n",
+ fcn_name, cp, max_dset, pass);
+
+
+ /* 5) Create a data set in the file. */
+
+ if ( pass ) {
+
+ create_data_sets(file_id, min_dset++, max_dset++);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded == 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block not loaded(1).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s:L2 cp = %d, max_dset = %d, pass = %d.\n",
+ fcn_name, cp + 1, max_dset, pass);
+
+
+ /* 6) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s:L3 cp = %d, max_dset = %d, pass = %d.\n",
+ fcn_name, cp + 2, max_dset, pass);
+ } /* end while */
+ cp += 3;
+
+
+ /* 7) Open the file R/O.
+ *
+ * Verify that the file contains a metadata cache image
+ * superblock extension message.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ TRUE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* enable_page_buffer */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr,
+ /* comm */ dummy_comm,
+ /* info */ dummy_info,
+ /* l_facc_type */ 0,
+ /* all_coll_metadata_ops */ FALSE,
+ /* coll_metadata_write */ FALSE,
+ /* md_write_strat */ 0);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 8) Open and close all data sets.
+ *
+ * Verify that the cache image has been loaded.
+ */
+
+ if ( pass ) {
+
+ verify_data_sets(file_id, 0, max_dset - 1);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded == 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block not loaded(2).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* 9) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ return !pass;
+
+} /* construct_test_file() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: create_data_sets()
+ *
+ * Purpose: If pass is TRUE on entry, create the specified data sets
+ * in the indicated file.
+ *
+ * Data sets and their contents must be well known, as we
+ * will verify that they contain the expected data later.
+ *
+ * On failure, set pass to FALSE, and set failure_mssg
+ * to point to an appropriate failure message.
+ *
+ * Do nothing if pass is FALSE on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 7/15/15
+ *
+ * Modifications:
+ *
+ * Added min_dset and max_dset parameters and supporting
+ * code. This allows the caller to specify a range of
+ * datasets to create.
+ * JRM -- 8/20/15
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+create_data_sets(hid_t file_id, int min_dset, int max_dset)
+{
+ const char * fcn_name = "create_data_sets()";
+ char dset_name[64];
+ hbool_t show_progress = FALSE;
+ hbool_t valid_chunk;
+ hbool_t verbose = FALSE;
+ int cp = 0;
+ int i, j, k, l, m;
+ int data_chunk[CHUNK_SIZE][CHUNK_SIZE];
+ herr_t status;
+ hid_t dataspace_id = -1;
+ hid_t filespace_ids[MAX_NUM_DSETS];
+ hid_t memspace_id = -1;
+ hid_t dataset_ids[MAX_NUM_DSETS];
+ hid_t properties = -1;
+ hsize_t dims[2];
+ hsize_t a_size[2];
+ hsize_t offset[2];
+ hsize_t chunk_size[2];
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ HDassert(0 <= min_dset);
+ HDassert(min_dset <= max_dset);
+ HDassert(max_dset < MAX_NUM_DSETS);
+
+ /* create the datasets */
+
+ if ( pass ) {
+
+ i = min_dset;
+
+ while ( ( pass ) && ( i <= max_dset ) )
+ {
+ /* create a dataspace for the chunked dataset */
+ dims[0] = DSET_SIZE;
+ dims[1] = DSET_SIZE;
+ dataspace_id = H5Screate_simple(2, dims, NULL);
+
+ if ( dataspace_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Screate_simple() failed.";
+ }
+
+ /* set the dataset creation plist to specify that the raw data is
+ * to be partitioned into 10 x 10 element chunks.
+ */
+
+ if ( pass ) {
+
+ chunk_size[0] = CHUNK_SIZE;
+ chunk_size[1] = CHUNK_SIZE;
+ properties = H5Pcreate(H5P_DATASET_CREATE);
+
+ if ( properties < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pcreate() failed.";
+ }
+ }
+
+ if ( pass ) {
+
+ if ( H5Pset_chunk(properties, 2, chunk_size) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pset_chunk() failed.";
+ }
+ }
+
+ /* create the dataset */
+ if ( pass ) {
+
+ sprintf(dset_name, "/dset%03d", i);
+ dataset_ids[i] = H5Dcreate2(file_id, dset_name, H5T_STD_I32BE,
+ dataspace_id, H5P_DEFAULT,
+ properties, H5P_DEFAULT);
+
+ if ( dataset_ids[i] < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Dcreate() failed.";
+ }
+ }
+
+ /* get the file space ID */
+ if ( pass ) {
+
+ filespace_ids[i] = H5Dget_space(dataset_ids[i]);
+
+ if ( filespace_ids[i] < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Dget_space() failed.";
+ }
+ }
+
+ i++;
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ /* create the mem space to be used to read and write chunks */
+ if ( pass ) {
+
+ dims[0] = CHUNK_SIZE;
+ dims[1] = CHUNK_SIZE;
+ memspace_id = H5Screate_simple(2, dims, NULL);
+
+ if ( memspace_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Screate_simple() failed.";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ /* select in memory hyperslab */
+ if ( pass ) {
+
+ offset[0] = 0; /*offset of hyperslab in memory*/
+ offset[1] = 0;
+ a_size[0] = CHUNK_SIZE; /*size of hyperslab*/
+ a_size[1] = CHUNK_SIZE;
+ status = H5Sselect_hyperslab(memspace_id, H5S_SELECT_SET, offset, NULL,
+ a_size, NULL);
+
+ if ( status < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Sselect_hyperslab() failed.";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ /* initialize all datasets on a round robin basis */
+ i = 0;
+ while ( ( pass ) && ( i < DSET_SIZE ) )
+ {
+ j = 0;
+ while ( ( pass ) && ( j < DSET_SIZE ) )
+ {
+ m = min_dset;
+ while ( ( pass ) && ( m <= max_dset ) )
+ {
+ /* initialize the slab */
+ for ( k = 0; k < CHUNK_SIZE; k++ )
+ {
+ for ( l = 0; l < CHUNK_SIZE; l++ )
+ {
+ data_chunk[k][l] = (DSET_SIZE * DSET_SIZE * m) +
+ (DSET_SIZE * (i + k)) + j + l;
+ }
+ }
+
+ /* select on disk hyperslab */
+ offset[0] = (hsize_t)i; /*offset of hyperslab in file*/
+ offset[1] = (hsize_t)j;
+ a_size[0] = CHUNK_SIZE; /*size of hyperslab*/
+ a_size[1] = CHUNK_SIZE;
+ status = H5Sselect_hyperslab(filespace_ids[m], H5S_SELECT_SET,
+ offset, NULL, a_size, NULL);
+
+ if ( status < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "disk H5Sselect_hyperslab() failed.";
+ }
+
+ /* write the chunk to file */
+ status = H5Dwrite(dataset_ids[m], H5T_NATIVE_INT, memspace_id,
+ filespace_ids[m], H5P_DEFAULT, data_chunk);
+
+ if ( status < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Dwrite() failed.";
+ }
+ m++;
+ }
+ j += CHUNK_SIZE;
+ }
+
+ i += CHUNK_SIZE;
+ }
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ /* read data from data sets and validate it */
+ i = 0;
+ while ( ( pass ) && ( i < DSET_SIZE ) )
+ {
+ j = 0;
+ while ( ( pass ) && ( j < DSET_SIZE ) )
+ {
+ m = min_dset;
+ while ( ( pass ) && ( m <= max_dset ) )
+ {
+
+ /* select on disk hyperslab */
+ offset[0] = (hsize_t)i; /* offset of hyperslab in file */
+ offset[1] = (hsize_t)j;
+ a_size[0] = CHUNK_SIZE; /* size of hyperslab */
+ a_size[1] = CHUNK_SIZE;
+ status = H5Sselect_hyperslab(filespace_ids[m], H5S_SELECT_SET,
+ offset, NULL, a_size, NULL);
+
+ if ( status < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "disk hyperslab create failed.";
+ }
+
+ /* read the chunk from file */
+ if ( pass ) {
+
+ status = H5Dread(dataset_ids[m], H5T_NATIVE_INT,
+ memspace_id, filespace_ids[m],
+ H5P_DEFAULT, data_chunk);
+
+ if ( status < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "disk hyperslab create failed.";
+ }
+ }
+
+ /* validate the slab */
+ if ( pass ) {
+
+ valid_chunk = TRUE;
+ for ( k = 0; k < CHUNK_SIZE; k++ )
+ {
+ for ( l = 0; l < CHUNK_SIZE; l++ )
+ {
+ if ( data_chunk[k][l]
+ !=
+ ((DSET_SIZE * DSET_SIZE * m) +
+ (DSET_SIZE * (i + k)) + j + l) ) {
+
+ valid_chunk = FALSE;
+
+ if ( verbose ) {
+
+ HDfprintf(stdout,
+ "data_chunk[%0d][%0d] = %0d, expect %0d.\n",
+ k, l, data_chunk[k][l],
+ ((DSET_SIZE * DSET_SIZE * m) +
+ (DSET_SIZE * (i + k)) + j + l));
+ HDfprintf(stdout,
+ "m = %d, i = %d, j = %d, k = %d, l = %d\n",
+ m, i, j, k, l);
+ }
+ }
+ }
+ }
+
+ if ( ! valid_chunk ) {
+
+ pass = FALSE;
+ failure_mssg = "slab validation failed.";
+
+ if ( verbose ) {
+
+ fprintf(stdout,
+ "Chunk (%0d, %0d) in /dset%03d is invalid.\n",
+ i, j, m);
+ }
+ }
+ }
+ m++;
+ }
+ j += CHUNK_SIZE;
+ }
+ i += CHUNK_SIZE;
+ }
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ /* close the file spaces */
+ i = min_dset;
+ while ( ( pass ) && ( i <= max_dset ) )
+ {
+ if ( H5Sclose(filespace_ids[i]) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Sclose() failed.";
+ }
+ i++;
+ }
+
+
+ /* close the datasets */
+ i = min_dset;
+ while ( ( pass ) && ( i <= max_dset ) )
+ {
+ if ( H5Dclose(dataset_ids[i]) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Dclose() failed.";
+ }
+ i++;
+ }
+
+ /* close the mem space */
+ if ( pass ) {
+
+ if ( H5Sclose(memspace_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Sclose(memspace_id) failed.";
+ }
+ }
+
+ return;
+
+} /* create_data_sets() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: delete_data_sets()
+ *
+ * Purpose: If pass is TRUE on entry, verify and then delete the
+ * dataset(s) indicated by min_dset and max_dset in the
+ * indicated file.
+ *
+ * Data sets and their contents must be well known, as we
+ * will verify that they contain the expected data later.
+ *
+ * On failure, set pass to FALSE, and set failure_mssg
+ * to point to an appropriate failure message.
+ *
+ * Do nothing if pass is FALSE on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 10/31/16
+ *
+ * Modifications:
+ *
+ * None.
+ * JRM -- 8/20/15
+ *
+ *-------------------------------------------------------------------------
+ */
+#if 0
+/* this code will be needed to test full support of cache image
+ * in parallel -- keep it around against that day.
+ *
+ * -- JRM
+ */
+static void
+delete_data_sets(hid_t file_id, int min_dset, int max_dset)
+{
+ const char * fcn_name = "delete_data_sets()";
+ char dset_name[64];
+ hbool_t show_progress = FALSE;
+ int cp = 0;
+ int i;
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ HDassert(0 <= min_dset);
+ HDassert(min_dset <= max_dset);
+ HDassert(max_dset < MAX_NUM_DSETS);
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ /* first, verify the contents of the target dataset(s) */
+ verify_data_sets(file_id, min_dset, max_dset);
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ /* now delete the target datasets */
+ if ( pass ) {
+
+ i = min_dset;
+
+ while ( ( pass ) && ( i <= max_dset ) )
+ {
+ sprintf(dset_name, "/dset%03d", i);
+
+ if ( H5Ldelete(file_id, dset_name, H5P_DEFAULT) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5Ldelete() failed.";
+ }
+
+ i++;
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ return;
+
+} /* delete_data_sets() */
+#endif
+
+
+/*-------------------------------------------------------------------------
+ * Function: open_hdf5_file()
+ *
+ * Purpose: If pass is true on entry, create or open the specified HDF5
+ * file and test to see if it has a metadata cache image superblock
+ * extension message.
+ *
+ * Set pass to FALSE and issue a suitable failure
+ * message if either the file contains a metadata cache image
+ * superblock extension and mdci_sbem_expected is FALSE, or
+ * vice versa.
+ *
+ * If mdci_sbem_expected is TRUE, also verify that the metadata
+ * cache has been advised of this.
+ *
+ * If read_only is TRUE, open the file read only. Otherwise
+ * open the file read/write.
+ *
+ * If set_mdci_fapl is TRUE, set the metadata cache image
+ * FAPL entry when opening the file, and verify that the
+ * metadata cache is notified.
+ *
+ * If config_fsm is TRUE, set up the persistent free space
+ * manager. Note that this flag may only be set if
+ * create_file is also TRUE.
+ *
+ * Return pointers to the cache data structure and file data
+ * structures.
+ *
+ * On failure, set pass to FALSE, and set failure_mssg
+ * to point to an appropriate failure message.
+ *
+ * Do nothing if pass is FALSE on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 7/14/15
+ *
+ * Modifications:
+ *
+ * Modified function to handle parallel file creates / opens.
+ *
+ * JRM -- 2/1/17
+ *
+ * Modified function to handle
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+open_hdf5_file(const hbool_t create_file,
+ const hbool_t mdci_sbem_expected,
+ const hbool_t read_only,
+ const hbool_t set_mdci_fapl,
+ const hbool_t config_fsm,
+ const hbool_t enable_page_buffer,
+ const char * hdf_file_name,
+ const unsigned cache_image_flags,
+ hid_t * file_id_ptr,
+ H5F_t ** file_ptr_ptr,
+ H5C_t ** cache_ptr_ptr,
+ MPI_Comm comm,
+ MPI_Info info,
+ int l_facc_type,
+ const hbool_t all_coll_metadata_ops,
+ const hbool_t coll_metadata_write,
+ const int md_write_strat)
+{
+ const char * fcn_name = "open_hdf5_file()";
+ hbool_t show_progress = FALSE;
+ hbool_t verbose = FALSE;
+ int cp = 0;
+ hid_t fapl_id = -1;
+ hid_t fcpl_id = -1;
+ hid_t file_id = -1;
+ herr_t result;
+ H5F_t * file_ptr = NULL;
+ H5C_t * cache_ptr = NULL;
+ H5C_cache_image_ctl_t image_ctl;
+ H5AC_cache_image_config_t cache_image_config = {
+ H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION,
+ TRUE,
+ FALSE,
+ H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE};
+
+ HDassert(!create_file || config_fsm);
+
+ if ( pass )
+ {
+ /* opening the file both read only and with a cache image
+ * requested is a contradiction. We resolve it by ignoring
+ * the cache image request silently.
+ */
+ if ( ( create_file && mdci_sbem_expected ) ||
+ ( create_file && read_only ) ||
+ ( config_fsm && !create_file ) ||
+ ( create_file && enable_page_buffer && ! config_fsm ) ||
+ ( hdf_file_name == NULL ) ||
+ ( ( set_mdci_fapl ) && ( cache_image_flags == 0 ) ) ||
+ ( ( set_mdci_fapl ) &&
+ ( (cache_image_flags & ~H5C_CI__ALL_FLAGS) != 0 ) ) ||
+ ( file_id_ptr == NULL ) ||
+ ( file_ptr_ptr == NULL ) ||
+ ( cache_ptr_ptr == NULL ) ||
+ ( l_facc_type != (l_facc_type & (FACC_MPIO)) ) ) {
+
+ failure_mssg =
+ "Bad param(s) on entry to open_hdf5_file().\n";
+
+ pass = FALSE;
+ } else if ( verbose ) {
+
+ HDfprintf(stdout, "%s: HDF file name = \"%s\".\n",
+ fcn_name, hdf_file_name);
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* create a file access property list. */
+ if ( pass ) {
+
+ fapl_id = H5Pcreate(H5P_FILE_ACCESS);
+
+ if ( fapl_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pcreate() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* call H5Pset_libver_bounds() on the fapl_id */
+ if ( pass ) {
+
+ if ( H5Pset_libver_bounds(fapl_id, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST)
+ < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pset_libver_bounds() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* get metadata cache image config -- verify that it is the default */
+ if ( pass ) {
+
+ result = H5Pget_mdc_image_config(fapl_id, &cache_image_config);
+
+ if ( result < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pget_mdc_image_config() failed.\n";
+ }
+
+ if ( ( cache_image_config.version !=
+ H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION ) ||
+ ( cache_image_config.generate_image != FALSE ) ||
+ ( cache_image_config.save_resize_status != FALSE ) ||
+ ( cache_image_config.entry_ageout !=
+ H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE ) ) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected default cache image config.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* set metadata cache image fapl entry if indicated */
+ if ( ( pass ) && ( set_mdci_fapl ) ) {
+
+ /* set cache image config fields to taste */
+ cache_image_config.generate_image = TRUE;
+ cache_image_config.save_resize_status = FALSE;
+ cache_image_config.entry_ageout = H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE;
+
+ result = H5Pset_mdc_image_config(fapl_id, &cache_image_config);
+
+ if ( result < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pset_mdc_image_config() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* set up the persistent free space manager if indicated */
+ if ( ( pass ) && ( config_fsm ) ) {
+
+ fcpl_id = H5Pcreate(H5P_FILE_CREATE);
+
+ if ( fcpl_id <= 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pcreate(H5P_FILE_CREATE) failed.";
+ }
+ }
+
+ if ( ( pass ) && ( config_fsm ) ) {
+
+ if ( H5Pset_file_space_strategy(fcpl_id, H5F_FSPACE_STRATEGY_PAGE,
+ TRUE, (hsize_t)1) == FAIL ) {
+ pass = FALSE;
+ failure_mssg = "H5Pset_file_space_strategy() failed.\n";
+ }
+ }
+
+ if ( ( pass ) && ( config_fsm ) ) {
+
+ if ( H5Pset_file_space_page_size(fcpl_id, PAGE_SIZE) == FAIL ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pset_file_space_page_size() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* setup the page buffer if indicated */
+ if ( ( pass ) && ( enable_page_buffer ) ) {
+
+ if ( H5Pset_page_buffer_size(fapl_id, PB_SIZE, 0, 0) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pset_page_buffer_size() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ if ( ( pass ) && ( l_facc_type == FACC_MPIO ) ) {
+
+ /* set Parallel access with communicator */
+ if ( H5Pset_fapl_mpio(fapl_id, comm, info) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pset_fapl_mpio() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ if ( ( pass ) && ( l_facc_type == FACC_MPIO ) ) {
+
+ if (H5Pset_all_coll_metadata_ops(fapl_id, all_coll_metadata_ops) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pset_all_coll_metadata_ops() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ if ( ( pass ) && ( l_facc_type == FACC_MPIO ) ) {
+
+ if ( H5Pset_coll_metadata_write(fapl_id, coll_metadata_write) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pset_coll_metadata_write() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ if ( ( pass ) && ( l_facc_type == FACC_MPIO ) ) {
+
+ /* set the desired parallel metadata write strategy */
+ H5AC_cache_config_t mdc_config;
+
+ mdc_config.version = H5C__CURR_AUTO_SIZE_CTL_VER;
+
+ if ( H5Pget_mdc_config(fapl_id, &mdc_config) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pget_mdc_config() failed.\n";
+ }
+
+ mdc_config.metadata_write_strategy = md_write_strat;
+
+ if ( H5Pset_mdc_config(fapl_id, &mdc_config) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pset_mdc_config() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* open the file */
+ if ( pass ) {
+
+ if ( create_file ) {
+
+ if ( fcpl_id != -1 )
+
+ file_id = H5Fcreate(hdf_file_name, H5F_ACC_TRUNC,
+ fcpl_id, fapl_id);
+ else
+
+ file_id = H5Fcreate(hdf_file_name, H5F_ACC_TRUNC,
+ H5P_DEFAULT, fapl_id);
+
+ } else {
+
+ if ( read_only )
+
+ file_id = H5Fopen(hdf_file_name, H5F_ACC_RDONLY, fapl_id);
+
+ else
+
+ file_id = H5Fopen(hdf_file_name, H5F_ACC_RDWR, fapl_id);
+ }
+
+ if ( file_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fcreate() or H5Fopen() failed.\n";
+
+ } else {
+
+ file_ptr = (struct H5F_t *)H5I_object_verify(file_id, H5I_FILE);
+
+ if ( file_ptr == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "Can't get file_ptr.";
+
+ if ( verbose ) {
+ HDfprintf(stdout, "%s: Can't get file_ptr.\n", fcn_name);
+ }
+ }
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* get a pointer to the file's internal data structure and then
+ * to the cache structure
+ */
+ if ( pass ) {
+
+ if ( file_ptr->shared->cache == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "can't get cache pointer(1).\n";
+
+ } else {
+
+ cache_ptr = file_ptr->shared->cache;
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* verify expected page buffer status. At present, page buffering
+ * must be disabled in parallel -- hopefully this will change in the
+ * future.
+ */
+ if ( pass ) {
+
+ if ( ( file_ptr->shared->page_buf ) &&
+ ( ( ! enable_page_buffer ) || ( l_facc_type == FACC_MPIO ) ) ) {
+
+ pass = FALSE;
+ failure_mssg = "page buffer unexepectedly enabled.";
+
+ } else if ( ( file_ptr->shared->page_buf != NULL ) &&
+ ( ( enable_page_buffer ) || ( l_facc_type != FACC_MPIO ) ) ) {
+
+ pass = FALSE;
+ failure_mssg = "page buffer unexepectedly disabled.";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* verify expected metadata cache status */
+
+ /* get the cache image control structure from the cache, and verify
+ * that it contains the expected values.
+ *
+ * Then set the flags in this structure to the specified value.
+ */
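+    /* note that H5C_get_cache_image_config() and
+     * H5C_set_cache_image_config() are internal H5C routines -- they
+     * let the test inspect and adjust the cache image configuration
+     * directly.
+     */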
+ if ( pass ) {
+
+ if ( H5C_get_cache_image_config(cache_ptr, &image_ctl) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "error returned by H5C_get_cache_image_config().";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ if ( pass ) {
+
+ if ( set_mdci_fapl ) {
+
+ if ( read_only ) {
+
+ if ( ( image_ctl.version !=
+ H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION ) ||
+ ( image_ctl.generate_image != FALSE ) ||
+ ( image_ctl.save_resize_status != FALSE ) ||
+ ( image_ctl.entry_ageout !=
+ H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE ) ||
+ ( image_ctl.flags != H5C_CI__ALL_FLAGS ) ) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected image_ctl values(1).\n";
+ }
+ } else {
+
+ if ( ( image_ctl.version !=
+ H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION ) ||
+ ( image_ctl.generate_image != TRUE ) ||
+ ( image_ctl.save_resize_status != FALSE ) ||
+ ( image_ctl.entry_ageout !=
+ H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE ) ||
+ ( image_ctl.flags != H5C_CI__ALL_FLAGS ) ) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected image_ctl values(2).\n";
+ }
+ }
+ } else {
+
+ if ( ( image_ctl.version !=
+ H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION ) ||
+ ( image_ctl.generate_image != FALSE ) ||
+ ( image_ctl.save_resize_status != FALSE ) ||
+ ( image_ctl.entry_ageout !=
+ H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE ) ||
+ ( image_ctl.flags != H5C_CI__ALL_FLAGS ) ) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected image_ctl values(3).\n";
+ }
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ if ( ( pass ) && ( set_mdci_fapl ) ) {
+
+ image_ctl.flags = cache_image_flags;
+
+ if ( H5C_set_cache_image_config(file_ptr, cache_ptr, &image_ctl) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "error returned by H5C_set_cache_image_config().";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ if ( pass ) {
+
+ if ( cache_ptr->close_warning_received == TRUE ) {
+
+ pass = FALSE;
+ failure_mssg = "Unexpected value of close_warning_received.\n";
+ }
+
+ if ( mdci_sbem_expected ) {
+
+ if ( read_only ) {
+
+ if ( ( cache_ptr->load_image != TRUE ) ||
+ ( cache_ptr->delete_image != FALSE ) ) {
+
+ pass = FALSE;
+ failure_mssg = "mdci sb extension message not present?\n";
+ }
+ } else {
+
+ if ( ( cache_ptr->load_image != TRUE ) ||
+ ( cache_ptr->delete_image != TRUE ) ) {
+
+ pass = FALSE;
+ failure_mssg = "mdci sb extension message not present?\n";
+ }
+ }
+ } else {
+
+ if ( ( cache_ptr->load_image == TRUE ) ||
+ ( cache_ptr->delete_image == TRUE ) ) {
+
+ pass = FALSE;
+ failure_mssg = "mdci sb extension message present?\n";
+ }
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
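+    /* on success, return the file ID, file pointer, and cache pointer
+     * to the caller via the supplied pointers.
+     */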
+ if ( pass ) {
+
+ *file_id_ptr = file_id;
+ *file_ptr_ptr = file_ptr;
+ *cache_ptr_ptr = cache_ptr;
+ }
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: cp = %d, pass = %d -- exiting.\n",
+ fcn_name, cp++, pass);
+
+ if ( ! pass )
+ HDfprintf(stdout, "%s: failure_mssg = %s\n",
+ fcn_name, failure_mssg);
+ }
+
+ return;
+
+} /* open_hdf5_file() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: par_create_dataset()
+ *
+ * Purpose: Collectively create a chunked dataset, and fill it with
+ * known values.
+ *
+ * On failure, set pass to FALSE, and set failure_mssg
+ * to point to an appropriate failure message.
+ *
+ * Do nothing if pass is FALSE on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 3/4/17
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+par_create_dataset(int dset_num,
+ hid_t file_id,
+ int mpi_rank,
+ int mpi_size)
+{
+ const char * fcn_name = "par_create_dataset()";
+ char dset_name[256];
+ hbool_t show_progress = FALSE;
+ hbool_t valid_chunk;
+ hbool_t verbose = FALSE;
+ int cp = 0;
+ int i, j, k, l;
+ int data_chunk[1][CHUNK_SIZE][CHUNK_SIZE];
+ hsize_t dims[3];
+ hsize_t a_size[3];
+ hsize_t offset[3];
+ hsize_t chunk_size[3];
+    herr_t status;
+ hid_t dataspace_id = -1;
+ hid_t memspace_id = -1;
+ hid_t dset_id = -1;
+ hid_t filespace_id = -1;
+ hid_t dcpl_id = -1;
+ hid_t dxpl_id = -1;
+
+ show_progress = (show_progress && (mpi_rank == 0));
+ verbose = (verbose && (mpi_rank == 0));
+
+ sprintf(dset_name, "/dset%03d", dset_num);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, dset_name);
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+ }
+
+ if ( pass ) {
+
+ /* create a dataspace for the chunked dataset */
+ dims[0] = (hsize_t)mpi_size;
+ dims[1] = DSET_SIZE;
+ dims[2] = DSET_SIZE;
+ dataspace_id = H5Screate_simple(3, dims, NULL);
+
+ if ( dataspace_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Screate_simple() failed.";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* set the dataset creation plist to specify that the raw data is
+     * to be partitioned into 1X10X10 element chunks.
+ */
+
+ if ( pass ) {
+
+ dcpl_id = H5Pcreate(H5P_DATASET_CREATE);
+
+ if ( dcpl_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pcreate(H5P_DATASET_CREATE) failed.";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ if ( pass ) {
+
+ chunk_size[0] = 1;
+ chunk_size[1] = CHUNK_SIZE;
+ chunk_size[2] = CHUNK_SIZE;
+
+ if ( H5Pset_chunk(dcpl_id, 3, chunk_size) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pset_chunk() failed.";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* create the dataset */
+ if ( pass ) {
+
+ dset_id = H5Dcreate2(file_id, dset_name, H5T_STD_I32BE,
+ dataspace_id, H5P_DEFAULT,
+ dcpl_id, H5P_DEFAULT);
+
+ if ( dset_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Dcreate() failed.";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* get the file space ID */
+ if ( pass ) {
+
+ filespace_id = H5Dget_space(dset_id);
+
+ if ( filespace_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Dget_space() failed.";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* create the mem space to be used to read and write chunks */
+ if ( pass ) {
+
+ dims[0] = 1;
+ dims[1] = CHUNK_SIZE;
+ dims[2] = CHUNK_SIZE;
+ memspace_id = H5Screate_simple(3, dims, NULL);
+
+ if ( memspace_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Screate_simple() failed.";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* select in memory hyperslab */
+ if ( pass ) {
+
+ offset[0] = 0; /* offset of hyperslab in memory */
+ offset[1] = 0;
+ offset[2] = 0;
+ a_size[0] = 1; /* size of hyperslab */
+ a_size[1] = CHUNK_SIZE;
+ a_size[2] = CHUNK_SIZE;
+ status = H5Sselect_hyperslab(memspace_id, H5S_SELECT_SET, offset, NULL,
+ a_size, NULL);
+
+ if ( status < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Sselect_hyperslab() failed.";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* setup the DXPL for collective I/O */
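+    /* with H5FD_MPIO_COLLECTIVE set on the DXPL, every rank must
+     * participate in each of the H5Dwrite() / H5Dread() calls below.
+     */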
+ if ( pass ) {
+
+ dxpl_id = H5Pcreate(H5P_DATASET_XFER);
+
+ if ( dxpl_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pcreate(H5P_DATASET_XFER) failed.";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ if ( pass ) {
+
+ if ( H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pset_dxpl_mpio() failed.";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* initialize the dataset with collective writes */
+ i = 0;
+ while ( ( pass ) && ( i < DSET_SIZE ) )
+ {
+ j = 0;
+ while ( ( pass ) && ( j < DSET_SIZE ) )
+ {
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d.0, pass = %d.\n",
+ fcn_name, cp, pass);
+
+ /* initialize the slab */
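+            /* each cell value encodes the writing process's rank, the
+             * cell's (row, column) position in the dataset, and the
+             * dataset number, so that the read back verification below
+             * can recompute the expected value.
+             */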
+ for ( k = 0; k < CHUNK_SIZE; k++ )
+ {
+ for ( l = 0; l < CHUNK_SIZE; l++ )
+ {
+ data_chunk[0][k][l] = (DSET_SIZE * DSET_SIZE * mpi_rank) +
+ (DSET_SIZE * (i + k)) + j + l +
+ dset_num;
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d.1, pass = %d.\n",
+ fcn_name, cp, pass);
+
+ /* select on disk hyperslab */
+ offset[0] = (hsize_t)mpi_rank; /* offset of hyperslab in file */
+ offset[1] = (hsize_t)i;
+ offset[2] = (hsize_t)j;
+ a_size[0] = (hsize_t)1; /* size of hyperslab */
+ a_size[1] = CHUNK_SIZE;
+ a_size[2] = CHUNK_SIZE;
+ status = H5Sselect_hyperslab(filespace_id, H5S_SELECT_SET,
+ offset, NULL, a_size, NULL);
+
+ if ( status < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "disk H5Sselect_hyperslab() failed.";
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d.2, pass = %d.\n",
+ fcn_name, cp, pass);
+
+ /* write the chunk to file */
+ status = H5Dwrite(dset_id, H5T_NATIVE_INT, memspace_id,
+ filespace_id, dxpl_id, data_chunk);
+
+ if ( status < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Dwrite() failed.";
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d.3, pass = %d.\n",
+ fcn_name, cp, pass);
+
+ j += CHUNK_SIZE;
+ }
+
+ i += CHUNK_SIZE;
+ }
+
+ cp++;
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* read data from data sets and validate it */
+ i = 0;
+ while ( ( pass ) && ( i < DSET_SIZE ) )
+ {
+ j = 0;
+ while ( ( pass ) && ( j < DSET_SIZE ) )
+ {
+ /* select on disk hyperslab */
+ offset[0] = (hsize_t)mpi_rank;
+ offset[1] = (hsize_t)i; /* offset of hyperslab in file */
+ offset[2] = (hsize_t)j;
+ a_size[0] = (hsize_t)1;
+ a_size[1] = CHUNK_SIZE; /* size of hyperslab */
+ a_size[2] = CHUNK_SIZE;
+
+ status = H5Sselect_hyperslab(filespace_id, H5S_SELECT_SET,
+ offset, NULL, a_size, NULL);
+
+ if ( status < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "disk hyperslab create failed.";
+ }
+
+ /* read the chunk from file */
+ if ( pass ) {
+
+ status = H5Dread(dset_id, H5T_NATIVE_INT,
+ memspace_id, filespace_id,
+ dxpl_id, data_chunk);
+
+ if ( status < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "chunk read failed.";
+ }
+ }
+
+ /* validate the slab */
+ if ( pass ) {
+
+ valid_chunk = TRUE;
+ for ( k = 0; k < CHUNK_SIZE; k++ )
+ {
+ for ( l = 0; l < CHUNK_SIZE; l++ )
+ {
+ if ( data_chunk[0][k][l]
+ !=
+ ((DSET_SIZE * DSET_SIZE * mpi_rank) +
+ (DSET_SIZE * (i + k)) + j + l + dset_num) ) {
+
+ valid_chunk = FALSE;
+
+ if ( verbose ) {
+
+ HDfprintf(stdout,
+ "data_chunk[%0d][%0d] = %0d, expect %0d.\n",
+ k, l, data_chunk[0][k][l],
+ ((DSET_SIZE * DSET_SIZE * mpi_rank) +
+ (DSET_SIZE * (i + k)) + j + l + dset_num));
+ HDfprintf(stdout,
+ "dset_num = %d, i = %d, j = %d, k = %d, l = %d\n",
+ dset_num, i, j, k, l);
+ }
+ }
+ }
+ }
+
+ if ( ! valid_chunk ) {
+
+ pass = FALSE;
+ failure_mssg = "slab validation failed.";
+
+ if ( verbose ) {
+
+ fprintf(stdout,
+ "Chunk (%0d, %0d) in /dset%03d is invalid.\n",
+ i, j, dset_num);
+ }
+ }
+ }
+ j += CHUNK_SIZE;
+ }
+ i += CHUNK_SIZE;
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* close the data space */
+ if ( ( pass ) && ( H5Sclose(dataspace_id) < 0 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Sclose(dataspace_id) failed.";
+ }
+
+ /* close the file space */
+ if ( ( pass ) && ( H5Sclose(filespace_id) < 0 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Sclose(filespace_id) failed.";
+ }
+
+ /* close the dataset */
+ if ( ( pass ) && ( H5Dclose(dset_id) < 0 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Dclose(dset_id) failed.";
+ }
+
+ /* close the mem space */
+ if ( ( pass ) && ( H5Sclose(memspace_id) < 0 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Sclose(memspace_id) failed.";
+ }
+
+ /* close the dataset creation property list */
+ if ( ( pass ) && ( H5Pclose(dcpl_id) < 0 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pclose(dcpl) failed.";
+ }
+
+ /* close the data access property list */
+ if ( ( pass ) && ( H5Pclose(dxpl_id) < 0 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pclose(dxpl) failed.";
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ return;
+
+} /* par_create_dataset() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: par_delete_dataset()
+ *
+ * Purpose: Collectively delete the specified dataset.
+ *
+ * On failure, set pass to FALSE, and set failure_mssg
+ * to point to an appropriate failure message.
+ *
+ * Do nothing if pass is FALSE on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 3/6/17
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+par_delete_dataset(int dset_num,
+ hid_t file_id,
+ int mpi_rank)
+{
+ const char * fcn_name = "par_delete_dataset()";
+ char dset_name[256];
+ hbool_t show_progress = FALSE;
+ int cp = 0;
+
+ show_progress = (show_progress && (mpi_rank == 0));
+
+ sprintf(dset_name, "/dset%03d", dset_num);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, dset_name);
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+ }
+
+ /* verify the target dataset */
+ if ( pass ) {
+
+ par_verify_dataset(dset_num, file_id, mpi_rank);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* delete the target dataset */
+ if ( pass ) {
+
+ if ( H5Ldelete(file_id, dset_name, H5P_DEFAULT) < 0) {
+
+ pass = FALSE;
+ failure_mssg = "H5Ldelete() failed.";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ return;
+
+} /* par_delete_dataset() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: par_insert_cache_image()
+ *
+ * Purpose: Insert a cache image in the supplied file.
+ *
+ *              At present, cache image is not enabled in the parallel
+ *              case, so we have to insert the cache image with a serial
+ * process. Do this via a fork and an execv from process 0.
+ * All processes wait until the child process completes, and
+ * then return.
+ *
+ * On failure, set pass to FALSE, and set failure_mssg
+ * to point to an appropriate failure message.
+ *
+ * Do nothing if pass is FALSE on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 3/8/17
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+par_insert_cache_image(int file_name_idx, int mpi_rank, int mpi_size )
+{
+ hbool_t show_progress = FALSE;
+
+ if ( pass ) {
+
+ if ( mpi_rank == 0 ) { /* insert cache image in supplied test file */
+
+ char file_name_idx_str[32];
+ char mpi_size_str[32];
+ int child_status;
+ pid_t child_pid;
+
+ sprintf(file_name_idx_str, "%d", file_name_idx);
+ sprintf(mpi_size_str, "%d", mpi_size);
+
+ child_pid = fork();
+
+ if ( child_pid == 0 ) { /* this is the child process */
+
+                /* fun and games to shut up the compiler */
+ char param0[32] = "t_cache_image";
+ char param1[32] = "ici";
+ char * child_argv[] = {param0,
+ param1,
+ file_name_idx_str,
+ mpi_size_str,
+ NULL};
+
+ /* we may need to play with the path here */
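+                /* the resulting child command line has the form
+                 *
+                 *      t_cache_image ici <file_name_idx> <mpi_size>
+                 *
+                 * which matches the "ici" invocation documented in
+                 * usage() below.
+                 */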
+ if ( execv("t_cache_image", child_argv) == -1 ) {
+
+ HDfprintf(stdout,
+                       "execv() of ici process failed. errno = %d(%s)\n",
+ errno, strerror(errno));
+ exit(1);
+ }
+
+ } else if ( child_pid != -1 ) {
+ /* this is the parent process -- wait until child is done */
+ if ( -1 == waitpid(child_pid, &child_status, WUNTRACED)) {
+
+ HDfprintf(stdout, "can't wait on ici process.\n");
+ pass = FALSE;
+
+ } else if ( ! WIFEXITED(child_status) ) {
+
+                HDfprintf(stdout, "ici process hasn't exited.\n");
+ pass = FALSE;
+
+ } else if ( WEXITSTATUS(child_status) != 0 ) {
+
+ HDfprintf(stdout, "ici process reports failure.\n");
+ pass = FALSE;
+
+ } else if ( show_progress ) {
+
+ HDfprintf(stdout, "cache image insertion complete.\n");
+ }
+ } else { /* fork failed */
+
+ HDfprintf(stdout,
+ "can't create process to insert cache image.\n");
+ pass = FALSE;
+ }
+ }
+ }
+
+ if ( pass ) {
+
+ /* make sure insertion of the cache image is complete
+ * before proceeding
+ */
+ MPI_Barrier(MPI_COMM_WORLD);
+ }
+
+ return;
+
+} /* par_insert_cache_image() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: par_verify_dataset()
+ *
+ * Purpose: Collectively verify the contents of a chunked dataset.
+ *
+ * On failure, set pass to FALSE, and set failure_mssg
+ * to point to an appropriate failure message.
+ *
+ * Do nothing if pass is FALSE on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 3/6/17
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+par_verify_dataset(int dset_num,
+ hid_t file_id,
+ int mpi_rank)
+{
+ const char * fcn_name = "par_verify_dataset()";
+ char dset_name[256];
+ hbool_t show_progress = FALSE;
+ hbool_t valid_chunk;
+ hbool_t verbose = FALSE;
+ int cp = 0;
+ int i, j, k, l;
+ int data_chunk[1][CHUNK_SIZE][CHUNK_SIZE];
+ hsize_t dims[3];
+ hsize_t a_size[3];
+ hsize_t offset[3];
+    herr_t status;
+ hid_t memspace_id = -1;
+ hid_t dset_id = -1;
+ hid_t filespace_id = -1;
+ hid_t dxpl_id = -1;
+
+ show_progress = (show_progress && (mpi_rank == 0));
+ verbose = (verbose && (mpi_rank == 0));
+
+ sprintf(dset_name, "/dset%03d", dset_num);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, dset_name);
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+ }
+
+ if ( pass ) {
+
+ /* open the dataset */
+
+ dset_id = H5Dopen2(file_id, dset_name, H5P_DEFAULT);
+
+ if ( dset_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Dopen2() failed.";
+ }
+ }
+
+ /* get the file space ID */
+ if ( pass ) {
+
+ filespace_id = H5Dget_space(dset_id);
+
+ if ( filespace_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Dget_space() failed.";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* create the mem space to be used to read */
+ if ( pass ) {
+
+ dims[0] = 1;
+ dims[1] = CHUNK_SIZE;
+ dims[2] = CHUNK_SIZE;
+ memspace_id = H5Screate_simple(3, dims, NULL);
+
+ if ( memspace_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Screate_simple() failed.";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* select in memory hyperslab */
+ if ( pass ) {
+
+ offset[0] = 0; /* offset of hyperslab in memory */
+ offset[1] = 0;
+ offset[2] = 0;
+ a_size[0] = 1; /* size of hyperslab */
+ a_size[1] = CHUNK_SIZE;
+ a_size[2] = CHUNK_SIZE;
+ status = H5Sselect_hyperslab(memspace_id, H5S_SELECT_SET, offset, NULL,
+ a_size, NULL);
+
+ if ( status < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Sselect_hyperslab() failed.";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* setup the DXPL for collective I/O */
+ if ( pass ) {
+
+ dxpl_id = H5Pcreate(H5P_DATASET_XFER);
+
+ if ( dxpl_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pcreate(H5P_DATASET_XFER) failed.";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ if ( pass ) {
+
+ if ( H5Pset_dxpl_mpio(dxpl_id, H5FD_MPIO_COLLECTIVE) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pset_dxpl_mpio() failed.";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* read data from data sets and validate it */
+ i = 0;
+ while ( ( pass ) && ( i < DSET_SIZE ) )
+ {
+ j = 0;
+ while ( ( pass ) && ( j < DSET_SIZE ) )
+ {
+ /* select on disk hyperslab */
+ offset[0] = (hsize_t)mpi_rank;
+ offset[1] = (hsize_t)i; /* offset of hyperslab in file */
+ offset[2] = (hsize_t)j;
+ a_size[0] = (hsize_t)1;
+ a_size[1] = CHUNK_SIZE; /* size of hyperslab */
+ a_size[2] = CHUNK_SIZE;
+
+ status = H5Sselect_hyperslab(filespace_id, H5S_SELECT_SET,
+ offset, NULL, a_size, NULL);
+
+ if ( status < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "disk hyperslab create failed.";
+ }
+
+ /* read the chunk from file */
+ if ( pass ) {
+
+ status = H5Dread(dset_id, H5T_NATIVE_INT,
+ memspace_id, filespace_id,
+ dxpl_id, data_chunk);
+
+ if ( status < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "chunk read failed.";
+ }
+ }
+
+ /* validate the slab */
+ if ( pass ) {
+
+ valid_chunk = TRUE;
+ for ( k = 0; k < CHUNK_SIZE; k++ )
+ {
+ for ( l = 0; l < CHUNK_SIZE; l++ )
+ {
+ if ( data_chunk[0][k][l]
+ !=
+ ((DSET_SIZE * DSET_SIZE * mpi_rank) +
+ (DSET_SIZE * (i + k)) + j + l + dset_num) ) {
+
+ valid_chunk = FALSE;
+
+ if ( verbose ) {
+
+ HDfprintf(stdout,
+ "data_chunk[%0d][%0d] = %0d, expect %0d.\n",
+ k, l, data_chunk[0][k][l],
+ ((DSET_SIZE * DSET_SIZE * mpi_rank) +
+ (DSET_SIZE * (i + k)) + j + l + dset_num));
+ HDfprintf(stdout,
+ "dset_num = %d, i = %d, j = %d, k = %d, l = %d\n",
+ dset_num, i, j, k, l);
+ }
+ }
+ }
+ }
+
+ if ( ! valid_chunk ) {
+
+ pass = FALSE;
+ failure_mssg = "slab validation failed.";
+
+ if ( verbose ) {
+
+ fprintf(stdout,
+ "Chunk (%0d, %0d) in /dset%03d is invalid.\n",
+ i, j, dset_num);
+ }
+ }
+ }
+ j += CHUNK_SIZE;
+ }
+ i += CHUNK_SIZE;
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* close the file space */
+ if ( ( pass ) && ( H5Sclose(filespace_id) < 0 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Sclose(filespace_id) failed.";
+ }
+
+ /* close the dataset */
+ if ( ( pass ) && ( H5Dclose(dset_id) < 0 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Dclose(dset_id) failed.";
+ }
+
+ /* close the mem space */
+ if ( ( pass ) && ( H5Sclose(memspace_id) < 0 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Sclose(memspace_id) failed.";
+ }
+
+ /* close the data access property list */
+ if ( ( pass ) && ( H5Pclose(dxpl_id) < 0 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Pclose(dxpl) failed.";
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ return;
+
+} /* par_verify_dataset() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: serial_insert_cache_image()
+ *
+ * Purpose: Insert a cache image in the supplied file.
+ *
+ * To populate the cache image, validate the contents
+ * of the file before closing.
+ *
+ * On failure, print an appropriate error message and
+ * return FALSE.
+ *
+ * Return:      TRUE if successful, FALSE otherwise.
+ *
+ * Programmer: John Mainzer
+ * 3/8/17
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static hbool_t
+serial_insert_cache_image(int file_name_idx, int mpi_size )
+{
+ const char * fcn_name = "serial_insert_cache_image()";
+ char filename[512];
+ hbool_t show_progress = FALSE;
+ int cp = 0;
+ int i;
+ int num_dsets = PAR_NUM_DSETS;
+ hid_t file_id = -1;
+ H5F_t *file_ptr = NULL;
+ H5C_t *cache_ptr = NULL;
+ MPI_Comm dummy_comm = MPI_COMM_WORLD;
+ MPI_Info dummy_info = MPI_INFO_NULL;
+
+ pass = TRUE;
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 1) setup the file name */
+ if ( pass ) {
+
+ HDassert(FILENAMES[file_name_idx]);
+
+ if ( h5_fixname(FILENAMES[file_name_idx], H5P_DEFAULT,
+ filename, sizeof(filename))
+ == NULL ) {
+
+ pass = FALSE;
+ HDfprintf(stdout, "h5_fixname() failed.\n");
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 2) Open the PHDF5 file with the cache image FAPL entry.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ TRUE,
+ /* config_fsm */ FALSE,
+ /* enable_page_buffer */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr,
+ /* comm */ dummy_comm,
+ /* info */ dummy_info,
+ /* l_facc_type */ 0,
+ /* all_coll_metadata_ops */ FALSE,
+ /* coll_metadata_write */ FALSE,
+ /* md_write_strat */ 1);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 3) Validate contents of the file */
+
+ i = 0;
+ while ( ( pass ) && ( i < num_dsets ) ) {
+
+ serial_verify_dataset(i, file_id, mpi_size);
+ i++;
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 4) Close the file */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ return pass;
+
+} /* serial_insert_cache_image() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: serial_verify_dataset()
+ *
+ * Purpose: Verify the contents of a chunked dataset.
+ *
+ * On failure, set pass to FALSE, and set failure_mssg
+ * to point to an appropriate failure message.
+ *
+ * Do nothing if pass is FALSE on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 3/6/17
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+serial_verify_dataset(int dset_num,
+ hid_t file_id,
+ int mpi_size)
+{
+ const char * fcn_name = "serial_verify_dataset()";
+ char dset_name[256];
+ hbool_t show_progress = FALSE;
+ hbool_t valid_chunk;
+ hbool_t verbose = FALSE;
+ int cp = 0;
+ int i, j, k, l, m;
+ int data_chunk[1][CHUNK_SIZE][CHUNK_SIZE];
+ hsize_t dims[3];
+ hsize_t a_size[3];
+ hsize_t offset[3];
+    herr_t status;
+ hid_t memspace_id = -1;
+ hid_t dset_id = -1;
+ hid_t filespace_id = -1;
+
+ sprintf(dset_name, "/dset%03d", dset_num);
+
+ if ( show_progress ) {
+ HDfprintf(stdout, "%s: dset name = \"%s\".\n", fcn_name, dset_name);
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+ }
+
+ if ( pass ) {
+
+ /* open the dataset */
+
+ dset_id = H5Dopen2(file_id, dset_name, H5P_DEFAULT);
+
+ if ( dset_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Dopen2() failed.";
+ }
+ }
+
+ /* get the file space ID */
+ if ( pass ) {
+
+ filespace_id = H5Dget_space(dset_id);
+
+ if ( filespace_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Dget_space() failed.";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* create the mem space to be used to read */
+ if ( pass ) {
+
+ dims[0] = 1;
+ dims[1] = CHUNK_SIZE;
+ dims[2] = CHUNK_SIZE;
+ memspace_id = H5Screate_simple(3, dims, NULL);
+
+ if ( memspace_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Screate_simple() failed.";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* select in memory hyperslab */
+ if ( pass ) {
+
+ offset[0] = 0; /* offset of hyperslab in memory */
+ offset[1] = 0;
+ offset[2] = 0;
+ a_size[0] = 1; /* size of hyperslab */
+ a_size[1] = CHUNK_SIZE;
+ a_size[2] = CHUNK_SIZE;
+ status = H5Sselect_hyperslab(memspace_id, H5S_SELECT_SET, offset, NULL,
+ a_size, NULL);
+
+ if ( status < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Sselect_hyperslab() failed.";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* read data from data sets and validate it */
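+    /* the outer loop runs over the first dimension of the dataset --
+     * one plane per MPI rank in the parallel computation that created
+     * the file -- while the inner loops sweep the chunks in each plane.
+     */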
+ i = 0;
+ while ( ( pass ) && ( i < mpi_size ) )
+ {
+ j = 0;
+ while ( ( pass ) && ( j < DSET_SIZE ) )
+ {
+ k = 0;
+ while ( ( pass ) && ( k < DSET_SIZE ) )
+ {
+ /* select on disk hyperslab */
+ offset[0] = (hsize_t)i; /* offset of hyperslab in file */
+ offset[1] = (hsize_t)j; /* offset of hyperslab in file */
+ offset[2] = (hsize_t)k;
+ a_size[0] = (hsize_t)1;
+ a_size[1] = CHUNK_SIZE; /* size of hyperslab */
+ a_size[2] = CHUNK_SIZE;
+
+ status = H5Sselect_hyperslab(filespace_id, H5S_SELECT_SET,
+ offset, NULL, a_size, NULL);
+
+ if ( status < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "disk hyperslab create failed.";
+ }
+
+ /* read the chunk from file */
+ if ( pass ) {
+
+ status = H5Dread(dset_id, H5T_NATIVE_INT,
+ memspace_id, filespace_id,
+ H5P_DEFAULT, data_chunk);
+
+ if ( status < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "chunk read failed.";
+ }
+ }
+
+ /* validate the slab */
+ if ( pass ) {
+
+ valid_chunk = TRUE;
+
+ for ( l = 0; l < CHUNK_SIZE; l++ )
+ {
+ for ( m = 0; m < CHUNK_SIZE; m++ )
+ {
+ if ( data_chunk[0][l][m]
+ !=
+ ((DSET_SIZE * DSET_SIZE * i) +
+ (DSET_SIZE * (j + l)) + k + m + dset_num) ) {
+
+ valid_chunk = FALSE;
+
+ if ( verbose ) {
+
+ HDfprintf(stdout,
+ "data_chunk[%0d][%0d] = %0d, expect %0d.\n",
+                                  l, m, data_chunk[0][l][m],
+ ((DSET_SIZE * DSET_SIZE * i) +
+ (DSET_SIZE * (j + l)) + k + m + dset_num));
+ HDfprintf(stdout,
+ "dset_num = %d, i = %d, j = %d, k = %d, l = %d, m = %d\n",
+ dset_num, i, j, k, l, m);
+ }
+ }
+ }
+ }
+
+ if ( ! valid_chunk ) {
+
+ pass = FALSE;
+ failure_mssg = "slab validation failed.";
+
+ if ( verbose ) {
+
+ fprintf(stdout,
+ "Chunk (%0d, %0d) in /dset%03d is invalid.\n",
+ j, k, dset_num);
+ }
+ }
+ }
+ k += CHUNK_SIZE;
+ }
+ j += CHUNK_SIZE;
+ }
+ i++;
+ }
+
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* close the file space */
+ if ( ( pass ) && ( H5Sclose(filespace_id) < 0 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Sclose(filespace_id) failed.";
+ }
+
+ /* close the dataset */
+ if ( ( pass ) && ( H5Dclose(dset_id) < 0 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Dclose(dset_id) failed.";
+ }
+
+ /* close the mem space */
+ if ( ( pass ) && ( H5Sclose(memspace_id) < 0 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Sclose(memspace_id) failed.";
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ return;
+
+} /* serial_verify_dataset() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: parse_flags
+ *
+ * Purpose: Parse the flags passed to this program, and load the
+ * values into the supplied field.
+ *
+ * Return: Success: 1
+ * Failure: 0
+ *
+ * Programmer: J Mainzer
+ * 4/28/11
+ *
+ *-------------------------------------------------------------------------
+ */
+static hbool_t
+parse_flags(int argc, char * argv[], hbool_t * setup_ptr,
+ hbool_t * ici_ptr, int * file_idx_ptr, int * mpi_size_ptr, hbool_t display)
+{
+ const char * fcn_name = "parse_flags()";
+ const char * (ops[]) = {"setup", "ici"};
+    hbool_t success = TRUE;
+
+ HDassert(setup_ptr);
+ HDassert(*setup_ptr == FALSE);
+ HDassert(ici_ptr);
+ HDassert(*ici_ptr == FALSE);
+ HDassert(file_idx_ptr);
+ HDassert(mpi_size_ptr);
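+
+    /* accepted invocations are "t_cache_image", "t_cache_image setup",
+     * and "t_cache_image ici <m> <n>" -- see usage() below.
+     */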
+
+ if ( setup_ptr == NULL ) {
+
+ success = FALSE;
+ HDfprintf(stdout, "%s: bad arg(s) on entry.\n", fcn_name);
+ }
+
+
+ if ( ( success ) &&
+ ( ( argc != 1 ) && ( argc != 2 ) && ( argc != 4 ) ) ) {
+
+ success = FALSE;
+ usage();
+ }
+
+
+ if ( ( success ) && ( argc >= 2 ) ) {
+
+ if ( strcmp(argv[1], ops[0]) == 0 ) {
+
+ if ( argc != 2 ) {
+
+ success = FALSE;
+ usage();
+
+ } else {
+
+ *setup_ptr = TRUE;
+
+ }
+ } else if ( strcmp(argv[1], ops[1]) == 0 ) {
+
+ if ( argc != 4 ) {
+
+ success = FALSE;
+ usage();
+
+ } else {
+
+ *ici_ptr = TRUE;
+ *file_idx_ptr = atoi(argv[2]);
+ *mpi_size_ptr = atoi(argv[3]);
+
+ }
+ }
+ }
+
+ if ( ( success ) && ( display ) ) {
+
+ if ( *setup_ptr )
+
+ HDfprintf(stdout, "t_cache_image setup\n");
+
+ else if ( *ici_ptr )
+
+ HDfprintf(stdout, "t_cache_image ici %d %d\n",
+ *file_idx_ptr, *mpi_size_ptr);
+
+ else
+
+ HDfprintf(stdout, "t_cache_image\n");
+ }
+
+ return(success);
+
+} /* parse_flags() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: usage
+ *
+ * Purpose: Display a brief message describing the purpose and use
+ * of the program.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 4/28/11
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+void
+usage(void)
+{
+ const char * s[] =
+ {
+ "\n",
+ "t_cache_image:\n",
+ "\n",
+ "Run the parallel cache image tests. \n"
+ "\n"
+ "In general, this program is run via MPI. However, at present, files\n"
+ "with cache images can only be constructed by serial processes.\n",
+ "\n",
+ "To square this circle, one process in the parallel computation \n"
+ "forks a serial version of the test program to handle this detail.\n",
+ "The \"setup\" parameter indicates that t_cache_image is being \n",
+        "invoked for this purpose.\n",
+ "\n",
+ "Similarly, only a serial process can add a cache image to an\n",
+ "existing file.\n",
+ "\n",
+ "Here again, one process forks a serial version of the test program\n",
+ "with the \"ici\" parameter.\n"
+ "\n",
+ "usage: t_cache_image [setup|ici m n]\n",
+ "\n",
+ "where:\n",
+ "\n",
+ " setup parameter forces creation of test file\n",
+ "\n",
+ " ici parameter forces insertion of a cache image into the \n",
+        "          m th test file, created by a parallel computation with\n",
+        "          n processes.\n",
+ "\n",
+ "Returns 0 on success, 1 on failure.\n",
+ "\n",
+ NULL,
+ };
+ int i = 0;
+
+ while(s[i] != NULL) {
+ fprintf(stdout, "%s", s[i]);
+ i++;
+ }
+
+ return;
+} /* usage() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: verify_data_sets()
+ *
+ * Purpose: If pass is TRUE on entry, verify that the data sets in the
+ * file exist and contain the expected data.
+ *
+ * Note that these data sets were created by
+ * create_data_sets() above. Thus any changes in that
+ * function must be reflected in this function, and
+ *              vice-versa.
+ *
+ * On failure, set pass to FALSE, and set failure_mssg
+ * to point to an appropriate failure message.
+ *
+ * Do nothing if pass is FALSE on entry.
+ *
+ * Return: void
+ *
+ * Programmer: John Mainzer
+ * 7/15/15
+ *
+ * Modifications:
+ *
+ * Added min_dset and max_dset parameters and supporting
+ * code. This allows the caller to specify a range of
+ * datasets to verify.
+ * JRM -- 8/20/15
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static void
+verify_data_sets(hid_t file_id, int min_dset, int max_dset)
+{
+ const char * fcn_name = "verify_data_sets()";
+ char dset_name[64];
+ hbool_t show_progress = FALSE;
+ hbool_t valid_chunk;
+ hbool_t verbose = FALSE;
+ int cp = 0;
+ int i, j, k, l, m;
+ int data_chunk[CHUNK_SIZE][CHUNK_SIZE];
+ herr_t status;
+ hid_t filespace_ids[MAX_NUM_DSETS];
+ hid_t memspace_id = -1;
+ hid_t dataset_ids[MAX_NUM_DSETS];
+ hsize_t dims[2];
+ hsize_t a_size[2];
+ hsize_t offset[2];
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ HDassert(0 <= min_dset);
+ HDassert(min_dset <= max_dset);
+ HDassert(max_dset < MAX_NUM_DSETS);
+
+ /* open the datasets */
+
+ if ( pass ) {
+
+ i = min_dset;
+
+ while ( ( pass ) && ( i <= max_dset ) )
+ {
+ /* open the dataset */
+ if ( pass ) {
+
+ sprintf(dset_name, "/dset%03d", i);
+ dataset_ids[i] = H5Dopen2(file_id, dset_name, H5P_DEFAULT);
+
+ if ( dataset_ids[i] < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Dopen2() failed.";
+ }
+ }
+
+ /* get the file space ID */
+ if ( pass ) {
+
+ filespace_ids[i] = H5Dget_space(dataset_ids[i]);
+
+ if ( filespace_ids[i] < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Dget_space() failed.";
+ }
+ }
+
+ i++;
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ /* create the mem space to be used to read and write chunks */
+ if ( pass ) {
+
+ dims[0] = CHUNK_SIZE;
+ dims[1] = CHUNK_SIZE;
+ memspace_id = H5Screate_simple(2, dims, NULL);
+
+ if ( memspace_id < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Screate_simple() failed.";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ /* select in memory hyperslab */
+ if ( pass ) {
+
+ offset[0] = 0; /*offset of hyperslab in memory*/
+ offset[1] = 0;
+ a_size[0] = CHUNK_SIZE; /*size of hyperslab*/
+ a_size[1] = CHUNK_SIZE;
+ status = H5Sselect_hyperslab(memspace_id, H5S_SELECT_SET, offset, NULL,
+ a_size, NULL);
+
+ if ( status < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Sselect_hyperslab() failed.";
+ }
+ }
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+
+ /* read data from data sets and validate it */
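+    /* the expected value of each cell is a function of the dataset
+     * index and the cell's coordinates in the dataset -- it must match
+     * the values written by create_data_sets().
+     */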
+ i = 0;
+ while ( ( pass ) && ( i < DSET_SIZE ) )
+ {
+ j = 0;
+ while ( ( pass ) && ( j < DSET_SIZE ) )
+ {
+ m = min_dset;
+ while ( ( pass ) && ( m <= max_dset ) )
+ {
+
+ /* select on disk hyperslab */
+ offset[0] = (hsize_t)i; /* offset of hyperslab in file */
+ offset[1] = (hsize_t)j;
+ a_size[0] = CHUNK_SIZE; /* size of hyperslab */
+ a_size[1] = CHUNK_SIZE;
+ status = H5Sselect_hyperslab(filespace_ids[m], H5S_SELECT_SET,
+ offset, NULL, a_size, NULL);
+
+ if ( status < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "disk hyperslab create failed.";
+ }
+
+ /* read the chunk from file */
+ if ( pass ) {
+
+ status = H5Dread(dataset_ids[m], H5T_NATIVE_INT,
+ memspace_id, filespace_ids[m],
+ H5P_DEFAULT, data_chunk);
+
+ if ( status < 0 ) {
+
+ pass = FALSE;
+                    failure_mssg = "chunk read failed.";
+ }
+ }
+
+ /* validate the slab */
+ if ( pass ) {
+
+ valid_chunk = TRUE;
+ for ( k = 0; k < CHUNK_SIZE; k++ )
+ {
+ for ( l = 0; l < CHUNK_SIZE; l++ )
+ {
+ if ( data_chunk[k][l]
+ !=
+ ((DSET_SIZE * DSET_SIZE * m) +
+ (DSET_SIZE * (i + k)) + j + l) ) {
+
+ valid_chunk = FALSE;
+
+ if ( verbose ) {
+
+ HDfprintf(stdout,
+ "data_chunk[%0d][%0d] = %0d, expect %0d.\n",
+ k, l, data_chunk[k][l],
+ ((DSET_SIZE * DSET_SIZE * m) +
+ (DSET_SIZE * (i + k)) + j + l));
+ HDfprintf(stdout,
+ "m = %d, i = %d, j = %d, k = %d, l = %d\n",
+ m, i, j, k, l);
+ }
+ }
+ }
+ }
+
+ if ( ! valid_chunk ) {
+
+ pass = FALSE;
+ failure_mssg = "slab validation failed.";
+
+ if ( verbose ) {
+
+ fprintf(stdout,
+ "Chunk (%0d, %0d) in /dset%03d is invalid.\n",
+ i, j, m);
+ }
+ }
+ }
+ m++;
+ }
+ j += CHUNK_SIZE;
+ }
+ i += CHUNK_SIZE;
+ }
+
+ if ( show_progress ) HDfprintf(stdout, "%s: cp = %d.\n", fcn_name, cp++);
+
+ /* close the file spaces */
+ i = min_dset;
+ while ( ( pass ) && ( i <= max_dset ) )
+ {
+ if ( H5Sclose(filespace_ids[i]) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Sclose() failed.";
+ }
+ i++;
+ }
+
+
+ /* close the datasets */
+ i = min_dset;
+ while ( ( pass ) && ( i <= max_dset ) )
+ {
+ if ( H5Dclose(dataset_ids[i]) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Dclose() failed.";
+ }
+ i++;
+ }
+
+ /* close the mem space */
+ if ( pass ) {
+
+ if ( H5Sclose(memspace_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Sclose(memspace_id) failed.";
+ }
+ }
+
+ return;
+
+} /* verify_data_sets() */
+
+
+/****************************************************************************/
+/******************************* Test Functions *****************************/
+/****************************************************************************/
+
+/*-------------------------------------------------------------------------
+ * Function: verify_cache_image_RO()
+ *
+ * Purpose: Verify that a HDF5 file containing a cache image is
+ * opened R/O and read correctly by PHDF5 with the specified
+ * metadata write strategy.
+ *
+ * Basic cycle of operation is as follows:
+ *
+ * 1) Open the test file created at the beginning of this
+ * test read only.
+ *
+ * Verify that the file contains a cache image.
+ *
+ * Verify that only process 0 reads the cache image.
+ *
+ * Verify that all other processes receive the cache
+ * image block from process 0.
+ *
+ * 2) Verify that the file contains the expected data.
+ *
+ * 3) Close the file.
+ *
+ * 4) Open the file R/O, and verify that it still contains
+ * a cache image.
+ *
+ * 5) Verify that the file contains the expected data.
+ *
+ * 6) Close the file.
+ *
+ * Return:      0 on success, 1 on failure.
+ *
+ * Programmer: John Mainzer
+ * 3/11/17
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+verify_cache_image_RO(int file_name_id, int md_write_strat, int mpi_rank)
+{
+ const char * fcn_name = "verify_cache_image_RO()";
+ char filename[512];
+ hbool_t show_progress = FALSE;
+ hid_t file_id = -1;
+ H5F_t *file_ptr = NULL;
+ H5C_t *cache_ptr = NULL;
+ int cp = 0;
+
+ pass = TRUE;
+
+ if ( mpi_rank == 0 ) {
+
+ switch(md_write_strat) {
+
+ case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
+ TESTING("parallel CI load test -- proc0 md write -- R/O");
+ break;
+
+ case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
+ TESTING("parallel CI load test -- dist md write -- R/O");
+ break;
+
+ default:
+                TESTING("parallel CI load test -- unknown md write -- R/O");
+ pass = FALSE;
+ break;
+ }
+ }
+
+ show_progress = ( ( show_progress ) && ( mpi_rank == 0 ) );
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* setup the file name */
+ if ( pass ) {
+
+ if ( h5_fixname(FILENAMES[file_name_id], H5P_DEFAULT,
+ filename, sizeof(filename)) == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "h5_fixname() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 1) Open the test file created at the beginning of this test.
+ *
+ * Verify that the file contains a cache image.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ TRUE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* enable_page_buffer */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr,
+ /* comm */ MPI_COMM_WORLD,
+ /* info */ MPI_INFO_NULL,
+ /* l_facc_type */ FACC_MPIO,
+ /* all_coll_metadata_ops */ FALSE,
+ /* coll_metadata_write */ FALSE,
+ /* md_write_strat */ md_write_strat);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 2) Verify that the file contains the expected data.
+ *
+ * Verify that only process 0 reads the cache image.
+ *
+ * Verify that all other processes receive the cache
+ * image block from process 0.
+ */
+
+ if ( pass ) {
+
+ verify_data_sets(file_id, 0, MAX_NUM_DSETS - 1);
+ }
+
+ /* Verify that only process 0 reads the cache image. */
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( ( ( mpi_rank == 0 ) && ( cache_ptr->images_read != 1 ) ) ||
+ ( ( mpi_rank > 0 ) && ( cache_ptr->images_read != 0 ) ) ) {
+
+ pass = FALSE;
+ failure_mssg = "unexpected images_read.";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* Verify that all other processes receive the cache image block
+ * from process 0.
+ *
+     * Since we have already verified that only process 0 has read the
+ * image, it is sufficient to verify that the image was loaded on
+ * all processes.
+ */
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded != 1 ) {
+
+ pass = FALSE;
+            failure_mssg = "Image not loaded?";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 3) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+    /* 4) Open the file R/O again, and verify that it still contains
+     *    a cache image.
+     */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ TRUE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* enable_page_buffer */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr,
+ /* comm */ MPI_COMM_WORLD,
+ /* info */ MPI_INFO_NULL,
+ /* l_facc_type */ FACC_MPIO,
+ /* all_coll_metadata_ops */ FALSE,
+ /* coll_metadata_write */ FALSE,
+ /* md_write_strat */ md_write_strat);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 5) Verify that the file contains the expected data. */
+
+ if ( pass ) {
+
+ verify_data_sets(file_id, 0, MAX_NUM_DSETS - 1);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded != 1 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block not loaded(2).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+
+ /* 6) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* report results */
+ if ( mpi_rank == 0 ) {
+
+ if ( pass ) {
+
+ PASSED();
+
+ } else {
+
+ H5_FAILED();
+
+ if ( show_progress )
+                HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n",
+                          fcn_name, failure_mssg);
+ }
+ }
+
+
+ return !pass;
+
+} /* verify_cache_image_RO() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: verify_cache_image_RW()
+ *
+ * Purpose: Verify that a HDF5 file containing a cache image is
+ * opened and read correctly by PHDF5 with the specified
+ * metadata write strategy.
+ *
+ * Basic cycle of operation is as follows:
+ *
+ * 1) Open the test file created at the beginning of this
+ * test.
+ *
+ * Verify that the file contains a cache image.
+ *
+ * 2) Verify that the file contains the expected data.
+ *
+ * Verify that only process 0 reads the cache image.
+ *
+ * Verify that all other processes receive the cache
+ * image block from process 0.
+ *
+ *
+ * 3) Close the file.
+ *
+ * 4) Open the file, and verify that it doesn't contain
+ * a cache image.
+ *
+ * 5) Verify that the file contains the expected data.
+ *
+ * 6) Close the file.
+ *
+ * 7) Delete the file.
+ *
+ * Return:      0 on success, 1 on failure.
+ *
+ * Programmer: John Mainzer
+ * 1/25/17
+ *
+ * Modifications:
+ *
+ * None.
+ *
+ *-------------------------------------------------------------------------
+ */
+
+static unsigned
+verify_cache_image_RW(int file_name_id, int md_write_strat, int mpi_rank)
+{
+    const char * fcn_name = "verify_cache_image_RW()";
+ char filename[512];
+ hbool_t show_progress = FALSE;
+ hid_t file_id = -1;
+ H5F_t *file_ptr = NULL;
+ H5C_t *cache_ptr = NULL;
+ int cp = 0;
+
+ pass = TRUE;
+
+ if ( mpi_rank == 0 ) {
+
+ switch(md_write_strat) {
+
+ case H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY:
+ TESTING("parallel CI load test -- proc0 md write -- R/W");
+ break;
+
+ case H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED:
+ TESTING("parallel CI load test -- dist md write -- R/W");
+ break;
+
+ default:
+ TESTING("parallel CI load test -- unknown md write -- R/W");
+ pass = FALSE;
+ break;
+ }
+ }
+
+ show_progress = ( ( show_progress ) && ( mpi_rank == 0 ) );
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* setup the file name */
+ if ( pass ) {
+
+ if ( h5_fixname(FILENAMES[file_name_id], H5P_DEFAULT,
+ filename, sizeof(filename)) == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "h5_fixname() failed.\n";
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 1) Open the test file created at the beginning of this test.
+ *
+ * Verify that the file contains a cache image.
+ *
+ * Verify that only process 0 reads the cache image.
+ *
+ * Verify that all other processes receive the cache
+ * image block from process 0.
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* enable_page_buffer */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr,
+ /* comm */ MPI_COMM_WORLD,
+ /* info */ MPI_INFO_NULL,
+ /* l_facc_type */ FACC_MPIO,
+ /* all_coll_metadata_ops */ FALSE,
+ /* coll_metadata_write */ FALSE,
+ /* md_write_strat */ md_write_strat);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 2) Verify that the file contains the expected data.
+ *
+ * Verify that only process 0 reads the cache image.
+ *
+ * Verify that all other processes receive the cache
+ * image block from process 0.
+ */
+ if ( pass ) {
+
+ verify_data_sets(file_id, 0, MAX_NUM_DSETS - 1);
+ }
+
+ /* Verify that only process 0 reads the cache image. */
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( ( ( mpi_rank == 0 ) && ( cache_ptr->images_read != 1 ) ) ||
+ ( ( mpi_rank > 0 ) && ( cache_ptr->images_read != 0 ) ) ) {
+
+ pass = FALSE;
+ failure_mssg = "unexpected images_read.";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* Verify that all other processes receive the cache image block
+ * from process 0.
+ *
+     * Since we have already verified that only process 0 has read the
+ * image, it is sufficient to verify that the image was loaded on
+ * all processes.
+ */
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded != 1 ) {
+
+ pass = FALSE;
+            failure_mssg = "Image not loaded?";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 3) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 4) Open the file, and verify that it doesn't contain a cache image. */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* enable_page_buffer */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr,
+ /* comm */ MPI_COMM_WORLD,
+ /* info */ MPI_INFO_NULL,
+ /* l_facc_type */ FACC_MPIO,
+ /* all_coll_metadata_ops */ FALSE,
+ /* coll_metadata_write */ FALSE,
+ /* md_write_strat */ md_write_strat);
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 5) Verify that the file contains the expected data. */
+
+ if ( pass ) {
+
+ verify_data_sets(file_id, 0, MAX_NUM_DSETS - 1);
+ }
+
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded != 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "metadata cache image block loaded(1).";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+
+ /* 6) Close the file. */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+
+ }
+ }
+
+ if ( show_progress )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 7) Delete the file. */
+
+ if ( pass ) {
+
+ /* wait for everyone to close the file */
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ if ( ( mpi_rank == 0 ) && ( HDremove(filename) < 0 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "HDremove() failed.\n";
+ }
+ }
+
+
+ /* report results */
+ if ( mpi_rank == 0 ) {
+
+ if ( pass ) {
+
+ PASSED();
+
+ } else {
+
+ H5_FAILED();
+
+ if ( show_progress )
+                HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n",
+                          fcn_name, failure_mssg);
+ }
+ }
+
+
+ return !pass;
+
+} /* verify_cache_image_RW() */
+
+
+/*****************************************************************************
+ *
+ * Function: smoke_check_1()
+ *
+ * Purpose: Initial smoke check to verify correct behaviour of cache
+ *              image in combination with parallel HDF5.
+ *
+ * As cache image is currently disabled in the parallel case,
+ * we construct a test file in parallel, verify it in serial
+ * and generate a cache image in passing, and then verify
+ * it again in parallel.
+ *
+ * In passing, also verify that page buffering is silently
+ * disabled in the parallel case. Needless to say, this part
+ * of the test will have to be re-worked when and if page
+ * buffering is supported in parallel.
+ *
+ * Return: Success: TRUE
+ *
+ * Failure: FALSE
+ *
+ * Programmer: JRM -- 3/6/17
+ *
+ *****************************************************************************/
+static hbool_t
+smoke_check_1(MPI_Comm mpi_comm, MPI_Info mpi_info, int mpi_rank, int mpi_size)
+{
+ const char * fcn_name = "smoke_check_1()";
+ char filename[512];
+ hbool_t show_progress = FALSE;
+ hid_t file_id = -1;
+ H5F_t *file_ptr = NULL;
+ H5C_t *cache_ptr = NULL;
+ int cp = 0;
+ int i;
+ int num_dsets = PAR_NUM_DSETS;
+ int test_file_index = 2;
+ h5_stat_size_t file_size;
+
+ pass = TRUE;
+
+ if ( mpi_rank == 0 ) {
+
+ TESTING("parallel cache image smoke check 1");
+ }
+
+ if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* setup the file name */
+ if ( pass ) {
+
+ HDassert(FILENAMES[test_file_index]);
+
+ if ( h5_fixname(FILENAMES[test_file_index], H5P_DEFAULT,
+ filename, sizeof(filename))
+ == NULL ) {
+
+ pass = FALSE;
+ failure_mssg = "h5_fixname() failed.\n";
+ }
+ }
+
+ if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 1) Create a PHDF5 file without the cache image FAPL entry.
+ *
+ * Verify that a cache image is not requested
+ */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ TRUE,
+ /* mdci_sbem_expected */ FALSE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ TRUE,
+ /* enable_page_buffer */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr,
+ /* comm */ mpi_comm,
+ /* info */ mpi_info,
+ /* l_facc_type */ FACC_MPIO,
+ /* all_coll_metadata_ops */ FALSE,
+ /* coll_metadata_write */ TRUE,
+ /* md_write_strat */ 1);
+ }
+
+ if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 2) Create datasets in the file */
+
+ i = 0;
+ while ( ( pass ) && ( i < num_dsets ) ) {
+
+ par_create_dataset(i, file_id, mpi_rank, mpi_size);
+ i++;
+ }
+
+ if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 3) Verify the datasets in the file */
+
+ i = 0;
+ while ( ( pass ) && ( i < num_dsets ) ) {
+
+ par_verify_dataset(i, file_id, mpi_rank);
+ i++;
+ }
+
+
+ /* 4) Close the file */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.\n";
+
+ }
+ }
+
+ if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+    /* 5) Insert a cache image into the file */
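+    /* since cache image is not currently supported in parallel, this
+     * forks a serial instance of the test program to add the image --
+     * see par_insert_cache_image() above.
+     */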
+
+ if ( pass ) {
+
+ par_insert_cache_image(test_file_index, mpi_rank, mpi_size);
+ }
+
+ if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 6) Open the file R/O */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ TRUE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* enable_page_buffer */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr,
+ /* comm */ mpi_comm,
+ /* info */ mpi_info,
+ /* l_facc_type */ FACC_MPIO,
+ /* all_coll_metadata_ops */ FALSE,
+ /* coll_metadata_write */ TRUE,
+ /* md_write_strat */ 1);
+ }
+
+ if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 7) Verify the datasets in the file backwards
+ *
+ * Verify that only process 0 reads the cache image.
+ *
+ * Verify that all other processes receive the cache
+ * image block from process 0.
+ */
+
+ i = num_dsets - 1;
+ while ( ( pass ) && ( i >= 0 ) ) {
+
+ par_verify_dataset(i, file_id, mpi_rank);
+ i--;
+ }
+
+ if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* Verify that only process 0 reads the cache image. */
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( ( ( mpi_rank == 0 ) && ( cache_ptr->images_read != 1 ) ) ||
+ ( ( mpi_rank > 0 ) && ( cache_ptr->images_read != 0 ) ) ) {
+
+ pass = FALSE;
+ failure_mssg = "unexpected images_read.";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* Verify that all other processes receive the cache image block
+ * from process 0.
+ *
+ * Since we have already verified that only process 0 has read the
+ * image, it is sufficient to verify that the image was loaded on
+ * all processes.
+ */
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded != 1 ) {
+
+ pass = FALSE;
+ failure_mssg = "Image not loaded?.";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 8) Close the file */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.";
+
+ }
+ }
+
+ if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 9) Open the file */
+
+ if ( pass ) {
+
+ open_hdf5_file(/* create_file */ FALSE,
+ /* mdci_sbem_expected */ TRUE,
+ /* read_only */ FALSE,
+ /* set_mdci_fapl */ FALSE,
+ /* config_fsm */ FALSE,
+ /* enable_page_buffer */ FALSE,
+ /* hdf_file_name */ filename,
+ /* cache_image_flags */ H5C_CI__ALL_FLAGS,
+ /* file_id_ptr */ &file_id,
+ /* file_ptr_ptr */ &file_ptr,
+ /* cache_ptr_ptr */ &cache_ptr,
+ /* comm */ mpi_comm,
+ /* info */ mpi_info,
+ /* l_facc_type */ FACC_MPIO,
+ /* all_coll_metadata_ops */ FALSE,
+ /* coll_metadata_write */ TRUE,
+ /* md_write_strat */ 1);
+ }
+
+ if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 10) Verify the datasets in the file
+ *
+ * Verify that only process 0 reads the cache image.
+ *
+ * Verify that all other processes receive the cache
+ * image block from process 0.
+ */
+
+ i = 0;
+ while ( ( pass ) && ( i < num_dsets ) ) {
+
+ par_verify_dataset(i, file_id, mpi_rank);
+ i++;
+ }
+
+ if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* Verify that only process 0 reads the cache image. */
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( ( ( mpi_rank == 0 ) && ( cache_ptr->images_read != 1 ) ) ||
+ ( ( mpi_rank > 0 ) && ( cache_ptr->images_read != 0 ) ) ) {
+
+ pass = FALSE;
+ failure_mssg = "unexpected images_read.";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+ /* Verify that all other processes receive the cache image block
+ * from process 0.
+ *
+ * Since we have already verified that only process 0 has read the
+ * image, it is sufficient to verify that the image was loaded on
+ * all processes.
+ */
+#if H5C_COLLECT_CACHE_STATS
+ if ( pass ) {
+
+ if ( cache_ptr->images_loaded != 1 ) {
+
+ pass = FALSE;
+ failure_mssg = "Image not loaded?.";
+ }
+ }
+#endif /* H5C_COLLECT_CACHE_STATS */
+
+ if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 11) Delete the datasets in the file */
+
+ i = 0;
+ while ( ( pass ) && ( i < num_dsets ) ) {
+
+ par_delete_dataset(i, file_id, mpi_rank);
+ i++;
+ }
+
+ if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 12) Close the file */
+
+ if ( pass ) {
+
+ if ( H5Fclose(file_id) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "H5Fclose() failed.";
+
+ }
+ }
+
+ if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 13) Get the size of the file. Verify that it is less
+ * than 20 KB. Without deletions and persistent free
+ * space managers, the file size is about 30 MB, so this
+ * is sufficient to verify that the persistent free
+ * space managers are more or less doing their job.
+ *
+ * Note that this test will have to change if we use
+ * a larger page size.
+ */
+ if ( pass ) {
+
+ if ( ( file_size = h5_get_file_size(filename, H5P_DEFAULT) ) < 0 ) {
+
+ pass = FALSE;
+ failure_mssg = "h5_get_file_size() failed.";
+
+ } else if ( file_size > 20 * 1024 ) {
+
+ pass = FALSE;
+ failure_mssg = "unexpectedly large file size.";
+ }
+ }
+
+ if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* 14) Delete the file */
+
+ if ( pass ) {
+
+ /* wait for everyone to close the file */
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ if ( ( mpi_rank == 0 ) && ( HDremove(filename) < 0 ) ) {
+
+ pass = FALSE;
+ failure_mssg = "HDremove() failed.\n";
+ }
+ }
+
+ if ( ( mpi_rank == 0 ) && ( show_progress ) )
+ HDfprintf(stdout, "%s: cp = %d, pass = %d.\n", fcn_name, cp++, pass);
+
+
+ /* report results */
+ if ( mpi_rank == 0 ) {
+
+ if ( pass ) {
+
+ PASSED();
+
+ } else {
+
+ H5_FAILED();
+
+ HDfprintf(stdout, "%s: failure_mssg = \"%s\"\n",
+ fcn_name, failure_mssg);
+ }
+ }
+
+ return !pass;
+
+} /* smoke_check_1() */
+
+
+/*-------------------------------------------------------------------------
+ * Function: main
+ *
+ * Purpose: Run parallel tests on the cache image feature.
+ *
+ * At present, cache image is disabled in parallel, and
+ * thus these tests are restricted to verifying that a
+ * file with a cache image can be opened in the parallel
+ * case, and verifying that instructions to create a
+ * cache image are ignored in the parallel case.
+ *
+ * WARNING: This test uses fork() and execv(), and
+ * therefore will not run on Windows.
+ *
+ * Return: Success: 0
+ *
+ * Failure: 1
+ *
+ * Programmer: John Mainzer
+ * 1/25/17
+ *
+ * Modifications:
+ *
+ *-------------------------------------------------------------------------
+ */
+
+int
+main(int argc, char **argv)
+{
+ hbool_t setup = FALSE;
+ hbool_t ici = FALSE;
+ unsigned nerrs = 0;
+ MPI_Comm comm = MPI_COMM_WORLD;
+ MPI_Info info = MPI_INFO_NULL;
+ int file_idx;
+ int i;
+ int mpi_size;
+ int mpi_rank;
+
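+ /* When invoked with the appropriate command line flags, this program
+ * runs in one of two auxiliary modes and then exits: "setup" constructs
+ * the test files, and the insert-cache-image mode adds a cache image to
+ * one of them. Otherwise, the parallel tests below are run.
+ */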
+ if ( ! parse_flags(argc, argv, &setup, &ici, &file_idx, &mpi_size, FALSE) )
+ exit(1); /* exit now if unable to parse flags */
+
+ if ( setup ) { /* construct test files and exit */
+
+ H5open();
+ HDfprintf(stdout, "Constructing test files: \n");
+ HDfflush(stdout);
+
+ i = 0;
+ while ( ( FILENAMES[i] != NULL ) && ( i < TEST_FILES_TO_CONSTRUCT ) ) {
+
+ HDfprintf(stdout, " writing %s ... ", FILENAMES[i]);
+ HDfflush(stdout);
+ construct_test_file(i);
+
+ if ( pass ) {
+
+ printf("done.\n");
+ HDfflush(stdout);
+
+ } else {
+
+ printf("failed.\n");
+ exit(1);
+ }
+ i++;
+ }
+
+ HDfprintf(stdout, "Test file construction complete.\n");
+ exit(0);
+
+ } else if ( ici ) {
+
+ if ( serial_insert_cache_image(file_idx, mpi_size) ) {
+
+ exit(0);
+
+ } else {
+
+ HDfprintf(stderr, "\n\nCache image insertion failed.\n");
+ HDfprintf(stderr, " failure mssg = \"%s\"\n", failure_mssg);
+ exit(1);
+ }
+ }
+
+ HDassert(!setup);
+ HDassert(!ici);
+
+ MPI_Init(&argc, &argv);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+
+ /* Attempt to turn off atexit post processing so that if errors occur
+ * during the test and the process is aborted, it does not hang in the
+ * atexit post processing, which may try to make MPI calls that no
+ * longer work at that point.
+ */
+ if ( H5dont_atexit() < 0 ) {
+ printf("%d: Failed to turn off atexit processing. Continue.\n",
+ mpi_rank);
+ }
+
+ H5open();
+
+ if ( mpi_rank == 0 ) {
+ printf("===================================\n");
+ printf("Parallel metadata cache image tests\n");
+ printf(" mpi_size = %d\n", mpi_size);
+ printf("===================================\n");
+ }
+
+ if ( mpi_size < 2 ) {
+
+ if ( mpi_rank == 0 ) {
+
+ printf(" Need at least 2 processes. Exiting.\n");
+ }
+ goto finish;
+ }
+
+ if ( mpi_rank == 0 ) { /* create test files */
+
+ int child_status;
+ pid_t child_pid;
+
+ child_pid = fork();
+
+ if ( child_pid == 0 ) { /* this is the child process */
+
+ /* fun and games to shut up the compiler */
+ char param0[32] = "t_cache_image";
+ char param1[32] = "setup";
+ char * child_argv[] = {param0, param1, NULL};
+
+ /* we may need to play with the path here */
+ if ( execv("t_cache_image", child_argv) == -1 ) {
+
+ HDfprintf(stdout,
+ "execl() of setup process failed. errno = %d(%s)\n",
+ errno, strerror(errno));
+ exit(1);
+ }
+
+ } else if ( child_pid != -1 ) {
+ /* this is the parent process -- wait until child is done */
+ if ( -1 == waitpid(child_pid, &child_status, WUNTRACED)) {
+
+ HDfprintf(stdout, "can't wait on setup process.\n");
+
+ } else if ( ! WIFEXITED(child_status) ) {
+
+ HDfprintf(stdout, "setup process hasn't exitied.\n");
+
+ } else if ( WEXITSTATUS(child_status) != 0 ) {
+
+ HDfprintf(stdout, "setup process reports failure.\n");
+
+ } else {
+
+ HDfprintf(stdout,
+ "testfile construction complete -- proceeding with tests.\n");
+ }
+ } else { /* fork failed */
+
+ HDfprintf(stdout, "can't create process to construct test file.\n");
+ }
+ }
+
+ /* can't start test until test files exist */
+ MPI_Barrier(MPI_COMM_WORLD);
+
+
+ nerrs += verify_cache_image_RO(0,
+ H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY, mpi_rank);
+#if 1
+ nerrs += verify_cache_image_RO(1,
+ H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED, mpi_rank);
+ nerrs += verify_cache_image_RW(0,
+ H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY, mpi_rank);
+ nerrs += verify_cache_image_RW(1,
+ H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED, mpi_rank);
+ nerrs += smoke_check_1(comm, info, mpi_rank, mpi_size);
+#endif
+finish:
+
+ /* make sure all processes are finished before final report, cleanup
+ * and exit.
+ */
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ if ( mpi_rank == 0 ) { /* only process 0 reports */
+ sleep(10);
+ printf("===================================\n");
+ if ( nerrs > 0 ) {
+ printf("***metadata cache image tests detected %d failures***\n",
+ nerrs);
+ }
+ else {
+ printf("metadata cache image tests finished with no failures\n");
+ }
+ printf("===================================\n");
+ }
+
+ /* takedown_derived_types(); */
+
+ /* close HDF5 library */
+ H5close();
+
+ /* MPI_Finalize must be called AFTER H5close which may use MPI calls */
+ MPI_Finalize();
+
+ /* cannot just return (nerrs) because the exit code is limited to 1 byte */
+ return(nerrs > 0);
+
+} /* main() */
+
diff --git a/testpar/t_chunk_alloc.c b/testpar/t_chunk_alloc.c
index 05fd2fc..2340ae0 100644
--- a/testpar/t_chunk_alloc.c
+++ b/testpar/t_chunk_alloc.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/testpar/t_coll_chunk.c b/testpar/t_coll_chunk.c
index ab9de09..c6fa3d4 100644
--- a/testpar/t_coll_chunk.c
+++ b/testpar/t_coll_chunk.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "testphdf5.h"
diff --git a/testpar/t_dset.c b/testpar/t_dset.c
index ceacb3a..b952bf3 100644
--- a/testpar/t_dset.c
+++ b/testpar/t_dset.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/testpar/t_file.c b/testpar/t_file.c
index b2f1d5e..1b6978f 100644
--- a/testpar/t_file.c
+++ b/testpar/t_file.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -19,6 +17,30 @@
#include "testphdf5.h"
+#include "H5PBprivate.h"
+#include "H5Iprivate.h"
+
+/*
+ * This file needs to access private information from the H5F package.
+ */
+#define H5C_FRIEND /*suppress error about including H5Cpkg */
+#include "H5Cpkg.h"
+#define H5AC_FRIEND /*suppress error about including H5ACpkg */
+#include "H5ACpkg.h"
+#define H5MF_FRIEND /*suppress error about including H5MFpkg */
+#include "H5MFpkg.h"
+#define H5F_FRIEND /*suppress error about including H5Fpkg */
+#define H5F_TESTING
+#include "H5Fpkg.h"
+
+#define NUM_DSETS 5
+
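+/* mpi_size and mpi_rank are file scope so that the create_file() and
+ * open_file() helpers below can use them.
+ */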
+int mpi_size, mpi_rank;
+
+static int create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_strategy);
+static int open_file(const char *filename, hid_t fapl, int metadata_write_strategy,
+ hsize_t page_size, size_t page_buffer_size);
+
/*
* test file access by communicator besides COMM_WORLD.
* Split COMM_WORLD into two, one (even_comm) contains the original
@@ -33,7 +55,6 @@
void
test_split_comm_access(void)
{
- int mpi_size, mpi_rank;
MPI_Comm comm;
MPI_Info info = MPI_INFO_NULL;
int is_old, mrc;
@@ -95,6 +116,595 @@ test_split_comm_access(void)
}
void
+test_page_buffer_access(void)
+{
+ hid_t file_id = -1; /* File ID */
+ hid_t fcpl, fapl, fapl_self;
+ hid_t dxpl_id = H5P_DATASET_XFER_DEFAULT;
+ size_t page_count = 0;
+ int i, num_elements = 200;
+ haddr_t raw_addr, meta_addr;
+ int *data;
+ H5F_t *f = NULL;
+ herr_t ret; /* generic return value */
+ const char *filename;
+
+ MPI_Comm_rank(MPI_COMM_WORLD, &mpi_rank);
+ MPI_Comm_size(MPI_COMM_WORLD, &mpi_size);
+
+ filename = (const char *)GetTestParameters();
+
+ if (VERBOSE_MED)
+ printf("Page Buffer Usage in Parallel %s\n", filename);
+
+ fapl = create_faccess_plist(MPI_COMM_WORLD, MPI_INFO_NULL, facc_type);
+ VRFY((fapl >= 0), "create_faccess_plist succeeded");
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ VRFY((fcpl >= 0), "");
+
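+ /* use the paged aggregation file space strategy on the FCPL and
+ * enable a page buffer on the FAPL
+ */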
+ ret = H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 1, (hsize_t)0);
+ VRFY((ret == 0), "");
+ ret = H5Pset_file_space_page_size(fcpl, sizeof(int)*100);
+ VRFY((ret == 0), "");
+ ret = H5Pset_page_buffer_size(fapl, sizeof(int)*100000, 0, 0);
+ VRFY((ret == 0), "");
+
+ /* This should fail because collective metadata writes are not supported with page buffering */
+ H5E_BEGIN_TRY {
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl);
+ } H5E_END_TRY;
+ VRFY((file_id < 0), "H5Fcreate failed");
+
+ /* disable collective metadata writes for page buffering to work */
+ ret = H5Pset_coll_metadata_write(fapl, FALSE);
+ VRFY((ret >= 0), "");
+
+ ret = create_file(filename, fcpl, fapl, H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED);
+ VRFY((ret == 0), "");
+ ret = open_file(filename, fapl, H5AC_METADATA_WRITE_STRATEGY__DISTRIBUTED, sizeof(int)*100, sizeof(int)*100000);
+ VRFY((ret == 0), "");
+
+ ret = create_file(filename, fcpl, fapl, H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY);
+ VRFY((ret == 0), "");
+ ret = open_file(filename, fapl, H5AC_METADATA_WRITE_STRATEGY__PROCESS_0_ONLY, sizeof(int)*100, sizeof(int)*100000);
+ VRFY((ret == 0), "");
+
+ ret = H5Pset_file_space_page_size(fcpl, sizeof(int)*100);
+ VRFY((ret == 0), "");
+
+ data = (int *) HDmalloc(sizeof(int)*(size_t)num_elements);
+
+ /* initialize all the elements to have a value of -1 */
+ for(i=0 ; i<num_elements ; i++)
+ data[i] = -1;
+
+ /* MSC - why did this stop working? */
+#if 0
+ if(MAINPROCESS) {
+ hid_t fapl_self;
+
+ fapl_self = create_faccess_plist(MPI_COMM_SELF, MPI_INFO_NULL, facc_type);
+
+ ret = H5Pset_page_buffer_size(fapl_self, sizeof(int)*1000, 0, 0);
+ VRFY((ret == 0), "");
+ /* collective metadata writes do not work with page buffering */
+ ret = H5Pset_coll_metadata_write(fapl_self, FALSE);
+ VRFY((ret >= 0), "");
+
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl_self);
+ VRFY((file_id >= 0), "");
+
+ /* Get a pointer to the internal file object */
+ f = (H5F_t *)H5I_object(file_id);
+
+ VRFY((f->shared->page_buf != NULL), "Page Buffer created with 1 process");
+
+ /* allocate space for 200 raw elements */
+ raw_addr = H5MF_alloc(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, sizeof(int)*(size_t)num_elements);
+ VRFY((raw_addr != HADDR_UNDEF), "");
+
+ /* allocate space for 200 metadata elements */
+ meta_addr = H5MF_alloc(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, sizeof(int)*(size_t)num_elements);
+ VRFY((meta_addr != HADDR_UNDEF), "");
+
+ page_count = 0;
+
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*(size_t)num_elements, H5AC_ind_read_dxpl_id, data);
+ VRFY((ret == 0), "");
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*(size_t)num_elements, H5AC_ind_read_dxpl_id, data);
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*(size_t)num_elements, H5AC_rawdata_dxpl_id, data);
+ VRFY((ret == 0), "");
+
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+
+ /* update the first 50 elements */
+ for(i=0 ; i<50 ; i++)
+ data[i] = i;
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*50, H5AC_rawdata_dxpl_id, data);
+ H5Eprint2(H5E_DEFAULT, stderr);
+ VRFY((ret == 0), "");
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*50, H5AC_ind_read_dxpl_id, data);
+ VRFY((ret == 0), "");
+ page_count += 2;
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+
+ /* update the second 50 elements */
+ for(i=0 ; i<50 ; i++)
+ data[i] = i+50;
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*50), sizeof(int)*50, H5AC_rawdata_dxpl_id, data);
+ VRFY((ret == 0), "");
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*50), sizeof(int)*50, H5AC_ind_read_dxpl_id, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+
+ /* update 100 - 200 */
+ for(i=0 ; i<100 ; i++)
+ data[i] = i+100;
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*100), sizeof(int)*100, H5AC_rawdata_dxpl_id, data);
+ VRFY((ret == 0), "");
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*100), sizeof(int)*100, H5AC_ind_read_dxpl_id, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+
+ ret = H5PB_flush(f, dxpl_id, FALSE);
+ VRFY((ret == 0), "");
+
+ /* read elements 0 - 200 */
+ ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*200, H5AC_rawdata_dxpl_id, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+ for (i=0; i < 200; i++)
+ VRFY((data[i] == i), "Read different values than written");
+ ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*200, H5AC_ind_read_dxpl_id, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+ for (i=0; i < 200; i++)
+ VRFY((data[i] == i), "Read different values than written");
+
+ /* read elements 0 - 50 */
+ ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*50, H5AC_rawdata_dxpl_id, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+ for (i=0; i < 50; i++)
+ VRFY((data[i] == i), "Read different values than written");
+ ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*50, H5AC_ind_read_dxpl_id, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+ for (i=0; i < 50; i++)
+ VRFY((data[i] == i), "Read different values than written");
+
+ /* close the file */
+ ret = H5Fclose(file_id);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+ ret = H5Pclose(fapl_self);
+ VRFY((ret>=0), "H5Pclose succeeded");
+ }
+#endif
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ if(mpi_size > 1) {
+ ret = H5Pset_page_buffer_size(fapl, sizeof(int)*1000, 0, 0);
+ VRFY((ret == 0), "");
+ /* collective metadata writes do not work with page buffering */
+ ret = H5Pset_coll_metadata_write(fapl, FALSE);
+ VRFY((ret >= 0), "");
+
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl);
+ VRFY((file_id >= 0), "");
+
+ /* Get a pointer to the internal file object */
+ f = (H5F_t *)H5I_object(file_id);
+
+ VRFY((f->shared->page_buf != NULL), "Page Buffer created with 1 process");
+
+ /* allocate space for 200 raw elements */
+ raw_addr = H5MF_alloc(f, H5FD_MEM_DRAW, H5AC_ind_read_dxpl_id, sizeof(int)*(size_t)num_elements);
+ VRFY((raw_addr != HADDR_UNDEF), "");
+ /* allocate space for 200 metadata elements */
+ meta_addr = H5MF_alloc(f, H5FD_MEM_SUPER, H5AC_ind_read_dxpl_id, sizeof(int)*(size_t)num_elements);
+ VRFY((meta_addr != HADDR_UNDEF), "");
+
+ page_count = 0;
+
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*(size_t)num_elements, H5AC_ind_read_dxpl_id, data);
+ VRFY((ret == 0), "");
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*(size_t)num_elements, H5AC_rawdata_dxpl_id, data);
+ VRFY((ret == 0), "");
+
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+
+ /* update the first 50 elements */
+ for(i=0 ; i<50 ; i++)
+ data[i] = i;
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*50, H5AC_rawdata_dxpl_id, data);
+ VRFY((ret == 0), "");
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*50, H5AC_ind_read_dxpl_id, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+
+ /* update the second 50 elements */
+ for(i=0 ; i<50 ; i++)
+ data[i] = i+50;
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*50), sizeof(int)*50, H5AC_rawdata_dxpl_id, data);
+ VRFY((ret == 0), "");
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*50), sizeof(int)*50, H5AC_ind_read_dxpl_id, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+
+ /* update 100 - 200 */
+ for(i=0 ; i<100 ; i++)
+ data[i] = i+100;
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr+(sizeof(int)*100), sizeof(int)*100, H5AC_rawdata_dxpl_id, data);
+ VRFY((ret == 0), "");
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr+(sizeof(int)*100), sizeof(int)*100, H5AC_ind_read_dxpl_id, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+
+ ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
+ VRFY((ret == 0), "");
+
+ /* read elements 0 - 200 */
+ ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*200, H5AC_rawdata_dxpl_id, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+ for (i=0; i < 200; i++)
+ VRFY((data[i] == i), "Read different values than written");
+ ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*200, H5AC_ind_read_dxpl_id, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+ for (i=0; i < 200; i++)
+ VRFY((data[i] == i), "Read different values than written");
+
+ /* read elements 0 - 50 */
+ ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*50, H5AC_rawdata_dxpl_id, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+ for (i=0; i < 50; i++)
+ VRFY((data[i] == i), "Read different values than written");
+ ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*50, H5AC_ind_read_dxpl_id, data);
+ VRFY((ret == 0), "");
+ page_count += 1;
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+ for (i=0; i < 50; i++)
+ VRFY((data[i] == i), "Read different values than written");
+
+ MPI_Barrier(MPI_COMM_WORLD);
+ /* reset the first 50 elements to -1 */
+ for(i=0 ; i<50 ; i++)
+ data[i] = -1;
+ ret = H5F_block_write(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*50, H5AC_rawdata_dxpl_id, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+ ret = H5F_block_write(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*50, H5AC_ind_read_dxpl_id, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+
+ /* read elements 0 - 50 */
+ ret = H5F_block_read(f, H5FD_MEM_DRAW, raw_addr, sizeof(int)*50, H5AC_rawdata_dxpl_id, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+ for (i=0; i < 50; i++)
+ VRFY((data[i] == -1), "Read different values than written");
+ ret = H5F_block_read(f, H5FD_MEM_SUPER, meta_addr, sizeof(int)*50, H5AC_ind_read_dxpl_id, data);
+ VRFY((ret == 0), "");
+ VRFY((H5SL_count(f->shared->page_buf->slist_ptr) == page_count), "Wrong number of pages in PB");
+ for (i=0; i < 50; i++)
+ VRFY((data[i] == -1), "Read different values than written");
+
+ /* close the file */
+ ret = H5Fclose(file_id);
+ VRFY((ret >= 0), "H5Fclose succeeded");
+ }
+
+ ret = H5Pclose(fapl);
+ VRFY((ret>=0), "H5Pclose succeeded");
+ ret = H5Pclose(fcpl);
+ VRFY((ret>=0), "H5Pclose succeeded");
+
+ HDfree(data);
+ data = NULL;
+ MPI_Barrier(MPI_COMM_WORLD);
+}
+
+static int
+create_file(const char *filename, hid_t fcpl, hid_t fapl, int metadata_write_strategy)
+{
+ hid_t file_id, dset_id, grp_id;
+ hid_t sid, mem_dataspace;
+ hsize_t start[RANK];
+ hsize_t count[RANK];
+ hsize_t stride[RANK];
+ hsize_t block[RANK];
+ DATATYPE *data_array = NULL;
+ hsize_t dims[RANK], i;
+ hsize_t num_elements;
+ int k;
+ char dset_name[10];
+ H5F_t *f = NULL;
+ H5C_t *cache_ptr = NULL;
+ H5AC_cache_config_t config;
+ herr_t ret;
+
+ file_id = H5Fcreate(filename, H5F_ACC_TRUNC, fcpl, fapl);
+ VRFY((file_id >= 0), "");
+
+ ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
+ VRFY((ret == 0), "");
+
+ f = (H5F_t *)H5I_object(file_id);
+ VRFY((f != NULL), "");
+
+ cache_ptr = f->shared->cache;
+ VRFY((cache_ptr->magic == H5C__H5C_T_MAGIC), "");
+
+ cache_ptr->ignore_tags = TRUE;
+ H5C_stats__reset(cache_ptr);
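+
+ /* switch the metadata cache to the requested metadata write strategy */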
+ config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
+
+ ret = H5AC_get_cache_auto_resize_config(cache_ptr, &config);
+ VRFY((ret == 0), "");
+
+ config.metadata_write_strategy = metadata_write_strategy;
+
+ ret = H5AC_set_cache_auto_resize_config(cache_ptr, &config);
+ VRFY((ret == 0), "");
+
+ grp_id = H5Gcreate2(file_id, "GROUP", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((grp_id >= 0), "");
+
+ dims[0] = ROW_FACTOR*mpi_size;
+ dims[1] = COL_FACTOR*mpi_size;
+ sid = H5Screate_simple (RANK, dims, NULL);
+ VRFY((sid >= 0), "H5Screate_simple succeeded");
+
+ /* Each process takes a slab of rows. */
+ block[0] = dims[0]/mpi_size;
+ block[1] = dims[1];
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = mpi_rank*block[0];
+ start[1] = 0;
+
+ num_elements = block[0] * block[1];
+ /* allocate memory for data buffer */
+ data_array = (DATATYPE *)HDmalloc(num_elements*sizeof(DATATYPE));
+ VRFY((data_array != NULL), "data_array HDmalloc succeeded");
+ /* put some trivial data in the data_array */
+ for(i=0 ; i<num_elements; i++)
+ data_array[i] = mpi_rank + 1;
+
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple (1, &num_elements, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
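+ /* each iteration creates datasets D1dset<k>, D2dset<k>, D3dset<k>, and
+ * dset<k>, writes and verifies dset<k>, then deletes the three D*dset<k>
+ * datasets
+ */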
+ for(k=0 ; k<NUM_DSETS; k++) {
+ sprintf(dset_name, "D1dset%d", k);
+ dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "");
+ ret = H5Dclose(dset_id);
+ VRFY((ret == 0), "");
+
+ sprintf(dset_name, "D2dset%d", k);
+ dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "");
+ ret = H5Dclose(dset_id);
+ VRFY((ret == 0), "");
+
+ sprintf(dset_name, "D3dset%d", k);
+ dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "");
+ ret = H5Dclose(dset_id);
+ VRFY((ret == 0), "");
+
+ sprintf(dset_name, "dset%d", k);
+ dset_id = H5Dcreate2(grp_id, dset_name, H5T_NATIVE_INT, sid,
+ H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "");
+
+ ret = H5Dwrite(dset_id, H5T_NATIVE_INT, mem_dataspace, sid, H5P_DEFAULT, data_array);
+ VRFY((ret == 0), "");
+
+ ret = H5Dclose(dset_id);
+ VRFY((ret == 0), "");
+
+ HDmemset(data_array, 0, num_elements*sizeof(DATATYPE));
+ dset_id = H5Dopen2(grp_id, dset_name, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "");
+
+ ret = H5Dread(dset_id, H5T_NATIVE_INT, mem_dataspace, sid, H5P_DEFAULT, data_array);
+ VRFY((ret == 0), "");
+
+ ret = H5Dclose(dset_id);
+ VRFY((ret == 0), "");
+
+ for (i=0; i < num_elements; i++)
+ VRFY((data_array[i] == mpi_rank+1), "Dataset Verify failed");
+
+ sprintf(dset_name, "D1dset%d", k);
+ ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT);
+ VRFY((ret == 0), "");
+ sprintf(dset_name, "D2dset%d", k);
+ ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT);
+ VRFY((ret == 0), "");
+ sprintf(dset_name, "D3dset%d", k);
+ ret = H5Ldelete(grp_id, dset_name, H5P_DEFAULT);
+ VRFY((ret == 0), "");
+ }
+
+ ret = H5Gclose(grp_id);
+ VRFY((ret == 0), "");
+ ret = H5Fclose(file_id);
+ VRFY((ret == 0), "");
+ ret = H5Sclose(sid);
+ VRFY((ret == 0), "");
+ ret = H5Sclose(mem_dataspace);
+ VRFY((ret == 0), "");
+
+ MPI_Barrier(MPI_COMM_WORLD);
+ HDfree(data_array);
+ return 0;
+} /* create_file */
+
+static int
+open_file(const char *filename, hid_t fapl, int metadata_write_strategy,
+ hsize_t page_size, size_t page_buffer_size)
+{
+ hid_t file_id, dset_id, grp_id, grp_id2;
+ hid_t sid, mem_dataspace;
+ DATATYPE *data_array = NULL;
+ hsize_t dims[RANK];
+ hsize_t start[RANK];
+ hsize_t count[RANK];
+ hsize_t stride[RANK];
+ hsize_t block[RANK];
+ int i, k, ndims;
+ hsize_t num_elements;
+ char dset_name[10];
+ H5F_t *f = NULL;
+ H5C_t *cache_ptr = NULL;
+ H5AC_cache_config_t config;
+ herr_t ret;
+
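+ /* apply the requested metadata write strategy via the FAPL's
+ * metadata cache configuration
+ */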
+ config.version = H5AC__CURR_CACHE_CONFIG_VERSION;
+ ret = H5Pget_mdc_config(fapl, &config);
+ VRFY((ret == 0), "");
+
+ config.metadata_write_strategy = metadata_write_strategy;
+
+ ret = H5Pget_mdc_config(fapl, &config);
+ VRFY((ret == 0), "");
+
+ file_id = H5Fopen(filename, H5F_ACC_RDWR, fapl);
+ H5Eprint2(H5E_DEFAULT, stderr);
+ VRFY((file_id >= 0), "");
+
+ ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
+ VRFY((ret == 0), "");
+
+ f = (H5F_t *)H5I_object(file_id);
+ VRFY((f != NULL), "");
+
+ cache_ptr = f->shared->cache;
+ VRFY((cache_ptr->magic == H5C__H5C_T_MAGIC), "");
+
+ MPI_Barrier(MPI_COMM_WORLD);
+
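+ /* verify that page buffering is enabled with the expected page size
+ * and page buffer size
+ */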
+ VRFY((f->shared->page_buf != NULL), "");
+ VRFY((f->shared->page_buf->page_size == page_size), "");
+ VRFY((f->shared->page_buf->max_size == page_buffer_size), "");
+
+ grp_id = H5Gopen2(file_id, "GROUP", H5P_DEFAULT);
+ VRFY((grp_id >= 0), "");
+
+ dims[0] = ROW_FACTOR*mpi_size;
+ dims[1] = COL_FACTOR*mpi_size;
+
+ /* Each process takes a slab of rows. */
+ block[0] = dims[0]/mpi_size;
+ block[1] = dims[1];
+ stride[0] = block[0];
+ stride[1] = block[1];
+ count[0] = 1;
+ count[1] = 1;
+ start[0] = mpi_rank*block[0];
+ start[1] = 0;
+
+ num_elements = block[0] * block[1];
+ /* allocate memory for data buffer */
+ data_array = (DATATYPE *)HDmalloc(num_elements*sizeof(DATATYPE));
+ VRFY((data_array != NULL), "data_array HDmalloc succeeded");
+
+ /* create a memory dataspace independently */
+ mem_dataspace = H5Screate_simple (1, &num_elements, NULL);
+ VRFY((mem_dataspace >= 0), "");
+
+ for(k=0 ; k<NUM_DSETS; k++) {
+ sprintf(dset_name, "dset%d", k);
+ dset_id = H5Dopen2(grp_id, dset_name, H5P_DEFAULT);
+ VRFY((dset_id >= 0), "");
+
+ sid = H5Dget_space(dset_id);
+ VRFY((dset_id >= 0), "H5Dget_space succeeded");
+
+ ndims = H5Sget_simple_extent_dims(sid, dims, NULL);
+ VRFY((ndims == 2), "H5Sget_simple_extent_dims succeeded");
+ VRFY(dims[0] == ROW_FACTOR*mpi_size, "Wrong dataset dimensions");
+ VRFY(dims[1] == COL_FACTOR*mpi_size, "Wrong dataset dimensions");
+
+ ret = H5Sselect_hyperslab(sid, H5S_SELECT_SET, start, stride, count, block);
+ VRFY((ret >= 0), "H5Sset_hyperslab succeeded");
+
+ ret = H5Dread(dset_id, H5T_NATIVE_INT, mem_dataspace, sid, H5P_DEFAULT, data_array);
+ VRFY((ret >= 0), "");
+
+ ret = H5Dclose(dset_id);
+ VRFY((ret >= 0), "");
+ ret = H5Sclose(sid);
+ VRFY((ret == 0), "");
+
+ for (i=0; i < num_elements; i++)
+ VRFY((data_array[i] == mpi_rank+1), "Dataset Verify failed");
+ }
+
+ grp_id2 = H5Gcreate2(file_id, "GROUP/GROUP2", H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ VRFY((grp_id2 >= 0), "");
+ ret = H5Gclose(grp_id2);
+ VRFY((ret == 0), "");
+
+ ret = H5Fflush(file_id, H5F_SCOPE_GLOBAL);
+ VRFY((ret == 0), "");
+
+ MPI_Barrier(MPI_COMM_WORLD);
+ /* verify that every cached entry is clean, then expunge each entry
+ * that is neither pinned nor protected.
+ */
+ for ( i = 0; i < H5C__HASH_TABLE_LEN; i++ ) {
+ H5C_cache_entry_t * entry_ptr = NULL;
+
+ entry_ptr = cache_ptr->index[i];
+
+ while ( entry_ptr != NULL ) {
+ HDassert(entry_ptr->magic == H5C__H5C_CACHE_ENTRY_T_MAGIC);
+ HDassert(entry_ptr->is_dirty == FALSE);
+
+ if(!entry_ptr->is_pinned && !entry_ptr->is_protected) {
+ ret = H5AC_expunge_entry(f, H5AC_ind_read_dxpl_id, entry_ptr->type, entry_ptr->addr, 0);
+ VRFY((ret == 0), "");
+ }
+
+ entry_ptr = entry_ptr->ht_next;
+ }
+ }
+ MPI_Barrier(MPI_COMM_WORLD);
+
+ grp_id2 = H5Gopen2(file_id, "GROUP/GROUP2", H5P_DEFAULT);
+ H5Eprint2(H5E_DEFAULT, stderr);
+ VRFY((grp_id2 >= 0), "");
+ ret = H5Gclose(grp_id2);
+ H5Eprint2(H5E_DEFAULT, stderr);
+ VRFY((ret == 0), "");
+
+ ret = H5Gclose(grp_id);
+ VRFY((ret == 0), "");
+ ret = H5Fclose(file_id);
+ VRFY((ret == 0), "");
+ ret = H5Sclose(mem_dataspace);
+ VRFY((ret == 0), "");
+ HDfree(data_array);
+
+ return nerrors;
+}
+
+void
test_file_properties(void)
{
hid_t fid; /* HDF5 file ID */
@@ -103,7 +713,6 @@ test_file_properties(void)
const char *filename;
MPI_Comm comm = MPI_COMM_WORLD;
MPI_Info info = MPI_INFO_NULL;
- int mpi_size, mpi_rank;
herr_t ret; /* Generic return value */
filename = (const char *)GetTestParameters();
diff --git a/testpar/t_file_image.c b/testpar/t_file_image.c
index a2246b6..62db11a 100644
--- a/testpar/t_file_image.c
+++ b/testpar/t_file_image.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/testpar/t_filter_read.c b/testpar/t_filter_read.c
index 44f3f11..4556b01 100644
--- a/testpar/t_filter_read.c
+++ b/testpar/t_filter_read.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/testpar/t_init_term.c b/testpar/t_init_term.c
index 824f773..933fbd2 100644
--- a/testpar/t_init_term.c
+++ b/testpar/t_init_term.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/testpar/t_mdset.c b/testpar/t_mdset.c
index 7077081..5d989bb 100644
--- a/testpar/t_mdset.c
+++ b/testpar/t_mdset.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "testphdf5.h"
@@ -510,7 +508,7 @@ void big_dataset(void)
/* Check that file of the correct size was created */
file_size = h5_get_file_size(filename, fapl);
- VRFY((file_size == 2147485792ULL), "File is correct size(~2GB)");
+ VRFY((file_size == 2147485696ULL), "File is correct size(~2GB)");
/*
* Create >4GB HDF5 file
@@ -539,7 +537,7 @@ void big_dataset(void)
/* Check that file of the correct size was created */
file_size = h5_get_file_size(filename, fapl);
- VRFY((file_size == 4294969440ULL), "File is correct size(~4GB)");
+ VRFY((file_size == 4294969344ULL), "File is correct size(~4GB)");
/*
* Create >8GB HDF5 file
@@ -568,7 +566,7 @@ void big_dataset(void)
/* Check that file of the correct size was created */
file_size = h5_get_file_size(filename, fapl);
- VRFY((file_size == 8589936736ULL), "File is correct size(~8GB)");
+ VRFY((file_size == 8589936640ULL), "File is correct size(~8GB)");
/* Close fapl */
ret = H5Pclose(fapl);
diff --git a/testpar/t_mpi.c b/testpar/t_mpi.c
index c234257..3d501c9 100644
--- a/testpar/t_mpi.c
+++ b/testpar/t_mpi.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/testpar/t_pflush1.c b/testpar/t_pflush1.c
index 1bcfeb8..0782f3d 100644
--- a/testpar/t_pflush1.c
+++ b/testpar/t_pflush1.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/testpar/t_pflush2.c b/testpar/t_pflush2.c
index f9f8459..2051f4e 100644
--- a/testpar/t_pflush2.c
+++ b/testpar/t_pflush2.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/testpar/t_ph5basic.c b/testpar/t_ph5basic.c
index 76eeaef..574591c 100644
--- a/testpar/t_ph5basic.c
+++ b/testpar/t_ph5basic.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/testpar/t_prestart.c b/testpar/t_prestart.c
index fab4a7c..719d150 100644
--- a/testpar/t_prestart.c
+++ b/testpar/t_prestart.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/testpar/t_prop.c b/testpar/t_prop.c
index 2cc0f5e..d5efa94 100644
--- a/testpar/t_prop.c
+++ b/testpar/t_prop.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/testpar/t_pshutdown.c b/testpar/t_pshutdown.c
index be9734f..def7071 100644
--- a/testpar/t_pshutdown.c
+++ b/testpar/t_pshutdown.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/testpar/t_shapesame.c b/testpar/t_shapesame.c
index 3675824..d81d2be 100644
--- a/testpar/t_shapesame.c
+++ b/testpar/t_shapesame.c
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/testpar/t_span_tree.c b/testpar/t_span_tree.c
index f1487eb..a42df95 100644
--- a/testpar/t_span_tree.c
+++ b/testpar/t_span_tree.c
@@ -6,12 +6,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/testpar/testpar.h b/testpar/testpar.h
index 2c99103..84c073f 100644
--- a/testpar/testpar.h
+++ b/testpar/testpar.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* common definitions used by all parallel test programs. */
diff --git a/testpar/testphdf5.c b/testpar/testphdf5.c
index c54cb5e..87d9056 100644
--- a/testpar/testphdf5.c
+++ b/testpar/testphdf5.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -351,6 +349,11 @@ int main(int argc, char **argv)
AddTest("split", test_split_comm_access, NULL,
"dataset using split communicators", PARATESTFILE);
+#ifdef PB_OUT /* temporary: disable page buffering when parallel */
+ AddTest("page_buffer", test_page_buffer_access, NULL,
+ "page buffer usage in parallel", PARATESTFILE);
+#endif
+
AddTest("props", test_file_properties, NULL,
"Coll Metadata file property settings", PARATESTFILE);
diff --git a/testpar/testphdf5.h b/testpar/testphdf5.h
index 9838673..322cb9b 100644
--- a/testpar/testphdf5.h
+++ b/testpar/testphdf5.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* common definitions used by all parallel hdf5 test programs. */
@@ -246,6 +244,7 @@ void collective_group_write(void);
void independent_group_read(void);
void test_fapl_mpio_dup(void);
void test_split_comm_access(void);
+void test_page_buffer_access(void);
void dataset_atomicity(void);
void dataset_writeInd(void);
void dataset_writeAll(void);
diff --git a/tools/CMakeLists.txt b/tools/CMakeLists.txt
index 4b9b765..4b3b3d5 100644
--- a/tools/CMakeLists.txt
+++ b/tools/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_TOOLS)
#-----------------------------------------------------------------------------
@@ -11,7 +11,7 @@ add_definitions (${HDF_EXTRA_C_FLAGS})
# --------------------------------------------------------------------
if (NOT BUILD_TESTING)
add_subdirectory (${HDF5_TOOLS_SOURCE_DIR}/lib)
-endif (NOT BUILD_TESTING)
+endif ()
#-----------------------------------------------------------------------------
# Setup include Directories
@@ -24,4 +24,4 @@ add_subdirectory (${HDF5_TOOLS_SOURCE_DIR}/src)
#-- Add the tests
if (BUILD_TESTING)
add_subdirectory (${HDF5_TOOLS_SOURCE_DIR}/test)
-endif (BUILD_TESTING)
+endif ()
diff --git a/tools/COPYING b/tools/COPYING
index 6903daf..6497ace 100644
--- a/tools/COPYING
+++ b/tools/COPYING
@@ -5,12 +5,9 @@
The files and subdirectories in this directory are part of HDF5.
The full HDF5 copyright notice, including terms governing use,
- modification, and redistribution, is contained in the files COPYING
- and Copyright.html. COPYING can be found at the root of the source
- code distribution tree; Copyright.html can be found at the root
- level of an installed copy of the electronic HDF5 document set and
- is linked from the top-level documents page. It can also be found
- at http://www.hdfgroup.org/HDF5/doc/Copyright.html. If you do not
- have access to either file, you may request a copy from
+ modification, and redistribution, is contained in the COPYING file
+ which can be found at the root of the source code distribution tree
+ or in https://support.hdfgroup.org/ftp/HDF5/releases. If you do
+ not have access to either file, you may request a copy from
help@hdfgroup.org.
diff --git a/tools/Makefile.am b/tools/Makefile.am
index cd97069..b0c33ed 100644
--- a/tools/Makefile.am
+++ b/tools/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
diff --git a/tools/lib/CMakeLists.txt b/tools/lib/CMakeLists.txt
index bfd1af2..ade7671 100644
--- a/tools/lib/CMakeLists.txt
+++ b/tools/lib/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_TOOLS_LIB)
#-----------------------------------------------------------------------------
@@ -63,7 +63,7 @@ if (BUILD_SHARED_LIBS)
)
#set_property (TARGET ${HDF5_TOOLS_LIBSH_TARGET} APPEND PROPERTY COMPILE_DEFINITIONS H5DIFF_DEBUG)
set (install_targets ${install_targets} ${HDF5_TOOLS_LIBSH_TARGET})
-endif (BUILD_SHARED_LIBS)
+endif ()
##############################################################################
##############################################################################
@@ -89,7 +89,8 @@ install (
if (HDF5_EXPORTED_TARGETS)
if (BUILD_SHARED_LIBS)
INSTALL_TARGET_PDB (${HDF5_TOOLS_LIBSH_TARGET} ${HDF5_INSTALL_BIN_DIR} toolslibraries)
- endif (BUILD_SHARED_LIBS)
+ endif ()
+ INSTALL_TARGET_PDB (${HDF5_TOOLS_LIB_TARGET} ${HDF5_INSTALL_BIN_DIR} toolslibraries)
install (
TARGETS
@@ -102,4 +103,4 @@ if (HDF5_EXPORTED_TARGETS)
FRAMEWORK DESTINATION ${HDF5_INSTALL_FWRK_DIR} COMPONENT toolslibraries
INCLUDES DESTINATION include
)
-endif (HDF5_EXPORTED_TARGETS)
+endif ()
diff --git a/tools/lib/Makefile.am b/tools/lib/Makefile.am
index e8fc6a1..f07dff7 100644
--- a/tools/lib/Makefile.am
+++ b/tools/lib/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
diff --git a/tools/lib/h5diff.c b/tools/lib/h5diff.c
index 79dc51f..afb36d9 100644
--- a/tools/lib/h5diff.c
+++ b/tools/lib/h5diff.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <stdlib.h>
@@ -47,7 +45,7 @@ void do_print_objname (const char *OBJ, const char *path1, const char *path2, di
{
/* if verbose level is higher than 0, put space line before
* displaying any object or symbolic links. This improves
- * readability of the output.
+ * readability of the output.
*/
if (opts->m_verbose_level >= 1)
parallel_print("\n");
@@ -71,7 +69,7 @@ do_print_attrname (const char *attr, const char *path1, const char *path2)
* Function: print_warn
*
* Purpose: check print warning condition.
- * Return:
+ * Return:
* 1 if verbose mode
 *         0 if not verbose mode
* Programmer: Jonathan Kim
@@ -146,7 +144,7 @@ static void print_incoming_data(void)
*
* Purpose: check if options are valid
*
- * Return:
+ * Return:
* 1 : Valid
* 0 : Not valid
*
@@ -189,10 +187,10 @@ out:
*
* Purpose: check if 'paths' are part of exclude path list
*
- * Return:
+ * Return:
* 1 - excluded path
* 0 - not excluded path
- *
+ *
* Programmer: Jonathan Kim
* Date: Aug 23, 2010
*------------------------------------------------------------------------*/
@@ -221,11 +219,11 @@ static int is_exclude_path (char * path, h5trav_type_t type, diff_opt_t *options
{
size_t len_grp;
- /* check if given path belong to an excluding group, if so
+ /* check if given path belong to an excluding group, if so
* exclude it as well.
* This verifies if “/grp1/dset1” is only under “/grp1”, but
- * not under “/grp1xxx/” group.
- */
+ * not under “/grp1xxx/” group.
+ */
len_grp = HDstrlen(exclude_path_ptr->obj_path);
if (path[len_grp] == '/')
{
@@ -236,15 +234,15 @@ static int is_exclude_path (char * path, h5trav_type_t type, diff_opt_t *options
}
}
/* exclude target is not group, just exclude the object */
- else
+ else
{
ret_cmp = HDstrcmp(exclude_path_ptr->obj_path, path);
if (ret_cmp == 0) /* found matching object */
{
/* excluded non-group object */
ret = 1;
- /* remember the type of this maching object.
- * if it's group, it can be used for excluding its member
+                /* remember the type of this matching object.
+ * if it's group, it can be used for excluding its member
* objects in this while() loop */
exclude_path_ptr->obj_type = type;
break; /* while */
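The exclusion logic above hinges on a prefix test: a path belongs to an excluded group only when the group path is a prefix of it and the next character is a path separator, so "/grp1" matches "/grp1/dset1" but not "/grp1xxx/dset1". A standalone sketch of that idea (a simplified illustration only, not the h5diff implementation, which also tracks object types and traversal state):

    #include <string.h>

    /* return 1 if path lies inside group grp, 0 otherwise */
    static int path_is_under_group(const char *path, const char *grp)
    {
        size_t len_grp = strlen(grp);

        if (strncmp(path, grp, len_grp) != 0)
            return 0;                    /* grp is not even a prefix of path */
        return path[len_grp] == '/';     /* must continue with a separator   */
    }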
@@ -339,7 +337,7 @@ static void build_match_list (const char *objname1, trav_info_t *info1, const ch
path2_lp = (info2->paths[curr2].path) + path2_offset;
type1_l = info1->paths[curr1].type;
type2_l = info2->paths[curr2].type;
-
+
/* criteria is string compare */
cmp = HDstrcmp(path1_lp, path2_lp);
@@ -410,7 +408,7 @@ static void build_match_list (const char *objname1, trav_info_t *info1, const ch
if (!is_exclude_path(path2_lp, type2_l, options))
{
trav_table_addflags(infile, path2_lp, info2->paths[curr2].type, table);
- }
+ }
curr2++;
} /* end while */
@@ -424,8 +422,8 @@ static void build_match_list (const char *objname1, trav_info_t *info1, const ch
/*-------------------------------------------------------------------------
* Function: trav_grp_objs
*
- * Purpose:
- * Call back function from h5trav_visit().
+ * Purpose:
+ * Call back function from h5trav_visit().
*
* Programmer: Jonathan Kim
*
@@ -437,22 +435,22 @@ static herr_t trav_grp_objs(const char *path, const H5O_info_t *oinfo,
trav_info_visit_obj(path, oinfo, already_visited, udata);
return 0;
-}
+}
/*-------------------------------------------------------------------------
* Function: trav_grp_symlinks
*
- * Purpose:
- * Call back function from h5trav_visit().
+ * Purpose:
+ * Call back function from h5trav_visit().
 *  Track and perform extra checks while visiting all symbolic links.
*
* Programmer: Jonathan Kim
*
* Date: Aug 16, 2010
*------------------------------------------------------------------------*/
-static herr_t trav_grp_symlinks(const char *path, const H5L_info_t *linfo,
+static herr_t trav_grp_symlinks(const char *path, const H5L_info_t *linfo,
void *udata)
-{
+{
trav_info_t *tinfo = (trav_info_t *)udata;
diff_opt_t *opts = (diff_opt_t *)tinfo->opts;
int ret;
@@ -482,18 +480,18 @@ static herr_t trav_grp_symlinks(const char *path, const H5L_info_t *linfo,
tinfo->symlink_visited.dangle_link = TRUE;
trav_info_visit_lnk(path, linfo, tinfo);
if (opts->no_dangle_links)
- opts->err_stat = 1; /* make dgangling link is error */
+                    opts->err_stat = 1; /* treat a dangling link as an error */
goto done;
}
- /* check if already visit the target object */
- if(symlink_is_visited( &(tinfo->symlink_visited), linfo->type, NULL, lnk_info.trg_path))
+            /* check whether the target object was already visited */
+ if(symlink_is_visited( &(tinfo->symlink_visited), linfo->type, NULL, lnk_info.trg_path))
goto done;
/* add this link as visited link */
- if(symlink_visit_add( &(tinfo->symlink_visited), linfo->type, NULL, lnk_info.trg_path) < 0)
+ if(symlink_visit_add( &(tinfo->symlink_visited), linfo->type, NULL, lnk_info.trg_path) < 0)
goto done;
-
+
if(h5trav_visit(tinfo->fid, path, TRUE, TRUE,
trav_grp_objs,trav_grp_symlinks, tinfo) < 0)
{
@@ -502,8 +500,8 @@ static herr_t trav_grp_symlinks(const char *path, const H5L_info_t *linfo,
goto done;
}
break;
-
- case H5L_TYPE_EXTERNAL:
+
+ case H5L_TYPE_EXTERNAL:
ret = H5tools_get_symlink_info(tinfo->fid, path, &lnk_info, opts->follow_links);
/* error */
if (ret < 0)
@@ -514,21 +512,21 @@ static herr_t trav_grp_symlinks(const char *path, const H5L_info_t *linfo,
tinfo->symlink_visited.dangle_link = TRUE;
trav_info_visit_lnk(path, linfo, tinfo);
if (opts->no_dangle_links)
- opts->err_stat = 1; /* make dgangling link is error */
+                    opts->err_stat = 1; /* treat a dangling link as an error */
goto done;
}
- if(H5Lunpack_elink_val(lnk_info.trg_path, linfo->u.val_size, NULL, &ext_fname, &ext_path) < 0)
+ if(H5Lunpack_elink_val(lnk_info.trg_path, linfo->u.val_size, NULL, &ext_fname, &ext_path) < 0)
goto done;
- /* check if already visit the target object */
- if(symlink_is_visited( &(tinfo->symlink_visited), linfo->type, ext_fname, ext_path))
+            /* check whether the target object was already visited */
+ if(symlink_is_visited( &(tinfo->symlink_visited), linfo->type, ext_fname, ext_path))
goto done;
/* add this link as visited link */
- if(symlink_visit_add( &(tinfo->symlink_visited), linfo->type, ext_fname, ext_path) < 0)
+ if(symlink_visit_add( &(tinfo->symlink_visited), linfo->type, ext_fname, ext_path) < 0)
goto done;
-
+
if(h5trav_visit(tinfo->fid, path, TRUE, TRUE,
trav_grp_objs,trav_grp_symlinks, tinfo) < 0)
{
@@ -548,11 +546,11 @@ static herr_t trav_grp_symlinks(const char *path, const H5L_info_t *linfo,
break;
} /* end of switch */
-done:
+done:
if (lnk_info.trg_path)
HDfree(lnk_info.trg_path);
return 0;
-}
+}
/*-------------------------------------------------------------------------
@@ -630,7 +628,7 @@ hsize_t h5diff(const char *fname1,
H5E_BEGIN_TRY
{
/* open file 1 */
- if((file1_id = h5tools_fopen(fname1, H5F_ACC_RDONLY, H5P_DEFAULT, NULL, NULL, (size_t)0)) < 0)
+ if((file1_id = h5tools_fopen(fname1, H5F_ACC_RDONLY, H5P_DEFAULT, NULL, NULL, (size_t)0)) < 0)
{
parallel_print("h5diff: <%s>: unable to open file\n", fname1);
options->err_stat = 1;
@@ -639,7 +637,7 @@ hsize_t h5diff(const char *fname1,
/* open file 2 */
- if((file2_id = h5tools_fopen(fname2, H5F_ACC_RDONLY, H5P_DEFAULT, NULL, NULL, (size_t)0)) < 0)
+ if((file2_id = h5tools_fopen(fname2, H5F_ACC_RDONLY, H5P_DEFAULT, NULL, NULL, (size_t)0)) < 0)
{
parallel_print("h5diff: <%s>: unable to open file\n", fname2);
options->err_stat = 1;
@@ -704,14 +702,14 @@ hsize_t h5diff(const char *fname1,
else
{
/* check if link itself exist */
- if(H5Lexists(file1_id, obj1fullname, H5P_DEFAULT) <= 0)
+ if(H5Lexists(file1_id, obj1fullname, H5P_DEFAULT) <= 0)
{
parallel_print ("Object <%s> could not be found in <%s>\n", obj1fullname, fname1);
options->err_stat = 1;
goto out;
}
/* get info from link */
- if(H5Lget_info(file1_id, obj1fullname, &src_linfo1, H5P_DEFAULT) < 0)
+ if(H5Lget_info(file1_id, obj1fullname, &src_linfo1, H5P_DEFAULT) < 0)
{
parallel_print("Unable to get link info from <%s>\n", obj1fullname);
goto out;
@@ -719,7 +717,7 @@ hsize_t h5diff(const char *fname1,
info1_lp = info1_obj;
- /*
+ /*
* check the type of specified path for hard and symbolic links
*/
if(src_linfo1.type == H5L_TYPE_HARD)
@@ -764,14 +762,14 @@ hsize_t h5diff(const char *fname1,
else
{
/* check if link itself exist */
- if(H5Lexists(file2_id, obj2fullname, H5P_DEFAULT) <= 0)
+ if(H5Lexists(file2_id, obj2fullname, H5P_DEFAULT) <= 0)
{
parallel_print ("Object <%s> could not be found in <%s>\n", obj2fullname, fname2);
options->err_stat = 1;
goto out;
}
/* get info from link */
- if(H5Lget_info(file2_id, obj2fullname, &src_linfo2, H5P_DEFAULT) < 0)
+ if(H5Lget_info(file2_id, obj2fullname, &src_linfo2, H5P_DEFAULT) < 0)
{
parallel_print("Unable to get link info from <%s>\n", obj2fullname);
goto out;
@@ -779,7 +777,7 @@ hsize_t h5diff(const char *fname1,
info2_lp = info2_obj;
- /*
+ /*
* check the type of specified path for hard and symbolic links
*/
if(src_linfo2.type == H5L_TYPE_HARD)
@@ -811,7 +809,7 @@ hsize_t h5diff(const char *fname1,
obj2type = H5TRAV_TYPE_UDLINK;
trav_info_add(info2_obj, obj2fullname, obj2type);
}
- }
+ }
}
/* if no object specified */
else
@@ -830,7 +828,7 @@ hsize_t h5diff(const char *fname1,
l_ret2 = H5tools_get_symlink_info(file2_id, obj2fullname, &trg_linfo2, options->follow_links);
/*---------------------------------------------
- * check for following symlinks
+ * check for following symlinks
*/
if (options->follow_links)
{
@@ -915,7 +913,7 @@ hsize_t h5diff(const char *fname1,
}
}
}
- else if(l_ret2 < 0) /* fail */
+ else if(l_ret2 < 0) /* fail */
{
parallel_print ("Object <%s> could not be found in <%s>\n", obj2fullname, fname2);
options->err_stat = 1;
@@ -936,11 +934,11 @@ hsize_t h5diff(const char *fname1,
}
} /* end of if follow symlinks */
- /*
+ /*
* If verbose options is not used, don't need to traverse through the list
* of objects in the group to display objects information,
- * So use h5tools_is_obj_same() to improve performance by skipping
- * comparing details of same objects.
+ * So use h5tools_is_obj_same() to improve performance by skipping
+ * comparing details of same objects.
*/
if(!(options->m_verbose || options->m_report))
@@ -1087,10 +1085,10 @@ out:
/*-------------------------------------------------------------------------
* Function: diff_match
*
- * Purpose:
- * Compare common objects in given groups according to table structure.
- * The table structure has flags which can be used to find common objects
- * and will be compared.
+ * Purpose:
+ * Compare common objects in given groups according to table structure.
+ * The table structure has flags which can be used to find common objects
+ * and will be compared.
* Common object means same name (absolute path) objects in both location.
*
* Return: Number of differences found
@@ -1126,7 +1124,7 @@ hsize_t diff_match(hid_t file1_id, const char *grp1, trav_info_t *info1,
h5difftrace("diff_match start\n");
- /*
+ /*
* if not root, prepare object name to be pre-appended to group path to
* make full path
*/
@@ -1141,8 +1139,8 @@ hsize_t diff_match(hid_t file1_id, const char *grp1, trav_info_t *info1,
* 2) the graph must match, i.e same names (absolute path)
* 3) objects with the same name must be of the same type
*-------------------------------------------------------------------------
- */
-
+ */
+
/* not valid compare used when --exclude-path option is used */
if (!options->exclude_path)
{
@@ -1152,7 +1150,7 @@ hsize_t diff_match(hid_t file1_id, const char *grp1, trav_info_t *info1,
options->contents = 0;
}
}
-
+
/* objects in one file and not the other */
for( i = 0; i < table->nobjs; i++)
{
@@ -1209,7 +1207,7 @@ hsize_t diff_match(hid_t file1_id, const char *grp1, trav_info_t *info1,
#endif /* H5_HAVE_ASPRINTF */
/* get index to figure out type of the object in file1 */
- while(info1->paths[idx1].path &&
+ while(info1->paths[idx1].path &&
(HDstrcmp(obj1_fullpath, info1->paths[idx1].path) != 0))
idx1++;
/* get index to figure out type of the object in file2 */
@@ -1226,7 +1224,7 @@ hsize_t diff_match(hid_t file1_id, const char *grp1, trav_info_t *info1,
if(!g_Parallel)
{
nfound += diff(file1_id, obj1_fullpath,
- file2_id, obj2_fullpath,
+ file2_id, obj2_fullpath,
options, &argdata);
} /* end if */
#ifdef H5_HAVE_PARALLEL
@@ -1244,7 +1242,7 @@ hsize_t diff_match(hid_t file1_id, const char *grp1, trav_info_t *info1,
*/
/*Set up args to pass to worker task. */
- if(HDstrlen(obj1_fullpath) > 255 ||
+ if(HDstrlen(obj1_fullpath) > 255 ||
HDstrlen(obj2_fullpath) > 255)
{
printf("The parallel diff only supports object names up to 255 characters\n");
@@ -1401,7 +1399,7 @@ hsize_t diff_match(hid_t file1_id, const char *grp1, trav_info_t *info1,
#endif /* H5_HAVE_PARALLEL */
if(obj1_fullpath)
HDfree(obj1_fullpath);
- if(obj2_fullpath)
+ if(obj2_fullpath)
HDfree(obj2_fullpath);
} /* end if */
} /* end for */
@@ -1568,14 +1566,14 @@ hsize_t diff(hid_t file1_id,
if(print_warn(options))
linkinfo1.opt.msg_mode = linkinfo2.opt.msg_mode = 1;
- /* for symbolic links, take care follow symlink and no dangling link
+ /* for symbolic links, take care follow symlink and no dangling link
* options */
- if (argdata->type[0] == H5TRAV_TYPE_LINK ||
+ if (argdata->type[0] == H5TRAV_TYPE_LINK ||
argdata->type[0] == H5TRAV_TYPE_UDLINK ||
- argdata->type[1] == H5TRAV_TYPE_LINK ||
+ argdata->type[1] == H5TRAV_TYPE_LINK ||
argdata->type[1] == H5TRAV_TYPE_UDLINK )
{
- /*
+ /*
* check dangling links for path1 and path2
*/
@@ -1586,7 +1584,7 @@ hsize_t diff(hid_t file1_id,
{
if (options->no_dangle_links)
{
- /* gangling link is error */
+ /* dangling link is error */
if(options->m_verbose)
parallel_print("Warning: <%s> is a dangling link.\n", path1);
goto out;
@@ -1604,7 +1602,7 @@ hsize_t diff(hid_t file1_id,
{
if (options->no_dangle_links)
{
- /* gangling link is error */
+ /* dangling link is error */
if(options->m_verbose)
parallel_print("Warning: <%s> is a dangling link.\n", path2);
goto out;
@@ -1614,7 +1612,7 @@ hsize_t diff(hid_t file1_id,
}
else if (ret < 0)
goto out;
-
+
/* found dangling link */
if (is_dangle_link1 || is_dangle_link2)
goto out2;
@@ -1637,7 +1635,7 @@ hsize_t diff(hid_t file1_id,
if (options->m_verbose||options->m_list_not_cmp)
{
parallel_print("Not comparable: <%s> is of type %s and <%s> is of type %s\n",
- path1, get_type(argdata->type[0]),
+ path1, get_type(argdata->type[0]),
path2, get_type(argdata->type[1]));
}
options->not_cmp=1;
@@ -1648,11 +1646,11 @@ hsize_t diff(hid_t file1_id,
}
else /* now both object types are same */
object_type = argdata->type[0];
-
- /*
+
+ /*
* If both points to the same target object, skip comparing details inside
* of the objects to improve performance.
- * Always check for the hard links, otherwise if follow symlink option is
+ * Always check for the hard links, otherwise if follow symlink option is
* specified.
*
* Perform this to match the outputs as bypassing.
@@ -1672,7 +1670,7 @@ hsize_t diff(hid_t file1_id,
{
case H5TRAV_TYPE_DATASET:
do_print_objname("dataset", path1, path2, options);
- break;
+ break;
case H5TRAV_TYPE_NAMED_DATATYPE:
do_print_objname("datatype", path1, path2, options);
break;
@@ -1687,7 +1685,7 @@ hsize_t diff(hid_t file1_id,
do_print_objname("external link", path1, path2, options);
else
do_print_objname ("user defined link", path1, path2, options);
- break;
+ break;
case H5TRAV_TYPE_UNKNOWN:
default:
parallel_print("Comparison not supported: <%s> and <%s> are of type %s\n",
@@ -1735,14 +1733,14 @@ hsize_t diff(hid_t file1_id,
if (nfound)
{
do_print_objname("dataset", path1, path2, options);
- print_found(nfound);
+ print_found(nfound);
}
}
/*---------------------------------------------------------
* compare attributes
- * if condition refers to cases when the dataset is a
+ * if condition refers to cases when the dataset is a
* referenced object
*---------------------------------------------------------
*/
@@ -1781,7 +1779,7 @@ hsize_t diff(hid_t file1_id,
/*-----------------------------------------------------------------
* compare attributes
- * the if condition refers to cases when the dataset is a
+ * the if condition refers to cases when the dataset is a
* referenced object
*-----------------------------------------------------------------
*/
@@ -1813,7 +1811,7 @@ hsize_t diff(hid_t file1_id,
/*-----------------------------------------------------------------
* compare attributes
- * the if condition refers to cases when the dataset is a
+ * the if condition refers to cases when the dataset is a
* referenced object
*-----------------------------------------------------------------
*/
@@ -1855,10 +1853,10 @@ hsize_t diff(hid_t file1_id,
case H5TRAV_TYPE_UDLINK:
{
/* Only external links will have a query function registered */
- if(linkinfo1.linfo.type == H5L_TYPE_EXTERNAL && linkinfo2.linfo.type == H5L_TYPE_EXTERNAL)
+ if(linkinfo1.linfo.type == H5L_TYPE_EXTERNAL && linkinfo2.linfo.type == H5L_TYPE_EXTERNAL)
{
/* If the buffers are the same size, compare them */
- if(linkinfo1.linfo.u.val_size == linkinfo2.linfo.u.val_size)
+ if(linkinfo1.linfo.u.val_size == linkinfo2.linfo.u.val_size)
{
ret = HDmemcmp(linkinfo1.trg_path, linkinfo2.trg_path, linkinfo1.linfo.u.val_size);
}
@@ -1866,7 +1864,7 @@ hsize_t diff(hid_t file1_id,
ret = 1;
/* if "linkinfo1.trg_path" != "linkinfo2.trg_path" then the links
- * are "different" extlinkinfo#.path is combination string of
+ * are "different" extlinkinfo#.path is combination string of
* file_name and obj_name
*/
nfound = (ret != 0) ? 1 : 0;
@@ -1875,7 +1873,7 @@ hsize_t diff(hid_t file1_id,
do_print_objname("external link", path1, path2, options);
} /* end if */
- else
+ else
{
/* If one or both of these links isn't an external link, we can only
* compare information from H5Lget_info since we don't have a query
@@ -1884,7 +1882,7 @@ hsize_t diff(hid_t file1_id,
* If the link classes or the buffer length are not the
* same, the links are "different"
*/
- if((linkinfo1.linfo.type != linkinfo2.linfo.type) ||
+ if((linkinfo1.linfo.type != linkinfo2.linfo.type) ||
(linkinfo1.linfo.u.val_size != linkinfo2.linfo.u.val_size))
nfound = 1;
else
@@ -1922,7 +1920,7 @@ out:
out2:
/*-----------------------------------
- * handle dangling link(s)
+ * handle dangling link(s)
*/
/* both path1 and path2 are dangling links */
if(is_dangle_link1 && is_dangle_link2)
diff --git a/tools/lib/h5diff.h b/tools/lib/h5diff.h
index 04b640f..0226e83 100644
--- a/tools/lib/h5diff.h
+++ b/tools/lib/h5diff.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef H5DIFF_H__
diff --git a/tools/lib/h5diff_array.c b/tools/lib/h5diff_array.c
index 9aadffe..499d5eb 100644
--- a/tools/lib/h5diff_array.c
+++ b/tools/lib/h5diff_array.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "H5private.h"
@@ -130,7 +128,7 @@ typedef struct mcomp_t
{
unsigned n; /* number of members */
hid_t *ids; /* member type id */
- size_t *offsets;
+ size_t *offsets;
struct mcomp_t **m; /* members */
}mcomp_t;
@@ -546,7 +544,7 @@ hsize_t diff_array( void *_mem1,
* Recursively call this function for each element
* H5T_STRING
* compare byte by byte in a cycle from 0 to type_size. this type_size is the
- * value obtained by the get_size function but it is the string lenght for
+ * value obtained by the get_size function but it is the string length for
* variable sized strings
* H5T_OPAQUE
* compare byte by byte in a cycle from 0 to type_size
@@ -602,7 +600,7 @@ static hsize_t diff_datum(void *_mem1,
/* Fast comparison first for atomic type by memcmp().
* It is OK not to list non-atomic type here because it will not be caught
- * by the confition, but it gives more clarity for code planning
+ * by the condition, but it gives more clarity for code planning
*/
if (type_class != H5T_REFERENCE &&
type_class != H5T_COMPOUND &&
@@ -625,6 +623,7 @@ static hsize_t diff_datum(void *_mem1,
*-------------------------------------------------------------------------
*/
case H5T_COMPOUND:
+ h5difftrace("diff_datum H5T_COMPOUND\n");
nmembs = members->n;
@@ -657,18 +656,21 @@ static hsize_t diff_datum(void *_mem1,
*-------------------------------------------------------------------------
*/
case H5T_STRING:
-
+ h5difftrace("diff_datum H5T_STRING\n");
{
- H5T_str_t pad;
- char *s;
- char *s1;
- char *s2;
- size_t size1;
- size_t size2;
+ char *s = NULL;
+ char *sx = NULL;
+ char *s1 = NULL;
+ char *s2 = NULL;
+ size_t size1;
+ size_t size2;
+ size_t sizex;
+ size_t size_mtype = H5Tget_size(m_type);
+ H5T_str_t pad = H5Tget_strpad(m_type);
/* if variable length string */
- if(H5Tis_variable_str(m_type))
- {
+ if(H5Tis_variable_str(m_type)) {
+ h5difftrace("diff_datum H5T_STRING variable\n");
/* Get pointer to first string */
s1 = *(char**) mem1;
size1 = HDstrlen(s1);
@@ -676,14 +678,26 @@ static hsize_t diff_datum(void *_mem1,
s2 = *(char**) mem2;
size2 = HDstrlen(s2);
}
- else
- {
+ else if (H5T_STR_NULLTERM == pad) {
+ h5difftrace("diff_datum H5T_STRING null term\n");
+ /* Get pointer to first string */
+ s1 = (char*) mem1;
+ size1 = HDstrlen(s1);
+ if (size1 > size_mtype)
+ size1 = size_mtype;
+ /* Get pointer to second string */
+ s2 = (char*) mem2;
+ size2 = HDstrlen(s2);
+ if (size2 > size_mtype)
+ size2 = size_mtype;
+ }
+ else {
/* Get pointer to first string */
s1 = (char *)mem1;
- size1 = H5Tget_size(m_type);
+ size1 = size_mtype;
/* Get pointer to second string */
s2 = (char *)mem2;
- size2 = H5Tget_size(m_type);
+ size2 = size_mtype;
}
/*
@@ -692,40 +706,61 @@ static hsize_t diff_datum(void *_mem1,
* of length of strings.
* For now mimic the previous way.
*/
- if(size1 < size2)
- {
+ h5diffdebug2("diff_datum string size:%d\n",size1);
+ h5diffdebug2("diff_datum string size:%d\n",size2);
+ if(size1 != size2) {
+ h5difftrace("diff_datum string sizes\n");
+ nfound++;
+ }
+ if(size1 < size2) {
size = size1;
s = s1;
+ sizex = size2;
+ sx = s2;
}
- else
- {
+ else {
size = size2;
s = s2;
+ sizex = size1;
+ sx = s1;
}
/* check for NULL pointer for string */
- if(s!=NULL)
- {
+ if(s!=NULL) {
/* try fast compare first */
- if (HDmemcmp(s1, s2, size)==0)
- break;
-
- pad = H5Tget_strpad(m_type);
-
- for (u=0; u<size && (s[u] || pad!=H5T_STR_NULLTERM); u++)
- nfound+=character_compare(
- s1 + u,
- s2 + u, /* offset */
- i, /* index position */
- u, /* string character position */
- rank,
- dims,
- acc,
- pos,
- options,
- obj1,
- obj2,
- ph);
+ if(HDmemcmp(s1, s2, size)==0) {
+ if(size1 != size2)
+ if(print_data(options))
+ for (u=size; u<sizex; u++)
+ character_compare(
+ s1 + u,
+ s2 + u, /* offset */
+ i, /* index position */
+ u, /* string character position */
+ rank,
+ dims,
+ acc,
+ pos,
+ options,
+ obj1,
+ obj2,
+ ph);
+ }
+ else
+ for (u=0; u<size; u++)
+ nfound+=character_compare(
+ s1 + u,
+ s2 + u, /* offset */
+ i, /* index position */
+ u, /* string character position */
+ rank,
+ dims,
+ acc,
+ pos,
+ options,
+ obj1,
+ obj2,
+ ph);
}
}
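The string hunk above distinguishes three layouts before comparing: variable-length strings (length taken from strlen of the dereferenced pointer), fixed-length null-terminated strings (strlen capped at the datatype size), and fixed-length padded strings (always the full datatype size). A condensed sketch of that length selection, assuming the standard public H5T calls; per-element buffers and error checks are omitted:

    #include <string.h>
    #include "hdf5.h"

    /* number of characters to compare for one string element mem of
     * in-memory string datatype m_type */
    static size_t string_cmp_len(hid_t m_type, void *mem)
    {
        size_t len;

        if (H5Tis_variable_str(m_type) > 0)
            len = strlen(*(char **)mem);            /* variable length: actual length  */
        else if (H5Tget_strpad(m_type) == H5T_STR_NULLTERM) {
            size_t type_size = H5Tget_size(m_type);

            len = strlen((char *)mem);              /* stop at the terminator ...      */
            if (len > type_size)
                len = type_size;                    /* ... but never past the element  */
        }
        else
            len = H5Tget_size(m_type);              /* space/null padded: full element */
        return len;
    }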
@@ -736,6 +771,7 @@ static hsize_t diff_datum(void *_mem1,
*-------------------------------------------------------------------------
*/
case H5T_BITFIELD:
+ h5difftrace("diff_datum H5T_BITFIELD\n");
/* byte-by-byte comparison */
for (u=0; u<type_size; u++)
@@ -759,7 +795,7 @@ static hsize_t diff_datum(void *_mem1,
*-------------------------------------------------------------------------
*/
case H5T_OPAQUE:
-
+ h5difftrace("diff_datum H5T_OPAQUE\n");
/* byte-by-byte comparison */
for (u=0; u<type_size; u++)
nfound+=character_compare_opt(
@@ -783,6 +819,7 @@ static hsize_t diff_datum(void *_mem1,
*-------------------------------------------------------------------------
*/
case H5T_ENUM:
+ h5difftrace("diff_datum H5T_ENUM\n");
/* For enumeration types we compare the names instead of the
integer values. For each pair of elements being
@@ -2792,7 +2829,7 @@ hsize_t character_compare(char *mem1,
HDmemcpy(&temp1_uchar, mem1, sizeof(unsigned char));
HDmemcpy(&temp2_uchar, mem2, sizeof(unsigned char));
- h5difftrace("character_compare start\n");
+ h5diffdebug3("character_compare start %d=%d\n",temp1_uchar,temp2_uchar);
if (temp1_uchar != temp2_uchar)
{
@@ -5975,8 +6012,8 @@ static void get_member_types(hid_t tid, mcomp_t *members)
hid_t base_tid = H5Tget_super(tid);
get_member_types(base_tid, members);
H5Tclose(base_tid);
- }
- else if (tclass == H5T_COMPOUND)
+ }
+ else if (tclass == H5T_COMPOUND)
{
int nmembs;
@@ -5996,9 +6033,9 @@ static void get_member_types(hid_t tid, mcomp_t *members)
members->m[u] = (mcomp_t *)HDmalloc(sizeof(mcomp_t));
HDmemset(members->m[u], 0, sizeof(mcomp_t));
get_member_types(members->ids[u], members->m[u]);
- }
+ }
}
-
+
return;
diff --git a/tools/lib/h5diff_attr.c b/tools/lib/h5diff_attr.c
index b29e360..74607da 100644
--- a/tools/lib/h5diff_attr.c
+++ b/tools/lib/h5diff_attr.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "H5private.h"
@@ -93,7 +91,7 @@ static void table_attrs_free( table_attrs_t *table )
/*-------------------------------------------------------------------------
* Function: table_attr_mark_exist
*
- * Purpose: mark given attribute name to table as sign of exsit
+ * Purpose: mark the given attribute name in the table as existing
*
* Parameter:
* - exist [IN]
@@ -157,6 +155,7 @@ static herr_t build_match_list_attrs(hid_t loc1_id, hid_t loc2_id, table_attrs_t
unsigned i;
table_attrs_t *table_lp = NULL;
+ h5difftrace("build_match_list_attrs start\n");
if(H5Oget_info(loc1_id, &oinfo1) < 0)
goto error;
if(H5Oget_info(loc2_id, &oinfo2) < 0)
@@ -164,6 +163,7 @@ static herr_t build_match_list_attrs(hid_t loc1_id, hid_t loc2_id, table_attrs_t
table_attrs_init( &table_lp );
+ h5diffdebug3("build_match_list_attrs: %ld - %ld\n", curr1 < oinfo1.num_attrs, curr2 < oinfo2.num_attrs);
/*--------------------------------------------------
* build the list
@@ -228,6 +228,7 @@ static herr_t build_match_list_attrs(hid_t loc1_id, hid_t loc2_id, table_attrs_t
/* get name */
if(H5Aget_name(attr1_id, (size_t)ATTR_NAME_MAX, name1) < 0)
goto error;
+ h5diffdebug2("build_match_list_attrs #1 name - %s\n", name1);
table_attr_mark_exist(infile, name1, table_lp);
table_lp->nattrs_only1++;
@@ -249,6 +250,7 @@ static herr_t build_match_list_attrs(hid_t loc1_id, hid_t loc2_id, table_attrs_t
/* get name */
if(H5Aget_name(attr2_id, (size_t)ATTR_NAME_MAX, name2) < 0)
goto error;
+ h5diffdebug2("build_match_list_attrs #2 name - %s\n", name2);
table_attr_mark_exist(infile, name2, table_lp);
table_lp->nattrs_only2++;
@@ -280,6 +282,7 @@ static herr_t build_match_list_attrs(hid_t loc1_id, hid_t loc2_id, table_attrs_t
*table_out = table_lp;
+ h5difftrace("build_match_list_attrs end\n");
return 0;
error:
@@ -288,6 +291,7 @@ error:
if (0 < attr2_id)
H5Aclose(attr2_id);
+ h5difftrace("build_match_list_attrs end with error\n");
return -1;
}
@@ -324,8 +328,8 @@ hsize_t diff_attr(hid_t loc1_id,
hid_t space2_id=-1; /* space ID */
hid_t ftype1_id=-1; /* file data type ID */
hid_t ftype2_id=-1; /* file data type ID */
- int vstrtype1=0; /* ftype1 is a variable string */
- int vstrtype2=0; /* ftype2 is a variable string */
+ int vstrtype1=0; /* ftype1 is a variable string */
+ int vstrtype2=0; /* ftype2 is a variable string */
hid_t mtype1_id=-1; /* memory data type ID */
hid_t mtype2_id=-1; /* memory data type ID */
size_t msize1; /* memory size of memory type */
@@ -348,12 +352,15 @@ hsize_t diff_attr(hid_t loc1_id,
hsize_t nfound_total = 0;
int j;
- table_attrs_t * match_list_attrs = NULL;
+ table_attrs_t *match_list_attrs = NULL;
+ h5difftrace("diff_attr start\n");
+
if(build_match_list_attrs(loc1_id, loc2_id, &match_list_attrs, options) < 0)
goto error;
/* if detect any unique extra attr */
if(match_list_attrs->nattrs_only1 || match_list_attrs->nattrs_only2) {
+ h5difftrace("diff_attr attributes only in one file\n");
/* exit will be 1 */
options->contents = 0;
}
@@ -361,6 +368,7 @@ hsize_t diff_attr(hid_t loc1_id,
for(u = 0; u < (unsigned)match_list_attrs->nattrs; u++) {
if((match_list_attrs->attrs[u].exist[0]) && (match_list_attrs->attrs[u].exist[1])) {
name1 = name2 = match_list_attrs->attrs[u].name;
+ h5diffdebug2("diff_attr name - %s\n", name1);
/*--------------
* attribute 1 */
@@ -372,6 +380,7 @@ hsize_t diff_attr(hid_t loc1_id,
if((attr2_id = H5Aopen(loc2_id, name2, H5P_DEFAULT)) < 0)
goto error;
+ h5difftrace("diff_attr got attributes\n");
/* get the datatypes */
if((ftype1_id = H5Aget_type(attr1_id)) < 0)
goto error;
@@ -397,9 +406,9 @@ hsize_t diff_attr(hid_t loc1_id,
continue;
}
- if((mtype1_id = h5tools_get_native_type(ftype1_id)) < 0)
+ if((mtype1_id = H5Tget_native_type(ftype1_id, H5T_DIR_DEFAULT)) < 0)
goto error;
- if((mtype2_id = h5tools_get_native_type(ftype2_id)) < 0)
+ if((mtype2_id = H5Tget_native_type(ftype2_id, H5T_DIR_DEFAULT)) < 0)
goto error;
if((msize1 = H5Tget_size(mtype1_id)) == 0)
goto error;
@@ -558,6 +567,7 @@ hsize_t diff_attr(hid_t loc1_id,
table_attrs_free(match_list_attrs);
+ h5difftrace("diff_attr end\n");
return nfound_total;
error:
@@ -586,6 +596,7 @@ error:
} H5E_END_TRY;
options->err_stat = 1;
+ h5difftrace("diff_attr end with error\n");
return nfound_total;
}
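Both this file and h5diff_dset.c below replace the tools-library helper h5tools_get_native_type() with a direct call to the public H5Tget_native_type() API. A minimal usage sketch of that call (illustrative only; attribute setup and error handling omitted):

    #include "hdf5.h"

    /* map an attribute's on-disk datatype to its native in-memory equivalent */
    static hid_t native_type_of_attr(hid_t attr_id)
    {
        hid_t ftype = H5Aget_type(attr_id);                        /* file datatype   */
        hid_t mtype = H5Tget_native_type(ftype, H5T_DIR_DEFAULT);  /* native datatype */

        H5Tclose(ftype);     /* the native copy is independent of the file type */
        return mtype;        /* caller closes it with H5Tclose() when done      */
    }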
diff --git a/tools/lib/h5diff_dset.c b/tools/lib/h5diff_dset.c
index 63f1483..991ef48 100644
--- a/tools/lib/h5diff_dset.c
+++ b/tools/lib/h5diff_dset.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
-* the files COPYING and Copyright.html. COPYING can be found at the root *
-* of the source code distribution tree; Copyright.html can be found at the *
-* root level of an installed copy of the electronic HDF5 document set and *
-* is linked from the top-level documents page. It can also be found at *
-* http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-* access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "H5private.h"
@@ -46,6 +44,7 @@ hsize_t diff_dataset( hid_t file1_id,
hid_t dcpl2 = -1;
hsize_t nfound = 0;
+ h5difftrace("diff_dataset start\n");
/*-------------------------------------------------------------------------
* open the handles
*-------------------------------------------------------------------------
@@ -54,13 +53,11 @@ hsize_t diff_dataset( hid_t file1_id,
H5E_BEGIN_TRY
{
/* Open the datasets */
- if((did1 = H5Dopen2(file1_id, obj1_name, H5P_DEFAULT)) < 0)
- {
+ if((did1 = H5Dopen2(file1_id, obj1_name, H5P_DEFAULT)) < 0) {
parallel_print("Cannot open dataset <%s>\n", obj1_name);
goto error;
}
- if((did2 = H5Dopen2(file2_id, obj2_name, H5P_DEFAULT)) < 0)
- {
+ if((did2 = H5Dopen2(file2_id, obj2_name, H5P_DEFAULT)) < 0) {
parallel_print("Cannot open dataset <%s>\n", obj2_name);
goto error;
}
@@ -80,33 +77,22 @@ hsize_t diff_dataset( hid_t file1_id,
* 2) the internal filters might be turned off
*-------------------------------------------------------------------------
*/
- if ((h5tools_canreadf((options->m_verbose?obj1_name:NULL),dcpl1)==1) &&
- (h5tools_canreadf((options->m_verbose?obj2_name:NULL),dcpl2)==1))
- {
- nfound=diff_datasetid(did1,
- did2,
- obj1_name,
- obj2_name,
- options);
- }
+ if ((h5tools_canreadf((options->m_verbose ? obj1_name : NULL), dcpl1) == 1) &&
+ (h5tools_canreadf((options->m_verbose ? obj2_name : NULL), dcpl2) == 1))
+ nfound = diff_datasetid(did1, did2, obj1_name, obj2_name, options);
+ else
+ goto error;
+
/*-------------------------------------------------------------------------
* close
*-------------------------------------------------------------------------
*/
- /* disable error reporting */
- H5E_BEGIN_TRY {
- H5Pclose(dcpl1);
- H5Pclose(dcpl2);
- H5Dclose(did1);
- H5Dclose(did2);
- /* enable error reporting */
- } H5E_END_TRY;
-
-
- return nfound;
+ goto done;
error:
- options->err_stat=1;
+ options->err_stat = 1;
+
+done:
/* disable error reporting */
H5E_BEGIN_TRY {
H5Pclose(dcpl1);
@@ -116,6 +102,7 @@ error:
/* enable error reporting */
} H5E_END_TRY;
+ h5difftrace("diff_dataset end\n");
return nfound;
}
@@ -187,20 +174,24 @@ hsize_t diff_datasetid( hid_t did1,
hid_t sid2=-1;
hid_t f_tid1=-1;
hid_t f_tid2=-1;
+ hid_t dam_tid=-1; /* m_tid for diff_array function */
hid_t m_tid1=-1;
hid_t m_tid2=-1;
hid_t dcpl1 = -1;
hid_t dcpl2 = -1;
H5D_layout_t stl1 = -1;
H5D_layout_t stl2 = -1;
+ size_t dam_size; /* m_size for diff_array function */
size_t m_size1;
size_t m_size2;
H5T_sign_t sign1;
H5T_sign_t sign2;
int rank1;
int rank2;
+ hsize_t danelmts; /* nelmts for diff_array function */
hsize_t nelmts1;
hsize_t nelmts2;
+ hsize_t *dadims; /* dims for diff_array function */
hsize_t dims1[H5S_MAX_RANK];
hsize_t dims2[H5S_MAX_RANK];
hsize_t maxdim1[H5S_MAX_RANK];
@@ -222,30 +213,29 @@ hsize_t diff_datasetid( hid_t did1,
h5difftrace("diff_datasetid start\n");
/* Get the dataspace handle */
- if ( (sid1 = H5Dget_space(did1)) < 0 )
+ if((sid1 = H5Dget_space(did1)) < 0)
goto error;
/* Get rank */
- if ( (rank1 = H5Sget_simple_extent_ndims(sid1)) < 0 )
+ if((rank1 = H5Sget_simple_extent_ndims(sid1)) < 0)
goto error;
/* Get the dataspace handle */
- if ( (sid2 = H5Dget_space(did2)) < 0 )
+ if((sid2 = H5Dget_space(did2)) < 0 )
goto error;
/* Get rank */
- if ( (rank2 = H5Sget_simple_extent_ndims(sid2)) < 0 )
+ if((rank2 = H5Sget_simple_extent_ndims(sid2)) < 0)
goto error;
/* Get dimensions */
- if ( H5Sget_simple_extent_dims(sid1,dims1,maxdim1) < 0 )
+ if(H5Sget_simple_extent_dims(sid1, dims1, maxdim1) < 0)
goto error;
/* Get dimensions */
- if ( H5Sget_simple_extent_dims(sid2,dims2,maxdim2) < 0 )
- {
+ if(H5Sget_simple_extent_dims(sid2, dims2, maxdim2) < 0)
goto error;
- }
+ h5diffdebug3("rank: %ld - %ld\n", rank1, rank2);
/*-------------------------------------------------------------------------
* get the file data type
@@ -253,15 +243,12 @@ hsize_t diff_datasetid( hid_t did1,
*/
/* Get the data type */
- if ( (f_tid1 = H5Dget_type(did1)) < 0 )
+ if((f_tid1 = H5Dget_type(did1)) < 0)
goto error;
/* Get the data type */
- if ( (f_tid2 = H5Dget_type(did2)) < 0 )
- {
+ if((f_tid2 = H5Dget_type(did2)) < 0)
goto error;
- }
-
/*-------------------------------------------------------------------------
* get the storage layout type
@@ -283,22 +270,20 @@ hsize_t diff_datasetid( hid_t did1,
*/
h5difftrace("check for empty datasets\n");
- storage_size1=H5Dget_storage_size(did1);
- storage_size2=H5Dget_storage_size(did2);
+ storage_size1 = H5Dget_storage_size(did1);
+ storage_size2 = H5Dget_storage_size(did2);
+ h5diffdebug3("storage size: %ld - %ld\n", storage_size1, storage_size2);
- if (storage_size1==0 || storage_size2==0)
- {
- if (stl1==H5D_VIRTUAL || stl2==H5D_VIRTUAL)
- {
- if ( (options->m_verbose||options->m_list_not_cmp) && obj1_name && obj2_name)
+ if(storage_size1 == 0 || storage_size2 == 0) {
+ if(stl1 == H5D_VIRTUAL || stl2 == H5D_VIRTUAL) {
+ if((options->m_verbose||options->m_list_not_cmp) && obj1_name && obj2_name)
parallel_print("Warning: <%s> or <%s> is a virtual dataset\n", obj1_name, obj2_name);
}
- else
- {
- if ( (options->m_verbose||options->m_list_not_cmp) && obj1_name && obj2_name)
+ else {
+ if((options->m_verbose || options->m_list_not_cmp) && obj1_name && obj2_name)
parallel_print("Not comparable: <%s> or <%s> is an empty dataset\n", obj1_name, obj2_name);
- can_compare=0;
- options->not_cmp=1;
+ can_compare = 0;
+ options->not_cmp = 1;
}
}
@@ -306,71 +291,58 @@ hsize_t diff_datasetid( hid_t did1,
* check for comparable TYPE and SPACE
*-------------------------------------------------------------------------
*/
-
- if (diff_can_type(f_tid1,
- f_tid2,
- rank1,
- rank2,
- dims1,
- dims2,
- maxdim1,
- maxdim2,
- obj1_name,
- obj2_name,
- options,
- 0)!=1)
- {
- can_compare=0;
- }
+ if (diff_can_type(f_tid1, f_tid2, rank1, rank2,
+ dims1, dims2, maxdim1, maxdim2,
+ obj1_name, obj2_name,
+ options, 0) != 1)
+ can_compare = 0;
/*-------------------------------------------------------------------------
* memory type and sizes
*-------------------------------------------------------------------------
*/
h5difftrace("check for memory type and sizes\n");
- if ((m_tid1=h5tools_get_native_type(f_tid1)) < 0)
+ if((m_tid1 = H5Tget_native_type(f_tid1, H5T_DIR_DEFAULT)) < 0)
goto error;
- if ((m_tid2=h5tools_get_native_type(f_tid2)) < 0)
+ if((m_tid2 = H5Tget_native_type(f_tid2, H5T_DIR_DEFAULT)) < 0)
goto error;
- m_size1 = H5Tget_size( m_tid1 );
- m_size2 = H5Tget_size( m_tid2 );
+ m_size1 = H5Tget_size(m_tid1);
+ m_size2 = H5Tget_size(m_tid2);
+ h5diffdebug3("type size: %ld - %ld\n", m_size1, m_size2);
/*-------------------------------------------------------------------------
* check for different signed/unsigned types
*-------------------------------------------------------------------------
*/
- if (can_compare)
- {
+ if(can_compare) {
h5difftrace("can_compare for sign\n");
- sign1=H5Tget_sign(m_tid1);
- sign2=H5Tget_sign(m_tid2);
- if ( sign1 != sign2 )
- {
+ sign1 = H5Tget_sign(m_tid1);
+ sign2 = H5Tget_sign(m_tid2);
+ if(sign1 != sign2) {
h5difftrace("sign1 != sign2\n");
- if ((options->m_verbose||options->m_list_not_cmp) && obj1_name && obj2_name)
- {
+ if((options->m_verbose || options->m_list_not_cmp) && obj1_name && obj2_name) {
parallel_print("Not comparable: <%s> has sign %s ", obj1_name, get_sign(sign1));
parallel_print("and <%s> has sign %s\n", obj2_name, get_sign(sign2));
}
- can_compare=0;
- options->not_cmp=1;
+ can_compare = 0;
+ options->not_cmp = 1;
}
}
/* Check if type is either VLEN-data or VLEN-string to reclaim any
* VLEN memory buffer later */
- if( TRUE == h5tools_detect_vlen(m_tid1) )
+ if(TRUE == h5tools_detect_vlen(m_tid1))
vl_data = TRUE;
/*------------------------------------------------------------------------
* only attempt to compare if possible
*-------------------------------------------------------------------------
*/
- if(can_compare) /* it is possible to compare */
- {
+ if(can_compare) { /* it is possible to compare */
+ H5T_class_t tclass = H5Tget_class(f_tid1);
h5difftrace("can_compare attempt\n");
/*-----------------------------------------------------------------
@@ -385,19 +357,44 @@ hsize_t diff_datasetid( hid_t did1,
for(i = 0; i < rank2; i++)
nelmts2 *= dims2[i];
+ h5diffdebug3("nelmts: %ld - %ld\n", nelmts1, nelmts2);
HDassert(nelmts1 == nelmts2);
- /*-----------------------------------------------------------------
- * "upgrade" the smaller memory size
- *------------------------------------------------------------------
- */
- h5difftrace("upgrade the smaller memory size?\n");
-
- if (FAIL == match_up_memsize (f_tid1, f_tid2,
- &m_tid1, &m_tid2,
- &m_size1, &m_size2))
- goto error;
-
+ if(tclass != H5T_ARRAY) {
+ /*-----------------------------------------------------------------
+ * "upgrade" the smaller memory size
+ *------------------------------------------------------------------
+ */
+ h5difftrace("upgrade the smaller memory size?\n");
+ if (FAIL == match_up_memsize (f_tid1, f_tid2,
+ &m_tid1, &m_tid2,
+ &m_size1, &m_size2))
+ goto error;
+ h5diffdebug3("m_size: %ld - %ld\n", m_size1, m_size2);
+ dadims = dims1;
+ dam_size = m_size1;
+ dam_tid = m_tid1;
+ danelmts = nelmts1;
+ need = (size_t)(nelmts1 * m_size1); /* bytes needed */
+ }
+ else {
+ h5diffdebug3("Array dims: %d - %d\n", dims1[0], dims2[0]);
+ /* Compare the smallest array, but create the largest buffer */
+ if(m_size1 <= m_size2) {
+ dadims = dims1;
+ dam_size = m_size1;
+ dam_tid = m_tid1;
+ danelmts = nelmts1;
+ need = (size_t)(nelmts2 * m_size2); /* bytes needed */
+ }
+ else {
+ dadims = dims2;
+ dam_size = m_size2;
+ dam_tid = m_tid2;
+ danelmts = nelmts2;
+ need = (size_t)(nelmts1 * m_size1); /* bytes needed */
+ }
+ }
/* print names */
if(obj1_name)
name1 = diff_basename(obj1_name);
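For array datatypes, the hunk above compares using the smaller element size while sizing the read buffer for the larger one, so a single allocation is safe for both datasets. A reduced, self-contained sketch of that selection (illustrative only; the patch additionally reuses the chosen dims, datatype, and element count when calling diff_array()):

    #include <stddef.h>
    #include <stdio.h>

    /* pick the comparison element size/count and return the bytes to allocate */
    static size_t pick_buffer_bytes(size_t m_size1, size_t nelmts1,
                                    size_t m_size2, size_t nelmts2,
                                    size_t *cmp_size, size_t *cmp_nelmts)
    {
        if (m_size1 <= m_size2) {
            *cmp_size   = m_size1;           /* compare with the smaller elements */
            *cmp_nelmts = nelmts1;
            return nelmts2 * m_size2;        /* but allocate for the larger ones  */
        }
        *cmp_size   = m_size2;
        *cmp_nelmts = nelmts2;
        return nelmts1 * m_size1;
    }

    int main(void)
    {
        size_t cs, cn;
        size_t need = pick_buffer_bytes(4, 100, 8, 100, &cs, &cn);

        printf("compare %zu elements of %zu bytes each, allocate %zu bytes\n",
               cn, cs, need);
        return 0;
    }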
@@ -409,7 +406,6 @@ hsize_t diff_datasetid( hid_t did1,
* read/compare
*-----------------------------------------------------------------
*/
- need = (size_t)(nelmts1 * m_size1); /* bytes needed */
if(need < H5TOOLS_MALLOCSIZE) {
buf1 = HDmalloc(need);
buf2 = HDmalloc(need);
@@ -419,12 +415,13 @@ hsize_t diff_datasetid( hid_t did1,
h5difftrace("buf1 != NULL && buf2 != NULL\n");
if(H5Dread(did1, m_tid1, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf1) < 0)
goto error;
+ h5difftrace("H5Dread did2\n");
if(H5Dread(did2, m_tid2, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf2) < 0)
goto error;
/* array diff */
- nfound = diff_array(buf1, buf2, nelmts1, (hsize_t)0, rank1, dims1,
- options, name1, name2, m_tid1, did1, did2);
+ nfound = diff_array(buf1, buf2, danelmts, (hsize_t)0, rank1, dadims,
+ options, name1, name2, dam_tid, did1, did2);
/* reclaim any VL memory, if necessary */
if(vl_data) {
@@ -432,10 +429,9 @@ hsize_t diff_datasetid( hid_t did1,
H5Dvlen_reclaim(m_tid2, sid2, H5P_DEFAULT, buf2);
} /* end if */
} /* end if */
- else /* possibly not enough memory, read/compare by hyperslabs */
- {
- size_t p_type_nbytes = m_size1; /*size of memory type */
- hsize_t p_nelmts = nelmts1; /*total selected elmts */
+ else { /* possibly not enough memory, read/compare by hyperslabs */
+ size_t p_type_nbytes = dam_size; /*size of memory type */
+ hsize_t p_nelmts = danelmts; /*total selected elmts */
hsize_t elmtno; /*counter */
int carry; /*counter carry value */
@@ -461,7 +457,7 @@ hsize_t diff_datasetid( hid_t did1,
if(size == 0) /* datum size > H5TOOLS_BUFSIZE */
size = 1;
- sm_size[i - 1] = MIN(dims1[i - 1], size);
+ sm_size[i - 1] = MIN(dadims[i - 1], size);
sm_nbytes *= sm_size[i - 1];
HDassert(sm_nbytes > 0);
} /* end for */
@@ -489,7 +485,7 @@ hsize_t diff_datasetid( hid_t did1,
/* calculate the hyperslab size */
if(rank1 > 0) {
for(i = 0, hs_nelmts = 1; i < rank1; i++) {
- hs_size[i] = MIN(dims1[i] - hs_offset[i], sm_size[i]);
+ hs_size[i] = MIN(dadims[i] - hs_offset[i], sm_size[i]);
hs_nelmts *= hs_size[i];
} /* end for */
if(H5Sselect_hyperslab(sid1, H5S_SELECT_SET, hs_offset, NULL, hs_size, NULL) < 0)
@@ -502,15 +498,15 @@ hsize_t diff_datasetid( hid_t did1,
else
hs_nelmts = 1;
- if(H5Dread(did1,m_tid1,sm_space,sid1,H5P_DEFAULT,sm_buf1) < 0)
+ if(H5Dread(did1, m_tid1, sm_space, sid1, H5P_DEFAULT, sm_buf1) < 0)
goto error;
- if(H5Dread(did2,m_tid2,sm_space,sid2,H5P_DEFAULT,sm_buf2) < 0)
+ if(H5Dread(did2, m_tid2, sm_space, sid2, H5P_DEFAULT, sm_buf2) < 0)
goto error;
/* get array differences. in the case of hyperslab read, increment the number of differences
- found in each hyperslab and pass the position at the beggining for printing */
+ found in each hyperslab and pass the position at the beginning for printing */
nfound += diff_array(sm_buf1, sm_buf2, hs_nelmts, elmtno, rank1,
- dims1, options, name1, name2, m_tid1, did1, did2);
+ dadims, options, name1, name2, dam_tid, did1, did2);
/* reclaim any VL memory, if necessary */
if(vl_data) {
@@ -521,7 +517,7 @@ hsize_t diff_datasetid( hid_t did1,
/* calculate the next hyperslab offset */
for(i = rank1, carry = 1; i > 0 && carry; --i) {
hs_offset[i - 1] += hs_size[i - 1];
- if(hs_offset[i - 1] == dims1[i - 1])
+ if(hs_offset[i - 1] == dadims[i - 1])
hs_offset[i - 1] = 0;
else
carry = 0;
@@ -573,37 +569,33 @@ error:
options->err_stat=1;
/* free */
- if (buf1!=NULL)
- {
+ if(buf1 != NULL) {
/* reclaim any VL memory, if necessary */
if(vl_data)
H5Dvlen_reclaim(m_tid1, sid1, H5P_DEFAULT, buf1);
HDfree(buf1);
- buf1=NULL;
+ buf1 = NULL;
}
- if (buf2!=NULL)
- {
+ if(buf2 != NULL) {
/* reclaim any VL memory, if necessary */
if(vl_data)
H5Dvlen_reclaim(m_tid2, sid2, H5P_DEFAULT, buf2);
HDfree(buf2);
- buf2=NULL;
+ buf2 = NULL;
}
- if (sm_buf1!=NULL)
- {
+ if(sm_buf1 != NULL) {
/* reclaim any VL memory, if necessary */
if(vl_data)
H5Dvlen_reclaim(m_tid1, sm_space, H5P_DEFAULT, sm_buf1);
HDfree(sm_buf1);
- sm_buf1=NULL;
+ sm_buf1 = NULL;
}
- if (sm_buf2!=NULL)
- {
+ if(sm_buf2 != NULL) {
/* reclaim any VL memory, if necessary */
if(vl_data)
H5Dvlen_reclaim(m_tid1, sm_space, H5P_DEFAULT, sm_buf2);
HDfree(sm_buf2);
- sm_buf2=NULL;
+ sm_buf2 = NULL;
}
/* disable error reporting */
@@ -651,75 +643,55 @@ int diff_can_type( hid_t f_tid1, /* file data type */
diff_opt_t *options,
int is_compound)
{
-
-
H5T_class_t tclass1;
H5T_class_t tclass2;
- int maxdim_diff=0; /* maximum dimensions are different */
- int dim_diff=0; /* current dimensions are different */
+ int maxdim_diff = 0; /* maximum dimensions are different */
+ int dim_diff = 0; /* current dimensions are different */
int i;
- int can_compare = 1; /* return value */
+ int can_compare = 1; /* return value */
+ h5difftrace("diff_can_type start\n");
/*-------------------------------------------------------------------------
* check for the same class
*-------------------------------------------------------------------------
*/
-
- if ((tclass1=H5Tget_class(f_tid1)) < 0)
+ if((tclass1 = H5Tget_class(f_tid1)) < 0)
return -1;
-
- if ((tclass2=H5Tget_class(f_tid2)) < 0)
+ if((tclass2 = H5Tget_class(f_tid2)) < 0)
return -1;
- if ( tclass1 != tclass2 )
- {
-
- if ( (options->m_verbose||options->m_list_not_cmp) && obj1_name && obj2_name)
- {
-
- if ( is_compound )
- {
-
+ if(tclass1 != tclass2) {
+ if((options->m_verbose || options->m_list_not_cmp) && obj1_name && obj2_name) {
+ if(is_compound) {
parallel_print("Not comparable: <%s> has a class %s and <%s> has a class %s\n",
- obj1_name, get_class(tclass1),
- obj2_name, get_class(tclass2) );
-
+ obj1_name, get_class(tclass1),
+ obj2_name, get_class(tclass2));
}
-
- else
-
- {
-
+ else {
parallel_print("Not comparable: <%s> is of class %s and <%s> is of class %s\n",
- obj1_name, get_class(tclass1),
- obj2_name, get_class(tclass2) );
-
+ obj1_name, get_class(tclass1),
+ obj2_name, get_class(tclass2));
}
-
}
-
-
can_compare = 0;
options->not_cmp = 1;
- return can_compare;
+ goto done;
}
/*-------------------------------------------------------------------------
* check for non supported classes
*-------------------------------------------------------------------------
*/
-
- HDassert(tclass1==tclass2);
- switch (tclass1)
- {
+ HDassert(tclass1 == tclass2);
+ switch (tclass1) {
case H5T_TIME:
- if ( (options->m_verbose||options->m_list_not_cmp) && obj1_name && obj2_name) {
+ if((options->m_verbose || options->m_list_not_cmp) && obj1_name && obj2_name) {
parallel_print("Not comparable: <%s> and <%s> are of class %s\n",
- obj1_name,obj2_name,get_class(tclass2) );
+ obj1_name, obj2_name, get_class(tclass2));
} /* end if */
can_compare = 0;
options->not_cmp = 1;
- return can_compare;
+ goto done;
case H5T_INTEGER:
case H5T_FLOAT:
@@ -734,6 +706,7 @@ int diff_can_type( hid_t f_tid1, /* file data type */
case H5T_NO_CLASS:
case H5T_NCLASSES:
default:
+ h5diffdebug2("diff_can_type class - %s\n", get_class(tclass1));
break;
} /* end switch */
@@ -741,17 +714,11 @@ int diff_can_type( hid_t f_tid1, /* file data type */
* check for equal file datatype; warning only
*-------------------------------------------------------------------------
*/
-
- if ( (H5Tequal(f_tid1, f_tid2)==0) &&
- (options->m_verbose) && obj1_name && obj2_name)
- {
-
+ if((H5Tequal(f_tid1, f_tid2) == 0) && (options->m_verbose) && obj1_name && obj2_name) {
H5T_class_t cl = H5Tget_class(f_tid1);
-
parallel_print("Warning: different storage datatype\n");
- if ( cl == H5T_INTEGER || cl == H5T_FLOAT )
- {
+ if(cl == H5T_INTEGER || cl == H5T_FLOAT) {
parallel_print("<%s> has file datatype ", obj1_name);
print_type(f_tid1);
parallel_print("\n");
@@ -759,110 +726,85 @@ int diff_can_type( hid_t f_tid1, /* file data type */
print_type(f_tid2);
parallel_print("\n");
}
-
-
-
}
/*-------------------------------------------------------------------------
* check for the same rank
*-------------------------------------------------------------------------
*/
-
-
- if ( rank1 != rank2 )
- {
-
- if ( (options->m_verbose||options->m_list_not_cmp) && obj1_name && obj2_name)
- {
+ if(rank1 != rank2) {
+ if((options->m_verbose || options->m_list_not_cmp) && obj1_name && obj2_name) {
parallel_print("Not comparable: <%s> has rank %d, dimensions ", obj1_name, rank1);
- print_dimensions(rank1,dims1);
+ print_dimensions(rank1, dims1);
parallel_print(", max dimensions ");
- print_dimensions(rank1,maxdim1);
+ print_dimensions(rank1, maxdim1);
parallel_print("\n" );
parallel_print("and <%s> has rank %d, dimensions ", obj2_name, rank2);
- print_dimensions(rank2,dims2);
+ print_dimensions(rank2, dims2);
parallel_print(", max dimensions ");
- print_dimensions(rank2,maxdim2);
+ print_dimensions(rank2, maxdim2);
parallel_print("\n");
}
-
can_compare = 0;
options->not_cmp = 1;
- return can_compare;
+ goto done;
}
/*-------------------------------------------------------------------------
* check for different dimensions
*-------------------------------------------------------------------------
*/
-
- HDassert(rank1==rank2);
- for ( i=0; i<rank1; i++)
- {
- if (maxdim1 && maxdim2)
- {
- if ( maxdim1[i] != maxdim2[i] )
- maxdim_diff=1;
+ HDassert(rank1 == rank2);
+ for(i = 0; i<rank1; i++) {
+ if(maxdim1 && maxdim2) {
+ if(maxdim1[i] != maxdim2[i])
+ maxdim_diff = 1;
}
- if ( dims1[i] != dims2[i] )
- dim_diff=1;
+ if(dims1[i] != dims2[i])
+ dim_diff = 1;
}
/*-------------------------------------------------------------------------
* current dimensions
*-------------------------------------------------------------------------
*/
-
- if (dim_diff==1)
- {
- if ( (options->m_verbose||options->m_list_not_cmp) && obj1_name && obj2_name)
- {
+ if(dim_diff == 1) {
+ if((options->m_verbose || options->m_list_not_cmp) && obj1_name && obj2_name) {
parallel_print("Not comparable: <%s> has rank %d, dimensions ", obj1_name, rank1);
- print_dimensions(rank1,dims1);
- if (maxdim1 && maxdim2)
- {
+ print_dimensions(rank1, dims1);
+ if(maxdim1 && maxdim2) {
parallel_print(", max dimensions ");
- print_dimensions(rank1,maxdim1);
+ print_dimensions(rank1, maxdim1);
parallel_print("\n" );
parallel_print("and <%s> has rank %d, dimensions ", obj2_name, rank2);
- print_dimensions(rank2,dims2);
+ print_dimensions(rank2, dims2);
parallel_print(", max dimensions ");
- print_dimensions(rank2,maxdim2);
+ print_dimensions(rank2, maxdim2);
parallel_print("\n");
}
}
-
-
can_compare = 0;
options->not_cmp = 1;
- return can_compare;
-
-
-
+ goto done;
}
/*-------------------------------------------------------------------------
* maximum dimensions; just give a warning
*-------------------------------------------------------------------------
*/
- if (maxdim1 && maxdim2 && maxdim_diff==1 && obj1_name )
- {
- if (options->m_verbose) {
+ if(maxdim1 && maxdim2 && maxdim_diff == 1 && obj1_name) {
+ if(options->m_verbose) {
parallel_print( "Warning: different maximum dimensions\n");
parallel_print("<%s> has max dimensions ", obj1_name);
- print_dimensions(rank1,maxdim1);
+ print_dimensions(rank1, maxdim1);
parallel_print("\n");
parallel_print("<%s> has max dimensions ", obj2_name);
- print_dimensions(rank2,maxdim2);
+ print_dimensions(rank2, maxdim2);
parallel_print("\n");
}
}
-
- if ( tclass1 == H5T_COMPOUND )
- {
-
+ if(tclass1 == H5T_COMPOUND) {
int nmembs1;
int nmembs2;
int j;
@@ -872,67 +814,40 @@ int diff_can_type( hid_t f_tid1, /* file data type */
nmembs1 = H5Tget_nmembers(f_tid1);
nmembs2 = H5Tget_nmembers(f_tid2);
- if ( nmembs1 != nmembs2 )
- {
-
- if ( (options->m_verbose||options->m_list_not_cmp) && obj1_name && obj2_name)
- {
+ if(nmembs1 != nmembs2) {
+ if((options->m_verbose || options->m_list_not_cmp) && obj1_name && obj2_name) {
parallel_print("Not comparable: <%s> has %d members ", obj1_name, nmembs1);
parallel_print("<%s> has %d members ", obj2_name, nmembs2);
parallel_print("\n");
}
-
can_compare = 0;
options->not_cmp = 1;
- return can_compare;
+ goto done;
}
- for (j = 0; j < nmembs1; j++)
- {
+ for (j = 0; j < nmembs1; j++) {
memb_type1 = H5Tget_member_type(f_tid1, (unsigned)j);
memb_type2 = H5Tget_member_type(f_tid2, (unsigned)j);
- if (diff_can_type(memb_type1,
- memb_type2,
- rank1,
- rank2,
- dims1,
- dims2,
- maxdim1,
- maxdim2,
- obj1_name,
- obj2_name,
- options,
- 1)!=1)
- {
+ if (diff_can_type(memb_type1, memb_type2, rank1, rank2,
+ dims1, dims2, maxdim1, maxdim2, obj1_name, obj2_name,
+ options, 1) != 1) {
can_compare = 0;
options->not_cmp = 1;
H5Tclose(memb_type1);
H5Tclose(memb_type2);
- return can_compare;
+ goto done;
}
-
H5Tclose(memb_type1);
H5Tclose(memb_type2);
-
}
-
-
-
-
-
}
-
-
-
-
-
+done:
+ h5diffdebug2("diff_can_type end - %d\n", can_compare);
return can_compare;
}
-
-
/*-------------------------------------------------------------------------
* Function: print_sizes
*
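
For reference, the control-flow shape that the diff_can_type() cleanup above converges on is a single-exit function: every failed comparability check now jumps to the shared done: label, so the end-of-function trace and the return happen exactly once. A minimal sketch of that pattern (illustration only, not part of the patch):

    static int
    can_compare_sketch(int class_ok, int rank_ok)
    {
        int can_compare = 1;        /* assume comparable until proven otherwise */

        if (!class_ok) {
            can_compare = 0;
            goto done;
        }
        if (!rank_ok) {
            can_compare = 0;
            goto done;
        }

    done:
        /* single exit point: tracing/cleanup runs exactly once */
        return can_compare;
    }
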
diff --git a/tools/lib/h5diff_util.c b/tools/lib/h5diff_util.c
index 985a47d..454f993 100644
--- a/tools/lib/h5diff_util.c
+++ b/tools/lib/h5diff_util.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "H5private.h"
@@ -35,19 +33,14 @@ print_dimensions (int rank, hsize_t *dims)
{
int i;
- if ( rank <= 0 )
- {
+ if( rank <= 0 )
parallel_print("H5S_SCALAR" );
- }
- else
- {
+ else {
if (!dims)
parallel_print("dimension is NULL");
- else
- {
+ else {
parallel_print("[");
- for ( i = 0; i < rank-1; i++)
- {
+ for ( i = 0; i < rank-1; i++) {
parallel_print(HSIZE_T_FORMAT, dims[i]);
parallel_print("x");
}
@@ -76,91 +69,108 @@ print_dimensions (int rank, hsize_t *dims)
*/
void print_type(hid_t type)
{
- switch (H5Tget_class(type))
- {
+ switch (H5Tget_class(type)) {
case H5T_INTEGER:
- if (H5Tequal(type, H5T_STD_I8BE)) {
+ if(H5Tequal(type, H5T_STD_I8BE))
parallel_print("H5T_STD_I8BE");
- } else if (H5Tequal(type, H5T_STD_I8LE)) {
+ else if(H5Tequal(type, H5T_STD_I8LE))
parallel_print("H5T_STD_I8LE");
- } else if (H5Tequal(type, H5T_STD_I16BE)) {
+ else if(H5Tequal(type, H5T_STD_I16BE))
parallel_print("H5T_STD_I16BE");
- } else if (H5Tequal(type, H5T_STD_I16LE)) {
+ else if(H5Tequal(type, H5T_STD_I16LE))
parallel_print("H5T_STD_I16LE");
- } else if (H5Tequal(type, H5T_STD_I32BE)) {
+ else if(H5Tequal(type, H5T_STD_I32BE))
parallel_print("H5T_STD_I32BE");
- } else if (H5Tequal(type, H5T_STD_I32LE)) {
+ else if(H5Tequal(type, H5T_STD_I32LE))
parallel_print("H5T_STD_I32LE");
- } else if (H5Tequal(type, H5T_STD_I64BE)) {
+ else if(H5Tequal(type, H5T_STD_I64BE))
parallel_print("H5T_STD_I64BE");
- } else if (H5Tequal(type, H5T_STD_I64LE)) {
+ else if(H5Tequal(type, H5T_STD_I64LE))
parallel_print("H5T_STD_I64LE");
- } else if (H5Tequal(type, H5T_STD_U8BE)) {
+ else if(H5Tequal(type, H5T_STD_U8BE))
parallel_print("H5T_STD_U8BE");
- } else if (H5Tequal(type, H5T_STD_U8LE)) {
+ else if(H5Tequal(type, H5T_STD_U8LE))
parallel_print("H5T_STD_U8LE");
- } else if (H5Tequal(type, H5T_STD_U16BE)) {
+ else if(H5Tequal(type, H5T_STD_U16BE))
parallel_print("H5T_STD_U16BE");
- } else if (H5Tequal(type, H5T_STD_U16LE)) {
+ else if(H5Tequal(type, H5T_STD_U16LE))
parallel_print("H5T_STD_U16LE");
- } else if (H5Tequal(type, H5T_STD_U32BE)) {
+ else if(H5Tequal(type, H5T_STD_U32BE))
parallel_print("H5T_STD_U32BE");
- } else if (H5Tequal(type, H5T_STD_U32LE)) {
+ else if(H5Tequal(type, H5T_STD_U32LE))
parallel_print("H5T_STD_U32LE");
- } else if (H5Tequal(type, H5T_STD_U64BE)) {
+ else if(H5Tequal(type, H5T_STD_U64BE))
parallel_print("H5T_STD_U64BE");
- } else if (H5Tequal(type, H5T_STD_U64LE)) {
+ else if(H5Tequal(type, H5T_STD_U64LE))
parallel_print("H5T_STD_U64LE");
- } else if (H5Tequal(type, H5T_NATIVE_SCHAR)) {
+ else if(H5Tequal(type, H5T_NATIVE_SCHAR))
parallel_print("H5T_NATIVE_SCHAR");
- } else if (H5Tequal(type, H5T_NATIVE_UCHAR)) {
+ else if(H5Tequal(type, H5T_NATIVE_UCHAR))
parallel_print("H5T_NATIVE_UCHAR");
- } else if (H5Tequal(type, H5T_NATIVE_SHORT)) {
+ else if(H5Tequal(type, H5T_NATIVE_SHORT))
parallel_print("H5T_NATIVE_SHORT");
- } else if (H5Tequal(type, H5T_NATIVE_USHORT)) {
+ else if(H5Tequal(type, H5T_NATIVE_USHORT))
parallel_print("H5T_NATIVE_USHORT");
- } else if (H5Tequal(type, H5T_NATIVE_INT)) {
+ else if(H5Tequal(type, H5T_NATIVE_INT))
parallel_print("H5T_NATIVE_INT");
- } else if (H5Tequal(type, H5T_NATIVE_UINT)) {
+ else if(H5Tequal(type, H5T_NATIVE_UINT))
parallel_print("H5T_NATIVE_UINT");
- } else if (H5Tequal(type, H5T_NATIVE_LONG)) {
+ else if(H5Tequal(type, H5T_NATIVE_LONG))
parallel_print("H5T_NATIVE_LONG");
- } else if (H5Tequal(type, H5T_NATIVE_ULONG)) {
+ else if(H5Tequal(type, H5T_NATIVE_ULONG))
parallel_print("H5T_NATIVE_ULONG");
- } else if (H5Tequal(type, H5T_NATIVE_LLONG)) {
+ else if(H5Tequal(type, H5T_NATIVE_LLONG))
parallel_print("H5T_NATIVE_LLONG");
- } else if (H5Tequal(type, H5T_NATIVE_ULLONG)) {
+ else if(H5Tequal(type, H5T_NATIVE_ULLONG))
parallel_print("H5T_NATIVE_ULLONG");
- } else {
+ else
parallel_print("undefined integer");
- }
break;
case H5T_FLOAT:
- if (H5Tequal(type, H5T_IEEE_F32BE)) {
+ if(H5Tequal(type, H5T_IEEE_F32BE))
parallel_print("H5T_IEEE_F32BE");
- } else if (H5Tequal(type, H5T_IEEE_F32LE)) {
+ else if(H5Tequal(type, H5T_IEEE_F32LE))
parallel_print("H5T_IEEE_F32LE");
- } else if (H5Tequal(type, H5T_IEEE_F64BE)) {
+ else if(H5Tequal(type, H5T_IEEE_F64BE))
parallel_print("H5T_IEEE_F64BE");
- } else if (H5Tequal(type, H5T_IEEE_F64LE)) {
+ else if(H5Tequal(type, H5T_IEEE_F64LE))
parallel_print("H5T_IEEE_F64LE");
- } else if (H5Tequal(type, H5T_NATIVE_FLOAT)) {
+ else if(H5Tequal(type, H5T_NATIVE_FLOAT))
parallel_print("H5T_NATIVE_FLOAT");
- } else if (H5Tequal(type, H5T_NATIVE_DOUBLE)) {
+ else if(H5Tequal(type, H5T_NATIVE_DOUBLE))
parallel_print("H5T_NATIVE_DOUBLE");
#if H5_SIZEOF_LONG_DOUBLE !=0
- } else if (H5Tequal(type, H5T_NATIVE_LDOUBLE)) {
+ else if(H5Tequal(type, H5T_NATIVE_LDOUBLE))
parallel_print("H5T_NATIVE_LDOUBLE");
#endif
- } else {
+ else
parallel_print("undefined float");
- }
+ break;
+
+ case H5T_BITFIELD:
+ if(H5Tequal(type, H5T_STD_B8BE))
+ parallel_print("H5T_STD_B8BE");
+ else if(H5Tequal(type, H5T_STD_B8LE))
+ parallel_print("H5T_STD_B8LE");
+ else if(H5Tequal(type, H5T_STD_B16BE))
+ parallel_print("H5T_STD_B16BE");
+ else if(H5Tequal(type, H5T_STD_B16LE))
+ parallel_print("H5T_STD_B16LE");
+ else if(H5Tequal(type, H5T_STD_B32BE))
+ parallel_print("H5T_STD_B32BE");
+ else if(H5Tequal(type, H5T_STD_B32LE))
+ parallel_print("H5T_STD_B32LE");
+ else if(H5Tequal(type, H5T_STD_B64BE))
+ parallel_print("H5T_STD_B64BE");
+ else if(H5Tequal(type, H5T_STD_B64LE))
+ parallel_print("H5T_STD_B64LE");
+ else
+ parallel_print("undefined bitfield");
break;
case H5T_TIME:
case H5T_STRING:
- case H5T_BITFIELD:
case H5T_OPAQUE:
case H5T_COMPOUND:
case H5T_REFERENCE:
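
The new H5T_BITFIELD branch above resolves the predefined H5T_STD_B* names with H5Tequal() and falls back to the fixed "undefined bitfield" label when none match. A hedged alternative for that fallback (sketch only, behaviour assumed) could report size and byte order through the standard H5Tget_size()/H5Tget_order() calls:

    static void
    print_unknown_bitfield(hid_t type)
    {
        size_t      size  = H5Tget_size(type);      /* element size in bytes  */
        H5T_order_t order = H5Tget_order(type);     /* byte order of the type */

        parallel_print("undefined bitfield (%lu bytes, %s)", (unsigned long)size,
                (order == H5T_ORDER_LE) ? "little-endian" :
                (order == H5T_ORDER_BE) ? "big-endian" : "unknown order");
    }
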
@@ -191,16 +201,16 @@ diff_basename(const char *name)
{
size_t i;
- if (name == NULL)
+ if(name == NULL)
return NULL;
/* Find the end of the base name */
i = HDstrlen(name);
- while (i > 0 && '/' == name[i - 1])
+ while(i > 0 && '/' == name[i - 1])
--i;
/* Skip backward over base name */
- while (i > 0 && '/' != name[i - 1])
+ while(i > 0 && '/' != name[i - 1])
--i;
return(name+i);
@@ -258,7 +268,7 @@ get_type(h5trav_type_t type)
H5_ATTR_PURE const char*
get_sign(H5T_sign_t sign)
{
- switch (sign)
+ switch(sign)
{
case H5T_SGN_NONE:
return "H5T_SGN_NONE";
@@ -293,7 +303,7 @@ get_sign(H5T_sign_t sign)
H5_ATTR_PURE const char*
get_class(H5T_class_t tclass)
{
- switch (tclass) {
+ switch(tclass) {
case H5T_TIME:
return("H5T_TIME");
@@ -353,24 +363,21 @@ void print_found(hsize_t nfound)
/*-----------------------------------------------------------------
* Function: match_up_memsize
- *
+ *
* Purpose: match smaller memory size up to bigger memory size
*------------------------------------------------------------------
*/
herr_t match_up_memsize (hid_t f_tid1_id, hid_t f_tid2_id,
- hid_t *m_tid1, hid_t *m_tid2,
+ hid_t *m_tid1, hid_t *m_tid2,
size_t *m_size1, size_t *m_size2)
{
herr_t ret = SUCCEED;
- if( (*m_size1) != (*m_size2) )
- {
- if( (*m_size1) < (*m_size2) )
- {
+ if((*m_size1) != (*m_size2)) {
+ if((*m_size1) < (*m_size2)) {
H5Tclose( *m_tid1 );
- if(( (*m_tid1) = h5tools_get_native_type(f_tid2_id)) < 0)
- {
+ if(((*m_tid1) = H5Tget_native_type(f_tid2_id, H5T_DIR_DEFAULT)) < 0) {
ret = FAIL;
goto out;
}
@@ -380,8 +387,7 @@ herr_t match_up_memsize (hid_t f_tid1_id, hid_t f_tid2_id,
else {
H5Tclose(*m_tid2);
- if(( (*m_tid2) = h5tools_get_native_type(f_tid1_id)) < 0)
- {
+ if(((*m_tid2) = H5Tget_native_type(f_tid1_id, H5T_DIR_DEFAULT)) < 0) {
ret = FAIL;
goto out;
}
@@ -389,7 +395,7 @@ herr_t match_up_memsize (hid_t f_tid1_id, hid_t f_tid2_id,
*m_size2 = H5Tget_size(*m_tid2);
} /* end else */
} /* end if */
- HDassert( (*m_size1) == (*m_size2) );
+ HDassert((*m_size1) == (*m_size2));
out:
return ret;
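
With h5tools_get_native_type() gone, callers of match_up_memsize() obtain the memory datatypes directly from H5Tget_native_type(..., H5T_DIR_DEFAULT) and remain responsible for closing every id they receive. A typical call sequence (a sketch under those assumptions; did1/did2 stand for already-open dataset ids):

    static herr_t
    match_native_types(hid_t did1, hid_t did2)
    {
        hid_t  f_tid1  = H5Dget_type(did1);         /* file datatypes          */
        hid_t  f_tid2  = H5Dget_type(did2);
        hid_t  m_tid1  = H5Tget_native_type(f_tid1, H5T_DIR_DEFAULT);
        hid_t  m_tid2  = H5Tget_native_type(f_tid2, H5T_DIR_DEFAULT);
        size_t m_size1 = H5Tget_size(m_tid1);
        size_t m_size2 = H5Tget_size(m_tid2);
        herr_t ret;

        ret = match_up_memsize(f_tid1, f_tid2, &m_tid1, &m_tid2, &m_size1, &m_size2);

        H5Tclose(m_tid1);       /* may have been re-derived inside the call */
        H5Tclose(m_tid2);
        H5Tclose(f_tid1);
        H5Tclose(f_tid2);
        return ret;
    }
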
diff --git a/tools/lib/h5tools.c b/tools/lib/h5tools.c
index bfcbfb8..22fd863 100644
--- a/tools/lib/h5tools.c
+++ b/tools/lib/h5tools.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -53,6 +51,10 @@ unsigned packed_data_length; /* length of packed bits to display */
unsigned long long packed_data_mask; /* mask in which packed bits to display */
int enable_error_stack= FALSE; /* re-enable error stack */
+/* sort parameters */
+H5_index_t sort_by = H5_INDEX_NAME; /*sort_by [creation_order | name] */
+H5_iter_order_t sort_order = H5_ITER_INC; /*sort_order [ascending | descending] */
+
/* module-scoped variables */
static int h5tools_init_g; /* if h5tools lib has been initialized */
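
The new sort_by/sort_order globals mirror the sort_by [creation_order | name] and sort_order [ascending | descending] tool options noted in the comments and are meant to be handed straight to the H5L iteration calls. A minimal sketch of that use (visit_link and list_group are illustrative names, not part of the patch):

    static herr_t
    visit_link(hid_t group, const char *name, const H5L_info_t *info, void *op_data)
    {
        (void)group; (void)info; (void)op_data;
        parallel_print("%s\n", name);
        return 0;                                   /* 0 = continue iteration */
    }

    static void
    list_group(hid_t gid)                           /* gid: an open group id  */
    {
        H5Literate(gid, sort_by, sort_order, NULL, visit_link, NULL);
    }
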
@@ -279,16 +281,16 @@ h5tools_set_attr_output_file(const char *fname, int is_bin)
if (fname != NULL) {
/* binary output */
if (is_bin) {
- if ((f = HDfopen(fname, "wb")) != NULL) {
- rawattrstream = f;
- retvalue = SUCCEED;
- }
+ if ((f = HDfopen(fname, "wb")) != NULL) {
+ rawattrstream = f;
+ retvalue = SUCCEED;
+ }
}
else {
- if ((f = HDfopen(fname, "w")) != NULL) {
- rawattrstream = f;
- retvalue = SUCCEED;
- }
+ if ((f = HDfopen(fname, "w")) != NULL) {
+ rawattrstream = f;
+ retvalue = SUCCEED;
+ }
}
}
else {
@@ -321,26 +323,26 @@ h5tools_set_input_file(const char *fname, int is_bin)
if (HDfclose(rawinstream))
HDperror("closing rawinstream");
else
- rawinstream = NULL;
+ rawinstream = NULL;
}
/* First check if filename is string "NULL" */
if (fname != NULL) {
/* binary output */
if (is_bin) {
- if ((f = HDfopen(fname, "rb")) != NULL) {
- rawinstream = f;
- retvalue = SUCCEED;
- }
+ if ((f = HDfopen(fname, "rb")) != NULL) {
+ rawinstream = f;
+ retvalue = SUCCEED;
+ }
}
else {
- if ((f = HDfopen(fname, "r")) != NULL) {
- rawinstream = f;
- retvalue = SUCCEED;
- }
+ if ((f = HDfopen(fname, "r")) != NULL) {
+ rawinstream = f;
+ retvalue = SUCCEED;
+ }
}
}
else {
- rawinstream = NULL;
+ rawinstream = NULL;
retvalue = SUCCEED;
}
@@ -375,16 +377,16 @@ h5tools_set_output_file(const char *fname, int is_bin)
if (fname != NULL) {
/* binary output */
if (is_bin) {
- if ((f = HDfopen(fname, "wb")) != NULL) {
- rawoutstream = f;
- retvalue = SUCCEED;
- }
+ if ((f = HDfopen(fname, "wb")) != NULL) {
+ rawoutstream = f;
+ retvalue = SUCCEED;
+ }
}
else {
- if ((f = HDfopen(fname, "w")) != NULL) {
- rawoutstream = f;
- retvalue = SUCCEED;
- }
+ if ((f = HDfopen(fname, "w")) != NULL) {
+ rawoutstream = f;
+ retvalue = SUCCEED;
+ }
}
}
else {
@@ -423,21 +425,21 @@ h5tools_set_error_file(const char *fname, int is_bin)
/* First check if filename is string "NULL" */
if (fname != NULL) {
/* binary output */
- if (is_bin) {
- if ((f = HDfopen(fname, "wb")) != NULL) {
- rawerrorstream = f;
- retvalue = SUCCEED;
- }
+ if (is_bin) {
+ if ((f = HDfopen(fname, "wb")) != NULL) {
+ rawerrorstream = f;
+ retvalue = SUCCEED;
+ }
}
else {
- if ((f = HDfopen(fname, "w")) != NULL) {
- rawerrorstream = f;
- retvalue = SUCCEED;
- }
- }
+ if ((f = HDfopen(fname, "w")) != NULL) {
+ rawerrorstream = f;
+ retvalue = SUCCEED;
+ }
+ }
}
else {
- rawerrorstream = NULL;
+ rawerrorstream = NULL;
retvalue = SUCCEED;
}
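
These setters are exercised the same way throughout the tools; a usage sketch (file names are placeholders) showing the is_bin flag selecting the "wb" versus "w" branches above:

    if (h5tools_set_output_file("dset.bin", 1) < 0)     /* is_bin = 1 -> "wb" */
        parallel_print("could not open raw output file\n");
    if (h5tools_set_error_file("h5tool.log", 0) < 0)    /* is_bin = 0 -> "w"  */
        parallel_print("could not open error log\n");
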
@@ -1324,6 +1326,7 @@ render_bin_output(FILE *stream, hid_t container, hid_t tid, void *_mem, hsize_t
case H5T_INTEGER:
case H5T_FLOAT:
case H5T_ENUM:
+ case H5T_BITFIELD:
block_index = block_nelmts * size;
while(block_index > 0) {
size_t bytes_in = 0; /* # of bytes to write */
@@ -1490,7 +1493,6 @@ render_bin_output(FILE *stream, hid_t container, hid_t tid, void *_mem, hsize_t
break;
case H5T_TIME:
- case H5T_BITFIELD:
case H5T_OPAQUE:
for (block_index = 0; block_index < block_nelmts; block_index++) {
mem = ((unsigned char*)_mem) + block_index * size;
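
Moving H5T_BITFIELD out of the per-element H5T_TIME/H5T_OPAQUE loop means bitfield data now takes the same bulk raw-byte path as integers and floats. Conceptually the raw path amounts to streaming the element buffer unmodified (a simplified sketch; the real loop writes in bounded chunks):

    static int
    write_block_raw(FILE *stream, void *_mem, hsize_t block_nelmts, size_t size)
    {
        size_t nbytes = (size_t)block_nelmts * size;    /* whole block at once */

        return (HDfwrite(_mem, 1, nbytes, stream) == nbytes) ? 0 : -1;
    }
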
diff --git a/tools/lib/h5tools.h b/tools/lib/h5tools.h
index 38739c8..3d38dd0 100644
--- a/tools/lib/h5tools.h
+++ b/tools/lib/h5tools.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -570,6 +568,10 @@ H5TOOLS_DLLVAR int oid_output; /* oid output */
H5TOOLS_DLLVAR int data_output; /* data output */
H5TOOLS_DLLVAR int attr_data_output; /* attribute data output */
+/* sort parameters */
+H5TOOLS_DLLVAR H5_index_t sort_by; /*sort_by [creation_order | name] */
+H5TOOLS_DLLVAR H5_iter_order_t sort_order; /*sort_order [ascending | descending] */
+
/* things to display or which are set via command line parameters */
H5TOOLS_DLLVAR int enable_error_stack; /* re-enable error stack */
@@ -588,7 +590,6 @@ H5TOOLS_DLL int h5tools_set_output_file(const char *fname, int is_bin);
H5TOOLS_DLL int h5tools_set_error_file(const char *fname, int is_bin);
H5TOOLS_DLL hid_t h5tools_fopen(const char *fname, unsigned flags, hid_t fapl,
const char *driver, char *drivername, size_t drivername_len);
-H5TOOLS_DLL hid_t h5tools_get_native_type(hid_t type);
H5TOOLS_DLL hid_t h5tools_get_little_endian_type(hid_t type);
H5TOOLS_DLL hid_t h5tools_get_big_endian_type(hid_t type);
H5TOOLS_DLL htri_t h5tools_detect_vlen(hid_t tid);
diff --git a/tools/lib/h5tools_dump.c b/tools/lib/h5tools_dump.c
index 67585d1..1a57512 100644
--- a/tools/lib/h5tools_dump.c
+++ b/tools/lib/h5tools_dump.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -31,73 +29,73 @@
#include "H5private.h"
h5tool_format_t h5tools_dataformat = {
-0, /*raw */
-
-"", /*fmt_raw */
-"%d", /*fmt_int */
-"%u", /*fmt_uint */
-"%hhd", /*fmt_schar */
-"%u", /*fmt_uchar */
-"%d", /*fmt_short */
-"%u", /*fmt_ushort */
-"%ld", /*fmt_long */
-"%lu", /*fmt_ulong */
-NULL, /*fmt_llong */
-NULL, /*fmt_ullong */
-"%g", /*fmt_double */
-"%g", /*fmt_float */
-
-0, /*ascii */
-0, /*str_locale */
-0, /*str_repeat */
-
-"[ ", /*arr_pre */
-",", /*arr_sep */
-" ]", /*arr_suf */
-1, /*arr_linebreak */
-
-"", /*cmpd_name */
-",\n", /*cmpd_sep */
-"{", /*cmpd_pre */
-"}", /*cmpd_suf */
-"\n", /*cmpd_end */
-NULL, /* cmpd_listv */
-
-", ", /*vlen_sep */
-"(", /*vlen_pre */
-")", /*vlen_suf */
-"", /*vlen_end */
-
-"%s", /*elmt_fmt */
-",", /*elmt_suf1 */
-" ", /*elmt_suf2 */
-
-"", /*idx_n_fmt */
-"", /*idx_sep */
-"", /*idx_fmt */
-
-80, /*line_ncols *//*standard default columns */
-0, /*line_per_line */
-"", /*line_pre */
-"%s", /*line_1st */
-"%s", /*line_cont */
-"", /*line_suf */
-"", /*line_sep */
-1, /*line_multi_new */
-" ", /*line_indent */
-
-1, /*skip_first */
-
-1, /*obj_hidefileno */
-" "H5_PRINTF_HADDR_FMT, /*obj_format */
-
-1, /*dset_hidefileno */
-"DATASET %s ", /*dset_format */
-"%s", /*dset_blockformat_pre */
-"%s", /*dset_ptformat_pre */
-"%s", /*dset_ptformat */
-1, /*array indices */
-1 /*escape non printable characters */
+ 0, /*raw */
+
+ "", /*fmt_raw */
+ "%d", /*fmt_int */
+ "%u", /*fmt_uint */
+ "%hhd", /*fmt_schar */
+ "%u", /*fmt_uchar */
+ "%d", /*fmt_short */
+ "%u", /*fmt_ushort */
+ "%ld", /*fmt_long */
+ "%lu", /*fmt_ulong */
+ NULL, /*fmt_llong */
+ NULL, /*fmt_ullong */
+ "%g", /*fmt_double */
+ "%g", /*fmt_float */
+
+ 0, /*ascii */
+ 0, /*str_locale */
+ 0, /*str_repeat */
+
+ "[ ", /*arr_pre */
+ ",", /*arr_sep */
+ " ]", /*arr_suf */
+ 1, /*arr_linebreak */
+
+ "", /*cmpd_name */
+ ",\n", /*cmpd_sep */
+ "{", /*cmpd_pre */
+ "}", /*cmpd_suf */
+ "\n", /*cmpd_end */
+ NULL, /* cmpd_listv */
+
+ ", ", /*vlen_sep */
+ "(", /*vlen_pre */
+ ")", /*vlen_suf */
+ "", /*vlen_end */
+
+ "%s", /*elmt_fmt */
+ ",", /*elmt_suf1 */
+ " ", /*elmt_suf2 */
+
+ "", /*idx_n_fmt */
+ "", /*idx_sep */
+ "", /*idx_fmt */
+
+ 80, /*line_ncols *//*standard default columns */
+ 0, /*line_per_line */
+ "", /*line_pre */
+ "%s", /*line_1st */
+ "%s", /*line_cont */
+ "", /*line_suf */
+ "", /*line_sep */
+ 1, /*line_multi_new */
+ " ", /*line_indent */
+
+ 1, /*skip_first */
+
+ 1, /*obj_hidefileno */
+ " "H5_PRINTF_HADDR_FMT, /*obj_format */
+
+ 1, /*dset_hidefileno */
+ "DATASET %s ", /*dset_format */
+ "%s", /*dset_blockformat_pre */
+ "%s", /*dset_ptformat_pre */
+ "%s", /*dset_ptformat */
+ 1, /*array indices */
+ 1 /*escape non printable characters */
};
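
Because h5tools_dataformat is a positional initializer, a caller that only needs to adjust a few knobs can copy it and override members by name; the field names below are taken from the trailing comments in the table above (sketch only):

    h5tool_format_t fmt = h5tools_dataformat;   /* start from the defaults   */

    fmt.fmt_double = "%.15g";                   /* more float digits         */
    fmt.fmt_float  = "%.7g";
    fmt.line_ncols = 120;                       /* wider output lines        */
    fmt.do_escape  = 0;                         /* leave control chars as-is */
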
const h5tools_dump_header_t h5tools_standardformat = {
@@ -1797,7 +1795,7 @@ h5tools_dump_dset(FILE *stream, const h5tool_format_t *info, h5tools_context_t *
else if (bin_form == 3)
p_type = h5tools_get_big_endian_type(f_type);
else
- p_type = h5tools_get_native_type(f_type);
+ p_type = H5Tget_native_type(f_type, H5T_DIR_DEFAULT);
if (p_type < 0)
goto done;
@@ -2959,7 +2957,7 @@ h5tools_print_fill_value(h5tools_str_t *buffer/*in,out*/, const h5tool_format_t
hid_t n_type;
void *buf = NULL;
- n_type = h5tools_get_native_type(type_id);
+ n_type = H5Tget_native_type(type_id, H5T_DIR_DEFAULT);
size = H5Tget_size(n_type);
buf = HDmalloc(size);
@@ -3787,7 +3785,7 @@ void
h5tools_print_packed_bits(h5tools_str_t *buffer, hid_t type)
{
unsigned packed_bits_size = 0;
- hid_t n_type = h5tools_get_native_type(type);
+ hid_t n_type = H5Tget_native_type(type, H5T_DIR_DEFAULT);
if(H5Tget_class(n_type) == H5T_INTEGER) {
if(H5Tequal(n_type, H5T_NATIVE_SCHAR) == TRUE)
@@ -4045,7 +4043,7 @@ h5tools_dump_data(FILE *stream, const h5tool_format_t *info,
unsigned int vl_data = 0; /* contains VL datatypes */
type = H5Aget_type(obj_id);
- p_type = h5tools_get_native_type(type);
+ p_type = H5Tget_native_type(type, H5T_DIR_DEFAULT);
ndims = H5Sget_simple_extent_dims(space, size, NULL);
diff --git a/tools/lib/h5tools_dump.h b/tools/lib/h5tools_dump.h
index b05f226..dc79f43 100644
--- a/tools/lib/h5tools_dump.h
+++ b/tools/lib/h5tools_dump.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/tools/lib/h5tools_error.h b/tools/lib/h5tools_error.h
index 749157e..136c5ed 100644
--- a/tools/lib/h5tools_error.h
+++ b/tools/lib/h5tools_error.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/tools/lib/h5tools_filters.c b/tools/lib/h5tools_filters.c
index 486d1c6..6ee58c5 100644
--- a/tools/lib/h5tools_filters.c
+++ b/tools/lib/h5tools_filters.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "H5private.h"
diff --git a/tools/lib/h5tools_ref.c b/tools/lib/h5tools_ref.c
index 8c869c8..85850e3 100644
--- a/tools/lib/h5tools_ref.c
+++ b/tools/lib/h5tools_ref.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <stdio.h>
diff --git a/tools/lib/h5tools_ref.h b/tools/lib/h5tools_ref.h
index 7ddb91a..b7bd9a3 100644
--- a/tools/lib/h5tools_ref.h
+++ b/tools/lib/h5tools_ref.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef H5TOOLS_REF_H__
diff --git a/tools/lib/h5tools_str.c b/tools/lib/h5tools_str.c
index 358e993..b7a2c1e 100644
--- a/tools/lib/h5tools_str.c
+++ b/tools/lib/h5tools_str.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -25,24 +23,24 @@
#include <string.h>
#include "H5private.h"
-#include "h5tools.h" /*for h5tool_format_t structure */
+#include "h5tools.h" /* for h5tool_format_t structure */
#include "h5tools_ref.h"
-#include "h5tools_str.h" /*function prototypes */
+#include "h5tools_str.h" /* function prototypes */
/* Copied from hl/src/H5LDprivate.h */
/* Info about the list of comma-separated compound fields */
typedef struct H5LD_memb_t {
- size_t tot_offset;
- size_t last_tsize;
- hid_t last_tid;
- char **names;
+ size_t tot_offset;
+ size_t last_tsize;
+ hid_t last_tid;
+ char **names;
} H5LD_memb_t;
/*
* If REPEAT_VERBOSE is defined then character strings will be printed so
* that repeated character sequences like "AAAAAAAAAA" are displayed as
*
- * 'A' repeates 9 times
+ * 'A' repeats 9 times
*
* Otherwise the format is more Perl-like
*
@@ -63,15 +61,12 @@ void h5tools_str_indent(h5tools_str_t *str, const h5tool_format_t *in
* Function: h5tools_str_close
*
* Purpose: Closes a string by releasing it's memory and setting the size
- * information to zero.
+ * information to zero.
*
* Return: void
*
* Programmer: Robb Matzke
* Monday, April 26, 1999
- *
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
void
@@ -87,17 +82,14 @@ h5tools_str_close(h5tools_str_t *str)
* Function: h5tools_str_len
*
* Purpose: Returns the length of the string, not counting the null
- * terminator.
+ * terminator.
*
* Return: Success: Length of string
*
- * Failure: 0
+ * Failure: 0
*
* Programmer: Robb Matzke
* Monday, April 26, 1999
- *
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
H5_ATTR_PURE size_t
@@ -110,43 +102,34 @@ h5tools_str_len(h5tools_str_t *str)
* Function: h5tools_str_append
*
* Purpose: Formats variable arguments according to printf() format
- * string and appends the result to variable length string STR.
+ * string and appends the result to variable length string STR.
*
* Return: Success: Pointer to buffer containing result.
*
- * Failure: NULL
+ * Failure: NULL
*
* Programmer: Robb Matzke
* Monday, April 26, 1999
- *
- * Modifications:
- *
- * Major change: need to check results of vsnprintf to
- * handle errors, empty format, and overflows.
- *
- * Programmer: REMcG Matzke
- * June 16, 2004
- *
*-------------------------------------------------------------------------
*/
char *
h5tools_str_append(h5tools_str_t *str/*in,out*/, const char *fmt, ...)
{
- va_list ap;
+ va_list ap;
/* Make sure we have some memory into which to print */
- if (!str->s || str->nalloc <= 0)
+ if(!str->s || str->nalloc <= 0)
h5tools_str_reset(str);
- if (HDstrlen(fmt) == 0)
+ if(HDstrlen(fmt) == 0)
/* nothing to print */
return str->s;
/* Format the arguments and append to the value already in `str' */
- while (1) {
+ while(1) {
/* How many bytes available for new value, counting the new NUL */
+ int nchars = -1;
size_t avail = str->nalloc - str->len;
- int nchars = -1;
HDva_start(ap, fmt);
nchars = HDvsnprintf(str->s + str->len, avail, fmt, ap);
@@ -163,11 +146,11 @@ h5tools_str_append(h5tools_str_t *str/*in,out*/, const char *fmt, ...)
* to lack of buffer size, so try one more time after realloc more
* buffer size before return NULL.
*/
- if (nchars < 0)
+ if(nchars < 0)
/* failure, such as bad format */
return NULL;
- if ((size_t) nchars >= avail || (0 == nchars && (HDstrcmp(fmt, "%s")))) {
+ if((size_t) nchars >= avail || (0 == nchars && (HDstrcmp(fmt, "%s")))) {
/* Truncation return value as documented by C99, or zero return value with either of the
* following conditions, each of which indicates that the proper C99 return value probably
* should have been positive when the format string is
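
The retry logic being touched here is the usual grow-and-retry loop around vsnprintf(): if the formatted result did not fit, enlarge the buffer and format again. Stated with plain C library calls (a self-contained sketch, not the h5tools implementation; it assumes *buf has already been allocated and *nalloc > 0):

    #include <stdarg.h>
    #include <stdio.h>
    #include <stdlib.h>

    static char *
    append_fmt(char **buf, size_t *nalloc, size_t *len, const char *fmt, ...)
    {
        for (;;) {
            size_t  avail = *nalloc - *len;     /* room left, including the NUL */
            va_list ap;
            int     nchars;
            char   *tmp;

            va_start(ap, fmt);
            nchars = vsnprintf(*buf + *len, avail, fmt, ap);
            va_end(ap);

            if (nchars < 0)
                return NULL;                    /* bad format string            */
            if ((size_t)nchars < avail) {       /* it fit, we are done          */
                *len += (size_t)nchars;
                return *buf;
            }

            *nalloc = *nalloc * 2 + (size_t)nchars + 1;     /* grow and retry   */
            if ((tmp = (char *)realloc(*buf, *nalloc)) == NULL)
                return NULL;
            *buf = tmp;
        }
    }
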
@@ -193,24 +176,21 @@ h5tools_str_append(h5tools_str_t *str/*in,out*/, const char *fmt, ...)
* Function: h5tools_str_reset
*
* Purpose: Reset the string to the empty value. If no memory is
- * allocated yet then initialize the h5tools_str_t struct.
+ * allocated yet then initialize the h5tools_str_t struct.
*
* Return: Success: Ptr to the buffer which contains a null
- * character as the first element.
+ * character as the first element.
*
- * Failure: NULL
+ * Failure: NULL
*
* Programmer: Robb Matzke
* Monday, April 26, 1999
- *
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
char *
h5tools_str_reset(h5tools_str_t *str/*in,out*/)
{
- if (!str->s || str->nalloc <= 0) {
+ if(!str->s || str->nalloc <= 0) {
str->nalloc = STR_INIT_LEN;
str->s = (char*)HDmalloc(str->nalloc);
HDassert(str->s);
@@ -228,19 +208,16 @@ h5tools_str_reset(h5tools_str_t *str/*in,out*/)
*
* Return: Success: Pointer to the string
*
- * Failure: NULL
+ * Failure: NULL
*
* Programmer: Robb Matzke
* Monday, April 26, 1999
- *
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
char *
h5tools_str_trunc(h5tools_str_t *str/*in,out*/, size_t size)
{
- if (size < str->len) {
+ if(size < str->len) {
str->len = size;
str->s[size] = '\0';
}
@@ -252,41 +229,38 @@ h5tools_str_trunc(h5tools_str_t *str/*in,out*/, size_t size)
* Function: h5tools_str_fmt
*
* Purpose: Reformat a string contents beginning at character START
- * according to printf format FMT. FMT should contain no format
- * specifiers except possibly the `%s' variety. For example, if
- * the input string is `hello' and the format is "<<%s>>" then
- * the output value will be "<<hello>>".
+ * according to printf format FMT. FMT should contain no format
+ * specifiers except possibly the `%s' variety. For example, if
+ * the input string is `hello' and the format is "<<%s>>" then
+ * the output value will be "<<hello>>".
*
* Return: Success: A pointer to the resulting string.
*
- * Failure: NULL
+ * Failure: NULL
*
* Programmer: Robb Matzke
* Monday, April 26, 1999
- *
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
char *
h5tools_str_fmt(h5tools_str_t *str/*in,out*/, size_t start, const char *fmt)
{
- char _temp[1024], *temp = _temp;
+ char _temp[1024], *temp = _temp;
HDassert(str);
HDassert(fmt);
/* If the format string is simply "%s" then don't bother doing anything */
- if (!HDstrcmp(fmt, "%s"))
+ if(!HDstrcmp(fmt, "%s"))
return str->s;
/*
* Save the input value if there is a `%' anywhere in FMT. Otherwise
* don't bother because we don't need a temporary copy.
*/
- if (HDstrchr(fmt, '%')) {
+ if(HDstrchr(fmt, '%')) {
size_t n = sizeof(_temp);
- if (str->len - start + 1 > n) {
+ if(str->len - start + 1 > n) {
n = str->len - start + 1;
temp = (char*)HDmalloc(n);
HDassert(temp);
@@ -300,7 +274,7 @@ h5tools_str_fmt(h5tools_str_t *str/*in,out*/, size_t start, const char *fmt)
h5tools_str_append(str, fmt, temp);
/* Free the temp buffer if we allocated one */
- if (temp != _temp)
+ if(temp != _temp)
HDfree(temp);
return str->s;
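
The documented behaviour of h5tools_str_fmt() (wrap the existing contents in a format such as "<<%s>>") translates into a short call sequence (sketch; error checks omitted):

    h5tools_str_t str;

    HDmemset(&str, 0, sizeof(str));
    h5tools_str_append(&str, "hello");
    h5tools_str_fmt(&str, (size_t)0, "<<%s>>");   /* str now holds "<<hello>>" */
    h5tools_str_close(&str);                      /* release the buffer        */
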
@@ -313,37 +287,34 @@ h5tools_str_fmt(h5tools_str_t *str/*in,out*/, size_t start, const char *fmt)
*
* Return: Success: Pointer to the prefix.
*
- * Failure: NULL
+ * Failure: NULL
*
* Programmer: Robb Matzke
* Thursday, July 23, 1998
- *
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
char *
h5tools_str_prefix(h5tools_str_t *str/*in,out*/, const h5tool_format_t *info,
hsize_t elmtno, unsigned ndims, h5tools_context_t *ctx)
{
- size_t i = 0;
- hsize_t curr_pos = elmtno;
+ size_t i = 0;
+ hsize_t curr_pos = elmtno;
h5tools_str_reset(str);
- if (ndims > 0) {
+ if(ndims > 0) {
/*
* Calculate the number of elements represented by a unit change in a
* certain index position.
*/
- for (i = 0; i < (size_t) ndims; i++) {
+ for(i = 0; i < (size_t) ndims; i++) {
ctx->pos[i] = curr_pos / ctx->acc[i];
curr_pos -= ctx->acc[i] * ctx->pos[i];
}
HDassert(curr_pos == 0);
/* Print the index values */
- for (i = 0; i < (size_t) ndims; i++) {
+ for(i = 0; i < (size_t) ndims; i++) {
if (i)
h5tools_str_append(str, "%s", OPT(info->idx_sep, ","));
@@ -352,10 +323,8 @@ h5tools_str_prefix(h5tools_str_t *str/*in,out*/, const h5tool_format_t *info,
}
}
- else {
- /* Scalar */
+ else /* Scalar */
h5tools_str_append(str, OPT(info->idx_n_fmt, HSIZE_T_FORMAT), (hsize_t) 0);
- }
/* Add prefix and suffix to the index */
return h5tools_str_fmt(str, (size_t)0, OPT(info->idx_fmt, "%s: "));
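
The loop above converts a flat element number into per-dimension indices by dividing by the precomputed accumulated extents in ctx->acc[]. A worked example of the same arithmetic (sketch with concrete numbers):

    /* For a 4 x 3 x 5 dataset, acc[] = {15, 5, 1}; element number 37 maps to
     * coordinates {2, 1, 2}, since 2*15 + 1*5 + 2 = 37. */
    hsize_t acc[3] = {15, 5, 1};    /* elements spanned by one step in each dim */
    hsize_t pos[3];
    hsize_t curr_pos = 37;
    size_t  i;

    for (i = 0; i < 3; i++) {
        pos[i] = curr_pos / acc[i];
        curr_pos -= acc[i] * pos[i];
    }
    /* pos is now {2, 1, 2} */
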
@@ -379,21 +348,21 @@ h5tools_str_region_prefix(h5tools_str_t *str, const h5tool_format_t *info,
hsize_t elmtno, hsize_t *ptdata, unsigned ndims, hsize_t max_idx[],
h5tools_context_t *ctx)
{
- hsize_t p_prod[H5S_MAX_RANK];
- size_t i = 0;
- hsize_t curr_pos = elmtno;
+ size_t i = 0;
+ hsize_t curr_pos = elmtno;
+ hsize_t p_prod[H5S_MAX_RANK];
h5tools_str_reset(str);
- if (ndims > 0) {
+ if(ndims > 0) {
/*
* Calculate the number of elements represented by a unit change in a
* certain index position.
*/
- for (i = ndims - 1, p_prod[ndims - 1] = 1; i > 0; --i)
+ for(i = ndims - 1, p_prod[ndims - 1] = 1; i > 0; --i)
p_prod[i - 1] = (max_idx[i]) * p_prod[i];
- for (i = 0; i < (size_t) ndims; i++) {
+ for(i = 0; i < (size_t) ndims; i++) {
if(curr_pos > 0) {
ctx->pos[i] = curr_pos / p_prod[i];
curr_pos -= p_prod[i] * ctx->pos[i];
@@ -404,18 +373,16 @@ h5tools_str_region_prefix(h5tools_str_t *str, const h5tool_format_t *info,
}
/* Print the index values */
- for (i = 0; i < (size_t) ndims; i++) {
- if (i)
+ for(i = 0; i < (size_t) ndims; i++) {
+ if(i)
h5tools_str_append(str, "%s", OPT(info->idx_sep, ","));
h5tools_str_append(str, OPT(info->idx_n_fmt, HSIZE_T_FORMAT), (hsize_t) ctx->pos[i]);
}
} /* if (ndims > 0) */
- else {
- /* Scalar */
+ else /* Scalar */
h5tools_str_append(str, OPT(info->idx_n_fmt, HSIZE_T_FORMAT), (hsize_t) 0);
- }
/* Add prefix and suffix to the index */
return h5tools_str_fmt(str, (size_t)0, OPT(info->idx_fmt, "%s: "));
@@ -451,7 +418,7 @@ h5tools_str_dump_space_slabs(h5tools_str_t *str, hid_t rspace,
/* Start coordinates */
h5tools_str_append(str, "%s%s ", info->line_indent, START);
- for (j = 0; j < ndims; j++)
+ for(j = 0; j < ndims; j++)
h5tools_str_append(str, "%s" HSIZE_T_FORMAT, j ? "," : "(", start[j]);
h5tools_str_append(str, ")");
h5tools_str_append(str, "%s", "\n");
@@ -459,7 +426,7 @@ h5tools_str_dump_space_slabs(h5tools_str_t *str, hid_t rspace,
/* Stride coordinates */
h5tools_str_append(str, "%s ", STRIDE);
- for (j = 0; j < ndims; j++)
+ for(j = 0; j < ndims; j++)
h5tools_str_append(str, "%s" HSIZE_T_FORMAT, j ? "," : "(", stride[j]);
h5tools_str_append(str, ")");
h5tools_str_append(str, "%s", "\n");
@@ -467,7 +434,7 @@ h5tools_str_dump_space_slabs(h5tools_str_t *str, hid_t rspace,
/* Count coordinates */
h5tools_str_append(str, "%s ", COUNT);
- for (j = 0; j < ndims; j++) {
+ for(j = 0; j < ndims; j++) {
if(count[j] == H5S_UNLIMITED)
h5tools_str_append(str, "%s%s", j ? "," : "(","H5S_UNLIMITED");
else
@@ -479,7 +446,7 @@ h5tools_str_dump_space_slabs(h5tools_str_t *str, hid_t rspace,
/* Block coordinates */
h5tools_str_append(str, "%s ", BLOCK);
- for (j = 0; j < ndims; j++) {
+ for(j = 0; j < ndims; j++) {
if(block[j] == H5S_UNLIMITED)
h5tools_str_append(str, "%s%s", j ? "," : "(","H5S_UNLIMITED");
else
@@ -496,8 +463,7 @@ h5tools_str_dump_space_slabs(h5tools_str_t *str, hid_t rspace,
*
* Return: none
*
- * In/Out:
- * h5tools_str_t *str
+ * In/Out: h5tools_str_t *str
*-------------------------------------------------------------------------
*/
void
@@ -514,12 +480,12 @@ h5tools_str_dump_space_blocks(h5tools_str_t *str, hid_t rspace,
} H5E_END_TRY;
/* Print block information */
- if (snblocks > 0) {
+ if(snblocks > 0) {
+ hsize_t alloc_size;
hsize_t nblocks;
hsize_t *ptdata;
- hsize_t alloc_size;
+ hsize_t u;
unsigned ndims = (unsigned)H5Sget_simple_extent_ndims(rspace);
- hsize_t u;
nblocks = (hsize_t)snblocks;
alloc_size = nblocks * ndims * 2 * sizeof(ptdata[0]);
@@ -527,17 +493,17 @@ h5tools_str_dump_space_blocks(h5tools_str_t *str, hid_t rspace,
ptdata = (hsize_t *)HDmalloc((size_t) alloc_size);
H5Sget_select_hyper_blocklist(rspace, (hsize_t)0, nblocks, ptdata);
- for (u = 0; u < nblocks; u++) {
+ for(u = 0; u < nblocks; u++) {
unsigned v;
h5tools_str_append(str, info->dset_blockformat_pre, u ? "," OPTIONAL_LINE_BREAK " " : "", (unsigned long)u);
/* Start coordinates and opposite corner */
- for (v = 0; v < ndims; v++)
+ for(v = 0; v < ndims; v++)
h5tools_str_append(str, "%s" HSIZE_T_FORMAT, v ? "," : "(",
ptdata[u * 2 * ndims + v]);
- for (v = 0; v < ndims; v++)
+ for(v = 0; v < ndims; v++)
h5tools_str_append(str, "%s" HSIZE_T_FORMAT, v ? "," : ")-(",
ptdata[u * 2 * ndims + v + ndims]);
@@ -556,8 +522,7 @@ h5tools_str_dump_space_blocks(h5tools_str_t *str, hid_t rspace,
*
* Return: none
*
- * In/Out:
- * h5tools_str_t *str
+ * In/Out: h5tools_str_t *str
*-------------------------------------------------------------------------
*/
void
@@ -575,11 +540,11 @@ h5tools_str_dump_space_points(h5tools_str_t *str, hid_t rspace,
/* Print point information */
if (snpoints > 0) {
- hsize_t npoints;
hsize_t alloc_size;
+ hsize_t npoints;
hsize_t *ptdata;
- unsigned ndims = (unsigned)H5Sget_simple_extent_ndims(rspace);
- hsize_t u;
+ hsize_t u;
+ unsigned ndims = (unsigned)H5Sget_simple_extent_ndims(rspace);
npoints = (hsize_t)snpoints;
alloc_size = npoints * ndims * sizeof(ptdata[0]);
@@ -587,13 +552,13 @@ h5tools_str_dump_space_points(h5tools_str_t *str, hid_t rspace,
ptdata = (hsize_t *)HDmalloc((size_t) alloc_size);
H5Sget_select_elem_pointlist(rspace, (hsize_t)0, npoints, ptdata);
- for (u = 0; u < npoints; u++) {
+ for(u = 0; u < npoints; u++) {
unsigned v;
h5tools_str_append(str, info->dset_ptformat_pre, u ? "," OPTIONAL_LINE_BREAK " " : "",
(unsigned long)u);
- for (v = 0; v < ndims; v++)
+ for(v = 0; v < ndims; v++)
h5tools_str_append(str, "%s" HSIZE_T_FORMAT, v ? "," : "(",
(ptdata[u * ndims + v]));
@@ -616,16 +581,16 @@ h5tools_str_dump_space_points(h5tools_str_t *str, hid_t rspace,
static void
h5tools_print_char(h5tools_str_t *str, const h5tool_format_t *info, char ch)
{
- if (info->str_locale == ESCAPE_HTML) {
- if (ch <= ' ' || ch > '~')
+ if(info->str_locale == ESCAPE_HTML) {
+ if(ch <= ' ' || ch > '~')
h5tools_str_append(str, "%%%02x", ch);
else
h5tools_str_append(str, "%c", ch);
}
else {
- switch (ch) {
+ switch(ch) {
case '"':
- if (!info->do_escape)
+ if(!info->do_escape)
h5tools_str_append(str, "\"");
else
h5tools_str_append(str, "\\\"");
@@ -637,19 +602,19 @@ h5tools_print_char(h5tools_str_t *str, const h5tool_format_t *info, char ch)
h5tools_str_append(str, "\\\\");
break;
case '\b':
- if (!info->do_escape)
+ if(!info->do_escape)
h5tools_str_append(str, "\b");
else
h5tools_str_append(str, "\\b");
break;
case '\f':
- if (!info->do_escape)
+ if(!info->do_escape)
h5tools_str_append(str, "\f");
else
h5tools_str_append(str, "\\f");
break;
case '\n':
- if (!info->do_escape) {
+ if(!info->do_escape) {
h5tools_str_append(str, "\n");
h5tools_str_append(str, " ");
}
@@ -657,7 +622,7 @@ h5tools_print_char(h5tools_str_t *str, const h5tool_format_t *info, char ch)
h5tools_str_append(str, "\\n");
break;
case '\r':
- if (!info->do_escape) {
+ if(!info->do_escape) {
h5tools_str_append(str, "\r");
h5tools_str_append(str, " ");
}
@@ -665,13 +630,13 @@ h5tools_print_char(h5tools_str_t *str, const h5tool_format_t *info, char ch)
h5tools_str_append(str, "\\r");
break;
case '\t':
- if (!info->do_escape)
+ if(!info->do_escape)
h5tools_str_append(str, "\t");
else
h5tools_str_append(str, "\\t");
break;
default:
- if (isprint(ch))
+ if(isprint(ch))
h5tools_str_append(str, "%c", ch);
else
h5tools_str_append(str, "\\%03o", ch);
@@ -687,7 +652,7 @@ h5tools_str_indent(h5tools_str_t *str, const h5tool_format_t *info,
unsigned u, indentlevel = 0;
/* Write new prefix */
- if (ctx->indent_level > 0)
+ if(ctx->indent_level > 0)
indentlevel = ctx->indent_level;
else
/*
@@ -698,7 +663,7 @@ h5tools_str_indent(h5tools_str_t *str, const h5tool_format_t *info,
*/
indentlevel = ctx->default_indent_level;
- for (u = 0; u < indentlevel; u++)
+ for(u = 0; u < indentlevel; u++)
h5tools_str_append(str, "%s", OPT(info->line_indent, ""));
}
@@ -740,7 +705,7 @@ h5tools_str_indent(h5tools_str_t *str, const h5tool_format_t *info,
* added H5T_NATIVE_LDOUBLE case
*
* Vailin Choi; August 2010
- * Modified to handle printing of selected compound fields for h5watch.
+ * Modified to handle printing of selected compound fields for h5watch.
*
* Raymond Lu, 2011-09-01
* CLANG compiler complained about the line (about 800):
@@ -767,7 +732,7 @@ h5tools_str_sprint(h5tools_str_t *str, const h5tool_format_t *info, hid_t contai
H5T_class_t type_class;
/* Build default formats for long long types */
- if (!fmt_llong[0]) {
+ if(!fmt_llong[0]) {
HDsnprintf(fmt_llong, sizeof(fmt_llong), "%%%sd", H5_PRINTF_LL_WIDTH);
HDsnprintf(fmt_ullong, sizeof(fmt_ullong), "%%%su", H5_PRINTF_LL_WIDTH);
}
@@ -777,33 +742,31 @@ h5tools_str_sprint(h5tools_str_t *str, const h5tool_format_t *info, hid_t contai
nsize = H5Tget_size(type);
nsign = H5Tget_sign(type);
- if (info->raw) {
+ if(info->raw) {
size_t i;
- if (1 == nsize) {
+ if(1 == nsize)
h5tools_str_append(str, OPT(info->fmt_raw, "0x%02x"), ucp_vp[0]);
- }
- else {
- for (i = 0; i < nsize; i++) {
- if (i)
+ else
+ for(i = 0; i < nsize; i++) {
+ if(i)
h5tools_str_append(str, ":");
h5tools_str_append(str, OPT(info->fmt_raw, "%02x"), ucp_vp[i]);
}
- }
}
else {
if((type_class = H5Tget_class(type)) < 0)
return NULL;
switch (type_class) {
case H5T_FLOAT:
- if (sizeof(float) == nsize) {
+ if(sizeof(float) == nsize) {
/* if (H5Tequal(type, H5T_NATIVE_FLOAT)) */
float tempfloat;
HDmemcpy(&tempfloat, vp, sizeof(float));
h5tools_str_append(str, OPT(info->fmt_float, "%g"), (double)tempfloat);
}
- else if (sizeof(double) == nsize) {
+ else if(sizeof(double) == nsize) {
/* if (H5Tequal(type, H5T_NATIVE_DOUBLE)) */
double tempdouble;
@@ -811,7 +774,7 @@ h5tools_str_sprint(h5tools_str_t *str, const h5tool_format_t *info, hid_t contai
h5tools_str_append(str, OPT(info->fmt_double, "%g"), tempdouble);
#if H5_SIZEOF_LONG_DOUBLE !=0
}
- else if (sizeof(long double) == nsize) {
+ else if(sizeof(long double) == nsize) {
/* if (H5Tequal(type, H5T_NATIVE_LDOUBLE)) */
long double templdouble;
@@ -824,15 +787,15 @@ h5tools_str_sprint(h5tools_str_t *str, const h5tool_format_t *info, hid_t contai
case H5T_STRING:
{
unsigned int i;
- char quote = '\0';
- char *s;
+ char quote = '\0';
+ char *s;
quote = '\0';
- if (H5Tis_variable_str(type)) {
+ if(H5Tis_variable_str(type)) {
/* cp_vp is the pointer into the struct where a `char*' is stored. So we have
* to dereference the pointer to get the `char*' to pass to HDstrlen(). */
s = *(char**) cp_vp;
- if (s != NULL) size = HDstrlen(s);
+ if(s != NULL) size = HDstrlen(s);
}
else {
s = cp_vp;
@@ -841,10 +804,10 @@ h5tools_str_sprint(h5tools_str_t *str, const h5tool_format_t *info, hid_t contai
pad = H5Tget_strpad(type);
/* Check for NULL pointer for string */
- if (s == NULL)
+ if(s == NULL)
h5tools_str_append(str, "NULL");
else {
- for (i = 0; i < size && (s[i] || pad != H5T_STR_NULLTERM); i++) {
+ for(i = 0; i < size && (s[i] || pad != H5T_STR_NULLTERM); i++) {
unsigned j = 1;
/*
@@ -852,7 +815,7 @@ h5tools_str_sprint(h5tools_str_t *str, const h5tool_format_t *info, hid_t contai
* threshold is zero then that means it can repeat any number
* of times.
*/
- if (info->str_repeat > 0) while (i + j < size && s[i] == s[i + j])
+ if(info->str_repeat > 0) while (i + j < size && s[i] == s[i + j])
j++;
/*
@@ -861,14 +824,14 @@ h5tools_str_sprint(h5tools_str_t *str, const h5tool_format_t *info, hid_t contai
* the characters, then make sure the character to be repeated is
* in it's own quote.
*/
- if (info->str_repeat > 0 && j > info->str_repeat) {
- if (quote)
+ if(info->str_repeat > 0 && j > info->str_repeat) {
+ if(quote)
h5tools_str_append(str, "%c", quote);
quote = '\'';
h5tools_str_append(str, "%s%c", i ? " " : "", quote);
}
- else if (!quote) {
+ else if(!quote) {
quote = '"';
h5tools_str_append(str, "%s%c", i ? " " : "", quote);
}
@@ -877,7 +840,7 @@ h5tools_str_sprint(h5tools_str_t *str, const h5tool_format_t *info, hid_t contai
h5tools_print_char(str, info, s[i]);
/* Print the repeat count */
- if (info->str_repeat && j > info->str_repeat) {
+ if(info->str_repeat && j > info->str_repeat) {
#ifdef REPEAT_VERBOSE
h5tools_str_append(str, "%c repeats %d times", quote, j - 1);
#else
@@ -888,10 +851,10 @@ h5tools_str_sprint(h5tools_str_t *str, const h5tool_format_t *info, hid_t contai
}
}
- if (quote)
+ if(quote)
h5tools_str_append(str, "%c", quote);
- if (i == 0)
+ if(i == 0)
/*empty string*/
h5tools_str_append(str, "\"\"");
} /* end else */
@@ -899,15 +862,15 @@ h5tools_str_sprint(h5tools_str_t *str, const h5tool_format_t *info, hid_t contai
break;
case H5T_INTEGER:
- if (sizeof(char) == nsize) {
- if (info->ascii)
+ if(sizeof(char) == nsize) {
+ if(info->ascii)
h5tools_print_char(str, info, (char) (*ucp_vp));
else if(H5T_SGN_NONE == nsign) {
unsigned char tempuchar;
HDmemcpy(&tempuchar, ucp_vp, sizeof(unsigned char));
- if (packed_bits_num) {
- if (packed_data_offset >= 8 * sizeof(unsigned char))
+ if(packed_bits_num) {
+ if(packed_data_offset >= 8 * sizeof(unsigned char))
tempuchar = 0;
else
tempuchar = (unsigned char)((unsigned long long)(tempuchar >> packed_data_offset) & packed_data_mask);
@@ -918,8 +881,8 @@ h5tools_str_sprint(h5tools_str_t *str, const h5tool_format_t *info, hid_t contai
signed char tempchar;
HDmemcpy(&tempchar, cp_vp, sizeof(char));
- if (packed_bits_num) {
- if (packed_data_offset >= 8 * sizeof(char))
+ if(packed_bits_num) {
+ if(packed_data_offset >= 8 * sizeof(char))
tempchar = 0;
else
tempchar = (signed char)((unsigned long long)(tempchar >> packed_data_offset) & packed_data_mask);
@@ -927,13 +890,13 @@ h5tools_str_sprint(h5tools_str_t *str, const h5tool_format_t *info, hid_t contai
h5tools_str_append(str, OPT(info->fmt_schar, "%hhd"), tempchar);
}
} /* end if (sizeof(char) == nsize) */
- else if (sizeof(int) == nsize) {
+ else if(sizeof(int) == nsize) {
if(H5T_SGN_NONE == nsign) {
- unsigned int tempuint;
+ unsigned int tempuint;
HDmemcpy(&tempuint, vp, sizeof(unsigned int));
- if (packed_bits_num) {
- if (packed_data_offset >= 8 * sizeof(unsigned int))
+ if(packed_bits_num) {
+ if(packed_data_offset >= 8 * sizeof(unsigned int))
tempuint = 0;
else
tempuint = (unsigned)((tempuint >> packed_data_offset) & packed_data_mask);
@@ -941,11 +904,11 @@ h5tools_str_sprint(h5tools_str_t *str, const h5tool_format_t *info, hid_t contai
h5tools_str_append(str, OPT(info->fmt_uint, "%u"), tempuint);
}
else {
- int tempint;
+ int tempint;
HDmemcpy(&tempint, vp, sizeof(int));
- if (packed_bits_num) {
- if (packed_data_offset >= 8 * sizeof(int))
+ if(packed_bits_num) {
+ if(packed_data_offset >= 8 * sizeof(int))
tempint = 0;
else
tempint = (int)((unsigned long long)(tempint >> packed_data_offset) & packed_data_mask);
@@ -953,13 +916,13 @@ h5tools_str_sprint(h5tools_str_t *str, const h5tool_format_t *info, hid_t contai
h5tools_str_append(str, OPT(info->fmt_int, "%d"), tempint);
}
} /* end if (sizeof(int) == nsize) */
- else if (sizeof(short) == nsize) {
+ else if(sizeof(short) == nsize) {
if(H5T_SGN_NONE == nsign) {
unsigned short tempushort;
HDmemcpy(&tempushort, vp, sizeof(unsigned short));
- if (packed_bits_num) {
- if (packed_data_offset >= 8 * sizeof(unsigned short))
+ if(packed_bits_num) {
+ if(packed_data_offset >= 8 * sizeof(unsigned short))
tempushort = 0;
else
tempushort = (unsigned short)((unsigned long long)(tempushort >> packed_data_offset) & packed_data_mask);
@@ -970,8 +933,8 @@ h5tools_str_sprint(h5tools_str_t *str, const h5tool_format_t *info, hid_t contai
short tempshort;
HDmemcpy(&tempshort, vp, sizeof(short));
- if (packed_bits_num) {
- if (packed_data_offset >= 8 * sizeof(short))
+ if(packed_bits_num) {
+ if(packed_data_offset >= 8 * sizeof(short))
tempshort = 0;
else
tempshort = (short)((unsigned long long)(tempshort >> packed_data_offset) & packed_data_mask);
@@ -979,13 +942,13 @@ h5tools_str_sprint(h5tools_str_t *str, const h5tool_format_t *info, hid_t contai
h5tools_str_append(str, OPT(info->fmt_short, "%hd"), tempshort);
}
} /* end if (sizeof(short) == nsize) */
- else if (sizeof(long) == nsize) {
+ else if(sizeof(long) == nsize) {
if(H5T_SGN_NONE == nsign) {
- unsigned long tempulong;
+ unsigned long tempulong;
HDmemcpy(&tempulong, vp, sizeof(unsigned long));
- if (packed_bits_num) {
- if (packed_data_offset >= 8 * sizeof(unsigned long))
+ if(packed_bits_num) {
+ if(packed_data_offset >= 8 * sizeof(unsigned long))
tempulong = 0;
else
tempulong = (tempulong >> packed_data_offset) & packed_data_mask;
@@ -993,11 +956,11 @@ h5tools_str_sprint(h5tools_str_t *str, const h5tool_format_t *info, hid_t contai
h5tools_str_append(str, OPT(info->fmt_ulong, "%lu"), tempulong);
}
else {
- long templong;
+ long templong;
HDmemcpy(&templong, vp, sizeof(long));
- if (packed_bits_num) {
- if (packed_data_offset >= 8 * sizeof(long))
+ if(packed_bits_num) {
+ if(packed_data_offset >= 8 * sizeof(long))
templong = 0;
else
templong = (long)((unsigned long long)(templong >> packed_data_offset) & packed_data_mask);
@@ -1006,13 +969,13 @@ h5tools_str_sprint(h5tools_str_t *str, const h5tool_format_t *info, hid_t contai
}
} /* end if (sizeof(long) == nsize) */
#if H5_SIZEOF_LONG != H5_SIZEOF_LONG_LONG
- else if (sizeof(long long) == nsize) {
+ else if(sizeof(long long) == nsize) {
if(H5T_SGN_NONE == nsign) {
unsigned long long tempullong;
HDmemcpy(&tempullong, vp, sizeof(unsigned long long));
- if (packed_bits_num) {
- if (packed_data_offset >= 8 * sizeof(unsigned long long))
+ if(packed_bits_num) {
+ if(packed_data_offset >= 8 * sizeof(unsigned long long))
tempullong = 0;
else
tempullong = (tempullong >> packed_data_offset) & packed_data_mask;
@@ -1020,11 +983,11 @@ h5tools_str_sprint(h5tools_str_t *str, const h5tool_format_t *info, hid_t contai
h5tools_str_append(str, OPT(info->fmt_ullong, fmt_ullong), tempullong);
}
else {
- long long templlong;
+ long long templlong;
HDmemcpy(&templlong, vp, sizeof(long long));
- if (packed_bits_num) {
- if (packed_data_offset >= 8 * sizeof(long long))
+ if(packed_bits_num) {
+ if(packed_data_offset >= 8 * sizeof(long long))
templlong = 0;
else
templlong = (templlong >> packed_data_offset) & packed_data_mask;
@@ -1037,31 +1000,31 @@ h5tools_str_sprint(h5tools_str_t *str, const h5tool_format_t *info, hid_t contai
case H5T_COMPOUND:
if(ctx->cmpd_listv) { /* there is <list_of_fields> */
- int save_indent_level; /* The indentation level */
- size_t curr_field; /* Current field to display */
- int i = 0, x = 0; /* Local index variable */
- H5LD_memb_t **listv; /* Vector of information for <list_of_fields> */
+ int save_indent_level; /* The indentation level */
+ size_t curr_field; /* Current field to display */
+ int i = 0, x = 0; /* Local index variable */
+ H5LD_memb_t **listv; /* Vector of information for <list_of_fields> */
- listv = ctx->cmpd_listv;
+ listv = ctx->cmpd_listv;
ctx->cmpd_listv = NULL;
h5tools_str_append(str, "%s", OPT(info->cmpd_pre, "{"));
- /*
+ /*
* Go through the vector containing info about the comma-separated list of
- * compound fields and then members in each field:
- * put in "{", "}", ",", member name and value accordingly.
+ * compound fields and then members in each field:
+ * put in "{", "}", ",", member name and value accordingly.
*/
save_indent_level = ctx->indent_level;
for(curr_field = 0; listv[curr_field] != NULL; curr_field++) {
- if (curr_field)
+ if(curr_field)
h5tools_str_append(str, "%s", OPT(info->cmpd_sep, ", "OPTIONAL_LINE_BREAK));
- else
+ else
h5tools_str_append(str, "%s", OPT(info->cmpd_end, ""));
if(info->arr_linebreak)
h5tools_str_indent(str, info, ctx);
-
+
/* Process members of each field */
for(i = 0; listv[curr_field]->names[i] != NULL; i++) {
h5tools_str_append(str, OPT(info->cmpd_name, ""), listv[curr_field]->names[i]);
@@ -1086,7 +1049,8 @@ h5tools_str_sprint(h5tools_str_t *str, const h5tool_format_t *info, hid_t contai
ctx->cmpd_listv = info->cmpd_listv;
- } else {
+ }
+ else {
unsigned nmembs;
unsigned j;
@@ -1095,13 +1059,13 @@ h5tools_str_sprint(h5tools_str_t *str, const h5tool_format_t *info, hid_t contai
ctx->indent_level++;
- for (j = 0; j < nmembs; j++) {
- if (j)
+ for(j = 0; j < nmembs; j++) {
+ if(j)
h5tools_str_append(str, "%s", OPT(info->cmpd_sep, ", "OPTIONAL_LINE_BREAK));
else
h5tools_str_append(str, "%s", OPT(info->cmpd_end, ""));
- if (info->arr_linebreak)
+ if(info->arr_linebreak)
h5tools_str_indent(str, info, ctx);
/* The name */
@@ -1119,7 +1083,7 @@ h5tools_str_sprint(h5tools_str_t *str, const h5tool_format_t *info, hid_t contai
}
ctx->indent_level--;
- if (info->arr_linebreak) {
+ if(info->arr_linebreak) {
h5tools_str_append(str, "%s", OPT(info->cmpd_end, ""));
h5tools_str_indent(str, info, ctx);
}
@@ -1131,27 +1095,24 @@ h5tools_str_sprint(h5tools_str_t *str, const h5tool_format_t *info, hid_t contai
{
char enum_name[1024];
- if (H5Tenum_nameof(type, vp, enum_name, sizeof enum_name) >= 0) {
+ if(H5Tenum_nameof(type, vp, enum_name, sizeof enum_name) >= 0)
h5tools_str_append(str, h5tools_escape(enum_name, sizeof(enum_name)));
- }
else {
size_t i;
- if (1 == nsize) {
+ if(1 == nsize)
h5tools_str_append(str, "0x%02x", ucp_vp[0]);
- }
- else {
- for (i = 0; i < nsize; i++)
+ else
+ for(i = 0; i < nsize; i++)
h5tools_str_append(str, "%s%02x", i ? ":" : "", ucp_vp[i]);
- }
}
}
break;
case H5T_REFERENCE:
- if (h5tools_str_is_zero(vp, nsize))
+ if(h5tools_str_is_zero(vp, nsize))
h5tools_str_append(str, "NULL");
else {
- if (nsize == H5R_DSET_REG_REF_BUF_SIZE) {
+ if(nsize == H5R_DSET_REG_REF_BUF_SIZE) {
/* if (H5Tequal(type, H5T_STD_REF_DSETREG)) */
h5tools_str_sprint_region(str, info, container, vp);
}
@@ -1161,14 +1122,14 @@ h5tools_str_sprint(h5tools_str_t *str, const h5tool_format_t *info, hid_t contai
* Object references -- show the type and OID of the referenced
* object.
*/
- H5O_info_t oi;
+ H5O_info_t oi;
const char *path;
obj = H5Rdereference2(container, H5P_DEFAULT, H5R_OBJECT, vp);
H5Oget_info(obj, &oi);
/* Print object type and close object */
- switch (oi.type) {
+ switch(oi.type) {
case H5O_TYPE_GROUP:
h5tools_str_append(str, H5_TOOLS_GROUP);
break;
@@ -1190,14 +1151,14 @@ h5tools_str_sprint(h5tools_str_t *str, const h5tool_format_t *info, hid_t contai
H5Oclose(obj);
/* Print OID */
- if (info->obj_hidefileno)
+ if(info->obj_hidefileno)
h5tools_str_append(str, info->obj_format, oi.addr);
else
h5tools_str_append(str, info->obj_format, oi.fileno, oi.addr);
/* Print name */
path = lookup_ref_path(*(haddr_t *) vp);
- if (path) {
+ if(path) {
h5tools_str_append(str, " ");
h5tools_str_append(str, path);
h5tools_str_append(str, " ");
@@ -1208,8 +1169,8 @@ h5tools_str_sprint(h5tools_str_t *str, const h5tool_format_t *info, hid_t contai
case H5T_ARRAY:
{
- int k, ndims;
- hsize_t i, dims[H5S_MAX_RANK], temp_nelmts;
+ int k, ndims;
+ hsize_t i, dims[H5S_MAX_RANK], temp_nelmts;
static int is_next_arry_elmt = 0;
/* Get the array's base datatype for each element */
@@ -1220,7 +1181,7 @@ h5tools_str_sprint(h5tools_str_t *str, const h5tool_format_t *info, hid_t contai
HDassert(ndims >= 1 && ndims <= H5S_MAX_RANK);
/* Calculate the number of array elements */
- for (k = 0, nelmts = 1; k < ndims; k++) {
+ for(k = 0, nelmts = 1; k < ndims; k++) {
temp_nelmts = nelmts;
temp_nelmts *= dims[k];
HDassert(temp_nelmts == (hsize_t) ((size_t) temp_nelmts));
@@ -1231,18 +1192,18 @@ h5tools_str_sprint(h5tools_str_t *str, const h5tool_format_t *info, hid_t contai
ctx->indent_level++;
- for (i = 0; i < nelmts; i++) {
- if (i)
+ for(i = 0; i < nelmts; i++) {
+ if(i)
h5tools_str_append(str, "%s", OPT(info->arr_sep, "," OPTIONAL_LINE_BREAK));
- if (info->arr_linebreak && i && i % dims[ndims - 1] == 0) {
+ if(info->arr_linebreak && i && i % dims[ndims - 1] == 0) {
h5tools_str_append(str, "%s", "\n");
h5tools_str_indent(str, info, ctx);
} /* end if */
- else if (i && info->arr_sep) {
+ else if(i && info->arr_sep) {
/* if next element begin, add next line with indent */
- if (is_next_arry_elmt) {
+ if(is_next_arry_elmt) {
is_next_arry_elmt = 0;
h5tools_str_append(str, "%s", "\n ");
@@ -1283,21 +1244,21 @@ h5tools_str_sprint(h5tools_str_t *str, const h5tool_format_t *info, hid_t contai
/* Get the number of sequence elements */
nelmts = ((hvl_t *) cp_vp)->len;
- for (i = 0; i < nelmts; i++) {
- if (i) h5tools_str_append(str, "%s", OPT(info->vlen_sep, "," OPTIONAL_LINE_BREAK));
+ for(i = 0; i < nelmts; i++) {
+ if(i) h5tools_str_append(str, "%s", OPT(info->vlen_sep, "," OPTIONAL_LINE_BREAK));
#ifdef LATER
/* Need to fix so VL data breaks at correct location on end of line -QAK */
- if (info->arr_linebreak && h5tools_str_len(str)>=info->line_ncols) {
+ if(info->arr_linebreak && h5tools_str_len(str)>=info->line_ncols) {
int x;
h5tools_str_append(str, "%s", "\n");
/* need to indent some more here */
- if (ctx->indent_level >= 0)
+ if(ctx->indent_level >= 0)
h5tools_str_append(str, "%s", OPT(info->line_pre, ""));
- for (x = 0; x < ctx->indent_level + 1; x++)
+ for(x = 0; x < ctx->indent_level + 1; x++)
h5tools_str_append(str,"%s",OPT(info->line_indent,""));
} /* end if */
#endif /* LATER */
@@ -1321,13 +1282,11 @@ h5tools_str_sprint(h5tools_str_t *str, const h5tool_format_t *info, hid_t contai
{
/* All other types get printed as hexadecimal */
size_t i;
- if (1 == nsize) {
+ if(1 == nsize)
h5tools_str_append(str, "0x%02x", ucp_vp[0]);
- }
- else {
- for (i = 0; i < nsize; i++)
+ else
+ for(i = 0; i < nsize; i++)
h5tools_str_append(str, "%s%02x", i ? ":" : "", ucp_vp[i]);
- }
}
break;
@@ -1354,14 +1313,14 @@ void
h5tools_str_sprint_region(h5tools_str_t *str, const h5tool_format_t *info,
hid_t container, void *vp)
{
- hid_t obj, region;
- char ref_name[1024];
+ hid_t obj, region;
+ char ref_name[1024];
H5S_sel_type region_type;
obj = H5Rdereference2(container, H5P_DEFAULT, H5R_DATASET_REGION, vp);
- if (obj >= 0) {
+ if(obj >= 0) {
region = H5Rget_region(container, H5R_DATASET_REGION, vp);
- if (region >= 0) {
+ if(region >= 0) {
H5Rget_name(obj, H5R_DATASET_REGION, vp, (char*) ref_name, 1024);
h5tools_str_append(str, info->dset_format, ref_name);
@@ -1383,33 +1342,30 @@ h5tools_str_sprint_region(h5tools_str_t *str, const h5tool_format_t *info,
}
/*-------------------------------------------------------------------------
- * Function: h5tools_escape
+ * Function: h5tools_escape
*
- * Purpose: Changes all "funny" characters in S into standard C escape
- * sequences.
+ * Purpose: Changes all "funny" characters in S into standard C escape
+ * sequences.
*
- * Return: Success: S
+ * Return: Success: S
*
- * Failure: NULL if the buffer would overflow. The
+ * Failure: NULL if the buffer would overflow. The
* buffer has as many left-to-right escapes as
* possible before overflow would have happened.
*
* Programmer: Robb Matzke
* Monday, April 26, 1999
- *
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
static char *
h5tools_escape(char *s/*in,out*/, size_t size)
{
- register size_t i;
- size_t n = HDstrlen(s);
+ register size_t i;
const char *escape;
- char octal[8];
+ char octal[8];
+ size_t n = HDstrlen(s);
- for (i = 0; i < n; i++) {
+ for(i = 0; i < n; i++) {
switch (s[i]) {
case '\'':
escape = "\\\'";
@@ -1445,21 +1401,20 @@ h5tools_escape(char *s/*in,out*/, size_t size)
escape = "\\v";
break;
default:
- if (!isprint(s[i])) {
+ if(!isprint(s[i])) {
HDsnprintf(octal, sizeof(octal), "\\%03o", (unsigned char) s[i]);
escape = octal;
}
- else {
+ else
escape = NULL;
- }
break;
}
- if (escape) {
+ if(escape) {
size_t esc_size = HDstrlen(escape);
- if (n + esc_size + 1 > size)
+ if(n + esc_size + 1 > size)
/*would overflow*/
return NULL;
@@ -1476,15 +1431,12 @@ h5tools_escape(char *s/*in,out*/, size_t size)
/*-------------------------------------------------------------------------
* Function: h5tools_str_is_zero
*
- * Purpose: Determines if memory is initialized to all zero bytes.
+ * Purpose: Determines if memory is initialized to all zero bytes.
*
- * Return: TRUE if all bytes are zero; FALSE otherwise
+ * Return: TRUE if all bytes are zero; FALSE otherwise
*
* Programmer: Robb Matzke
* Monday, June 7, 1999
- *
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
static hbool_t
@@ -1492,8 +1444,8 @@ h5tools_str_is_zero(const void *_mem, size_t size)
{
const unsigned char *mem = (const unsigned char *) _mem;
- while (size-- > 0)
- if (mem[size])
+ while(size-- > 0)
+ if(mem[size])
return FALSE;
return TRUE;
@@ -1518,11 +1470,11 @@ h5tools_str_is_zero(const void *_mem, size_t size)
char *
h5tools_str_replace ( const char *string, const char *substr, const char *replacement )
{
- char *tok = NULL;
+ char *tok = NULL;
char *newstr = NULL;
- char *head = NULL;
-
- if(substr == NULL || replacement == NULL)
+ char *head = NULL;
+
+ if(substr == NULL || replacement == NULL)
return HDstrdup(string);
newstr = HDstrdup(string);
head = newstr;
@@ -1533,8 +1485,8 @@ h5tools_str_replace ( const char *string, const char *substr, const char *replac
newstr = (char *)HDmalloc(HDstrlen(oldstr) - HDstrlen(substr) + HDstrlen(replacement) + 1);
if(newstr == NULL) {
- HDfree(oldstr);
- return NULL;
+ HDfree(oldstr);
+ return NULL;
}
HDmemcpy(newstr, oldstr, (size_t)(tok - oldstr));
HDmemcpy(newstr + (tok - oldstr), replacement, HDstrlen(replacement));
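[Editor's note] The h5tools_str.c hunks above are largely whitespace and brace-style cleanups; the packed-bits branches they touch all repeat the same logic for each integer width: print 0 when the requested offset is at or beyond the type's bit width, otherwise shift right by packed_data_offset and mask with packed_data_mask. A minimal standalone sketch of that pattern (not the library code; building the mask from a hypothetical field length is an assumption for illustration):

    #include <stdio.h>

    /* Sketch only: extract a packed-bits field from an unsigned value,
     * mirroring the per-width branches in h5tools_str_sprint(). If the
     * offset is at or past the type's bit width the result is 0,
     * otherwise shift right and mask. */
    static unsigned long long
    extract_packed_bits(unsigned long long value, unsigned offset, unsigned length)
    {
        unsigned long long mask;

        if (offset >= 8 * sizeof(unsigned long long))
            return 0;
        /* mask of `length` low-order bits (length assumed <= 64) */
        mask = (length >= 64) ? ~0ULL : ((1ULL << length) - 1);
        return (value >> offset) & mask;
    }

    int
    main(void)
    {
        /* bits 4..7 of 0xAB are 0xA */
        printf("0x%llx\n", extract_packed_bits(0xABULL, 4, 4));
        return 0;
    }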
diff --git a/tools/lib/h5tools_str.h b/tools/lib/h5tools_str.h
index 6173b89..a5045ac 100644
--- a/tools/lib/h5tools_str.h
+++ b/tools/lib/h5tools_str.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/tools/lib/h5tools_type.c b/tools/lib/h5tools_type.c
index 8a56d29..ae7160b 100644
--- a/tools/lib/h5tools_type.c
+++ b/tools/lib/h5tools_type.c
@@ -5,51 +5,16 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "H5private.h"
#include "h5tools.h"
/*-------------------------------------------------------------------------
- * Function: h5tools_get_native_type
- *
- * Purpose: Wrapper around H5Tget_native_type() to work around
- * Problems with bitfields.
- *
- * Return: Success: datatype ID
- *
- * Failure: FAIL
- *
- * Programmer: Quincey Koziol
- * Tuesday, October 5, 2004
- *
- * Modifications:
- *
- *-------------------------------------------------------------------------
- */
-hid_t
-h5tools_get_native_type(hid_t type)
-{
- hid_t p_type;
- H5T_class_t type_class;
-
- type_class = H5Tget_class(type);
- if(type_class==H5T_BITFIELD)
- p_type=H5Tcopy(type);
- else
- p_type = H5Tget_native_type(type,H5T_DIR_DEFAULT);
-
- return(p_type);
-}
-
-
-/*-------------------------------------------------------------------------
* Function: h5tools_get_little_endian_type
*
* Purpose: Get a little endian type from a file type
@@ -103,8 +68,18 @@ h5tools_get_little_endian_type(hid_t tid)
p_type=H5Tcopy(H5T_IEEE_F64LE);
break;
- case H5T_TIME:
case H5T_BITFIELD:
+ if ( size == 1)
+ p_type=H5Tcopy(H5T_STD_B8LE);
+ else if ( size == 2)
+ p_type=H5Tcopy(H5T_STD_B16LE);
+ else if ( size == 4)
+ p_type=H5Tcopy(H5T_STD_B32LE);
+ else if ( size == 8)
+ p_type=H5Tcopy(H5T_STD_B64LE);
+ break;
+
+ case H5T_TIME:
case H5T_OPAQUE:
case H5T_STRING:
case H5T_COMPOUND:
@@ -180,8 +155,18 @@ h5tools_get_big_endian_type(hid_t tid)
p_type=H5Tcopy(H5T_IEEE_F64BE);
break;
- case H5T_TIME:
case H5T_BITFIELD:
+ if ( size == 1)
+ p_type=H5Tcopy(H5T_STD_B8BE);
+ else if ( size == 2)
+ p_type=H5Tcopy(H5T_STD_B16BE);
+ else if ( size == 4)
+ p_type=H5Tcopy(H5T_STD_B32BE);
+ else if ( size == 8)
+ p_type=H5Tcopy(H5T_STD_B64BE);
+ break;
+
+ case H5T_TIME:
case H5T_OPAQUE:
case H5T_STRING:
case H5T_COMPOUND:
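[Editor's note] The h5tools_type.c change above adds a real H5T_BITFIELD branch to h5tools_get_little_endian_type() and h5tools_get_big_endian_type() instead of treating bitfields like the other unsupported classes: the field's byte size selects the matching standard bitfield type. A sketch mirroring the little-endian branch (the -1 return for unsupported widths is an assumption, not shown in the patch):

    #include "hdf5.h"

    /* Sketch of the added branch: map a bitfield's size to the
     * corresponding little-endian standard type. Assumes `tid` is a
     * valid H5T_BITFIELD datatype; the caller closes the returned copy. */
    static hid_t
    bitfield_to_little_endian(hid_t tid)
    {
        size_t size = H5Tget_size(tid);

        switch (size) {
            case 1:  return H5Tcopy(H5T_STD_B8LE);
            case 2:  return H5Tcopy(H5T_STD_B16LE);
            case 4:  return H5Tcopy(H5T_STD_B32LE);
            case 8:  return H5Tcopy(H5T_STD_B64LE);
            default: return -1; /* unsupported width */
        }
    }

The big-endian variant added in the second hunk is identical except that it copies H5T_STD_B8BE through H5T_STD_B64BE.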
diff --git a/tools/lib/h5tools_utils.c b/tools/lib/h5tools_utils.c
index 0884945..c361e25 100644
--- a/tools/lib/h5tools_utils.c
+++ b/tools/lib/h5tools_utils.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/tools/lib/h5tools_utils.h b/tools/lib/h5tools_utils.h
index f7ab65b..a31ba3a 100644
--- a/tools/lib/h5tools_utils.h
+++ b/tools/lib/h5tools_utils.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/tools/lib/h5trav.c b/tools/lib/h5trav.c
index 66beb0d..3d55f2d 100644
--- a/tools/lib/h5trav.c
+++ b/tools/lib/h5trav.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
diff --git a/tools/lib/h5trav.h b/tools/lib/h5trav.h
index 352a9e5..c2ad9b7 100644
--- a/tools/lib/h5trav.h
+++ b/tools/lib/h5trav.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef H5TRAV_H__
diff --git a/tools/lib/io_timer.c b/tools/lib/io_timer.c
index 6053ce9..e3318e9 100644
--- a/tools/lib/io_timer.c
+++ b/tools/lib/io_timer.c
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* changes:
* rename pio_timer.c as io_timer.c;
diff --git a/tools/lib/io_timer.h b/tools/lib/io_timer.h
index 4ce0733..48b6c87 100644
--- a/tools/lib/io_timer.h
+++ b/tools/lib/io_timer.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef IO_TIMER__
diff --git a/tools/lib/ph5diff.h b/tools/lib/ph5diff.h
index 2a75228..9628d45 100644
--- a/tools/lib/ph5diff.h
+++ b/tools/lib/ph5diff.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef _PH5DIFF_H__
diff --git a/tools/src/CMakeLists.txt b/tools/src/CMakeLists.txt
index 5001b26..37680dd 100644
--- a/tools/src/CMakeLists.txt
+++ b/tools/src/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_TOOLS_SRC)
#-----------------------------------------------------------------------------
diff --git a/tools/src/Makefile.am b/tools/src/Makefile.am
index 0cea969..beceee5 100644
--- a/tools/src/Makefile.am
+++ b/tools/src/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
@@ -24,6 +22,7 @@ include $(top_srcdir)/config/commence.am
CONFIG=ordered
# All subdirectories
-SUBDIRS=h5diff h5ls h5dump misc h5import h5repack h5jam h5copy h5stat
+SUBDIRS=h5diff h5ls h5dump misc h5import h5repack h5jam h5copy \
+ h5format_convert h5stat
include $(top_srcdir)/config/conclude.am
diff --git a/tools/src/h5copy/CMakeLists.txt b/tools/src/h5copy/CMakeLists.txt
index 2a33c9e..d1f5f50 100644
--- a/tools/src/h5copy/CMakeLists.txt
+++ b/tools/src/h5copy/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_TOOLS_SRC_H5COPY)
#-----------------------------------------------------------------------------
@@ -18,6 +18,17 @@ set_global_variable (HDF5_UTILS_TO_EXPORT "${HDF5_UTILS_TO_EXPORT};h5copy")
set (H5_DEP_EXECUTABLES h5copy)
+if (BUILD_SHARED_LIBS)
+ add_executable (h5copy-shared ${HDF5_TOOLS_SRC_H5COPY_SOURCE_DIR}/h5copy.c)
+ TARGET_NAMING (h5copy-shared SHARED)
+ TARGET_C_PROPERTIES (h5copy-shared SHARED " " " ")
+ target_link_libraries (h5copy-shared ${HDF5_TOOLS_LIBSH_TARGET} ${HDF5_LIBSH_TARGET})
+ set_target_properties (h5copy-shared PROPERTIES FOLDER tools)
+ set_global_variable (HDF5_UTILS_TO_EXPORT "${HDF5_UTILS_TO_EXPORT};h5copy-shared")
+
+ set (H5_DEP_EXECUTABLES ${H5_DEP_EXECUTABLES} h5copy-shared)
+endif ()
+
##############################################################################
##############################################################################
### I N S T A L L A T I O N ###
@@ -27,13 +38,17 @@ set (H5_DEP_EXECUTABLES h5copy)
#-----------------------------------------------------------------------------
# Rules for Installation of tools using make Install target
#-----------------------------------------------------------------------------
+if (HDF5_EXPORTED_TARGETS)
+ if (BUILD_SHARED_LIBS)
+ INSTALL_PROGRAM_PDB (h5copy-shared ${HDF5_INSTALL_BIN_DIR} toolsapplications)
+ endif ()
+ INSTALL_PROGRAM_PDB (h5copy ${HDF5_INSTALL_BIN_DIR} toolsapplications)
-#INSTALL_PROGRAM_PDB (h5copy ${HDF5_INSTALL_BIN_DIR} toolsapplications)
-
-install (
- TARGETS
- h5copy
- EXPORT
- ${HDF5_EXPORTED_TARGETS}
- RUNTIME DESTINATION ${HDF5_INSTALL_BIN_DIR} COMPONENT toolsapplications
-)
+ install (
+ TARGETS
+ ${H5_DEP_EXECUTABLES}
+ EXPORT
+ ${HDF5_EXPORTED_TARGETS}
+ RUNTIME DESTINATION ${HDF5_INSTALL_BIN_DIR} COMPONENT toolsapplications
+ )
+endif ()
diff --git a/tools/src/h5copy/Makefile.am b/tools/src/h5copy/Makefile.am
index b2cdf92..d67fafc 100644
--- a/tools/src/h5copy/Makefile.am
+++ b/tools/src/h5copy/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
diff --git a/tools/src/h5copy/h5copy.c b/tools/src/h5copy/h5copy.c
index 5371a21..390b93e 100644
--- a/tools/src/h5copy/h5copy.c
+++ b/tools/src/h5copy/h5copy.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "H5private.h"
diff --git a/tools/src/h5diff/CMakeLists.txt b/tools/src/h5diff/CMakeLists.txt
index 85a24ef..00d93de 100644
--- a/tools/src/h5diff/CMakeLists.txt
+++ b/tools/src/h5diff/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_TOOLS_SRC_H5DIFF)
#-----------------------------------------------------------------------------
@@ -21,6 +21,20 @@ set_global_variable (HDF5_UTILS_TO_EXPORT "${HDF5_UTILS_TO_EXPORT};h5diff")
set (H5_DEP_EXECUTABLES h5diff)
+if (BUILD_SHARED_LIBS)
+ add_executable (h5diff-shared
+ ${HDF5_TOOLS_SRC_H5DIFF_SOURCE_DIR}/h5diff_common.c
+ ${HDF5_TOOLS_SRC_H5DIFF_SOURCE_DIR}/h5diff_main.c
+ )
+ TARGET_NAMING (h5diff-shared SHARED)
+ TARGET_C_PROPERTIES (h5diff-shared SHARED " " " ")
+ target_link_libraries (h5diff-shared ${HDF5_TOOLS_LIBSH_TARGET} ${HDF5_LIBSH_TARGET})
+ set_target_properties (h5diff-shared PROPERTIES FOLDER tools)
+ set_global_variable (HDF5_UTILS_TO_EXPORT "${HDF5_UTILS_TO_EXPORT};h5diff-shared")
+
+ set (H5_DEP_EXECUTABLES ${H5_DEP_EXECUTABLES} h5diff-shared)
+endif ()
+
if (H5_HAVE_PARALLEL)
add_executable (ph5diff
${HDF5_TOOLS_SRC_H5DIFF_SOURCE_DIR}/h5diff_common.c
@@ -31,7 +45,7 @@ if (H5_HAVE_PARALLEL)
target_link_libraries (ph5diff ${HDF5_TOOLS_LIB_TARGET} ${HDF5_LIB_TARGET})
set_target_properties (ph5diff PROPERTIES FOLDER tools)
set_global_variable (HDF5_UTILS_TO_EXPORT "${HDF5_UTILS_TO_EXPORT};ph5diff")
-endif (H5_HAVE_PARALLEL)
+endif ()
##############################################################################
##############################################################################
@@ -42,26 +56,29 @@ endif (H5_HAVE_PARALLEL)
#-----------------------------------------------------------------------------
# Rules for Installation of tools using make Install target
#-----------------------------------------------------------------------------
-
-#INSTALL_PROGRAM_PDB (h5diff ${HDF5_INSTALL_BIN_DIR} toolsapplications)
-
-install (
- TARGETS
- h5diff
- EXPORT
- ${HDF5_EXPORTED_TARGETS}
- RUNTIME DESTINATION ${HDF5_INSTALL_BIN_DIR} COMPONENT toolsapplications
-)
-
-if (H5_HAVE_PARALLEL)
-
- #INSTALL_PROGRAM_PDB (ph5diff ${HDF5_INSTALL_BIN_DIR} toolsapplications)
+if (HDF5_EXPORTED_TARGETS)
+ if (BUILD_SHARED_LIBS)
+ INSTALL_PROGRAM_PDB (h5diff-shared ${HDF5_INSTALL_BIN_DIR} toolsapplications)
+ endif ()
+ INSTALL_PROGRAM_PDB (h5diff ${HDF5_INSTALL_BIN_DIR} toolsapplications)
install (
TARGETS
- ph5diff
+ ${H5_DEP_EXECUTABLES}
EXPORT
${HDF5_EXPORTED_TARGETS}
RUNTIME DESTINATION ${HDF5_INSTALL_BIN_DIR} COMPONENT toolsapplications
)
-endif (H5_HAVE_PARALLEL)
+
+ if (H5_HAVE_PARALLEL)
+ #INSTALL_PROGRAM_PDB (ph5diff ${HDF5_INSTALL_BIN_DIR} toolsapplications)
+
+ install (
+ TARGETS
+ ph5diff
+ EXPORT
+ ${HDF5_EXPORTED_TARGETS}
+ RUNTIME DESTINATION ${HDF5_INSTALL_BIN_DIR} COMPONENT toolsapplications
+ )
+ endif ()
+endif ()
diff --git a/tools/src/h5diff/Makefile.am b/tools/src/h5diff/Makefile.am
index a5af3f2..3957e59 100644
--- a/tools/src/h5diff/Makefile.am
+++ b/tools/src/h5diff/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
diff --git a/tools/src/h5diff/h5diff_common.c b/tools/src/h5diff/h5diff_common.c
index 2453ffc..0537b9f 100644
--- a/tools/src/h5diff/h5diff_common.c
+++ b/tools/src/h5diff/h5diff_common.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <stdlib.h>
@@ -58,7 +56,7 @@ static struct long_options l_opts[] = {
static void check_options(diff_opt_t* options)
{
/*--------------------------------------------------------------
- * check for mutually exclusive options
+ * check for mutually exclusive options
*--------------------------------------------------------------*/
/* check between -d , -p, --use-system-epsilon.
@@ -129,13 +127,13 @@ void parse_command_line(int argc,
h5diff_exit(EXIT_SUCCESS);
case 'v':
options->m_verbose = 1;
- /* This for loop is for handling style like
+ /* This for loop is for handling style like
* -v, -v1, --verbose, --verbose=1.
*/
for (i = 1; i < argc; i++)
- {
- /*
- * short opt
+ {
+ /*
+ * short opt
*/
if (!strcmp (argv[i], "-v")) /* no arg */
{
@@ -147,10 +145,10 @@ void parse_command_line(int argc,
{
options->m_verbose_level = atoi(&argv[i][2]);
break;
- }
+ }
- /*
- * long opt
+ /*
+ * long opt
*/
if (!strcmp (argv[i], "--verbose")) /* no arg */
{
@@ -179,7 +177,7 @@ void parse_command_line(int argc,
break;
case 'E':
options->exclude_path = 1;
-
+
/* create linked list of excluding objects */
if( (exclude_node = (struct exclude_path_list*) HDmalloc(sizeof(struct exclude_path_list))) == NULL)
{
@@ -191,8 +189,8 @@ void parse_command_line(int argc,
exclude_node->obj_path = (char*)opt_arg;
exclude_node->obj_type = H5TRAV_TYPE_UNKNOWN;
exclude_prev = exclude_head;
-
- if (NULL == exclude_head)
+
+ if (NULL == exclude_head)
{
exclude_head = exclude_node;
exclude_head->next = NULL;
@@ -204,7 +202,7 @@ void parse_command_line(int argc,
exclude_node->next = NULL;
exclude_prev->next = exclude_node;
- }
+ }
break;
case 'd':
options->d=1;
@@ -460,7 +458,7 @@ check_d_input( const char *str )
void usage(void)
{
- printf("usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]] \n");
+ printf("usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]\n");
printf(" file1 File name of the first HDF5 file\n");
printf(" file2 File name of the second HDF5 file\n");
printf(" [obj1] Name of an HDF5 object, in absolute path\n");
@@ -521,19 +519,22 @@ void usage(void)
printf(" -n C, --count=C\n");
printf(" Print differences up to C. C must be a positive integer.\n");
printf(" -d D, --delta=D\n");
- printf(" Print difference if (|a-b| > D). D must be a positive number.\n");
+ printf(" Print difference if (|a-b| > D). D must be a positive number. Where a\n");
+ printf(" is the data point value in file1 and b is the data point value in file2.\n");
printf(" Can not use with '-p' or '--use-system-epsilon'.\n");
printf(" -p R, --relative=R\n");
- printf(" Print difference if (|(a-b)/b| > R). R must be a positive number.\n");
+ printf(" Print difference if (|(a-b)/b| > R). R must be a positive number. Where a\n");
+ printf(" is the data point value in file1 and b is the data point value in file2.\n");
printf(" Can not use with '-d' or '--use-system-epsilon'.\n");
printf(" --use-system-epsilon\n");
- printf(" Print difference if (|a-b| > EPSILON), EPSILON is system defined value.\n");
+ printf(" Print difference if (|a-b| > EPSILON), EPSILON is system defined value. Where a\n");
+ printf(" is the data point value in file1 and b is the data point value in file2.\n");
printf(" If the system epsilon is not defined,one of the following predefined\n");
printf(" values will be used:\n");
printf(" FLT_EPSILON = 1.19209E-07 for floating-point type\n");
printf(" DBL_EPSILON = 2.22045E-16 for double precision type\n");
printf(" Can not use with '-p' or '-d'.\n");
- printf(" --exclude-path \"path\" \n");
+ printf(" --exclude-path \"path\"\n");
printf(" Exclude the specified path to an object when comparing files or groups.\n");
printf(" If a group is excluded, all member objects will also be excluded.\n");
printf(" The specified path is excluded wherever it occurs.\n");
@@ -572,15 +573,15 @@ void usage(void)
printf("\n");
printf(" Object comparison:\n");
- printf(" 1) Groups \n");
+ printf(" 1) Groups\n");
printf(" First compares the names of member objects (relative path, from the\n");
printf(" specified group) and generates a report of objects that appear in only\n");
printf(" one group or in both groups. Common objects are then compared recursively.\n");
- printf(" 2) Datasets \n");
+ printf(" 2) Datasets\n");
printf(" Array rank and dimensions, datatypes, and data values are compared.\n");
- printf(" 3) Datatypes \n");
+ printf(" 3) Datatypes\n");
printf(" The comparison is based on the return value of H5Tequal.\n");
- printf(" 4) Symbolic links \n");
+ printf(" 4) Symbolic links\n");
printf(" The paths to the target objects are compared.\n");
printf(" (The option --follow-symlinks overrides the default behavior when\n");
printf(" symbolic links are compared.).\n");
diff --git a/tools/src/h5diff/h5diff_common.h b/tools/src/h5diff/h5diff_common.h
index 5b1317f..e5dfe3f 100644
--- a/tools/src/h5diff/h5diff_common.h
+++ b/tools/src/h5diff/h5diff_common.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef H5DIFFCOMMON_H__
diff --git a/tools/src/h5diff/h5diff_main.c b/tools/src/h5diff/h5diff_main.c
index cdaca29..92a1610 100644
--- a/tools/src/h5diff/h5diff_main.c
+++ b/tools/src/h5diff/h5diff_main.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <stdlib.h>
diff --git a/tools/src/h5diff/ph5diff_main.c b/tools/src/h5diff/ph5diff_main.c
index a26b6e9..bfeb408 100644
--- a/tools/src/h5diff/ph5diff_main.c
+++ b/tools/src/h5diff/ph5diff_main.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <stdlib.h>
diff --git a/tools/src/h5dump/CMakeLists.txt b/tools/src/h5dump/CMakeLists.txt
index ab310db..65a7cdd 100644
--- a/tools/src/h5dump/CMakeLists.txt
+++ b/tools/src/h5dump/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_TOOLS_SRC_H5DUMP)
#-----------------------------------------------------------------------------
@@ -22,6 +22,21 @@ set_global_variable (HDF5_UTILS_TO_EXPORT "${HDF5_UTILS_TO_EXPORT};h5dump")
set (H5_DEP_EXECUTABLES h5dump)
+if (BUILD_SHARED_LIBS)
+ add_executable (h5dump-shared
+ ${HDF5_TOOLS_SRC_H5DUMP_SOURCE_DIR}/h5dump.c
+ ${HDF5_TOOLS_SRC_H5DUMP_SOURCE_DIR}/h5dump_ddl.c
+ ${HDF5_TOOLS_SRC_H5DUMP_SOURCE_DIR}/h5dump_xml.c
+ )
+ TARGET_NAMING (h5dump-shared SHARED)
+ TARGET_C_PROPERTIES (h5dump-shared SHARED " " " ")
+ target_link_libraries (h5dump-shared ${HDF5_TOOLS_LIBSH_TARGET} ${HDF5_LIBSH_TARGET})
+ set_target_properties (h5dump-shared PROPERTIES FOLDER tools)
+ set_global_variable (HDF5_UTILS_TO_EXPORT "${HDF5_UTILS_TO_EXPORT};h5dump-shared")
+
+ set (H5_DEP_EXECUTABLES ${H5_DEP_EXECUTABLES} h5dump-shared)
+endif ()
+
##############################################################################
##############################################################################
### I N S T A L L A T I O N ###
@@ -31,13 +46,17 @@ set (H5_DEP_EXECUTABLES h5dump)
#-----------------------------------------------------------------------------
# Rules for Installation of tools using make Install target
#-----------------------------------------------------------------------------
+if (HDF5_EXPORTED_TARGETS)
+ if (BUILD_SHARED_LIBS)
+ INSTALL_PROGRAM_PDB (h5dump-shared ${HDF5_INSTALL_BIN_DIR} toolsapplications)
+ endif ()
+ INSTALL_PROGRAM_PDB (h5dump ${HDF5_INSTALL_BIN_DIR} toolsapplications)
-#INSTALL_PROGRAM_PDB (h5dump ${HDF5_INSTALL_BIN_DIR} toolsapplications)
-
-install (
- TARGETS
- h5dump
- EXPORT
- ${HDF5_EXPORTED_TARGETS}
- RUNTIME DESTINATION ${HDF5_INSTALL_BIN_DIR} COMPONENT toolsapplications
-)
+ install (
+ TARGETS
+ ${H5_DEP_EXECUTABLES}
+ EXPORT
+ ${HDF5_EXPORTED_TARGETS}
+ RUNTIME DESTINATION ${HDF5_INSTALL_BIN_DIR} COMPONENT toolsapplications
+ )
+endif ()
diff --git a/tools/src/h5dump/Makefile.am b/tools/src/h5dump/Makefile.am
index dc43065..86109d5 100644
--- a/tools/src/h5dump/Makefile.am
+++ b/tools/src/h5dump/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
diff --git a/tools/src/h5dump/h5dump.c b/tools/src/h5dump/h5dump.c
index 562cfd5..b53c212 100644
--- a/tools/src/h5dump/h5dump.c
+++ b/tools/src/h5dump/h5dump.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <stdio.h>
#include <stdlib.h>
diff --git a/tools/src/h5dump/h5dump.h b/tools/src/h5dump/h5dump.h
index 8224c02..801f60d 100644
--- a/tools/src/h5dump/h5dump.h
+++ b/tools/src/h5dump/h5dump.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef H5DUMP_H__
#define H5DUMP_H__
@@ -80,10 +78,6 @@ int include_attrs = TRUE; /* Display attributes */
int display_vds_first = FALSE; /* vds display to all by default*/
int vds_gap_size = 0; /* vds skip missing files default is none */
-/* sort parameters */
-H5_index_t sort_by = H5_INDEX_NAME; /*sort_by [creation_order | name] */
-H5_iter_order_t sort_order = H5_ITER_INC; /*sort_order [ascending | descending] */
-
#define PACKED_BITS_MAX 8 /* Maximum number of packed-bits to display */
#define PACKED_BITS_SIZE_MAX (8*sizeof(long long)) /* Maximum bits size of integer types of packed-bits */
/* mask list for packed bits */
diff --git a/tools/src/h5dump/h5dump_ddl.c b/tools/src/h5dump/h5dump_ddl.c
index 182d570..8ce6cd6 100644
--- a/tools/src/h5dump/h5dump_ddl.c
+++ b/tools/src/h5dump/h5dump_ddl.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <stdio.h>
#include <stdlib.h>
@@ -1186,9 +1184,11 @@ dump_fcpl(hid_t fid)
hsize_t userblock; /* userblock size retrieved from FCPL */
size_t off_size; /* size of offsets in the file */
size_t len_size; /* size of lengths in the file */
- H5F_file_space_type_t fs_strategy; /* file space strategy */
- hsize_t fs_threshold; /* free-space section threshold */
- H5F_info2_t finfo; /* file information */
+ H5F_fspace_strategy_t fs_strategy; /* file space strategy */
+ hbool_t fs_persist; /* Persisting free-space or not */
+ hsize_t fs_threshold; /* free-space section threshold */
+ hsize_t fsp_size; /* file space page size */
+ H5F_info2_t finfo; /* file information */
#ifdef SHOW_FILE_DRIVER
hid_t fapl; /* file access property list ID */
hid_t fdriver; /* file driver */
@@ -1204,7 +1204,8 @@ dump_fcpl(hid_t fid)
H5Pget_sizes(fcpl,&off_size,&len_size);
H5Pget_sym_k(fcpl,&sym_ik,&sym_lk);
H5Pget_istore_k(fcpl,&istore_ik);
- H5Pget_file_space(fcpl, &fs_strategy, &fs_threshold);
+ H5Pget_file_space_strategy(fcpl, &fs_strategy, &fs_persist, &fs_threshold);
+ H5Pget_file_space_page_size(fcpl, &fsp_size);
H5Pclose(fcpl);
#ifdef SHOW_FILE_DRIVER
fapl=h5_fileaccess();
@@ -1265,18 +1266,22 @@ dump_fcpl(hid_t fid)
PRINTSTREAM(rawoutstream, "%s %u\n","ISTORE_K", istore_ik);
indentation(dump_indent + COL);
- if(fs_strategy == H5F_FILE_SPACE_ALL_PERSIST) {
- PRINTSTREAM(rawoutstream, "%s %s\n", "FILE_SPACE_STRATEGY", "H5F_FILE_SPACE_ALL_PERSIST");
- } else if(fs_strategy == H5F_FILE_SPACE_ALL) {
- PRINTSTREAM(rawoutstream, "%s %s\n", "FILE_SPACE_STRATEGY", "H5F_FILE_SPACE_ALL");
- } else if(fs_strategy == H5F_FILE_SPACE_AGGR_VFD) {
- PRINTSTREAM(rawoutstream, "%s %s\n", "FILE_SPACE_STRATEGY", "H5F_FILE_SPACE_AGGR_VFD");
- } else if(fs_strategy == H5F_FILE_SPACE_VFD) {
- PRINTSTREAM(rawoutstream, "%s %s\n", "FILE_SPACE_STRATEGY", "H5F_FILE_SPACE_VFD");
+ if(fs_strategy == H5F_FSPACE_STRATEGY_FSM_AGGR) {
+ PRINTSTREAM(rawoutstream, "%s %s\n", "FILE_SPACE_STRATEGY", "H5F_FSPACE_STRATEGY_FSM_AGGR");
+ } else if(fs_strategy == H5F_FSPACE_STRATEGY_PAGE) {
+ PRINTSTREAM(rawoutstream, "%s %s\n", "FILE_SPACE_STRATEGY", "H5F_FSPACE_STRATEGY_PAGE");
+ } else if(fs_strategy == H5F_FSPACE_STRATEGY_AGGR) {
+ PRINTSTREAM(rawoutstream, "%s %s\n", "FILE_SPACE_STRATEGY", "H5F_FSPACE_STRATEGY_AGGR");
+ } else if(fs_strategy == H5F_FSPACE_STRATEGY_NONE) {
+ PRINTSTREAM(rawoutstream, "%s %s\n", "FILE_SPACE_STRATEGY", "H5F_FSPACE_STRATEGY_NONE");
} else
PRINTSTREAM(rawoutstream, "%s %s\n", "FILE_SPACE_STRATEGY", "Unknown strategy");
indentation(dump_indent + COL);
- PRINTSTREAM(rawoutstream, "%s %Hu\n","FREE_SPACE_THRESHOLD", fs_threshold);
+ PRINTSTREAM(rawoutstream, "%s %s\n","FREE_SPACE_PERSIST", fs_persist ? "TRUE" : "FALSE");
+ indentation(dump_indent + COL);
+ PRINTSTREAM(rawoutstream, "%s %Hu\n","FREE_SPACE_SECTION_THRESHOLD", fs_threshold);
+ indentation(dump_indent + COL);
+ PRINTSTREAM(rawoutstream, "%s %Hu\n","FILE_SPACE_PAGE_SIZE", fsp_size);
/*-------------------------------------------------------------------------
* USER_BLOCK
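[Editor's note] The dump_fcpl() hunks above migrate from the old H5Pget_file_space() call and H5F_FILE_SPACE_* strategies to the 1.10-style H5F_fspace_strategy_t enum, querying persistence, the free-space section threshold, and the new file-space page size separately. A sketch of that query pattern using the same calls the patch introduces (assumes `fid` is an open file; error checking omitted):

    #include "hdf5.h"
    #include <stdio.h>

    static void
    print_file_space_info(hid_t fid)
    {
        hid_t                 fcpl = H5Fget_create_plist(fid);
        H5F_fspace_strategy_t strategy;
        hbool_t               persist;
        hsize_t               threshold;
        hsize_t               page_size;

        H5Pget_file_space_strategy(fcpl, &strategy, &persist, &threshold);
        H5Pget_file_space_page_size(fcpl, &page_size);

        printf("strategy=%d persist=%d threshold=%llu page_size=%llu\n",
               (int)strategy, (int)persist,
               (unsigned long long)threshold, (unsigned long long)page_size);

        H5Pclose(fcpl);
    }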
diff --git a/tools/src/h5dump/h5dump_ddl.h b/tools/src/h5dump/h5dump_ddl.h
index 2b3f61e..ae01086 100644
--- a/tools/src/h5dump/h5dump_ddl.h
+++ b/tools/src/h5dump/h5dump_ddl.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef H5DUMP_DDL_H__
diff --git a/tools/src/h5dump/h5dump_defines.h b/tools/src/h5dump/h5dump_defines.h
index 2be2dcc..7a9d4c0 100644
--- a/tools/src/h5dump/h5dump_defines.h
+++ b/tools/src/h5dump/h5dump_defines.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef H5DUMP_DEFINES_H__
#define H5DUMP_DEFINES_H__
diff --git a/tools/src/h5dump/h5dump_extern.h b/tools/src/h5dump/h5dump_extern.h
index 8fef1b9..00d3bd2 100644
--- a/tools/src/h5dump/h5dump_extern.h
+++ b/tools/src/h5dump/h5dump_extern.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef H5DUMP_EXTERN_H__
#define H5DUMP_EXTERN_H__
@@ -79,10 +77,6 @@ extern int include_attrs; /* Display attributes */
extern int display_vds_first; /* vds display to first missing */
extern int vds_gap_size; /* vds skip missing files */
-/* sort parameters */
-extern H5_index_t sort_by; /*sort_by [creation_order | name] */
-extern H5_iter_order_t sort_order; /*sort_order [ascending | descending] */
-
#define PACKED_BITS_MAX 8 /* Maximum number of packed-bits to display */
#define PACKED_BITS_SIZE_MAX 8*sizeof(long long) /* Maximum bits size of integer types of packed-bits */
/* mask list for packed bits */
diff --git a/tools/src/h5dump/h5dump_xml.c b/tools/src/h5dump/h5dump_xml.c
index 5290c3d..1c3978d 100644
--- a/tools/src/h5dump/h5dump_xml.c
+++ b/tools/src/h5dump/h5dump_xml.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <stdio.h>
#include <stdlib.h>
@@ -69,7 +67,7 @@ static h5tool_format_t xml_dataformat = {
"", /*cmpd_pre */
"", /*cmpd_suf */
"", /*cmpd_end */
- "", /*cmpd_listv */
+ NULL, /*cmpd_listv */
" ", /*vlen_sep */
" ", /*vlen_pre */
@@ -110,7 +108,7 @@ static h5tool_format_t xml_dataformat = {
/* internal functions */
-static int xml_name_to_XID(const char *, char *, int , int );
+static int xml_name_to_XID(const char *, char *, int , int );
/* internal functions used by XML option */
static void xml_print_datatype(hid_t, unsigned);
@@ -124,34 +122,26 @@ static char *xml_escape_the_name(const char *);
* Function: xml_dump_all_cb
*
* Purpose: function callback called by H5Literate,
- * displays everything in the specified object
+ * displays everything in the specified object
*
* Return: Success: SUCCEED
*
* Failure: FAIL
*
* Programmer: Ruey-Hsia Li
- *
- * Modifications:
- * RMcG, November 2000
- * Added XML support. Also, optionally checks the op_data argument
- *
- * PVN, May 2008
- * Dump external links
- *
*-------------------------------------------------------------------------
*/
static herr_t
xml_dump_all_cb(hid_t group, const char *name, const H5L_info_t *linfo, void H5_ATTR_UNUSED *op_data)
{
- hid_t obj;
- herr_t ret = SUCCEED;
- char *obj_path = NULL; /* Full path of object */
- h5tools_str_t buffer; /* string into which to render */
- h5tools_context_t ctx; /* print context */
+ hid_t obj;
+ herr_t ret = SUCCEED;
+ char *obj_path = NULL; /* Full path of object */
+ h5tools_str_t buffer; /* string into which to render */
+ h5tools_context_t ctx; /* print context */
h5tool_format_t *outputformat = &xml_dataformat;
h5tool_format_t string_dataformat;
- hsize_t curr_pos = 0; /* total data element position */
+ hsize_t curr_pos = 0; /* total data element position */
/* setup */
HDmemset(&buffer, 0, sizeof(h5tools_str_t));
@@ -159,7 +149,7 @@ xml_dump_all_cb(hid_t group, const char *name, const H5L_info_t *linfo, void H5_
HDmemset(&ctx, 0, sizeof(ctx));
ctx.indent_level = dump_indent / COL;
ctx.cur_column = dump_indent;
-
+
string_dataformat = *outputformat;
if (fp_format) {
@@ -182,7 +172,7 @@ xml_dump_all_cb(hid_t group, const char *name, const H5L_info_t *linfo, void H5_
if(!obj_path) {
ret = FAIL;
goto done;
- }
+ }
HDstrcpy(obj_path, prefix);
HDstrcat(obj_path, "/");
@@ -270,7 +260,7 @@ xml_dump_all_cb(hid_t group, const char *name, const H5L_info_t *linfo, void H5_
ret = FAIL;
H5Dclose(obj);
goto done;
- }
+ }
else if(found_obj->displayed) {
/* the XML version */
char *t_obj_path = xml_escape_the_name(obj_path);
@@ -332,14 +322,14 @@ xml_dump_all_cb(hid_t group, const char *name, const H5L_info_t *linfo, void H5_
H5Dclose(obj);
goto done;
- }
+ }
else
found_obj->displayed = TRUE;
} /* end if */
dump_function_table->dump_dataset_function(obj, name, NULL);
H5Dclose(obj);
- }
+ }
else {
error_msg("unable to dump dataset \"%s\"\n", name);
h5tools_setstatus(EXIT_FAILURE);
@@ -352,7 +342,7 @@ xml_dump_all_cb(hid_t group, const char *name, const H5L_info_t *linfo, void H5_
error_msg("unable to dump datatype \"%s\"\n", name);
h5tools_setstatus(EXIT_FAILURE);
ret = FAIL;
- }
+ }
else {
dump_function_table->dump_named_datatype_function(obj, name);
H5Tclose(obj);
@@ -379,7 +369,7 @@ xml_dump_all_cb(hid_t group, const char *name, const H5L_info_t *linfo, void H5_
error_msg("unable to get link value\n");
h5tools_setstatus(EXIT_FAILURE);
ret = FAIL;
- }
+ }
else {
/* print the value of a soft link */
/* XML */
@@ -428,7 +418,7 @@ xml_dump_all_cb(hid_t group, const char *name, const H5L_info_t *linfo, void H5_
parentxid, /* Parents */
t_prefix); /* H5ParentPaths */
h5tools_render_element(rawoutstream, outputformat, &ctx, &buffer, &curr_pos, (size_t)outputformat->line_ncols, (hsize_t)0, (hsize_t)0);
- }
+ }
else {
/* dangling link -- omit from xml attributes */
ctx.need_prefix = TRUE;
@@ -512,7 +502,7 @@ xml_dump_all_cb(hid_t group, const char *name, const H5L_info_t *linfo, void H5_
parentxid, /* Parents */
t_prefix); /* H5ParentPaths */
h5tools_render_element(rawoutstream, outputformat, &ctx, &buffer, &curr_pos, (size_t)outputformat->line_ncols, (hsize_t)0, (hsize_t)0);
-
+
HDfree(t_prefix);
HDfree(t_name);
HDfree(t_filename);
@@ -558,7 +548,7 @@ xml_dump_all_cb(hid_t group, const char *name, const H5L_info_t *linfo, void H5_
parentxid, /* Parents */
t_prefix); /* H5ParentPaths */
h5tools_render_element(rawoutstream, outputformat, &ctx, &buffer, &curr_pos, (size_t)outputformat->line_ncols, (hsize_t)0, (hsize_t)0);
-
+
HDfree(t_prefix);
HDfree(t_name);
HDfree(t_obj_path);
@@ -600,18 +590,18 @@ xml_name_to_XID(const char *str , char *outstr, int outlen, int gen)
objno = ref_path_table_gen_fake(str);
sprintf(outstr, "xid_"H5_PRINTF_HADDR_FMT, objno);
return 0;
- }
+ }
else {
return 1;
}
}
- }
+ }
else {
if (gen) {
objno = ref_path_table_gen_fake(str);
sprintf(outstr, "xid_"H5_PRINTF_HADDR_FMT, objno);
return 0;
- }
+ }
else {
return 1;
}
@@ -638,9 +628,6 @@ static const char *apos = "&apos;";
* Return: The revised string.
*
* Programmer: REMcG
- *
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
static char *
@@ -693,23 +680,23 @@ xml_escape_the_name(const char *str)
if (*cp == '\'') {
HDstrncpy(ncp, apos, ncp_len);
esc_len = HDstrlen(apos);
- }
+ }
else if (*cp == '<') {
HDstrncpy(ncp, lt, ncp_len);
esc_len = HDstrlen(lt);
- }
+ }
else if (*cp == '>') {
HDstrncpy(ncp, gt, ncp_len);
esc_len = HDstrlen(gt);
- }
+ }
else if (*cp == '\"') {
HDstrncpy(ncp, quote, ncp_len);
esc_len = HDstrlen(quote);
- }
+ }
else if (*cp == '&') {
HDstrncpy(ncp, amp, ncp_len);
esc_len = HDstrlen(amp);
- }
+ }
else {
*ncp = *cp;
esc_len = 1;
@@ -732,9 +719,6 @@ xml_escape_the_name(const char *str)
* Return: The revised string.
*
* Programmer: REMcG
- *
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
static char *
@@ -840,9 +824,6 @@ xml_escape_the_string(const char *str, int slen)
* Return: void
*
* Programmer: REMcG
- *
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
static void
@@ -867,12 +848,12 @@ xml_print_datatype(hid_t type, unsigned in_group)
size_t mpos;
size_t msize;
int nmembs;
- htri_t is_vlstr=FALSE;
- h5tools_str_t buffer; /* string into which to render */
- h5tools_context_t ctx; /* print context */
- h5tool_format_t *outputformat = &xml_dataformat;
- h5tool_format_t string_dataformat;
- hsize_t curr_pos = 0; /* total data element position */
+ htri_t is_vlstr = FALSE;
+ h5tools_str_t buffer; /* string into which to render */
+ h5tools_context_t ctx; /* print context */
+ h5tool_format_t *outputformat = &xml_dataformat;
+ h5tool_format_t string_dataformat;
+ hsize_t curr_pos = 0; /* total data element position */
/* setup */
HDmemset(&buffer, 0, sizeof(h5tools_str_t));
@@ -880,7 +861,7 @@ xml_print_datatype(hid_t type, unsigned in_group)
HDmemset(&ctx, 0, sizeof(ctx));
ctx.indent_level = dump_indent / COL;
ctx.cur_column = dump_indent;
-
+
string_dataformat = *outputformat;
if (fp_format) {
@@ -925,7 +906,7 @@ xml_print_datatype(hid_t type, unsigned in_group)
h5tools_str_append(&buffer, "<%sNamedDataTypePtr OBJ-XID=\"/%s\"/>",
xmlnsprefix, dtxid);
h5tools_render_element(rawoutstream, outputformat, &ctx, &buffer, &curr_pos, (size_t)outputformat->line_ncols, (hsize_t)0, (hsize_t)0);
- }
+ }
else {
/* point to the NDT by name */
char *t_objname = xml_escape_the_name(found_obj->objname);
@@ -941,7 +922,7 @@ xml_print_datatype(hid_t type, unsigned in_group)
HDfree(t_objname);
}
HDfree(dtxid);
- }
+ }
else {
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
@@ -952,7 +933,7 @@ xml_print_datatype(hid_t type, unsigned in_group)
h5tools_render_element(rawoutstream, outputformat, &ctx, &buffer, &curr_pos, (size_t)outputformat->line_ncols, (hsize_t)0, (hsize_t)0);
h5tools_setstatus(EXIT_FAILURE);
}
- }
+ }
else {
switch (H5Tget_class(type)) {
case H5T_INTEGER:
@@ -990,9 +971,9 @@ xml_print_datatype(hid_t type, unsigned in_group)
h5tools_str_append(&buffer, "ERROR_UNKNOWN");
break;
} /* end switch */
-
+
h5tools_str_append(&buffer, "\" Sign=\"");
-
+
switch (sgn) {
case H5T_SGN_NONE:
h5tools_str_append(&buffer, "false");
@@ -1006,7 +987,7 @@ xml_print_datatype(hid_t type, unsigned in_group)
h5tools_str_append(&buffer, "ERROR_UNKNOWN");
break;
} /* end switch */
-
+
h5tools_str_append(&buffer, "\" Size=\"");
sz = H5Tget_size(type);
h5tools_str_append(&buffer, "%lu", (unsigned long)sz);
@@ -1045,7 +1026,7 @@ xml_print_datatype(hid_t type, unsigned in_group)
/* Render the element */
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer, "<%sFloatType ByteOrder=\"",xmlnsprefix);
-
+
switch (ord) {
case H5T_ORDER_LE:
h5tools_str_append(&buffer, "LE");
@@ -1062,7 +1043,7 @@ xml_print_datatype(hid_t type, unsigned in_group)
default:
h5tools_str_append(&buffer, "ERROR_UNKNOWN");
} /* end switch */
-
+
h5tools_str_append(&buffer, "\" Size=\"");
sz = H5Tget_size(type);
h5tools_str_append(&buffer, "%lu", (unsigned long)sz);
@@ -1184,7 +1165,7 @@ xml_print_datatype(hid_t type, unsigned in_group)
/* Render the element */
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer, "<%sBitfieldType ByteOrder=\"",xmlnsprefix);
-
+
switch (ord) {
case H5T_ORDER_LE:
h5tools_str_append(&buffer, "LE");
@@ -1199,7 +1180,7 @@ xml_print_datatype(hid_t type, unsigned in_group)
default:
h5tools_str_append(&buffer, "ERROR_UNKNOWN");
} /* end switch */
-
+
size = H5Tget_size(type);
h5tools_str_append(&buffer, "\" Size=\"%lu\"/>", (unsigned long)size);
h5tools_render_element(rawoutstream, outputformat, &ctx, &buffer, &curr_pos, (size_t)outputformat->line_ncols, (hsize_t)0, (hsize_t)0);
@@ -1561,19 +1542,16 @@ xml_print_datatype(hid_t type, unsigned in_group)
* Return: void
*
* Programmer: REMcG
- *
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
void
xml_dump_datatype(hid_t type)
{
- h5tools_str_t buffer; /* string into which to render */
- h5tools_context_t ctx; /* print context */
+ h5tools_str_t buffer; /* string into which to render */
+ h5tools_context_t ctx; /* print context */
h5tool_format_t *outputformat = &xml_dataformat;
h5tool_format_t string_dataformat;
- hsize_t curr_pos = 0; /* total data element position */
+ hsize_t curr_pos = 0; /* total data element position */
/* setup */
HDmemset(&buffer, 0, sizeof(h5tools_str_t));
@@ -1581,7 +1559,7 @@ xml_dump_datatype(hid_t type)
HDmemset(&ctx, 0, sizeof(ctx));
ctx.indent_level = dump_indent / COL;
ctx.cur_column = dump_indent;
-
+
string_dataformat = *outputformat;
if (fp_format) {
@@ -1631,7 +1609,7 @@ xml_dump_datatype(hid_t type)
h5tools_str_append(&buffer, "<%sNamedDataTypePtr OBJ-XID=\"%s\"/>",
xmlnsprefix, dtxid);
h5tools_render_element(rawoutstream, outputformat, &ctx, &buffer, &curr_pos, (size_t)outputformat->line_ncols, (hsize_t)0, (hsize_t)0);
- }
+ }
else {
/* pointer to a named datatype already in XML */
char *t_objname = xml_escape_the_name(found_obj->objname);
@@ -1647,7 +1625,7 @@ xml_dump_datatype(hid_t type)
HDfree(t_objname);
}
HDfree(dtxid);
- }
+ }
else {
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
@@ -1694,25 +1672,22 @@ xml_dump_datatype(hid_t type)
* Return: void
*
* Programmer: REMcG
- *
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
void
xml_dump_dataspace(hid_t space)
{
- hsize_t size[H5DUMP_MAX_RANK];
- hsize_t maxsize[H5DUMP_MAX_RANK];
- int i;
- h5tools_str_t buffer; /* string into which to render */
- h5tools_context_t ctx; /* print context */
+ hsize_t size[H5DUMP_MAX_RANK];
+ hsize_t maxsize[H5DUMP_MAX_RANK];
+ int i;
+ h5tools_str_t buffer; /* string into which to render */
+ h5tools_context_t ctx; /* print context */
h5tool_format_t *outputformat = &xml_dataformat;
h5tool_format_t string_dataformat;
- hsize_t curr_pos = 0; /* total data element position */
-
- int ndims = H5Sget_simple_extent_dims(space, size, maxsize);
- H5S_class_t space_type = H5Sget_simple_extent_type(space);
+ hsize_t curr_pos = 0; /* total data element position */
+
+ int ndims = H5Sget_simple_extent_dims(space, size, maxsize);
+ H5S_class_t space_type = H5Sget_simple_extent_type(space);
/* setup */
HDmemset(&buffer, 0, sizeof(h5tools_str_t));
@@ -1720,7 +1695,7 @@ xml_dump_dataspace(hid_t space)
HDmemset(&ctx, 0, sizeof(ctx));
ctx.indent_level = dump_indent / COL;
ctx.cur_column = dump_indent;
-
+
string_dataformat = *outputformat;
if (fp_format) {
@@ -1786,7 +1761,7 @@ xml_dump_dataspace(hid_t space)
h5tools_str_append(&buffer, "<%sDimension DimSize=\"%" H5_PRINTF_LL_WIDTH "u\" MaxDimSize=\"UNLIMITED\"/>",
xmlnsprefix,size[i]);
h5tools_render_element(rawoutstream, outputformat, &ctx, &buffer, &curr_pos, (size_t)outputformat->line_ncols, (hsize_t)0, (hsize_t)0);
- }
+ }
else if (maxsize[i] == (hsize_t) 0) {
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
@@ -1796,7 +1771,7 @@ xml_dump_dataspace(hid_t space)
h5tools_str_append(&buffer, "<%sDimension DimSize=\"%" H5_PRINTF_LL_WIDTH "u\" MaxDimSize=\"%" H5_PRINTF_LL_WIDTH "u\"/>",
xmlnsprefix,size[i], size[i]);
h5tools_render_element(rawoutstream, outputformat, &ctx, &buffer, &curr_pos, (size_t)outputformat->line_ncols, (hsize_t)0, (hsize_t)0);
- }
+ }
else {
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
@@ -1870,9 +1845,6 @@ xml_dump_dataspace(hid_t space)
* Return: void
*
* Programmer: REMcG
- *
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
void
@@ -1888,19 +1860,19 @@ xml_dump_data(hid_t obj_id, int obj_data, struct subset_t H5_ATTR_UNUSED * sset,
int status = -1;
void *buf = NULL;
hsize_t curr_pos = 0; /* total data element position */
- h5tools_str_t buffer; /* string into which to render */
- h5tools_context_t ctx; /* print context */
+ h5tools_str_t buffer; /* string into which to render */
+ h5tools_context_t ctx; /* print context */
h5tool_format_t *outputformat = &xml_dataformat;
h5tool_format_t string_dataformat;
HDmemset(&ctx, 0, sizeof(ctx));
ctx.indent_level = dump_indent / COL;
ctx.cur_column = dump_indent;
-
+
/* Print all the values. */
/* setup */
HDmemset(&buffer, 0, sizeof(h5tools_str_t));
-
+
string_dataformat = *outputformat;
if (fp_format) {
@@ -1924,7 +1896,7 @@ xml_dump_data(hid_t obj_id, int obj_data, struct subset_t H5_ATTR_UNUSED * sset,
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
-
+
/* Render the element */
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer, "<%sData>", xmlnsprefix);
@@ -1934,14 +1906,14 @@ xml_dump_data(hid_t obj_id, int obj_data, struct subset_t H5_ATTR_UNUSED * sset,
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
-
+
/* Render the element */
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer, "<%sDataFromFile>", xmlnsprefix);
h5tools_render_element(rawoutstream, outputformat, &ctx, &buffer, &curr_pos, (size_t)outputformat->line_ncols, (hsize_t)0, (hsize_t)0);
ctx.indent_level--;
-
+
dump_indent += COL;
ctx.indent_level++;
@@ -1959,7 +1931,7 @@ xml_dump_data(hid_t obj_id, int obj_data, struct subset_t H5_ATTR_UNUSED * sset,
datactx.cur_column = ctx.cur_column;
status = h5tools_dump_dset(rawoutstream, outputformat, &datactx, obj_id, NULL);
}
- }
+ }
else {
/* Attribute data */
type = H5Aget_type(obj_id);
@@ -1972,15 +1944,15 @@ xml_dump_data(hid_t obj_id, int obj_data, struct subset_t H5_ATTR_UNUSED * sset,
*/
status = xml_print_refs(obj_id, ATTRIBUTE_DATA);
H5Tclose(type);
- }
+ }
else if (H5Tget_class(type) == H5T_STRING) {
status = xml_print_strs(obj_id, ATTRIBUTE_DATA);
- }
+ }
else { /* all other data */
/* VL data special information */
unsigned int vl_data = 0; /* contains VL datatypes */
-
- p_type = h5tools_get_native_type(type);
+
+ p_type = H5Tget_native_type(type, H5T_DIR_DEFAULT);
/* Check if we have VL data in the dataset's datatype */
if (h5tools_detect_vlen(p_type) == TRUE)
@@ -1997,7 +1969,7 @@ xml_dump_data(hid_t obj_id, int obj_data, struct subset_t H5_ATTR_UNUSED * sset,
buf = HDmalloc((size_t)(nelmts * MAX(H5Tget_size(type), H5Tget_size(p_type))));
HDassert(buf);
-
+
if (H5Aread(obj_id, p_type, buf) >= 0) {
h5tools_context_t datactx;
HDmemset(&datactx, 0, sizeof(datactx));
@@ -2022,7 +1994,7 @@ xml_dump_data(hid_t obj_id, int obj_data, struct subset_t H5_ATTR_UNUSED * sset,
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
-
+
/* Render the element */
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer, "Unable to print data.");
@@ -2038,7 +2010,7 @@ xml_dump_data(hid_t obj_id, int obj_data, struct subset_t H5_ATTR_UNUSED * sset,
ctx.indent_level++;
ctx.need_prefix = TRUE;
-
+
/* Render the element */
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer, "</%sDataFromFile>",xmlnsprefix);
@@ -2048,7 +2020,7 @@ xml_dump_data(hid_t obj_id, int obj_data, struct subset_t H5_ATTR_UNUSED * sset,
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
-
+
/* Render the element */
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer, "</%sData>", xmlnsprefix);
@@ -2065,9 +2037,6 @@ xml_dump_data(hid_t obj_id, int obj_data, struct subset_t H5_ATTR_UNUSED * sset,
* Return: herr_t
*
* Programmer: REMcG
- *
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
herr_t
@@ -2079,11 +2048,11 @@ xml_dump_attr(hid_t attr, const char *attr_name, const H5A_info_t H5_ATTR_UNUSED
hid_t space = -1;
H5S_class_t space_type;
hsize_t curr_pos = 0; /* total data element position */
- h5tools_str_t buffer; /* string into which to render */
- h5tools_context_t ctx; /* print context */
+ h5tools_str_t buffer; /* string into which to render */
+ h5tools_context_t ctx; /* print context */
h5tool_format_t *outputformat = &xml_dataformat;
h5tool_format_t string_dataformat;
-
+
char *t_aname = xml_escape_the_name(attr_name);
/* setup */
@@ -2092,7 +2061,7 @@ xml_dump_attr(hid_t attr, const char *attr_name, const H5A_info_t H5_ATTR_UNUSED
HDmemset(&ctx, 0, sizeof(ctx));
ctx.indent_level = dump_indent / COL;
ctx.cur_column = dump_indent;
-
+
string_dataformat = *outputformat;
if (fp_format) {
@@ -2112,7 +2081,7 @@ xml_dump_attr(hid_t attr, const char *attr_name, const H5A_info_t H5_ATTR_UNUSED
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
-
+
/* Render the element */
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer, "<%sAttribute Name=\"%s\">", xmlnsprefix, t_aname);
@@ -2148,7 +2117,7 @@ xml_dump_attr(hid_t attr, const char *attr_name, const H5A_info_t H5_ATTR_UNUSED
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
-
+
/* Render the element */
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer, "<%sData>", xmlnsprefix);
@@ -2156,7 +2125,7 @@ xml_dump_attr(hid_t attr, const char *attr_name, const H5A_info_t H5_ATTR_UNUSED
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
-
+
/* Render the element */
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer, "<!-- Time data not yet implemented. -->");
@@ -2164,7 +2133,7 @@ xml_dump_attr(hid_t attr, const char *attr_name, const H5A_info_t H5_ATTR_UNUSED
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
-
+
/* Render the element */
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer, "<%sNoData/>", xmlnsprefix);
@@ -2172,7 +2141,7 @@ xml_dump_attr(hid_t attr, const char *attr_name, const H5A_info_t H5_ATTR_UNUSED
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
-
+
/* Render the element */
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer, "<hdf5:Data>");
@@ -2180,7 +2149,7 @@ xml_dump_attr(hid_t attr, const char *attr_name, const H5A_info_t H5_ATTR_UNUSED
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
-
+
/* Render the element */
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer, "</%sData>", xmlnsprefix);
@@ -2193,7 +2162,7 @@ xml_dump_attr(hid_t attr, const char *attr_name, const H5A_info_t H5_ATTR_UNUSED
case H5T_COMPOUND:
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
-
+
/* Render the element */
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer, "<!-- Note: format of compound data not specified -->");
@@ -2204,7 +2173,7 @@ xml_dump_attr(hid_t attr, const char *attr_name, const H5A_info_t H5_ATTR_UNUSED
case H5T_REFERENCE:
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
-
+
/* Render the element */
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer, "<%sData>", xmlnsprefix);
@@ -2212,7 +2181,7 @@ xml_dump_attr(hid_t attr, const char *attr_name, const H5A_info_t H5_ATTR_UNUSED
if (!H5Tequal(type, H5T_STD_REF_OBJ)) {
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
-
+
/* Render the element */
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer, "<!-- Note: Region references not supported -->");
@@ -2220,7 +2189,7 @@ xml_dump_attr(hid_t attr, const char *attr_name, const H5A_info_t H5_ATTR_UNUSED
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
-
+
/* Render the element */
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer, "<%sNoData />", xmlnsprefix);
@@ -2229,7 +2198,7 @@ xml_dump_attr(hid_t attr, const char *attr_name, const H5A_info_t H5_ATTR_UNUSED
else {
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
-
+
/* Render the element */
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer, "<%sDataFromFile>", xmlnsprefix);
@@ -2239,7 +2208,7 @@ xml_dump_attr(hid_t attr, const char *attr_name, const H5A_info_t H5_ATTR_UNUSED
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
-
+
/* Render the element */
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer, "</%sDataFromFile>", xmlnsprefix);
@@ -2248,7 +2217,7 @@ xml_dump_attr(hid_t attr, const char *attr_name, const H5A_info_t H5_ATTR_UNUSED
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
-
+
/* Render the element */
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer, "</%sData>", xmlnsprefix);
@@ -2258,7 +2227,7 @@ xml_dump_attr(hid_t attr, const char *attr_name, const H5A_info_t H5_ATTR_UNUSED
case H5T_VLEN:
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
-
+
/* Render the element */
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer, "<!-- Note: format of VL data not specified -->");
@@ -2273,7 +2242,7 @@ xml_dump_attr(hid_t attr, const char *attr_name, const H5A_info_t H5_ATTR_UNUSED
default:
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
-
+
/* Render the element */
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer, "<%sData>", xmlnsprefix);
@@ -2281,7 +2250,7 @@ xml_dump_attr(hid_t attr, const char *attr_name, const H5A_info_t H5_ATTR_UNUSED
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
-
+
/* Render the element */
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer, "<!-- Unknown datatype: %d -->", H5Tget_class(type));
@@ -2289,7 +2258,7 @@ xml_dump_attr(hid_t attr, const char *attr_name, const H5A_info_t H5_ATTR_UNUSED
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
-
+
/* Render the element */
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer, "<%sNoData/>", xmlnsprefix);
@@ -2297,7 +2266,7 @@ xml_dump_attr(hid_t attr, const char *attr_name, const H5A_info_t H5_ATTR_UNUSED
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
-
+
/* Render the element */
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer, "</%sData>", xmlnsprefix);
@@ -2310,7 +2279,7 @@ xml_dump_attr(hid_t attr, const char *attr_name, const H5A_info_t H5_ATTR_UNUSED
* Or dataspace is H5S_NULL. */
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
-
+
/* Render the element */
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer, "<%sData>", xmlnsprefix);
@@ -2320,7 +2289,7 @@ xml_dump_attr(hid_t attr, const char *attr_name, const H5A_info_t H5_ATTR_UNUSED
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
-
+
/* Render the element */
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer, "<%sNoData/>", xmlnsprefix);
@@ -2330,7 +2299,7 @@ xml_dump_attr(hid_t attr, const char *attr_name, const H5A_info_t H5_ATTR_UNUSED
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
-
+
/* Render the element */
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer, "</%sData>", xmlnsprefix);
@@ -2345,7 +2314,7 @@ xml_dump_attr(hid_t attr, const char *attr_name, const H5A_info_t H5_ATTR_UNUSED
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
-
+
/* Render the element */
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer, "</%sAttribute>", xmlnsprefix);
@@ -2360,7 +2329,7 @@ xml_dump_attr(hid_t attr, const char *attr_name, const H5A_info_t H5_ATTR_UNUSED
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
-
+
/* Render the element */
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer, "<!-- h5dump error: unable to open attribute. -->");
@@ -2370,14 +2339,14 @@ xml_dump_attr(hid_t attr, const char *attr_name, const H5A_info_t H5_ATTR_UNUSED
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
-
+
/* Render the element */
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer, "</%sAttribute>", xmlnsprefix);
h5tools_render_element(rawoutstream, outputformat, &ctx, &buffer, &curr_pos, (size_t)outputformat->line_ncols, (hsize_t)0, (hsize_t)0);
h5tools_str_close(&buffer);
-
+
h5tools_setstatus(EXIT_FAILURE);
return FAIL;
}
@@ -2391,17 +2360,14 @@ xml_dump_attr(hid_t attr, const char *attr_name, const H5A_info_t H5_ATTR_UNUSED
* Return: herr_t
*
* Programmer: REMcG
- *
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
void
xml_dump_named_datatype(hid_t type, const char *name)
{
hsize_t curr_pos = 0; /* total data element position */
- h5tools_str_t buffer; /* string into which to render */
- h5tools_context_t ctx; /* print context */
+ h5tools_str_t buffer; /* string into which to render */
+ h5tools_context_t ctx; /* print context */
h5tool_format_t *outputformat = &xml_dataformat;
h5tool_format_t string_dataformat;
char *tmp;
@@ -2422,7 +2388,7 @@ xml_dump_named_datatype(hid_t type, const char *name)
HDmemset(&ctx, 0, sizeof(ctx));
ctx.indent_level = dump_indent / COL;
ctx.cur_column = dump_indent;
-
+
string_dataformat = *outputformat;
if (fp_format) {
@@ -2460,7 +2426,7 @@ xml_dump_named_datatype(hid_t type, const char *name)
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
-
+
/* Render the element */
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer, "<%sNamedDataType Name=\"%s\" OBJ-XID=\"%s\" "
@@ -2469,13 +2435,13 @@ xml_dump_named_datatype(hid_t type, const char *name)
name, dtxid,
parentxid, HDstrcmp(prefix,"") ? t_prefix : "/");
h5tools_render_element(rawoutstream, outputformat, &ctx, &buffer, &curr_pos, (size_t)outputformat->line_ncols, (hsize_t)0, (hsize_t)0);
- }
+ }
else {
H5O_info_t oinfo; /* Object info */
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
-
+
/* Render the element */
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer, "<%sNamedDataType Name=\"%s\" OBJ-XID=\"%s\" "
@@ -2498,7 +2464,7 @@ xml_dump_named_datatype(hid_t type, const char *name)
error_msg("internal error (file %s:line %d)\n", __FILE__, __LINE__);
h5tools_setstatus(EXIT_FAILURE);
goto done;
- }
+ }
else if(found_obj->displayed) {
/* We have already printed this named datatype, print it as a
* NamedDatatypePtr
@@ -2507,29 +2473,29 @@ xml_dump_named_datatype(hid_t type, const char *name)
char *t_objname = xml_escape_the_name(found_obj->objname);
ctx.indent_level++;
-
+
xml_name_to_XID(found_obj->objname, pointerxid, (int)sizeof(pointerxid), 1);
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
-
+
/* Render the element */
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer, "<%sNamedDatatypePtr OBJ-XID=\"%s\" H5Path=\"%s\"/>", xmlnsprefix, pointerxid, t_objname);
h5tools_render_element(rawoutstream, outputformat, &ctx, &buffer, &curr_pos, (size_t)outputformat->line_ncols, (hsize_t)0, (hsize_t)0);
-
+
ctx.indent_level--;
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
-
+
/* Render the element */
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer, "</%sNamedDataType>", xmlnsprefix);
h5tools_render_element(rawoutstream, outputformat, &ctx, &buffer, &curr_pos, (size_t)outputformat->line_ncols, (hsize_t)0, (hsize_t)0);
HDfree(t_objname);
goto done;
- }
+ }
else
found_obj->displayed = TRUE;
}
@@ -2540,7 +2506,7 @@ xml_dump_named_datatype(hid_t type, const char *name)
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
-
+
/* Render the element */
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer, "<%sDataType>",xmlnsprefix);
@@ -2551,10 +2517,10 @@ xml_dump_named_datatype(hid_t type, const char *name)
xml_print_datatype(type,1);
ctx.indent_level--;
dump_indent -= COL;
-
+
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
-
+
/* Render the element */
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer, "</%sDataType>",xmlnsprefix);
@@ -2565,7 +2531,7 @@ xml_dump_named_datatype(hid_t type, const char *name)
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
-
+
/* Render the element */
h5tools_str_reset(&buffer);
h5tools_str_append(&buffer, "</%sNamedDataType>",xmlnsprefix);
@@ -2591,11 +2557,6 @@ done:
* Return: void
*
* Programmer: REMcG
- *
- * Modifications:
- * Pedro Vicente, October 9, 2007
- * added parameters to H5A(L)iterate to allow for other iteration orders
- *
*-------------------------------------------------------------------------
*/
void
@@ -2613,11 +2574,11 @@ xml_dump_group(hid_t gid, const char *name)
char *cp = NULL;
char *tmp = NULL;
char *par = NULL;
- h5tools_str_t buffer; /* string into which to render */
- h5tools_context_t ctx; /* print context */
- h5tool_format_t *outputformat = &xml_dataformat;
- h5tool_format_t string_dataformat;
- hsize_t curr_pos = 0; /* total data element position */
+ h5tools_str_t buffer; /* string into which to render */
+ h5tools_context_t ctx; /* print context */
+ h5tool_format_t *outputformat = &xml_dataformat;
+ h5tool_format_t string_dataformat;
+ hsize_t curr_pos = 0; /* total data element position */
if ((gcpl_id = H5Gget_create_plist(gid)) < 0) {
error_msg("error in getting group creation property list ID\n");
@@ -2647,7 +2608,7 @@ xml_dump_group(hid_t gid, const char *name)
HDmemset(&ctx, 0, sizeof(ctx));
ctx.indent_level = dump_indent / COL;
ctx.cur_column = dump_indent;
-
+
string_dataformat = *outputformat;
if (fp_format) {
@@ -2735,7 +2696,7 @@ xml_dump_group(hid_t gid, const char *name)
h5tools_render_element(rawoutstream, outputformat, &ctx, &buffer, &curr_pos, (size_t)outputformat->line_ncols, (hsize_t)0, (hsize_t)0);
HDfree(t_objname);
HDfree(par_name);
-
+
ctx.indent_level++;
t_objname = xml_escape_the_name(found_obj->objname);/* point to the NDT by name */
@@ -2753,7 +2714,7 @@ xml_dump_group(hid_t gid, const char *name)
xmlnsprefix,
ptrstr, t_objname, parentxid, par_name);
h5tools_render_element(rawoutstream, outputformat, &ctx, &buffer, &curr_pos, (size_t)outputformat->line_ncols, (hsize_t)0, (hsize_t)0);
-
+
ctx.indent_level--;
HDfree(t_objname);
@@ -2799,7 +2760,7 @@ xml_dump_group(hid_t gid, const char *name)
found_obj->displayed = TRUE;
/* 1. do all the attributes of the group */
-
+
ctx.indent_level++;
dump_indent += COL;
@@ -2882,7 +2843,7 @@ xml_dump_group(hid_t gid, const char *name)
HDfree(parentxid);
/* 1. do all the attributes of the group */
-
+
ctx.indent_level++;
dump_indent += COL;
@@ -2938,7 +2899,7 @@ xml_dump_group(hid_t gid, const char *name)
h5tools_render_element(rawoutstream, outputformat, &ctx, &buffer, &curr_pos, (size_t)outputformat->line_ncols, (hsize_t)0, (hsize_t)0);
h5tools_str_close(&buffer);
-
+
if(par)
HDfree(par);
if(tmp)
@@ -2953,27 +2914,24 @@ xml_dump_group(hid_t gid, const char *name)
* Return: void
*
* Programmer: REMcG
- *
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
static int
xml_print_refs(hid_t did, int source)
{
- herr_t e;
- hid_t type = -1;
- hid_t space = -1;
- hssize_t ssiz = -1;
- hsize_t i;
- size_t tsiz;
- hobj_ref_t *refbuf = NULL;
- char *buf = NULL;
- h5tools_str_t buffer; /* string into which to render */
- h5tools_context_t ctx; /* print context */
+ herr_t e;
+ hid_t type = -1;
+ hid_t space = -1;
+ hssize_t ssiz = -1;
+ hsize_t i;
+ size_t tsiz;
+ hobj_ref_t *refbuf = NULL;
+ char *buf = NULL;
+ h5tools_str_t buffer; /* string into which to render */
+ h5tools_context_t ctx; /* print context */
h5tool_format_t *outputformat = &xml_dataformat;
h5tool_format_t string_dataformat;
- hsize_t curr_pos = 0; /* total data element position */
+ hsize_t curr_pos = 0; /* total data element position */
if (source == DATASET_DATA) {
type = H5Dget_type(did);
@@ -3033,7 +2991,7 @@ xml_print_refs(hid_t did, int source)
HDmemset(&ctx, 0, sizeof(ctx));
ctx.indent_level = dump_indent / COL;
ctx.cur_column = dump_indent;
-
+
string_dataformat = *outputformat;
if (fp_format) {
@@ -3087,7 +3045,7 @@ xml_print_refs(hid_t did, int source)
H5Tclose(type);
H5Sclose(space);
return SUCCEED;
-
+
error:
if(buf)
HDfree(buf);
@@ -3107,30 +3065,27 @@ error:
* Return: void
*
* Programmer: REMcG
- *
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
static int
xml_print_strs(hid_t did, int source)
{
- herr_t e;
- hid_t type = -1;
- hid_t space = -1;
- hssize_t ssiz = -1;
- htri_t is_vlstr = FALSE;
- size_t tsiz = 0;
- hsize_t i;
- size_t str_size = 0;
- char *bp = NULL;
- char *onestring = NULL;
- void *buf = NULL;
- h5tools_str_t buffer; /* string into which to render */
- h5tools_context_t ctx; /* print context */
+ herr_t e;
+ hid_t type = -1;
+ hid_t space = -1;
+ hssize_t ssiz = -1;
+ htri_t is_vlstr = FALSE;
+ size_t tsiz = 0;
+ hsize_t i;
+ size_t str_size = 0;
+ char *bp = NULL;
+ char *onestring = NULL;
+ void *buf = NULL;
+ h5tools_str_t buffer; /* string into which to render */
+ h5tools_context_t ctx; /* print context */
h5tool_format_t *outputformat = &xml_dataformat;
h5tool_format_t string_dataformat;
- hsize_t curr_pos = 0; /* total data element position */
+ hsize_t curr_pos = 0; /* total data element position */
if (source == DATASET_DATA)
type = H5Dget_type(did);
@@ -3186,7 +3141,7 @@ xml_print_strs(hid_t did, int source)
HDmemset(&ctx, 0, sizeof(ctx));
ctx.indent_level = dump_indent / COL;
ctx.cur_column = dump_indent;
-
+
string_dataformat = *outputformat;
if (fp_format) {
@@ -3254,7 +3209,7 @@ xml_print_strs(hid_t did, int source)
H5Tclose(type);
H5Sclose(space);
return SUCCEED;
-
+
error:
if(buf)
HDfree(buf);
@@ -3275,26 +3230,23 @@ error:
* Return: void
*
* Programmer: REMcG
- *
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
static void
check_filters(hid_t dcpl)
{
- int nfilt;
- int i;
- H5Z_filter_t filter;
- char namebuf[120];
- size_t cd_nelmts = 20;
- unsigned int cd_values[20];
- unsigned int flags;
- h5tools_str_t buffer; /* string into which to render */
- h5tools_context_t ctx; /* print context */
+ int nfilt;
+ int i;
+ H5Z_filter_t filter;
+ char namebuf[120];
+ size_t cd_nelmts = 20;
+ unsigned int cd_values[20];
+ unsigned int flags;
+ h5tools_str_t buffer; /* string into which to render */
+ h5tools_context_t ctx; /* print context */
h5tool_format_t *outputformat = &xml_dataformat;
h5tool_format_t string_dataformat;
- hsize_t curr_pos = 0; /* total data element position */
+ hsize_t curr_pos = 0; /* total data element position */
/* setup */
HDmemset(&buffer, 0, sizeof(h5tools_str_t));
@@ -3302,7 +3254,7 @@ check_filters(hid_t dcpl)
HDmemset(&ctx, 0, sizeof(ctx));
ctx.indent_level = dump_indent / COL;
ctx.cur_column = dump_indent;
-
+
string_dataformat = *outputformat;
if (fp_format) {
@@ -3425,16 +3377,16 @@ check_filters(hid_t dcpl)
static void
xml_dump_fill_value(hid_t dcpl, hid_t type)
{
- size_t sz;
- size_t i;
- hsize_t space;
- void *buf;
- char *name;
- h5tools_str_t buffer; /* string into which to render */
- h5tools_context_t ctx; /* print context */
+ size_t sz;
+ size_t i;
+ hsize_t space;
+ void *buf;
+ char *name;
+ h5tools_str_t buffer; /* string into which to render */
+ h5tools_context_t ctx; /* print context */
h5tool_format_t *outputformat = &xml_dataformat;
h5tool_format_t string_dataformat;
- hsize_t curr_pos = 0; /* total data element position */
+ hsize_t curr_pos = 0; /* total data element position */
/* setup */
HDmemset(&buffer, 0, sizeof(h5tools_str_t));
@@ -3442,7 +3394,7 @@ xml_dump_fill_value(hid_t dcpl, hid_t type)
HDmemset(&ctx, 0, sizeof(ctx));
ctx.indent_level = dump_indent / COL;
ctx.cur_column = dump_indent;
-
+
string_dataformat = *outputformat;
if (fp_format) {
@@ -3454,7 +3406,7 @@ xml_dump_fill_value(hid_t dcpl, hid_t type)
string_dataformat.line_ncols = 65535;
string_dataformat.line_per_line = 1;
}
- else
+ else
string_dataformat.line_ncols = h5tools_nCols;
string_dataformat.do_escape = display_escape;
@@ -3766,11 +3718,6 @@ xml_dump_fill_value(hid_t dcpl, hid_t type)
* Return: void
*
* Programmer: REMcG
- *
- * Modifications:
- * Pedro Vicente, October 9, 2007
- * added parameters to H5Aiterate2 to allow for other iteration orders
- *
*-------------------------------------------------------------------------
*/
void
@@ -3793,11 +3740,11 @@ xml_dump_dataset(hid_t did, const char *name, struct subset_t H5_ATTR_UNUSED * s
char *t_prefix;
unsigned attr_crt_order_flags;
h5tools_str_t buffer; /* string into which to render */
- h5tools_context_t ctx; /* print context */
+ h5tools_context_t ctx; /* print context */
h5tool_format_t *outputformat = &xml_dataformat;
h5tool_format_t string_dataformat;
- hsize_t curr_pos = 0; /* total data element position */
-
+ hsize_t curr_pos = 0; /* total data element position */
+
char *rstr = (char*) HDmalloc((size_t)100);
char *pstr = (char*) HDmalloc((size_t)100);
@@ -3816,7 +3763,7 @@ xml_dump_dataset(hid_t did, const char *name, struct subset_t H5_ATTR_UNUSED * s
t_name = xml_escape_the_name(name);
t_tmp = xml_escape_the_name(tmp);
t_prefix = xml_escape_the_name(prefix);
-
+
string_dataformat = *outputformat;
if (fp_format) {
@@ -3927,7 +3874,7 @@ xml_dump_dataset(hid_t did, const char *name, struct subset_t H5_ATTR_UNUSED * s
ctx.indent_level--;
dump_indent -= COL;
-
+
ctx.need_prefix = TRUE;
h5tools_simple_prefix(rawoutstream, outputformat, &ctx, (hsize_t)0, 0);
@@ -4378,9 +4325,6 @@ xml_dump_dataset(hid_t did, const char *name, struct subset_t H5_ATTR_UNUSED * s
* Return: void
*
* Programmer: REMcG
- *
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
static void
@@ -4394,11 +4338,11 @@ xml_print_enum(hid_t type)
size_t dst_size; /*destination value type size */
unsigned i; /*miscellaneous counters */
size_t j;
- h5tools_str_t buffer; /* string into which to render */
- h5tools_context_t ctx; /* print context */
- h5tool_format_t *outputformat = &xml_dataformat;
- h5tool_format_t string_dataformat;
- hsize_t curr_pos = 0; /* total data element position */
+ h5tools_str_t buffer; /* string into which to render */
+ h5tools_context_t ctx; /* print context */
+ h5tool_format_t *outputformat = &xml_dataformat;
+ h5tool_format_t string_dataformat;
+ hsize_t curr_pos = 0; /* total data element position */
/* setup */
HDmemset(&buffer, 0, sizeof(h5tools_str_t));
@@ -4406,7 +4350,7 @@ xml_print_enum(hid_t type)
HDmemset(&ctx, 0, sizeof(ctx));
ctx.indent_level = dump_indent / COL;
ctx.cur_column = dump_indent;
-
+
string_dataformat = *outputformat;
if (fp_format) {
@@ -4457,11 +4401,11 @@ xml_print_enum(hid_t type)
if (H5T_SGN_NONE == H5Tget_sign(type)) {
native = H5T_NATIVE_ULLONG;
- }
+ }
else {
native = H5T_NATIVE_LLONG;
}
- }
+ }
else {
dst_size = H5Tget_size(type);
}
@@ -4532,11 +4476,11 @@ xml_print_enum(hid_t type)
for (j = 0; j < dst_size; j++)
h5tools_str_append(&buffer, "%02x", value[i * dst_size + j]);
- }
+ }
else if (H5T_SGN_NONE == H5Tget_sign(native)) {
h5tools_str_append(&buffer,"%" H5_PRINTF_LL_WIDTH "u", *((unsigned long long *)
((void *) (value + i * dst_size))));
- }
+ }
else {
h5tools_str_append(&buffer,"%" H5_PRINTF_LL_WIDTH "d",
*((long long *) ((void *) (value + i * dst_size))));
diff --git a/tools/src/h5dump/h5dump_xml.h b/tools/src/h5dump/h5dump_xml.h
index c1d6c62..3c59917 100644
--- a/tools/src/h5dump/h5dump_xml.h
+++ b/tools/src/h5dump/h5dump_xml.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef H5DUMP_XML_H__
#define H5DUMP_XML_H__
diff --git a/tools/src/h5format_convert/CMakeLists.txt b/tools/src/h5format_convert/CMakeLists.txt
index 957055d..fa3abc0 100644
--- a/tools/src/h5format_convert/CMakeLists.txt
+++ b/tools/src/h5format_convert/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_TOOLS_SRC_H5FC)
#-----------------------------------------------------------------------------
@@ -27,13 +27,14 @@ set (H5_DEP_EXECUTABLES h5format_convert)
#-----------------------------------------------------------------------------
# Rules for Installation of tools using make Install target
#-----------------------------------------------------------------------------
+if (HDF5_EXPORTED_TARGETS)
+ INSTALL_PROGRAM_PDB (h5format_convert ${HDF5_INSTALL_BIN_DIR} toolsapplications)
-#INSTALL_PROGRAM_PDB (h5format_convert ${HDF5_INSTALL_BIN_DIR} toolsapplications)
-
-install (
- TARGETS
- h5format_convert
- EXPORT
- ${HDF5_EXPORTED_TARGETS}
- RUNTIME DESTINATION ${HDF5_INSTALL_BIN_DIR} COMPONENT toolsapplications
-)
+ install (
+ TARGETS
+ h5format_convert
+ EXPORT
+ ${HDF5_EXPORTED_TARGETS}
+ RUNTIME DESTINATION ${HDF5_INSTALL_BIN_DIR} COMPONENT toolsapplications
+ )
+endif ()
\ No newline at end of file
diff --git a/tools/src/h5format_convert/Makefile.am b/tools/src/h5format_convert/Makefile.am
index e2d1acf..2b36949 100644
--- a/tools/src/h5format_convert/Makefile.am
+++ b/tools/src/h5format_convert/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
@@ -25,6 +23,7 @@ AM_CPPFLAGS+=-I$(top_srcdir)/src -I$(top_srcdir)/tools/lib
# These are our main targets, the tools
bin_PROGRAMS=h5format_convert
+bin_SCRIPTS=
# Add h5format_convert specific linker flags here
h5format_convert_LDFLAGS = $(LT_STATIC_EXEC) $(AM_LDFLAGS)
@@ -32,4 +31,6 @@ h5format_convert_LDFLAGS = $(LT_STATIC_EXEC) $(AM_LDFLAGS)
# All programs rely on hdf5 library and h5tools library
LDADD=$(LIBH5TOOLS) $(LIBHDF5)
+CLEANFILES=
+
include $(top_srcdir)/config/conclude.am
diff --git a/tools/src/h5format_convert/h5format_convert.c b/tools/src/h5format_convert/h5format_convert.c
index 8ce28dd..2bfe280 100644
--- a/tools/src/h5format_convert/h5format_convert.c
+++ b/tools/src/h5format_convert/h5format_convert.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/tools/src/h5import/CMakeLists.txt b/tools/src/h5import/CMakeLists.txt
index 97ab4ec..9a61beb 100644
--- a/tools/src/h5import/CMakeLists.txt
+++ b/tools/src/h5import/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_TOOLS_SRC_H5IMPORT)
#-----------------------------------------------------------------------------
@@ -28,13 +28,14 @@ set (H5_DEP_EXECUTABLES h5import)
#-----------------------------------------------------------------------------
# Rules for Installation of tools using make Install target
#-----------------------------------------------------------------------------
+if (HDF5_EXPORTED_TARGETS)
+ INSTALL_PROGRAM_PDB (h5import ${HDF5_INSTALL_BIN_DIR} toolsapplications)
-#INSTALL_PROGRAM_PDB (h5import ${HDF5_INSTALL_BIN_DIR} toolsapplications)
-
-install (
- TARGETS
- h5import
- EXPORT
- ${HDF5_EXPORTED_TARGETS}
- RUNTIME DESTINATION ${HDF5_INSTALL_BIN_DIR} COMPONENT toolsapplications
-)
+ install (
+ TARGETS
+ h5import
+ EXPORT
+ ${HDF5_EXPORTED_TARGETS}
+ RUNTIME DESTINATION ${HDF5_INSTALL_BIN_DIR} COMPONENT toolsapplications
+ )
+endif ()
diff --git a/tools/src/h5import/Makefile.am b/tools/src/h5import/Makefile.am
index 2eae9ec..4623eb5 100644
--- a/tools/src/h5import/Makefile.am
+++ b/tools/src/h5import/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
diff --git a/tools/src/h5import/h5import.c b/tools/src/h5import/h5import.c
index d1aab0c..c71aeef 100644
--- a/tools/src/h5import/h5import.c
+++ b/tools/src/h5import/h5import.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "hdf5.h"
diff --git a/tools/src/h5import/h5import.h b/tools/src/h5import/h5import.h
index c242483..c69a542 100644
--- a/tools/src/h5import/h5import.h
+++ b/tools/src/h5import/h5import.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/tools/src/h5jam/CMakeLists.txt b/tools/src/h5jam/CMakeLists.txt
index cef54c2..3cca771 100644
--- a/tools/src/h5jam/CMakeLists.txt
+++ b/tools/src/h5jam/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_TOOLS_SRC_H5JAM)
#-----------------------------------------------------------------------------
@@ -37,13 +37,14 @@ set (H5_DEP_EXECUTABLES
#-----------------------------------------------------------------------------
# Rules for Installation of tools using make Install target
#-----------------------------------------------------------------------------
-
-#INSTALL_PROGRAM_PDB (h5jam ${HDF5_INSTALL_BIN_DIR} toolsapplications)
-
-install (
- TARGETS
- h5jam h5unjam
- EXPORT
- ${HDF5_EXPORTED_TARGETS}
- RUNTIME DESTINATION ${HDF5_INSTALL_BIN_DIR} COMPONENT toolsapplications
-)
+if (HDF5_EXPORTED_TARGETS)
+ INSTALL_PROGRAM_PDB (h5jam ${HDF5_INSTALL_BIN_DIR} toolsapplications)
+
+ install (
+ TARGETS
+ h5jam h5unjam
+ EXPORT
+ ${HDF5_EXPORTED_TARGETS}
+ RUNTIME DESTINATION ${HDF5_INSTALL_BIN_DIR} COMPONENT toolsapplications
+ )
+endif ()
diff --git a/tools/src/h5jam/Makefile.am b/tools/src/h5jam/Makefile.am
index e244625..4c44b08 100644
--- a/tools/src/h5jam/Makefile.am
+++ b/tools/src/h5jam/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
diff --git a/tools/src/h5jam/h5jam.c b/tools/src/h5jam/h5jam.c
index ae45714..61de604 100644
--- a/tools/src/h5jam/h5jam.c
+++ b/tools/src/h5jam/h5jam.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "hdf5.h"
diff --git a/tools/src/h5jam/h5unjam.c b/tools/src/h5jam/h5unjam.c
index 8f88398..1cc8cb3 100644
--- a/tools/src/h5jam/h5unjam.c
+++ b/tools/src/h5jam/h5unjam.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "hdf5.h"
diff --git a/tools/src/h5ls/CMakeLists.txt b/tools/src/h5ls/CMakeLists.txt
index 24e11cc..2e23634 100644
--- a/tools/src/h5ls/CMakeLists.txt
+++ b/tools/src/h5ls/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_TOOLS_SRC_H5LS)
#-----------------------------------------------------------------------------
@@ -16,9 +16,18 @@ target_link_libraries (h5ls ${HDF5_TOOLS_LIB_TARGET} ${HDF5_LIB_TARGET})
set_target_properties (h5ls PROPERTIES FOLDER tools)
set_global_variable (HDF5_UTILS_TO_EXPORT "${HDF5_UTILS_TO_EXPORT};h5ls")
-set (H5_DEP_EXECUTABLES
- h5ls
-)
+set (H5_DEP_EXECUTABLES h5ls)
+
+if (BUILD_SHARED_LIBS)
+ add_executable (h5ls-shared ${HDF5_TOOLS_SRC_H5LS_SOURCE_DIR}/h5ls.c)
+ TARGET_NAMING (h5ls-shared SHARED)
+ TARGET_C_PROPERTIES (h5ls-shared SHARED " " " ")
+ target_link_libraries (h5ls-shared ${HDF5_TOOLS_LIBSH_TARGET} ${HDF5_LIBSH_TARGET})
+ set_target_properties (h5ls-shared PROPERTIES FOLDER tools)
+ set_global_variable (HDF5_UTILS_TO_EXPORT "${HDF5_UTILS_TO_EXPORT};h5ls-shared")
+
+ set (H5_DEP_EXECUTABLES ${H5_DEP_EXECUTABLES} h5ls-shared)
+endif ()
##############################################################################
##############################################################################
@@ -29,13 +38,17 @@ set (H5_DEP_EXECUTABLES
#-----------------------------------------------------------------------------
# Rules for Installation of tools using make Install target
#-----------------------------------------------------------------------------
-#INSTALL_PROGRAM_PDB (h5ls ${HDF5_INSTALL_BIN_DIR} toolsapplications)
-
-install (
- TARGETS
- h5ls
- EXPORT
- ${HDF5_EXPORTED_TARGETS}
- RUNTIME DESTINATION ${HDF5_INSTALL_BIN_DIR} COMPONENT toolsapplications
-)
+if (HDF5_EXPORTED_TARGETS)
+ if (BUILD_SHARED_LIBS)
+ INSTALL_PROGRAM_PDB (h5ls-shared ${HDF5_INSTALL_BIN_DIR} toolsapplications)
+ endif ()
+ INSTALL_PROGRAM_PDB (h5ls ${HDF5_INSTALL_BIN_DIR} toolsapplications)
+ install (
+ TARGETS
+ ${H5_DEP_EXECUTABLES}
+ EXPORT
+ ${HDF5_EXPORTED_TARGETS}
+ RUNTIME DESTINATION ${HDF5_INSTALL_BIN_DIR} COMPONENT toolsapplications
+ )
+endif ()
diff --git a/tools/src/h5ls/Makefile.am b/tools/src/h5ls/Makefile.am
index dfa8bb6..07279c1 100644
--- a/tools/src/h5ls/Makefile.am
+++ b/tools/src/h5ls/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
diff --git a/tools/src/h5ls/h5ls.c b/tools/src/h5ls/h5ls.c
index 88bab20..8c18794 100644
--- a/tools/src/h5ls/h5ls.c
+++ b/tools/src/h5ls/h5ls.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -1622,7 +1620,7 @@ list_attr(hid_t obj, const char *attr_name, const H5A_info_t H5_ATTR_UNUSED *ain
if(hexdump_g)
p_type = H5Tcopy(type);
else
- p_type = h5tools_get_native_type(type);
+ p_type = H5Tget_native_type(type, H5T_DIR_DEFAULT);
if(p_type >= 0) {
/* VL data special information */
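The list_attr() hunk above replaces the tools-library helper h5tools_get_native_type() with the public H5Tget_native_type() call when choosing a memory type for display. A minimal sketch of that pattern using only public HDF5 calls; the function name and the lack of error checking are illustrative, not taken from h5ls:

    #include "hdf5.h"

    /* Sketch: read an attribute through its native memory type.
     * `obj` is an open object id; `buf` must be large enough for the data. */
    static herr_t read_attr_native(hid_t obj, const char *attr_name, void *buf)
    {
        hid_t  attr  = H5Aopen(obj, attr_name, H5P_DEFAULT);
        hid_t  ftype = H5Aget_type(attr);                          /* on-disk datatype  */
        hid_t  mtype = H5Tget_native_type(ftype, H5T_DIR_DEFAULT); /* in-memory datatype */
        herr_t ret   = H5Aread(attr, mtype, buf);

        H5Tclose(mtype);
        H5Tclose(ftype);
        H5Aclose(attr);
        return ret;
    }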
diff --git a/tools/src/h5repack/CMakeLists.txt b/tools/src/h5repack/CMakeLists.txt
index cb7f5f6..81e6275 100644
--- a/tools/src/h5repack/CMakeLists.txt
+++ b/tools/src/h5repack/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_TOOLS_SRC_H5REPACK)
#-----------------------------------------------------------------------------
@@ -29,6 +29,17 @@ set_global_variable (HDF5_UTILS_TO_EXPORT "${HDF5_UTILS_TO_EXPORT};h5repack")
set (H5_DEP_EXECUTABLES h5repack)
+if (BUILD_SHARED_LIBS)
+ add_executable (h5repack-shared ${REPACK_COMMON_SOURCES} ${HDF5_TOOLS_SRC_H5REPACK_SOURCE_DIR}/h5repack_main.c)
+ TARGET_NAMING (h5repack-shared SHARED)
+ TARGET_C_PROPERTIES (h5repack-shared SHARED " " " ")
+ target_link_libraries (h5repack-shared ${HDF5_TOOLS_LIBSH_TARGET} ${HDF5_LIBSH_TARGET})
+ set_target_properties (h5repack-shared PROPERTIES FOLDER tools)
+ set_global_variable (HDF5_UTILS_TO_EXPORT "${HDF5_UTILS_TO_EXPORT};h5repack-shared")
+
+ set (H5_DEP_EXECUTABLES ${H5_DEP_EXECUTABLES} h5repack-shared)
+endif ()
+
##############################################################################
##############################################################################
### I N S T A L L A T I O N ###
@@ -38,13 +49,17 @@ set (H5_DEP_EXECUTABLES h5repack)
#-----------------------------------------------------------------------------
# Rules for Installation of tools using make Install target
#-----------------------------------------------------------------------------
-#INSTALL_PROGRAM_PDB (h5repack ${HDF5_INSTALL_BIN_DIR} toolsapplications)
-
-install (
- TARGETS
- h5repack
- EXPORT
- ${HDF5_EXPORTED_TARGETS}
- RUNTIME DESTINATION ${HDF5_INSTALL_BIN_DIR} COMPONENT toolsapplications
-)
+if (HDF5_EXPORTED_TARGETS)
+ if (BUILD_SHARED_LIBS)
+ INSTALL_PROGRAM_PDB (h5repack-shared ${HDF5_INSTALL_BIN_DIR} toolsapplications)
+ endif ()
+ INSTALL_PROGRAM_PDB (h5repack ${HDF5_INSTALL_BIN_DIR} toolsapplications)
+ install (
+ TARGETS
+ ${H5_DEP_EXECUTABLES}
+ EXPORT
+ ${HDF5_EXPORTED_TARGETS}
+ RUNTIME DESTINATION ${HDF5_INSTALL_BIN_DIR} COMPONENT toolsapplications
+ )
+endif ()
diff --git a/tools/src/h5repack/Makefile.am b/tools/src/h5repack/Makefile.am
index e6e5a56..c71e65b 100644
--- a/tools/src/h5repack/Makefile.am
+++ b/tools/src/h5repack/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
@@ -23,20 +21,25 @@ include $(top_srcdir)/config/commence.am
# Include src, test, and tools/lib directories
AM_CPPFLAGS+=-I$(top_srcdir)/src -I$(top_srcdir)/test -I$(top_srcdir)/tools/lib
+# A convenience library for the h5repack tool and the h5repack tests
+noinst_LTLIBRARIES=libh5repack.la
+
+libh5repack_la_SOURCES=h5repack.c h5repack_copy.c h5repack_filters.c \
+ h5repack_opttable.c h5repack_parse.c h5repack_refs.c \
+ h5repack_verify.c
+libh5repack_la_LDFLAGS = $(AM_LDFLAGS)
+libh5repack_la_LIBADD=$(LIBH5TOOLS) $(LIBH5TEST) $(LIBHDF5)
+
+
# Our main target, h5repack tool
bin_PROGRAMS=h5repack
+h5repack_SOURCES=h5repack_main.c
+
# Add h5repack specific linker flags here
h5repack_LDFLAGS = $(LT_STATIC_EXEC) $(AM_LDFLAGS)
-# Depend on the hdf5 library, the tools library, the test library
-LDADD=$(LIBH5TOOLS) $(LIBH5TEST) $(LIBHDF5)
-
-# Source files
-COMMON_SOURCES=h5repack.c h5repack_copy.c h5repack_filters.c \
- h5repack_opttable.c h5repack_parse.c h5repack_refs.c \
- h5repack_verify.c
-
-h5repack_SOURCES=$(COMMON_SOURCES) h5repack_main.c
+# Depend on the hdf5 library, the tools library, the h5repack library
+h5repack_LDADD=libh5repack.la $(LIBH5TOOLS) $(LIBHDF5)
include $(top_srcdir)/config/conclude.am
diff --git a/tools/src/h5repack/h5repack.c b/tools/src/h5repack/h5repack.c
index ef2085c..bc8527b 100644
--- a/tools/src/h5repack/h5repack.c
+++ b/tools/src/h5repack/h5repack.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <stdlib.h>
@@ -53,19 +51,19 @@ static int have_request(pack_opt_t *options);
*-------------------------------------------------------------------------
*/
int h5repack(const char* infile, const char* outfile, pack_opt_t *options) {
- /* check input */
- if (check_options(options) < 0)
- return -1;
+ /* check input */
+ if (check_options(options) < 0)
+ return -1;
- /* check for objects in input that are in the file */
- if (check_objects(infile, options) < 0)
- return -1;
+ /* check for objects in input that are in the file */
+ if (check_objects(infile, options) < 0)
+ return -1;
- /* copy the objects */
- if (copy_objects(infile, outfile, options) < 0)
- return -1;
+ /* copy the objects */
+ if (copy_objects(infile, outfile, options) < 0)
+ return -1;
- return 0;
+ return 0;
}
/*-------------------------------------------------------------------------
@@ -77,27 +75,25 @@ int h5repack(const char* infile, const char* outfile, pack_opt_t *options) {
*
*-------------------------------------------------------------------------
*/
-int h5repack_init(pack_opt_t *options, int verbose, hbool_t latest,
- H5F_file_space_type_t strategy, hsize_t threshold) {
- int k, n;
-
- HDmemset(options, 0, sizeof(pack_opt_t));
- options->min_comp = 0;
- options->verbose = verbose;
- options->latest = latest;
- options->layout_g = H5D_LAYOUT_ERROR;
-
- for (n = 0; n < H5_REPACK_MAX_NFILTERS; n++) {
- options->filter_g[n].filtn = -1;
- options->filter_g[n].cd_nelmts = 0;
- for (k = 0; k < CD_VALUES; k++)
- options->filter_g[n].cd_values[k] = 0;
- }
-
- options->fs_strategy = strategy;
- options->fs_threshold = threshold;
-
- return (options_table_init(&(options->op_tbl)));
+int
+h5repack_init(pack_opt_t *options, int verbose, hbool_t latest)
+{
+ int k, n;
+
+ HDmemset(options, 0, sizeof(pack_opt_t));
+ options->min_comp = 0;
+ options->verbose = verbose;
+ options->latest = latest;
+ options->layout_g = H5D_LAYOUT_ERROR;
+
+ for (n = 0; n < H5_REPACK_MAX_NFILTERS; n++) {
+ options->filter_g[n].filtn = -1;
+ options->filter_g[n].cd_nelmts = 0;
+ for (k = 0; k < CD_VALUES; k++)
+ options->filter_g[n].cd_values[k] = 0;
+ }
+
+ return (options_table_init(&(options->op_tbl)));
}
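The rewritten h5repack_init() drops the file-space strategy/threshold parameters; those settings now live in pack_opt_t and are resolved against the input file later in copy_objects(). A hedged sketch of a caller using the reduced signature; the field names follow the pack_opt_t declared in h5repack.h later in this diff, and the wrapper function itself is illustrative:

    #include "hdf5.h"
    #include "h5repack.h"   /* tool-private header, assumed to be on the include path */

    /* Illustrative wrapper: initialize with the 3-argument form and leave the
     * file-space fields at 0 so the values are inherited from the input file. */
    static int repack_with_input_defaults(const char *in, const char *out)
    {
        pack_opt_t options;

        if (h5repack_init(&options, 0 /* verbose */, 0 /* latest */) < 0)
            return -1;

        /* 0 means "inherit from the input file"; -1 would force library defaults */
        options.fs_strategy  = (H5F_fspace_strategy_t)0;
        options.fs_persist   = 0;
        options.fs_threshold = 0;
        options.fs_pagesize  = 0;

        if (h5repack(in, out, &options) < 0)
            return -1;

        return h5repack_end(&options);
    }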
/*-------------------------------------------------------------------------
@@ -109,7 +105,7 @@ int h5repack_init(pack_opt_t *options, int verbose, hbool_t latest,
*/
int h5repack_end(pack_opt_t *options) {
- return options_table_free(options->op_tbl);
+ return options_table_free(options->op_tbl);
}
/*-------------------------------------------------------------------------
@@ -125,10 +121,10 @@ int h5repack_end(pack_opt_t *options) {
int
h5repack_addfilter(const char* str, pack_opt_t *options)
{
- obj_list_t *obj_list = NULL; /* one object list for the -f and -l option entry */
- filter_info_t filter; /* filter info for the current -f option entry */
- unsigned n_objs; /* number of objects in the current -f or -l option entry */
- int is_glb; /* is the filter global */
+ obj_list_t *obj_list = NULL; /* one object list for the -f and -l option entry */
+ filter_info_t filter; /* filter info for the current -f option entry */
+ unsigned n_objs; /* number of objects in the current -f or -l option entry */
+ int is_glb; /* is the filter global */
/* parse the -f option */
if (NULL == (obj_list = parse_filter(str, &n_objs, &filter, options, &is_glb)))
@@ -167,10 +163,10 @@ int
h5repack_addlayout(const char* str, pack_opt_t *options)
{
obj_list_t *obj_list = NULL; /*one object list for the -t and -c option entry */
- unsigned n_objs; /*number of objects in the current -t or -c option entry */
- pack_info_t pack; /*info about layout to extract from parse */
- int j;
- int ret_value = -1;
+ unsigned n_objs; /*number of objects in the current -t or -c option entry */
+ pack_info_t pack; /*info about layout to extract from parse */
+ int j;
+ int ret_value = -1;
init_packobject(&pack);
@@ -236,87 +232,84 @@ h5repack_addlayout(const char* str, pack_opt_t *options)
*-------------------------------------------------------------------------
*/
hid_t copy_named_datatype(hid_t type_in, hid_t fidout,
- named_dt_t **named_dt_head_p, trav_table_t *travt, pack_opt_t *options) {
- named_dt_t *dt = *named_dt_head_p; /* Stack pointer */
- named_dt_t *dt_ret = NULL; /* Datatype to return */
- H5O_info_t oinfo; /* Object info of input dtype */
- hid_t ret_value = -1; /* The identifier of the named dtype in the out file */
-
- if (H5Oget_info(type_in, &oinfo) < 0)
- goto done;
-
- if (*named_dt_head_p) {
- /* Stack already exists, search for the datatype */
- while (dt && dt->addr_in != oinfo.addr)
- dt = dt->next;
-
- dt_ret = dt;
- }
- else {
- /* Create the stack */
- size_t i;
-
- for (i = 0; i < travt->nobjs; i++) {
- if (travt->objs[i].type == H5TRAV_TYPE_NAMED_DATATYPE) {
- /* Push onto the stack */
- if (NULL == (dt = (named_dt_t *) HDmalloc(sizeof(named_dt_t)))) {
- goto done;
- }
- dt->next = *named_dt_head_p;
- *named_dt_head_p = dt;
-
- /* Update the address and id */
- dt->addr_in = travt->objs[i].objno;
- dt->id_out = -1;
-
- /* Check if this type is the one requested */
- if (oinfo.addr == dt->addr_in) {
- HDassert(!dt_ret);
- dt_ret = dt;
- } /* end if */
- } /* end if */
- } /* end for */
- } /* end else */
-
- /* Handle the case that the requested datatype was not found. This is
- * possible if the datatype was committed anonymously in the input file. */
- if (!dt_ret) {
- /* Push the new datatype onto the stack */
- if (NULL == (dt_ret = (named_dt_t *) HDmalloc(sizeof(named_dt_t)))) {
- goto done;
- }
- dt_ret->next = *named_dt_head_p;
- *named_dt_head_p = dt_ret;
-
- /* Update the address and id */
- dt_ret->addr_in = oinfo.addr;
- dt_ret->id_out = -1;
- } /* end if */
-
- /* If the requested datatype does not yet exist in the output file, copy it
- * anonymously */
- if (dt_ret->id_out < 0) {
- if (options->use_native == 1)
- dt_ret->id_out = h5tools_get_native_type(type_in);
- else
- dt_ret->id_out = H5Tcopy(type_in);
- if (dt_ret->id_out < 0)
- goto done;
- if (H5Tcommit_anon(fidout, dt_ret->id_out, H5P_DEFAULT, H5P_DEFAULT) < 0)
- goto done;
- } /* end if */
-
- /* Set return value */
- ret_value = dt_ret->id_out;
-
- /* Increment the ref count on id_out, because the calling function will try
- * to close it */
- if(H5Iinc_ref(ret_value) < 0) {
- ret_value = -1;
- }
+ named_dt_t **named_dt_head_p, trav_table_t *travt, pack_opt_t *options) {
+ named_dt_t *dt = *named_dt_head_p; /* Stack pointer */
+ named_dt_t *dt_ret = NULL; /* Datatype to return */
+ H5O_info_t oinfo; /* Object info of input dtype */
+ hid_t ret_value = -1; /* The identifier of the named dtype in the out file */
+
+ if (H5Oget_info(type_in, &oinfo) < 0)
+ goto done;
+
+ if (*named_dt_head_p) {
+ /* Stack already exists, search for the datatype */
+ while (dt && dt->addr_in != oinfo.addr)
+ dt = dt->next;
+
+ dt_ret = dt;
+ }
+ else {
+ /* Create the stack */
+ size_t i;
+
+ for (i = 0; i < travt->nobjs; i++) {
+ if (travt->objs[i].type == H5TRAV_TYPE_NAMED_DATATYPE) {
+ /* Push onto the stack */
+ if (NULL == (dt = (named_dt_t *)HDmalloc(sizeof(named_dt_t))))
+ goto done;
+ dt->next = *named_dt_head_p;
+ *named_dt_head_p = dt;
+
+ /* Update the address and id */
+ dt->addr_in = travt->objs[i].objno;
+ dt->id_out = -1;
+
+ /* Check if this type is the one requested */
+ if (oinfo.addr == dt->addr_in) {
+ HDassert(!dt_ret);
+ dt_ret = dt;
+ } /* end if */
+ } /* end if */
+ } /* end for */
+ } /* end else */
+
+ /* Handle the case that the requested datatype was not found. This is
+ * possible if the datatype was committed anonymously in the input file. */
+ if (!dt_ret) {
+ /* Push the new datatype onto the stack */
+ if (NULL == (dt_ret = (named_dt_t *)HDmalloc(sizeof(named_dt_t))))
+ goto done;
+ dt_ret->next = *named_dt_head_p;
+ *named_dt_head_p = dt_ret;
+
+ /* Update the address and id */
+ dt_ret->addr_in = oinfo.addr;
+ dt_ret->id_out = -1;
+ } /* end if */
+
+ /* If the requested datatype does not yet exist in the output file, copy it
+ * anonymously */
+ if (dt_ret->id_out < 0) {
+ if (options->use_native == 1)
+ dt_ret->id_out = H5Tget_native_type(type_in, H5T_DIR_DEFAULT);
+ else
+ dt_ret->id_out = H5Tcopy(type_in);
+ if (dt_ret->id_out < 0)
+ goto done;
+ if (H5Tcommit_anon(fidout, dt_ret->id_out, H5P_DEFAULT, H5P_DEFAULT) < 0)
+ goto done;
+ } /* end if */
+
+ /* Set return value */
+ ret_value = dt_ret->id_out;
+
+ /* Increment the ref count on id_out, because the calling function will try
+ * to close it */
+ if(H5Iinc_ref(ret_value) < 0)
+ ret_value = -1;
done:
- return (ret_value);
+ return (ret_value);
} /* end copy_named_datatype */
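copy_named_datatype() returns an id that is also kept on the internal named-datatype stack, so it calls H5Iinc_ref() before returning: the caller may close its handle without invalidating the stack entry, which named_datatype_free() closes later. A small sketch of that identifier reference-count pattern on a plain datatype id (not the h5repack stack itself):

    #include "hdf5.h"

    /* Sketch: two owners of one datatype id, balanced with reference counts. */
    static void shared_id_example(void)
    {
        hid_t dtype = H5Tcopy(H5T_NATIVE_INT);   /* reference count is 1 */

        H5Iinc_ref(dtype);                       /* hand out a second owner: count is 2 */

        H5Tclose(dtype);                         /* first owner done: count back to 1 */
        H5Tclose(dtype);                         /* second owner done: id is released */
    }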
/*-------------------------------------------------------------------------
@@ -331,22 +324,22 @@ done:
*-------------------------------------------------------------------------
*/
int named_datatype_free(named_dt_t **named_dt_head_p, int ignore_err) {
- named_dt_t *dt = *named_dt_head_p;
- int ret_value = -1;
+ named_dt_t *dt = *named_dt_head_p;
+ int ret_value = -1;
- while (dt) {
- /* Pop the datatype off the stack and free it */
- if (H5Tclose(dt->id_out) < 0 && !ignore_err)
- goto done;
- dt = dt->next;
- HDfree(*named_dt_head_p);
- *named_dt_head_p = dt;
- } /* end while */
+ while (dt) {
+ /* Pop the datatype off the stack and free it */
+ if (H5Tclose(dt->id_out) < 0 && !ignore_err)
+ goto done;
+ dt = dt->next;
+ HDfree(*named_dt_head_p);
+ *named_dt_head_p = dt;
+ } /* end while */
- ret_value = 0;
+ ret_value = 0;
done:
- return (ret_value);
+ return (ret_value);
} /* end named_datatype_free */
/*-------------------------------------------------------------------------
@@ -367,25 +360,25 @@ done:
*/
int
copy_attr(hid_t loc_in, hid_t loc_out, named_dt_t **named_dt_head_p,
- trav_table_t *travt, pack_opt_t *options)
+ trav_table_t *travt, pack_opt_t *options)
{
- int ret_value = 0; /*no need to LEAVE() on ERROR: HERR_INIT(int, SUCCEED) */
- hid_t attr_id = -1; /* attr ID */
- hid_t attr_out = -1; /* attr ID */
- hid_t space_id = -1; /* space ID */
- hid_t ftype_id = -1; /* file type ID */
- hid_t wtype_id = -1; /* read/write type ID */
- size_t msize; /* size of type */
- void *buf = NULL; /* data buffer */
- hsize_t nelmts; /* number of elements in dataset */
- int rank; /* rank of dataset */
- htri_t is_named; /* Whether the datatype is named */
- hsize_t dims[H5S_MAX_RANK];/* dimensions of dataset */
- char name[255];
- H5O_info_t oinfo; /* object info */
- int j;
- unsigned u;
- hbool_t is_ref = 0;
+ int ret_value = 0; /*no need to LEAVE() on ERROR: HERR_INIT(int, SUCCEED) */
+ hid_t attr_id = -1; /* attr ID */
+ hid_t attr_out = -1; /* attr ID */
+ hid_t space_id = -1; /* space ID */
+ hid_t ftype_id = -1; /* file type ID */
+ hid_t wtype_id = -1; /* read/write type ID */
+ size_t msize; /* size of type */
+ void *buf = NULL; /* data buffer */
+ hsize_t nelmts; /* number of elements in dataset */
+ int rank; /* rank of dataset */
+ htri_t is_named; /* Whether the datatype is named */
+ hsize_t dims[H5S_MAX_RANK];/* dimensions of dataset */
+ char name[255];
+ H5O_info_t oinfo; /* object info */
+ int j;
+ unsigned u;
+ hbool_t is_ref = 0;
H5T_class_t type_class = -1;
if (H5Oget_info(loc_in, &oinfo) < 0)
@@ -429,7 +422,7 @@ copy_attr(hid_t loc_in, hid_t loc_out, named_dt_t **named_dt_head_p,
} /* end if */
else {
if (options->use_native == 1)
- wtype_id = h5tools_get_native_type(ftype_id);
+ wtype_id = H5Tget_native_type(ftype_id, H5T_DIR_DEFAULT);
else
wtype_id = H5Tcopy(ftype_id);
} /* end else */
@@ -470,7 +463,7 @@ copy_attr(hid_t loc_in, hid_t loc_out, named_dt_t **named_dt_head_p,
int nmembers = H5Tget_nmembers(wtype_id);
for (j = 0; j < nmembers; j++) {
- hid_t mtid = H5Tget_member_type(wtype_id, (unsigned) j);
+ hid_t mtid = H5Tget_member_type(wtype_id, (unsigned)j);
H5T_class_t mtclass = H5Tget_class(mtid);
H5Tclose(mtid);
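The loop above inspects each member of a compound attribute type so that attributes containing reference data can be skipped here and handled by copy_refs_attr(). A self-contained version of the same member scan, using the same public calls (the helper name is illustrative):

    #include "hdf5.h"

    /* Sketch: return 1 if a compound datatype has an H5T_REFERENCE member. */
    static int compound_has_reference(hid_t wtype_id)
    {
        int found    = 0;
        int nmembers = H5Tget_nmembers(wtype_id);
        int j;

        for (j = 0; j < nmembers; j++) {
            hid_t       mtid    = H5Tget_member_type(wtype_id, (unsigned)j);
            H5T_class_t mtclass = H5Tget_class(mtid);

            H5Tclose(mtid);
            if (mtclass == H5T_REFERENCE)
                found = 1;
        }
        return found;
    }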
@@ -481,16 +474,13 @@ copy_attr(hid_t loc_in, hid_t loc_out, named_dt_t **named_dt_head_p,
} /* for (j=0; i<nmembers; j++) */
} /* if (type_class == H5T_COMPOUND) */
- if (is_ref) {
- ; /* handled by copy_refs_attr() */
- }
- else {
+ if (!is_ref) {
/*-------------------------------------------------------------------------
* read to memory
*-------------------------------------------------------------------------
*/
- buf = (void *) HDmalloc((size_t)(nelmts * msize));
+ buf = (void *)HDmalloc((size_t)(nelmts * msize));
if (buf == NULL) {
error_msg("h5repack", "cannot read into memory\n");
HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "HDmalloc failed");
@@ -572,193 +562,188 @@ done:
* Programmer: pvn@ncsa.uiuc.edu
*
* Date: September, 22, 2003
- *
- * Modification:
- * Peter Cao, July 9, 2007
- * Add "-L, --latest" and other options to pack a file with the latest file format
- *
*-------------------------------------------------------------------------
*/
static int check_options(pack_opt_t *options) {
- unsigned int i;
- int k, j, has_cp = 0, has_ck = 0;
- char slayout[30];
-
- /*-------------------------------------------------------------------------
- * objects to layout
- *-------------------------------------------------------------------------
- */
- if (options->verbose && have_request(options) /* only print if requested */) {
- printf("Objects to modify layout are...\n");
- if (options->all_layout == 1) {
- switch (options->layout_g) {
- case H5D_COMPACT:
- strcpy(slayout, "compact");
- break;
- case H5D_CONTIGUOUS:
- strcpy(slayout, "contiguous");
- break;
- case H5D_CHUNKED:
- strcpy(slayout, "chunked");
- break;
- case H5D_VIRTUAL:
- strcpy(slayout, "virtual");
- break;
- case H5D_LAYOUT_ERROR:
- case H5D_NLAYOUTS:
- error_msg("invalid layout\n");
- return -1;
- default:
- strcpy(slayout, "invalid layout\n");
- return -1;
- }
- printf(" Apply %s layout to all\n", slayout);
- if (H5D_CHUNKED == options->layout_g) {
- printf("with dimension [");
- for (j = 0; j < options->chunk_g.rank; j++)
- printf("%d ", (int) options->chunk_g.chunk_lengths[j]);
- printf("]\n");
- }
- }
- }/* verbose */
-
- for (i = 0; i < options->op_tbl->nelems; i++) {
- char* name = options->op_tbl->objs[i].path;
-
- if (options->op_tbl->objs[i].chunk.rank > 0) {
- if (options->verbose) {
- printf(" <%s> with chunk size ", name);
- for (k = 0; k < options->op_tbl->objs[i].chunk.rank; k++)
- printf("%d ",
- (int) options->op_tbl->objs[i].chunk.chunk_lengths[k]);
- printf("\n");
- }
- has_ck = 1;
- }
- else if (options->op_tbl->objs[i].chunk.rank == -2) {
- if (options->verbose)
- printf(" <%s> %s\n", name, "NONE (contigous)");
- has_ck = 1;
- }
- }
-
- if (options->all_layout == 1 && has_ck) {
- error_msg(
- "invalid chunking input: 'all' option\
+ unsigned int i;
+ int k, j, has_cp = 0, has_ck = 0;
+ char slayout[30];
+
+ /*-------------------------------------------------------------------------
+ * objects to layout
+ *-------------------------------------------------------------------------
+ */
+ if (options->verbose && have_request(options) /* only print if requested */) {
+ printf("Objects to modify layout are...\n");
+ if (options->all_layout == 1) {
+ switch (options->layout_g) {
+ case H5D_COMPACT:
+ strcpy(slayout, "compact");
+ break;
+ case H5D_CONTIGUOUS:
+ strcpy(slayout, "contiguous");
+ break;
+ case H5D_CHUNKED:
+ strcpy(slayout, "chunked");
+ break;
+ case H5D_VIRTUAL:
+ strcpy(slayout, "virtual");
+ break;
+ case H5D_LAYOUT_ERROR:
+ case H5D_NLAYOUTS:
+ error_msg("invalid layout\n");
+ return -1;
+ default:
+ strcpy(slayout, "invalid layout\n");
+ return -1;
+ }
+ printf(" Apply %s layout to all\n", slayout);
+ if (H5D_CHUNKED == options->layout_g) {
+ printf("with dimension [");
+ for (j = 0; j < options->chunk_g.rank; j++)
+ printf("%d ", (int) options->chunk_g.chunk_lengths[j]);
+ printf("]\n");
+ }
+ }
+ }/* verbose */
+
+ for (i = 0; i < options->op_tbl->nelems; i++) {
+ char* name = options->op_tbl->objs[i].path;
+
+ if (options->op_tbl->objs[i].chunk.rank > 0) {
+ if (options->verbose) {
+ printf(" <%s> with chunk size ", name);
+ for (k = 0; k < options->op_tbl->objs[i].chunk.rank; k++)
+ printf("%d ",
+ (int) options->op_tbl->objs[i].chunk.chunk_lengths[k]);
+ printf("\n");
+ }
+ has_ck = 1;
+ }
+ else if (options->op_tbl->objs[i].chunk.rank == -2) {
+ if (options->verbose)
+ printf(" <%s> %s\n", name, "NONE (contiguous)");
+ has_ck = 1;
+ }
+ }
+
+ if (options->all_layout == 1 && has_ck) {
+ error_msg(
+ "invalid chunking input: 'all' option\
is present with other objects\n");
- return -1;
- }
-
- /*-------------------------------------------------------------------------
- * objects to filter
- *-------------------------------------------------------------------------
- */
-
- if (options->verbose && have_request(options) /* only print if requested */) {
- printf("Objects to apply filter are...\n");
- if (options->all_filter == 1) {
- for (k = 0; k < options->n_filter_g; k++) {
- H5Z_filter_t filtn = options->filter_g[k].filtn;
- switch (filtn) {
- case H5Z_FILTER_NONE:
- printf(" Uncompress all\n");
- break;
- case H5Z_FILTER_SHUFFLE:
- case H5Z_FILTER_FLETCHER32:
- printf(" All with %s\n", get_sfilter(filtn));
- break;
- case H5Z_FILTER_SZIP:
- case H5Z_FILTER_DEFLATE:
- printf(" All with %s, parameter %d\n", get_sfilter(filtn),
- options->filter_g[k].cd_values[0]);
- break;
- default:
- printf(" User Defined %d\n", filtn);
- break;
- } /* k */
- };
- }
- } /* verbose */
-
- for (i = 0; i < options->op_tbl->nelems; i++) {
- pack_info_t pack = options->op_tbl->objs[i];
- char* name = pack.path;
-
- for (j = 0; j < pack.nfilters; j++) {
- if (options->verbose) {
- printf(" <%s> with %s filter\n", name,
- get_sfilter(pack.filter[j].filtn));
- }
-
- has_cp = 1;
-
- } /* j */
- } /* i */
-
- if (options->all_filter == 1 && has_cp) {
- error_msg(
- "invalid compression input: 'all' option\
+ return -1;
+ }
+
+ /*-------------------------------------------------------------------------
+ * objects to filter
+ *-------------------------------------------------------------------------
+ */
+
+ if (options->verbose && have_request(options) /* only print if requested */) {
+ printf("Objects to apply filter are...\n");
+ if (options->all_filter == 1) {
+ for (k = 0; k < options->n_filter_g; k++) {
+ H5Z_filter_t filtn = options->filter_g[k].filtn;
+ switch (filtn) {
+ case H5Z_FILTER_NONE:
+ printf(" Uncompress all\n");
+ break;
+ case H5Z_FILTER_SHUFFLE:
+ case H5Z_FILTER_FLETCHER32:
+ printf(" All with %s\n", get_sfilter(filtn));
+ break;
+ case H5Z_FILTER_SZIP:
+ case H5Z_FILTER_DEFLATE:
+ printf(" All with %s, parameter %d\n", get_sfilter(filtn),
+ options->filter_g[k].cd_values[0]);
+ break;
+ default:
+ printf(" User Defined %d\n", filtn);
+ break;
+ } /* k */
+ };
+ }
+ } /* verbose */
+
+ for (i = 0; i < options->op_tbl->nelems; i++) {
+ pack_info_t pack = options->op_tbl->objs[i];
+ char* name = pack.path;
+
+ for (j = 0; j < pack.nfilters; j++) {
+ if (options->verbose) {
+ printf(" <%s> with %s filter\n", name,
+ get_sfilter(pack.filter[j].filtn));
+ }
+
+ has_cp = 1;
+
+ } /* j */
+ } /* i */
+
+ if (options->all_filter == 1 && has_cp) {
+ error_msg(
+ "invalid compression input: 'all' option\
is present with other objects\n");
- return -1;
- }
-
- /*-------------------------------------------------------------------------
- * check options for the latest format
- *-------------------------------------------------------------------------
- */
-
- if (options->grp_compact < 0) {
- error_msg(
- "invalid maximum number of links to store as header messages\n");
- return -1;
- }
- if (options->grp_indexed < 0) {
- error_msg(
- "invalid minimum number of links to store in the indexed format\n");
- return -1;
- }
- if (options->grp_indexed > options->grp_compact) {
- error_msg(
- "minimum indexed size is greater than the maximum compact size\n");
- return -1;
- }
- for (i = 0; i < 8; i++) {
- if (options->msg_size[i] < 0) {
- error_msg("invalid shared message size\n");
- return -1;
- }
- }
-
- /*--------------------------------------------------------------------------------
- * verify new user userblock options; file name must be present
- *---------------------------------------------------------------------------------
- */
- if (options->ublock_filename != NULL && options->ublock_size == 0) {
- if (options->verbose) {
- printf(
- "Warning: user block size missing for file %s. Assigning a default size of 1024...\n",
- options->ublock_filename);
- options->ublock_size = 1024;
- }
- }
-
- if (options->ublock_filename == NULL && options->ublock_size != 0) {
- error_msg("file name missing for user block\n",
- options->ublock_filename);
- return -1;
- }
-
- /*--------------------------------------------------------------------------------
- * verify alignment options; threshold is zero default but alignment not
- *---------------------------------------------------------------------------------
- */
-
- if (options->alignment == 0 && options->threshold != 0) {
- error_msg("alignment for H5Pset_alignment missing\n");
- return -1;
- }
-
- return 0;
+ return -1;
+ }
+
+ /*-------------------------------------------------------------------------
+ * check options for the latest format
+ *-------------------------------------------------------------------------
+ */
+
+ if (options->grp_compact < 0) {
+ error_msg(
+ "invalid maximum number of links to store as header messages\n");
+ return -1;
+ }
+ if (options->grp_indexed < 0) {
+ error_msg(
+ "invalid minimum number of links to store in the indexed format\n");
+ return -1;
+ }
+ if (options->grp_indexed > options->grp_compact) {
+ error_msg(
+ "minimum indexed size is greater than the maximum compact size\n");
+ return -1;
+ }
+ for (i = 0; i < 8; i++) {
+ if (options->msg_size[i] < 0) {
+ error_msg("invalid shared message size\n");
+ return -1;
+ }
+ }
+
+ /*--------------------------------------------------------------------------------
+ * verify new user block options; file name must be present
+ *---------------------------------------------------------------------------------
+ */
+ if (options->ublock_filename != NULL && options->ublock_size == 0) {
+ if (options->verbose) {
+ printf(
+ "Warning: user block size missing for file %s. Assigning a default size of 1024...\n",
+ options->ublock_filename);
+ options->ublock_size = 1024;
+ }
+ }
+
+ if (options->ublock_filename == NULL && options->ublock_size != 0) {
+ error_msg("file name missing for user block\n",
+ options->ublock_filename);
+ return -1;
+ }
+
+ /*--------------------------------------------------------------------------------
+ * verify alignment options; threshold is zero default but alignment not
+ *---------------------------------------------------------------------------------
+ */
+
+ if (options->alignment == 0 && options->threshold != 0) {
+ error_msg("alignment for H5Pset_alignment missing\n");
+ return -1;
+ }
+
+ return 0;
}
/*-------------------------------------------------------------------------
@@ -776,119 +761,121 @@ static int check_options(pack_opt_t *options) {
*-------------------------------------------------------------------------
*/
static int check_objects(const char* fname, pack_opt_t *options) {
- hid_t fid;
- unsigned int i;
- trav_table_t *travt = NULL;
-
- /* nothing to do */
- if (options->op_tbl->nelems == 0)
- return 0;
-
- /*-------------------------------------------------------------------------
- * open the file
- *-------------------------------------------------------------------------
- */
- if ((fid = h5tools_fopen(fname, H5F_ACC_RDONLY, H5P_DEFAULT, NULL, NULL, 0))
- < 0) {
- printf("<%s>: %s\n", fname, H5FOPENERROR);
- return -1;
- }
-
- /*-------------------------------------------------------------------------
- * get the list of objects in the file
- *-------------------------------------------------------------------------
- */
-
- /* init table */
- trav_table_init(&travt);
-
- /* get the list of objects in the file */
- if (h5trav_gettable(fid, travt) < 0)
- goto out;
-
- /*-------------------------------------------------------------------------
- * compare with user supplied list
- *-------------------------------------------------------------------------
- */
-
- if (options->verbose)
- printf("Opening file <%s>. Searching for objects to modify...\n",
- fname);
-
- for (i = 0; i < options->op_tbl->nelems; i++) {
- char* name = options->op_tbl->objs[i].path;
- if (options->verbose)
- printf(" <%s>", name);
-
- /* the input object names are present in the file and are valid */
- if (h5trav_getindext(name, travt) < 0) {
- error_msg("%s Could not find <%s> in file <%s>. Exiting...\n",
- (options->verbose ? "\n" : ""), name, fname);
- goto out;
- }
- if (options->verbose)
- printf("...Found\n");
-
- /* check for extra filter conditions */
- switch (options->op_tbl->objs[i].filter->filtn) {
- /* chunk size must be smaller than pixels per block */
- case H5Z_FILTER_SZIP:
- {
- int j;
- hsize_t csize = 1;
- unsigned ppb = options->op_tbl->objs[i].filter->cd_values[0];
- hsize_t dims[H5S_MAX_RANK];
- int rank;
- hid_t did;
- hid_t sid;
-
- if (options->op_tbl->objs[i].chunk.rank > 0) {
- rank = options->op_tbl->objs[i].chunk.rank;
- for (j = 0; j < rank; j++)
- csize *= options->op_tbl->objs[i].chunk.chunk_lengths[j];
- }
- else {
- if ((did = H5Dopen2(fid, name, H5P_DEFAULT)) < 0)
- goto out;
- if ((sid = H5Dget_space(did)) < 0)
- goto out;
- if ((rank = H5Sget_simple_extent_ndims(sid)) < 0)
- goto out;
- HDmemset(dims, 0, sizeof dims);
- if (H5Sget_simple_extent_dims(sid, dims, NULL) < 0)
- goto out;
- for (j = 0; j < rank; j++)
- csize *= dims[j];
- if (H5Sclose(sid) < 0)
- goto out;
- if (H5Dclose(did) < 0)
- goto out;
- }
-
- if (csize < ppb) {
- printf(
- " <warning: SZIP settins, chunk size is smaller than pixels per block>\n");
- goto out;
- }
- }
- break;
- default:
- break;
- }
- } /* i */
-
- /*-------------------------------------------------------------------------
- * close
- *-------------------------------------------------------------------------
- */
- H5Fclose(fid);
- trav_table_free(travt);
- return 0;
+ hid_t fid;
+ unsigned int i;
+ trav_table_t *travt = NULL;
+
+ /* nothing to do */
+ if (options->op_tbl->nelems == 0)
+ return 0;
+
+ /*-------------------------------------------------------------------------
+ * open the file
+ *-------------------------------------------------------------------------
+ */
+ if ((fid = h5tools_fopen(fname, H5F_ACC_RDONLY, H5P_DEFAULT, NULL, NULL, 0))
+ < 0) {
+ printf("<%s>: %s\n", fname, H5FOPENERROR);
+ return -1;
+ }
+
+ /*-------------------------------------------------------------------------
+ * get the list of objects in the file
+ *-------------------------------------------------------------------------
+ */
+
+ /* Initialize indexing options */
+ h5trav_set_index(sort_by, sort_order);
+ /* init table */
+ trav_table_init(&travt);
+
+ /* get the list of objects in the file */
+ if (h5trav_gettable(fid, travt) < 0)
+ goto out;
+
+ /*-------------------------------------------------------------------------
+ * compare with user supplied list
+ *-------------------------------------------------------------------------
+ */
+
+ if (options->verbose)
+ printf("Opening file <%s>. Searching for objects to modify...\n",
+ fname);
+
+ for (i = 0; i < options->op_tbl->nelems; i++) {
+ char* name = options->op_tbl->objs[i].path;
+ if (options->verbose)
+ printf(" <%s>", name);
+
+ /* the input object names are present in the file and are valid */
+ if (h5trav_getindext(name, travt) < 0) {
+ error_msg("%s Could not find <%s> in file <%s>. Exiting...\n",
+ (options->verbose ? "\n" : ""), name, fname);
+ goto out;
+ }
+ if (options->verbose)
+ printf("...Found\n");
+
+ /* check for extra filter conditions */
+ switch (options->op_tbl->objs[i].filter->filtn) {
+ /* chunk size must be smaller than pixels per block */
+ case H5Z_FILTER_SZIP:
+ {
+ int j;
+ hsize_t csize = 1;
+ unsigned ppb = options->op_tbl->objs[i].filter->cd_values[0];
+ hsize_t dims[H5S_MAX_RANK];
+ int rank;
+ hid_t did;
+ hid_t sid;
+
+ if (options->op_tbl->objs[i].chunk.rank > 0) {
+ rank = options->op_tbl->objs[i].chunk.rank;
+ for (j = 0; j < rank; j++)
+ csize *= options->op_tbl->objs[i].chunk.chunk_lengths[j];
+ }
+ else {
+ if ((did = H5Dopen2(fid, name, H5P_DEFAULT)) < 0)
+ goto out;
+ if ((sid = H5Dget_space(did)) < 0)
+ goto out;
+ if ((rank = H5Sget_simple_extent_ndims(sid)) < 0)
+ goto out;
+ HDmemset(dims, 0, sizeof dims);
+ if (H5Sget_simple_extent_dims(sid, dims, NULL) < 0)
+ goto out;
+ for (j = 0; j < rank; j++)
+ csize *= dims[j];
+ if (H5Sclose(sid) < 0)
+ goto out;
+ if (H5Dclose(did) < 0)
+ goto out;
+ }
+
+ if (csize < ppb) {
+ printf(
+ " <warning: SZIP settings, chunk size is smaller than pixels per block>\n");
+ goto out;
+ }
+ }
+ break;
+ default:
+ break;
+ }
+ } /* i */
+
+ /*-------------------------------------------------------------------------
+ * close
+ *-------------------------------------------------------------------------
+ */
+ H5Fclose(fid);
+ trav_table_free(travt);
+ return 0;
out:
- H5Fclose(fid);
- trav_table_free(travt);
- return -1;
+ H5Fclose(fid);
+ trav_table_free(travt);
+ return -1;
}
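For SZIP, the check above requires the chunk (or, when no chunk is requested, the whole dataset) to hold at least `ppb` elements, where `ppb` is the pixels-per-block parameter in cd_values[0]. A sketch of the element-count calculation on an existing dataset; error handling is omitted for brevity:

    #include "hdf5.h"

    /* Sketch: total number of elements in a dataset's dataspace. */
    static hsize_t dataset_nelmts(hid_t fid, const char *name)
    {
        hsize_t dims[H5S_MAX_RANK];
        hsize_t nelmts = 1;
        int     rank, j;

        hid_t did = H5Dopen2(fid, name, H5P_DEFAULT);
        hid_t sid = H5Dget_space(did);

        rank = H5Sget_simple_extent_ndims(sid);
        H5Sget_simple_extent_dims(sid, dims, NULL);
        for (j = 0; j < rank; j++)
            nelmts *= dims[j];

        H5Sclose(sid);
        H5Dclose(did);
        return nelmts;          /* compare against ppb for the SZIP check */
    }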
/*-------------------------------------------------------------------------
@@ -904,10 +891,10 @@ out:
*/
static int have_request(pack_opt_t *options) {
- if (options->all_filter || options->all_layout || options->op_tbl->nelems)
- return 1;
+ if (options->all_filter || options->all_layout || options->op_tbl->nelems)
+ return 1;
- return 0;
+ return 0;
}
@@ -922,21 +909,21 @@ static int have_request(pack_opt_t *options) {
*/
static const char* get_sfilter(H5Z_filter_t filtn) {
- if (filtn == H5Z_FILTER_NONE)
- return "NONE";
- else if (filtn == H5Z_FILTER_DEFLATE)
- return "GZIP";
- else if (filtn == H5Z_FILTER_SZIP)
- return "SZIP";
- else if (filtn == H5Z_FILTER_SHUFFLE)
- return "SHUFFLE";
- else if (filtn == H5Z_FILTER_FLETCHER32)
- return "FLETCHER32";
- else if (filtn == H5Z_FILTER_NBIT)
- return "NBIT";
- else if (filtn == H5Z_FILTER_SCALEOFFSET)
- return "SOFF";
- else
- return "UD";
+ if (filtn == H5Z_FILTER_NONE)
+ return "NONE";
+ else if (filtn == H5Z_FILTER_DEFLATE)
+ return "GZIP";
+ else if (filtn == H5Z_FILTER_SZIP)
+ return "SZIP";
+ else if (filtn == H5Z_FILTER_SHUFFLE)
+ return "SHUFFLE";
+ else if (filtn == H5Z_FILTER_FLETCHER32)
+ return "FLETCHER32";
+ else if (filtn == H5Z_FILTER_NBIT)
+ return "NBIT";
+ else if (filtn == H5Z_FILTER_SCALEOFFSET)
+ return "SOFF";
+ else
+ return "UD";
}
diff --git a/tools/src/h5repack/h5repack.h b/tools/src/h5repack/h5repack.h
index d2ab923..e5ae03f 100644
--- a/tools/src/h5repack/h5repack.h
+++ b/tools/src/h5repack/h5repack.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
@@ -30,6 +28,13 @@
#define FORMAT_OBJ_ATTR " %-27s %s\n" /* obj type, name */
#define MAX_COMPACT_DSIZE 64512 /* max data size for compact layout. -1k for header size */
+/* File space default information */
+#define FS_PAGESIZE_DEF 4096
+#define FS_STRATEGY_DEF H5F_FSPACE_STRATEGY_FSM_AGGR
+#define FS_PERSIST_DEF FALSE
+#define FS_THRESHOLD_DEF 1
+
+
/*-------------------------------------------------------------------------
* data structures for command line options
*-------------------------------------------------------------------------
@@ -115,8 +120,10 @@ typedef struct {
hsize_t meta_block_size; /* metadata aggregation block size (for H5Pset_meta_block_size) */
hsize_t threshold; /* alignment threshold for H5Pset_alignment */
hsize_t alignment; /* alignment for H5Pset_alignment */
- H5F_file_space_type_t fs_strategy; /* File space handling strategy */
- hsize_t fs_threshold; /* Free space section threshold */
+ H5F_fspace_strategy_t fs_strategy; /* File space handling strategy */
+ int fs_persist; /* Free space section threshold */
+ long fs_threshold; /* Free space section threshold */
+ long long fs_pagesize; /* File space page size */
} pack_opt_t;
@@ -138,16 +145,15 @@ extern "C" {
int h5repack(const char* infile, const char* outfile, pack_opt_t *options);
int h5repack_addfilter(const char* str, pack_opt_t *options);
int h5repack_addlayout(const char* str, pack_opt_t *options);
-int h5repack_init(pack_opt_t *options, int verbose, hbool_t latest,
- H5F_file_space_type_t strategy, hsize_t threshold);
+int h5repack_init(pack_opt_t *options, int verbose, hbool_t latest);
int h5repack_end(pack_opt_t *options);
int h5repack_verify(const char *in_fname, const char *out_fname, pack_opt_t *options);
int h5repack_cmp_pl(const char *fname1, const char *fname2);
-/* Note: The below copy_named_datatype(), named_datatype_free(), copy_attr()
- * and struct named_dt_t were located in h5repack_copy.c as static prior to
- * bugfix1726.
- * Made shared functions as copy_attr() was needed in h5repack_refs.c.
+/* Note: The below copy_named_datatype(), named_datatype_free(), copy_attr()
+ * and struct named_dt_t were located in h5repack_copy.c as static prior to
+ * bugfix1726.
+ * Made shared functions as copy_attr() was needed in h5repack_refs.c.
* However copy_attr() may be obsoleted when H5Acopy is available and put back
* others to static in h5repack_copy.c.
*/
diff --git a/tools/src/h5repack/h5repack_copy.c b/tools/src/h5repack/h5repack_copy.c
index 547f61a..42fedbc 100644
--- a/tools/src/h5repack/h5repack_copy.c
+++ b/tools/src/h5repack/h5repack_copy.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "h5repack.h"
@@ -33,7 +31,7 @@
*/
/* size of buffer/# of bytes to xfer at a time when copying userblock */
-#define USERBLOCK_XFER_SIZE 512
+#define USERBLOCK_XFER_SIZE 512
/* check H5Dread()/H5Dwrite() error, e.g. memory allocation error inside the library. */
#define CHECK_H5DRW_ERROR(_fun, _fail, _did, _mtid, _msid, _fsid, _pid, _buf) { \
@@ -54,12 +52,12 @@
*-------------------------------------------------------------------------
*/
static int Get_hyperslab(hid_t dcpl_id, int rank_dset, hsize_t dims_dset[],
- size_t size_datum, hsize_t dims_hslab[], hsize_t * hslab_nbytes_p);
+ size_t size_datum, hsize_t dims_hslab[], hsize_t * hslab_nbytes_p);
static void print_dataset_info(hid_t dcpl_id, char *objname, double per, int pr);
static int do_copy_objects(hid_t fidin, hid_t fidout, trav_table_t *travt,
- pack_opt_t *options);
+ pack_opt_t *options);
static int copy_user_block(const char *infile, const char *outfile,
- hsize_t size);
+ hsize_t size);
#if defined (H5REPACK_DEBUG_USER_BLOCK)
static void print_user_block(const char *filename, hid_t fid);
#endif
@@ -67,45 +65,45 @@ static herr_t walk_error_callback(unsigned n, const H5E_error2_t *err_desc, void
/* get the major number from the error stack. */
static herr_t walk_error_callback(H5_ATTR_UNUSED unsigned n, const H5E_error2_t *err_desc, void *udata) {
- if (err_desc)
- *((hid_t *) udata) = err_desc->maj_num;
+ if (err_desc)
+ *((hid_t *) udata) = err_desc->maj_num;
- return 0;
+ return 0;
}
/*-------------------------------------------------------------------------
* Function: copy_objects
*
- * Purpose: duplicate all HDF5 objects in the file
+ * Purpose: duplicate all HDF5 objects in the file
*
- * Return: 0, ok, -1 no
+ * Return: 0, ok,
+ * -1 no
*
* Programmer: Pedro Vicente, pvn@ncsa.uiuc.edu
*
* Date: October, 23, 2003
*
- * Modification:
- * Peter Cao, June 13, 2007
- * Add "-L, --latest" and other options to pack a file with the latest file format
- *
- * Peter Cao, September 25, 2007
- * Copy user block when repacking a file
- *
- * Pedro Vicente, August 20, 2008
- * Add a user block to file if requested
- *
*-------------------------------------------------------------------------
*/
int copy_objects(const char* fnamein, const char* fnameout, pack_opt_t *options)
{
- int ret_value = 0; /*no need to LEAVE() on ERROR: HERR_INIT(int, SUCCEED) */
- hid_t fidin;
- hid_t fidout = -1;
- trav_table_t *travt = NULL;
- hsize_t ub_size = 0; /* size of user block */
- hid_t fcpl = H5P_DEFAULT; /* file creation property list ID */
- hid_t fapl = H5P_DEFAULT; /* file access property list ID */
+ int ret_value = 0; /* no need to LEAVE() on ERROR: HERR_INIT(int, SUCCEED) */
+ hid_t fidin;
+ hid_t fidout = -1;
+ trav_table_t *travt = NULL;
+ hsize_t ub_size = 0; /* size of user block */
+ hid_t fcpl = H5P_DEFAULT; /* file creation property list ID */
+ hid_t fapl = H5P_DEFAULT; /* file access property list ID */
+ H5F_fspace_strategy_t set_strategy; /* Strategy to be set in output file */
+ hbool_t set_persist; /* Persist free-space status to be set in output file */
+ hsize_t set_threshold; /* Free-space section threshold to be set in output file */
+ hsize_t set_pagesize; /* File space page size to be set in output file */
+ H5F_fspace_strategy_t in_strategy; /* Strategy from input file */
+ hbool_t in_persist; /* Persist free-space status from input file */
+ hsize_t in_threshold; /* Free-space section threshold from input file */
+ hsize_t in_pagesize; /* File space page size from input file */
+ unsigned crt_order_flags; /* group creation order flag */
/*-------------------------------------------------------------------------
* open input file
@@ -116,9 +114,11 @@ int copy_objects(const char* fnamein, const char* fnameout, pack_opt_t *options)
HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Pclose failed");
}
- /* get user block size and file space strategy/threshold */
+ /* get user block size and file space strategy/persist/threshold */
{
hid_t fcpl_in; /* file creation property list ID for input file */
+ hid_t grp_in = -1; /* group ID */
+ hid_t gcpl_in = -1; /* group creation property list */
if ((fcpl_in = H5Fget_create_plist(fidin)) < 0) {
error_msg("failed to retrieve file creation property list\n");
@@ -130,19 +130,28 @@ int copy_objects(const char* fnamein, const char* fnameout, pack_opt_t *options)
HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Pclose failed");
}
- if (!options->fs_strategy) {
- if (H5Pget_file_space(fcpl_in, &options->fs_strategy, NULL) < 0) {
- error_msg("failed to retrieve file space strategy\n");
- HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Pclose failed");
- }
+ /* If the -S option is not set, get "strategy" from the input file */
+ if(H5Pget_file_space_strategy(fcpl_in, &in_strategy, &in_persist, &in_threshold) < 0) {
+ error_msg("failed to retrieve file space strategy\n");
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Pclose failed");
}
- if (!options->fs_threshold) {
- if (H5Pget_file_space(fcpl_in, NULL, &options->fs_threshold) < 0) {
- error_msg("failed to retrieve file space threshold\n");
- HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Pclose failed");
- }
+ /* If the -G option is not set, get "pagesize" from the input file */
+ if(H5Pget_file_space_page_size(fcpl_in, &in_pagesize) < 0) {
+ error_msg("failed to retrieve file space page size\n");
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Pclose failed");
}
+ /* open root group */
+ if ((grp_in = H5Gopen2(fidin, "/", H5P_DEFAULT)) < 0)
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Gopen2 failed");
+
+ /* get root group creation property list */
+ if ((gcpl_in = H5Gget_create_plist(grp_in)) < 0)
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Gget_create_plist failed");
+
+ /* query and set the group creation properties */
+ if (H5Pget_link_creation_order(gcpl_in, &crt_order_flags) < 0)
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Pget_link_creation_order failed");
if (H5Pclose(fcpl_in) < 0) {
error_msg("failed to close property list\n");
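The hunk above moves from the removed H5Pget_file_space() call to the HDF5 1.10 file-space API, reading strategy, persist flag, threshold, and page size from the input file's creation property list. A minimal sketch of that query sequence; the wrapper name is illustrative and cleanup on the error paths is omitted:

    #include "hdf5.h"

    /* Sketch: query the file-space settings of an open file. */
    static herr_t query_file_space(hid_t fid)
    {
        H5F_fspace_strategy_t strategy;
        hbool_t               persist;
        hsize_t               threshold;
        hsize_t               pagesize;
        hid_t                 fcpl = H5Fget_create_plist(fid);

        if (H5Pget_file_space_strategy(fcpl, &strategy, &persist, &threshold) < 0)
            return -1;
        if (H5Pget_file_space_page_size(fcpl, &pagesize) < 0)
            return -1;

        return H5Pclose(fcpl);
    }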
@@ -301,7 +310,7 @@ print_user_block(fnamein, fidin);
}
/*-------------------------------------------------------------------------
- * set free-space strategy options
+ * Set file space information
*-------------------------------------------------------------------------
*/
@@ -314,12 +323,47 @@ print_user_block(fnamein, fidin);
}
}
- /* set file space strategy and free space threshold */
- if (H5Pset_file_space(fcpl, options->fs_strategy, options->fs_threshold) < 0) {
- error_msg("failed to set file space strategy & threshold \n");
+ if(H5Pset_link_creation_order(fcpl, crt_order_flags ) < 0)
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Pset_link_creation_order failed");
+
+ /* Set file space info to those from input file */
+ set_strategy = in_strategy;
+ set_persist = in_persist;
+ set_threshold = in_threshold;
+ set_pagesize = in_pagesize;
+
+ if(options->fs_strategy == (H5F_fspace_strategy_t)-1) /* A default strategy is specified by user */
+ set_strategy = FS_STRATEGY_DEF;
+ else if(options->fs_strategy != (H5F_fspace_strategy_t)0) /* Set strategy as specified by user */
+ set_strategy = options->fs_strategy;
+
+ if(options->fs_persist == -1) /* A default "persist" is specified by user */
+ set_persist = FS_PERSIST_DEF;
+ else if(options->fs_persist != 0) /* Set "persist" as specified by user */
+ set_persist = (hbool_t)options->fs_persist;
+
+ if(options->fs_threshold == -1) /* A "0" threshold is specified by user */
+ set_threshold = (hsize_t)0;
+ else if(options->fs_threshold != 0) /* Set threshold as specified by user */
+ set_threshold = (hsize_t)options->fs_threshold;
+
+ /* Set file space information as specified */
+ if(H5Pset_file_space_strategy(fcpl, set_strategy, set_persist, set_threshold) < 0) {
+ error_msg("failed to set file space strategy\n");
HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Pclose failed");
}
+ if(options->fs_pagesize == -1) /* A "0" file space page size is specified by user */
+ set_pagesize = (hsize_t)0;
+ else if(options->fs_pagesize != 0) /* Set file space page size as specified by user */
+ set_pagesize = (hsize_t)options->fs_pagesize;
+
+ if(set_pagesize != FS_PAGESIZE_DEF) /* Set non-default file space page size as specified */
+ if(H5Pset_file_space_page_size(fcpl, set_pagesize) < 0) {
+ error_msg("failed to set file space page size\n");
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Pclose failed");
+ }
+
/*-------------------------------------------------------------------------
* create the output file
*-------------------------------------------------------------------------
@@ -348,6 +392,8 @@ print_user_block(fnamein, fidin);
*-------------------------------------------------------------------------
*/
+ /* Initialize indexing options */
+ h5trav_set_index(sort_by, sort_order);
/* init table */
trav_table_init(&travt);
@@ -464,10 +510,10 @@ done:
int Get_hyperslab(hid_t dcpl_id, int rank_dset, hsize_t dims_dset[],
size_t size_datum, hsize_t dims_hslab[], hsize_t * hslab_nbytes_p)
{
- int ret_value = 0; /*no need to LEAVE() on ERROR: HERR_INIT(int, SUCCEED) */
- int k;
+ int ret_value = 0; /*no need to LEAVE() on ERROR: HERR_INIT(int, SUCCEED) */
+ int k;
H5D_layout_t dset_layout;
- int rank_chunk;
+ int rank_chunk;
hsize_t dims_chunk[H5S_MAX_RANK];
hsize_t size_chunk = 1;
hsize_t nchunk_fit; /* number of chunks that fits in hyperslab buffer (H5TOOLS_BUFSIZE) */
@@ -679,36 +725,36 @@ done:
*/
int do_copy_objects(hid_t fidin, hid_t fidout, trav_table_t *travt,
- pack_opt_t *options) /* repack options */
+ pack_opt_t *options) /* repack options */
{
int ret_value = 0; /*no need to LEAVE() on ERROR: HERR_INIT(int, SUCCEED) */
- hid_t grp_in = -1; /* group ID */
- hid_t grp_out = -1; /* group ID */
- hid_t dset_in = -1; /* read dataset ID */
+ hid_t grp_in = -1; /* group ID */
+ hid_t grp_out = -1; /* group ID */
+ hid_t dset_in = -1; /* read dataset ID */
hid_t dset_out = -1; /* write dataset ID */
- hid_t gcpl_in = -1; /* group creation property list */
+ hid_t gcpl_in = -1; /* group creation property list */
hid_t gcpl_out = -1; /* group creation property list */
- hid_t type_in = -1; /* named type ID */
+ hid_t type_in = -1; /* named type ID */
hid_t type_out = -1; /* named type ID */
- hid_t dcpl_in = -1; /* dataset creation property list ID */
+ hid_t dcpl_in = -1; /* dataset creation property list ID */
hid_t dcpl_out = -1; /* dataset creation property list ID */
hid_t f_space_id = -1; /* file space ID */
hid_t ftype_id = -1; /* file type ID */
hid_t wtype_id = -1; /* read/write type ID */
named_dt_t *named_dt_head = NULL; /* Pointer to the stack of named datatypes copied */
- size_t msize; /* size of type */
- hsize_t nelmts; /* number of elements in dataset */
+ size_t msize; /* size of type */
+ hsize_t nelmts; /* number of elements in dataset */
H5D_space_status_t space_status; /* determines whether space has been allocated for the dataset */
- int rank; /* rank of dataset */
+ int rank; /* rank of dataset */
hsize_t dims[H5S_MAX_RANK];/* dimensions of dataset */
- hsize_t dsize_in; /* input dataset size before filter */
- hsize_t dsize_out; /* output dataset size after filter */
- int apply_s; /* flag for apply filter to small dataset sizes */
- int apply_f; /* flag for apply filter to return error on H5Dcreate */
- void *buf = NULL; /* buffer for raw data */
+ hsize_t dsize_in; /* input dataset size before filter */
+ hsize_t dsize_out; /* output dataset size after filter */
+ int apply_s; /* flag for apply filter to small dataset sizes */
+ int apply_f; /* flag for apply filter to return error on H5Dcreate */
+ void *buf = NULL; /* buffer for raw data */
void *hslab_buf = NULL; /* hyperslab buffer for raw data */
- int has_filter; /* current object has a filter */
- int req_filter; /* there was a request for a filter */
+ int has_filter; /* current object has a filter */
+ int req_filter; /* there was a request for a filter */
int req_obj_layout = 0; /* request layout to current object */
unsigned crt_order_flags; /* group creation order flag */
unsigned i;
@@ -889,7 +935,7 @@ int do_copy_objects(hid_t fidin, hid_t fidout, trav_table_t *travt,
/* wtype_id will have already been set if using a named dtype */
if (!is_named) {
if (options->use_native == 1)
- wtype_id = h5tools_get_native_type(ftype_id);
+ wtype_id = H5Tget_native_type(ftype_id, H5T_DIR_DEFAULT);
else
wtype_id = H5Tcopy(ftype_id);
} /* end if */
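The hunk above switches the tool from the internal h5tools_get_native_type() helper to the public H5Tget_native_type() when the native-type option (-n) is given. As a rough illustration of that pattern outside the tool (the dataset name and buffer sizing are the caller's concern, so treat this as a sketch):

    #include "hdf5.h"

    /* Sketch only: pick the in-memory type the way the hunk above now does it. */
    herr_t read_as_native(hid_t fid, const char *dset_name, void *buf)
    {
        herr_t ret      = -1;
        hid_t  dset_id  = H5Dopen2(fid, dset_name, H5P_DEFAULT);
        hid_t  ftype_id = -1;
        hid_t  wtype_id = -1;

        if (dset_id < 0)
            return -1;
        if ((ftype_id = H5Dget_type(dset_id)) >= 0 &&
            (wtype_id = H5Tget_native_type(ftype_id, H5T_DIR_DEFAULT)) >= 0)
            /* caller must size buf for the native type; not checked here */
            ret = H5Dread(dset_id, wtype_id, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf);

        if (wtype_id >= 0) H5Tclose(wtype_id);
        if (ftype_id >= 0) H5Tclose(ftype_id);
        H5Dclose(dset_id);
        return ret;
    }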
@@ -1323,20 +1369,19 @@ done:
*
*-------------------------------------------------------------------------
*/
-static void print_dataset_info(hid_t dcpl_id, char *objname, double ratio,
- int pr)
+static void print_dataset_info(hid_t dcpl_id, char *objname, double ratio, int pr)
{
- char strfilter[255];
+ char strfilter[255];
#if defined (PRINT_DEBUG )
- char temp[255];
+ char temp[255];
#endif
- int nfilters; /* number of filters */
- unsigned filt_flags; /* filter flags */
- H5Z_filter_t filtn; /* filter identification number */
- unsigned cd_values[20]; /* filter client data values */
- size_t cd_nelmts; /* filter client number of values */
- char f_objname[256]; /* filter objname */
- int i;
+ int nfilters; /* number of filters */
+ unsigned filt_flags; /* filter flags */
+ H5Z_filter_t filtn; /* filter identification number */
+ unsigned cd_values[20]; /* filter client data values */
+ size_t cd_nelmts; /* filter client number of values */
+ char f_objname[256]; /* filter objname */
+ int i;
HDstrcpy(strfilter, "\0");
@@ -1435,10 +1480,9 @@ static void print_dataset_info(hid_t dcpl_id, char *objname, double ratio,
*
*-------------------------------------------------------------------------
*/
-static int copy_user_block(const char *infile, const char *outfile,
- hsize_t size)
+static int copy_user_block(const char *infile, const char *outfile, hsize_t size)
{
- int ret_value = 0; /*no need to LEAVE() on ERROR: HERR_INIT(int, SUCCEED) */
+ int ret_value = 0; /*no need to LEAVE() on ERROR: HERR_INIT(int, SUCCEED) */
int infid = -1, outfid = -1; /* File descriptors */
/* User block must be any power of 2 equal to 512 or greater (512, 1024, 2048, etc.) */
@@ -1515,67 +1559,67 @@ done:
static
void print_user_block(const char *filename, hid_t fid)
{
- int ret_value = 0; /*no need to LEAVE() on ERROR: HERR_INIT(int, SUCCEED) */
- int fh; /* file handle */
- hsize_t ub_size; /* user block size */
- hsize_t size; /* size read */
- hid_t fcpl; /* file creation property list ID for HDF5 file */
- int i;
-
- /* get user block size */
- if(( fcpl = H5Fget_create_plist(fid)) < 0) {
- error_msg("failed to retrieve file creation property list\n");
+ int ret_value = 0; /*no need to LEAVE() on ERROR: HERR_INIT(int, SUCCEED) */
+ int fh; /* file handle */
+ hsize_t ub_size; /* user block size */
+ hsize_t size; /* size read */
+ hid_t fcpl; /* file creation property list ID for HDF5 file */
+ int i;
+
+ /* get user block size */
+ if(( fcpl = H5Fget_create_plist(fid)) < 0) {
+ error_msg("failed to retrieve file creation property list\n");
HGOTO_ERROR(H5E_tools_g, H5E_tools_min_id_g, "H5Fget_create_plist failed");
}
- if(H5Pget_userblock(fcpl, &ub_size) < 0) {
- error_msg("failed to retrieve userblock size\n");
+ if(H5Pget_userblock(fcpl, &ub_size) < 0) {
+ error_msg("failed to retrieve userblock size\n");
HGOTO_ERROR(H5E_tools_g, H5E_tools_min_id_g, "H5Pget_userblock failed");
}
- if(H5Pclose(fcpl) < 0) {
- error_msg("failed to close property list\n");
+ if(H5Pclose(fcpl) < 0) {
+ error_msg("failed to close property list\n");
HGOTO_ERROR(H5E_tools_g, H5E_tools_min_id_g, "H5Pclose failed");
}
- /* open file */
- if((fh = HDopen(filename, O_RDONLY, 0)) < 0) {
+ /* open file */
+ if((fh = HDopen(filename, O_RDONLY, 0)) < 0) {
HGOTO_ERROR(H5E_tools_g, H5E_tools_min_id_g, "HDopen failed");
}
- size = ub_size;
+ size = ub_size;
- /* read file */
- while(size > 0) {
- ssize_t nread; /* # of bytes read */
- char rbuf[USERBLOCK_XFER_SIZE]; /* buffer for reading */
+ /* read file */
+ while(size > 0) {
+ ssize_t nread; /* # of bytes read */
+ char rbuf[USERBLOCK_XFER_SIZE]; /* buffer for reading */
- /* read buffer */
- if(size > USERBLOCK_XFER_SIZE)
- nread = HDread(fh, rbuf, (size_t)USERBLOCK_XFER_SIZE);
- else
- nread = HDread(fh, rbuf, (size_t)size);
+ /* read buffer */
+ if(size > USERBLOCK_XFER_SIZE)
+ nread = HDread(fh, rbuf, (size_t)USERBLOCK_XFER_SIZE);
+ else
+ nread = HDread(fh, rbuf, (size_t)size);
- for(i = 0; i < nread; i++) {
+ for(i = 0; i < nread; i++) {
- printf("%c ", rbuf[i]);
+ printf("%c ", rbuf[i]);
- }
- printf("\n");
+ }
+ printf("\n");
- if(nread < 0) {
+ if(nread < 0) {
HGOTO_ERROR(H5E_tools_g, H5E_tools_min_id_g, "nread < 0");
}
- /* update size of userblock left to transfer */
- size -= nread;
- }
+ /* update size of userblock left to transfer */
+ size -= nread;
+ }
done:
- if(fh > 0)
- HDclose(fh);
+ if(fh > 0)
+ HDclose(fh);
- return;
+ return;
}
#endif
diff --git a/tools/src/h5repack/h5repack_filters.c b/tools/src/h5repack/h5repack_filters.c
index e21b829..804727b 100644
--- a/tools/src/h5repack/h5repack_filters.c
+++ b/tools/src/h5repack/h5repack_filters.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "h5repack.h"
diff --git a/tools/src/h5repack/h5repack_main.c b/tools/src/h5repack/h5repack_main.c
index 657e1a9..31b1e14 100644
--- a/tools/src/h5repack/h5repack_main.c
+++ b/tools/src/h5repack/h5repack_main.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "h5tools.h"
@@ -33,7 +31,7 @@ const char *outfile = NULL;
* Command-line options: The user can specify short or long-named
* parameters.
*/
-static const char *s_opts = "hVvf:l:m:e:nLc:d:s:u:b:M:t:a:i:o:S:T:E";
+static const char *s_opts = "hVvf:l:m:e:nLc:d:s:u:b:M:t:a:i:o:S:P:T:G:q:z:E";
static struct long_options l_opts[] = {
{ "help", no_arg, 'h' },
{ "version", no_arg, 'V' },
@@ -52,10 +50,14 @@ static struct long_options l_opts[] = {
{ "metadata_block_size", require_arg, 'M' },
{ "threshold", require_arg, 't' },
{ "alignment", require_arg, 'a' },
- { "infile", require_arg, 'i' }, /* -i for backward compability */
- { "outfile", require_arg, 'o' }, /* -o for backward compability */
+ { "infile", require_arg, 'i' }, /* -i for backward compability */
+ { "outfile", require_arg, 'o' }, /* -o for backward compability */
{ "fs_strategy", require_arg, 'S' },
+ { "fs_persist", require_arg, 'P' },
{ "fs_threshold", require_arg, 'T' },
+ { "fs_pagesize", require_arg, 'G' },
+ { "sort_by", require_arg, 'q' },
+ { "sort_order", require_arg, 'z' },
{ "enable-error-stack", no_arg, 'E' },
{ NULL, 0, '\0' }
};
@@ -90,10 +92,14 @@ static void usage(const char *prog) {
PRINTVALSTREAM(rawoutstream, " -M A, --metadata_block_size=A Metadata block size for H5Pset_meta_block_size\n");
PRINTVALSTREAM(rawoutstream, " -t T, --threshold=T Threshold value for H5Pset_alignment\n");
PRINTVALSTREAM(rawoutstream, " -a A, --alignment=A Alignment value for H5Pset_alignment\n");
+ PRINTVALSTREAM(rawoutstream, " -q Q, --sort_by=Q Sort groups and attributes by index Q\n");
+ PRINTVALSTREAM(rawoutstream, " -z Z, --sort_order=Z Sort groups and attributes by order Z\n");
PRINTVALSTREAM(rawoutstream, " -f FILT, --filter=FILT Filter type\n");
PRINTVALSTREAM(rawoutstream, " -l LAYT, --layout=LAYT Layout type\n");
- PRINTVALSTREAM(rawoutstream, " -S FS_STRGY, --fs_strategy=FS_STRGY File space management strategy\n");
- PRINTVALSTREAM(rawoutstream, " -T FS_THRD, --fs_threshold=FS_THRD Free-space section threshold\n");
+ PRINTVALSTREAM(rawoutstream, " -S FS_STRATEGY, --fs_strategy=FS_STRATEGY File space management strategy for H5Pset_file_space_strategy\n");
+ PRINTVALSTREAM(rawoutstream, " -P FS_PERSIST, --fs_persist=FS_PERSIST Persisting or not persisting free-space for H5Pset_file_space_strategy\n");
+ PRINTVALSTREAM(rawoutstream, " -T FS_THRESHOLD, --fs_threshold=FS_THRESHOLD Free-space section threshold for H5Pset_file_space_strategy\n");
+ PRINTVALSTREAM(rawoutstream, " -G FS_PAGESIZE, --fs_pagesize=FS_PAGESIZE File space page size for H5Pset_file_space_page_size\n");
PRINTVALSTREAM(rawoutstream, "\n");
PRINTVALSTREAM(rawoutstream, " M - is an integer greater than 1, size of dataset in bytes (default is 0) \n");
PRINTVALSTREAM(rawoutstream, " E - is a filename.\n");
@@ -101,6 +107,8 @@ static void usage(const char *prog) {
PRINTVALSTREAM(rawoutstream, " U - is a filename.\n");
PRINTVALSTREAM(rawoutstream, " T - is an integer\n");
PRINTVALSTREAM(rawoutstream, " A - is an integer greater than zero\n");
+ PRINTVALSTREAM(rawoutstream, " Q - is the sort index type for the input file. It can be \"name\" or \"creation_order\" (default)\n");
+ PRINTVALSTREAM(rawoutstream, " Z - is the sort order type for the input file. It can be \"descending\" or \"ascending\" (default)\n");
PRINTVALSTREAM(rawoutstream, " B - is the user block size, any value that is 512 or greater and is\n");
PRINTVALSTREAM(rawoutstream, " a power of 2 (1024 default)\n");
PRINTVALSTREAM(rawoutstream, " F - is the shared object header message type, any of <dspace|dtype|fill|\n");
@@ -109,18 +117,27 @@ static void usage(const char *prog) {
PRINTVALSTREAM(rawoutstream, " --enable-error-stack Prints messages from the HDF5 error stack as they\n");
PRINTVALSTREAM(rawoutstream, " occur.\n");
PRINTVALSTREAM(rawoutstream, "\n");
- PRINTVALSTREAM(rawoutstream, " FS_STRGY is the file space management strategy to use for the output file.\n");
- PRINTVALSTREAM(rawoutstream, " It is a string as listed below:\n");
- PRINTVALSTREAM(rawoutstream, " ALL_PERSIST - Use persistent free-space managers, aggregators and virtual file driver\n");
- PRINTVALSTREAM(rawoutstream, " for file space allocation\n");
- PRINTVALSTREAM(rawoutstream, " ALL - Use non-persistent free-space managers, aggregators and virtual file driver\n");
- PRINTVALSTREAM(rawoutstream, " for file space allocation\n");
- PRINTVALSTREAM(rawoutstream, " AGGR_VFD - Use aggregators and virtual file driver for file space allocation\n");
- PRINTVALSTREAM(rawoutstream, " VFD - Use virtual file driver for file space allocation\n");
+ PRINTVALSTREAM(rawoutstream, " FS_STRATEGY is a string indicating the file space strategy used:\n");
+ PRINTVALSTREAM(rawoutstream, " FSM_AGGR:\n");
+ PRINTVALSTREAM(rawoutstream, " The mechanisms used in managing file space are free-space managers, aggregators and virtual file driver.\n");
+ PRINTVALSTREAM(rawoutstream, " PAGE:\n");
+ PRINTVALSTREAM(rawoutstream, " The mechanisms used in managing file space are free-space managers with embedded paged aggregation and virtual file driver.\n");
+ PRINTVALSTREAM(rawoutstream, " AGGR:\n");
+ PRINTVALSTREAM(rawoutstream, " The mechanisms used in managing file space are aggregators and virtual file driver.\n");
+ PRINTVALSTREAM(rawoutstream, " NONE:\n");
+ PRINTVALSTREAM(rawoutstream, " The mechanisms used in managing file space are virtual file driver.\n");
+ PRINTVALSTREAM(rawoutstream, " The default strategy when not set is FSM_AGGR without persisting free-space.\n");
PRINTVALSTREAM(rawoutstream, "\n");
- PRINTVALSTREAM(rawoutstream, " FS_THRD is the free-space section threshold to use for the output file.\n");
- PRINTVALSTREAM(rawoutstream, " It is the minimum size (in bytes) of free-space sections to be tracked\n");
- PRINTVALSTREAM(rawoutstream, " by the the library's free-space managers.\n");
+ PRINTVALSTREAM(rawoutstream, " FS_PERSIST is 1 to persisting free-space or 0 to not persisting free-space.\n");
+ PRINTVALSTREAM(rawoutstream, " The default when not set is not persisting free-space.\n");
+ PRINTVALSTREAM(rawoutstream, " The value is ignored for AGGR and NONE strategies.\n");
+ PRINTVALSTREAM(rawoutstream, "\n");
+ PRINTVALSTREAM(rawoutstream, " FS_THRESHOLD is the minimum size (in bytes) of free-space sections to be tracked by the library.\n");
+ PRINTVALSTREAM(rawoutstream, " The default when not set is 1.\n");
+ PRINTVALSTREAM(rawoutstream, " The value is ignored for AGGR and NONE strategies.\n");
+ PRINTVALSTREAM(rawoutstream, "\n");
+ PRINTVALSTREAM(rawoutstream, " FS_PAGESIZE is the size (in bytes) >=512 that is used by the library when the file space strategy PAGE is used.\n");
+ PRINTVALSTREAM(rawoutstream, " The default when not set is 4096.\n");
PRINTVALSTREAM(rawoutstream, "\n");
PRINTVALSTREAM(rawoutstream, " FILT - is a string with the format:\n");
PRINTVALSTREAM(rawoutstream, "\n");
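For orientation, the new help text above means an invocation such as h5repack -S PAGE -P 1 -G 8192 in.h5 out.h5 (the values here are only examples) asks for paged aggregation with persisted free space and 8192-byte pages. A compact, hypothetical way to translate the four strategy keywords into H5F_fspace_strategy_t values — the tool itself uses an if/else chain later in this diff — might look like:

    #include <stdio.h>
    #include <string.h>
    #include "hdf5.h"

    /* Hypothetical helper: translate a -S keyword into the library enum.
     * Returns H5F_FSPACE_STRATEGY_NTYPES when the keyword is unknown. */
    static H5F_fspace_strategy_t strategy_from_keyword(const char *kw)
    {
        static const struct { const char *name; H5F_fspace_strategy_t val; } tbl[] = {
            { "FSM_AGGR", H5F_FSPACE_STRATEGY_FSM_AGGR },
            { "PAGE",     H5F_FSPACE_STRATEGY_PAGE     },
            { "AGGR",     H5F_FSPACE_STRATEGY_AGGR     },
            { "NONE",     H5F_FSPACE_STRATEGY_NONE     },
        };
        size_t i;

        for (i = 0; i < sizeof(tbl) / sizeof(tbl[0]); i++)
            if (strcmp(kw, tbl[i].name) == 0)
                return tbl[i].val;
        return H5F_FSPACE_STRATEGY_NTYPES;
    }

    int main(void)
    {
        printf("PAGE -> %d\n", (int)strategy_from_keyword("PAGE"));
        return 0;
    }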
@@ -208,14 +225,12 @@ static void usage(const char *prog) {
* Programmer: Quincey Koziol
* Saturday, 31. January 2004
*
- * Modifications:
- *
*-------------------------------------------------------------------------
*/
-static void leave(int ret) {
- h5tools_close();
-
- HDexit(ret);
+static void leave(int ret)
+{
+ h5tools_close();
+ HDexit(ret);
}
/*-------------------------------------------------------------------------
@@ -231,11 +246,10 @@ static void leave(int ret) {
*
*-------------------------------------------------------------------------
*/
-
static
-int read_info(const char *filename, pack_opt_t *options) {
-
- char stype[10];
+int read_info(const char *filename, pack_opt_t *options)
+{
+ char stype[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
char comp_info[1024];
FILE *fp = NULL;
char c;
@@ -362,216 +376,294 @@ done:
}
/*-------------------------------------------------------------------------
+ * Function: set_sort_by
+ *
+ * Purpose: set the "by" form of sorting by translating from a string input
+ * parameter to a H5_index_t return value
+ * current sort values are [creation_order | name]
+ *
+ * Return: H5_index_t form of sort or H5_INDEX_UNKNOWN if none found
+ *-------------------------------------------------------------------------
+ */
+static H5_index_t
+set_sort_by(const char *form)
+{
+ H5_index_t idx_type = H5_INDEX_UNKNOWN;
+
+ if (HDstrcmp(form,"name")==0) /* H5_INDEX_NAME */
+ idx_type = H5_INDEX_NAME;
+ else if (HDstrcmp(form,"creation_order")==0) /* H5_INDEX_CRT_ORDER */
+ idx_type = H5_INDEX_CRT_ORDER;
+
+ return idx_type;
+}
+
+/*-------------------------------------------------------------------------
+ * Function: set_sort_order
+ *
+ * Purpose: set the order of sorting by translating from a string input
+ * parameter to a H5_iter_order_t return value
+ * current order values are [ascending | descending ]
+ *
+ * Return: H5_iter_order_t form of order or H5_ITER_UNKNOWN if none found
+ *-------------------------------------------------------------------------
+ */
+static H5_iter_order_t
+set_sort_order(const char *form)
+{
+ H5_iter_order_t iter_order = H5_ITER_UNKNOWN;
+
+ if (HDstrcmp(form,"ascending")==0) /* H5_ITER_INC */
+ iter_order = H5_ITER_INC;
+ else if (HDstrcmp(form,"descending")==0) /* H5_ITER_DEC */
+ iter_order = H5_ITER_DEC;
+
+ return iter_order;
+}
+
+/*-------------------------------------------------------------------------
* Function: parse_command_line
*
* Purpose: parse command line input
*
*-------------------------------------------------------------------------
*/
-
static
-int parse_command_line(int argc, const char **argv, pack_opt_t* options) {
-
- int opt;
- int ret_value = 0;
-
- /* parse command line options */
- while ((opt = get_option(argc, argv, s_opts, l_opts)) != EOF) {
- switch ((char) opt) {
-
- /* -i for backward compability */
- case 'i':
- infile = opt_arg;
- has_i_o = 1;
- break;
-
- /* -o for backward compability */
- case 'o':
- outfile = opt_arg;
- has_i_o = 1;
- break;
-
- case 'h':
- usage(h5tools_getprogname());
- h5tools_setstatus(EXIT_SUCCESS);
- ret_value = -1;
- goto done;
-
- case 'V':
- print_version(h5tools_getprogname());
- h5tools_setstatus(EXIT_SUCCESS);
- ret_value = -1;
- goto done;
-
- case 'v':
- options->verbose = 1;
- break;
-
- case 'f':
- /* parse the -f filter option */
- if (h5repack_addfilter(opt_arg, options) < 0) {
- error_msg("in parsing filter\n");
- h5tools_setstatus(EXIT_FAILURE);
- ret_value = -1;
- goto done;
- }
- break;
-
- case 'l':
- /* parse the -l layout option */
- if (h5repack_addlayout(opt_arg, options) < 0) {
- error_msg("in parsing layout\n");
- h5tools_setstatus(EXIT_FAILURE);
- ret_value = -1;
- goto done;
- }
- break;
-
- case 'm':
- options->min_comp = HDstrtoull(opt_arg , NULL, 0);
- if ((int) options->min_comp <= 0) {
- error_msg("invalid minimum compress size <%s>\n", opt_arg);
- h5tools_setstatus(EXIT_FAILURE);
- ret_value = -1;
- goto done;
- }
- break;
-
- case 'e':
- ret_value = read_info(opt_arg, options);
- if (ret_value < 0)
- goto done;
- break;
-
- case 'n':
- options->use_native = 1;
- break;
-
- case 'L':
- options->latest = TRUE;
- break;
-
- case 'c':
- options->grp_compact = HDatoi( opt_arg );
- if (options->grp_compact > 0)
- options->latest = TRUE; /* must use latest format */
- break;
-
- case 'd':
- options->grp_indexed = HDatoi( opt_arg );
- if (options->grp_indexed > 0)
- options->latest = TRUE; /* must use latest format */
- break;
-
- case 's':
- {
- int idx = 0;
- int ssize = 0;
- char *msgPtr = HDstrchr( opt_arg, ':');
- options->latest = TRUE; /* must use latest format */
- if (msgPtr == NULL) {
- ssize = HDatoi( opt_arg );
- for (idx = 0; idx < 5; idx++)
- options->msg_size[idx] = ssize;
- }
- else {
- char msgType[10];
- HDstrcpy(msgType, msgPtr + 1);
- msgPtr[0] = '\0';
- ssize = HDatoi( opt_arg );
- if (HDstrncmp(msgType, "dspace",6) == 0) {
- options->msg_size[0] = ssize;
- }
- else if (HDstrncmp(msgType, "dtype", 5) == 0) {
- options->msg_size[1] = ssize;
- }
- else if (HDstrncmp(msgType, "fill", 4) == 0) {
- options->msg_size[2] = ssize;
- }
- else if (HDstrncmp(msgType, "pline", 5) == 0) {
- options->msg_size[3] = ssize;
- }
- else if (HDstrncmp(msgType, "attr", 4) == 0) {
- options->msg_size[4] = ssize;
- }
- }
- }
- break;
-
- case 'u':
- options->ublock_filename = opt_arg;
- break;
-
- case 'b':
- options->ublock_size = (hsize_t) HDatol( opt_arg );
- break;
-
- case 'M':
- options->meta_block_size = (hsize_t) HDatol( opt_arg );
- break;
-
- case 't':
- options->threshold = (hsize_t) HDatol( opt_arg );
- break;
-
- case 'a':
- options->alignment = HDstrtoull(opt_arg , NULL, 0);
- if (options->alignment < 1) {
- error_msg("invalid alignment size\n", opt_arg);
- h5tools_setstatus(EXIT_FAILURE);
- ret_value = -1;
- goto done;
- }
- break;
-
- case 'S':
- {
- char strategy[MAX_NC_NAME];
-
- HDstrcpy(strategy, opt_arg);
- if (!HDstrcmp(strategy, "ALL_PERSIST"))
- options->fs_strategy = H5F_FILE_SPACE_ALL_PERSIST;
- else if (!HDstrcmp(strategy, "ALL"))
- options->fs_strategy = H5F_FILE_SPACE_ALL;
- else if (!HDstrcmp(strategy, "AGGR_VFD"))
- options->fs_strategy = H5F_FILE_SPACE_AGGR_VFD;
- else if (!HDstrcmp(strategy, "VFD"))
- options->fs_strategy = H5F_FILE_SPACE_VFD;
- else {
- error_msg("invalid file space management strategy\n", opt_arg);
- h5tools_setstatus(EXIT_FAILURE);
- ret_value = -1;
- goto done;
- }
- }
- break;
-
- case 'T':
- options->fs_threshold = (hsize_t) HDatol( opt_arg );
- break;
-
- case 'E':
+int parse_command_line(int argc, const char **argv, pack_opt_t* options)
+{
+ int opt;
+ int ret_value = 0;
+
+ /* parse command line options */
+ while ((opt = get_option(argc, argv, s_opts, l_opts)) != EOF) {
+ switch ((char) opt) {
+
+ /* -i for backward compatibility */
+ case 'i':
+ infile = opt_arg;
+ has_i_o = 1;
+ break;
+
+ /* -o for backward compatibility */
+ case 'o':
+ outfile = opt_arg;
+ has_i_o = 1;
+ break;
+
+ case 'h':
+ usage(h5tools_getprogname());
+ h5tools_setstatus(EXIT_SUCCESS);
+ ret_value = -1;
+ goto done;
+
+ case 'V':
+ print_version(h5tools_getprogname());
+ h5tools_setstatus(EXIT_SUCCESS);
+ ret_value = -1;
+ goto done;
+
+ case 'v':
+ options->verbose = 1;
+ break;
+
+ case 'f':
+ /* parse the -f filter option */
+ if (h5repack_addfilter(opt_arg, options) < 0) {
+ error_msg("in parsing filter\n");
+ h5tools_setstatus(EXIT_FAILURE);
+ ret_value = -1;
+ goto done;
+ }
+ break;
+
+ case 'l':
+ /* parse the -l layout option */
+ if (h5repack_addlayout(opt_arg, options) < 0) {
+ error_msg("in parsing layout\n");
+ h5tools_setstatus(EXIT_FAILURE);
+ ret_value = -1;
+ goto done;
+ }
+ break;
+
+ case 'm':
+ options->min_comp = HDstrtoull(opt_arg , NULL, 0);
+ if ((int) options->min_comp <= 0) {
+ error_msg("invalid minimum compress size <%s>\n", opt_arg);
+ h5tools_setstatus(EXIT_FAILURE);
+ ret_value = -1;
+ goto done;
+ }
+ break;
+
+ case 'e':
+ ret_value = read_info(opt_arg, options);
+ if (ret_value < 0)
+ goto done;
+ break;
+
+ case 'n':
+ options->use_native = 1;
+ break;
+
+ case 'L':
+ options->latest = TRUE;
+ break;
+
+ case 'c':
+ options->grp_compact = HDatoi( opt_arg );
+ if (options->grp_compact > 0)
+ options->latest = TRUE; /* must use latest format */
+ break;
+
+ case 'd':
+ options->grp_indexed = HDatoi( opt_arg );
+ if (options->grp_indexed > 0)
+ options->latest = TRUE; /* must use latest format */
+ break;
+
+ case 's':
+ {
+ int idx = 0;
+ int ssize = 0;
+ char *msgPtr = HDstrchr( opt_arg, ':');
+ options->latest = TRUE; /* must use latest format */
+ if (msgPtr == NULL) {
+ ssize = HDatoi( opt_arg );
+ for (idx = 0; idx < 5; idx++)
+ options->msg_size[idx] = ssize;
+ }
+ else {
+ char msgType[10];
+
+ HDstrcpy(msgType, msgPtr + 1);
+ msgPtr[0] = '\0';
+ ssize = HDatoi( opt_arg );
+ if (HDstrncmp(msgType, "dspace",6) == 0)
+ options->msg_size[0] = ssize;
+ else if (HDstrncmp(msgType, "dtype", 5) == 0)
+ options->msg_size[1] = ssize;
+ else if (HDstrncmp(msgType, "fill", 4) == 0)
+ options->msg_size[2] = ssize;
+ else if (HDstrncmp(msgType, "pline", 5) == 0)
+ options->msg_size[3] = ssize;
+ else if (HDstrncmp(msgType, "attr", 4) == 0)
+ options->msg_size[4] = ssize;
+ }
+ }
+ break;
+
+ case 'u':
+ options->ublock_filename = opt_arg;
+ break;
+
+ case 'b':
+ options->ublock_size = (hsize_t) HDatol( opt_arg );
+ break;
+
+ case 'M':
+ options->meta_block_size = (hsize_t) HDatol( opt_arg );
+ break;
+
+ case 't':
+ options->threshold = (hsize_t) HDatol( opt_arg );
+ break;
+
+ case 'a':
+ options->alignment = HDstrtoull(opt_arg , NULL, 0);
+ if (options->alignment < 1) {
+ error_msg("invalid alignment size\n", opt_arg);
+ h5tools_setstatus(EXIT_FAILURE);
+ ret_value = -1;
+ goto done;
+ }
+ break;
+
+ case 'S':
+ {
+ char strategy[MAX_NC_NAME];
+
+ HDstrcpy(strategy, opt_arg);
+ if(!HDstrcmp(strategy, "FSM_AGGR"))
+ options->fs_strategy = H5F_FSPACE_STRATEGY_FSM_AGGR;
+ else if(!HDstrcmp(strategy, "PAGE"))
+ options->fs_strategy = H5F_FSPACE_STRATEGY_PAGE;
+ else if(!HDstrcmp(strategy, "AGGR"))
+ options->fs_strategy = H5F_FSPACE_STRATEGY_AGGR;
+ else if(!HDstrcmp(strategy, "NONE"))
+ options->fs_strategy = H5F_FSPACE_STRATEGY_NONE;
+ else {
+ error_msg("invalid file space management strategy\n", opt_arg);
+ h5tools_setstatus(EXIT_FAILURE);
+ ret_value = -1;
+ goto done;
+ }
+ if(options->fs_strategy == (H5F_fspace_strategy_t)0)
+ /* To distinguish the "specified" zero value */
+ options->fs_strategy = (H5F_fspace_strategy_t)-1;
+ }
+ break;
+
+ case 'P':
+ options->fs_persist = HDatoi(opt_arg);
+ if(options->fs_persist == 0)
+ /* To distinguish the "specified" zero value */
+ options->fs_persist = -1;
+ break;
+
+ case 'T':
+ options->fs_threshold = HDatol(opt_arg);
+ if(options->fs_threshold == 0)
+ /* To distinguish the "specified" zero value */
+ options->fs_threshold = -1;
+ break;
+
+ case 'G':
+ options->fs_pagesize = HDstrtoll(opt_arg, NULL, 0);
+ if(options->fs_pagesize == 0)
+ /* To distinguish the "specified" zero value */
+ options->fs_pagesize = -1;
+ break;
+
+ case 'q':
+ if((sort_by = set_sort_by(opt_arg)) < 0) {
+ error_msg(" failed to set sort by form <%s>\n", opt_arg);
+ h5tools_setstatus(EXIT_FAILURE);
+ ret_value = -1;
+ goto done;
+ }
+ break;
+
+ case 'z':
+ if((sort_order = set_sort_order(opt_arg)) < 0) {
+ error_msg(" failed to set sort order form <%s>\n", opt_arg);
+ h5tools_setstatus(EXIT_FAILURE);
+ ret_value = -1;
+ goto done;
+ }
+ break;
+
+ case 'E':
enable_error_stack = TRUE;
break;
- default:
- break;
+ default:
+ break;
} /* switch */
-
} /* while */
if (has_i_o == 0) {
- /* check for file names to be processed */
- if (argc <= opt_ind || argv[opt_ind + 1] == NULL) {
- error_msg("missing file names\n");
- usage(h5tools_getprogname());
- h5tools_setstatus(EXIT_FAILURE);
- ret_value = -1;
- }
- }
+ /* check for file names to be processed */
+ if (argc <= opt_ind || argv[opt_ind + 1] == NULL) {
+ error_msg("missing file names\n");
+ usage(h5tools_getprogname());
+ h5tools_setstatus(EXIT_FAILURE);
+ ret_value = -1;
+ }
+ }
done:
- return ret_value;
+ return ret_value;
}
/*-------------------------------------------------------------------------
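A note on the parsing convention above: parse_command_line() stores -1 into the fs_* option fields when the user explicitly passes 0, so that a plain 0 can keep meaning "option not given"; the copy code at the top of this diff then turns -1 back into the real value 0. A stripped-down, hypothetical sketch of that encode/decode pair (names invented for illustration):

    #include <assert.h>

    /* Hypothetical illustration of the "-1 means the user asked for 0" convention.
     * A stored 0 still means "option not given on the command line". */
    #define SPECIFIED_ZERO (-1L)

    long encode_opt(long user_value)   /* what the option parser stores */
    {
        return (user_value == 0) ? SPECIFIED_ZERO : user_value;
    }

    long decode_opt(long stored)       /* what the copy code applies */
    {
        return (stored == SPECIFIED_ZERO) ? 0 : stored;
    }

    int main(void)
    {
        assert(encode_opt(0) == SPECIFIED_ZERO);   /* "-T 0" is distinguishable from no -T  */
        assert(decode_opt(SPECIFIED_ZERO) == 0);   /* ...and is later applied as 0          */
        assert(encode_opt(4096) == 4096);          /* non-zero values pass straight through */
        return 0;
    }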
@@ -591,16 +683,16 @@ done:
*
*-------------------------------------------------------------------------
*/
-int main(int argc, const char **argv) {
+int main(int argc, const char **argv)
+{
+ pack_opt_t options; /*the global options */
H5E_auto2_t func;
H5E_auto2_t tools_func;
void *edata;
void *tools_edata;
- pack_opt_t options; /*the global options */
-
- h5tools_setprogname(PROGRAMNAME);
- h5tools_setstatus(EXIT_SUCCESS);
+ h5tools_setprogname(PROGRAMNAME);
+ h5tools_setstatus(EXIT_SUCCESS);
/* Disable error reporting */
H5Eget_auto2(H5E_DEFAULT, &func, &edata);
@@ -620,7 +712,10 @@ int main(int argc, const char **argv) {
}
/* initialize options */
- h5repack_init(&options, 0, FALSE, H5F_FILE_SPACE_DEFAULT, (hsize_t) 0);
+ h5repack_init(&options, 0, FALSE);
+
+ /* Initialize default indexing options */
+ sort_by = H5_INDEX_CRT_ORDER;
if (parse_command_line(argc, argv, &options) < 0)
goto done;
@@ -653,12 +748,12 @@ int main(int argc, const char **argv) {
}
/* pack it */
- h5tools_setstatus(h5repack(infile, outfile, &options));
+ h5tools_setstatus(h5repack(infile, outfile, &options));
done:
- /* free tables */
- h5repack_end(&options);
+ /* free tables */
+ h5repack_end(&options);
- leave(h5tools_getstatus());
+ leave(h5tools_getstatus());
}
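main() above seeds sort_by with H5_INDEX_CRT_ORDER before parsing, and the -q/-z handlers translate the user's strings through set_sort_by()/set_sort_order(). A sketch of how such an index/order pair is typically consumed when iterating attributes; the H5Aopen_by_idx() call mirrors the one used in h5repack_refs.c below, while the attribute count and loop body are illustrative:

    #include "hdf5.h"

    /* Sketch: walk an object's attributes in the chosen index/order. */
    herr_t visit_attrs(hid_t loc, H5_index_t idx_type, H5_iter_order_t order, hsize_t nattrs)
    {
        hsize_t u;

        for (u = 0; u < nattrs; u++) {
            hid_t attr_id = H5Aopen_by_idx(loc, ".", idx_type, order, u, H5P_DEFAULT, H5P_DEFAULT);

            if (attr_id < 0)
                return -1;
            /* ... read or copy the attribute here ... */
            H5Aclose(attr_id);
        }
        return 0;
    }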
diff --git a/tools/src/h5repack/h5repack_opttable.c b/tools/src/h5repack/h5repack_opttable.c
index 8c98b76..93e34b3 100644
--- a/tools/src/h5repack/h5repack_opttable.c
+++ b/tools/src/h5repack/h5repack_opttable.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "h5repack.h"
diff --git a/tools/src/h5repack/h5repack_parse.c b/tools/src/h5repack/h5repack_parse.c
index 004b9e4..a9a890d 100644
--- a/tools/src/h5repack/h5repack_parse.c
+++ b/tools/src/h5repack/h5repack_parse.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "h5repack.h"
@@ -47,9 +45,9 @@ obj_list_t* parse_filter(const char *str, unsigned *n_objs, filter_info_t *filt,
int k, l, p, q, end_obj = -1, no_param = 0;
unsigned j, n;
char sobj[MAX_NC_NAME];
- char scomp[10];
- char stype[6];
- char smask[3];
+ char scomp[10] = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ char stype[6] = {0, 0, 0, 0, 0, 0};
+ char smask[3] = {0, 0, 0};
obj_list_t* obj_list = NULL;
unsigned pixels_per_block;
@@ -396,6 +394,7 @@ obj_list_t* parse_filter(const char *str, unsigned *n_objs, filter_info_t *filt,
error_msg("invalid filter type in <%s>\n", str);
HDexit(EXIT_FAILURE);
}
+ break;
}
} /*i*/
diff --git a/tools/src/h5repack/h5repack_refs.c b/tools/src/h5repack/h5repack_refs.c
index f0f32c3..408142c 100644
--- a/tools/src/h5repack/h5repack_refs.c
+++ b/tools/src/h5repack/h5repack_refs.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "h5repack.h"
@@ -27,7 +25,7 @@ static const char* MapIdToName(hid_t refobj_id,trav_table_t *travt);
static int copy_refs_attr(hid_t loc_in, hid_t loc_out, pack_opt_t *options,
trav_table_t *travt, hid_t fidout);
static herr_t update_ref_value(hid_t obj_id, H5R_type_t ref_type, void *ref_in,
- hid_t fid_out, void *ref_out, trav_table_t *travt);
+ hid_t fid_out, void *ref_out, trav_table_t *travt);
/*-------------------------------------------------------------------------
* Function: do_copy_refobjs
@@ -128,8 +126,8 @@ int do_copy_refobjs(hid_t fidin,
for(k = 0; k < rank; k++)
nelmts *= dims[k];
- if((mtype_id = h5tools_get_native_type(ftype_id)) < 0)
- HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "h5tools_get_native_type failed");
+ if((mtype_id = H5Tget_native_type(ftype_id, H5T_DIR_DEFAULT)) < 0)
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Tget_native_type failed");
if((msize = H5Tget_size(mtype_id)) == 0)
HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Tget_size failed");
@@ -427,13 +425,13 @@ done:
* Programmer: Pedro Vicente, pvn@ncsa.uiuc.edu
*
* Modifier: xcao@hdfgroup.org, 9/12/2011
- * Update values of references(object and region) for the following types:
+ * Update values of references (object and region) for the following types:
* 1) References,
* 2) ARRAY of reference,
* 3) VLEN of references.
* 4) COMPOUND of references.
* This function does not handle references in other complicated structures,
- * such as references in nested compound datatypes.
+ * such as references in nested compound datatypes.
*
* Date: October, 28, 2003
*
@@ -474,7 +472,7 @@ static int copy_refs_attr(hid_t loc_in,
HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Oget_info failed");
for(u = 0; u < (unsigned)oinfo.num_attrs; u++) {
- is_ref = is_ref_vlen = is_ref_array = is_ref_comp = 0;
+ is_ref = is_ref_vlen = is_ref_array = is_ref_comp = 0;
/* open attribute */
if((attr_id = H5Aopen_by_idx(loc_in, ".", H5_INDEX_CRT_ORDER, H5_ITER_INC, (hsize_t)u, H5P_DEFAULT, H5P_DEFAULT)) < 0)
@@ -486,8 +484,8 @@ static int copy_refs_attr(hid_t loc_in,
type_class = H5Tget_class(ftype_id);
- if((mtype_id = h5tools_get_native_type(ftype_id)) < 0)
- HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "h5tools_get_native_type failed");
+ if((mtype_id = H5Tget_native_type(ftype_id, H5T_DIR_DEFAULT)) < 0)
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Tget_native_type failed");
if((msize = H5Tget_size(mtype_id)) == 0)
HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Tget_size failed");
@@ -530,9 +528,9 @@ static int copy_refs_attr(hid_t loc_in,
}
H5Tclose(mtid);
}
-
- /* if compound don't contain reference type member, free the above
- * mallocs. Otherwise there can be memory leaks by the 'continue'
+
+ /* if the compound doesn't contain a reference type member, free the above
+ * mallocs. Otherwise there can be memory leaks caused by the 'continue'
* statement below. */
if (!ref_comp_field_n) {
if (ref_comp_index) {
@@ -541,7 +539,7 @@ static int copy_refs_attr(hid_t loc_in,
}
if (ref_comp_size) {
- HDfree(ref_comp_size);
+ HDfree(ref_comp_size);
ref_comp_size = NULL;
}
}
@@ -590,7 +588,7 @@ static int copy_refs_attr(hid_t loc_in,
array_rank = (unsigned)H5Tget_array_ndims(mtype_id);
H5Tget_array_dims2(mtype_id, array_dims);
for(j = 0; j <array_rank; j++)
- array_size *= array_dims[j];
+ array_size *= array_dims[j];
nelmts *= array_size;
}
@@ -650,7 +648,7 @@ static int copy_refs_attr(hid_t loc_in,
}
} /* H5T_STD_REF_DSETREG */
else if (is_ref_vlen) {
- /* handle VLEN of references */
+ /* handle VLEN of references */
buf = (hvl_t *)HDmalloc((unsigned)(nelmts * sizeof(hvl_t)));
refbuf = buf; /* reuse the read buffer for write */
@@ -690,7 +688,7 @@ static int copy_refs_attr(hid_t loc_in,
}
} /* else if (is_ref_vlen) */
else if (is_ref_comp) {
- /* handle ref fields in a compound */
+ /* handle ref fields in a compound */
buf = HDmalloc((unsigned)(nelmts * msize));
refbuf = buf; /* reuse the read buffer for write */
@@ -730,7 +728,7 @@ static int copy_refs_attr(hid_t loc_in,
HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Awrite failed");
if (is_ref_vlen && buf)
- H5Dvlen_reclaim (mtype_id, space_id, H5P_DEFAULT, buf);
+ H5Dvlen_reclaim (mtype_id, space_id, H5P_DEFAULT, buf);
} /* if (nelmts) */
if (refbuf == buf)
@@ -782,10 +780,10 @@ done:
HDfree(buf);
if (ref_comp_index)
- HDfree(ref_comp_index);
+ HDfree(ref_comp_index);
if (ref_comp_size)
- HDfree(ref_comp_size);
+ HDfree(ref_comp_size);
H5E_BEGIN_TRY {
H5Tclose(ftype_id);
@@ -799,9 +797,9 @@ done:
}
/*-------------------------------------------------------------------------
- * Function: MapIdToName
+ * Function: MapIdToName
*
- * Purpose: map a ID from a reference to a dataset name
+ * Purpose: map an ID from a reference to a dataset name
*
*-------------------------------------------------------------------------
*/
@@ -834,16 +832,16 @@ out:
}
/*-------------------------------------------------------------------------
- * Function: Update_Ref_value
+ * Function: Update_Ref_value
*
- * Purpose: Update a reference value
+ * Purpose: Update a reference value
*
* Programmer: xcao@hdfgroup.org 9/12/2011
*
*-------------------------------------------------------------------------
*/
static herr_t update_ref_value(hid_t obj_id, H5R_type_t ref_type, void *ref_in,
- hid_t fid_out, void *ref_out, trav_table_t *travt)
+ hid_t fid_out, void *ref_out, trav_table_t *travt)
{
int ret_value = 0; /*no need to LEAVE() on ERROR: HERR_INIT(int, SUCCEED) */
const char* ref_obj_name;
diff --git a/tools/src/h5repack/h5repack_verify.c b/tools/src/h5repack/h5repack_verify.c
index 6765c49..4ff4875 100644
--- a/tools/src/h5repack/h5repack_verify.c
+++ b/tools/src/h5repack/h5repack_verify.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "h5repack.h"
@@ -39,11 +37,6 @@ static int verify_filters(hid_t pid, hid_t tid, int nfilters, filter_info_t *fil
* Programmer: Pedro Vicente, pvn@hdfgroup.org
*
* Date: December 19, 2003
- * Modified: December, 19, 2007 (exactly 4 years later :-) )
- * Separate into 3 cases
- * 1) no filter input, get all datasets and compare DCPLs. TO DO
- * 2) filter input on selected datasets, get each one trough OBJ and match
- * 3) filter input on all datasets, get all objects and match
*
*-------------------------------------------------------------------------
*/
@@ -51,8 +44,8 @@ static int verify_filters(hid_t pid, hid_t tid, int nfilters, filter_info_t *fil
int
h5repack_verify(const char *in_fname, const char *out_fname, pack_opt_t *options)
{
- int ret_value = 0; /*no need to LEAVE() on ERROR: HERR_INIT(int, SUCCEED) */
- hid_t fidin = -1; /* file ID for input file*/
+ int ret_value = 0; /*no need to LEAVE() on ERROR: HERR_INIT(int, SUCCEED) */
+ hid_t fidin = -1; /* file ID for input file*/
hid_t fidout = -1; /* file ID for output file*/
hid_t did = -1; /* dataset ID */
hid_t pid = -1; /* dataset creation property list ID */
@@ -62,9 +55,11 @@ h5repack_verify(const char *in_fname, const char *out_fname, pack_opt_t *options
trav_table_t *travt = NULL;
int ok = 1;
hid_t fcpl_in = -1; /* file creation property for input file */
- hid_t fcpl_out = -1; /* file creation property for output file */
- H5F_file_space_type_t in_strat, out_strat; /* file space handling strategy for in/output file */
- hsize_t in_thresh, out_thresh; /* free space section threshold for in/output file */
+ hid_t fcpl_out = -1; /* file creation property for output file */
+ H5F_fspace_strategy_t in_strategy, out_strategy; /* file space handling strategy for in/output file */
+ hbool_t in_persist, out_persist; /* free-space persist status for in/output file */
+ hsize_t in_threshold, out_threshold; /* free-space section threshold for in/output file */
+ hsize_t in_pagesize, out_pagesize; /* file space page size for input/output file */
/* open the output file */
if((fidout = H5Fopen(out_fname, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0 )
@@ -127,6 +122,8 @@ h5repack_verify(const char *in_fname, const char *out_fname, pack_opt_t *options
if(options->all_filter == 1 || options->all_layout == 1)
{
+ /* Initialize indexing options */
+ h5trav_set_index(sort_by, sort_order);
/* init table */
trav_table_init(&travt);
@@ -201,8 +198,7 @@ h5repack_verify(const char *in_fname, const char *out_fname, pack_opt_t *options
}
/*-------------------------------------------------------------------------
- * Verify that file space strategy and free space threshold
- * are set as expected
+ * Verify that file space info is set as expected
*-------------------------------------------------------------------------
*/
@@ -216,12 +212,18 @@ h5repack_verify(const char *in_fname, const char *out_fname, pack_opt_t *options
HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Fget_create_plist failed");
}
- /* Get file space management info for input file */
- if(H5Pget_file_space(fcpl_in, &in_strat, &in_thresh) < 0) {
+ /* Get file space info for input file */
+ if(H5Pget_file_space_strategy(fcpl_in, &in_strategy, &in_persist, &in_threshold) < 0) {
error_msg("failed to retrieve file space strategy & threshold\n");
HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Pget_file_space failed");
}
+ /* Get file space page size for input file */
+ if(H5Pget_file_space_page_size(fcpl_in, &in_pagesize) < 0) {
+ error_msg("failed to retrieve file space page size\n");
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Fget_create_plist failed");
+ }
+
/* Output file is already opened */
/* Get file creation property list for output file */
if((fcpl_out = H5Fget_create_plist(fidout)) < 0) {
@@ -229,45 +231,84 @@ h5repack_verify(const char *in_fname, const char *out_fname, pack_opt_t *options
HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Fget_create_plist failed");
}
- /* Get file space management info for output file */
- if(H5Pget_file_space(fcpl_out, &out_strat, &out_thresh) < 0) {
+ /* Get file space info for output file */
+ if(H5Pget_file_space_strategy(fcpl_out, &out_strategy, &out_persist, &out_threshold) < 0) {
error_msg("failed to retrieve file space strategy & threshold\n");
HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Pget_file_space failed");
}
+ /* Get file space page size for output file */
+ if(H5Pget_file_space_page_size(fcpl_out, &out_pagesize) < 0) {
+ error_msg("failed to retrieve file space page size\n");
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "H5Fget_create_plist failed");
+ }
+
/*
- * If the strategy option is not set,
- * file space handling strategy should be the same for both
- * input & output files.
- * If the strategy option is set,
- * the output file's file space handling strategy should be the same
- * as what is set via the strategy option
+ * If -S option is set, the file space handling strategy should be set as specified.
+ * If -S option is not set, the file space handling strategy should be
+ * the same as the input file's strategy.
*/
- if(!options->fs_strategy && out_strat != in_strat) {
- error_msg("file space strategy not set as unexpected\n");
- HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "file space strategy not set as unexpected");
-
+ if(options->fs_strategy) {
+ if(out_strategy != (options->fs_strategy == (-1) ? 0 : options->fs_strategy)) {
+ error_msg("file space strategy not set as unexpected\n");
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "file space strategy not set as unexpected");
+ }
+ } else {
+ if(out_strategy != in_strategy) {
+ error_msg("file space strategy not set as unexpected\n");
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "file space strategy not set as unexpected");
+ }
}
- else if(options->fs_strategy && out_strat!= options->fs_strategy) {
- error_msg("file space strategy not set as unexpected\n");
- HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "file space strategy not set as unexpected");
+
+ /*
+ * If the -P option is set, the free-space persist status should be set as specified.
+ * If the -P option is not set, the free-space persist status should be
+ * the same as the input file's free-space persist status
+ */
+ if(options->fs_persist) {
+ if(out_persist != (hbool_t)(options->fs_persist == (-1) ? FALSE : options->fs_persist)) {
+ error_msg("free-space persist status is not set as unexpected\n");
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "file space strategy not set as unexpected");
+ }
+ } else {
+ if(out_persist != in_persist) {
+ error_msg("free-space persist status is not set as unexpected\n");
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "file space strategy not set as unexpected");
+ }
}
/*
- * If the threshold option is not set,
- * the free space section threshold should be the same for both
- * input & output files.
- * If the threshold option is set,
- * the output file's free space section threshold should be the same
- * as what is set via the threshold option.
+ * If the -T option is set, the threshold size should be set as specified.
+ * If the -T option is not set, the threshold should be the same as the
+ * input file's threshold size.
*/
- if(!options->fs_threshold && out_thresh != in_thresh) {
- error_msg("free space threshold not set as unexpected\n");
- HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "free space threshold not set as unexpected");
+ if(options->fs_threshold) {
+ if(out_threshold != (hsize_t)(options->fs_threshold == (-1) ? 0 : options->fs_threshold)) {
+ error_msg("threshold is not set as unexpectec\n");
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "file space strategy not set as unexpected");
+ }
+ } else {
+ if(out_threshold != in_threshold) {
+ error_msg("threshold is not set as unexpected\n");
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "file space strategy not set as unexpected");
+ }
}
- else if(options->fs_threshold && out_thresh != options->fs_threshold) {
- error_msg("free space threshold not set as unexpected\n");
- HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "free space threshold not set as unexpected");
+
+ /*
+ * If the -G option is set, the file space page size should be set as specified.
+ * If the -G option is not set, the file space page size should be
+ * the same as the input file's file space page size.
+ */
+ if(options->fs_pagesize) {
+ if(out_pagesize != (hsize_t)(options->fs_pagesize == (-1) ? 0 : options->fs_pagesize)) {
+ error_msg("file space page size is not set as unexpected\n");
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "file space strategy not set as unexpected");
+ }
+ } else { /* "-G" is not set */
+ if(out_pagesize != in_pagesize) {
+ error_msg("file space page size is not set as unexpected\n");
+ HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "file space strategy not set as unexpected");
+ }
}
/* Closing */
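The verification logic above compares the output file's strategy, persist flag, threshold and page size against either the requested option or the input file's own settings. A small sketch of pulling those four values out of a file for such a comparison, assuming HDF5 1.10.1+ (the function name and error handling are illustrative):

    #include "hdf5.h"

    /* Sketch: fetch the four file-space settings the verify code compares. */
    herr_t get_fs_info(const char *fname, H5F_fspace_strategy_t *strategy,
                       hbool_t *persist, hsize_t *threshold, hsize_t *pagesize)
    {
        herr_t ret  = -1;
        hid_t  fid  = H5Fopen(fname, H5F_ACC_RDONLY, H5P_DEFAULT);
        hid_t  fcpl = -1;

        if (fid < 0)
            return -1;
        if ((fcpl = H5Fget_create_plist(fid)) >= 0 &&
            H5Pget_file_space_strategy(fcpl, strategy, persist, threshold) >= 0 &&
            H5Pget_file_space_page_size(fcpl, pagesize) >= 0)
            ret = 0;

        if (fcpl >= 0) H5Pclose(fcpl);
        H5Fclose(fid);
        return ret;
    }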
@@ -416,6 +457,9 @@ int h5repack_cmp_pl(const char *fname1,
* get file table list of objects
*-------------------------------------------------------------------------
*/
+ /* Initialize indexing options */
+ h5trav_set_index(sort_by, sort_order);
+ /* init table */
trav_table_init(&trav);
if(h5trav_gettable(fid1, trav) < 0)
HGOTO_ERROR(FAIL, H5E_tools_min_id_g, "h5trav_gettable failed");
@@ -604,7 +648,7 @@ int verify_filters(hid_t pid, hid_t tid, int nfilters, filter_info_t *filter)
{
case H5Z_FILTER_NONE:
- break;
+ break;
case H5Z_FILTER_SHUFFLE:
/* 1 private client value is returned by DCPL */
diff --git a/tools/src/h5stat/CMakeLists.txt b/tools/src/h5stat/CMakeLists.txt
index d4e14cb..9762dbb 100644
--- a/tools/src/h5stat/CMakeLists.txt
+++ b/tools/src/h5stat/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_TOOLS_SRC_H5STAT)
#-----------------------------------------------------------------------------
@@ -18,6 +18,17 @@ set_global_variable (HDF5_UTILS_TO_EXPORT "${HDF5_UTILS_TO_EXPORT};h5stat")
set (H5_DEP_EXECUTABLES h5stat)
+if (BUILD_SHARED_LIBS)
+ add_executable (h5stat-shared ${HDF5_TOOLS_SRC_H5STAT_SOURCE_DIR}/h5stat.c)
+ TARGET_NAMING (h5stat-shared SHARED)
+ TARGET_C_PROPERTIES (h5stat-shared SHARED " " " ")
+ target_link_libraries (h5stat-shared ${HDF5_TOOLS_LIBSH_TARGET} ${HDF5_LIBSH_TARGET})
+ set_target_properties (h5stat-shared PROPERTIES FOLDER tools)
+ set_global_variable (HDF5_UTILS_TO_EXPORT "${HDF5_UTILS_TO_EXPORT};h5stat-shared")
+
+ set (H5_DEP_EXECUTABLES ${H5_DEP_EXECUTABLES} h5stat-shared)
+endif ()
+
##############################################################################
##############################################################################
### I N S T A L L A T I O N ###
@@ -27,13 +38,17 @@ set (H5_DEP_EXECUTABLES h5stat)
#-----------------------------------------------------------------------------
# Rules for Installation of tools using make Install target
#-----------------------------------------------------------------------------
+if (HDF5_EXPORTED_TARGETS)
+ if (BUILD_SHARED_LIBS)
+ INSTALL_PROGRAM_PDB (h5stat-shared ${HDF5_INSTALL_BIN_DIR} toolsapplications)
+ endif ()
+ INSTALL_PROGRAM_PDB (h5stat ${HDF5_INSTALL_BIN_DIR} toolsapplications)
-#INSTALL_PROGRAM_PDB (h5stat ${HDF5_INSTALL_BIN_DIR} toolsapplications)
-
-install (
- TARGETS
- h5stat
- EXPORT
- ${HDF5_EXPORTED_TARGETS}
- RUNTIME DESTINATION ${HDF5_INSTALL_BIN_DIR} COMPONENT toolsapplications
-)
+ install (
+ TARGETS
+ ${H5_DEP_EXECUTABLES}
+ EXPORT
+ ${HDF5_EXPORTED_TARGETS}
+ RUNTIME DESTINATION ${HDF5_INSTALL_BIN_DIR} COMPONENT toolsapplications
+ )
+endif ()
diff --git a/tools/src/h5stat/Makefile.am b/tools/src/h5stat/Makefile.am
index b4d81de..c228b48 100644
--- a/tools/src/h5stat/Makefile.am
+++ b/tools/src/h5stat/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
diff --git a/tools/src/h5stat/h5stat.c b/tools/src/h5stat/h5stat.c
index a331014..6aee7a8 100644
--- a/tools/src/h5stat/h5stat.c
+++ b/tools/src/h5stat/h5stat.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <stdlib.h>
@@ -40,11 +38,11 @@
/* File space management strategies: see H5Fpublic.h for declarations */
const char *FS_STRATEGY_NAME[] = {
+ "H5F_FSPACE_STRATEGY_FSM_AGGR",
+ "H5F_FSPACE_STRATEGY_PAGE",
+ "H5F_FSPACE_STRATEGY_AGGR",
+ "H5F_FSPACE_STRATEGY_NONE",
"unknown",
- "H5F_FILE_SPACE_ALL_PERSIST",
- "H5F_FILE_SPACE_ALL",
- "H5F_FILE_SPACE_AGGR_VFD",
- "H5F_FILE_SPACE_VFD",
NULL
};
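One thing worth noting about the reordered table above: h5stat prints FS_STRATEGY_NAME[iter->fs_strategy] directly, so the entries must follow the numeric order of H5F_fspace_strategy_t (FSM_AGGR = 0 through NONE = 3), with "unknown" left as a trailing guard entry. A hypothetical bounds-checked variant of that lookup:

    #include <stdio.h>
    #include "hdf5.h"

    /* Sketch: the name table is indexed by the enum value, so its order must
     * track H5F_fspace_strategy_t (FSM_AGGR = 0 ... NONE = 3). */
    void print_strategy(H5F_fspace_strategy_t s)
    {
        static const char *names[] = {
            "H5F_FSPACE_STRATEGY_FSM_AGGR",
            "H5F_FSPACE_STRATEGY_PAGE",
            "H5F_FSPACE_STRATEGY_AGGR",
            "H5F_FSPACE_STRATEGY_NONE",
            "unknown",
        };

        printf("File space management strategy: %s\n",
               (s < H5F_FSPACE_STRATEGY_NTYPES) ? names[s] : names[4]);
    }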
@@ -106,10 +104,12 @@ typedef struct iter_t {
hsize_t super_size; /* superblock size */
hsize_t super_ext_size; /* superblock extension size */
hsize_t ublk_size; /* user block size (if exists) */
- H5F_file_space_type_t fs_strategy; /* File space management strategy */
+ H5F_fspace_strategy_t fs_strategy; /* File space management strategy */
+ hbool_t fs_persist; /* Free-space persist or not */
hsize_t fs_threshold; /* Free-space section threshold */
- hsize_t free_space; /* amount of freespace in the file */
- hsize_t free_hdr; /* size of free space manager metadata in the file */
+ hsize_t fsp_size; /* File space page size */
+ hsize_t free_space; /* Amount of freespace in the file */
+ hsize_t free_hdr; /* Size of free space manager metadata in the file */
unsigned long num_small_sects[SIZE_SMALL_SECTS]; /* Size of small free-space sections */
unsigned sect_nbins; /* Number of bins for free-space section sizes */
unsigned long *sect_bins; /* Pointer to array of bins for free-space section sizes */
@@ -1501,6 +1501,7 @@ print_freespace_info(const iter_t *iter)
unsigned long total; /* Total count for various statistics */
unsigned u; /* Local index variable */
+ HDfprintf(stdout, "Free-space persist: %s\n", iter->fs_persist ? "TRUE" : "FALSE");
HDfprintf(stdout, "Free-space section threshold: %Hu bytes\n", iter->fs_threshold);
printf("Small size free-space sections (< %u bytes):\n", (unsigned)SIZE_SMALL_SECTS);
total = 0;
@@ -1551,6 +1552,7 @@ print_storage_summary(const iter_t *iter)
double percent = 0.0f;
HDfprintf(stdout, "File space management strategy: %s\n", FS_STRATEGY_NAME[iter->fs_strategy]);
+ HDfprintf(stdout, "File space page size: %Hu bytes\n", iter->fsp_size);
printf("Summary of file space information:\n");
total_meta =
iter->super_size + iter->super_ext_size + iter->ublk_size +
@@ -1772,9 +1774,12 @@ main(int argc, const char *argv[])
if(H5Pget_userblock(fcpl, &iter.ublk_size) < 0)
warn_msg("Unable to retrieve userblock size\n");
- if(H5Pget_file_space(fcpl, &iter.fs_strategy, &iter.fs_threshold) < 0)
+ if(H5Pget_file_space_strategy(fcpl, &iter.fs_strategy, &iter.fs_persist, &iter.fs_threshold) < 0)
warn_msg("Unable to retrieve file space information\n");
- HDassert(iter.fs_strategy != 0 && iter.fs_strategy < H5F_FILE_SPACE_NTYPES);
+ HDassert(iter.fs_strategy >= 0 && iter.fs_strategy < H5F_FSPACE_STRATEGY_NTYPES);
+
+ if(H5Pget_file_space_page_size(fcpl, &iter.fsp_size) < 0)
+ warn_msg("Unable to retrieve file space page size\n");
/* get information for free-space sections */
if(freespace_stats(fid, &iter) < 0)
diff --git a/tools/src/misc/CMakeLists.txt b/tools/src/misc/CMakeLists.txt
index 0bda701..1292628 100644
--- a/tools/src/misc/CMakeLists.txt
+++ b/tools/src/misc/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_TOOLS_SRC_MISC)
#-----------------------------------------------------------------------------
@@ -50,7 +50,7 @@ set (H5_DEP_EXECUTABLES
#-----------------------------------------------------------------------------
#if (NOT WIN32)
# configure_file (${HDF5_TOOLS_SRC_MISC_SOURCE_DIR}/h5cc.in ${HDF5_BINARY_DIR}/h5cc @ONLY)
-#endif (NOT WIN32)
+#endif ()
##############################################################################
##############################################################################
@@ -61,15 +61,16 @@ set (H5_DEP_EXECUTABLES
#-----------------------------------------------------------------------------
# Rules for Installation of tools using make Install target
#-----------------------------------------------------------------------------
+if (HDF5_EXPORTED_TARGETS)
+ INSTALL_PROGRAM_PDB (h5debug ${HDF5_INSTALL_BIN_DIR} toolsapplications)
+ INSTALL_PROGRAM_PDB (h5repart ${HDF5_INSTALL_BIN_DIR} toolsapplications)
+ INSTALL_PROGRAM_PDB (h5mkgrp ${HDF5_INSTALL_BIN_DIR} toolsapplications)
-#INSTALL_PROGRAM_PDB (h5debug ${HDF5_INSTALL_BIN_DIR} toolsapplications)
-#INSTALL_PROGRAM_PDB (h5repart ${HDF5_INSTALL_BIN_DIR} toolsapplications)
-#INSTALL_PROGRAM_PDB (h5mkgrp ${HDF5_INSTALL_BIN_DIR} toolsapplications)
-
-install (
- TARGETS
- h5debug h5repart h5mkgrp
- EXPORT
- ${HDF5_EXPORTED_TARGETS}
- RUNTIME DESTINATION ${HDF5_INSTALL_BIN_DIR} COMPONENT toolsapplications
-)
+ install (
+ TARGETS
+ h5debug h5repart h5mkgrp
+ EXPORT
+ ${HDF5_EXPORTED_TARGETS}
+ RUNTIME DESTINATION ${HDF5_INSTALL_BIN_DIR} COMPONENT toolsapplications
+ )
+endif ()
diff --git a/tools/src/misc/Makefile.am b/tools/src/misc/Makefile.am
index ad3dae9b..64c5ee5 100644
--- a/tools/src/misc/Makefile.am
+++ b/tools/src/misc/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
diff --git a/tools/src/misc/h5cc.in b/tools/src/misc/h5cc.in
index 1645855..9c4e3ca 100644
--- a/tools/src/misc/h5cc.in
+++ b/tools/src/misc/h5cc.in
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
# This tool is adapted from the mpicc command of the MPICH Software.
diff --git a/tools/src/misc/h5clear.c b/tools/src/misc/h5clear.c
index 0be4f8f..e3b989d 100644
--- a/tools/src/misc/h5clear.c
+++ b/tools/src/misc/h5clear.c
@@ -5,133 +5,266 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
- * Programmer:
- *
- * Purpose:
+ * Purpose:     A tool to clear the status_flags field in the file's superblock (the -s option)
+ *              and to remove the metadata cache image from the file (the -m option).
+ *
*/
-
#include "hdf5.h"
#include "H5private.h"
#include "h5tools.h"
#include "h5tools_utils.h"
/* Name of tool */
-#define PROGRAMNAME "h5clear"
+#define PROGRAMNAME "h5clear"
/* Make this private property (defined in H5Fprivate.h) available to h5clear. */
#define H5F_ACS_CLEAR_STATUS_FLAGS_NAME "clear_status_flags"
+static char *fname_g = NULL;
+static hbool_t clear_status_flags = FALSE;
+static hbool_t remove_cache_image = FALSE;
+
+/*
+ * Command-line options: The user can specify short or long-named
+ * parameters.
+ */
+static const char *s_opts = "hVsm";
+static struct long_options l_opts[] = {
+ { "help", no_arg, 'h' },
+ { "hel", no_arg, 'h'},
+ { "he", no_arg, 'h'},
+ { "version", no_arg, 'V' },
+ { "version", no_arg, 'V' },
+ { "versio", no_arg, 'V' },
+ { "versi", no_arg, 'V' },
+ { "vers", no_arg, 'V' },
+ { "status", no_arg, 's' },
+ { "statu", no_arg, 's' },
+ { "stat", no_arg, 's' },
+ { "sta", no_arg, 's' },
+ { "st", no_arg, 's' },
+ { "image", no_arg, 'm' },
+ { "imag", no_arg, 'm' },
+ { "ima", no_arg, 'm' },
+ { "im", no_arg, 'm' },
+ { NULL, 0, '\0' }
+};
+
+
+
/*-------------------------------------------------------------------------
- * Function: leave
+ * Function: usage
*
- * Purpose: Close the tools library and exit
+ * Purpose: Prints a usage message
*
- * Return: Does not return
+ * Return: void
*
*-------------------------------------------------------------------------
*/
-static void
-leave(int ret)
+static void usage(const char *prog)
{
- h5tools_close();
- HDexit(ret);
+ HDfprintf(stdout, "usage: %s [OPTIONS] file_name\n", prog);
+ HDfprintf(stdout, " OPTIONS\n");
+ HDfprintf(stdout, " -h, --help Print a usage message and exit\n");
+ HDfprintf(stdout, " -V, --version Print version number and exit\n");
+ HDfprintf(stdout, " -s, --status Clear the status_flags field in the file's superblock\n");
+ HDfprintf(stdout, " -m, --image Remove the metadata cache image from the file\n");
+ HDfprintf(stdout, "\n");
+ HDfprintf(stdout, "Examples of use:\n");
+ HDfprintf(stdout, "\n");
+ HDfprintf(stdout, "h5clear -s file_name\n");
+ HDfprintf(stdout, " Clear the status_flags field in the superblock of the HDF5 file <file_name>.\n");
+ HDfprintf(stdout, "\n");
+ HDfprintf(stdout, "h5clear -m file_name\n");
+ HDfprintf(stdout, " Remove the metadata cache image from the HDF5 file <file_name>.\n");
+} /* usage() */
-} /* leave() */
+
+/*-------------------------------------------------------------------------
+ * Function: parse_command_line
+ *
+ * Purpose:     Parses the command line and sets up global variables to control output
+ *
+ * Return: Success: 0
+ *
+ * Failure: -1
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+parse_command_line(int argc, const char **argv)
+{
+ int opt;
+
+ /* no arguments */
+ if (argc == 1) {
+ usage(h5tools_getprogname());
+ h5tools_setstatus(EXIT_FAILURE);
+ goto error;
+ }
+
+ /* parse command line options */
+ while((opt = get_option(argc, argv, s_opts, l_opts)) != EOF) {
+ switch((char)opt) {
+ case 'h':
+ usage(h5tools_getprogname());
+ h5tools_setstatus(EXIT_SUCCESS);
+ goto done;
+
+ case 'V':
+ print_version(h5tools_getprogname());
+ h5tools_setstatus(EXIT_SUCCESS);
+ goto done;
+
+ case 's':
+ clear_status_flags = TRUE;
+ break;
+
+ case 'm':
+ remove_cache_image = TRUE;
+ break;
+
+ default:
+ usage(h5tools_getprogname());
+ h5tools_setstatus(EXIT_FAILURE);
+ goto error;
+ } /* end switch */
+ } /* end while */
+
+ /* check for file name to be processed */
+ if(argc <= opt_ind) {
+ error_msg("missing file name\n");
+ usage(h5tools_getprogname());
+ h5tools_setstatus(EXIT_FAILURE);
+ goto error;
+ } /* end if */
+
+ fname_g = HDstrdup(argv[opt_ind]);
+
+done:
+ return(0);
+
+error:
+ return -1;
+}
/*-------------------------------------------------------------------------
- * Function: usage
+ * Function: leave
*
- * Purpose: Prints a usage message
+ * Purpose: Close the tools library and exit
*
- * Return: void
+ * Return: Does not return
*
*-------------------------------------------------------------------------
*/
static void
-usage(void)
+leave(int ret)
{
- HDfprintf(stdout, "usage: h5clear filename\n");
-
-} /* usage() */
+ h5tools_close();
+ HDexit(ret);
+} /* leave() */
/*-------------------------------------------------------------------------
- * Function: main
- *
- * Purpose:
+ * Function: main
*
- * Return: Success:
- * Failure:
+ * Purpose: To clear the status_flags field in the file's superblock (-s option).
+ * To remove the cache image from the file (-m option).
*
- * Programmer:
+ * Return: Success: 0
+ * Failure: 1
*
*-------------------------------------------------------------------------
*/
int
-main (int argc, char *argv[])
+main (int argc, const char *argv[])
{
- char *fname; /* File name */
- hbool_t clear = TRUE; /* To clear the status_flags in the file's superblock */
- hid_t fapl = -1; /* File access property list */
- hid_t fid = -1; /* File ID */
+ char *fname = NULL; /* File name */
+ hid_t fapl = -1; /* File access property list */
+ hid_t fid = -1; /* File ID */
+ haddr_t image_addr;
+ hsize_t image_len;
h5tools_setprogname(PROGRAMNAME);
h5tools_setstatus(EXIT_SUCCESS);
-
+
/* Disable the HDF5 library's error reporting */
H5Eset_auto2(H5E_DEFAULT, NULL, NULL);
/* initialize h5tools lib */
h5tools_init();
- /* Check for the # of arguments */
- if(argc != 2) {
- usage();
- leave(EXIT_FAILURE);
+ /* Parse command line options */
+ if(parse_command_line(argc, argv) < 0)
+ goto done;
+
+ if(fname_g == NULL)
+ goto done;
+
+ if(!clear_status_flags && !remove_cache_image) {
+ usage(h5tools_getprogname());
+ h5tools_setstatus(EXIT_FAILURE);
+ goto done;
}
/* Duplicate the file name */
- fname = HDstrdup(argv[opt_ind]);
+ fname = HDstrdup(fname_g);
/* Get a copy of the file access property list */
if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0) {
error_msg("H5Pcreate\n");
- exit(EXIT_FAILURE);
- }
+ h5tools_setstatus(EXIT_FAILURE);
+ goto done;
+ }
- /* Set to clear the status_flags in the file's superblock */
- /* This is a private property used by h5clear only */
- if(H5Pset(fapl, H5F_ACS_CLEAR_STATUS_FLAGS_NAME, &clear) < 0) {
- error_msg("H5Pset\n");
- exit(EXIT_FAILURE);
+ /* -s option */
+ if(clear_status_flags) {
+ /* Set to clear the status_flags in the file's superblock */
+ /* This is a private property used by h5clear only */
+ if(H5Pset(fapl, H5F_ACS_CLEAR_STATUS_FLAGS_NAME, &clear_status_flags) < 0) {
+ error_msg("H5Pset\n");
+ h5tools_setstatus(EXIT_FAILURE);
+ goto done;
+ }
}
if((fid = h5tools_fopen(fname, H5F_ACC_RDWR, fapl, NULL, NULL, (size_t)0)) < 0) {
- error_msg("h5tools_fopen\n");
- exit(EXIT_FAILURE);
+ error_msg("h5tools_fopen\n");
+ h5tools_setstatus(EXIT_FAILURE);
+ goto done;
}
- /* Close the file */
- if(H5Fclose(fid) < 0) {
- error_msg("H5Fclose\n");
- exit(EXIT_FAILURE);
- }
+ /* -m option */
+ if(remove_cache_image) {
+ if(H5Fget_mdc_image_info(fid, &image_addr, &image_len) < 0) {
+ error_msg("H5Fget_mdc_image_info\n");
+ h5tools_setstatus(EXIT_FAILURE);
+ goto done;
+ }
+ if(image_addr == HADDR_UNDEF && image_len == 0)
+ warn_msg("No cache image in the file\n");
+ }
- /* CLose the property list */
- if(H5Pclose(fapl) < 0) {
- error_msg("H5Pclose\n");
- exit(EXIT_FAILURE);
- }
+ h5tools_setstatus(EXIT_SUCCESS);
+done:
+ if(fname)
+ HDfree(fname);
+ if(fname_g)
+ HDfree(fname_g);
+
+ H5E_BEGIN_TRY {
+ H5Pclose(fapl);
+ H5Fclose(fid);
+ } H5E_END_TRY
- return EXIT_SUCCESS;
+ leave(h5tools_getstatus());
} /* main() */
+
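For context on the -m path added above: H5Fget_mdc_image_info() reports the address and length of a metadata cache image, where HADDR_UNDEF plus a length of 0 means no image is present, while the -s path relies on a private property available only to h5clear and so has no public-API equivalent. A minimal read-only sketch of the cache-image check follows (illustrative only; the helper name has_cache_image is invented here).

    /* Illustrative sketch: detect whether a metadata cache image is present,
     * mirroring the -m check in h5clear above.  Returns 1 if an image exists,
     * 0 if not, -1 on error. */
    #include <hdf5.h>

    static int
    has_cache_image(const char *fname)
    {
        hid_t   fid;
        haddr_t image_addr = HADDR_UNDEF;
        hsize_t image_len  = 0;
        int     ret        = -1;

        if((fid = H5Fopen(fname, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0)
            return -1;

        if(H5Fget_mdc_image_info(fid, &image_addr, &image_len) >= 0)
            ret = (image_addr != HADDR_UNDEF && image_len > 0) ? 1 : 0;

        H5Fclose(fid);
        return ret;
    }

Note that h5clear itself opens the file with H5F_ACC_RDWR (see the h5tools_fopen call above); the sketch only inspects and leaves the file untouched.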
diff --git a/tools/src/misc/h5debug.c b/tools/src/misc/h5debug.c
index ae64952..72f826e 100644
--- a/tools/src/misc/h5debug.c
+++ b/tools/src/misc/h5debug.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*-------------------------------------------------------------------------
diff --git a/tools/src/misc/h5mkgrp.c b/tools/src/misc/h5mkgrp.c
index ad2d306..597b6b3 100644
--- a/tools/src/misc/h5mkgrp.c
+++ b/tools/src/misc/h5mkgrp.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
@@ -328,9 +326,6 @@ main(int argc, const char *argv[])
leave(EXIT_FAILURE);
} /* end if */
- /* Shut down h5tools lib */
- h5tools_close();
-
leave(EXIT_SUCCESS);
} /* end main() */
diff --git a/tools/src/misc/h5redeploy.in b/tools/src/misc/h5redeploy.in
index 6b6ef87..242459a 100644
--- a/tools/src/misc/h5redeploy.in
+++ b/tools/src/misc/h5redeploy.in
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
## Update HDF5 compiler tools after the HDF5 software has been installed ##
diff --git a/tools/src/misc/h5repart.c b/tools/src/misc/h5repart.c
index e44c957..911e0c6 100644
--- a/tools/src/misc/h5repart.c
+++ b/tools/src/misc/h5repart.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/tools/test/CMakeLists.txt b/tools/test/CMakeLists.txt
index 608dd6e..a7dd11f 100644
--- a/tools/test/CMakeLists.txt
+++ b/tools/test/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_TOOLS_TEST)
#-----------------------------------------------------------------------------
diff --git a/tools/test/Makefile.am b/tools/test/Makefile.am
index 62193ba..19bb047 100644
--- a/tools/test/Makefile.am
+++ b/tools/test/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
@@ -24,7 +22,7 @@ include $(top_srcdir)/config/commence.am
CONFIG=ordered
# All subdirectories
-SUBDIRS=h5diff h5ls h5dump misc h5import h5repack h5jam h5copy h5stat \
- perform
+SUBDIRS=h5diff h5ls h5dump misc h5import h5repack h5jam h5copy \
+ h5format_convert h5stat perform
include $(top_srcdir)/config/conclude.am
diff --git a/tools/test/h5copy/CMakeLists.txt b/tools/test/h5copy/CMakeLists.txt
index 5bab57b..4a519ab 100644
--- a/tools/test/h5copy/CMakeLists.txt
+++ b/tools/test/h5copy/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_TOOLS_TEST_H5COPY)
#-----------------------------------------------------------------------------
@@ -18,6 +18,6 @@ INCLUDE_DIRECTORIES (${HDF5_TOOLS_DIR}/lib)
set_target_properties (h5copygentest PROPERTIES FOLDER generator/tools)
#add_test (NAME h5copygentest COMMAND $<TARGET_FILE:h5copygentest>)
- endif (HDF5_BUILD_GENERATORS)
+ endif ()
include (CMakeTests.cmake)
diff --git a/tools/test/h5copy/CMakeTests.cmake b/tools/test/h5copy/CMakeTests.cmake
index fe36ca9..2536ef3 100644
--- a/tools/test/h5copy/CMakeTests.cmake
+++ b/tools/test/h5copy/CMakeTests.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
##############################################################################
##############################################################################
@@ -24,7 +35,7 @@
foreach (listfiles ${LIST_HDF5_TEST_FILES} ${LIST_OTHER_TEST_FILES})
get_filename_component(fname "${listfiles}" NAME)
HDFTEST_COPY_FILE("${listfiles}" "${PROJECT_BINARY_DIR}/testfiles/${fname}" "h5copy_files")
- endforeach (listfiles ${LIST_HDF5_TEST_FILES} ${LIST_OTHER_TEST_FILES})
+ endforeach ()
add_custom_target(h5copy_files ALL COMMENT "Copying files needed by h5copy tests" DEPENDS ${h5copy_files_list})
##############################################################################
@@ -36,7 +47,7 @@
#
  # Perform h5copy according to the passed parameters
#
- MACRO (ADD_H5_F_TEST testname resultcode infile fparam vparam sparam srcname dparam dstname)
+ macro (ADD_H5_F_TEST testname resultcode infile fparam vparam sparam srcname dparam dstname)
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
# Remove any output file left over from previous test run
add_test (
@@ -45,7 +56,7 @@
-E remove
./testfiles/${testname}.out.h5
)
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
+ endif ()
add_test (
NAME H5COPY_F-${testname}
@@ -54,10 +65,10 @@
if (HDF5_ENABLE_USING_MEMCHECKER)
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (H5COPY_F-${testname} PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
- else (HDF5_ENABLE_USING_MEMCHECKER)
+ endif ()
+ else ()
set_tests_properties (H5COPY_F-${testname} PROPERTIES DEPENDS H5COPY_F-${testname}-clear-objects)
- endif (HDF5_ENABLE_USING_MEMCHECKER)
+ endif ()
# resultcode=2 will cause the test to skip the diff test
if (NOT ${resultcode} STREQUAL "2")
@@ -68,11 +79,11 @@
SET_TESTS_PROPERTIES(H5COPY_F-${testname}-DIFF PROPERTIES DEPENDS H5COPY_F-${testname})
if (${resultcode} STREQUAL "1")
set_tests_properties (H5COPY_F-${testname}-DIFF PROPERTIES WILL_FAIL "true")
- endif (${resultcode} STREQUAL "1")
- endif (NOT ${resultcode} STREQUAL "2")
- ENDMACRO (ADD_H5_F_TEST)
+ endif ()
+ endif ()
+ endmacro ()
- MACRO (ADD_H5_TEST testname resultcode infile vparam sparam srcname dparam dstname)
+ macro (ADD_H5_TEST testname resultcode infile vparam sparam srcname dparam dstname)
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
# Remove any output file left over from previous test run
add_test (
@@ -81,7 +92,7 @@
-E remove
./testfiles/${testname}.out.h5
)
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
+ endif ()
add_test (
NAME H5COPY-${testname}
@@ -90,10 +101,10 @@
if (HDF5_ENABLE_USING_MEMCHECKER)
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (H5COPY-${testname} PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
- else (HDF5_ENABLE_USING_MEMCHECKER)
+ endif ()
+ else ()
set_tests_properties (H5COPY-${testname} PROPERTIES DEPENDS H5COPY-${testname}-clear-objects)
- endif (HDF5_ENABLE_USING_MEMCHECKER)
+ endif ()
# resultcode=2 will cause the test to skip the diff test
if (NOT ${resultcode} STREQUAL "2")
@@ -104,11 +115,20 @@
SET_TESTS_PROPERTIES(H5COPY-${testname}-DIFF PROPERTIES DEPENDS H5COPY-${testname})
if (${resultcode} STREQUAL "1")
set_tests_properties (H5COPY-${testname}-DIFF PROPERTIES WILL_FAIL "true")
- endif (${resultcode} STREQUAL "1")
- endif (NOT ${resultcode} STREQUAL "2")
- ENDMACRO (ADD_H5_TEST)
+ endif ()
+ endif ()
+ endmacro ()
- MACRO (ADD_H5_TEST2 testname resultcode infile psparam pdparam vparam sparam srcname dparam dstname)
+ macro (ADD_SKIP_H5_TEST testname skipresultfile)
+ if (NOT HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (
+ NAME H5COPY-${testname}-${skipresultfile}-SKIPPED
+ COMMAND ${CMAKE_COMMAND} -E echo "SKIP ${testname}-${skipresultfile} ${ARGN}"
+ )
+ endif ()
+ endmacro ()
+
+ macro (ADD_H5_TEST2 testname resultcode infile psparam pdparam vparam sparam srcname dparam dstname)
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
# Remove any output file left over from previous test run
add_test (
@@ -117,7 +137,7 @@
-E remove
./testfiles/${testname}.out.h5
)
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
+ endif ()
add_test (
NAME H5COPY-${testname}-prefill
@@ -126,10 +146,10 @@
if (HDF5_ENABLE_USING_MEMCHECKER)
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (H5COPY-${testname}-prefill PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
- else (HDF5_ENABLE_USING_MEMCHECKER)
+ endif ()
+ else ()
set_tests_properties (H5COPY-${testname}-prefill PROPERTIES DEPENDS H5COPY-${testname}-clear-objects)
- endif (HDF5_ENABLE_USING_MEMCHECKER)
+ endif ()
add_test (
NAME H5COPY-${testname}
@@ -145,11 +165,11 @@
SET_TESTS_PROPERTIES(H5COPY-${testname}-DIFF PROPERTIES DEPENDS H5COPY-${testname})
if (${resultcode} STREQUAL "1")
set_tests_properties (H5COPY-${testname}-DIFF PROPERTIES WILL_FAIL "true")
- endif (${resultcode} STREQUAL "1")
- endif (NOT ${resultcode} STREQUAL "2")
- ENDMACRO (ADD_H5_TEST2)
+ endif ()
+ endif ()
+ endmacro ()
- MACRO (ADD_H5_TEST_SAME testname resultcode pfile psparam pdparam vparam sparam srcname dparam dstname)
+ macro (ADD_H5_TEST_SAME testname resultcode pfile psparam pdparam vparam sparam srcname dparam dstname)
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
# Remove any output file left over from previous test run
add_test (
@@ -158,7 +178,7 @@
-E remove
./testfiles/${testname}.out.h5
)
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
+ endif ()
add_test (
NAME H5COPY_SAME-${testname}-prefill
@@ -167,10 +187,10 @@
if (HDF5_ENABLE_USING_MEMCHECKER)
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (H5COPY_SAME-${testname}-prefill PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
else (HDF5_ENABLE_USING_MEMCHECKER)
set_tests_properties (H5COPY_SAME-${testname}-prefill PROPERTIES DEPENDS H5COPY_SAME-${testname}-clear-objects)
- endif (HDF5_ENABLE_USING_MEMCHECKER)
+ endif ()
add_test (
NAME H5COPY_SAME-${testname}
@@ -186,25 +206,25 @@
SET_TESTS_PROPERTIES(H5COPY_SAME-${testname}-DIFF PROPERTIES DEPENDS H5COPY_SAME-${testname})
if (${resultcode} STREQUAL "1")
set_tests_properties (H5COPY_SAME-${testname}-DIFF PROPERTIES WILL_FAIL "true")
- endif (${resultcode} STREQUAL "1")
- endif (NOT ${resultcode} STREQUAL "2")
- ENDMACRO (ADD_H5_TEST_SAME)
+ endif ()
+ endif ()
+ endmacro ()
#
  # Similar to the ADD_H5_TEST macro. Compares the outputs from source & target
# files instead of checking with h5ls.
#
- MACRO (ADD_H5_CMP_TEST testname resultcode infile vparam sparam srcname dparam dstname)
+ macro (ADD_H5_CMP_TEST testname resultcode infile vparam sparam srcname dparam dstname)
# If using memchecker add tests without using scripts
if (HDF5_ENABLE_USING_MEMCHECKER)
add_test (NAME H5COPY-CMP-${testname} COMMAND $<TARGET_FILE:h5copy> -i ./testfiles/${infile} -o ./testfiles/${testname}.out.h5 ${vparam} ${sparam} ${srcname} ${dparam} ${dstname} ${ARGN})
if (${resultcode} STREQUAL "1")
set_tests_properties (H5COPY-CMP-${testname} PROPERTIES WILL_FAIL "true")
- endif (${resultcode} STREQUAL "1")
+ endif ()
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (H5COPY-CMP-${testname} PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
- else (HDF5_ENABLE_USING_MEMCHECKER)
+ endif ()
+ else ()
# Remove any output file left over from previous test run
add_test (
NAME H5COPY-CMP-${testname}-clear-objects
@@ -225,8 +245,8 @@
-P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
)
set_tests_properties (H5COPY-CMP-${testname} PROPERTIES DEPENDS H5COPY-CMP-${testname}-clear-objects)
- endif (HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO (ADD_H5_CMP_TEST)
+ endif ()
+ endmacro ()
##############################################################################
##############################################################################
@@ -286,18 +306,38 @@
set_tests_properties (H5COPY-clearall-objects PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles")
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (H5COPY-clearall-objects PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
set (last_test "H5COPY-clearall-objects")
- endif (HDF5_ENABLE_USING_MEMCHECKER)
+ endif ()
+
+# See which filters are usable (and skip tests for filters we
+# don't have). Do this by searching H5pubconf.h to see which
+# filters are defined.
+
+# detect whether the encoder is present.
+ if (H5_HAVE_FILTER_DEFLATE)
+ set (USE_FILTER_DEFLATE "true")
+ endif ()
+
+ if (H5_HAVE_FILTER_SZIP)
+ set (USE_FILTER_SZIP "true")
+ endif ()
# "Test copying various forms of datasets"
ADD_H5_TEST (simple 0 ${HDF_FILE1}.h5 -v -s simple -d simple)
ADD_H5_TEST (chunk 0 ${HDF_FILE1}.h5 -v -s chunk -d chunk)
ADD_H5_TEST (compact 0 ${HDF_FILE1}.h5 -v -s compact -d compact)
ADD_H5_TEST (compound 0 ${HDF_FILE1}.h5 -v -s compound -d compound)
- ADD_H5_TEST (compressed 0 ${HDF_FILE1}.h5 -v -s compressed -d compressed)
+
+ if (USE_FILTER_DEFLATE)
+ ADD_H5_TEST (compressed 0 ${HDF_FILE1}.h5 -v -s compressed -d compressed)
+ else ()
+ ADD_H5_TEST (compressed 2 ${HDF_FILE1}.h5 -v -s compressed -d compressed)
+ endif ()
+
ADD_H5_TEST (named_vl 0 ${HDF_FILE1}.h5 -v -s named_vl -d named_vl)
ADD_H5_TEST (nested_vl 0 ${HDF_FILE1}.h5 -v -s nested_vl -d nested_vl)
+ ADD_H5_TEST (dset_attr 0 ${HDF_FILE1}.h5 -v -s dset_attr -d dset_attr)
# "Test copying dataset within group in source file to root of destination"
ADD_H5_TEST (simple_top 0 ${HDF_FILE1}.h5 -v -s grp_dsets/simple -d simple_top)
@@ -307,24 +347,41 @@
# "Test copying empty, 'full' & 'nested' groups"
ADD_H5_TEST (grp_empty 0 ${HDF_FILE1}.h5 -v -s grp_empty -d grp_empty)
- ADD_H5_TEST (grp_dsets 0 ${HDF_FILE1}.h5 -v -s grp_dsets -d grp_dsets)
- ADD_H5_TEST (grp_nested 0 ${HDF_FILE1}.h5 -v -s grp_nested -d grp_nested)
+ if (USE_FILTER_DEFLATE)
+ ADD_H5_TEST (grp_dsets 0 ${HDF_FILE1}.h5 -v -s grp_dsets -d grp_dsets)
+ ADD_H5_TEST (grp_nested 0 ${HDF_FILE1}.h5 -v -s grp_nested -d grp_nested)
+ else ()
+ ADD_H5_TEST (grp_dsets 2 ${HDF_FILE1}.h5 -v -s grp_dsets -d grp_dsets)
+ ADD_H5_TEST (grp_nested 2 ${HDF_FILE1}.h5 -v -s grp_nested -d grp_nested)
+ endif ()
+ ADD_H5_TEST (grp_attr 0 ${HDF_FILE1}.h5 -v -s grp_attr -d grp_attr)
# "Test copying dataset within group in source file to group in destination"
ADD_H5_TEST2 (simple_group 0 ${HDF_FILE1}.h5 grp_dsets grp_dsets -v -s /grp_dsets/simple -d /grp_dsets/simple_group)
- # "Test copying & renaming group"
- ADD_H5_TEST (grp_rename 0 ${HDF_FILE1}.h5 -v -s grp_dsets -d grp_rename)
-
- # "Test copying 'full' group hierarchy into group in destination file"
- ADD_H5_TEST2 (grp_dsets_rename 0 ${HDF_FILE1}.h5 grp_dsets grp_rename -v -s grp_dsets -d /grp_rename/grp_dsets)
+ if (USE_FILTER_DEFLATE)
+ # "Test copying & renaming group"
+ ADD_H5_TEST (grp_rename 0 ${HDF_FILE1}.h5 -v -s grp_dsets -d grp_rename)
+ # "Test copying 'full' group hierarchy into group in destination file"
+ ADD_H5_TEST2 (grp_dsets_rename 0 ${HDF_FILE1}.h5 grp_dsets grp_rename -v -s grp_dsets -d /grp_rename/grp_dsets)
+ else ()
+ # "Test copying & renaming group"
+ ADD_H5_TEST (grp_rename 2 ${HDF_FILE1}.h5 -v -s grp_dsets -d grp_rename)
+ # "Test copying 'full' group hierarchy into group in destination file"
+ ADD_H5_TEST2 (grp_dsets_rename 2 ${HDF_FILE1}.h5 grp_dsets grp_rename -v -s grp_dsets -d /grp_rename/grp_dsets)
+ endif ()
# "Test copying objects into group hier. that doesn't exist yet in destination file"
ADD_H5_TEST (A_B1_simple 0 ${HDF_FILE1}.h5 -vp -s simple -d /A/B1/simple)
ADD_H5_TEST (A_B2_simple2 0 ${HDF_FILE1}.h5 -vp -s simple -d /A/B2/simple2)
ADD_H5_TEST (C_D_simple 0 ${HDF_FILE1}.h5 -vp -s /grp_dsets/simple -d /C/D/simple)
- ADD_H5_TEST (E_F_grp_dsets 0 ${HDF_FILE1}.h5 -vp -s /grp_dsets -d /E/F/grp_dsets)
- ADD_H5_TEST (G_H_grp_nested 0 ${HDF_FILE1}.h5 -vp -s /grp_nested -d /G/H/grp_nested)
+ if (USE_FILTER_DEFLATE)
+ ADD_H5_TEST (E_F_grp_dsets 0 ${HDF_FILE1}.h5 -vp -s /grp_dsets -d /E/F/grp_dsets)
+ ADD_H5_TEST (G_H_grp_nested 0 ${HDF_FILE1}.h5 -vp -s /grp_nested -d /G/H/grp_nested)
+ else ()
+ ADD_H5_TEST (E_F_grp_dsets 2 ${HDF_FILE1}.h5 -vp -s /grp_dsets -d /E/F/grp_dsets)
+ ADD_H5_TEST (G_H_grp_nested 2 ${HDF_FILE1}.h5 -vp -s /grp_nested -d /G/H/grp_nested)
+ endif ()
############# COPY REFERENCES ##############
@@ -370,4 +427,8 @@
# - dataset
ADD_H5_TEST_SAME (samefile1 0 ${HDF_FILE1}.h5 /simple /simple -v -s /simple -d /simple_cp)
# - group with some datasets
- ADD_H5_TEST_SAME (samefile2 0 ${HDF_FILE1}.h5 /grp_dsets /grp_dsets -v -s /grp_dsets -d /grp_dsets_cp)
+ if (USE_FILTER_DEFLATE)
+ ADD_H5_TEST_SAME (samefile2 0 ${HDF_FILE1}.h5 /grp_dsets /grp_dsets -v -s /grp_dsets -d /grp_dsets_cp)
+ else ()
+ ADD_H5_TEST_SAME (samefile2 2 ${HDF_FILE1}.h5 /grp_dsets /grp_dsets -v -s /grp_dsets -d /grp_dsets_cp)
+ endif ()
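The USE_FILTER_DEFLATE and USE_FILTER_SZIP switches used above are resolved at configure time from H5pubconf.h. The run-time equivalent in a C test driver would go through H5Zfilter_avail(); a minimal sketch is shown below (illustrative only, not part of this patch).

    /* Illustrative sketch: skip compressed-dataset cases at run time when the
     * deflate (or szip) filter was not compiled in, matching the intent of the
     * USE_FILTER_* checks above. */
    #include <hdf5.h>
    #include <stdio.h>

    int
    main(void)
    {
        htri_t deflate_avail = H5Zfilter_avail(H5Z_FILTER_DEFLATE);
        htri_t szip_avail    = H5Zfilter_avail(H5Z_FILTER_SZIP);

        if(deflate_avail > 0)
            printf("deflate available: run the compressed-dataset tests\n");
        else
            printf("deflate missing: expect resultcode 2 (skip the diff step)\n");

        if(szip_avail > 0)
            printf("szip available\n");

        return 0;
    }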
diff --git a/tools/test/h5copy/Makefile.am b/tools/test/h5copy/Makefile.am
index becb55f..5fda4a0 100644
--- a/tools/test/h5copy/Makefile.am
+++ b/tools/test/h5copy/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
diff --git a/tools/test/h5copy/h5copygentest.c b/tools/test/h5copy/h5copygentest.c
index 7669702..bda0686 100644
--- a/tools/test/h5copy/h5copygentest.c
+++ b/tools/test/h5copy/h5copygentest.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -35,12 +33,12 @@
#define DATASET_COMPRESSED "compressed"
#define DATASET_NAMED_VL "named_vl"
#define DATASET_NESTED_VL "nested_vl"
-#define DATASET_ATTR "dset_attr"
-#define ATTR "attr"
+#define DATASET_ATTR "dset_attr"
+#define ATTR "attr"
#define GROUP_EMPTY "grp_empty"
#define GROUP_DATASETS "grp_dsets"
#define GROUP_NESTED "grp_nested"
-#define GROUP_ATTR "grp_attr"
+#define GROUP_ATTR "grp_attr"
/* Obj reference */
#define OBJ_REF_DS "Dset1"
@@ -329,8 +327,8 @@ static void gent_nested_vl(hid_t loc_id)
* Function: gent_att_compound_vlstr
*
* Purpose: Generate a dataset and a group.
- * Both has an attribute with a compound datatype consisting
- * of a variable length string
+ *              Both have an attribute with a compound datatype consisting
+ *              of a variable-length string
*
*-------------------------------------------------------------------------
*/
@@ -349,7 +347,7 @@ static void gent_att_compound_vlstr(hid_t loc_id)
hid_t vl_str_tid = -1; /* Variable length datatype ID */
hid_t cmpd_tid = -1; /* Compound datatype ID */
hid_t null_sid = -1; /* Null dataspace ID */
- s1 buf; /* Buffer */
+ s1 buf; /* Buffer */
buf.i = 9;
buf.v = "ThisIsAString";
diff --git a/tools/test/h5copy/testfiles/h5copy_extlinks_src.out.ls b/tools/test/h5copy/testfiles/h5copy_extlinks_src.out.ls
index 0134714..af499a5 100644
--- a/tools/test/h5copy/testfiles/h5copy_extlinks_src.out.ls
+++ b/tools/test/h5copy/testfiles/h5copy_extlinks_src.out.ls
@@ -8,7 +8,7 @@ Opened "./testfiles/h5copy_extlinks_src.out.h5" with sec2 driver.
Storage: <details removed for portability>
Type: 32-bit little-endian integer
/copy1_group Group
- Location: 1:4696
+ Location: 1:4600
Links: 1
/copy1_group/extlink_datatype External Link {h5copy_extlinks_trg.h5//datatype}
/copy1_group/extlink_dset External Link {h5copy_extlinks_trg.h5//simple}
@@ -16,24 +16,24 @@ Opened "./testfiles/h5copy_extlinks_src.out.h5" with sec2 driver.
/copy1_group/extlink_notyet1 External Link {h5copy_extlinks_trg.h5//notyet}
/copy1_group/extlink_notyet2 External Link {notyet_file.h5//notyet}
/copy2_dset Dataset {6/6}
- Location: 1:4216
+ Location: 1:4120
Links: 1
Storage: <details removed for portability>
Type: 32-bit little-endian integer
/copy2_group Group
- Location: 1:5128
+ Location: 1:5032
Links: 1
/copy2_group/extlink_datatype Type
- Location: 1:6328
+ Location: 1:6232
Links: 1
- Type: shared-1:6328 32-bit little-endian integer
+ Type: shared-1:6232 32-bit little-endian integer
/copy2_group/extlink_dset Dataset {6/6}
- Location: 1:5496
+ Location: 1:5400
Links: 1
Storage: <details removed for portability>
Type: 32-bit little-endian integer
/copy2_group/extlink_grp Group
- Location: 1:6288
+ Location: 1:6192
Links: 1
/copy2_group/extlink_notyet1 External Link {h5copy_extlinks_trg.h5//notyet}
/copy2_group/extlink_notyet2 External Link {notyet_file.h5//notyet}
diff --git a/tools/test/h5copy/testfiles/h5copy_ref.out.ls b/tools/test/h5copy/testfiles/h5copy_ref.out.ls
index d685af2..f5b31a7 100644
--- a/tools/test/h5copy/testfiles/h5copy_ref.out.ls
+++ b/tools/test/h5copy/testfiles/h5copy_ref.out.ls
@@ -11,21 +11,21 @@ Opened "./testfiles/h5copy_ref.out.h5" with sec2 driver.
Storage: <details removed for portability>
Type: 32-bit little-endian integer
/COPY/Dset2 Dataset {3/3, 16/16}
- Location: 1:1960
+ Location: 1:4096
Links: 3
Storage: <details removed for portability>
Type: 8-bit integer
/COPY/Dset_OBJREF Dataset {2/2}
- Location: 1:5184
+ Location: 1:5224
Links: 1
Storage: <details removed for portability>
Type: object reference
/COPY/Dset_REGREF Dataset {2/2}
- Location: 1:5304
+ Location: 1:5344
Links: 1
Storage: <details removed for portability>
Type: dataset region reference
/COPY/Group Group
- Location: 1:2096
+ Location: 1:1960
Links: 3
-/~obj_pointed_by_2096 Group, same as /COPY/Group
+/~obj_pointed_by_1960 Group, same as /COPY/Group
diff --git a/tools/test/h5copy/testfiles/h5copytst.h5 b/tools/test/h5copy/testfiles/h5copytst.h5
index 1d1cbf1..474c3da 100644
--- a/tools/test/h5copy/testfiles/h5copytst.h5
+++ b/tools/test/h5copy/testfiles/h5copytst.h5
Binary files differ
diff --git a/tools/test/h5copy/testfiles/h5copytst.out.ls b/tools/test/h5copy/testfiles/h5copytst.out.ls
index 4044aaf..629915e 100644
--- a/tools/test/h5copy/testfiles/h5copytst.out.ls
+++ b/tools/test/h5copy/testfiles/h5copytst.out.ls
@@ -3,57 +3,57 @@ Opened "./testfiles/h5copytst.out.h5" with sec2 driver.
Location: 1:96
Links: 1
/A Group
- Location: 1:84304
+ Location: 1:84208
Links: 1
/A/B1 Group
- Location: 1:85008
+ Location: 1:84912
Links: 1
/A/B1/simple Dataset {6/6}
- Location: 1:84176
+ Location: 1:84080
Links: 1
Storage: <details removed for portability>
Type: 32-bit little-endian integer
/A/B2 Group
- Location: 1:88544
+ Location: 1:88448
Links: 1
/A/B2/simple2 Dataset {6/6}
- Location: 1:88416
+ Location: 1:88320
Links: 1
Storage: <details removed for portability>
Type: 32-bit little-endian integer
/C Group
- Location: 1:91752
+ Location: 1:91656
Links: 1
/C/D Group
- Location: 1:92456
+ Location: 1:92360
Links: 1
/C/D/simple Dataset {6/6}
- Location: 1:91624
+ Location: 1:91528
Links: 1
Storage: <details removed for portability>
Type: 32-bit little-endian integer
/E Group
- Location: 1:106368
+ Location: 1:106272
Links: 1
/E/F Group
- Location: 1:107072
+ Location: 1:106976
Links: 1
/E/F/grp_dsets Group
- Location: 1:94568
+ Location: 1:94472
Links: 1
/E/F/grp_dsets/chunk Dataset {6/6}
- Location: 1:98752
+ Location: 1:98656
Links: 1
Chunks: {2} 8 bytes
Storage: <details removed for portability>
Type: 32-bit little-endian integer
/E/F/grp_dsets/compact Dataset {6/6}
- Location: 1:99208
+ Location: 1:99112
Links: 1
Storage: <details removed for portability>
Type: 32-bit little-endian integer
/E/F/grp_dsets/compound Dataset {2/2}
- Location: 1:99344
+ Location: 1:99248
Links: 1
Storage: <details removed for portability>
Type: struct {
@@ -61,60 +61,60 @@ Opened "./testfiles/h5copytst.out.h5" with sec2 driver.
"str2" +20 20-byte null-terminated ASCII string
} 40 bytes
/E/F/grp_dsets/compressed Dataset {6/6}
- Location: 1:101656
+ Location: 1:101560
Links: 1
Chunks: {2} 8 bytes
Storage: <details removed for portability>
Filter-0: deflate-1 OPT {1}
Type: 32-bit little-endian integer
/E/F/grp_dsets/named_vl Dataset {2/2}
- Location: 1:105920
+ Location: 1:105824
Links: 1
Storage: <details removed for portability>
- Type: shared-1:106048 variable length of
+ Type: shared-1:105952 variable length of
32-bit little-endian integer
/E/F/grp_dsets/nested_vl Dataset {2/2}
- Location: 1:106096
+ Location: 1:106000
Links: 1
Storage: <details removed for portability>
Type: variable length of
variable length of
32-bit little-endian integer
/E/F/grp_dsets/simple Dataset {6/6}
- Location: 1:106240
+ Location: 1:106144
Links: 1
Storage: <details removed for portability>
Type: 32-bit little-endian integer
/E/F/grp_dsets/vl Type
- Location: 1:106048
+ Location: 1:105952
Links: 2
- Type: shared-1:106048 variable length of
+ Type: shared-1:105952 variable length of
32-bit little-endian integer
/G Group
- Location: 1:122016
+ Location: 1:121920
Links: 1
/G/H Group
- Location: 1:122720
+ Location: 1:122624
Links: 1
/G/H/grp_nested Group
- Location: 1:109096
+ Location: 1:109000
Links: 1
/G/H/grp_nested/grp_dsets Group
- Location: 1:109888
+ Location: 1:109792
Links: 1
/G/H/grp_nested/grp_dsets/chunk Dataset {6/6}
- Location: 1:114072
+ Location: 1:113976
Links: 1
Chunks: {2} 8 bytes
Storage: <details removed for portability>
Type: 32-bit little-endian integer
/G/H/grp_nested/grp_dsets/compact Dataset {6/6}
- Location: 1:114528
+ Location: 1:114432
Links: 1
Storage: <details removed for portability>
Type: 32-bit little-endian integer
/G/H/grp_nested/grp_dsets/compound Dataset {2/2}
- Location: 1:114664
+ Location: 1:114568
Links: 1
Storage: <details removed for portability>
Type: struct {
@@ -122,48 +122,48 @@ Opened "./testfiles/h5copytst.out.h5" with sec2 driver.
"str2" +20 20-byte null-terminated ASCII string
} 40 bytes
/G/H/grp_nested/grp_dsets/compressed Dataset {6/6}
- Location: 1:116976
+ Location: 1:116880
Links: 1
Chunks: {2} 8 bytes
Storage: <details removed for portability>
Filter-0: deflate-1 OPT {1}
Type: 32-bit little-endian integer
/G/H/grp_nested/grp_dsets/named_vl Dataset {2/2}
- Location: 1:121240
+ Location: 1:121144
Links: 1
Storage: <details removed for portability>
- Type: shared-1:121368 variable length of
+ Type: shared-1:121272 variable length of
32-bit little-endian integer
/G/H/grp_nested/grp_dsets/nested_vl Dataset {2/2}
- Location: 1:121416
+ Location: 1:121320
Links: 1
Storage: <details removed for portability>
Type: variable length of
variable length of
32-bit little-endian integer
/G/H/grp_nested/grp_dsets/simple Dataset {6/6}
- Location: 1:121560
+ Location: 1:121464
Links: 1
Storage: <details removed for portability>
Type: 32-bit little-endian integer
/G/H/grp_nested/grp_dsets/vl Type
- Location: 1:121368
+ Location: 1:121272
Links: 2
- Type: shared-1:121368 variable length of
+ Type: shared-1:121272 variable length of
32-bit little-endian integer
/chunk Dataset {6/6}
- Location: 1:6312
+ Location: 1:6216
Links: 1
Chunks: {2} 8 bytes
Storage: <details removed for portability>
Type: 32-bit little-endian integer
/compact Dataset {6/6}
- Location: 1:6440
+ Location: 1:6344
Links: 1
Storage: <details removed for portability>
Type: 32-bit little-endian integer
/compound Dataset {2/2}
- Location: 1:8624
+ Location: 1:8528
Links: 1
Storage: <details removed for portability>
Type: struct {
@@ -171,28 +171,28 @@ Opened "./testfiles/h5copytst.out.h5" with sec2 driver.
"str2" +20 20-byte null-terminated ASCII string
} 40 bytes
/compressed Dataset {6/6}
- Location: 1:12984
+ Location: 1:12888
Links: 1
Chunks: {2} 8 bytes
Storage: <details removed for portability>
Filter-0: deflate-1 OPT {1}
Type: 32-bit little-endian integer
/grp_dsets Group
- Location: 1:28128
+ Location: 1:28032
Links: 1
/grp_dsets/chunk Dataset {6/6}
- Location: 1:32312
+ Location: 1:32216
Links: 1
Chunks: {2} 8 bytes
Storage: <details removed for portability>
Type: 32-bit little-endian integer
/grp_dsets/compact Dataset {6/6}
- Location: 1:32768
+ Location: 1:32672
Links: 1
Storage: <details removed for portability>
Type: 32-bit little-endian integer
/grp_dsets/compound Dataset {2/2}
- Location: 1:32904
+ Location: 1:32808
Links: 1
Storage: <details removed for portability>
Type: struct {
@@ -200,62 +200,62 @@ Opened "./testfiles/h5copytst.out.h5" with sec2 driver.
"str2" +20 20-byte null-terminated ASCII string
} 40 bytes
/grp_dsets/compressed Dataset {6/6}
- Location: 1:35216
+ Location: 1:35120
Links: 1
Chunks: {2} 8 bytes
Storage: <details removed for portability>
Filter-0: deflate-1 OPT {1}
Type: 32-bit little-endian integer
/grp_dsets/named_vl Dataset {2/2}
- Location: 1:39480
+ Location: 1:39384
Links: 1
Storage: <details removed for portability>
- Type: shared-1:39608 variable length of
+ Type: shared-1:39512 variable length of
32-bit little-endian integer
/grp_dsets/nested_vl Dataset {2/2}
- Location: 1:39656
+ Location: 1:39560
Links: 1
Storage: <details removed for portability>
Type: variable length of
variable length of
32-bit little-endian integer
/grp_dsets/simple Dataset {6/6}
- Location: 1:39800
+ Location: 1:39704
Links: 1
Storage: <details removed for portability>
Type: 32-bit little-endian integer
/grp_dsets/simple_group Dataset {6/6}
- Location: 1:55912
+ Location: 1:55816
Links: 1
Storage: <details removed for portability>
Type: 32-bit little-endian integer
/grp_dsets/vl Type
- Location: 1:39608
+ Location: 1:39512
Links: 2
- Type: shared-1:39608 variable length of
+ Type: shared-1:39512 variable length of
32-bit little-endian integer
/grp_empty Group
- Location: 1:27336
+ Location: 1:27240
Links: 1
/grp_nested Group
- Location: 1:40592
+ Location: 1:40496
Links: 1
/grp_nested/grp_dsets Group
- Location: 1:41384
+ Location: 1:41288
Links: 1
/grp_nested/grp_dsets/chunk Dataset {6/6}
- Location: 1:45568
+ Location: 1:45472
Links: 1
Chunks: {2} 8 bytes
Storage: <details removed for portability>
Type: 32-bit little-endian integer
/grp_nested/grp_dsets/compact Dataset {6/6}
- Location: 1:46024
+ Location: 1:45928
Links: 1
Storage: <details removed for portability>
Type: 32-bit little-endian integer
/grp_nested/grp_dsets/compound Dataset {2/2}
- Location: 1:46160
+ Location: 1:46064
Links: 1
Storage: <details removed for portability>
Type: struct {
@@ -263,51 +263,51 @@ Opened "./testfiles/h5copytst.out.h5" with sec2 driver.
"str2" +20 20-byte null-terminated ASCII string
} 40 bytes
/grp_nested/grp_dsets/compressed Dataset {6/6}
- Location: 1:48472
+ Location: 1:48376
Links: 1
Chunks: {2} 8 bytes
Storage: <details removed for portability>
Filter-0: deflate-1 OPT {1}
Type: 32-bit little-endian integer
/grp_nested/grp_dsets/named_vl Dataset {2/2}
- Location: 1:52736
+ Location: 1:52640
Links: 1
Storage: <details removed for portability>
- Type: shared-1:52864 variable length of
+ Type: shared-1:52768 variable length of
32-bit little-endian integer
/grp_nested/grp_dsets/nested_vl Dataset {2/2}
- Location: 1:52912
+ Location: 1:52816
Links: 1
Storage: <details removed for portability>
Type: variable length of
variable length of
32-bit little-endian integer
/grp_nested/grp_dsets/simple Dataset {6/6}
- Location: 1:53056
+ Location: 1:52960
Links: 1
Storage: <details removed for portability>
Type: 32-bit little-endian integer
/grp_nested/grp_dsets/vl Type
- Location: 1:52864
+ Location: 1:52768
Links: 2
- Type: shared-1:52864 variable length of
+ Type: shared-1:52768 variable length of
32-bit little-endian integer
/grp_rename Group
- Location: 1:57120
+ Location: 1:57024
Links: 1
/grp_rename/chunk Dataset {6/6}
- Location: 1:61304
+ Location: 1:61208
Links: 1
Chunks: {2} 8 bytes
Storage: <details removed for portability>
Type: 32-bit little-endian integer
/grp_rename/compact Dataset {6/6}
- Location: 1:61760
+ Location: 1:61664
Links: 1
Storage: <details removed for portability>
Type: 32-bit little-endian integer
/grp_rename/compound Dataset {2/2}
- Location: 1:61896
+ Location: 1:61800
Links: 1
Storage: <details removed for portability>
Type: struct {
@@ -315,28 +315,28 @@ Opened "./testfiles/h5copytst.out.h5" with sec2 driver.
"str2" +20 20-byte null-terminated ASCII string
} 40 bytes
/grp_rename/compressed Dataset {6/6}
- Location: 1:64208
+ Location: 1:64112
Links: 1
Chunks: {2} 8 bytes
Storage: <details removed for portability>
Filter-0: deflate-1 OPT {1}
Type: 32-bit little-endian integer
/grp_rename/grp_dsets Group
- Location: 1:70000
+ Location: 1:69904
Links: 1
/grp_rename/grp_dsets/chunk Dataset {6/6}
- Location: 1:74184
+ Location: 1:74088
Links: 1
Chunks: {2} 8 bytes
Storage: <details removed for portability>
Type: 32-bit little-endian integer
/grp_rename/grp_dsets/compact Dataset {6/6}
- Location: 1:74640
+ Location: 1:74544
Links: 1
Storage: <details removed for portability>
Type: 32-bit little-endian integer
/grp_rename/grp_dsets/compound Dataset {2/2}
- Location: 1:74776
+ Location: 1:74680
Links: 1
Storage: <details removed for portability>
Type: struct {
@@ -344,73 +344,73 @@ Opened "./testfiles/h5copytst.out.h5" with sec2 driver.
"str2" +20 20-byte null-terminated ASCII string
} 40 bytes
/grp_rename/grp_dsets/compressed Dataset {6/6}
- Location: 1:77088
+ Location: 1:76992
Links: 1
Chunks: {2} 8 bytes
Storage: <details removed for portability>
Filter-0: deflate-1 OPT {1}
Type: 32-bit little-endian integer
/grp_rename/grp_dsets/named_vl Dataset {2/2}
- Location: 1:81352
+ Location: 1:81256
Links: 1
Storage: <details removed for portability>
- Type: shared-1:81480 variable length of
+ Type: shared-1:81384 variable length of
32-bit little-endian integer
/grp_rename/grp_dsets/nested_vl Dataset {2/2}
- Location: 1:81528
+ Location: 1:81432
Links: 1
Storage: <details removed for portability>
Type: variable length of
variable length of
32-bit little-endian integer
/grp_rename/grp_dsets/simple Dataset {6/6}
- Location: 1:81672
+ Location: 1:81576
Links: 1
Storage: <details removed for portability>
Type: 32-bit little-endian integer
/grp_rename/grp_dsets/vl Type
- Location: 1:81480
+ Location: 1:81384
Links: 2
- Type: shared-1:81480 variable length of
+ Type: shared-1:81384 variable length of
32-bit little-endian integer
/grp_rename/named_vl Dataset {2/2}
- Location: 1:68472
+ Location: 1:68376
Links: 1
Storage: <details removed for portability>
- Type: shared-1:68600 variable length of
+ Type: shared-1:68504 variable length of
32-bit little-endian integer
/grp_rename/nested_vl Dataset {2/2}
- Location: 1:68648
+ Location: 1:68552
Links: 1
Storage: <details removed for portability>
Type: variable length of
variable length of
32-bit little-endian integer
/grp_rename/simple Dataset {6/6}
- Location: 1:68792
+ Location: 1:68696
Links: 1
Storage: <details removed for portability>
Type: 32-bit little-endian integer
/grp_rename/vl Type
- Location: 1:68600
+ Location: 1:68504
Links: 2
- Type: shared-1:68600 variable length of
+ Type: shared-1:68504 variable length of
32-bit little-endian integer
/named_vl Dataset {2/2}
- Location: 1:17280
+ Location: 1:17184
Links: 1
Storage: <details removed for portability>
- Type: shared-1:17408 variable length of
+ Type: shared-1:17312 variable length of
32-bit little-endian integer
/nested_vl Dataset {2/2}
- Location: 1:21760
+ Location: 1:21664
Links: 1
Storage: <details removed for portability>
Type: variable length of
variable length of
32-bit little-endian integer
/rename Dataset {2/2}
- Location: 1:26128
+ Location: 1:26032
Links: 1
Storage: <details removed for portability>
Type: struct {
@@ -423,7 +423,7 @@ Opened "./testfiles/h5copytst.out.h5" with sec2 driver.
Storage: <details removed for portability>
Type: 32-bit little-endian integer
/simple_top Dataset {6/6}
- Location: 1:23952
+ Location: 1:23856
Links: 1
Storage: <details removed for portability>
Type: 32-bit little-endian integer
diff --git a/tools/test/h5copy/testh5copy.sh.in b/tools/test/h5copy/testh5copy.sh.in
index 6d1478a..203bfef 100644
--- a/tools/test/h5copy/testh5copy.sh.in
+++ b/tools/test/h5copy/testh5copy.sh.in
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Tests for the h5copy tool
#
@@ -21,6 +19,10 @@
srcdir=@srcdir@
+# Determine which filters are available
+USE_FILTER_SZIP="@USE_FILTER_SZIP@"
+USE_FILTER_DEFLATE="@USE_FILTER_DEFLATE@"
+
# source dirs
SRC_TOOLS="$srcdir/../.."
SRC_TOOLS_TESTFILES="$SRC_TOOLS/testfiles"
@@ -78,7 +80,7 @@ AWK='awk'
nerrors=0
verbose=yes
-h5haveexitcode=yes # default is yes
+h5haveexitcode=yes # default is yes
TESTDIR=./testfiles
test -d $TESTDIR || mkdir $TESTDIR
@@ -111,10 +113,10 @@ COPY_TESTFILES_TO_TESTDIR()
INODE_SDIR=`$LS -i -d $SDIR | $AWK -F' ' '{print $1}'`
INODE_DDIR=`$LS -i -d $TESTDIR | $AWK -F' ' '{print $1}'`
if [ "$INODE_SDIR" != "$INODE_DDIR" ]; then
- $CP -f $tstfile $TESTDIR
+ $CP -f $tstfile $TESTDIR
if [ $? -ne 0 ]; then
echo "Error: FAILED to copy $tstfile ."
-
+
# Comment out this to CREATE expected file
exit $EXIT_FAILURE
fi
@@ -139,13 +141,13 @@ CLEAN_TESTFILES_AND_TESTDIR()
# Print a "SKIP" message
SKIP() {
- TESTING $H5COPY $@
- echo " -SKIP-"
+ TESTING $H5COPY $@
+ echo " -SKIP-"
}
# Print a line-line message left justified in a field of 70 characters
# beginning with the word "Testing".
-TESTING()
+TESTING()
{
SPACES=" "
echo "Testing $* $SPACES" |cut -c1-70 |tr -d '\012'
@@ -154,7 +156,7 @@ TESTING()
# Print a line-line message left justified in a field of 70 characters
# beginning with the word "Verifying".
#
-VERIFY()
+VERIFY()
{
SPACES=" "
echo "Verifying h5diff output $* $SPACES" | cut -c1-70 | tr -d '\012'
@@ -163,7 +165,7 @@ VERIFY()
# Print a line-line message left justified in a field of 70 characters
# beginning with the word "Verifying".
#
-VERIFY_OUTPUT()
+VERIFY_OUTPUT()
{
SPACES=" "
echo "Verifying output files $* $SPACES" | cut -c1-70 | tr -d '\012'
@@ -182,7 +184,7 @@ VERIFY_OUTPUT()
# $4 is output file
# $* everything else arguments for h5copy.
-TOOLTEST()
+TOOLTEST()
{
actualout="$TESTDIR/tooltest.actualout"
actualerr="$TESTDIR/tooltest.actualerr"
@@ -199,7 +201,7 @@ TOOLTEST()
fi
if [ "$3" = -o ]; then
outputfile=$4
- else
+ else
if [ "$1" = -f ]; then
outputfile=$6
else
@@ -207,7 +209,7 @@ TOOLTEST()
fi
runh5diff=no
fi
-
+
TESTING $H5COPY $@
(
echo "#############################"
@@ -223,7 +225,7 @@ TOOLTEST()
nerrors="`expr $nerrors + 1`"
else
echo " PASSED"
-
+
if [ $runh5diff != no ]; then
H5DIFFTEST $inputfile $outputfile $7 $9
fi
@@ -236,7 +238,7 @@ TOOLTEST()
}
# TOOLTEST back-to-back
-TOOLTEST_PREFILL()
+TOOLTEST_PREFILL()
{
actualout="$TESTDIR/tooltest.actualout"
actualerr="$TESTDIR/tooltest.actualerr"
@@ -248,21 +250,21 @@ TOOLTEST_PREFILL()
fi
if [ "$3" = -o ]; then
outputfile=$4
- else
+ else
runh5diff=no
fi
-
+
grp_name=$5
grp_name2=$6
obj_name=$7
obj_name2=$8
-
+
TESTING $H5COPY $@
(
echo "#############################"
echo " output for '$H5COPY $@'"
echo "#############################"
- $RUNSERIAL $H5COPY_BIN -i $inputfile -o $outputfile -v -s $grp_name -d $grp_name2
+ $RUNSERIAL $H5COPY_BIN -i $inputfile -o $outputfile -v -s $grp_name -d $grp_name2
) > $actualout 2> $actualerr
RET=$?
if [ $RET != 0 ]; then
@@ -276,7 +278,7 @@ TOOLTEST_PREFILL()
echo "#############################"
echo " output for '$H5COPY $@'"
echo "#############################"
- $RUNSERIAL $H5COPY_BIN -i $inputfile -o $outputfile -v -s $obj_name -d $obj_name2
+ $RUNSERIAL $H5COPY_BIN -i $inputfile -o $outputfile -v -s $obj_name -d $obj_name2
) > $actualout 2> $actualerr
RET=$?
if [ $RET != 0 ]; then
@@ -286,11 +288,11 @@ TOOLTEST_PREFILL()
nerrors="`expr $nerrors + 1`"
else
echo " PASSED"
-
+
if [ $runh5diff != no ]; then
H5DIFFTEST $inputfile $outputfile $obj_name $obj_name2
fi
-
+
# Clean up output file
if test -z "$HDF5_NOCLEANUP"; then
rm -f $actualout $actualerr $outputfile
@@ -300,7 +302,7 @@ TOOLTEST_PREFILL()
}
# TOOLTEST back-to-back
-TOOLTEST_SAME()
+TOOLTEST_SAME()
{
actualout="$TESTDIR/tooltest.actualout"
actualerr="$TESTDIR/tooltest.actualerr"
@@ -312,19 +314,19 @@ TOOLTEST_SAME()
fi
if [ "$3" = -o ]; then
outputfile=$4
- else
+ else
runh5diff=no
fi
-
+
grp_name=$5
grp_name2=$6
-
+
TESTING $H5COPY $@
(
echo "#############################"
echo " output for '$H5COPY $@'"
echo "#############################"
- $RUNSERIAL $H5COPY_BIN -i $inputfile -o $outputfile -v -s $grp_name -d $grp_name
+ $RUNSERIAL $H5COPY_BIN -i $inputfile -o $outputfile -v -s $grp_name -d $grp_name
) > $actualout 2> $actualerr
RET=$?
if [ $RET != 0 ]; then
@@ -338,7 +340,7 @@ TOOLTEST_SAME()
echo "#############################"
echo " output for '$H5COPY $@'"
echo "#############################"
- $RUNSERIAL $H5COPY_BIN -i $outputfile -o $outputfile -v -s $grp_name -d $grp_name2
+ $RUNSERIAL $H5COPY_BIN -i $outputfile -o $outputfile -v -s $grp_name -d $grp_name2
) > $actualout 2> $actualerr
RET=$?
if [ $RET != 0 ]; then
@@ -348,11 +350,11 @@ TOOLTEST_SAME()
nerrors="`expr $nerrors + 1`"
else
echo " PASSED"
-
+
if [ $runh5diff != no ]; then
H5DIFFTEST $outputfile $outputfile $grp_name $grp_name2
fi
-
+
# Clean up output file
if test -z "$HDF5_NOCLEANUP"; then
rm -f $actualout $actualerr $outputfile
@@ -379,6 +381,8 @@ CMP_OUTPUT()
# Create the expect file if it doesn't yet exist.
echo " CREATED"
cp $actual $expect
+ echo " Expected result (*.ddl) missing"
+ nerrors="`expr $nerrors + 1`"
elif $CMP $expect $actual; then
echo " PASSED"
else
@@ -389,7 +393,7 @@ CMP_OUTPUT()
fi
}
-TOOLTEST_FAIL()
+TOOLTEST_FAIL()
{
expectout="$TESTDIR/$1"
actualout="$TESTDIR/$1.actualout"
@@ -433,7 +437,7 @@ TOOLTEST_FAIL()
cat $actualout
nerrors="`expr $nerrors + 1`"
fi
-
+
# Clean up output file
if test -z "$HDF5_NOCLEANUP"; then
@@ -444,10 +448,10 @@ TOOLTEST_FAIL()
# Call the h5diff tool
#
-H5DIFFTEST()
+H5DIFFTEST()
{
VERIFY $@
- $RUNSERIAL $H5DIFF_BIN -q "$@"
+ $RUNSERIAL $H5DIFF_BIN -q "$@"
RET=$?
if [ $RET != 0 ] ; then
echo "*FAILED*"
@@ -459,10 +463,10 @@ H5DIFFTEST()
# Call the h5diff tool with a call that is expected to fail
#
-H5DIFFTEST_FAIL()
+H5DIFFTEST_FAIL()
{
VERIFY $@
- $RUNSERIAL $H5DIFF_BIN -q "$@"
+ $RUNSERIAL $H5DIFF_BIN -q "$@"
RET=$?
if [ $h5haveexitcode = 'yes' -a $RET != 1 ] ; then
@@ -478,7 +482,7 @@ H5DIFFTEST_FAIL()
#
# Assumed arguments:
# <none>
-COPY_OBJECTS()
+COPY_OBJECTS()
{
TESTFILE="$TESTDIR/h5copytst.h5"
@@ -487,7 +491,9 @@ COPY_OBJECTS()
TOOLTEST -i $TESTFILE -o $TESTDIR/chunk.out.h5 -v -s chunk -d chunk
TOOLTEST -i $TESTFILE -o $TESTDIR/compact.out.h5 -v -s compact -d compact
TOOLTEST -i $TESTFILE -o $TESTDIR/compound.out.h5 -v -s compound -d compound
+if test $USE_FILTER_DEFLATE = "yes" ; then
TOOLTEST -i $TESTFILE -o $TESTDIR/compressed.out.h5 -v -s compressed -d compressed
+fi
TOOLTEST -i $TESTFILE -o $TESTDIR/named_vl.out.h5 -v -s named_vl -d named_vl
TOOLTEST -i $TESTFILE -o $TESTDIR/nested_vl.out.h5 -v -s nested_vl -d nested_vl
TOOLTEST -i $TESTFILE -o $TESTDIR/dset_attr.out.h5 -v -s /dset_attr -d /dset_attr
@@ -499,16 +505,19 @@ COPY_OBJECTS()
TOOLTEST -i $TESTFILE -o $TESTDIR/dsrename.out.h5 -v -s compound -d rename
echo "Test copying empty, 'full' & 'nested' groups"
+if test $USE_FILTER_DEFLATE = "yes" ; then
TOOLTEST -i $TESTFILE -o $TESTDIR/grp_empty.out.h5 -v -s grp_empty -d grp_empty
+fi
TOOLTEST -i $TESTFILE -o $TESTDIR/grp_dsets.out.h5 -v -s grp_dsets -d grp_dsets
TOOLTEST -i $TESTFILE -o $TESTDIR/grp_nested.out.h5 -v -s grp_nested -d grp_nested
TOOLTEST -i $TESTFILE -o $TESTDIR/grp_attr.out.h5 -v -s grp_attr -d grp_attr
+if test $USE_FILTER_DEFLATE = "yes" ; then
echo "Test copying dataset within group in source file to group in destination"
TOOLTEST_PREFILL -i $TESTFILE -o $TESTDIR/simple_group.out.h5 grp_dsets grp_dsets /grp_dsets/simple /grp_dsets/simple_group
-
echo "Test copying & renaming group"
TOOLTEST -i $TESTFILE -o $TESTDIR/grp_rename.out.h5 -v -s grp_dsets -d grp_rename
+fi
echo "Test copying 'full' group hierarchy into group in destination file"
TOOLTEST_PREFILL -i $TESTFILE -o $TESTDIR/grp_dsets_rename.out.h5 grp_dsets grp_rename grp_dsets /grp_rename/grp_dsets
@@ -517,15 +526,17 @@ COPY_OBJECTS()
TOOLTEST -i $TESTFILE -o $TESTDIR/A_B1_simple.out.h5 -vp -s simple -d /A/B1/simple
TOOLTEST -i $TESTFILE -o $TESTDIR/A_B2_simple2.out.h5 -vp -s simple -d /A/B2/simple2
TOOLTEST -i $TESTFILE -o $TESTDIR/C_D_simple.out.h5 -vp -s /grp_dsets/simple -d /C/D/simple
+if test $USE_FILTER_DEFLATE = "yes" ; then
TOOLTEST -i $TESTFILE -o $TESTDIR/E_F_grp_dsets.out.h5 -vp -s /grp_dsets -d /E/F/grp_dsets
TOOLTEST -i $TESTFILE -o $TESTDIR/G_H_grp_nested.out.h5 -vp -s /grp_nested -d /G/H/grp_nested
+fi
}
# Copy references in various way.
#
# Assumed arguments:
# <none>
-COPY_REFERENCES()
+COPY_REFERENCES()
{
TESTFILE="$TESTDIR/h5copy_ref.h5"
@@ -538,7 +549,7 @@ COPY_REFERENCES()
#
# Assumed arguments:
# <none>
-COPY_EXT_LINKS()
+COPY_EXT_LINKS()
{
TESTFILE="$TESTDIR/h5copy_extlinks_src.h5"
@@ -571,7 +582,7 @@ COPY_EXT_LINKS()
#
# Assumed arguments:
# <none>
-TEST_MISC()
+TEST_MISC()
{
TESTFILE="$TESTDIR/h5copytst.h5"
@@ -579,8 +590,10 @@ TEST_MISC()
TOOLTEST_FAIL h5copy_misc1.out -i $TESTFILE -o $TESTDIR/h5copy_misc1.out.h5 -v -s /simple -d /g1/g2/simple
echo "Test copying objects to the same file "
- TOOLTEST_SAME -i $TESTFILE -o $TESTDIR/samefile1.out.h5 /simple /simple_cp
+ TOOLTEST_SAME -i $TESTFILE -o $TESTDIR/samefile1.out.h5 /simple /simple_cp
+if test $USE_FILTER_DEFLATE = "yes" ; then
TOOLTEST_SAME -i $TESTFILE -o $TESTDIR/samefile2.out.h5 /grp_dsets /grp_dsets_cp
+fi
}
##############################################################################
@@ -590,7 +603,7 @@ TEST_MISC()
COPY_TESTFILES_TO_TESTDIR
# Start tests
-COPY_OBJECTS
+COPY_OBJECTS
COPY_REFERENCES
COPY_EXT_LINKS
TEST_MISC
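The deflate-dependent h5copy cases above are now gated on the USE_FILTER_DEFLATE value that configure substitutes into the script. For reference, a library client can ask the same question at run time through H5Zfilter_avail(); the short C program below is an illustrative sketch, not part of the test suite.

/* Sketch only: report whether the filters behind the USE_FILTER_*
 * substitutions are available in the installed library. */
#include <stdio.h>
#include "hdf5.h"

int main(void)
{
    htri_t have_deflate = H5Zfilter_avail(H5Z_FILTER_DEFLATE);
    htri_t have_szip    = H5Zfilter_avail(H5Z_FILTER_SZIP);

    if (have_deflate < 0 || have_szip < 0) {
        fprintf(stderr, "H5Zfilter_avail failed\n");
        return 1;
    }
    printf("deflate filter: %s\n", have_deflate > 0 ? "available" : "not available");
    printf("szip filter:    %s\n", have_szip    > 0 ? "available" : "not available");
    return 0;
}

The shell tests make the equivalent decision at configure time, so builds without deflate simply skip the compressed-dataset and group-hierarchy cases.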
diff --git a/tools/test/h5diff/CMakeLists.txt b/tools/test/h5diff/CMakeLists.txt
index 2507664..f6951a3 100644
--- a/tools/test/h5diff/CMakeLists.txt
+++ b/tools/test/h5diff/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_TOOLS_TEST_H5DIFF)
#-----------------------------------------------------------------------------
@@ -17,6 +17,34 @@ INCLUDE_DIRECTORIES (${HDF5_TOOLS_DIR}/lib)
set_target_properties (h5diffgentest PROPERTIES FOLDER generator/tools)
#add_test (NAME h5diffgentest COMMAND $<TARGET_FILE:h5diffgentest>)
- endif (HDF5_BUILD_GENERATORS)
+ endif ()
+
+ #-----------------------------------------------------------------------------
+ # If plugin library tests can be tested
+ #-----------------------------------------------------------------------------
+ set (HDF5_TOOL_PLUGIN_LIB_CORENAME "dynlibdiff")
+ set (HDF5_TOOL_PLUGIN_LIB_NAME "${HDF5_EXTERNAL_LIB_PREFIX}${HDF5_TOOL_PLUGIN_LIB_CORENAME}")
+ set (HDF5_TOOL_PLUGIN_LIB_TARGET ${HDF5_TOOL_PLUGIN_LIB_CORENAME})
+ add_definitions (${HDF_EXTRA_C_FLAGS})
+ INCLUDE_DIRECTORIES (${HDF5_SRC_DIR})
+
+ add_library (${HDF5_TOOL_PLUGIN_LIB_TARGET} SHARED dynlib_diff.c)
+ TARGET_C_PROPERTIES (${HDF5_TOOL_PLUGIN_LIB_TARGET} SHARED " " " ")
+ target_link_libraries (${HDF5_TOOL_PLUGIN_LIB_TARGET} ${HDF5_TEST_LIB_TARGET})
+ H5_SET_LIB_OPTIONS (${HDF5_TOOL_PLUGIN_LIB_TARGET} ${HDF5_TOOL_PLUGIN_LIB_NAME} SHARED ${HDF5_PACKAGE_SOVERSION})
+
+ # make plugins dir
+ file (MAKE_DIRECTORY "${CMAKE_BINARY_DIR}/plugins")
+ #-----------------------------------------------------------------------------
+ # Copy plugin library to a plugins folder
+ #-----------------------------------------------------------------------------
+ add_custom_command (
+ TARGET ${HDF5_TOOL_PLUGIN_LIB_TARGET}
+ POST_BUILD
+ COMMAND ${CMAKE_COMMAND}
+ ARGS -E copy_if_different
+ "$<TARGET_FILE:${HDF5_TOOL_PLUGIN_LIB_TARGET}>"
+ "${CMAKE_BINARY_DIR}/plugins/$<TARGET_FILE_NAME:${HDF5_TOOL_PLUGIN_LIB_TARGET}>"
+ )
include (CMakeTests.cmake)
diff --git a/tools/test/h5diff/CMakeTests.cmake b/tools/test/h5diff/CMakeTests.cmake
index 3687574..72dda6b 100644
--- a/tools/test/h5diff/CMakeTests.cmake
+++ b/tools/test/h5diff/CMakeTests.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
##############################################################################
##############################################################################
@@ -17,6 +28,7 @@
${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/h5diff_attr2.h5
${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/h5diff_dset1.h5
${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/h5diff_dset2.h5
+ ${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/h5diff_dset3.h5
${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/h5diff_hyper1.h5
${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/h5diff_hyper2.h5
${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/h5diff_empty.h5
@@ -55,6 +67,10 @@
${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/tmptest2.he5
${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/tmpSingleSiteBethe.reference.h5
${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/tmpSingleSiteBethe.output.h5
+ ${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/tudfilter.h5
+ ${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/tudfilter2.h5
+ ${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/diff_strings1.h5
+ ${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/diff_strings2.h5
# tools/testfiles/vds
${HDF5_TOOLS_DIR}/testfiles/vds/1_a.h5
${HDF5_TOOLS_DIR}/testfiles/vds/1_b.h5
@@ -207,6 +223,10 @@
${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/h5diff_518.txt
${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/h5diff_530.txt
${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/h5diff_540.txt
+ ${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/h5diff_60.txt
+ ${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/h5diff_61.txt
+ ${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/h5diff_62.txt
+ ${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/h5diff_63.txt
${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/h5diff_600.txt
${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/h5diff_601.txt
${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/h5diff_603.txt
@@ -256,9 +276,13 @@
${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/h5diff_709.txt
${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/h5diff_710.txt
${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/h5diff_80.txt
+ ${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/h5diff_800.txt
+ ${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/h5diff_801.txt
${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/h5diff_90.txt
${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/h5diff_tmp1.txt
${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/h5diff_tmp2.txt
+ ${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/h5diff_ud.txt
+ ${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/h5diff_udfail.txt
${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/h5diff_v1.txt
${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/h5diff_v2.txt
${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/h5diff_v3.txt
@@ -273,6 +297,9 @@
# Make testfiles dir under build dir
file (MAKE_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles")
+ if (H5_HAVE_PARALLEL)
+ file (MAKE_DIRECTORY "${PROJECT_BINARY_DIR}/PAR/testfiles")
+ endif ()
#
# copy test files from source to build dir
@@ -280,6 +307,9 @@
foreach (h5_tstfiles ${LIST_HDF5_TEST_FILES} ${LIST_OTHER_TEST_FILES})
get_filename_component(fname "${h5_tstfiles}" NAME)
HDFTEST_COPY_FILE("${h5_tstfiles}" "${PROJECT_BINARY_DIR}/testfiles/${fname}" "h5diff_files")
+ if (H5_HAVE_PARALLEL)
+ HDFTEST_COPY_FILE("${h5_tstfiles}" "${PROJECT_BINARY_DIR}/PAR/testfiles/${fname}" "h5diff_files")
+ endif ()
endforeach ()
@@ -290,11 +320,17 @@
foreach (h5_tstfiles ${LIST_WIN_TEST_FILES})
get_filename_component(fname "${h5_tstfiles}" NAME)
HDFTEST_COPY_FILE("${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/${h5_tstfiles}w.txt" "${PROJECT_BINARY_DIR}/testfiles/${fname}.txt" "h5diff_files")
+ if (H5_HAVE_PARALLEL)
+ HDFTEST_COPY_FILE("${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/${h5_tstfiles}w.txt" "${PROJECT_BINARY_DIR}/PAR/testfiles/${fname}.txt" "h5diff_files")
+ endif ()
endforeach ()
else ()
foreach (h5_tstfiles ${LIST_WIN_TEST_FILES})
get_filename_component(fname "${h5_tstfiles}" NAME)
HDFTEST_COPY_FILE("${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/${h5_tstfiles}.txt" "${PROJECT_BINARY_DIR}/testfiles/${fname}.txt" "h5diff_files")
+ if (H5_HAVE_PARALLEL)
+ HDFTEST_COPY_FILE("${HDF5_TOOLS_TEST_H5DIFF_SOURCE_DIR}/testfiles/${h5_tstfiles}.txt" "${PROJECT_BINARY_DIR}/PAR/testfiles/${fname}.txt" "h5diff_files")
+ endif ()
endforeach ()
endif ()
add_custom_target(h5diff_files ALL COMMENT "Copying files needed by h5diff tests" DEPENDS ${h5diff_files_list})
@@ -305,7 +341,7 @@
##############################################################################
##############################################################################
- MACRO (ADD_H5_TEST resultfile resultcode)
+ macro (ADD_H5_TEST resultfile resultcode)
# If using memchecker add tests without using scripts
if (HDF5_ENABLE_USING_MEMCHECKER)
add_test (NAME H5DIFF-${resultfile} COMMAND $<TARGET_FILE:h5diff> ${ARGN})
@@ -316,7 +352,7 @@
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (H5DIFF-${resultfile} PROPERTIES DEPENDS ${last_test})
endif ()
- else (HDF5_ENABLE_USING_MEMCHECKER)
+ else ()
add_test (
NAME H5DIFF-${resultfile}
COMMAND "${CMAKE_COMMAND}"
@@ -329,40 +365,90 @@
-D "TEST_APPEND=EXIT CODE:"
-P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
)
- endif (HDF5_ENABLE_USING_MEMCHECKER)
+ endif ()
if (H5_HAVE_PARALLEL)
ADD_PH5_TEST (${resultfile} ${resultcode} ${ARGN})
endif ()
- ENDMACRO (ADD_H5_TEST file)
+ endmacro ()
- MACRO (ADD_PH5_TEST resultfile resultcode)
+ macro (ADD_PH5_TEST resultfile resultcode)
# If using memchecker add tests without using scripts
if (HDF5_ENABLE_USING_MEMCHECKER)
add_test (NAME PH5DIFF-${resultfile} COMMAND $<TARGET_FILE:ph5diff> ${MPIEXEC} ${MPIEXEC_PREFLAGS} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} ${MPIEXEC_POSTFLAGS} ${ARGN})
- set_tests_properties (PH5DIFF-${resultfile} PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles")
+ set_tests_properties (PH5DIFF-${resultfile} PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/PAR/testfiles")
if (NOT ${resultcode} STREQUAL "0")
set_tests_properties (PH5DIFF-${resultfile} PROPERTIES WILL_FAIL "true")
endif ()
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (PH5DIFF-${resultfile} PROPERTIES DEPENDS ${last_test})
endif ()
- else (HDF5_ENABLE_USING_MEMCHECKER)
+ else ()
add_test (
NAME PH5DIFF-${resultfile}
COMMAND "${CMAKE_COMMAND}"
-D "TEST_PROGRAM=${MPIEXEC};${MPIEXEC_PREFLAGS};${MPIEXEC_NUMPROC_FLAG};${MPIEXEC_MAX_NUMPROCS};${MPIEXEC_POSTFLAGS};$<TARGET_FILE:ph5diff>"
-D "TEST_ARGS:STRING=${ARGN}"
- -D "TEST_FOLDER=${PROJECT_BINARY_DIR}/testfiles"
- -D "TEST_OUTPUT=P_${resultfile}.out"
- -D "TEST_EXPECT=${resultcode}"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}/PAR/testfiles"
+ -D "TEST_OUTPUT=${resultfile}.out"
+ -D "TEST_EXPECT=0"
-D "TEST_REFERENCE=${resultfile}.txt"
-# -D "TEST_APPEND=EXIT CODE: [0-9]"
-# -D "TEST_REF_FILTER=EXIT CODE: 0"
- -D "TEST_SKIP_COMPARE=TRUE"
- -P "${HDF_RESOURCES_EXT_DIR}/prunTest.cmake"
+ -D "TEST_APPEND=EXIT CODE:"
+ -D "TEST_REF_APPEND=EXIT CODE: [0-9]"
+ -D "TEST_REF_FILTER=EXIT CODE: 0"
+ -D "TEST_SORT_COMPARE=TRUE"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+ if (NOT "${last_test}" STREQUAL "")
+ set_tests_properties (PH5DIFF-${resultfile} PROPERTIES DEPENDS ${last_test})
+ endif ()
+ set (last_test "PH5DIFF-${resultfile}")
+ endif ()
+ endmacro ()
+
+ macro (ADD_H5_UD_TEST testname resultcode resultfile)
+ if (NOT HDF5_ENABLE_USING_MEMCHECKER)
+ # Remove any output file left over from previous test run
+ add_test (
+ NAME H5DIFF_UD-${testname}-clearall-objects
+ COMMAND ${CMAKE_COMMAND}
+ -E remove
+ testfiles/${resultfile}.out
+ testfiles/${resultfile}.out.err
)
- endif (HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO (ADD_PH5_TEST file)
+ if (${resultcode} STREQUAL "2")
+ add_test (
+ NAME H5DIFF_UD-${testname}
+ COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:h5diff>"
+ -D "TEST_ARGS:STRING=${ARGN}"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}/testfiles"
+ -D "TEST_OUTPUT=${resultfile}.out"
+ -D "TEST_EXPECT=${resultcode}"
+ -D "TEST_REFERENCE=${resultfile}.txt"
+ -D "TEST_APPEND=EXIT CODE:"
+ -D "TEST_ENV_VAR=HDF5_PLUGIN_PATH"
+ -D "TEST_ENV_VALUE=${CMAKE_BINARY_DIR}"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+ else ()
+ add_test (
+ NAME H5DIFF_UD-${testname}
+ COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:h5diff>"
+ -D "TEST_ARGS:STRING=${ARGN}"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}/testfiles"
+ -D "TEST_OUTPUT=${resultfile}.out"
+ -D "TEST_EXPECT=${resultcode}"
+ -D "TEST_REFERENCE=${resultfile}.txt"
+ -D "TEST_APPEND=EXIT CODE:"
+ -D "TEST_ENV_VAR=HDF5_PLUGIN_PATH"
+ -D "TEST_ENV_VALUE=${CMAKE_BINARY_DIR}/plugins"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+ endif ()
+ set_tests_properties (H5DIFF_UD-${testname} PROPERTIES DEPENDS H5DIFF_UD-${testname}-clearall-objects)
+ endif ()
+ endmacro ()
##############################################################################
##############################################################################
@@ -381,6 +467,7 @@
set (FILE6 h5diff_attr2.h5)
set (FILE7 h5diff_dset1.h5)
set (FILE8 h5diff_dset2.h5)
+ set (FILE8A h5diff_dset3.h5)
set (FILE9 h5diff_hyper1.h5)
set (FILE10 h5diff_hyper2.h5)
set (FILE11 h5diff_empty.h5)
@@ -419,6 +506,10 @@
# attrs with verbose option level
set (ATTR_VERBOSE_LEVEL_FILE1 h5diff_attr_v_level1.h5)
set (ATTR_VERBOSE_LEVEL_FILE2 h5diff_attr_v_level2.h5)
+ # strings
+ set (STRINGS1 diff_strings1.h5)
+ set (STRINGS2 diff_strings2.h5)
+
# VDS tests
set (FILEV1 1_vds.h5)
set (FILEV2 2_vds.h5)
@@ -683,6 +774,14 @@
h5diff_530.out.err
h5diff_540.out
h5diff_540.out.err
+ h5diff_60.out
+ h5diff_60.out.err
+ h5diff_61.out
+ h5diff_61.out.err
+ h5diff_62.out
+ h5diff_62.out.err
+ h5diff_63.out
+ h5diff_63.out.err
h5diff_600.out
h5diff_600.out.err
h5diff_601.out
@@ -777,6 +876,10 @@
h5diff_710.out.err
h5diff_80.out
h5diff_80.out.err
+ h5diff_800.out
+ h5diff_800.out.err
+ h5diff_801.out
+ h5diff_801.out.err
h5diff_90.out
h5diff_90.out.err
h5diff_v1.out
@@ -789,9 +892,9 @@
set_tests_properties (H5DIFF-clearall-objects PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles")
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (H5DIFF-clearall-objects PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
set (last_test "H5DIFF-clearall-objects")
- endif (HDF5_ENABLE_USING_MEMCHECKER)
+ endif ()
# ############################################################################
# # Common usage
@@ -918,6 +1021,13 @@ ADD_H5_TEST (h5diff_58 1 -v ${FILE7} ${FILE8} refreg)
# ( HDDFV-7942 )
ADD_H5_TEST (h5diff_59 0 -v ${FILE4} ${FILE4} dset11a dset11b)
+# Strings
+# ( HDFFV-10128 )
+ADD_H5_TEST (h5diff_60 1 -v ${STRINGS1} ${STRINGS2} string1 string1)
+ADD_H5_TEST (h5diff_61 1 -v ${STRINGS1} ${STRINGS2} string2 string2)
+ADD_H5_TEST (h5diff_62 1 -v ${STRINGS1} ${STRINGS2} string3 string3)
+ADD_H5_TEST (h5diff_63 1 -v ${STRINGS1} ${STRINGS2} string4 string4)
+
# ##############################################################################
# # Error messages
# ##############################################################################
@@ -1084,8 +1194,7 @@ ADD_H5_TEST (h5diff_103 1 -v --use-system-epsilon ${FILE1} ${FILE1} g1/d1
g1/d2)
# with --use-system-epsilon for float value. expect less differences
-ADD_H5_TEST (h5diff_104 1 -v --use-system-epsilon ${FILE1} ${FILE1} g1/fp1
-g1/fp2)
+ADD_H5_TEST (h5diff_104 1 -v --use-system-epsilon ${FILE1} ${FILE1} g1/fp1 g1/fp2)
# not comparable -c flag
ADD_H5_TEST (h5diff_200 0 ${FILE2} ${FILE2} g2/dset1 g2/dset2)
@@ -1376,8 +1485,27 @@ ADD_H5_TEST (h5diff_644 1 -v --use-system-epsilon -d 5 ${FILE1} ${FILE2} /g1/dse
ADD_H5_TEST (h5diff_645 1 -v -p 0.05 --use-system-epsilon ${FILE1} ${FILE2} /g1/dset3 /g1/dset4)
ADD_H5_TEST (h5diff_646 1 -v --use-system-epsilon -p 0.05 ${FILE1} ${FILE2} /g1/dset3 /g1/dset4)
-# VDS
+# ##############################################################################
+# # Test array differences
+# ##############################################################################
+#
+# Compare array datasets that differ in values (h5diff_800) and in array
+# datatype dimensions (h5diff_801).
+ADD_H5_TEST (h5diff_800 1 -v ${FILE7} ${FILE8} /g1/array /g1/array)
+ADD_H5_TEST (h5diff_801 1 -v ${FILE7} ${FILE8A} /g1/array /g1/array)
+
+# ##############################################################################
+# VDS tests
+# ##############################################################################
ADD_H5_TEST (h5diff_v1 0 -v ${FILEV1} ${FILEV2})
ADD_H5_TEST (h5diff_v2 0 -r ${FILEV1} ${FILEV2})
ADD_H5_TEST (h5diff_v3 0 -c ${FILEV1} ${FILEV2})
+##############################################################################
+### P L U G I N T E S T S
+##############################################################################
+ADD_H5_UD_TEST (h5diff_plugin_test 0 h5diff_ud -v tudfilter.h5 tudfilter2.h5)
+ADD_H5_UD_TEST (h5diff_plugin_fail 2 h5diff_udfail -v tudfilter.h5 tudfilter2.h5)
+
+# ##############################################################################
+# # END
+# ##############################################################################
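The new ADD_H5_UD_TEST macro runs h5diff with HDF5_PLUGIN_PATH pointing at the plugins folder built earlier, so the user-defined filter is loaded on demand when the filtered chunks are decoded; the deliberate-failure case (resultcode 2) points the variable at a directory without the plugin instead. A minimal C sketch of the same mechanism follows; the plugin directory and the dataset name "dynlibud" are placeholders for illustration, not names taken from the test files.

/* Sketch only: read a dataset written with a user-defined filter.
 * The library loads the filter plugin from HDF5_PLUGIN_PATH while
 * decoding the chunks.  Paths and the dataset name are placeholders. */
#include <stdio.h>
#include <stdlib.h>
#include "hdf5.h"

int main(void)
{
    hid_t    fid, did, sid;
    hssize_t npoints;
    char    *buf;

    setenv("HDF5_PLUGIN_PATH", "./plugins", 1);               /* assumed location */

    if ((fid = H5Fopen("tudfilter.h5", H5F_ACC_RDONLY, H5P_DEFAULT)) < 0)
        return 1;
    if ((did = H5Dopen2(fid, "dynlibud", H5P_DEFAULT)) < 0)   /* assumed name */
        return 1;

    sid     = H5Dget_space(did);
    npoints = H5Sget_simple_extent_npoints(sid);
    buf     = (char *)malloc((size_t)npoints);

    /* Decoding the chunks triggers dynamic loading of the filter */
    if (H5Dread(did, H5T_NATIVE_CHAR, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0)
        fprintf(stderr, "read through the user-defined filter failed\n");

    free(buf);
    H5Sclose(sid);
    H5Dclose(did);
    H5Fclose(fid);
    return 0;
}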
diff --git a/tools/test/h5diff/Makefile.am b/tools/test/h5diff/Makefile.am
index c366199..5721aaa 100644
--- a/tools/test/h5diff/Makefile.am
+++ b/tools/test/h5diff/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
@@ -38,6 +36,11 @@ check_PROGRAMS=$(TEST_PROG)
check_SCRIPTS=$(TEST_SCRIPT) $(TEST_SCRIPT_PARA)
# The parallel test script testph5diff.sh actually depends on testh5diff.sh.
SCRIPT_DEPEND=../../src/h5diff/h5diff$(EXEEXT) $(H5PDIFF) ../../src/h5diff/testh5diff.sh
+if HAVE_SHARED_CONDITIONAL
+if USE_PLUGINS_CONDITIONAL
+ TEST_SCRIPT += h5diff_plugin.sh
+endif
+endif
# Source files for the program
h5diffgentest_SOURCES=h5diffgentest.c
@@ -45,9 +48,24 @@ h5diffgentest_SOURCES=h5diffgentest.c
# Programs depend on the main HDF5 library and tools library
LDADD=$(LIBH5TOOLS) $(LIBHDF5)
+if HAVE_SHARED_CONDITIONAL
+  # Build it as a shared library if shared libraries are enabled in configure.
+ dyn_LTLIBRARIES=libdynlibdiff.la
+ libdynlibdiff_la_SOURCES=dynlib_diff.c
+ libdynlibdiff_la_LDFLAGS = -avoid-version -module -shared -export-dynamic
+
+libdynlibdiff.la: $(libdynlibdiff_la_OBJECTS) $(libdynlibdiff_la_DEPENDENCIES) $(EXTRA_libdynlibdiff_la_DEPENDENCIES)
+ $(AM_V_CCLD)$(libdynlibdiff_la_LINK) $(am_libdynlibdiff_la_rpath) $(libdynlibdiff_la_OBJECTS) $(libdynlibdiff_la_LIBADD)
+
+#install-exec-hook:
+# $(RM) $(DESTDIR)$(dyndir)/*dynlib*
+endif
+
# Temporary files. *.h5 are generated by h5diff. They should
# be copied to the testfiles/ directory if update is required
CHECK_CLEANFILES+=*.h5 expect_sorted actual_sorted
+DISTCLEANFILES=h5diff_plugin.sh
+
include $(top_srcdir)/config/conclude.am
diff --git a/tools/test/h5diff/dynlib_diff.c b/tools/test/h5diff/dynlib_diff.c
new file mode 100644
index 0000000..571452e
--- /dev/null
+++ b/tools/test/h5diff/dynlib_diff.c
@@ -0,0 +1,89 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+/*
+ * Purpose: Tests the plugin module (H5PL)
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include "H5PLextern.h"
+
+#define H5Z_FILTER_DYNLIBUD 300
+#define MULTIPLIER 3
+
+static size_t H5Z_filter_dynlibud(unsigned int flags, size_t cd_nelmts,
+ const unsigned int *cd_values, size_t nbytes, size_t *buf_size, void **buf);
+
+/* This message derives from H5Z */
+const H5Z_class2_t H5Z_DYNLIBUD[1] = {{
+ H5Z_CLASS_T_VERS, /* H5Z_class_t version */
+ H5Z_FILTER_DYNLIBUD, /* Filter id number */
+ 1, 1, /* Encoding and decoding enabled */
+ "dynlibud", /* Filter name for debugging */
+ NULL, /* The "can apply" callback */
+ NULL, /* The "set local" callback */
+ (H5Z_func_t)H5Z_filter_dynlibud, /* The actual filter function */
+}};
+
+H5PL_type_t H5PLget_plugin_type(void) {return H5PL_TYPE_FILTER;}
+const void *H5PLget_plugin_info(void) {return H5Z_DYNLIBUD;}
+
+/*-------------------------------------------------------------------------
+ * Function: H5Z_filter_dynlibud
+ *
+ * Purpose:     A user-defined (dynlibud) filter method that adds MULTIPLIER
+ *              to each byte of the original data during write and subtracts
+ *              it during read. It will be built as a shared library and
+ *              loaded by the h5diff plugin tests.
+ *
+ * Return: Success: Data chunk size
+ *
+ * Failure: 0
+ *-------------------------------------------------------------------------
+ */
+static size_t
+H5Z_filter_dynlibud(unsigned int flags, size_t cd_nelmts,
+ const unsigned int *cd_values, size_t nbytes,
+ size_t *buf_size, void **buf)
+{
+ char *int_ptr = (char *)*buf; /* Pointer to the data values */
+ size_t buf_left = *buf_size; /* Amount of data buffer left to process */
+
+ /* Check for the correct number of parameters */
+ if(cd_nelmts > 0)
+ return(0);
+
+ /* Assignment to eliminate unused parameter warning. */
+ cd_values = cd_values;
+
+ if(flags & H5Z_FLAG_REVERSE) { /*read*/
+        /* Subtract MULTIPLIER from the original value */
+ while(buf_left > 0) {
+ char temp = *int_ptr;
+ *int_ptr = temp - MULTIPLIER;
+ int_ptr++;
+ buf_left -= sizeof(*int_ptr);
+ } /* end while */
+ } /* end if */
+ else { /*write*/
+        /* Add MULTIPLIER to the original value */
+ while(buf_left > 0) {
+ char temp = *int_ptr;
+ *int_ptr = temp + MULTIPLIER;
+ int_ptr++;
+ buf_left -= sizeof(*int_ptr);
+ } /* end while */
+ } /* end else */
+
+ return nbytes;
+} /* end H5Z_filter_dynlibud() */
+
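The filter above shifts every byte by MULTIPLIER on write and shifts it back on read, so a write/read cycle restores the data exactly; since the arithmetic is per byte, values that overflow simply wrap and still round-trip. The stand-alone sketch below demonstrates that symmetry outside the HDF5 pipeline (unsigned char is used so the wrap-around is well defined).

/* Sketch only: the add/subtract round trip applied by the dynlibud
 * filter, shown on a plain buffer. */
#include <assert.h>
#include <stddef.h>

#define MULTIPLIER 3

static void shift(unsigned char *buf, size_t n, int encode)
{
    size_t i;

    for (i = 0; i < n; i++)
        buf[i] = (unsigned char)(encode ? buf[i] + MULTIPLIER
                                        : buf[i] - MULTIPLIER);
}

int main(void)
{
    unsigned char data[8] = { 0, 1, 2, 3, 250, 251, 254, 255 };
    unsigned char orig[8];
    size_t i;

    for (i = 0; i < 8; i++)
        orig[i] = data[i];

    shift(data, 8, 1);   /* what the filter does on write */
    shift(data, 8, 0);   /* what the filter does on read  */

    for (i = 0; i < 8; i++)
        assert(data[i] == orig[i]);   /* lossless round trip */
    return 0;
}

Because the filter returns nbytes unchanged in both directions, it transforms the data without compressing it, which is all the h5diff plugin tests need.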
diff --git a/tools/test/h5diff/h5diff_plugin.sh.in b/tools/test/h5diff/h5diff_plugin.sh.in
new file mode 100644
index 0000000..212f5d6
--- /dev/null
+++ b/tools/test/h5diff/h5diff_plugin.sh.in
@@ -0,0 +1,315 @@
+#! /bin/sh
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
+srcdir=@srcdir@
+TOP_BUILDDIR=@top_builddir@
+
+# Determine which backward compatibility options are enabled
+DEPRECATED_SYMBOLS="@DEPRECATED_SYMBOLS@"
+
+EXIT_SUCCESS=0
+EXIT_FAILURE=1
+
+H5DIFF=../../src/h5diff/h5diff # The tool name
+H5DIFF_BIN=`pwd`/$H5DIFF # The path of the tool binary
+
+nerrors=0
+verbose=yes
+h5haveexitcode=yes # default is yes
+
+TEST_NAME=ud_plugin
+FROM_DIR=`pwd`/.libs
+PLUGIN_LIB="$FROM_DIR/libdynlibdiff.*"
+PLUGIN_LIBDIR=testdir3
+
+RM='rm -rf'
+CMP='cmp -s'
+DIFF='diff -c'
+CP='cp'
+DIRNAME='dirname'
+LS='ls'
+AWK='awk'
+
+# source dirs
+SRC_TOOLS="$srcdir/../.."
+
+# testfiles source dirs for tools
+SRC_H5DIFF_TESTFILES="$SRC_TOOLS/test/h5diff/testfiles"
+
+TESTDIR=./testplug
+test -d $TESTDIR || mkdir $TESTDIR
+
+######################################################################
+# test files
+# --------------------------------------------------------------------
+# All the test files are copied from the source directory to the test directory.
+# NOTE: Keep this framework to add/remove test files.
+#       Any test files from other tools can be used in this framework.
+#       This list is also used for checking that the files exist.
+#       A line starting with '#' (no leading space) is treated as a comment.
+# --------------------------------------------------------------------
+LIST_HDF5_TEST_FILES="
+$SRC_H5DIFF_TESTFILES/tudfilter.h5
+$SRC_H5DIFF_TESTFILES/tudfilter2.h5
+$SRC_H5DIFF_TESTFILES/h5diff_ud.txt
+$SRC_H5DIFF_TESTFILES/h5diff_udfail.txt
+"
+
+# Main Body
+# Create test directories if they do not exist yet.
+test -d $PLUGIN_LIBDIR || mkdir -p $PLUGIN_LIBDIR
+if [ $? != 0 ]; then
+ echo "Failed to create test directory($PLUGIN_LIBDIR)"
+ exit $EXIT_FAILURE
+fi
+
+# copy plugin library for test
+$CP $PLUGIN_LIB $PLUGIN_LIBDIR
+if [ $? != 0 ]; then
+ echo "Failed to copy plugin library ($PLUGIN_LIB) for test."
+ exit $EXIT_FAILURE
+fi
+
+# setup plugin path
+ENVCMD="env HDF5_PLUGIN_PATH=../${PLUGIN_LIBDIR}"
+
+#
+# copy test files and expected output files from source dirs to test dir
+#
+COPY_TESTFILES="$LIST_HDF5_TEST_FILES"
+
+COPY_TESTFILES_TO_TESTDIR()
+{
+ # copy test files. Used -f to make sure get a new copy
+ for tstfile in $COPY_TESTFILES
+ do
+ # ignore '#' comment
+ echo $tstfile | tr -d ' ' | grep '^#' > /dev/null
+ RET=$?
+ if [ $RET -eq 1 ]; then
+ # skip cp if srcdir is same as destdir
+            # this occurs when the build/test is performed in the source dir,
+            # which would make the cp fail
+ SDIR=`$DIRNAME $tstfile`
+ INODE_SDIR=`$LS -i -d $SDIR | $AWK -F' ' '{print $1}'`
+ INODE_DDIR=`$LS -i -d $TESTDIR | $AWK -F' ' '{print $1}'`
+ if [ "$INODE_SDIR" != "$INODE_DDIR" ]; then
+ $CP -f $tstfile $TESTDIR
+ if [ $? -ne 0 ]; then
+ echo "Error: FAILED to copy $tstfile ."
+
+                    # Comment this out to CREATE the expected file
+ exit $EXIT_FAILURE
+ fi
+ fi
+ fi
+ done
+}
+
+CLEAN_TESTFILES_AND_TESTDIR()
+{
+ # skip rm if srcdir is same as destdir
+    # this occurs when the build/test is performed in the source dir;
+    # skipping the rm avoids removing the source test files
+ SDIR=$SRC_H5DIFF_TESTFILES
+ INODE_SDIR=`$LS -i -d $SDIR | $AWK -F' ' '{print $1}'`
+ INODE_DDIR=`$LS -i -d $TESTDIR | $AWK -F' ' '{print $1}'`
+ if [ "$INODE_SDIR" != "$INODE_DDIR" ]; then
+ $RM $TESTDIR
+ fi
+}
+
+# Parse option
+# -p run ph5diff tests
+# -h print help page
+while [ $# -gt 0 ]; do
+ case "$1" in
+ -p) # reset the tool name and bin to run ph5diff tests
+ TESTNAME=ph5diff
+ H5DIFF=../../src/h5diff/ph5diff # The tool name
+ H5DIFF_BIN=`pwd`/$H5DIFF # The path of the tool binary
+ pmode=yes
+ shift
+ ;;
+ -h) # print help page
+ echo "$0 [-p] [-h]"
+ echo " -p run ph5diff tests"
+ echo " -h print help page"
+ shift
+ exit 0
+ ;;
+ *) # unknown option
+ echo "$0: Unknown option ($1)"
+ exit 1
+ ;;
+ esac
+done
+
+# RUNSERIAL is used. Check if it can return the exit code from the executable correctly.
+if [ -n "$RUNSERIAL_NOEXITCODE" ]; then
+    echo "***Warning*** Serial Exit Code is not passed back to shell correctly."
+ echo "***Warning*** Exit code checking is skipped."
+ h5haveexitcode=no
+fi
+
+
+# Print a $* message left justified in a field of 70 characters
+#
+MESSAGE() {
+ SPACES=" "
+ echo "$* $SPACES" | cut -c1-70 | tr -d '\012'
+}
+
+# Print a line-line message left justified in a field of 70 characters
+# beginning with the word "Testing".
+#
+TESTING() {
+ SPACES=" "
+ echo "Testing $* $SPACES" | cut -c1-70 | tr -d '\012'
+}
+
+# Print a line-line message left justified in a field of 70 characters
+# beginning with the word "Verifying".
+#
+VERIFY() {
+ MESSAGE "Verifying $*"
+}
+
+# Source in the output filter function definitions.
+. $srcdir/../../../bin/output_filter.sh
+
+# Run a test and print PASS or *FAIL*. If a test fails then increment
+# the `nerrors' global variable and (if $verbose is set) display the
+# difference between the actual output and the expected output. The
+# expected output is given as the first argument to this function and
+# the actual output file is calculated by replacing the `.txt' with
+# `.out'.  The actual output is not removed if $HDF5_NOCLEANUP has a
+# non-zero value.
+#
+# Need eval before the RUNCMD command because some machines, like
+# AIX, have RUNPARALLEL in a style such as
+#       MP_PROCS=3 MP_TASKS_PER_NODE=3 poe ./a.out
+# which throws the shell script off.
+#
+TOOLTEST() {
+ expect="$TESTDIR/$1"
+ actual="$TESTDIR/`basename $1 .txt`.out"
+ actual_err="$TESTDIR/`basename $1 .txt`.err"
+ actual_sav=${actual}-sav
+ actual_err_sav=${actual_err}-sav
+ shift
+ if test -n "$pmode"; then
+ RUNCMD=$RUNPARALLEL
+ else
+ RUNCMD=$RUNSERIAL
+ fi
+
+ # Run test.
+ TESTING $H5DIFF $@
+ (
+ #echo "#############################"
+ #echo "Expected output for '$H5DIFF $@'"
+ #echo "#############################"
+ cd $TESTDIR
+ eval $ENVCMD $RUNCMD $H5DIFF_BIN "$@"
+ ) >$actual 2>$actual_err
+ EXIT_CODE=$?
+ # save actual and actual_err in case they are needed later.
+ cp $actual $actual_sav
+ STDOUT_FILTER $actual
+ cp $actual_err $actual_err_sav
+ STDERR_FILTER $actual_err
+ cat $actual_err >> $actual
+    # don't add the exit code check in pmode, as it causes failure (the exit
+    # code is from mpirun, not the tool)
+    # if any problem related to an exit code occurs, it will be caught in
+    # serial mode, so the test coverage is fulfilled.
+ if test $h5haveexitcode = 'yes' -a -z "$pmode"; then
+ echo "EXIT CODE: $EXIT_CODE" >> $actual
+ fi
+
+ if [ ! -f $expect ]; then
+ # Create the expect file if it doesn't yet exist.
+ echo " CREATED"
+ cp $actual $expect
+        echo "    Expected result (*.txt) missing"
+ nerrors="`expr $nerrors + 1`"
+ elif $CMP $expect $actual; then
+ echo " PASSED"
+ elif test $h5haveexitcode = 'yes' -a -z "$pmode"; then
+ echo "*FAILED*"
+ echo " Expected result ($expect) differs from actual result ($actual)"
+ nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF $expect $actual |sed 's/^/ /'
+ else
+        # parallel mode output often has a different ordering from serial
+        # output. If the sorted expected and actual files compare the same,
+        # it is safe to assume the actual output matches the expected file.
+ expect_sorted=expect_sorted
+ actual_sorted=actual_sorted
+ sort $expect -o $expect_sorted
+ sort $actual -o $actual_sorted
+ # remove "EXIT CODE:" line from expect file. test for exit code
+ # is done by serial mode.
+ grep -v "EXIT CODE:" $expect_sorted > $expect_sorted.noexit
+ mv $expect_sorted.noexit $expect_sorted
+ if $CMP $expect_sorted $actual_sorted; then
+ echo " PASSED"
+ else
+ echo "*FAILED*"
+ nerrors="`expr $nerrors + 1`"
+ if test yes = "$verbose"; then
+ echo "====Expected result ($expect_sorted) differs from actual result ($actual_sorted)"
+ $DIFF $expect_sorted $actual_sorted |sed 's/^/ /'
+ echo "====The actual output ($actual_sav)"
+ sed 's/^/ /' < $actual_sav
+ echo "====The actual stderr ($actual_err_sav)"
+ sed 's/^/ /' < $actual_err_sav
+ echo "====End of actual stderr ($actual_err_sav)"
+ echo ""
+ fi
+ fi
+ fi
+
+ # Clean up output file
+ if test -z "$HDF5_NOCLEANUP"; then
+ rm -f $actual $actual_err $actual_sav $actual_err_sav
+ rm -f $actual_sorted $expect_sorted
+ fi
+}
+
+##############################################################################
+### T H E T E S T S
+##############################################################################
+# prepare for test
+COPY_TESTFILES_TO_TESTDIR
+
+# Run the test
+TOOLTEST h5diff_ud.txt -v tudfilter.h5 tudfilter2.h5
+
+# print results
+if test $nerrors -ne 0 ; then
+ echo "$nerrors errors encountered"
+ exit_code=$EXIT_FAILURE
+else
+ echo "All Plugin API tests passed."
+ exit_code=$EXIT_SUCCESS
+fi
+
+# Clean up temporary files/directories
+CLEAN_TESTFILES_AND_TESTDIR
+
+# Clean up temporary files/directories and leave
+$RM $PLUGIN_LIBDIR
+
+exit $exit_code
diff --git a/tools/test/h5diff/h5diffgentest.c b/tools/test/h5diff/h5diffgentest.c
index 339ff6c..708f371 100644
--- a/tools/test/h5diff/h5diffgentest.c
+++ b/tools/test/h5diff/h5diffgentest.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
-* the files COPYING and Copyright.html. COPYING can be found at the root *
-* of the source code distribution tree; Copyright.html can be found at the *
-* root level of an installed copy of the electronic HDF5 document set and *
-* is linked from the top-level documents page. It can also be found at *
-* http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-* access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <stdio.h>
@@ -24,9 +22,9 @@
* size of that temporary buffer in bytes. For efficiency's sake, choose the
* largest value suitable for your machine (for testing use a small value).
*/
-/* Maximum size used in a call to malloc for a dataset
+/* Maximum size used in a call to malloc for a dataset
* NOTE: this value should stay in sync with the value defined in the tools
- * library file: h5tools_utils.h
+ * library file: h5tools_utils.h
*/
size_t H5TOOLS_MALLOCSIZE = (128 * 1024 * 1024);
@@ -50,6 +48,7 @@ size_t H5TOOLS_MALLOCSIZE = (128 * 1024 * 1024);
#define FILE6 "h5diff_attr2.h5"
#define FILE7 "h5diff_dset1.h5"
#define FILE8 "h5diff_dset2.h5"
+#define FILE8A "h5diff_dset3.h5"
#define FILE9 "h5diff_hyper1.h5"
#define FILE10 "h5diff_hyper2.h5"
#define FILE11 "h5diff_empty.h5"
@@ -95,6 +94,9 @@ size_t H5TOOLS_MALLOCSIZE = (128 * 1024 * 1024);
/* non-comparable dataset and attribute */
#define NON_COMPARBLES1 "non_comparables1.h5"
#define NON_COMPARBLES2 "non_comparables2.h5"
+/* string dataset and attribute */
+#define DIFF_STRINGS1 "diff_strings1.h5"
+#define DIFF_STRINGS2 "diff_strings2.h5"
#define UIMAX 4294967295u /*Maximum value for a variable of type unsigned int */
#define STR_SIZE 3
@@ -159,6 +161,7 @@ static void test_comps_array_vlen (const char *fname, const char *dset, const ch
static void test_comps_vlen_arry (const char *fname, const char *dset,const char *attr, int diff, int is_file_new);
static void test_data_nocomparables (const char *fname, int diff);
static void test_objs_nocomparables (const char *fname1, const char *fname2);
+static void test_objs_strings (const char *fname, const char *fname2);
/* called by test_attributes() and test_datasets() */
static void write_attr_in(hid_t loc_id,const char* dset_name,hid_t fid,int make_diffs);
@@ -195,6 +198,7 @@ int main(void)
/* generate 2 files, the second call creates a similar file with differences */
test_datasets(FILE7,0);
test_datasets(FILE8,1);
+ test_datasets(FILE8A,2);
/* generate 2 files, the second call creates a similar file with differences */
test_hyperslab(FILE9,0);
@@ -214,11 +218,11 @@ int main(void)
test_special_datasets(FILE19,0);
test_special_datasets(FILE20,1);
- /*
+ /*
* Generate 2 files: FILE21 with old format; FILE22 with new format
- * Create 2 datasets in each file:
- * One dataset: chunked layout, w/o filters, fixed dimension
- * One dataset: chunked layout, w/ filters, fixed dimension
+ * Create 2 datasets in each file:
+ * One dataset: chunked layout, w/o filters, fixed dimension
+ * One dataset: chunked layout, w/ filters, fixed dimension
*/
gen_dataset_idx(FILE21, 0);
gen_dataset_idx(FILE22, 1);
@@ -244,7 +248,7 @@ int main(void)
test_enums(ENUM_INVALID_VALUES);
/* -------------------------------------------------
- * Create test files with dataset and attribute with container types
+ * Create test files with dataset and attribute with container types
* (array, vlen) with multiple nested compound types.
*/
/* file1 */
@@ -259,8 +263,8 @@ int main(void)
test_comps_vlen_arry(COMPS_COMPLEX2,"dset4", "attr4", 5, 0);
/*-------------------------------------------------
- * Create test files with non-comparable dataset and attributes with
- * comparable datasets and attributes. All the comparables should display
+ * Create test files with non-comparable dataset and attributes with
+ * comparable datasets and attributes. All the comparables should display
* differences.
*/
test_data_nocomparables(NON_COMPARBLES1,0);
@@ -269,6 +273,9 @@ int main(void)
/* common objects (same name) with different object types. HDFFV-7644 */
test_objs_nocomparables(NON_COMPARBLES1, NON_COMPARBLES2);
+ /* string dataset and attribute. HDFFV-10028 */
+ test_objs_strings(DIFF_STRINGS1, DIFF_STRINGS2);
+
return 0;
}
@@ -1323,7 +1330,7 @@ int test_datasets(const char *file,
herr_t status;
int buf[2]={1,2};
- if(make_diffs)
+ if(make_diffs > 0)
memset(buf, 0, sizeof buf);
/* Create a file */
@@ -1394,9 +1401,8 @@ int test_special_datasets(const char *file,
/* Create a dataset with zero dimension size in one file but the other one
* has a dataset with a non-zero dimension size */
- if(make_diffs) {
+ if(make_diffs)
dims[1] = SPACE1_DIM2 + 4;
- }
sid = H5Screate_simple(SPACE1_RANK, dims, NULL);
did = H5Dcreate2(fid, "dset2", H5T_NATIVE_INT, sid, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
@@ -2120,33 +2126,33 @@ out:
* Function: gen_dataset_idx
*
* Purpose: Create a file with either the new or old format
-* Create two datasets in the file:
-* one dataset: fixed dimension, chunked layout, w/o filters
-* one dataset: fixed dimension, chunked layout, w/ filters
+* Create two datasets in the file:
+* one dataset: fixed dimension, chunked layout, w/o filters
+* one dataset: fixed dimension, chunked layout, w/ filters
*
*-------------------------------------------------------------------------
*/
static
int gen_dataset_idx(const char *file, int format)
{
- hid_t fid; /* file id */
- hid_t did, did2; /* dataset id */
- hid_t sid; /* space id */
- hid_t fapl; /* file access property id */
- hid_t dcpl; /* dataset creation property id */
- hsize_t dims[1] = {10}; /* dataset dimension */
- hsize_t c_dims[1] = {2}; /* chunk dimension */
- herr_t status; /* return status */
- int buf[10]; /* data buffer */
- int i; /* local index variable */
+ hid_t fid; /* file id */
+ hid_t did, did2; /* dataset id */
+ hid_t sid; /* space id */
+ hid_t fapl; /* file access property id */
+ hid_t dcpl; /* dataset creation property id */
+ hsize_t dims[1] = {10}; /* dataset dimension */
+ hsize_t c_dims[1] = {2}; /* chunk dimension */
+ herr_t status; /* return status */
+ int buf[10]; /* data buffer */
+ int i; /* local index variable */
     /* Get a copy of the file access property */
fapl = H5Pcreate(H5P_FILE_ACCESS);
/* Set the "use the latest format" bounds for creating objects in the file */
if(format) {
- status = H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
- assert(status >= 0);
+ status = H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST);
+ assert(status >= 0);
}
/* Create a file */
@@ -2155,7 +2161,7 @@ int gen_dataset_idx(const char *file, int format)
/* Create data */
for(i = 0; i < 10; i++)
- buf[i] = i;
+ buf[i] = i;
/* Set chunk */
dcpl = H5Pcreate(H5P_DATASET_CREATE);
@@ -2165,7 +2171,7 @@ int gen_dataset_idx(const char *file, int format)
/* Create a 1D dataset */
sid = H5Screate_simple(1, dims, NULL);
did = H5Dcreate2(fid, "dset", H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
-
+
/* Write to the dataset */
status = H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf);
assert(status >= 0);
@@ -4422,8 +4428,8 @@ out:
/*-------------------------------------------------------------------------
*
-* Purpose:
-* Create test files with dataset and attribute with container types
+* Purpose:
+* Create test files with dataset and attribute with container types
* (array, vlen) with multiple nested compound types.
*
* Function: test_comps_array()
@@ -4447,7 +4453,7 @@ out:
static void test_comps_array (const char *fname, const char *dset, const char *attr,int diff, int is_file_new)
{
/* sub compound 2 */
- typedef struct {
+ typedef struct {
int i2;
float f2;
} cmpd2_t;
@@ -4469,14 +4475,14 @@ static void test_comps_array (const char *fname, const char *dset, const char *a
hid_t tid_attr;
hsize_t sdims_dset[] = {SDIM_DSET};
hsize_t sdims_cmpd_arry[] = {SDIM_CMPD_ARRAY};
- int i,j;
+ int i,j;
herr_t ret; /* Generic return value */
/* Initialize array data to write */
for(i=0; i < SDIM_DSET; i++)
{
wdata[i].i1 = i;
- for(j=0; j < SDIM_CMPD_ARRAY; j++)
+ for(j=0; j < SDIM_CMPD_ARRAY; j++)
{
wdata[i].cmpd2[j].i2 = i * 10 + diff;
wdata[i].cmpd2[j].f2 = (float)i * 10.5F + (float)diff;
@@ -4520,7 +4526,7 @@ static void test_comps_array (const char *fname, const char *dset, const char *a
/* -------------------
- * Create a dataset
+ * Create a dataset
*/
/* Create dataspace for datasets */
sid_dset = H5Screate_simple(1, sdims_dset, NULL);
@@ -4562,18 +4568,18 @@ static void test_comps_array (const char *fname, const char *dset, const char *a
static void test_comps_vlen (const char * fname, const char *dset, const char *attr, int diff, int is_file_new)
{
/* sub compound 2 */
- typedef struct {
+ typedef struct {
int i2;
float f2;
} cmpd2_t;
/* top compound 1 */
- typedef struct {
+ typedef struct {
int i1;
hvl_t vl; /* VL information for compound2 */
} cmpd1_t;
- cmpd1_t wdata[SDIM_DSET]; /* Dataset for compound1 */
+ cmpd1_t wdata[SDIM_DSET]; /* Dataset for compound1 */
hid_t fid; /* HDF5 File ID */
hid_t did_dset; /* dataset ID */
@@ -4629,7 +4635,7 @@ static void test_comps_vlen (const char * fname, const char *dset, const char *a
assert(ret >= 0);
/* -------------------------------
- * Create dataset with compound1
+ * Create dataset with compound1
*/
/* Create dataspace for dataset */
sid_dset = H5Screate_simple(1, sdims_dset, NULL);
@@ -4674,11 +4680,11 @@ static void test_comps_vlen (const char * fname, const char *dset, const char *a
static void test_comps_array_vlen (const char * fname, const char *dset,const char *attr, int diff, int is_file_new)
{
- typedef struct {
+ typedef struct {
int i3;
float f3;
} cmpd3_t;
-
+
typedef struct { /* Typedef for compound datatype */
int i2;
hvl_t vl; /* VL information to write */
@@ -4817,19 +4823,19 @@ static void test_comps_array_vlen (const char * fname, const char *dset,const ch
static void test_comps_vlen_arry (const char * fname, const char *dset, const char *attr, int diff, int is_file_new)
{
/* sub compound 3 */
- typedef struct {
+ typedef struct {
int i3;
float f3;
} cmpd3_t;
/* sub compound 2 */
- typedef struct {
+ typedef struct {
int i2;
cmpd3_t cmpd3[SDIM_CMPD_ARRAY];
} cmpd2_t;
/* top compound 1 */
- typedef struct {
+ typedef struct {
int i1;
hvl_t vl; /* VL information for compound2 */
} cmpd1_t;
@@ -4910,7 +4916,7 @@ static void test_comps_vlen_arry (const char * fname, const char *dset, const ch
assert(ret >= 0);
/* -------------------------------
- * Create dataset with compound1
+ * Create dataset with compound1
*/
/* Create dataspace for dataset */
sid_dset = H5Screate_simple(1, sdims_dset, NULL);
@@ -4960,9 +4966,9 @@ static void test_comps_vlen_arry (const char * fname, const char *dset, const ch
/*-------------------------------------------------------------------------
* Function: test_data_nocomparables
*
-* Purpose:
-* Create test files with non-comparable dataset and attributes with
-* comparable datasets and attributes. All the comparables should display
+* Purpose:
+* Create test files with non-comparable dataset and attributes with
+* comparable datasets and attributes. All the comparables should display
* differences.
*
*-------------------------------------------------------------------------*/
@@ -5020,8 +5026,8 @@ static void test_data_nocomparables (const char * fname, int make_diffs)
dset_data_ptr2=(int*)&data2;
attr_data_ptr1=(int*)&data2;
- /* -----------
- * group2
+ /* -----------
+ * group2
*/
dset_data_ptr3=(int*)&data2;
/* dset1/attr1 */
@@ -5143,10 +5149,10 @@ static void test_data_nocomparables (const char * fname, int make_diffs)
goto out;
}
-
+
out:
-
+
/*-----------------------------------------------------------------------
* Close IDs
*-----------------------------------------------------------------------*/
@@ -5171,9 +5177,9 @@ out:
/*-------------------------------------------------------------------------
* Function: test_objs_nocomparables
*
-* Purpose:
+* Purpose:
* Create test files with common objects (same name) but different object
-* types.
+* types.
* h5diff should show non-comparable output from these common objects.
*-------------------------------------------------------------------------*/
static void test_objs_nocomparables(const char *fname1, const char *fname2)
@@ -5317,6 +5323,188 @@ out:
}
+static hid_t mkstr(int size, H5T_str_t pad) {
+ hid_t type;
+
+ if((type=H5Tcopy(H5T_C_S1)) < 0) return -1;
+ if(H5Tset_size(type, (size_t)size) < 0) return -1;
+ if(H5Tset_strpad(type, pad) < 0) return -1;
+
+ return type;
+}
+
+/*-------------------------------------------------------------------------
+* Function: test_objs_strings
+*
+* Purpose:
+* Create test files with common objects (same name) but different string
+* types.
+*   h5diff should report differences for these common objects.
+*-------------------------------------------------------------------------*/
+static void test_objs_strings(const char *fname1, const char *fname2)
+{
+ herr_t status = SUCCEED;
+ hid_t fid1=0;
+ hid_t fid2=0;
+ hid_t dataset=0;
+ hid_t space=0;
+ hid_t f_type=0;
+ hid_t m_type=0;
+ hsize_t dims1[] = {3, 4};
+ char string1A[12][3] = {"s1","s2","s3","s4","s5","s6","s","s","s9",
+ "s0","s1","s2"};
+ char string1B[12][3] = {"s1","s2","s3","s4","s","s","s7","s8","s9",
+ "s0","s1","s2"};
+
+ hsize_t dims2[]={20};
+ char string2A[20][10] = {"ab cd ef1", "ab cd ef2", "ab cd ef3", "ab cd ef4",
+ "ab cd ef5", "ab cd ef6", "ab cd ef7", "ab cd ef8",
+ "ab cd 9", "ab cd 0", "ab cd 1", "ab cd 2",
+ "ab cd ef3", "ab cd ef4", "ab cd ef5", "ab cd ef6",
+ "ab cd ef7", "ab cd ef8", "ab cd ef9", "ab cd ef0"};
+ char string2B[20][10] = {"ab cd ef1", "ab cd ef2", "ab cd ef3", "ab cd ef4",
+ "ab cd ef5", "ab cd ef6", "ab cd ef7", "ab cd ef8",
+ "ab cd ef9", "ab cd ef0", "ab cd ef1", "ab cd ef2",
+ "ab cd 3", "ab cd 4", "ab cd 5", "ab cd 6",
+ "ab cd ef7", "ab cd ef8", "ab cd ef9", "ab cd ef0"};
+
+ hsize_t dims3[] = {27};
+ char string3A[27][6] = {"abcd0", "abcd1", "abcd2", "abcd3",
+ "abcd4", "abcd5", "abcd6", "abcd7",
+ "abcd8", "abcd9", "abcd0", "abcd1",
+ "abd2", "abc3", "bcd4", "acd5",
+ "abcd6", "abcd7", "abcd8", "abcd9",
+ "abcd0", "abcd1", "abcd2", "abcd3",
+ "abc4", "abc5", "abc6"};
+ char string3B[27][6] = {"abcd0", "abcd1", "abcd2", "abcd3",
+ "abcd4", "abcd5", "abcd6", "abcd7",
+ "abcd8", "abcd9", "abcd0", "abcd1",
+ "abcd2", "abcd3", "abcd4", "abcd5",
+ "abd6", "abc7", "bcd8", "acd9",
+ "abcd0", "abcd1", "abcd2", "abcd3",
+ "abd4", "abd5", "abd6"};
+
+ hsize_t dims4[] = {3};
+ char string4A[3][21] = { "s1234567890123456789", "s1234567890123456789",
+ "s12345678901234567"};
+ char string4B[3][21] = { "s1234567890123456789", "s12345678901234567",
+ "s1234567890123456789"};
+
+ /*-----------------------------------------------------------------------
+ * Create file(s)
+ *------------------------------------------------------------------------*/
+ /* file1 */
+ fid1 = H5Fcreate (fname1, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ if (fid1 < 0)
+ {
+ fprintf(stderr, "Error: %s> H5Fcreate failed.\n", fname1);
+ status = FAIL;
+ goto out;
+ }
+
+ /* file2 */
+ fid2 = H5Fcreate (fname2, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ if (fid2 < 0)
+ {
+ fprintf(stderr, "Error: %s> H5Fcreate failed.\n", fname2);
+ status = FAIL;
+ goto out;
+ }
+
+ /* string 1 : nullterm string */
+ space = H5Screate_simple(2, dims1, NULL);
+ f_type = mkstr(5, H5T_STR_NULLTERM);
+ m_type = mkstr(3, H5T_STR_NULLTERM);
+ dataset = H5Dcreate2(fid1, "/string1", f_type, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ H5Dwrite(dataset, m_type, H5S_ALL, H5S_ALL, H5P_DEFAULT, string1A);
+ H5Dclose(dataset);
+ dataset = H5Dcreate2(fid2, "/string1", f_type, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ H5Dwrite(dataset, m_type, H5S_ALL, H5S_ALL, H5P_DEFAULT, string1B);
+ H5Tclose(m_type);
+ H5Tclose(f_type);
+ H5Sclose(space);
+ H5Dclose(dataset);
+
+ /* string 2 : space pad string */
+ space = H5Screate_simple(1, dims2, NULL);
+ f_type = mkstr(11, H5T_STR_SPACEPAD);
+ m_type = mkstr(10, H5T_STR_NULLTERM);
+ dataset = H5Dcreate2(fid1, "/string2", f_type, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ H5Dwrite(dataset, m_type, H5S_ALL, H5S_ALL, H5P_DEFAULT, string2A);
+ H5Dclose(dataset);
+ dataset = H5Dcreate2(fid2, "/string2", f_type, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ H5Dwrite(dataset, m_type, H5S_ALL, H5S_ALL, H5P_DEFAULT, string2B);
+ H5Tclose(m_type);
+ H5Tclose(f_type);
+ H5Sclose(space);
+ H5Dclose(dataset);
+
+ /* string 3 : null pad string */
+ space = H5Screate_simple(1, dims3, NULL);
+ f_type = mkstr(8, H5T_STR_NULLPAD);
+ m_type = mkstr(6, H5T_STR_NULLTERM);
+ dataset = H5Dcreate2(fid1, "/string3", f_type, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ H5Dwrite(dataset, m_type, H5S_ALL, H5S_ALL, H5P_DEFAULT, string3A);
+ H5Dclose(dataset);
+ dataset = H5Dcreate2(fid2, "/string3", f_type, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ H5Dwrite(dataset, m_type, H5S_ALL, H5S_ALL, H5P_DEFAULT, string3B);
+ H5Tclose(m_type);
+ H5Tclose(f_type);
+ H5Sclose(space);
+ H5Dclose(dataset);
+
+ /* string 4 : space pad long string */
+ space = H5Screate_simple(1, dims4, NULL);
+ f_type = mkstr(168, H5T_STR_SPACEPAD);
+ m_type = mkstr(21, H5T_STR_NULLTERM);
+ dataset = H5Dcreate2(fid1, "/string4", f_type, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ H5Dwrite(dataset, m_type, H5S_ALL, H5S_ALL, H5P_DEFAULT, string4A);
+ H5Dclose(dataset);
+ dataset = H5Dcreate2(fid2, "/string4", f_type, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ H5Dwrite(dataset, m_type, H5S_ALL, H5S_ALL, H5P_DEFAULT, string4B);
+ H5Tclose(m_type);
+ H5Tclose(f_type);
+ H5Sclose(space);
+ H5Dclose(dataset);
+
+ /* string 5 : early term long string */
+ string4A[0][10] = 0;
+ string4A[0][11] = 0;
+ string4B[0][10] = 0;
+
+ string4A[1][10] = 0;
+ string4A[1][11] = 'Z';
+ string4B[1][10] = 0;
+ string4B[1][11] = 'x';
+
+ string4A[2][10] = 0;
+ string4B[2][10] = 0;
+ string4B[2][11] = 'a';
+ string4B[2][12] = 'B';
+ string4B[2][13] = 'c';
+ space = H5Screate_simple(1, dims4, NULL);
+ f_type = mkstr(168, H5T_STR_NULLTERM);
+ m_type = mkstr(21, H5T_STR_NULLTERM);
+ dataset = H5Dcreate2(fid1, "/string5", f_type, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ H5Dwrite(dataset, m_type, H5S_ALL, H5S_ALL, H5P_DEFAULT, string4A);
+ H5Dclose(dataset);
+ dataset = H5Dcreate2(fid2, "/string5", f_type, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT);
+ H5Dwrite(dataset, m_type, H5S_ALL, H5S_ALL, H5P_DEFAULT, string4B);
+ H5Tclose(m_type);
+ H5Tclose(f_type);
+ H5Sclose(space);
+ H5Dclose(dataset);
+
+out:
+ /*-----------------------------------------------------------------------
+ * Close IDs
+ *-----------------------------------------------------------------------*/
+ if(fid1)
+ H5Fclose(fid1);
+ if(fid2)
+ H5Fclose(fid2);
+}
+
/*-------------------------------------------------------------------------
* Function: write_attr_in
*
@@ -6454,6 +6642,9 @@ void write_dset_in(hid_t loc_id,
int buf73[4][3][2]; /* integer */
float buf83[4][3][2]; /* float */
+ if(make_diffs == 2) {
+ dimarray[0] = 4;
+ }
/*-------------------------------------------------------------------------
* H5S_SCALAR
@@ -6462,11 +6653,8 @@ void write_dset_in(hid_t loc_id,
- if ( make_diffs )
- {
-
+ if(make_diffs)
scalar_data = 1;
- }
/* create a space */
sid = H5Screate(H5S_SCALAR);
@@ -7049,7 +7237,7 @@ void write_dset_in(hid_t loc_id,
n=1;
for (i = 0; i < 24; i++) {
- for (j = 0; j < (int)dimarray[0]; j++) {
+ for (j = 0; j < 3; j++) {
if (make_diffs) buf63[i][j]=0;
else buf63[i][j]=n++;
}
diff --git a/tools/test/h5diff/testfiles/diff_strings1.h5 b/tools/test/h5diff/testfiles/diff_strings1.h5
new file mode 100644
index 0000000..0f8b380
--- /dev/null
+++ b/tools/test/h5diff/testfiles/diff_strings1.h5
Binary files differ
diff --git a/tools/test/h5diff/testfiles/diff_strings2.h5 b/tools/test/h5diff/testfiles/diff_strings2.h5
new file mode 100644
index 0000000..e8520ae
--- /dev/null
+++ b/tools/test/h5diff/testfiles/diff_strings2.h5
Binary files differ
diff --git a/tools/test/h5diff/testfiles/h5diff_10.txt b/tools/test/h5diff/testfiles/h5diff_10.txt
index a699f00..0a0ee22 100644
--- a/tools/test/h5diff/testfiles/h5diff_10.txt
+++ b/tools/test/h5diff/testfiles/h5diff_10.txt
@@ -1,4 +1,4 @@
-usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
+usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
file1 File name of the first HDF5 file
file2 File name of the second HDF5 file
[obj1] Name of an HDF5 object, in absolute path
@@ -59,19 +59,22 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
-n C, --count=C
Print differences up to C. C must be a positive integer.
-d D, --delta=D
- Print difference if (|a-b| > D). D must be a positive number.
+ Print difference if (|a-b| > D). D must be a positive number. Where a
+ is the data point value in file1 and b is the data point value in file2.
Can not use with '-p' or '--use-system-epsilon'.
-p R, --relative=R
- Print difference if (|(a-b)/b| > R). R must be a positive number.
+ Print difference if (|(a-b)/b| > R). R must be a positive number. Where a
+ is the data point value in file1 and b is the data point value in file2.
Can not use with '-d' or '--use-system-epsilon'.
--use-system-epsilon
- Print difference if (|a-b| > EPSILON), EPSILON is system defined value.
+ Print difference if (|a-b| > EPSILON), EPSILON is system defined value. Where a
+ is the data point value in file1 and b is the data point value in file2.
If the system epsilon is not defined,one of the following predefined
values will be used:
FLT_EPSILON = 1.19209E-07 for floating-point type
DBL_EPSILON = 2.22045E-16 for double precision type
Can not use with '-p' or '-d'.
- --exclude-path "path"
+ --exclude-path "path"
Exclude the specified path to an object when comparing files or groups.
If a group is excluded, all member objects will also be excluded.
The specified path is excluded wherever it occurs.
@@ -106,15 +109,15 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
common objects.

Object comparison:
- 1) Groups
+ 1) Groups
First compares the names of member objects (relative path, from the
specified group) and generates a report of objects that appear in only
one group or in both groups. Common objects are then compared recursively.
- 2) Datasets
+ 2) Datasets
Array rank and dimensions, datatypes, and data values are compared.
- 3) Datatypes
+ 3) Datatypes
The comparison is based on the return value of H5Tequal.
- 4) Symbolic links
+ 4) Symbolic links
The paths to the target objects are compared.
(The option --follow-symlinks overrides the default behavior when
symbolic links are compared.).
diff --git a/tools/test/h5diff/testfiles/h5diff_58.txt b/tools/test/h5diff/testfiles/h5diff_58.txt
index 768dd97..d27a0c9 100644
--- a/tools/test/h5diff/testfiles/h5diff_58.txt
+++ b/tools/test/h5diff/testfiles/h5diff_58.txt
@@ -1,5 +1,5 @@
dataset: </refreg> and </refreg>
-Referenced dataset 10720 10720
+Referenced dataset 10784 10784
------------------------------------------------------------
Region blocks
block #0 (2,2)-(7,7) (0,0)-(2,2)
diff --git a/tools/test/h5diff/testfiles/h5diff_60.txt b/tools/test/h5diff/testfiles/h5diff_60.txt
new file mode 100644
index 0000000..938f279
--- /dev/null
+++ b/tools/test/h5diff/testfiles/h5diff_60.txt
@@ -0,0 +1,10 @@
+dataset: </string1> and </string1>
+size: [3x4] [3x4]
+position string1 string1 difference
+------------------------------------------------------------
+[ 1 0 ] 5 \000
+[ 1 1 ] 6 \000
+[ 1 2 ] \000 7
+[ 1 3 ] \000 8
+4 differences found
+EXIT CODE: 1
diff --git a/tools/test/h5diff/testfiles/h5diff_600.txt b/tools/test/h5diff/testfiles/h5diff_600.txt
index 3c3ad9f..4362711 100644
--- a/tools/test/h5diff/testfiles/h5diff_600.txt
+++ b/tools/test/h5diff/testfiles/h5diff_600.txt
@@ -1,4 +1,4 @@
-usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
+usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
file1 File name of the first HDF5 file
file2 File name of the second HDF5 file
[obj1] Name of an HDF5 object, in absolute path
@@ -59,19 +59,22 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
-n C, --count=C
Print differences up to C. C must be a positive integer.
-d D, --delta=D
- Print difference if (|a-b| > D). D must be a positive number.
+ Print difference if (|a-b| > D). D must be a positive number. Where a
+ is the data point value in file1 and b is the data point value in file2.
Can not use with '-p' or '--use-system-epsilon'.
-p R, --relative=R
- Print difference if (|(a-b)/b| > R). R must be a positive number.
+ Print difference if (|(a-b)/b| > R). R must be a positive number. Where a
+ is the data point value in file1 and b is the data point value in file2.
Can not use with '-d' or '--use-system-epsilon'.
--use-system-epsilon
- Print difference if (|a-b| > EPSILON), EPSILON is system defined value.
+ Print difference if (|a-b| > EPSILON), EPSILON is system defined value. Where a
+ is the data point value in file1 and b is the data point value in file2.
If the system epsilon is not defined,one of the following predefined
values will be used:
FLT_EPSILON = 1.19209E-07 for floating-point type
DBL_EPSILON = 2.22045E-16 for double precision type
Can not use with '-p' or '-d'.
- --exclude-path "path"
+ --exclude-path "path"
Exclude the specified path to an object when comparing files or groups.
If a group is excluded, all member objects will also be excluded.
The specified path is excluded wherever it occurs.
@@ -106,15 +109,15 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
common objects.

Object comparison:
- 1) Groups
+ 1) Groups
First compares the names of member objects (relative path, from the
specified group) and generates a report of objects that appear in only
one group or in both groups. Common objects are then compared recursively.
- 2) Datasets
+ 2) Datasets
Array rank and dimensions, datatypes, and data values are compared.
- 3) Datatypes
+ 3) Datatypes
The comparison is based on the return value of H5Tequal.
- 4) Symbolic links
+ 4) Symbolic links
The paths to the target objects are compared.
(The option --follow-symlinks overrides the default behavior when
symbolic links are compared.).
diff --git a/tools/test/h5diff/testfiles/h5diff_603.txt b/tools/test/h5diff/testfiles/h5diff_603.txt
index 087764a..9ab3204 100644
--- a/tools/test/h5diff/testfiles/h5diff_603.txt
+++ b/tools/test/h5diff/testfiles/h5diff_603.txt
@@ -1,5 +1,5 @@
<-d -4> is not a valid option
-usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
+usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
file1 File name of the first HDF5 file
file2 File name of the second HDF5 file
[obj1] Name of an HDF5 object, in absolute path
@@ -60,19 +60,22 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
-n C, --count=C
Print differences up to C. C must be a positive integer.
-d D, --delta=D
- Print difference if (|a-b| > D). D must be a positive number.
+ Print difference if (|a-b| > D). D must be a positive number. Where a
+ is the data point value in file1 and b is the data point value in file2.
Can not use with '-p' or '--use-system-epsilon'.
-p R, --relative=R
- Print difference if (|(a-b)/b| > R). R must be a positive number.
+ Print difference if (|(a-b)/b| > R). R must be a positive number. Where a
+ is the data point value in file1 and b is the data point value in file2.
Can not use with '-d' or '--use-system-epsilon'.
--use-system-epsilon
- Print difference if (|a-b| > EPSILON), EPSILON is system defined value.
+ Print difference if (|a-b| > EPSILON), EPSILON is system defined value. Where a
+ is the data point value in file1 and b is the data point value in file2.
If the system epsilon is not defined,one of the following predefined
values will be used:
FLT_EPSILON = 1.19209E-07 for floating-point type
DBL_EPSILON = 2.22045E-16 for double precision type
Can not use with '-p' or '-d'.
- --exclude-path "path"
+ --exclude-path "path"
Exclude the specified path to an object when comparing files or groups.
If a group is excluded, all member objects will also be excluded.
The specified path is excluded wherever it occurs.
@@ -107,15 +110,15 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
common objects.

Object comparison:
- 1) Groups
+ 1) Groups
First compares the names of member objects (relative path, from the
specified group) and generates a report of objects that appear in only
one group or in both groups. Common objects are then compared recursively.
- 2) Datasets
+ 2) Datasets
Array rank and dimensions, datatypes, and data values are compared.
- 3) Datatypes
+ 3) Datatypes
The comparison is based on the return value of H5Tequal.
- 4) Symbolic links
+ 4) Symbolic links
The paths to the target objects are compared.
(The option --follow-symlinks overrides the default behavior when
symbolic links are compared.).
diff --git a/tools/test/h5diff/testfiles/h5diff_606.txt b/tools/test/h5diff/testfiles/h5diff_606.txt
index c31a67e..afe0e72 100644
--- a/tools/test/h5diff/testfiles/h5diff_606.txt
+++ b/tools/test/h5diff/testfiles/h5diff_606.txt
@@ -1,5 +1,5 @@
<-d 0x1> is not a valid option
-usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
+usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
file1 File name of the first HDF5 file
file2 File name of the second HDF5 file
[obj1] Name of an HDF5 object, in absolute path
@@ -60,19 +60,22 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
-n C, --count=C
Print differences up to C. C must be a positive integer.
-d D, --delta=D
- Print difference if (|a-b| > D). D must be a positive number.
+ Print difference if (|a-b| > D). D must be a positive number. Where a
+ is the data point value in file1 and b is the data point value in file2.
Can not use with '-p' or '--use-system-epsilon'.
-p R, --relative=R
- Print difference if (|(a-b)/b| > R). R must be a positive number.
+ Print difference if (|(a-b)/b| > R). R must be a positive number. Where a
+ is the data point value in file1 and b is the data point value in file2.
Can not use with '-d' or '--use-system-epsilon'.
--use-system-epsilon
- Print difference if (|a-b| > EPSILON), EPSILON is system defined value.
+ Print difference if (|a-b| > EPSILON), EPSILON is system defined value. Where a
+ is the data point value in file1 and b is the data point value in file2.
If the system epsilon is not defined,one of the following predefined
values will be used:
FLT_EPSILON = 1.19209E-07 for floating-point type
DBL_EPSILON = 2.22045E-16 for double precision type
Can not use with '-p' or '-d'.
- --exclude-path "path"
+ --exclude-path "path"
Exclude the specified path to an object when comparing files or groups.
If a group is excluded, all member objects will also be excluded.
The specified path is excluded wherever it occurs.
@@ -107,15 +110,15 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
common objects.

Object comparison:
- 1) Groups
+ 1) Groups
First compares the names of member objects (relative path, from the
specified group) and generates a report of objects that appear in only
one group or in both groups. Common objects are then compared recursively.
- 2) Datasets
+ 2) Datasets
Array rank and dimensions, datatypes, and data values are compared.
- 3) Datatypes
+ 3) Datatypes
The comparison is based on the return value of H5Tequal.
- 4) Symbolic links
+ 4) Symbolic links
The paths to the target objects are compared.
(The option --follow-symlinks overrides the default behavior when
symbolic links are compared.).
diff --git a/tools/test/h5diff/testfiles/h5diff_61.txt b/tools/test/h5diff/testfiles/h5diff_61.txt
new file mode 100644
index 0000000..1f238b1
--- /dev/null
+++ b/tools/test/h5diff/testfiles/h5diff_61.txt
@@ -0,0 +1,30 @@
+dataset: </string2> and </string2>
+size: [20] [20]
+position string2 string2 difference
+------------------------------------------------------------
+[ 8 ] 9 e
+[ 8 ] f
+[ 8 ] 9
+[ 9 ] 0 e
+[ 9 ] f
+[ 9 ] 0
+[ 10 ] 1 e
+[ 10 ] f
+[ 10 ] 1
+[ 11 ] 2 e
+[ 11 ] f
+[ 11 ] 2
+[ 12 ] e 3
+[ 12 ] f
+[ 12 ] 3
+[ 13 ] e 4
+[ 13 ] f
+[ 13 ] 4
+[ 14 ] e 5
+[ 14 ] f
+[ 14 ] 5
+[ 15 ] e 6
+[ 15 ] f
+[ 15 ] 6
+24 differences found
+EXIT CODE: 1
diff --git a/tools/test/h5diff/testfiles/h5diff_612.txt b/tools/test/h5diff/testfiles/h5diff_612.txt
index 05318bd..97a1747 100644
--- a/tools/test/h5diff/testfiles/h5diff_612.txt
+++ b/tools/test/h5diff/testfiles/h5diff_612.txt
@@ -1,5 +1,5 @@
<-p -4> is not a valid option
-usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
+usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
file1 File name of the first HDF5 file
file2 File name of the second HDF5 file
[obj1] Name of an HDF5 object, in absolute path
@@ -60,19 +60,22 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
-n C, --count=C
Print differences up to C. C must be a positive integer.
-d D, --delta=D
- Print difference if (|a-b| > D). D must be a positive number.
+ Print difference if (|a-b| > D). D must be a positive number. Where a
+ is the data point value in file1 and b is the data point value in file2.
Can not use with '-p' or '--use-system-epsilon'.
-p R, --relative=R
- Print difference if (|(a-b)/b| > R). R must be a positive number.
+ Print difference if (|(a-b)/b| > R). R must be a positive number. Where a
+ is the data point value in file1 and b is the data point value in file2.
Can not use with '-d' or '--use-system-epsilon'.
--use-system-epsilon
- Print difference if (|a-b| > EPSILON), EPSILON is system defined value.
+ Print difference if (|a-b| > EPSILON), EPSILON is system defined value. Where a
+ is the data point value in file1 and b is the data point value in file2.
If the system epsilon is not defined,one of the following predefined
values will be used:
FLT_EPSILON = 1.19209E-07 for floating-point type
DBL_EPSILON = 2.22045E-16 for double precision type
Can not use with '-p' or '-d'.
- --exclude-path "path"
+ --exclude-path "path"
Exclude the specified path to an object when comparing files or groups.
If a group is excluded, all member objects will also be excluded.
The specified path is excluded wherever it occurs.
@@ -107,15 +110,15 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
common objects.

Object comparison:
- 1) Groups
+ 1) Groups
First compares the names of member objects (relative path, from the
specified group) and generates a report of objects that appear in only
one group or in both groups. Common objects are then compared recursively.
- 2) Datasets
+ 2) Datasets
Array rank and dimensions, datatypes, and data values are compared.
- 3) Datatypes
+ 3) Datatypes
The comparison is based on the return value of H5Tequal.
- 4) Symbolic links
+ 4) Symbolic links
The paths to the target objects are compared.
(The option --follow-symlinks overrides the default behavior when
symbolic links are compared.).
diff --git a/tools/test/h5diff/testfiles/h5diff_615.txt b/tools/test/h5diff/testfiles/h5diff_615.txt
index fd756b3..78770f0 100644
--- a/tools/test/h5diff/testfiles/h5diff_615.txt
+++ b/tools/test/h5diff/testfiles/h5diff_615.txt
@@ -1,5 +1,5 @@
<-p 0x1> is not a valid option
-usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
+usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
file1 File name of the first HDF5 file
file2 File name of the second HDF5 file
[obj1] Name of an HDF5 object, in absolute path
@@ -60,19 +60,22 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
-n C, --count=C
Print differences up to C. C must be a positive integer.
-d D, --delta=D
- Print difference if (|a-b| > D). D must be a positive number.
+ Print difference if (|a-b| > D). D must be a positive number. Where a
+ is the data point value in file1 and b is the data point value in file2.
Can not use with '-p' or '--use-system-epsilon'.
-p R, --relative=R
- Print difference if (|(a-b)/b| > R). R must be a positive number.
+ Print difference if (|(a-b)/b| > R). R must be a positive number. Where a
+ is the data point value in file1 and b is the data point value in file2.
Can not use with '-d' or '--use-system-epsilon'.
--use-system-epsilon
- Print difference if (|a-b| > EPSILON), EPSILON is system defined value.
+ Print difference if (|a-b| > EPSILON), EPSILON is system defined value. Where a
+ is the data point value in file1 and b is the data point value in file2.
If the system epsilon is not defined,one of the following predefined
values will be used:
FLT_EPSILON = 1.19209E-07 for floating-point type
DBL_EPSILON = 2.22045E-16 for double precision type
Can not use with '-p' or '-d'.
- --exclude-path "path"
+ --exclude-path "path"
Exclude the specified path to an object when comparing files or groups.
If a group is excluded, all member objects will also be excluded.
The specified path is excluded wherever it occurs.
@@ -107,15 +110,15 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
common objects.

Object comparison:
- 1) Groups
+ 1) Groups
First compares the names of member objects (relative path, from the
specified group) and generates a report of objects that appear in only
one group or in both groups. Common objects are then compared recursively.
- 2) Datasets
+ 2) Datasets
Array rank and dimensions, datatypes, and data values are compared.
- 3) Datatypes
+ 3) Datatypes
The comparison is based on the return value of H5Tequal.
- 4) Symbolic links
+ 4) Symbolic links
The paths to the target objects are compared.
(The option --follow-symlinks overrides the default behavior when
symbolic links are compared.).
diff --git a/tools/test/h5diff/testfiles/h5diff_62.txt b/tools/test/h5diff/testfiles/h5diff_62.txt
new file mode 100644
index 0000000..0cc0947
--- /dev/null
+++ b/tools/test/h5diff/testfiles/h5diff_62.txt
@@ -0,0 +1,37 @@
+dataset: </string3> and </string3>
+size: [27] [27]
+position string3 string3 difference
+------------------------------------------------------------
+[ 12 ] d c
+[ 12 ] 2 d
+[ 12 ] \000 2
+[ 13 ] 3 d
+[ 13 ] \000 3
+[ 14 ] b a
+[ 14 ] c b
+[ 14 ] d c
+[ 14 ] 4 d
+[ 14 ] \000 4
+[ 15 ] c b
+[ 15 ] d c
+[ 15 ] 5 d
+[ 15 ] \000 5
+[ 16 ] c d
+[ 16 ] d 6
+[ 16 ] 6 \000
+[ 17 ] d 7
+[ 17 ] 7 \000
+[ 18 ] a b
+[ 18 ] b c
+[ 18 ] c d
+[ 18 ] d 8
+[ 18 ] 8 \000
+[ 19 ] b c
+[ 19 ] c d
+[ 19 ] d 9
+[ 19 ] 9 \000
+[ 24 ] c d
+[ 25 ] c d
+[ 26 ] c d
+31 differences found
+EXIT CODE: 1
diff --git a/tools/test/h5diff/testfiles/h5diff_621.txt b/tools/test/h5diff/testfiles/h5diff_621.txt
index fd8c680..b2a5881 100644
--- a/tools/test/h5diff/testfiles/h5diff_621.txt
+++ b/tools/test/h5diff/testfiles/h5diff_621.txt
@@ -1,5 +1,5 @@
<-n -4> is not a valid option
-usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
+usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
file1 File name of the first HDF5 file
file2 File name of the second HDF5 file
[obj1] Name of an HDF5 object, in absolute path
@@ -60,19 +60,22 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
-n C, --count=C
Print differences up to C. C must be a positive integer.
-d D, --delta=D
- Print difference if (|a-b| > D). D must be a positive number.
+ Print difference if (|a-b| > D). D must be a positive number. Where a
+ is the data point value in file1 and b is the data point value in file2.
Can not use with '-p' or '--use-system-epsilon'.
-p R, --relative=R
- Print difference if (|(a-b)/b| > R). R must be a positive number.
+ Print difference if (|(a-b)/b| > R). R must be a positive number. Where a
+ is the data point value in file1 and b is the data point value in file2.
Can not use with '-d' or '--use-system-epsilon'.
--use-system-epsilon
- Print difference if (|a-b| > EPSILON), EPSILON is system defined value.
+ Print difference if (|a-b| > EPSILON), EPSILON is system defined value. Where a
+ is the data point value in file1 and b is the data point value in file2.
If the system epsilon is not defined,one of the following predefined
values will be used:
FLT_EPSILON = 1.19209E-07 for floating-point type
DBL_EPSILON = 2.22045E-16 for double precision type
Can not use with '-p' or '-d'.
- --exclude-path "path"
+ --exclude-path "path"
Exclude the specified path to an object when comparing files or groups.
If a group is excluded, all member objects will also be excluded.
The specified path is excluded wherever it occurs.
@@ -107,15 +110,15 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
common objects.

Object comparison:
- 1) Groups
+ 1) Groups
First compares the names of member objects (relative path, from the
specified group) and generates a report of objects that appear in only
one group or in both groups. Common objects are then compared recursively.
- 2) Datasets
+ 2) Datasets
Array rank and dimensions, datatypes, and data values are compared.
- 3) Datatypes
+ 3) Datatypes
The comparison is based on the return value of H5Tequal.
- 4) Symbolic links
+ 4) Symbolic links
The paths to the target objects are compared.
(The option --follow-symlinks overrides the default behavior when
symbolic links are compared.).
diff --git a/tools/test/h5diff/testfiles/h5diff_622.txt b/tools/test/h5diff/testfiles/h5diff_622.txt
index f0d38af..66a3025 100644
--- a/tools/test/h5diff/testfiles/h5diff_622.txt
+++ b/tools/test/h5diff/testfiles/h5diff_622.txt
@@ -1,5 +1,5 @@
<-n 0> is not a valid option
-usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
+usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
file1 File name of the first HDF5 file
file2 File name of the second HDF5 file
[obj1] Name of an HDF5 object, in absolute path
@@ -60,19 +60,22 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
-n C, --count=C
Print differences up to C. C must be a positive integer.
-d D, --delta=D
- Print difference if (|a-b| > D). D must be a positive number.
+ Print difference if (|a-b| > D). D must be a positive number. Where a
+ is the data point value in file1 and b is the data point value in file2.
Can not use with '-p' or '--use-system-epsilon'.
-p R, --relative=R
- Print difference if (|(a-b)/b| > R). R must be a positive number.
+ Print difference if (|(a-b)/b| > R). R must be a positive number. Where a
+ is the data point value in file1 and b is the data point value in file2.
Can not use with '-d' or '--use-system-epsilon'.
--use-system-epsilon
- Print difference if (|a-b| > EPSILON), EPSILON is system defined value.
+ Print difference if (|a-b| > EPSILON), EPSILON is system defined value. Where a
+ is the data point value in file1 and b is the data point value in file2.
If the system epsilon is not defined,one of the following predefined
values will be used:
FLT_EPSILON = 1.19209E-07 for floating-point type
DBL_EPSILON = 2.22045E-16 for double precision type
Can not use with '-p' or '-d'.
- --exclude-path "path"
+ --exclude-path "path"
Exclude the specified path to an object when comparing files or groups.
If a group is excluded, all member objects will also be excluded.
The specified path is excluded wherever it occurs.
@@ -107,15 +110,15 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
common objects.

Object comparison:
- 1) Groups
+ 1) Groups
First compares the names of member objects (relative path, from the
specified group) and generates a report of objects that appear in only
one group or in both groups. Common objects are then compared recursively.
- 2) Datasets
+ 2) Datasets
Array rank and dimensions, datatypes, and data values are compared.
- 3) Datatypes
+ 3) Datatypes
The comparison is based on the return value of H5Tequal.
- 4) Symbolic links
+ 4) Symbolic links
The paths to the target objects are compared.
(The option --follow-symlinks overrides the default behavior when
symbolic links are compared.).
diff --git a/tools/test/h5diff/testfiles/h5diff_623.txt b/tools/test/h5diff/testfiles/h5diff_623.txt
index 4ab66a5..f9258d1 100644
--- a/tools/test/h5diff/testfiles/h5diff_623.txt
+++ b/tools/test/h5diff/testfiles/h5diff_623.txt
@@ -1,5 +1,5 @@
<-n u> is not a valid option
-usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
+usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
file1 File name of the first HDF5 file
file2 File name of the second HDF5 file
[obj1] Name of an HDF5 object, in absolute path
@@ -60,19 +60,22 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
-n C, --count=C
Print differences up to C. C must be a positive integer.
-d D, --delta=D
- Print difference if (|a-b| > D). D must be a positive number.
+ Print difference if (|a-b| > D). D must be a positive number. Where a
+ is the data point value in file1 and b is the data point value in file2.
Can not use with '-p' or '--use-system-epsilon'.
-p R, --relative=R
- Print difference if (|(a-b)/b| > R). R must be a positive number.
+ Print difference if (|(a-b)/b| > R). R must be a positive number. Where a
+ is the data point value in file1 and b is the data point value in file2.
Can not use with '-d' or '--use-system-epsilon'.
--use-system-epsilon
- Print difference if (|a-b| > EPSILON), EPSILON is system defined value.
+ Print difference if (|a-b| > EPSILON), EPSILON is system defined value. Where a
+ is the data point value in file1 and b is the data point value in file2.
If the system epsilon is not defined,one of the following predefined
values will be used:
FLT_EPSILON = 1.19209E-07 for floating-point type
DBL_EPSILON = 2.22045E-16 for double precision type
Can not use with '-p' or '-d'.
- --exclude-path "path"
+ --exclude-path "path"
Exclude the specified path to an object when comparing files or groups.
If a group is excluded, all member objects will also be excluded.
The specified path is excluded wherever it occurs.
@@ -107,15 +110,15 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
common objects.

Object comparison:
- 1) Groups
+ 1) Groups
First compares the names of member objects (relative path, from the
specified group) and generates a report of objects that appear in only
one group or in both groups. Common objects are then compared recursively.
- 2) Datasets
+ 2) Datasets
Array rank and dimensions, datatypes, and data values are compared.
- 3) Datatypes
+ 3) Datatypes
The comparison is based on the return value of H5Tequal.
- 4) Symbolic links
+ 4) Symbolic links
The paths to the target objects are compared.
(The option --follow-symlinks overrides the default behavior when
symbolic links are compared.).
diff --git a/tools/test/h5diff/testfiles/h5diff_624.txt b/tools/test/h5diff/testfiles/h5diff_624.txt
index f5e7ee3..57a4ddd 100644
--- a/tools/test/h5diff/testfiles/h5diff_624.txt
+++ b/tools/test/h5diff/testfiles/h5diff_624.txt
@@ -1,5 +1,5 @@
<-n 0x1> is not a valid option
-usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
+usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
file1 File name of the first HDF5 file
file2 File name of the second HDF5 file
[obj1] Name of an HDF5 object, in absolute path
@@ -60,19 +60,22 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
-n C, --count=C
Print differences up to C. C must be a positive integer.
-d D, --delta=D
- Print difference if (|a-b| > D). D must be a positive number.
+ Print difference if (|a-b| > D). D must be a positive number. Where a
+ is the data point value in file1 and b is the data point value in file2.
Can not use with '-p' or '--use-system-epsilon'.
-p R, --relative=R
- Print difference if (|(a-b)/b| > R). R must be a positive number.
+ Print difference if (|(a-b)/b| > R). R must be a positive number. Where a
+ is the data point value in file1 and b is the data point value in file2.
Can not use with '-d' or '--use-system-epsilon'.
--use-system-epsilon
- Print difference if (|a-b| > EPSILON), EPSILON is system defined value.
+ Print difference if (|a-b| > EPSILON), EPSILON is system defined value. Where a
+ is the data point value in file1 and b is the data point value in file2.
If the system epsilon is not defined,one of the following predefined
values will be used:
FLT_EPSILON = 1.19209E-07 for floating-point type
DBL_EPSILON = 2.22045E-16 for double precision type
Can not use with '-p' or '-d'.
- --exclude-path "path"
+ --exclude-path "path"
Exclude the specified path to an object when comparing files or groups.
If a group is excluded, all member objects will also be excluded.
The specified path is excluded wherever it occurs.
@@ -107,15 +110,15 @@ usage: h5diff [OPTIONS] file1 file2 [obj1[ obj2]]
common objects.

Object comparison:
- 1) Groups
+ 1) Groups
First compares the names of member objects (relative path, from the
specified group) and generates a report of objects that appear in only
one group or in both groups. Common objects are then compared recursively.
- 2) Datasets
+ 2) Datasets
Array rank and dimensions, datatypes, and data values are compared.
- 3) Datatypes
+ 3) Datatypes
The comparison is based on the return value of H5Tequal.
- 4) Symbolic links
+ 4) Symbolic links
The paths to the target objects are compared.
(The option --follow-symlinks overrides the default behavior when
symbolic links are compared.).
diff --git a/tools/test/h5diff/testfiles/h5diff_63.txt b/tools/test/h5diff/testfiles/h5diff_63.txt
new file mode 100644
index 0000000..043da16
--- /dev/null
+++ b/tools/test/h5diff/testfiles/h5diff_63.txt
@@ -0,0 +1,10 @@
+dataset: </string4> and </string4>
+size: [3] [3]
+position string4 string4 difference
+------------------------------------------------------------
+[ 1 ] 8
+[ 1 ] 9
+[ 2 ] 8
+[ 2 ] 9
+4 differences found
+EXIT CODE: 1
diff --git a/tools/test/h5diff/testfiles/h5diff_80.txt b/tools/test/h5diff/testfiles/h5diff_80.txt
index 5957d72..836e073 100644
--- a/tools/test/h5diff/testfiles/h5diff_80.txt
+++ b/tools/test/h5diff/testfiles/h5diff_80.txt
@@ -865,7 +865,7 @@ position vlen3D vlen3D difference
[ 3 2 1 ] 59 0 59
59 differences found
dataset: </refreg> and </refreg>
-Referenced dataset 10720 10720
+Referenced dataset 10784 10784
------------------------------------------------------------
Region blocks
block #0 (2,2)-(7,7) (0,0)-(2,2)
diff --git a/tools/test/h5diff/testfiles/h5diff_800.txt b/tools/test/h5diff/testfiles/h5diff_800.txt
new file mode 100644
index 0000000..e43e7b4
--- /dev/null
+++ b/tools/test/h5diff/testfiles/h5diff_800.txt
@@ -0,0 +1,12 @@
+dataset: </g1/array> and </g1/array>
+size: [2] [2]
+position array array difference
+------------------------------------------------------------
+[ 0 ] 1 0 1
+[ 0 ] 2 0 2
+[ 0 ] 3 0 3
+[ 1 ] 4 0 4
+[ 1 ] 5 0 5
+[ 1 ] 6 0 6
+6 differences found
+EXIT CODE: 1
diff --git a/tools/test/h5diff/testfiles/h5diff_801.txt b/tools/test/h5diff/testfiles/h5diff_801.txt
new file mode 100644
index 0000000..043e7e3
--- /dev/null
+++ b/tools/test/h5diff/testfiles/h5diff_801.txt
@@ -0,0 +1,13 @@
+dataset: </g1/array> and </g1/array>
+Warning: different storage datatype
+size: [2] [2]
+position array array difference
+------------------------------------------------------------
+[ 0 ] 1 0 1
+[ 0 ] 2 0 2
+[ 0 ] 3 0 3
+[ 1 ] 4 0 4
+[ 1 ] 5 0 5
+[ 1 ] 6 0 6
+6 differences found
+EXIT CODE: 1
diff --git a/tools/test/h5diff/testfiles/h5diff_dset1.h5 b/tools/test/h5diff/testfiles/h5diff_dset1.h5
index 123e141..fefd86a 100644
--- a/tools/test/h5diff/testfiles/h5diff_dset1.h5
+++ b/tools/test/h5diff/testfiles/h5diff_dset1.h5
Binary files differ
diff --git a/tools/test/h5diff/testfiles/h5diff_dset2.h5 b/tools/test/h5diff/testfiles/h5diff_dset2.h5
index 3ae6993..598e481 100644
--- a/tools/test/h5diff/testfiles/h5diff_dset2.h5
+++ b/tools/test/h5diff/testfiles/h5diff_dset2.h5
Binary files differ
diff --git a/tools/test/h5diff/testfiles/h5diff_dset3.h5 b/tools/test/h5diff/testfiles/h5diff_dset3.h5
new file mode 100644
index 0000000..893c135
--- /dev/null
+++ b/tools/test/h5diff/testfiles/h5diff_dset3.h5
Binary files differ
diff --git a/tools/test/h5diff/testfiles/h5diff_idx.txt b/tools/test/h5diff/testfiles/h5diff_ud.txt
index 754d3ea..ddda3ff 100644
--- a/tools/test/h5diff/testfiles/h5diff_idx.txt
+++ b/tools/test/h5diff/testfiles/h5diff_ud.txt
@@ -2,13 +2,10 @@
file1 file2
---------------------------------------
x x /
- x x /dset
- x x /dset_filter
+ x x /dynlibud

group : </> and </>
0 differences found
-dataset: </dset> and </dset>
-0 differences found
-dataset: </dset_filter> and </dset_filter>
+dataset: </dynlibud> and </dynlibud>
0 differences found
EXIT CODE: 0
diff --git a/tools/test/h5diff/testfiles/h5diff_udfail.txt b/tools/test/h5diff/testfiles/h5diff_udfail.txt
new file mode 100644
index 0000000..c154c6b
--- /dev/null
+++ b/tools/test/h5diff/testfiles/h5diff_udfail.txt
@@ -0,0 +1,12 @@
+
+file1 file2
+---------------------------------------
+ x x /
+ x x /dynlibud
+
+group : </> and </>
+0 differences found
+dataset: </dynlibud> and </dynlibud>
+0 differences found
+warning: dataset </dynlibud> cannot be read, user defined filter is not available
+EXIT CODE: 2
diff --git a/tools/test/h5diff/testfiles/tudfilter.h5 b/tools/test/h5diff/testfiles/tudfilter.h5
new file mode 100644
index 0000000..081b000
--- /dev/null
+++ b/tools/test/h5diff/testfiles/tudfilter.h5
Binary files differ
diff --git a/tools/test/h5diff/testfiles/tudfilter2.h5 b/tools/test/h5diff/testfiles/tudfilter2.h5
new file mode 100644
index 0000000..081b000
--- /dev/null
+++ b/tools/test/h5diff/testfiles/tudfilter2.h5
Binary files differ
diff --git a/tools/test/h5diff/testh5diff.sh.in b/tools/test/h5diff/testh5diff.sh.in
index da883ac..d769c23 100644
--- a/tools/test/h5diff/testh5diff.sh.in
+++ b/tools/test/h5diff/testh5diff.sh.in
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Tests for the h5diff tool
#
@@ -124,6 +122,11 @@ $SRC_H5DIFF_TESTFILES/tmptest.he5
$SRC_H5DIFF_TESTFILES/tmptest2.he5
$SRC_H5DIFF_TESTFILES/tmpSingleSiteBethe.reference.h5
$SRC_H5DIFF_TESTFILES/tmpSingleSiteBethe.output.h5
+$SRC_H5DIFF_TESTFILES/diff_strings1.h5
+$SRC_H5DIFF_TESTFILES/diff_strings2.h5
+"
+
+LIST_HDF5_VDS_TEST_FILES="
$SRC_TOOLS_TESTFILES/vds/1_a.h5
$SRC_TOOLS_TESTFILES/vds/1_b.h5
$SRC_TOOLS_TESTFILES/vds/1_c.h5
@@ -279,6 +282,10 @@ $SRC_H5DIFF_TESTFILES/h5diff_517.txt
$SRC_H5DIFF_TESTFILES/h5diff_518.txt
$SRC_H5DIFF_TESTFILES/h5diff_530.txt
$SRC_H5DIFF_TESTFILES/h5diff_540.txt
+$SRC_H5DIFF_TESTFILES/h5diff_60.txt
+$SRC_H5DIFF_TESTFILES/h5diff_61.txt
+$SRC_H5DIFF_TESTFILES/h5diff_62.txt
+$SRC_H5DIFF_TESTFILES/h5diff_63.txt
$SRC_H5DIFF_TESTFILES/h5diff_600.txt
$SRC_H5DIFF_TESTFILES/h5diff_601.txt
$SRC_H5DIFF_TESTFILES/h5diff_603.txt
@@ -339,7 +346,7 @@ $SRC_H5DIFF_TESTFILES/h5diff_v3.txt
#
# copy test files and expected output files from source dirs to test dir
#
-COPY_TESTFILES="$LIST_HDF5_TEST_FILES $LIST_OTHER_TEST_FILES $LIST_HDF5_TEST_FILES_XML $LIST_OTHER_TEST_FILES_XML"
+COPY_TESTFILES="$LIST_HDF5_TEST_FILES $LIST_HDF5_VDS_TEST_FILES $LIST_OTHER_TEST_FILES $LIST_HDF5_TEST_FILES_XML $LIST_OTHER_TEST_FILES_XML"

COPY_TESTFILES_TO_TESTDIR()
{
@@ -379,6 +386,21 @@ CLEAN_TESTFILES_AND_TESTDIR()
INODE_DDIR=`$LS -i -d $TESTDIR | $AWK -F' ' '{print $1}'`
if [ "$INODE_SDIR" != "$INODE_DDIR" ]; then
$RM $TESTDIR
+ else
+ # files in $LIST_HDF5_VDS_TEST_FILES are copied in from a different
+ # directory, so when srcdir is the same as destdir and $TESTDIR is
+ # not deleted, the copy in $TESTDIR of each file from
+ # $LIST_HDF5_VDS_TEST_FILES will need to be deleted.
+ for tstfile in $LIST_HDF5_VDS_TEST_FILES
+ do
+ # ignore '#' comment
+ echo $tstfile | tr -d ' ' | grep '^#' > /dev/null
+ RET=$?
+ if [ $RET -eq 1 ]; then
+ fname=`basename $tstfile`
+ rm $TESTDIR/$fname
+ fi
+ done
fi
}
@@ -387,24 +409,24 @@ CLEAN_TESTFILES_AND_TESTDIR()
# -h print help page
while [ $# -gt 0 ]; do
case "$1" in
- -p) # reset the tool name and bin to run ph5diff tests
- TESTNAME=ph5diff
- H5DIFF=../../src/h5diff/ph5diff # The tool name
- H5DIFF_BIN=`pwd`/$H5DIFF # The path of the tool binary
- pmode=yes
- shift
- ;;
+ -p) # reset the tool name and bin to run ph5diff tests
+ TESTNAME=ph5diff
+ H5DIFF=../../src/h5diff/ph5diff # The tool name
+ H5DIFF_BIN=`pwd`/$H5DIFF # The path of the tool binary
+ pmode=yes
+ shift
+ ;;
-h) # print help page
- echo "$0 [-p] [-h]"
- echo " -p run ph5diff tests"
- echo " -h print help page"
- shift
- exit 0
- ;;
+ echo "$0 [-p] [-h]"
+ echo " -p run ph5diff tests"
+ echo " -h print help page"
+ shift
+ exit 0
+ ;;
*) # unknown option
echo "$0: Unknown option ($1)"
- exit 1
- ;;
+ exit 1
+ ;;
esac
done
@@ -455,11 +477,11 @@ TOOLTEST() {
# Run test.
TESTING $H5DIFF $@
(
- #echo "#############################"
- #echo "Expected output for '$H5DIFF $@'"
- #echo "#############################"
- cd $TESTDIR
- eval $RUNCMD $H5DIFF_BIN "$@"
+ #echo "#############################"
+ #echo "Expected output for '$H5DIFF $@'"
+ #echo "#############################"
+ cd $TESTDIR
+ eval $RUNCMD $H5DIFF_BIN "$@"
) >$actual 2>$actual_err
EXIT_CODE=$?
# save actual and actual_err in case they are needed later.
@@ -480,6 +502,8 @@ TOOLTEST() {
# Create the expect file if it doesn't yet exist.
echo " CREATED"
cp $actual $expect
+ echo " Expected result (*.ddl) missing"
+ nerrors="`expr $nerrors + 1`"
elif $CMP $expect $actual; then
echo " PASSED"
elif test $h5haveexitcode = 'yes' -a -z "$pmode"; then
@@ -499,22 +523,22 @@ TOOLTEST() {
# is done by serial mode.
grep -v "EXIT CODE:" $expect_sorted > $expect_sorted.noexit
mv $expect_sorted.noexit $expect_sorted
- if $CMP $expect_sorted $actual_sorted; then
- echo " PASSED"
- else
- echo "*FAILED*"
- nerrors="`expr $nerrors + 1`"
- if test yes = "$verbose"; then
- echo "====Expected result ($expect_sorted) differs from actual result ($actual_sorted)"
- $DIFF $expect_sorted $actual_sorted |sed 's/^/ /'
- echo "====The actual output ($actual_sav)"
- sed 's/^/ /' < $actual_sav
- echo "====The actual stderr ($actual_err_sav)"
- sed 's/^/ /' < $actual_err_sav
- echo "====End of actual stderr ($actual_err_sav)"
- echo ""
- fi
- fi
+ if $CMP $expect_sorted $actual_sorted; then
+ echo " PASSED"
+ else
+ echo "*FAILED*"
+ nerrors="`expr $nerrors + 1`"
+ if test yes = "$verbose"; then
+ echo "====Expected result ($expect_sorted) differs from actual result ($actual_sorted)"
+ $DIFF $expect_sorted $actual_sorted |sed 's/^/ /'
+ echo "====The actual output ($actual_sav)"
+ sed 's/^/ /' < $actual_sav
+ echo "====The actual stderr ($actual_err_sav)"
+ sed 's/^/ /' < $actual_err_sav
+ echo "====End of actual stderr ($actual_err_sav)"
+ echo ""
+ fi
+ fi
fi
# Clean up output file
@@ -635,8 +659,6 @@ TOOLTEST h5diff_28.txt -v h5diff_types.h5 h5diff_types.h5 l1 l2
TOOLTEST h5diff_30.txt -v h5diff_enum_invalid_values.h5 h5diff_enum_invalid_values.h5 dset1 dset2
-
-
# ##############################################################################
# # Dataset datatypes
# ##############################################################################
@@ -672,6 +694,13 @@ TOOLTEST h5diff_58.txt -v h5diff_dset1.h5 h5diff_dset2.h5 refreg
# ( HDDFV-7942 )
TOOLTEST h5diff_59.txt -v h5diff_dtypes.h5 h5diff_dtypes.h5 dset11a dset11b
+# Strings
+# ( HDFFV-10128 )
+TOOLTEST h5diff_60.txt -v diff_strings1.h5 diff_strings2.h5 string1 string1
+TOOLTEST h5diff_61.txt -v diff_strings1.h5 diff_strings2.h5 string2 string2
+TOOLTEST h5diff_62.txt -v diff_strings1.h5 diff_strings2.h5 string3 string3
+TOOLTEST h5diff_63.txt -v diff_strings1.h5 diff_strings2.h5 string4 string4
+
# ##############################################################################
# # Error messages
# ##############################################################################
@@ -743,8 +772,6 @@ TOOLTEST h5diff_618.txt -p 2 h5diff_basic1.h5 h5diff_basic2.h5 g1/dset3 g1/dset4
# 6.19: number smaller than smallest difference
TOOLTEST h5diff_619.txt -p 0.005 h5diff_basic1.h5 h5diff_basic2.h5 g1/dset3 g1/dset4
-
-
# ##############################################################################
# # -n
# ##############################################################################
@@ -797,7 +824,6 @@ TOOLTEST h5diff_tmp2.txt tmpSingleSiteBethe.output.h5 tmpSingleSiteBethe.referen
# ##################################################
# attrs with verbose option level
# ##################################################
-
TOOLTEST h5diff_700.txt -v1 h5diff_attr1.h5 h5diff_attr2.h5
TOOLTEST h5diff_701.txt -v2 h5diff_attr1.h5 h5diff_attr2.h5
TOOLTEST h5diff_702.txt --verbose=1 h5diff_attr1.h5 h5diff_attr2.h5
@@ -848,7 +874,6 @@ TOOLTEST h5diff_103.txt -v --use-system-epsilon h5diff_basic1.h5 h5diff_basic1.h
# with --use-system-epsilon for float value
TOOLTEST h5diff_104.txt -v --use-system-epsilon h5diff_basic1.h5 h5diff_basic1.h5 g1/fp1 g1/fp2
-
# not comparable -c flag
TOOLTEST h5diff_200.txt h5diff_basic2.h5 h5diff_basic2.h5 g2/dset1 g2/dset2
@@ -976,7 +1001,6 @@ TOOLTEST h5diff_424.txt --follow-symlinks -v h5diff_ext2softlink_trg.h5 h5diff_e
# extlink_to_softlink_to_dset1 vs extlink_to_softlink_to_dset2"
TOOLTEST h5diff_425.txt --follow-symlinks -v h5diff_ext2softlink_src.h5 h5diff_ext2softlink_src.h5 /ext_link_to_slink1 /ext_link_to_slink2
-
# ##############################################################################
# # Dangling links compare (--follow-symlinks and --no-dangling-links)
# ##############################################################################
@@ -1127,6 +1151,7 @@ TOOLTEST h5diff_540.txt -v compounds_array_vlen1.h5 compounds_array_vlen2.h5
# ##############################################################################
# # Test mutually exclusive options
# ##############################################################################
+#
# Test with -d , -p and --use-system-epsilon.
TOOLTEST h5diff_640.txt -v -d 5 -p 0.05 --use-system-epsilon h5diff_basic1.h5 h5diff_basic2.h5 /g1/dset3 /g1/dset4
TOOLTEST h5diff_641.txt -v -d 5 -p 0.05 h5diff_basic1.h5 h5diff_basic2.h5 /g1/dset3 /g1/dset4
diff --git a/tools/test/h5diff/testph5diff.sh.in b/tools/test/h5diff/testph5diff.sh.in
index ca212a1..b726a80 100644
--- a/tools/test/h5diff/testph5diff.sh.in
+++ b/tools/test/h5diff/testph5diff.sh.in
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
srcdir=@srcdir@
diff --git a/tools/test/h5dump/CMakeLists.txt b/tools/test/h5dump/CMakeLists.txt
index 1398aca..89c7534 100644
--- a/tools/test/h5dump/CMakeLists.txt
+++ b/tools/test/h5dump/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_TOOLS_TEST_H5DUMP)
#-----------------------------------------------------------------------------
@@ -6,6 +6,34 @@ PROJECT (HDF5_TOOLS_TEST_H5DUMP)
#-----------------------------------------------------------------------------
INCLUDE_DIRECTORIES (${HDF5_TOOLS_DIR}/lib)
+ #-----------------------------------------------------------------------------
+ # If plugin library tests can be tested
+ #-----------------------------------------------------------------------------
+ set (HDF5_TOOL_PLUGIN_LIB_CORENAME "dynlibdump")
+ set (HDF5_TOOL_PLUGIN_LIB_NAME "${HDF5_EXTERNAL_LIB_PREFIX}${HDF5_TOOL_PLUGIN_LIB_CORENAME}")
+ set (HDF5_TOOL_PLUGIN_LIB_TARGET ${HDF5_TOOL_PLUGIN_LIB_CORENAME})
+ add_definitions (${HDF_EXTRA_C_FLAGS})
+ INCLUDE_DIRECTORIES (${HDF5_SRC_DIR})
+
+ add_library (${HDF5_TOOL_PLUGIN_LIB_TARGET} SHARED dynlib_dump.c)
+ TARGET_C_PROPERTIES (${HDF5_TOOL_PLUGIN_LIB_TARGET} SHARED " " " ")
+ target_link_libraries (${HDF5_TOOL_PLUGIN_LIB_TARGET} ${HDF5_TEST_LIB_TARGET})
+ H5_SET_LIB_OPTIONS (${HDF5_TOOL_PLUGIN_LIB_TARGET} ${HDF5_TOOL_PLUGIN_LIB_NAME} SHARED ${HDF5_PACKAGE_SOVERSION})
+
+ # make plugins dir
+ file (MAKE_DIRECTORY "${CMAKE_BINARY_DIR}/plugins")
+ #-----------------------------------------------------------------------------
+ # Copy plugin library to a plugins folder
+ #-----------------------------------------------------------------------------
+ add_custom_command (
+ TARGET ${HDF5_TOOL_PLUGIN_LIB_TARGET}
+ POST_BUILD
+ COMMAND ${CMAKE_COMMAND}
+ ARGS -E copy_if_different
+ "$<TARGET_FILE:${HDF5_TOOL_PLUGIN_LIB_TARGET}>"
+ "${CMAKE_BINARY_DIR}/plugins/$<TARGET_FILE_NAME:${HDF5_TOOL_PLUGIN_LIB_TARGET}>"
+ )
+
# --------------------------------------------------------------------
# Add the h5dump test executable
# --------------------------------------------------------------------
@@ -17,7 +45,7 @@ INCLUDE_DIRECTORIES (${HDF5_TOOLS_DIR}/lib)
set_target_properties (h5dumpgentest PROPERTIES FOLDER generator/tools)
#add_test (NAME h5dumpgentest COMMAND $<TARGET_FILE:h5dumpgentest>)
- endif (HDF5_BUILD_GENERATORS)
+ endif ()
include (CMakeTests.cmake)
diff --git a/tools/test/h5dump/CMakeTests.cmake b/tools/test/h5dump/CMakeTests.cmake
index ad3c5ba..ae4649a 100644
--- a/tools/test/h5dump/CMakeTests.cmake
+++ b/tools/test/h5dump/CMakeTests.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
##############################################################################
##############################################################################
@@ -51,7 +62,8 @@
${HDF5_TOOLS_DIR}/testfiles/tbin4.ddl
${HDF5_TOOLS_DIR}/testfiles/tbinregR.ddl
${HDF5_TOOLS_DIR}/testfiles/tbigdims.ddl
- ${HDF5_TOOLS_DIR}/testfiles/tbitnopaque.ddl
+ ${HDF5_TOOLS_DIR}/testfiles/tbitnopaque_be.ddl
+ ${HDF5_TOOLS_DIR}/testfiles/tbitnopaque_le.ddl
${HDF5_TOOLS_DIR}/testfiles/tboot1.ddl
${HDF5_TOOLS_DIR}/testfiles/tboot2.ddl
${HDF5_TOOLS_DIR}/testfiles/tboot2A.ddl
@@ -166,6 +178,7 @@
${HDF5_TOOLS_DIR}/testfiles/tstring2.ddl
${HDF5_TOOLS_DIR}/testfiles/tstringe.ddl
${HDF5_TOOLS_DIR}/testfiles/tszip.ddl
+ ${HDF5_TOOLS_DIR}/testfiles/tudfilter.ddl
${HDF5_TOOLS_DIR}/testfiles/tudlink-1.ddl
${HDF5_TOOLS_DIR}/testfiles/tudlink-2.ddl
${HDF5_TOOLS_DIR}/testfiles/tuserfilter.ddl
@@ -291,6 +304,7 @@
${HDF5_TOOLS_DIR}/testfiles/tstr.h5
${HDF5_TOOLS_DIR}/testfiles/tstr2.h5
${HDF5_TOOLS_DIR}/testfiles/tstr3.h5
+ ${HDF5_TOOLS_DIR}/testfiles/tudfilter.h5
${HDF5_TOOLS_DIR}/testfiles/tudlink.h5
${HDF5_TOOLS_DIR}/testfiles/tvldtypes1.h5
${HDF5_TOOLS_DIR}/testfiles/tvldtypes2.h5
@@ -381,16 +395,16 @@
##############################################################################
##############################################################################
- MACRO (ADD_HELP_TEST testname resultcode)
+ macro (ADD_HELP_TEST testname resultcode)
# If using memchecker add tests without using scripts
if (HDF5_ENABLE_USING_MEMCHECKER)
add_test (NAME H5DUMP-${testname} COMMAND $<TARGET_FILE:h5dump> ${ARGN})
set_tests_properties (H5DUMP-${testname} PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles/std")
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (H5DUMP-${testname} PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
set (last_test "H5DUMP-${testname}")
- else (HDF5_ENABLE_USING_MEMCHECKER)
+ else ()
add_test (
NAME H5DUMP-h5dump-${testname}
COMMAND "${CMAKE_COMMAND}"
@@ -402,34 +416,34 @@
-D "TEST_REFERENCE=h5dump-${testname}.txt"
-P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
)
- endif (HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO (ADD_HELP_TEST)
+ endif ()
+ endmacro ()
- MACRO (ADD_SKIP_H5_TEST skipresultfile skipresultcode testtype)
+ macro (ADD_SKIP_H5_TEST skipresultfile skipresultcode testtype)
if (${testtype} STREQUAL "SKIP")
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
add_test (
NAME H5DUMP-${skipresultfile}-SKIPPED
COMMAND ${CMAKE_COMMAND} -E echo "SKIP ${skipresultfile} ${ARGN}"
)
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
- else (${testtype} STREQUAL "SKIP")
+ endif ()
+ else ()
ADD_H5_TEST (${skipresultfile} ${skipresultcode} ${ARGN})
- endif (${testtype} STREQUAL "SKIP")
- ENDMACRO (ADD_SKIP_H5_TEST)
+ endif ()
+ endmacro ()
- MACRO (ADD_H5_TEST resultfile resultcode)
+ macro (ADD_H5_TEST resultfile resultcode)
# If using memchecker add tests without using scripts
if (HDF5_ENABLE_USING_MEMCHECKER)
add_test (NAME H5DUMP-${resultfile} COMMAND $<TARGET_FILE:h5dump> ${ARGN})
set_tests_properties (H5DUMP-${resultfile} PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles/std")
if (NOT ${resultcode} STREQUAL "0")
set_tests_properties (H5DUMP-${resultfile} PROPERTIES WILL_FAIL "true")
- endif (NOT ${resultcode} STREQUAL "0")
+ endif ()
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (H5DUMP-${resultfile} PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
- else (HDF5_ENABLE_USING_MEMCHECKER)
+ endif ()
+ else ()
add_test (
NAME H5DUMP-${resultfile}-clear-objects
COMMAND ${CMAKE_COMMAND}
@@ -448,21 +462,21 @@
-P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
)
set_tests_properties (H5DUMP-${resultfile} PROPERTIES DEPENDS "H5DUMP-${resultfile}-clear-objects")
- endif (HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO (ADD_H5_TEST file)
+ endif ()
+ endmacro ()
- MACRO (ADD_H5_TEST_N resultfile resultcode)
+ macro (ADD_H5_TEST_N resultfile resultcode)
# If using memchecker add tests without using scripts
if (HDF5_ENABLE_USING_MEMCHECKER)
add_test (NAME H5DUMP-N-${resultfile} COMMAND $<TARGET_FILE:h5dump> ${ARGN})
set_tests_properties (H5DUMP-N-${resultfile} PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles/std")
if (NOT ${resultcode} STREQUAL "0")
set_tests_properties (H5DUMP-N-${resultfile} PROPERTIES WILL_FAIL "true")
- endif (NOT ${resultcode} STREQUAL "0")
+ endif ()
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (H5DUMP-N-${resultfile} PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
- else (HDF5_ENABLE_USING_MEMCHECKER)
+ endif ()
+ else ()
add_test (
NAME H5DUMP-N-${resultfile}-clear-objects
COMMAND ${CMAKE_COMMAND}
@@ -481,21 +495,21 @@
-P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
)
set_tests_properties (H5DUMP-N-${resultfile} PROPERTIES DEPENDS "H5DUMP-N-${resultfile}-clear-objects")
- endif (HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO (ADD_H5_TEST_N file)
+ endif ()
+ endmacro ()
- MACRO (ADD_H5_TEST_EXPORT resultfile targetfile resultcode)
+ macro (ADD_H5_TEST_EXPORT resultfile targetfile resultcode)
# If using memchecker add tests without using scripts
if (HDF5_ENABLE_USING_MEMCHECKER)
add_test (NAME H5DUMP-${resultfile} COMMAND $<TARGET_FILE:h5dump> ${ARGN} ${resultfile}.txt ${targetfile})
set_tests_properties (H5DUMP-${resultfile} PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles/std")
if (NOT ${resultcode} STREQUAL "0")
set_tests_properties (H5DUMP-${resultfile} PROPERTIES WILL_FAIL "true")
- endif (NOT ${resultcode} STREQUAL "0")
+ endif ()
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (H5DUMP-${resultfile} PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
- else (HDF5_ENABLE_USING_MEMCHECKER)
+ endif ()
+ else ()
add_test (
NAME H5DUMP-${resultfile}-clear-objects
COMMAND ${CMAKE_COMMAND}
@@ -521,21 +535,21 @@
)
set_tests_properties (H5DUMP-${resultfile}-output-cmp PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles/std")
set_tests_properties (H5DUMP-${resultfile}-output-cmp PROPERTIES DEPENDS H5DUMP-${resultfile})
- endif (HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO (ADD_H5_TEST_EXPORT file)
+ endif ()
+ endmacro ()
- MACRO (ADD_H5_TEST_EXPORT_DDL resultfile targetfile resultcode ddlfile)
+ macro (ADD_H5_TEST_EXPORT_DDL resultfile targetfile resultcode ddlfile)
# If using memchecker add tests without using scripts
if (HDF5_ENABLE_USING_MEMCHECKER)
add_test (NAME H5DUMP-${resultfile} COMMAND $<TARGET_FILE:h5dump> --ddl=${ddlfile}.txt ${ARGN} ${resultfile}.txt ${targetfile})
set_tests_properties (H5DUMP-${resultfile} PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles/std")
if (NOT ${resultcode} STREQUAL "0")
set_tests_properties (H5DUMP-${resultfile} PROPERTIES WILL_FAIL "true")
- endif (NOT ${resultcode} STREQUAL "0")
+ endif ()
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (H5DUMP-${resultfile} PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
- else (HDF5_ENABLE_USING_MEMCHECKER)
+ endif ()
+ else ()
add_test (
NAME H5DUMP-${resultfile}-clear-objects
COMMAND ${CMAKE_COMMAND}
@@ -568,10 +582,10 @@
)
set_tests_properties (H5DUMP-${resultfile}-output-cmp-ddl PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles/std")
set_tests_properties (H5DUMP-${resultfile}-output-cmp-ddl PROPERTIES DEPENDS H5DUMP-${resultfile}-output-cmp)
- endif (HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO (ADD_H5_TEST_EXPORT_DDL file)
+ endif ()
+ endmacro ()
- MACRO (ADD_H5_EXPORT_TEST resultfile targetfile resultcode)
+ macro (ADD_H5_EXPORT_TEST resultfile targetfile resultcode)
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
add_test (
NAME H5DUMP-output-${resultfile}-clear-objects
@@ -592,10 +606,10 @@
)
set_tests_properties (H5DUMP-output-cmp-${resultfile} PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles/std")
set_tests_properties (H5DUMP-output-cmp-${resultfile} PROPERTIES DEPENDS H5DUMP-output-${resultfile})
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO (ADD_H5_EXPORT_TEST file)
+ endif ()
+ endmacro ()
- MACRO (ADD_H5_MASK_TEST resultfile resultcode)
+ macro (ADD_H5_MASK_TEST resultfile resultcode)
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
add_test (
NAME H5DUMP-${resultfile}
@@ -609,10 +623,10 @@
-D "TEST_MASK_ERROR=true"
-P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
)
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO (ADD_H5_MASK_TEST file)
+ endif ()
+ endmacro ()
- MACRO (ADD_H5ERR_MASK_TEST resultfile resultcode)
+ macro (ADD_H5ERR_MASK_TEST resultfile resultcode)
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
add_test (
NAME H5DUMP-${resultfile}
@@ -627,10 +641,10 @@
-D "TEST_MASK_ERROR=true"
-P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
)
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO (ADD_H5ERR_MASK_TEST file)
+ endif ()
+ endmacro ()
- MACRO (ADD_H5ERR_MASK_ENV_TEST resultfile resultcode envvar envval)
+ macro (ADD_H5ERR_MASK_ENV_TEST resultfile resultcode envvar envval)
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
add_test (
NAME H5DUMP-${resultfile}
@@ -647,10 +661,10 @@
-D "TEST_ENV_VALUE:STRING=${envval}"
-P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
)
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO (ADD_H5ERR_MASK_ENV_TEST)
+ endif ()
+ endmacro ()
- MACRO (ADD_H5_TEST_IMPORT conffile resultfile testfile resultcode)
+ macro (ADD_H5_TEST_IMPORT conffile resultfile testfile resultcode)
# If using memchecker add tests without using scripts
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
add_test (
@@ -677,12 +691,39 @@
add_test (NAME H5DUMP-IMPORT-h5diff-${resultfile} COMMAND h5diff ${testfile} ${resultfile}.h5 /integer /integer)
set_tests_properties (H5DUMP-IMPORT-h5diff-${resultfile} PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles/std")
set_tests_properties (H5DUMP-IMPORT-h5diff-${resultfile} PROPERTIES DEPENDS H5DUMP-IMPORT-h5import-${resultfile})
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO (ADD_H5_TEST_IMPORT file)
+ endif ()
+ endmacro ()
+
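+  # ADD_H5_UD_TEST runs h5dump through runTest.cmake with HDF5_PLUGIN_PATH
+  # pointed at the build tree's plugins directory, so the user-defined filter
+  # plugin built there can be loaded at run time.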
+ macro (ADD_H5_UD_TEST testname resultcode resultfile)
+ if (NOT HDF5_ENABLE_USING_MEMCHECKER)
+ # Remove any output file left over from previous test run
+ add_test (
+ NAME H5DUMP_UD-${testname}-clearall-objects
+ COMMAND ${CMAKE_COMMAND}
+ -E remove
+ testfiles/std/${resultfile}.out
+ testfiles/std/${resultfile}.out.err
+ )
+ add_test (
+ NAME H5DUMP_UD-${testname}
+ COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:h5dump>"
+ -D "TEST_ARGS:STRING=${ARGN}"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}/testfiles/std"
+ -D "TEST_OUTPUT=${resultfile}.out"
+ -D "TEST_EXPECT=${resultcode}"
+ -D "TEST_REFERENCE=${resultfile}.ddl"
+ -D "TEST_ENV_VAR=HDF5_PLUGIN_PATH"
+ -D "TEST_ENV_VALUE=${CMAKE_BINARY_DIR}/plugins"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+ set_tests_properties (H5DUMP_UD-${testname} PROPERTIES DEPENDS H5DUMP_UD-${testname}-clearall-objects)
+ endif ()
+ endmacro ()
##############################################################################
##############################################################################
-### T H E T E S T S HDF5_ENABLE_USING_MEMCHECKER ###
+### T H E T E S T S ###
##############################################################################
##############################################################################
@@ -770,8 +811,10 @@
tbinregR.out.err
tbigdims.out
tbigdims.out.err
- tbitnopaque.out
- tbitnopaque.out.err
+ tbitnopaque_be.out
+ tbitnopaque_be.out.err
+ tbitnopaque_le.out
+ tbitnopaque_le.out.err
tboot1.out
tboot1.out.err
tboot2.out
@@ -1038,9 +1081,9 @@
set_tests_properties (H5DUMP-clearall-objects PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles/std")
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (H5DUMP-clearall-objects PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
set (last_test "H5DUMP-clearall-objects")
- endif (HDF5_ENABLE_USING_MEMCHECKER)
+ endif ()
ADD_HELP_TEST(help 0 -h)
@@ -1132,7 +1175,11 @@
ADD_H5_TEST (tcomp-4 0 --enable-error-stack tcompound_complex.h5)
ADD_H5_TEST (tcompound_complex2 0 --enable-error-stack tcompound_complex2.h5)
# tests for bitfields and opaque data types
- ADD_H5_TEST (tbitnopaque 0 --enable-error-stack tbitnopaque.h5)
+ if (H5_WORDS_BIGENDIAN)
+ ADD_H5_TEST (tbitnopaque_be 0 --enable-error-stack tbitnopaque.h5)
+ else ()
+ ADD_H5_TEST (tbitnopaque_le 0 --enable-error-stack tbitnopaque.h5)
+ endif ()
#test for the nested compound type
ADD_H5_TEST (tnestcomp-1 0 --enable-error-stack tnestedcomp.h5)
@@ -1321,11 +1368,11 @@
# detect whether the encoder is present.
if (H5_HAVE_FILTER_DEFLATE)
set (USE_FILTER_DEFLATE "true")
- endif (H5_HAVE_FILTER_DEFLATE)
+ endif ()
if (H5_HAVE_FILTER_SZIP)
set (USE_FILTER_SZIP "true")
- endif (H5_HAVE_FILTER_SZIP)
+ endif ()
if (USE_FILTER_DEFLATE)
# data read internal filters
@@ -1333,8 +1380,8 @@
if (HDF5_ENABLE_SZIP_SUPPORT)
# data read all filters
ADD_H5_TEST (treadfilter 0 --enable-error-stack -d all -d szip tfilters.h5)
- endif (HDF5_ENABLE_SZIP_SUPPORT)
- endif (USE_FILTER_DEFLATE)
+ endif ()
+ endif ()
# test for displaying objects with very long names
ADD_H5_TEST (tlonglinks 0 --enable-error-stack tlonglinks.h5)
@@ -1369,14 +1416,14 @@
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
ADD_H5_TEST (tbin2 0 --enable-error-stack -b BE -d float -o tbin2.bin tbinary.h5)
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
+ endif ()
# the NATIVE test can be validated with h5import/h5diff
ADD_H5_TEST_IMPORT (tbin3 out3D tbinary.h5 0 --enable-error-stack -d integer -b NATIVE)
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
ADD_H5_TEST (tbin4 0 --enable-error-stack -d double -b FILE -o tbin4.bin tbinary.h5)
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
+ endif ()
# test for dataset region references
ADD_H5_TEST (tdatareg 0 --enable-error-stack tdatareg.h5)
@@ -1421,3 +1468,8 @@
# test for non-existing file
ADD_H5_TEST (non_existing 1 --enable-error-stack tgroup.h5 non_existing.h5)
+
+##############################################################################
+### P L U G I N T E S T S
+##############################################################################
+ADD_H5_UD_TEST (h5dump_plugin_test 0 tudfilter --enable-error-stack tudfilter.h5)
diff --git a/tools/test/h5dump/CMakeTestsPBITS.cmake b/tools/test/h5dump/CMakeTestsPBITS.cmake
index c2ec9cc..986f1a1 100644
--- a/tools/test/h5dump/CMakeTestsPBITS.cmake
+++ b/tools/test/h5dump/CMakeTestsPBITS.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
##############################################################################
##############################################################################
@@ -88,18 +99,18 @@
foreach (pbits_h5_file ${HDF5_REFERENCE_TEST_PBITS})
get_filename_component(fname "${pbits_h5_file}" NAME)
HDFTEST_COPY_FILE("${pbits_h5_file}" "${PROJECT_BINARY_DIR}/testfiles/pbits/${fname}" "h5dump_pbits_files")
- endforeach (pbits_h5_file ${HDF5_REFERENCE_TEST_PBITS})
+ endforeach ()
foreach (ddl_pbits ${HDF5_REFERENCE_PBITS})
get_filename_component(fname "${ddl_pbits}" NAME)
HDFTEST_COPY_FILE("${HDF5_TOOLS_DIR}/testfiles/pbits/${ddl_pbits}" "${PROJECT_BINARY_DIR}/testfiles/pbits/${fname}" "h5dump_pbits_files")
- endforeach (ddl_pbits ${HDF5_REFERENCE_PBITS})
+ endforeach ()
foreach (ddl_pbits ${HDF5_ERROR_REFERENCE_PBITS})
get_filename_component(fname "${ddl_pbits}" NAME)
HDFTEST_COPY_FILE("${PROJECT_SOURCE_DIR}/errfiles/${ddl_pbits}" "${PROJECT_BINARY_DIR}/testfiles/pbits/${fname}" "h5dump_pbits_files")
- endforeach (ddl_pbits ${HDF5_ERROR_REFERENCE_PBITS})
+ endforeach ()
add_custom_target(h5dump_pbits_files ALL COMMENT "Copying files needed by h5dump_pbits tests" DEPENDS ${h5dump_pbits_files_list})
##############################################################################
@@ -108,7 +119,7 @@
##############################################################################
##############################################################################
- MACRO (ADD_H5_PBITS_TEST resultfile resultcode)
+ macro (ADD_H5_PBITS_TEST resultfile resultcode)
# If using memchecker add tests without using scripts
if (HDF5_ENABLE_USING_MEMCHECKER)
add_test (NAME H5DUMP-${resultfile} COMMAND $<TARGET_FILE:h5dump> ${ARGN})
@@ -119,7 +130,7 @@
if (NOT "${last_pbits_test}" STREQUAL "")
set_tests_properties (H5DUMP-${resultfile} PROPERTIES DEPENDS ${last_pbits_test})
endif ()
- else (HDF5_ENABLE_USING_MEMCHECKER)
+ else ()
add_test (
NAME H5DUMP-${resultfile}
COMMAND "${CMAKE_COMMAND}"
@@ -131,8 +142,8 @@
-D "TEST_REFERENCE=${resultfile}.ddl"
-P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
)
- endif (HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO (ADD_H5_PBITS_TEST file)
+ endif ()
+ endmacro ()
##############################################################################
##############################################################################
@@ -256,9 +267,9 @@
set_tests_properties (H5DUMP_PACKED_BITS-clearall-objects PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles/pbits")
if (NOT "${last_pbits_test}" STREQUAL "")
set_tests_properties (H5DUMP_PACKED_BITS-clearall-objects PROPERTIES DEPENDS ${last_pbits_test})
- endif (NOT "${last_pbits_test}" STREQUAL "")
+ endif ()
set (last_pbits_test "H5DUMP_PACKED_BITS-clearall-objects")
- endif (HDF5_ENABLE_USING_MEMCHECKER)
+ endif ()
# test failure handling
# Missing file name
diff --git a/tools/test/h5dump/CMakeTestsVDS.cmake b/tools/test/h5dump/CMakeTestsVDS.cmake
index aa85350..310f9ce 100644
--- a/tools/test/h5dump/CMakeTestsVDS.cmake
+++ b/tools/test/h5dump/CMakeTestsVDS.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
##############################################################################
##############################################################################
@@ -67,18 +78,18 @@
foreach (vds_h5_file ${HDF5_REFERENCE_TEST_VDS})
get_filename_component(fname "${vds_h5_file}" NAME)
HDFTEST_COPY_FILE("${HDF5_TOOLS_DIR}/testfiles/vds/${vds_h5_file}" "${PROJECT_BINARY_DIR}/testfiles/vds/${fname}" "h5dump_vds_files")
- endforeach (vds_h5_file ${HDF5_REFERENCE_TEST_VDS})
+ endforeach ()
foreach (ddl_vds ${HDF5_REFERENCE_VDS})
get_filename_component(fname "${ddl_vds}" NAME)
HDFTEST_COPY_FILE("${HDF5_TOOLS_DIR}/testfiles/vds/${ddl_vds}" "${PROJECT_BINARY_DIR}/testfiles/vds/${fname}" "h5dump_vds_files")
- endforeach (ddl_vds ${HDF5_REFERENCE_VDS})
+ endforeach ()
foreach (ddl_vds ${HDF5_ERROR_REFERENCE_VDS})
get_filename_component(fname "${ddl_vds}" NAME)
HDFTEST_COPY_FILE("${PROJECT_SOURCE_DIR}/errfiles/${ddl_vds}" "${PROJECT_BINARY_DIR}/testfiles/vds/${fname}" "h5dump_vds_files")
- endforeach (ddl_vds ${HDF5_ERROR_REFERENCE_VDS})
+ endforeach ()
add_custom_target(h5dump_vds_files ALL COMMENT "Copying files needed by h5dump_vds tests" DEPENDS ${h5dump_vds_files_list})
##############################################################################
@@ -87,7 +98,7 @@
##############################################################################
##############################################################################
- MACRO (ADD_H5_VDS_TEST resultfile resultcode)
+ macro (ADD_H5_VDS_TEST resultfile resultcode)
# If using memchecker add tests without using scripts
if (HDF5_ENABLE_USING_MEMCHECKER)
add_test (NAME H5DUMP-${resultfile} COMMAND $<TARGET_FILE:h5dump> ${ARGN})
@@ -98,7 +109,7 @@
if (NOT "${last_vds_test}" STREQUAL "")
set_tests_properties (H5DUMP-${resultfile} PROPERTIES DEPENDS ${last_VDS_test})
endif ()
- else (HDF5_ENABLE_USING_MEMCHECKER)
+ else ()
add_test (
NAME H5DUMP-${resultfile}
COMMAND "${CMAKE_COMMAND}"
@@ -110,10 +121,10 @@
-D "TEST_REFERENCE=${resultfile}.ddl"
-P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
)
- endif (HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO (ADD_H5_VDS_TEST file)
+ endif ()
+ endmacro ()
- MACRO (ADD_H5_VDS_LAYOUT resultfile resultcode)
+ macro (ADD_H5_VDS_LAYOUT resultfile resultcode)
# If using memchecker add tests without using scripts
if (HDF5_ENABLE_USING_MEMCHECKER)
add_test (NAME H5DUMP-${resultfile} COMMAND $<TARGET_FILE:h5dump> -p ${ARGN})
@@ -124,7 +135,7 @@
if (NOT "${last_vds_test}" STREQUAL "")
set_tests_properties (H5DUMP-${resultfile} PROPERTIES DEPENDS ${last_VDS_test})
endif ()
- else (HDF5_ENABLE_USING_MEMCHECKER)
+ else ()
add_test (
NAME H5DUMP-${resultfile}
COMMAND "${CMAKE_COMMAND}"
@@ -137,8 +148,8 @@
-P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
)
set_tests_properties (H5DUMP-${resultfile} PROPERTIES DEPENDS "H5DUMP-${resultfile}-clear-objects")
- endif (HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO (ADD_H5_VDS_LAYOUT file)
+ endif ()
+ endmacro ()
##############################################################################
##############################################################################
@@ -190,9 +201,9 @@
set_tests_properties (H5DUMP_VDS-clearall-objects PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles/vds")
if (NOT "${last_vds_test}" STREQUAL "")
set_tests_properties (H5DUMP_VDS-clearall-objects PROPERTIES DEPENDS ${last_vds_test})
- endif (NOT "${last_vds_test}" STREQUAL "")
+ endif ()
set (last_VDS_test "H5DUMP_VDS-clearall-objects")
- endif (HDF5_ENABLE_USING_MEMCHECKER)
+ endif ()
# See which filters are usable (and skip tests for filters we
# don't have). Do this by searching H5pubconf.h to see which
@@ -201,11 +212,11 @@
# detect whether the encoder is present.
if (H5_HAVE_FILTER_DEFLATE)
set (USE_FILTER_DEFLATE "true")
- endif (H5_HAVE_FILTER_DEFLATE)
+ endif ()
if (H5_HAVE_FILTER_SZIP)
set (USE_FILTER_SZIP "true")
- endif (H5_HAVE_FILTER_SZIP)
+ endif ()
# Data read
if (USE_FILTER_DEFLATE)
@@ -218,7 +229,7 @@
ADD_H5_VDS_TEST (vds-first 0 --vds-view-first-missing --enable-error-stack vds-percival-unlim-maxmin.h5)
ADD_H5_VDS_TEST (vds-gap1 0 -d /VDS-Eiger --vds-gap-size=1 --enable-error-stack vds-eiger.h5)
ADD_H5_VDS_TEST (vds-gap2 0 --vds-gap-size=2 --enable-error-stack vds-eiger.h5)
- endif (USE_FILTER_DEFLATE)
+ endif ()
# Layout read
if (USE_FILTER_DEFLATE)
@@ -230,4 +241,4 @@
ADD_H5_VDS_LAYOUT (tvds_layout-5 0 --enable-error-stack 5_vds.h5)
ADD_H5_VDS_LAYOUT (vds_layout-eiger 0 --enable-error-stack vds-eiger.h5)
ADD_H5_VDS_LAYOUT (vds_layout-maxmin 0 --enable-error-stack vds-percival-unlim-maxmin.h5)
- endif (USE_FILTER_DEFLATE)
+ endif ()
diff --git a/tools/test/h5dump/CMakeTestsXML.cmake b/tools/test/h5dump/CMakeTestsXML.cmake
index 79d3ff8..92e08a0 100644
--- a/tools/test/h5dump/CMakeTestsXML.cmake
+++ b/tools/test/h5dump/CMakeTestsXML.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
##############################################################################
##############################################################################
@@ -70,7 +81,8 @@
${HDF5_TOOLS_DIR}/testfiles/tarray6.h5.xml
${HDF5_TOOLS_DIR}/testfiles/tarray7.h5.xml
${HDF5_TOOLS_DIR}/testfiles/tattr.h5.xml
- ${HDF5_TOOLS_DIR}/testfiles/tbitfields.h5.xml
+ ${HDF5_TOOLS_DIR}/testfiles/tbitfields_be.h5.xml
+ ${HDF5_TOOLS_DIR}/testfiles/tbitfields_le.h5.xml
${HDF5_TOOLS_DIR}/testfiles/tcompound_complex.h5.xml
${HDF5_TOOLS_DIR}/testfiles/tcompound.h5.xml
${HDF5_TOOLS_DIR}/testfiles/tcompound2.h5.xml
@@ -130,12 +142,12 @@
foreach (tst_xml_h5_file ${HDF5_XML_REFERENCE_TEST_FILES})
get_filename_component(fname "${tst_xml_h5_file}" NAME)
HDFTEST_COPY_FILE("${tst_xml_h5_file}" "${PROJECT_BINARY_DIR}/testfiles/xml/${fname}" "h5dump_xml_files")
- endforeach (tst_xml_h5_file ${HDF5_XML_REFERENCE_TEST_FILES})
+ endforeach ()
foreach (tst_xml_other_file ${HDF5_XML_REFERENCE_FILES})
get_filename_component(fname "${tst_xml_other_file}" NAME)
HDFTEST_COPY_FILE("${tst_xml_other_file}" "${PROJECT_BINARY_DIR}/testfiles/xml/${fname}" "h5dump_xml_files")
- endforeach (tst_xml_other_file ${HDF5_XML_REFERENCE_FILES})
+ endforeach ()
add_custom_target(h5dump_xml_files ALL COMMENT "Copying files needed by h5dump_xml tests" DEPENDS ${h5dump_xml_files_list})
##############################################################################
@@ -144,20 +156,20 @@
##############################################################################
##############################################################################
- MACRO (ADD_XML_SKIP_H5_TEST skipresultfile skipresultcode testtype)
+ macro (ADD_XML_SKIP_H5_TEST skipresultfile skipresultcode testtype)
if (${testtype} STREQUAL "SKIP")
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
add_test (
NAME H5DUMP-XML-${skipresultfile}-SKIPPED
COMMAND ${CMAKE_COMMAND} -E echo "SKIP ${skipresultfile}.xml --xml ${ARGN}"
)
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
- else (${testtype} STREQUAL "SKIP")
+ endif ()
+ else ()
ADD_XML_H5_TEST (${skipresultfile} ${skipresultcode} ${ARGN})
- endif (${testtype} STREQUAL "SKIP")
- ENDMACRO (ADD_XML_SKIP_H5_TEST)
+ endif ()
+ endmacro ()
- MACRO (ADD_XML_H5_TEST resultfile resultcode)
+ macro (ADD_XML_H5_TEST resultfile resultcode)
if (HDF5_ENABLE_USING_MEMCHECKER)
add_test (NAME H5DUMP-XML-${resultfile} COMMAND $<TARGET_FILE:h5dump> --xml ${ARGN})
set_tests_properties (H5DUMP-XML-${resultfile} PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles/xml")
@@ -167,7 +179,7 @@
if (NOT "${last_xml_test}" STREQUAL "")
set_tests_properties (H5DUMP-XML-${resultfile} PROPERTIES DEPENDS ${last_xml_test})
endif ()
- else (HDF5_ENABLE_USING_MEMCHECKER)
+ else ()
add_test (
NAME H5DUMP-XML-${resultfile}
COMMAND "${CMAKE_COMMAND}"
@@ -179,8 +191,8 @@
-D "TEST_REFERENCE=${resultfile}.xml"
-P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
)
- endif (HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO (ADD_XML_H5_TEST file)
+ endif ()
+ endmacro ()
##############################################################################
##############################################################################
@@ -210,8 +222,10 @@
tarray7.h5.out.err
tattr.h5.out
tattr.h5.out.err
- tbitfields.h5.out
- tbitfields.h5.out.err
+ tbitfields_be.h5.out
+ tbitfields_be.h5.out.err
+ tbitfields_le.h5.out
+ tbitfields_le.h5.out.err
tcompound.h5.out
tcompound.h5.out.err
tcompound2.h5.out
@@ -326,14 +340,18 @@
set_tests_properties (H5DUMP-XML-clearall-objects PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles/xml")
if (NOT "${last_xml_test}" STREQUAL "")
set_tests_properties (H5DUMP-XML-clearall-objects PROPERTIES DEPENDS ${last_xml_test})
- endif (NOT "${last_xml_test}" STREQUAL "")
+ endif ()
set (last_test "H5DUMP-XML-clearall-objects")
- endif (HDF5_ENABLE_USING_MEMCHECKER)
+ endif ()
########## test XML
ADD_XML_H5_TEST (tall.h5 0 tall.h5)
ADD_XML_H5_TEST (tattr.h5 0 tattr.h5)
- ADD_XML_H5_TEST (tbitfields.h5 0 tbitfields.h5)
+ if (H5_WORDS_BIGENDIAN)
+ ADD_XML_H5_TEST (tbitfields_be.h5 0 tbitfields.h5)
+ else ()
+ ADD_XML_H5_TEST (tbitfields_le.h5 0 tbitfields.h5)
+ endif ()
ADD_XML_H5_TEST (tcompound.h5 0 tcompound.h5)
ADD_XML_H5_TEST (tcompound2.h5 0 tcompound2.h5)
ADD_XML_H5_TEST (tdatareg.h5 0 tdatareg.h5)
diff --git a/tools/test/h5dump/Makefile.am b/tools/test/h5dump/Makefile.am
index aed34ec..df97396 100644
--- a/tools/test/h5dump/Makefile.am
+++ b/tools/test/h5dump/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
@@ -30,13 +28,31 @@ TEST_SCRIPT=testh5dump.sh testh5dumppbits.sh testh5dumpvds.sh testh5dumpxml.sh
check_PROGRAMS=$(TEST_PROG) binread
check_SCRIPTS=$(TEST_SCRIPT)
SCRIPT_DEPEND=../../src/h5dump/h5dump$(EXEEXT)
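+# Run the plugin test script only when both shared libraries and dynamic
+# plugins are enabled; it needs the libdynlibdump shared library built below.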
+if HAVE_SHARED_CONDITIONAL
+if USE_PLUGINS_CONDITIONAL
+ TEST_SCRIPT += h5dump_plugin.sh
+endif
+endif
# All the programs depend on the hdf5 and h5tools libraries
LDADD=$(LIBH5TOOLS) $(LIBHDF5)
+if HAVE_SHARED_CONDITIONAL
+  # Build it as a shared library if shared libraries are enabled in configure.
+ dyn_LTLIBRARIES=libdynlibdump.la
+ libdynlibdump_la_SOURCES=dynlib_dump.c
+ libdynlibdump_la_LDFLAGS = -avoid-version -module -shared -export-dynamic
+
+libdynlibdump.la: $(libdynlibdump_la_OBJECTS) $(libdynlibdump_la_DEPENDENCIES) $(EXTRA_libdynlibdump_la_DEPENDENCIES)
+ $(AM_V_CCLD)$(libdynlibdump_la_LINK) $(am_libdynlibdump_la_rpath) $(libdynlibdump_la_OBJECTS) $(libdynlibdump_la_LIBADD)
+
+#install-exec-hook:
+# $(RM) $(DESTDIR)$(dyndir)/*dynlib*
+endif
+
# Temporary files. *.h5 are generated by h5dumpgentest. They should
# copied to the testfiles/ directory if update is required.
CHECK_CLEANFILES+=*.h5 *.bin
-DISTCLEANFILES=testh5dump.sh testh5dumppbits.sh testh5dumpxml.sh
+DISTCLEANFILES=testh5dump.sh testh5dumppbits.sh testh5dumpxml.sh h5dump_plugin.sh
include $(top_srcdir)/config/conclude.am
diff --git a/tools/test/h5dump/binread.c b/tools/test/h5dump/binread.c
index 74db92c..6165cd8 100644
--- a/tools/test/h5dump/binread.c
+++ b/tools/test/h5dump/binread.c
@@ -6,12 +6,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <stdio.h>
diff --git a/tools/test/h5dump/dynlib_dump.c b/tools/test/h5dump/dynlib_dump.c
new file mode 100644
index 0000000..571452e
--- /dev/null
+++ b/tools/test/h5dump/dynlib_dump.c
@@ -0,0 +1,89 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+/*
+ * Purpose: Tests the plugin module (H5PL)
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include "H5PLextern.h"
+
+#define H5Z_FILTER_DYNLIBUD 300
+#define MULTIPLIER 3
+
+static size_t H5Z_filter_dynlibud(unsigned int flags, size_t cd_nelmts,
+ const unsigned int *cd_values, size_t nbytes, size_t *buf_size, void **buf);
+
+/* This message derives from H5Z */
+const H5Z_class2_t H5Z_DYNLIBUD[1] = {{
+ H5Z_CLASS_T_VERS, /* H5Z_class_t version */
+ H5Z_FILTER_DYNLIBUD, /* Filter id number */
+ 1, 1, /* Encoding and decoding enabled */
+ "dynlibud", /* Filter name for debugging */
+ NULL, /* The "can apply" callback */
+ NULL, /* The "set local" callback */
+ (H5Z_func_t)H5Z_filter_dynlibud, /* The actual filter function */
+}};
+
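+/* H5PLget_plugin_type() and H5PLget_plugin_info() are the entry points the
+ * HDF5 plugin loader resolves when it scans HDF5_PLUGIN_PATH, so they must
+ * be exported by this shared library. */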
+H5PL_type_t H5PLget_plugin_type(void) {return H5PL_TYPE_FILTER;}
+const void *H5PLget_plugin_info(void) {return H5Z_DYNLIBUD;}
+
+/*-------------------------------------------------------------------------
+ * Function: H5Z_filter_dynlibud
+ *
+ * Purpose:    A dynlibud filter method that adds MULTIPLIER to each byte of
+ *              the original data during write and subtracts it during read.
+ *              It will be built as a shared library; the h5dump plugin test
+ *              script loads and uses this filter library.
+ *
+ * Return: Success: Data chunk size
+ *
+ * Failure: 0
+ *-------------------------------------------------------------------------
+ */
+static size_t
+H5Z_filter_dynlibud(unsigned int flags, size_t cd_nelmts,
+ const unsigned int *cd_values, size_t nbytes,
+ size_t *buf_size, void **buf)
+{
+ char *int_ptr = (char *)*buf; /* Pointer to the data values */
+ size_t buf_left = *buf_size; /* Amount of data buffer left to process */
+
+ /* Check for the correct number of parameters */
+ if(cd_nelmts > 0)
+ return(0);
+
+ /* Assignment to eliminate unused parameter warning. */
+ cd_values = cd_values;
+
+ if(flags & H5Z_FLAG_REVERSE) { /*read*/
+        /* Subtract MULTIPLIER from the original value */
+ while(buf_left > 0) {
+ char temp = *int_ptr;
+ *int_ptr = temp - MULTIPLIER;
+ int_ptr++;
+ buf_left -= sizeof(*int_ptr);
+ } /* end while */
+ } /* end if */
+ else { /*write*/
+        /* Add MULTIPLIER to the original value */
+ while(buf_left > 0) {
+ char temp = *int_ptr;
+ *int_ptr = temp + MULTIPLIER;
+ int_ptr++;
+ buf_left -= sizeof(*int_ptr);
+ } /* end while */
+ } /* end else */
+
+ return nbytes;
+} /* end H5Z_filter_dynlibud() */
+
diff --git a/tools/test/h5dump/h5dump_plugin.sh.in b/tools/test/h5dump/h5dump_plugin.sh.in
new file mode 100644
index 0000000..6a00a16
--- /dev/null
+++ b/tools/test/h5dump/h5dump_plugin.sh.in
@@ -0,0 +1,241 @@
+#! /bin/sh
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
+srcdir=@srcdir@
+TOP_BUILDDIR=@top_builddir@
+
+# Determine backward compatibility options enabled
+DEPRECATED_SYMBOLS="@DEPRECATED_SYMBOLS@"
+
+EXIT_SUCCESS=0
+EXIT_FAILURE=1
+
+H5DUMP=../../src/h5dump/h5dump # The h5dump tool name
+H5DUMP_BIN=`pwd`/$H5DUMP # The path of the h5dump tool binary
+
+nerrors=0
+verbose=yes
+h5haveexitcode=yes # default is yes
+
+TEST_NAME=ud_plugin
+FROM_DIR=`pwd`/.libs
+PLUGIN_LIB="$FROM_DIR/libdynlibdump.*"
+PLUGIN_LIBDIR=testdir3
+
+RM='rm -rf'
+GREP='grep'
+CMP='cmp'
+DIFF='diff -c'
+CP='cp'
+DIRNAME='dirname'
+LS='ls'
+AWK='awk'
+
+# source dirs
+SRC_TOOLS="$srcdir/../.."
+
+# testfiles source dirs for tools
+SRC_TOOLS_TESTFILES="$SRC_TOOLS/testfiles"
+SRC_H5DUMP_TESTFILES="$SRC_TOOLS_TESTFILES"
+
+TESTDIR=./testplug
+test -d $TESTDIR || mkdir $TESTDIR
+
+######################################################################
+# test files
+# --------------------------------------------------------------------
+# All the test files are copied from the source directory to the test directory.
+# NOTE: Keep this framework to add/remove test files.
+#       Any test files from other tools can be used in this framework.
+#       This list is also used to check that the files exist.
+#       A file can be commented out by prefixing it with '#' (no space).
+# --------------------------------------------------------------------
+LIST_HDF5_TEST_FILES="
+$SRC_H5DUMP_TESTFILES/tudfilter.h5
+$SRC_H5DUMP_TESTFILES/tudfilter.ddl
+"
+
+# RUNSERIAL is used. Check if it can return the exit code from the executable correctly.
+if [ -n "$RUNSERIAL_NOEXITCODE" ]; then
+ echo "***Warning*** Serial Exit Code is not passed back to shell correctly."
+ echo "***Warning*** Exit code checking is skipped."
+ h5haveexitcode=no
+fi
+
+# Main Body
+# Create the test directories if they do not exist yet.
+test -d $PLUGIN_LIBDIR || mkdir -p $PLUGIN_LIBDIR
+if [ $? != 0 ]; then
+ echo "Failed to create test directory($PLUGIN_LIBDIR)"
+ exit $EXIT_FAILURE
+fi
+
+# copy plugin library for test
+$CP $PLUGIN_LIB $PLUGIN_LIBDIR
+if [ $? != 0 ]; then
+ echo "Failed to copy plugin library ($PLUGIN_LIB) for test."
+ exit $EXIT_FAILURE
+fi
+
+# setup plugin path
+ENVCMD="env HDF5_PLUGIN_PATH=../${PLUGIN_LIBDIR}"
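+# (relative to $TESTDIR, since TOOLTEST cd's into it before running h5dump)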
+
+#
+# copy test files and expected output files from source dirs to test dir
+#
+COPY_TESTFILES="$LIST_HDF5_TEST_FILES"
+
+COPY_TESTFILES_TO_TESTDIR()
+{
+    # Copy the test files. Use -f to make sure we get a fresh copy.
+ for tstfile in $COPY_TESTFILES
+ do
+ # ignore '#' comment
+ echo $tstfile | tr -d ' ' | grep '^#' > /dev/null
+ RET=$?
+ if [ $RET -eq 1 ]; then
+ # skip cp if srcdir is same as destdir
+            # this occurs when the build/test is performed in the source dir
+            # and would make cp fail
+ SDIR=`$DIRNAME $tstfile`
+ INODE_SDIR=`$LS -i -d $SDIR | $AWK -F' ' '{print $1}'`
+ INODE_DDIR=`$LS -i -d $TESTDIR | $AWK -F' ' '{print $1}'`
+ if [ "$INODE_SDIR" != "$INODE_DDIR" ]; then
+ $CP -f $tstfile $TESTDIR
+ if [ $? -ne 0 ]; then
+ echo "Error: FAILED to copy $tstfile ."
+
+ # Comment out this to CREATE expected file
+ exit $EXIT_FAILURE
+ fi
+ fi
+ fi
+ done
+}
+
+CLEAN_TESTFILES_AND_TESTDIR()
+{
+ # skip rm if srcdir is same as destdir
+    # this occurs when the build/test is performed in the source dir,
+    # where removing TESTDIR would delete the source files
+ SDIR=$SRC_H5DUMP_TESTFILES
+ INODE_SDIR=`$LS -i -d $SDIR | $AWK -F' ' '{print $1}'`
+ INODE_DDIR=`$LS -i -d $TESTDIR | $AWK -F' ' '{print $1}'`
+ if [ "$INODE_SDIR" != "$INODE_DDIR" ]; then
+ $RM $TESTDIR
+ fi
+}
+
+# Print a $* message left justified in a field of 70 characters
+#
+MESSAGE() {
+ SPACES=" "
+ echo "$* $SPACES" | cut -c1-70 | tr -d '\012'
+}
+
+# Print a one-line message left justified in a field of 70 characters
+# beginning with the word "Testing".
+#
+TESTING() {
+ SPACES=" "
+ echo "Testing $* $SPACES" | cut -c1-70 | tr -d '\012'
+}
+
+# Print a one-line message left justified in a field of 70 characters
+# beginning with the word "Verifying".
+#
+VERIFY() {
+ MESSAGE "Verifying $*"
+}
+
+# Source in the output filter function definitions.
+. $srcdir/../../../bin/output_filter.sh
+
+# Run a test and print PASS or *FAIL*. If a test fails then increment
+# the `nerrors' global variable and (if $verbose is set) display the
+# difference between the actual output and the expected output. The
+# expected output is given as the first argument to this function and
+# the actual output file is calculated by replacing the `.ddl' with
+# `.out'. The actual output is not removed if $HDF5_NOCLEANUP has a
+# non-zero value.
+# ADD_H5_TEST
+TOOLTEST() {
+ expect="$TESTDIR/$1"
+ actual="$TESTDIR/`basename $1 .ddl`.out"
+ actual_err="$TESTDIR/`basename $1 .ddl`.err"
+ actual_sav=${actual}-sav
+ actual_err_sav=${actual_err}-sav
+ shift
+
+ # Run test.
+ TESTING $H5DUMP $@
+ (
+ cd $TESTDIR
+ $ENVCMD $RUNSERIAL $H5DUMP_BIN "$@"
+ ) >$actual 2>$actual_err
+
+ # save actual and actual_err in case they are needed later.
+ cp $actual $actual_sav
+ STDOUT_FILTER $actual
+ cp $actual_err $actual_err_sav
+ STDERR_FILTER $actual_err
+ cat $actual_err >> $actual
+
+ if [ ! -f $expect ]; then
+ # Create the expect file if it doesn't yet exist.
+ echo " CREATED"
+ cp $actual $expect
+ echo " Expected result (*.ddl) missing"
+ nerrors="`expr $nerrors + 1`"
+ elif $CMP $expect $actual > /dev/null 2>&1 ; then
+ echo " PASSED"
+ else
+ echo "*FAILED*"
+ echo " Expected result (*.ddl) differs from actual result (*.out)"
+ nerrors="`expr $nerrors + 1`"
+        test yes = "$verbose" && $DIFF $expect $actual |sed 's/^/    /'
+ fi
+
+ # Clean up output file
+ if test -z "$HDF5_NOCLEANUP"; then
+        rm -f $actual $actual_err $actual_sav $actual_err_sav
+ fi
+
+}
+
+##############################################################################
+### T H E T E S T S
+##############################################################################
+# prepare for test
+COPY_TESTFILES_TO_TESTDIR
+
+# Run the test
+TOOLTEST tudfilter.ddl --enable-error-stack tudfilter.h5
+
+# print results
+if test $nerrors -ne 0 ; then
+ echo "$nerrors errors encountered"
+ exit_code=$EXIT_FAILURE
+else
+ echo "All Plugin API tests passed."
+ exit_code=$EXIT_SUCCESS
+fi
+
+# Clean up temporary files/directories
+CLEAN_TESTFILES_AND_TESTDIR
+
+# Clean up temporary files/directories and leave
+$RM $PLUGIN_LIBDIR
+
+exit $exit_code
diff --git a/tools/test/h5dump/h5dumpgentest.c b/tools/test/h5dump/h5dumpgentest.c
index 335eafa..169eddb 100644
--- a/tools/test/h5dump/h5dumpgentest.c
+++ b/tools/test/h5dump/h5dumpgentest.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -114,6 +112,7 @@
#define FILE81 "tints4dims.h5"
#define FILE82 "tcompound_complex2.h5"
#define FILE83 "tvlenstr_array.h5"
+#define FILE84 "tudfilter.h5"
/*-------------------------------------------------------------------------
* prototypes
@@ -153,6 +152,23 @@ const H5Z_class2_t H5Z_MYFILTER[1] = {{
myfilter, /* The actual filter function */
}};
+#define H5Z_FILTER_DYNLIBUD 300
+#define MULTIPLIER 3
+
+static size_t H5Z_filter_dynlibud(unsigned int flags, size_t cd_nelmts,
+ const unsigned int *cd_values, size_t nbytes, size_t *buf_size, void **buf);
+
+/* This message derives from H5Z */
+const H5Z_class2_t H5Z_DYNLIBUD[1] = {{
+ H5Z_CLASS_T_VERS, /* H5Z_class_t version */
+ H5Z_FILTER_DYNLIBUD, /* Filter id number */
+ 1, 1, /* Encoding and decoding enabled */
+ "dynlibud", /* Filter name for debugging */
+ NULL, /* The "can apply" callback */
+ NULL, /* The "set local" callback */
+ (H5Z_func_t)H5Z_filter_dynlibud, /* The actual filter function */
+}};
+
/* A UD link traversal function. Shouldn't actually be called. */
static hid_t UD_traverse(H5_ATTR_UNUSED const char * link_name, H5_ATTR_UNUSED hid_t cur_group,
@@ -271,8 +287,9 @@ typedef struct s1_t {
#define F64_DIM1 (F64_ARRAY_BUF_LEN / sizeof(int) + 1)
/* File 65 macros */
-#define STRATEGY H5F_FILE_SPACE_AGGR_VFD /* File space handling strategy */
-#define THRESHOLD10 10 /* Free space section threshold */
+#define STRATEGY H5F_FSPACE_STRATEGY_NONE /* File space handling strategy */
+#define THRESHOLD10 10 /* Free-space section threshold */
+#define FSPACE_PAGE_SIZE 8192 /* File space page size */
/* "FILE66" macros and for FILE69 */
#define F66_XDIM 8
@@ -379,9 +396,9 @@ typedef struct s1_t {
/* Dataset dimensions */
#define F82_DIM32 32
#define F82_RANK 1
-//#define F82_RANK2 2
-//#define F82_RANK3 3
-//#define F82_RANK4 4
+/* #define F82_RANK2 2 */
+/* #define F82_RANK3 3 */
+/* #define F82_RANK4 4 */
/* "File 83" macros */
/* Name of dataset to create in datafile */
@@ -7024,8 +7041,9 @@ gent_extlinks(void)
/*-------------------------------------------------------------------------
* Function: gent_fs_strategy_threshold
*
- * Purpose: Generate a file with non-default file space strategy and
- * non-default free-space section threshold.
+ * Purpose: Generate a file with non-default file space strategy,
+ *              non-default free-space section threshold, and
+ *              non-default file space page size.
*-------------------------------------------------------------------------
*/
static void
@@ -7038,7 +7056,8 @@ gent_fs_strategy_threshold(void)
fcpl = H5Pcreate(H5P_FILE_CREATE);
/* Set file space information */
- H5Pset_file_space(fcpl, STRATEGY, (hsize_t)THRESHOLD10);
+ H5Pset_file_space_strategy(fcpl, STRATEGY, TRUE, (hsize_t)THRESHOLD10);
+ H5Pset_file_space_page_size(fcpl, (hsize_t)FSPACE_PAGE_SIZE);
/* Create the file with the specified strategy and threshold */
fid = H5Fcreate(FILE65, H5F_ACC_TRUNC, fcpl, H5P_DEFAULT);
@@ -9698,18 +9717,19 @@ static void gent_bitnopaquefields(void)
/* Compound datatype */
typedef struct s_t
{
- unsigned char a;
- unsigned int b;
- unsigned long c;
- unsigned long long d;
+ uint8_t a;
+ uint16_t b;
+ uint32_t c;
+ uint64_t d;
} s_t;
+
hid_t file, grp=-1, type=-1, space=-1, dset=-1;
size_t i;
hsize_t nelmts = F80_DIM32;
- unsigned char buf[F80_DIM32]; /* bitfield, opaque */
- unsigned int buf2[F80_DIM32]; /* bitfield, opaque */
- unsigned long buf3[F80_DIM32]; /* bitfield, opaque */
- unsigned long long buf4[F80_DIM32]; /* bitfield, opaque */
+ uint8_t buf[F80_DIM32]; /* bitfield, opaque */
+ uint16_t buf2[F80_DIM32]; /* bitfield, opaque */
+ uint32_t buf3[F80_DIM32]; /* bitfield, opaque */
+ uint64_t buf4[F80_DIM32]; /* bitfield, opaque */
s_t buf5[F80_DIM32]; /* compound */
file = H5Fcreate(FILE80, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
@@ -9720,7 +9740,7 @@ static void gent_bitnopaquefields(void)
if ((space = H5Screate_simple(1, &nelmts, NULL)) >= 0) {
if ((dset = H5Dcreate2(grp, "bitfield_1", type, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) >= 0) {
for (i = 0; i < nelmts; i++) {
- buf[i] = (unsigned char)0xff ^ (unsigned char)i;
+ buf[i] = (uint8_t)0xff ^ (uint8_t)i;
}
H5Dwrite(dset, type, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf);
H5Dclose(dset);
@@ -9735,7 +9755,7 @@ static void gent_bitnopaquefields(void)
if ((space = H5Screate_simple(1, &nelmts, NULL)) >= 0) {
if ((dset = H5Dcreate2(grp, "bitfield_2", type, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) >= 0) {
for (i = 0; i < nelmts; i++) {
- buf2[i] = (unsigned int)0xffff ^ (unsigned int)(i * 16);
+ buf2[i] = (uint16_t)0xffff ^ (uint16_t)(i * 16);
}
H5Dwrite(dset, type, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf2);
H5Dclose(dset);
@@ -9750,7 +9770,7 @@ static void gent_bitnopaquefields(void)
if ((space = H5Screate_simple(1, &nelmts, NULL)) >= 0) {
if ((dset = H5Dcreate2(grp, "bitfield_3", type, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) >= 0) {
for (i = 0; i < nelmts; i++) {
- buf3[i] = (unsigned long)0xffffffff ^ (unsigned long)(i * 32);
+ buf3[i] = (uint32_t)0xffffffff ^ (uint32_t)(i * 32);
}
H5Dwrite(dset, type, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf3);
H5Dclose(dset);
@@ -9765,7 +9785,7 @@ static void gent_bitnopaquefields(void)
if ((space = H5Screate_simple(1, &nelmts, NULL)) >= 0) {
if ((dset = H5Dcreate2(grp, "bitfield_4", type, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) >= 0) {
for (i = 0; i < nelmts; i++) {
- buf4[i] = (unsigned long long)0xffffffffffffffff ^ (unsigned long long)(i * 64);
+ buf4[i] = (uint64_t)0xffffffffffffffff ^ (uint64_t)(i * 64);
}
H5Dwrite(dset, type, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf4);
H5Dclose(dset);
@@ -9785,7 +9805,7 @@ static void gent_bitnopaquefields(void)
if ((space = H5Screate_simple(1, &nelmts, NULL)) >= 0) {
if ((dset = H5Dcreate2(grp, "opaque_1", type, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) >= 0) {
for(i = 0; i < nelmts; i++)
- buf[i] = (unsigned char)0xff ^ (unsigned char)i;
+ buf[i] = (uint8_t)0xff ^ (uint8_t)i;
H5Dwrite(dset, type, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf);
H5Dclose(dset);
}
@@ -9801,7 +9821,7 @@ static void gent_bitnopaquefields(void)
if ((space = H5Screate_simple(1, &nelmts, NULL)) >= 0) {
if ((dset = H5Dcreate2(grp, "opaque_2", type, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) >= 0) {
for(i = 0; i < nelmts; i++)
- buf2[i] = (unsigned int)0xffff ^ (unsigned int)(i * 16);
+ buf2[i] = (uint16_t)0xffff ^ (uint16_t)(i * 16);
H5Dwrite(dset, type, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf2);
H5Dclose(dset);
@@ -9824,10 +9844,10 @@ static void gent_bitnopaquefields(void)
if ((space = H5Screate_simple(1, &nelmts, NULL)) >= 0) {
if ((dset = H5Dcreate2(grp, "compound_1", type, space, H5P_DEFAULT, H5P_DEFAULT, H5P_DEFAULT)) >= 0) {
for(i = 0; i < nelmts; i++) {
- buf5[i].a = (unsigned char)0xff ^ (unsigned char)i;
- buf5[i].b = (unsigned int)0xffff ^ (unsigned int)(i * 16);
- buf5[i].c = (unsigned long)0xffffffff ^ (unsigned long)(i * 32);
- buf5[i].d = (unsigned long long)0xffffffffffffffff ^ (unsigned long long)(i * 64);
+ buf5[i].a = (uint8_t)0xff ^ (uint8_t)i;
+ buf5[i].b = (uint16_t)0xffff ^ (uint16_t)(i * 16);
+ buf5[i].c = (uint32_t)0xffffffff ^ (uint32_t)(i * 32);
+ buf5[i].d = (uint64_t)0xffffffffffffffff ^ (uint64_t)(i * 64);
}
H5Dwrite(dset, type, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf5);
@@ -10286,6 +10306,132 @@ static void gent_vlenstr_array(void)
H5Fclose(file);
}
+/*-------------------------------------------------------------------------
+ * Function: gent_udfilter
+ *
+ * Purpose:     Generate a file to be used in testing the user-defined filter plugin.
+ *-------------------------------------------------------------------------
+ */
+static void gent_udfilter(void)
+{
+ hid_t fid; /* file id */
+ hid_t dcpl; /* dataset creation property list */
+ hid_t dsid; /* dataset ID */
+ hid_t sid; /* dataspace ID */
+ hid_t tid; /* datatype ID */
+
+ hsize_t dims1[RANK] = {DIM1,DIM2};
+ hsize_t chunk_dims[RANK] = {CDIM1,CDIM2};
+ int buf1[DIM1][DIM2];
+ int i, j, n, ret;
+
+ for(i=n=0; i<DIM1; i++){
+ for(j=0; j<DIM2; j++){
+ buf1[i][j]=n++;
+ }
+ }
+
+ /* create a file */
+ fid = H5Fcreate(FILE84, H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
+ HDassert(fid>=0);
+
+ /* create a space */
+ sid = H5Screate_simple(SPACE2_RANK, dims1, NULL);
+
+ dcpl = H5Pcreate(H5P_DATASET_CREATE);
+ HDassert(dcpl>=0);
+
+ ret = H5Pset_layout(dcpl, H5D_CHUNKED);
+ HDassert(ret >= 0);
+
+ ret = H5Pset_chunk(dcpl, SPACE2_RANK, chunk_dims);
+ HDassert(ret >= 0);
+
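+    /* Register the filter with the library directly so the generator can
+     * write the compressed data without the plugin; h5dump later reads it
+     * back by loading the same filter from HDF5_PLUGIN_PATH. */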
+ ret = H5Zregister (H5Z_DYNLIBUD);
+ HDassert(ret >= 0);
+
+ ret = H5Pset_filter (dcpl, H5Z_FILTER_DYNLIBUD, H5Z_FLAG_MANDATORY, 0, NULL);
+ HDassert(ret >= 0);
+
+ /* create the dataset */
+ dsid = H5Dcreate2(fid, "dynlibud", H5T_STD_I32LE, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT);
+ HDassert(dsid >= 0);
+
+ /* write */
+ ret = H5Dwrite(dsid, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf1);
+ HDassert(ret >= 0);
+
+ /* close */
+ ret = H5Dclose(dsid);
+ HDassert(ret >= 0);
+
+ /* remove the filters from the dcpl */
+ ret = H5Premove_filter(dcpl, H5Z_FILTER_ALL);
+ HDassert(ret >= 0);
+
+ /*-------------------------------------------------------------------------
+ * close
+ *-------------------------------------------------------------------------
+ */
+ ret = H5Sclose(sid);
+ HDassert(ret >= 0);
+
+ ret = H5Pclose(dcpl);
+ HDassert(ret >= 0);
+
+ ret = H5Fclose(fid);
+ HDassert(ret >= 0);
+}
+
+/*-------------------------------------------------------------------------
+ * Function: H5Z_filter_dynlibud
+ *
+ * Purpose:    A dynlibud filter method that adds MULTIPLIER to each byte of
+ *              the original data during write and subtracts it during read.
+ *              The same filter is also built as a shared library
+ *              (dynlib_dump.c) so the tools tests can load it as a plugin.
+ *
+ * Return: Success: Data chunk size
+ *
+ * Failure: 0
+ *-------------------------------------------------------------------------
+ */
+static size_t
+H5Z_filter_dynlibud(unsigned int flags, size_t cd_nelmts,
+ const unsigned int *cd_values, size_t nbytes,
+ size_t *buf_size, void **buf)
+{
+ char *int_ptr = (char *)*buf; /* Pointer to the data values */
+ size_t buf_left = *buf_size; /* Amount of data buffer left to process */
+
+ /* Check for the correct number of parameters */
+ if(cd_nelmts > 0)
+ return(0);
+
+ /* Assignment to eliminate unused parameter warning. */
+ cd_values = cd_values;
+
+ if(flags & H5Z_FLAG_REVERSE) { /*read*/
+        /* Subtract MULTIPLIER from the original value */
+ while(buf_left > 0) {
+ char temp = *int_ptr;
+ *int_ptr = temp - MULTIPLIER;
+ int_ptr++;
+ buf_left -= sizeof(*int_ptr);
+ } /* end while */
+ } /* end if */
+ else { /*write*/
+        /* Add MULTIPLIER to the original value */
+ while(buf_left > 0) {
+ char temp = *int_ptr;
+ *int_ptr = temp + MULTIPLIER;
+ int_ptr++;
+ buf_left -= sizeof(*int_ptr);
+ } /* end while */
+ } /* end else */
+
+ return nbytes;
+} /* end H5Z_filter_dynlibud() */
/*-------------------------------------------------------------------------
* Function: main
@@ -10383,6 +10529,8 @@ int main(void)
gent_intsfourdims();
+ gent_udfilter();
+
return 0;
}
diff --git a/tools/test/h5dump/testh5dump.sh.in b/tools/test/h5dump/testh5dump.sh.in
index 14d753c..1136742 100644
--- a/tools/test/h5dump/testh5dump.sh.in
+++ b/tools/test/h5dump/testh5dump.sh.in
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Tests for the h5dump tool
@@ -44,6 +42,8 @@ AWK='awk'
# Skip plugin module to test missing filter
ENVCMD="env HDF5_PLUGIN_PRELOAD=::"
+WORDS_BIGENDIAN="@WORDS_BIGENDIAN@"
+
nerrors=0
verbose=yes
@@ -99,6 +99,7 @@ $SRC_H5DUMP_TESTFILES/tattrintsize.h5
$SRC_H5DUMP_TESTFILES/tattrreg.h5
$SRC_H5DUMP_TESTFILES/tbigdims.h5
$SRC_H5DUMP_TESTFILES/tbinary.h5
+$SRC_H5DUMP_TESTFILES/tbitnopaque.h5
$SRC_H5DUMP_TESTFILES/tchar.h5
$SRC_H5DUMP_TESTFILES/tcmpdattrintsize.h5
$SRC_H5DUMP_TESTFILES/tcmpdintsize.h5
@@ -219,6 +220,8 @@ $SRC_H5DUMP_TESTFILES/tbin3.ddl
$SRC_H5DUMP_TESTFILES/tbin4.ddl
$SRC_H5DUMP_TESTFILES/tbinregR.ddl
$SRC_H5DUMP_TESTFILES/tbigdims.ddl
+$SRC_H5DUMP_TESTFILES/tbitnopaque_be.ddl
+$SRC_H5DUMP_TESTFILES/tbitnopaque_le.ddl
$SRC_H5DUMP_TESTFILES/tboot1.ddl
$SRC_H5DUMP_TESTFILES/tboot2.ddl
$SRC_H5DUMP_TESTFILES/tboot2A.ddl
@@ -489,6 +492,8 @@ TOOLTEST() {
# Create the expect file if it doesn't yet exist.
echo " CREATED"
cp $actual $expect
+ echo " Expected result (*.ddl) missing"
+ nerrors="`expr $nerrors + 1`"
elif $xCMP $expect $actual > /dev/null 2>&1 ; then
echo " PASSED"
else
@@ -531,11 +536,15 @@ TOOLTEST2() {
# Create the expect file if it doesn't yet exist.
echo " CREATED"
cp $actual $expect
+ echo " Expected result (*.ddl) missing"
+ nerrors="`expr $nerrors + 1`"
elif $CMP $expect $actual; then
if [ ! -f $expectdata ]; then
# Create the expect data file if it doesn't yet exist.
echo " CREATED"
cp $actualdata $expectdata
+ echo " Expected data (*.exp) missing"
+ nerrors="`expr $nerrors + 1`"
elif $CMP $expectdata $actualdata; then
echo " PASSED"
else
@@ -586,16 +595,22 @@ TOOLTEST2A() {
# Create the expect file if it doesn't yet exist.
echo " CREATED"
cp $actual $expect
+ echo " Expected result (*.ddl) missing"
+ nerrors="`expr $nerrors + 1`"
elif $CMP $expect $actual; then
if [ ! -f $expectdata ]; then
# Create the expect data file if it doesn't yet exist.
echo " CREATED"
cp $actualdata $expectdata
+ echo " Expected data (*.exp) missing"
+ nerrors="`expr $nerrors + 1`"
elif $DIFF $expectdata $actualdata; then
if [ ! -f $expectmeta ]; then
# Create the expect meta file if it doesn't yet exist.
echo " CREATED"
cp $actualmeta $expectmeta
+ echo " Expected metafile (*.ddl) missing"
+ nerrors="`expr $nerrors + 1`"
elif $CMP $expectmeta $actualmeta; then
echo " PASSED"
else
@@ -647,6 +662,8 @@ TOOLTEST2B() {
# Create the expect data file if it doesn't yet exist.
echo " CREATED"
cp $actualdata $expectdata
+ echo " Expected data (*.exp) missing"
+ nerrors="`expr $nerrors + 1`"
elif $CMP $expectdata $actualdata; then
echo " PASSED"
else
@@ -702,6 +719,8 @@ TOOLTEST3() {
# Create the expect file if it doesn't yet exist.
echo " CREATED"
cp $actual $expect
+ echo " Expected result (*.ddl) missing"
+ nerrors="`expr $nerrors + 1`"
elif $CMP $expect $actual; then
echo " PASSED"
else
@@ -759,15 +778,17 @@ TOOLTEST4() {
# Create the expect file if it doesn't yet exist.
echo " CREATED"
cp $actual $expect
+ echo " Expected result (*.ddl) missing"
+ nerrors="`expr $nerrors + 1`"
elif $CMP $expect $actual; then
if $CMP $expect_err $actual_ext; then
echo " PASSED"
- else
- echo "*FAILED*"
- echo " Expected result (*.err) differs from actual result (*.oerr)"
- nerrors="`expr $nerrors + 1`"
- test yes = "$verbose" && $DIFF $expect_err $actual_ext |sed 's/^/ /'
- fi
+ else
+ echo "*FAILED*"
+ echo " Expected result (*.err) differs from actual result (*.oerr)"
+ nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF $expect_err $actual_ext |sed 's/^/ /'
+ fi
else
echo "*FAILED*"
echo " Expected result (*.ddl) differs from actual result (*.out)"
@@ -823,15 +844,17 @@ TOOLTEST5() {
# Create the expect file if it doesn't yet exist.
echo " CREATED"
cp $actual $expect
+ echo " Expected result (*.ddl) missing"
+ nerrors="`expr $nerrors + 1`"
elif $CMP $expect $actual; then
if $CMP $expect_err $actual_ext; then
echo " PASSED"
- else
- echo "*FAILED*"
- echo " Expected result (*.err) differs from actual result (*.oerr)"
- nerrors="`expr $nerrors + 1`"
- test yes = "$verbose" && $DIFF $expect_err $actual_ext |sed 's/^/ /'
- fi
+ else
+ echo "*FAILED*"
+ echo " Expected result (*.err) differs from actual result (*.oerr)"
+ nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF $expect_err $actual_ext |sed 's/^/ /'
+ fi
else
echo "*FAILED*"
echo " Expected result (*.ddl) differs from actual result (*.out)"
@@ -865,6 +888,8 @@ TOOLTEST_HELP() {
# Create the expect data file if it doesn't yet exist.
echo " CREATED"
cp $actual $expect-CREATED
+ echo " Expected output (*.txt) missing"
+ nerrors="`expr $nerrors + 1`"
elif $CMP $expect $actual; then
echo " PASSED"
else
@@ -1038,9 +1063,13 @@ TOOLTEST tcomp-2.ddl --enable-error-stack -N /type1 --any_path /type2 --any_path
TOOLTEST4 tcomp-3.ddl --enable-error-stack -t /#6632 -g /group2 tcompound.h5
# test complicated compound datatype
TOOLTEST tcomp-4.ddl --enable-error-stack tcompound_complex.h5
-TOOLTEST tcompound_complex.ddl --enable-error-stack tcompound_complex2.h5
+TOOLTEST tcompound_complex2.ddl --enable-error-stack tcompound_complex2.h5
# tests for bitfields and opaque data types
-TOOLTEST tbitnopaque.ddl --enable-error-stack tbitnopaque.h5
+if test $WORDS_BIGENDIAN != "yes"; then
+TOOLTEST tbitnopaque_le.ddl --enable-error-stack tbitnopaque.h5
+else
+TOOLTEST tbitnopaque_be.ddl --enable-error-stack tbitnopaque.h5
+fi
#test for the nested compound type
TOOLTEST tnestcomp-1.ddl --enable-error-stack tnestedcomp.h5
diff --git a/tools/test/h5dump/testh5dumppbits.sh.in b/tools/test/h5dump/testh5dumppbits.sh.in
index 92247d3..9cf5c99 100644
--- a/tools/test/h5dump/testh5dumppbits.sh.in
+++ b/tools/test/h5dump/testh5dumppbits.sh.in
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Tests for the h5dump tool with packed bits type files
@@ -243,6 +241,8 @@ TOOLTEST() {
# Create the expect file if it doesn't yet exist.
echo " CREATED"
cp $actual $expect
+ echo " Expected result (*.ddl) missing"
+ nerrors="`expr $nerrors + 1`"
elif $CMP $expect $actual; then
echo " PASSED"
else
@@ -284,11 +284,15 @@ TOOLTEST2() {
# Create the expect file if it doesn't yet exist.
echo " CREATED"
cp $actual $expect
+ echo " Expected result (*.ddl) missing"
+ nerrors="`expr $nerrors + 1`"
elif $CMP $expect $actual; then
if [ ! -f $expectdata ]; then
# Create the expect data file if it doesn't yet exist.
echo " CREATED"
cp $actualdata $expectdata
+ echo " Expected data (*.exp) missing"
+ nerrors="`expr $nerrors + 1`"
elif $CMP $expectdata $actualdata; then
echo " PASSED"
else
@@ -350,6 +354,8 @@ TOOLTEST3() {
# Create the expect file if it doesn't yet exist.
echo " CREATED"
cp $actual $expect
+ echo " Expected result (*.ddl) missing"
+ nerrors="`expr $nerrors + 1`"
elif $CMP $expect $actual; then
echo " PASSED"
else
@@ -406,15 +412,17 @@ TOOLTEST4() {
# Create the expect file if it doesn't yet exist.
echo " CREATED"
cp $actual $expect
+ echo " Expected result (*.ddl) missing"
+ nerrors="`expr $nerrors + 1`"
elif $CMP $expect $actual; then
if $CMP $expect_err $actual_ext; then
echo " PASSED"
- else
- echo "*FAILED*"
- echo " Expected result (*.err) differs from actual result (*.oerr)"
- nerrors="`expr $nerrors + 1`"
- test yes = "$verbose" && $DIFF $expect_err $actual_ext |sed 's/^/ /'
- fi
+ else
+ echo "*FAILED*"
+ echo " Expected result (*.err) differs from actual result (*.oerr)"
+ nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF $expect_err $actual_ext |sed 's/^/ /'
+ fi
else
echo "*FAILED*"
echo " Expected result (*.ddl) differs from actual result (*.out)"
diff --git a/tools/test/h5dump/testh5dumpvds.sh.in b/tools/test/h5dump/testh5dumpvds.sh.in
index 459d506..f89234e 100644
--- a/tools/test/h5dump/testh5dumpvds.sh.in
+++ b/tools/test/h5dump/testh5dumpvds.sh.in
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Tests for the h5dump tool with vds type files
@@ -222,6 +220,8 @@ TOOLTEST() {
# Create the expect file if it doesn't yet exist.
echo " CREATED"
cp $actual $expect
+ echo " Expected result (*.ddl) missing"
+ nerrors="`expr $nerrors + 1`"
elif $CMP $expect $actual; then
echo " PASSED"
else
@@ -263,11 +263,15 @@ TOOLTEST2() {
# Create the expect file if it doesn't yet exist.
echo " CREATED"
cp $actual $expect
+ echo " Expected result (*.ddl) missing"
+ nerrors="`expr $nerrors + 1`"
elif $CMP $expect $actual; then
if [ ! -f $expectdata ]; then
# Create the expect data file if it doesn't yet exist.
echo " CREATED"
cp $actualdata $expectdata
+ echo " Expected data (*.exp) missing"
+ nerrors="`expr $nerrors + 1`"
elif $CMP $expectdata $actualdata; then
echo " PASSED"
else
@@ -329,6 +333,8 @@ TOOLTEST3() {
# Create the expect file if it doesn't yet exist.
echo " CREATED"
cp $actual $expect
+ echo " Expected result (*.ddl) missing"
+ nerrors="`expr $nerrors + 1`"
elif $CMP $expect $actual; then
echo " PASSED"
else
diff --git a/tools/test/h5dump/testh5dumpxml.sh.in b/tools/test/h5dump/testh5dumpxml.sh.in
index 638a710..5f62946 100644
--- a/tools/test/h5dump/testh5dumpxml.sh.in
+++ b/tools/test/h5dump/testh5dumpxml.sh.in
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Tests for the h5dump tool
@@ -32,6 +30,8 @@ DIRNAME='dirname'
LS='ls'
AWK='awk'
+WORDS_BIGENDIAN="@WORDS_BIGENDIAN@"
+
nerrors=0
verbose=yes
@@ -126,7 +126,8 @@ $SRC_H5DUMP_TESTFILES/tarray3.h5.xml
$SRC_H5DUMP_TESTFILES/tarray6.h5.xml
$SRC_H5DUMP_TESTFILES/tarray7.h5.xml
$SRC_H5DUMP_TESTFILES/tattr.h5.xml
-$SRC_H5DUMP_TESTFILES/tbitfields.h5.xml
+$SRC_H5DUMP_TESTFILES/tbitfields_be.h5.xml
+$SRC_H5DUMP_TESTFILES/tbitfields_le.h5.xml
$SRC_H5DUMP_TESTFILES/tcompound_complex.h5.xml
$SRC_H5DUMP_TESTFILES/tcompound.h5.xml
$SRC_H5DUMP_TESTFILES/tcompound2.h5.xml
@@ -204,10 +205,10 @@ COPY_TESTFILES_TO_TESTDIR()
INODE_SDIR=`$LS -i -d $SDIR | $AWK -F' ' '{print $1}'`
INODE_DDIR=`$LS -i -d $TESTDIR | $AWK -F' ' '{print $1}'`
if [ "$INODE_SDIR" != "$INODE_DDIR" ]; then
- $CP -f $tstfile $TESTDIR
+ $CP -f $tstfile $TESTDIR
if [ $? -ne 0 ]; then
echo "Error: FAILED to copy $tstfile ."
-
+
# Comment out this to CREATE expected file
exit $EXIT_FAILURE
fi
@@ -265,7 +266,9 @@ TOOLTEST() {
if [ ! -f $expect ]; then
# Create the expect file if it doesn't yet exist.
echo " CREATED"
- cp $actual $expect
+ cp $actual $expect
+ echo " Expected result (*.xml) missing"
+ nerrors="`expr $nerrors + 1`"
elif $CMP $expect $actual; then
echo " PASSED"
else
@@ -281,7 +284,7 @@ TOOLTEST() {
fi
}
-# Print a "SKIP" message
+# Print a "SKIP" message
SKIP() {
TESTING $DUMPER $@
echo " -SKIP-"
@@ -299,7 +302,11 @@ COPY_TESTFILES_TO_TESTDIR
# test XML
TOOLTEST tall.h5.xml --xml tall.h5
TOOLTEST tattr.h5.xml --xml tattr.h5
-TOOLTEST tbitfields.h5.xml --xml tbitfields.h5
+if test $WORDS_BIGENDIAN != "yes"; then
+TOOLTEST tbitfields_le.h5.xml --xml tbitfields.h5
+else
+TOOLTEST tbitfields_be.h5.xml --xml tbitfields.h5
+fi
TOOLTEST tcompound.h5.xml --xml tcompound.h5
TOOLTEST tcompound2.h5.xml --xml tcompound2.h5
TOOLTEST tdatareg.h5.xml --xml tdatareg.h5
@@ -347,7 +354,7 @@ TOOLTEST tsaf.h5.xml --xml tsaf.h5
TOOLTEST tempty.h5.xml --xml tempty.h5
TOOLTEST tnamed_dtype_attr.h5.xml --xml tnamed_dtype_attr.h5
##Test dataset and attribute of null space. Commented out:
-## wait until the XML schema is updated for null space.
+## wait until the XML schema is updated for null space.
##TOOLTEST tnullspace.h5.xml --xml tnulspace.h5
# other options for xml
@@ -358,7 +365,7 @@ TOOLTEST tempty-nons.h5.xml --xml -X ":" tempty.h5
TOOLTEST tempty-nons-2.h5.xml --xml --xml-ns=":" tempty.h5
## Some of these combinations are syntactically correct but
-## the URLs are dummies
+## the URLs are dummies
TOOLTEST tempty-ns.h5.xml --xml -X "thing:" tempty.h5
TOOLTEST tempty-ns-2.h5.xml --xml --xml-ns="thing:" tempty.h5
TOOLTEST tempty-nons-uri.h5.xml --xml --xml-ns=":" --xml-dtd="http://somewhere.net" tempty.h5
diff --git a/tools/test/h5format_convert/CMakeLists.txt b/tools/test/h5format_convert/CMakeLists.txt
index 1d18e9a..7e47b13 100644
--- a/tools/test/h5format_convert/CMakeLists.txt
+++ b/tools/test/h5format_convert/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_TOOLS_TEST_H5FC)
#-----------------------------------------------------------------------------
@@ -23,6 +23,6 @@ INCLUDE_DIRECTORIES (${HDF5_TOOLS_DIR}/lib)
set_target_properties (h5fc_gentest PROPERTIES FOLDER generator/tools)
#add_test (NAME h5fc_gentest COMMAND $<TARGET_FILE:h5fc_gentest>)
- endif (HDF5_BUILD_GENERATORS)
+ endif ()
include (CMakeTests.cmake)
diff --git a/tools/test/h5format_convert/CMakeTests.cmake b/tools/test/h5format_convert/CMakeTests.cmake
index 68f9318..3e423da 100644
--- a/tools/test/h5format_convert/CMakeTests.cmake
+++ b/tools/test/h5format_convert/CMakeTests.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
##############################################################################
##############################################################################
@@ -64,11 +75,11 @@
foreach (ddl_file ${HDF5_REFERENCE_FILES})
HDFTEST_COPY_FILE("${HDF5_TOOLS_TEST_H5FC_SOURCE_DIR}/testfiles/${ddl_file}" "${PROJECT_BINARY_DIR}/testfiles/${ddl_file}" "h5fc_files")
- endforeach (ddl_file ${HDF5_REFERENCE_FILES})
+ endforeach ()
foreach (h5_file ${HDF5_REFERENCE_TEST_FILES})
HDFTEST_COPY_FILE("${HDF5_TOOLS_TEST_H5FC_SOURCE_DIR}/testfiles/${h5_file}" "${PROJECT_BINARY_DIR}/testfiles/${h5_file}" "h5fc_files")
- endforeach (h5_file ${HDF5_REFERENCE_TEST_FILES})
+ endforeach ()
add_custom_target(h5fc_files ALL COMMENT "Copying files needed by h5fc tests" DEPENDS ${h5fc_files_list})
##############################################################################
@@ -77,7 +88,7 @@
##############################################################################
##############################################################################
- MACRO (ADD_H5_OUTPUT testname resultfile resultcode testfile)
+ macro (ADD_H5_OUTPUT testname resultfile resultcode testfile)
# If using memchecker add tests without using scripts
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
add_test (
@@ -87,7 +98,7 @@
)
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (H5FC-${testname}-clear-objects PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
if (NOT "${testfile}" STREQUAL "")
add_test (
NAME H5FC-${testname}-${testfile}-tmpfile
@@ -108,7 +119,7 @@
)
set_tests_properties (H5FC-${testname}-${testfile} PROPERTIES DEPENDS "H5FC-${testname}-${testfile}-tmpfile")
set (last_test "H5FC-${testname}-${testfile}")
- else (NOT "${testfile}" STREQUAL "")
+ else ()
add_test (
NAME H5FC-${testname}-NA
COMMAND "${CMAKE_COMMAND}"
@@ -122,11 +133,11 @@
)
set_tests_properties (H5FC-${testname}-NA PROPERTIES DEPENDS "H5FC-${testname}-clear-objects")
set (last_test "H5FC-${testname}-NA")
- endif (NOT "${testfile}" STREQUAL "")
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO (ADD_H5_OUTPUT)
+ endif ()
+ endif ()
+ endmacro ()
- MACRO (ADD_H5_TEST testname resultcode testfile)
+ macro (ADD_H5_TEST testname resultcode testfile)
# If using memchecker add tests without using scripts
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
add_test (
@@ -136,7 +147,7 @@
)
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (H5FC-${testname}-clear-objects PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
add_test (
NAME H5FC-${testname}-tmpfile
COMMAND ${CMAKE_COMMAND}
@@ -156,10 +167,10 @@
)
set_tests_properties (H5FC-${testname} PROPERTIES DEPENDS "H5FC-${testname}-tmpfile")
set (last_test "H5FC-${testname}")
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO (ADD_H5_TEST)
+ endif ()
+ endmacro ()
- MACRO (ADD_H5_CHECK_IDX dependtest testname)
+ macro (ADD_H5_CHECK_IDX dependtest testname)
# If using memchecker add tests without using scripts
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
add_test (
@@ -167,10 +178,10 @@
COMMAND "$<TARGET_FILE:h5fc_chk_idx>" "./testfiles/tmp.h5" "${ARGN}"
)
set_tests_properties (H5FC_CHECK_IDX-${testname} PROPERTIES DEPENDS "H5FC-${dependtest}")
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO (ADD_H5_CHECK_IDX)
+ endif ()
+ endmacro ()
- MACRO (ADD_H5_TEST_CHECK_IDX testname resultcode testfile)
+ macro (ADD_H5_TEST_CHECK_IDX testname resultcode testfile)
# If using memchecker add tests without using scripts
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
add_test (
@@ -180,7 +191,7 @@
)
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (H5FC-${testname}-clear-objects PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
add_test (
NAME H5FC-${testname}-tmpfile
COMMAND ${CMAKE_COMMAND}
@@ -205,10 +216,10 @@
)
set_tests_properties (H5FC_CHECK_IDX-${testname} PROPERTIES DEPENDS "H5FC-${testname}")
set (last_test "H5FC_CHECK_IDX-${testname}")
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO (ADD_H5_TEST_CHECK_IDX)
+ endif ()
+ endmacro ()
- MACRO (ADD_H5_H5DUMP_CHECK testname)
+ macro (ADD_H5_H5DUMP_CHECK testname)
# If using memchecker add tests without using scripts
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
add_test (
@@ -218,7 +229,7 @@
)
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (H5FC-${testname}-clear-objects PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
add_test (
NAME H5FC-${testname}-tmpfile
COMMAND ${CMAKE_COMMAND}
@@ -250,8 +261,8 @@
)
set_tests_properties (H5FC_CHECK_DUMP-${testname} PROPERTIES DEPENDS "H5FC-${testname}")
set (last_test "H5FC_CHECK_DUMP-${testname}")
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO (ADD_H5_H5DUMP_CHECK)
+ endif ()
+ endmacro ()
##############################################################################
##############################################################################
@@ -324,9 +335,9 @@
)
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (H5FC-clearall-objects PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
set (last_test "H5FC-clearall-objects")
- endif (HDF5_ENABLE_USING_MEMCHECKER)
+ endif ()
# h5format_convert --help
# h5format_convert (no options)
diff --git a/tools/test/h5format_convert/Makefile.am b/tools/test/h5format_convert/Makefile.am
index 5779d4d..eb2d5af 100644
--- a/tools/test/h5format_convert/Makefile.am
+++ b/tools/test/h5format_convert/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
@@ -31,6 +29,8 @@ check_PROGRAMS=$(TEST_PROG) h5fc_chk_idx
check_SCRIPTS=$(TEST_SCRIPT)
SCRIPT_DEPEND=../../src/h5format_convert/h5format_convert$(EXEEXT)
+CLEANFILES=
+
# Tell automake to clean h5redeploy script
CHECK_CLEANFILES+=*.h5
diff --git a/tools/test/h5format_convert/h5fc_chk_idx.c b/tools/test/h5format_convert/h5fc_chk_idx.c
index 3a87594..8369668 100644
--- a/tools/test/h5format_convert/h5fc_chk_idx.c
+++ b/tools/test/h5format_convert/h5fc_chk_idx.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/tools/test/h5format_convert/h5fc_gentest.c b/tools/test/h5format_convert/h5fc_gentest.c
index 520e5bf..8c873be 100644
--- a/tools/test/h5format_convert/h5fc_gentest.c
+++ b/tools/test/h5format_convert/h5fc_gentest.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -44,8 +42,6 @@ const char *FILENAME[] = {
#define GROUP "GROUP"
-#define DSET_BT1 "DSET_BT1"
-#define DSET_NDATA_BT1 "DSET_NDATA_BT1"
#define DSET_COMPACT "DSET_COMPACT"
#define DSET_CONTIGUOUS "DSET_CONTIGUOUS"
@@ -296,7 +292,6 @@ error:
H5Dclose(did2);
H5Gclose(gid);
H5Fclose(fcpl);
- H5Fclose(fapl);
H5Fclose(fid);
} H5E_END_TRY;
@@ -571,7 +566,7 @@ gen_ext(const char *fname, unsigned new_format, unsigned what)
H5Pset_shared_mesg_nindexes(fcpl, 4);
break;
case 2:
- H5Pset_file_space(fcpl, H5F_FILE_SPACE_ALL_PERSIST, (hsize_t)0);
+ H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, FALSE, (hsize_t)1);
break;
case 3:
H5Pset_istore_k(fcpl, ISTORE_IK);
@@ -579,16 +574,16 @@ gen_ext(const char *fname, unsigned new_format, unsigned what)
break;
case 4:
H5Pset_istore_k(fcpl, ISTORE_IK);
- H5Pset_file_space(fcpl, H5F_FILE_SPACE_DEFAULT, (hsize_t)2);
+ H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, TRUE, (hsize_t)1);
break;
case 5:
H5Pset_shared_mesg_nindexes(fcpl, 4);
- H5Pset_file_space(fcpl, H5F_FILE_SPACE_VFD, (hsize_t)0);
+ H5Pset_file_space_page_size(fcpl, (hsize_t)512);
break;
case 6:
H5Pset_istore_k(fcpl, ISTORE_IK);
H5Pset_shared_mesg_nindexes(fcpl, 4);
- H5Pset_file_space(fcpl, H5F_FILE_SPACE_AGGR_VFD, (hsize_t)0);
+ H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_NONE, FALSE, (hsize_t)1);
break;
default:
break;
@@ -771,6 +766,7 @@ error:
H5Dclose(did2);
H5Gclose(gid);
H5Fclose(fid);
+ H5Pclose(fapl);
H5Pclose(fcpl);
} H5E_END_TRY;
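
Aside (not part of the patch): the h5fc_gentest.c hunks above retire the removed H5Pset_file_space() call in favor of the split file-space APIs. The sketch below is a minimal, hedged illustration of that replacement pattern; the strategy, persist flag, threshold, and page-size calls are the ones visible in the diff, while the file name "example.h5" and the error handling are illustrative assumptions.

    #include "hdf5.h"

    /* Create a file whose creation property list uses the 1.10 file-space
     * settings instead of the old H5Pset_file_space() call. */
    static int
    create_paged_file(void)
    {
        hid_t fcpl = H5Pcreate(H5P_FILE_CREATE);    /* file creation property list */
        hid_t fid  = -1;

        if (fcpl < 0)
            return -1;

        /* Old API: H5Pset_file_space(fcpl, H5F_FILE_SPACE_ALL_PERSIST, (hsize_t)0);
         * New API: choose a strategy, a persist flag, and a section threshold. */
        if (H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE,
                                       (hbool_t)0 /* do not persist free space */,
                                       (hsize_t)1) < 0)
            goto error;

        /* For paged aggregation, an explicit page size can also be set. */
        if (H5Pset_file_space_page_size(fcpl, (hsize_t)512) < 0)
            goto error;

        if ((fid = H5Fcreate("example.h5", H5F_ACC_TRUNC, fcpl, H5P_DEFAULT)) < 0)
            goto error;

        H5Fclose(fid);
        H5Pclose(fcpl);
        return 0;

    error:
        if (fid >= 0)
            H5Fclose(fid);
        H5Pclose(fcpl);
        return -1;
    }

The same pairing of H5Pset_file_space_strategy() with an optional H5Pset_file_space_page_size() is what produces the new FILE_SPACE_STRATEGY / FILE_SPACE_PAGE_SIZE lines in the updated .ddl reference files further below.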
diff --git a/tools/test/h5format_convert/testfiles/h5fc_edge_v3.h5 b/tools/test/h5format_convert/testfiles/h5fc_edge_v3.h5
index ac7dbd3..6f92057 100644
--- a/tools/test/h5format_convert/testfiles/h5fc_edge_v3.h5
+++ b/tools/test/h5format_convert/testfiles/h5fc_edge_v3.h5
Binary files differ
diff --git a/tools/test/h5format_convert/testfiles/h5fc_err_level.h5 b/tools/test/h5format_convert/testfiles/h5fc_err_level.h5
index a10e8a4..d1aecc2 100644
--- a/tools/test/h5format_convert/testfiles/h5fc_err_level.h5
+++ b/tools/test/h5format_convert/testfiles/h5fc_err_level.h5
Binary files differ
diff --git a/tools/test/h5format_convert/testfiles/h5fc_ext1_f.ddl b/tools/test/h5format_convert/testfiles/h5fc_ext1_f.ddl
index dae9284..db00a99 100644
--- a/tools/test/h5format_convert/testfiles/h5fc_ext1_f.ddl
+++ b/tools/test/h5format_convert/testfiles/h5fc_ext1_f.ddl
@@ -9,8 +9,10 @@ SUPER_BLOCK {
BTREE_RANK 16
BTREE_LEAF 4
ISTORE_K 32
- FILE_SPACE_STRATEGY H5F_FILE_SPACE_ALL
- FREE_SPACE_THRESHOLD 1
+ FILE_SPACE_STRATEGY H5F_FSPACE_STRATEGY_FSM_AGGR
+ FREE_SPACE_PERSIST FALSE
+ FREE_SPACE_SECTION_THRESHOLD 1
+ FILE_SPACE_PAGE_SIZE 4096
USER_BLOCK {
USERBLOCK_SIZE 0
}
diff --git a/tools/test/h5format_convert/testfiles/h5fc_ext1_f.h5 b/tools/test/h5format_convert/testfiles/h5fc_ext1_f.h5
index 68ba2ac..5f2a01a 100644
--- a/tools/test/h5format_convert/testfiles/h5fc_ext1_f.h5
+++ b/tools/test/h5format_convert/testfiles/h5fc_ext1_f.h5
Binary files differ
diff --git a/tools/test/h5format_convert/testfiles/h5fc_ext1_i.ddl b/tools/test/h5format_convert/testfiles/h5fc_ext1_i.ddl
index 8ec4656..4be6d90 100644
--- a/tools/test/h5format_convert/testfiles/h5fc_ext1_i.ddl
+++ b/tools/test/h5format_convert/testfiles/h5fc_ext1_i.ddl
@@ -9,8 +9,10 @@ SUPER_BLOCK {
BTREE_RANK 16
BTREE_LEAF 4
ISTORE_K 64
- FILE_SPACE_STRATEGY H5F_FILE_SPACE_ALL
- FREE_SPACE_THRESHOLD 1
+ FILE_SPACE_STRATEGY H5F_FSPACE_STRATEGY_FSM_AGGR
+ FREE_SPACE_PERSIST FALSE
+ FREE_SPACE_SECTION_THRESHOLD 1
+ FILE_SPACE_PAGE_SIZE 4096
USER_BLOCK {
USERBLOCK_SIZE 0
}
diff --git a/tools/test/h5format_convert/testfiles/h5fc_ext1_i.h5 b/tools/test/h5format_convert/testfiles/h5fc_ext1_i.h5
index 1a58089..df86faf 100644
--- a/tools/test/h5format_convert/testfiles/h5fc_ext1_i.h5
+++ b/tools/test/h5format_convert/testfiles/h5fc_ext1_i.h5
Binary files differ
diff --git a/tools/test/h5format_convert/testfiles/h5fc_ext1_s.ddl b/tools/test/h5format_convert/testfiles/h5fc_ext1_s.ddl
index dae9284..db00a99 100644
--- a/tools/test/h5format_convert/testfiles/h5fc_ext1_s.ddl
+++ b/tools/test/h5format_convert/testfiles/h5fc_ext1_s.ddl
@@ -9,8 +9,10 @@ SUPER_BLOCK {
BTREE_RANK 16
BTREE_LEAF 4
ISTORE_K 32
- FILE_SPACE_STRATEGY H5F_FILE_SPACE_ALL
- FREE_SPACE_THRESHOLD 1
+ FILE_SPACE_STRATEGY H5F_FSPACE_STRATEGY_FSM_AGGR
+ FREE_SPACE_PERSIST FALSE
+ FREE_SPACE_SECTION_THRESHOLD 1
+ FILE_SPACE_PAGE_SIZE 4096
USER_BLOCK {
USERBLOCK_SIZE 0
}
diff --git a/tools/test/h5format_convert/testfiles/h5fc_ext1_s.h5 b/tools/test/h5format_convert/testfiles/h5fc_ext1_s.h5
index 26e9b25..e41f51a 100644
--- a/tools/test/h5format_convert/testfiles/h5fc_ext1_s.h5
+++ b/tools/test/h5format_convert/testfiles/h5fc_ext1_s.h5
Binary files differ
diff --git a/tools/test/h5format_convert/testfiles/h5fc_ext2_if.ddl b/tools/test/h5format_convert/testfiles/h5fc_ext2_if.ddl
index 8ec4656..4be6d90 100644
--- a/tools/test/h5format_convert/testfiles/h5fc_ext2_if.ddl
+++ b/tools/test/h5format_convert/testfiles/h5fc_ext2_if.ddl
@@ -9,8 +9,10 @@ SUPER_BLOCK {
BTREE_RANK 16
BTREE_LEAF 4
ISTORE_K 64
- FILE_SPACE_STRATEGY H5F_FILE_SPACE_ALL
- FREE_SPACE_THRESHOLD 1
+ FILE_SPACE_STRATEGY H5F_FSPACE_STRATEGY_FSM_AGGR
+ FREE_SPACE_PERSIST FALSE
+ FREE_SPACE_SECTION_THRESHOLD 1
+ FILE_SPACE_PAGE_SIZE 4096
USER_BLOCK {
USERBLOCK_SIZE 0
}
diff --git a/tools/test/h5format_convert/testfiles/h5fc_ext2_if.h5 b/tools/test/h5format_convert/testfiles/h5fc_ext2_if.h5
index e5c5e25..af2cb15 100644
--- a/tools/test/h5format_convert/testfiles/h5fc_ext2_if.h5
+++ b/tools/test/h5format_convert/testfiles/h5fc_ext2_if.h5
Binary files differ
diff --git a/tools/test/h5format_convert/testfiles/h5fc_ext2_is.ddl b/tools/test/h5format_convert/testfiles/h5fc_ext2_is.ddl
index 8ec4656..4be6d90 100644
--- a/tools/test/h5format_convert/testfiles/h5fc_ext2_is.ddl
+++ b/tools/test/h5format_convert/testfiles/h5fc_ext2_is.ddl
@@ -9,8 +9,10 @@ SUPER_BLOCK {
BTREE_RANK 16
BTREE_LEAF 4
ISTORE_K 64
- FILE_SPACE_STRATEGY H5F_FILE_SPACE_ALL
- FREE_SPACE_THRESHOLD 1
+ FILE_SPACE_STRATEGY H5F_FSPACE_STRATEGY_FSM_AGGR
+ FREE_SPACE_PERSIST FALSE
+ FREE_SPACE_SECTION_THRESHOLD 1
+ FILE_SPACE_PAGE_SIZE 4096
USER_BLOCK {
USERBLOCK_SIZE 0
}
diff --git a/tools/test/h5format_convert/testfiles/h5fc_ext2_is.h5 b/tools/test/h5format_convert/testfiles/h5fc_ext2_is.h5
index 0e3eca7..5e99ddf 100644
--- a/tools/test/h5format_convert/testfiles/h5fc_ext2_is.h5
+++ b/tools/test/h5format_convert/testfiles/h5fc_ext2_is.h5
Binary files differ
diff --git a/tools/test/h5format_convert/testfiles/h5fc_ext2_sf.ddl b/tools/test/h5format_convert/testfiles/h5fc_ext2_sf.ddl
index dae9284..db00a99 100644
--- a/tools/test/h5format_convert/testfiles/h5fc_ext2_sf.ddl
+++ b/tools/test/h5format_convert/testfiles/h5fc_ext2_sf.ddl
@@ -9,8 +9,10 @@ SUPER_BLOCK {
BTREE_RANK 16
BTREE_LEAF 4
ISTORE_K 32
- FILE_SPACE_STRATEGY H5F_FILE_SPACE_ALL
- FREE_SPACE_THRESHOLD 1
+ FILE_SPACE_STRATEGY H5F_FSPACE_STRATEGY_FSM_AGGR
+ FREE_SPACE_PERSIST FALSE
+ FREE_SPACE_SECTION_THRESHOLD 1
+ FILE_SPACE_PAGE_SIZE 4096
USER_BLOCK {
USERBLOCK_SIZE 0
}
diff --git a/tools/test/h5format_convert/testfiles/h5fc_ext2_sf.h5 b/tools/test/h5format_convert/testfiles/h5fc_ext2_sf.h5
index cb15f03..8996bf0 100644
--- a/tools/test/h5format_convert/testfiles/h5fc_ext2_sf.h5
+++ b/tools/test/h5format_convert/testfiles/h5fc_ext2_sf.h5
Binary files differ
diff --git a/tools/test/h5format_convert/testfiles/h5fc_ext3_isf.ddl b/tools/test/h5format_convert/testfiles/h5fc_ext3_isf.ddl
index 8ec4656..4be6d90 100644
--- a/tools/test/h5format_convert/testfiles/h5fc_ext3_isf.ddl
+++ b/tools/test/h5format_convert/testfiles/h5fc_ext3_isf.ddl
@@ -9,8 +9,10 @@ SUPER_BLOCK {
BTREE_RANK 16
BTREE_LEAF 4
ISTORE_K 64
- FILE_SPACE_STRATEGY H5F_FILE_SPACE_ALL
- FREE_SPACE_THRESHOLD 1
+ FILE_SPACE_STRATEGY H5F_FSPACE_STRATEGY_FSM_AGGR
+ FREE_SPACE_PERSIST FALSE
+ FREE_SPACE_SECTION_THRESHOLD 1
+ FILE_SPACE_PAGE_SIZE 4096
USER_BLOCK {
USERBLOCK_SIZE 0
}
diff --git a/tools/test/h5format_convert/testfiles/h5fc_ext3_isf.h5 b/tools/test/h5format_convert/testfiles/h5fc_ext3_isf.h5
index d46cef4..df1db97 100644
--- a/tools/test/h5format_convert/testfiles/h5fc_ext3_isf.h5
+++ b/tools/test/h5format_convert/testfiles/h5fc_ext3_isf.h5
Binary files differ
diff --git a/tools/test/h5format_convert/testfiles/h5fc_ext_none.h5 b/tools/test/h5format_convert/testfiles/h5fc_ext_none.h5
index defbcb3..c498ead 100644
--- a/tools/test/h5format_convert/testfiles/h5fc_ext_none.h5
+++ b/tools/test/h5format_convert/testfiles/h5fc_ext_none.h5
Binary files differ
diff --git a/tools/test/h5format_convert/testfiles/h5fc_non_v3.h5 b/tools/test/h5format_convert/testfiles/h5fc_non_v3.h5
index 58a340d..2f0f063 100644
--- a/tools/test/h5format_convert/testfiles/h5fc_non_v3.h5
+++ b/tools/test/h5format_convert/testfiles/h5fc_non_v3.h5
Binary files differ
diff --git a/tools/test/h5format_convert/testfiles/old_h5fc_ext1_f.ddl b/tools/test/h5format_convert/testfiles/old_h5fc_ext1_f.ddl
index dae9284..db00a99 100644
--- a/tools/test/h5format_convert/testfiles/old_h5fc_ext1_f.ddl
+++ b/tools/test/h5format_convert/testfiles/old_h5fc_ext1_f.ddl
@@ -9,8 +9,10 @@ SUPER_BLOCK {
BTREE_RANK 16
BTREE_LEAF 4
ISTORE_K 32
- FILE_SPACE_STRATEGY H5F_FILE_SPACE_ALL
- FREE_SPACE_THRESHOLD 1
+ FILE_SPACE_STRATEGY H5F_FSPACE_STRATEGY_FSM_AGGR
+ FREE_SPACE_PERSIST FALSE
+ FREE_SPACE_SECTION_THRESHOLD 1
+ FILE_SPACE_PAGE_SIZE 4096
USER_BLOCK {
USERBLOCK_SIZE 0
}
diff --git a/tools/test/h5format_convert/testfiles/old_h5fc_ext1_f.h5 b/tools/test/h5format_convert/testfiles/old_h5fc_ext1_f.h5
index 3cbc7f4..b71b31e 100644
--- a/tools/test/h5format_convert/testfiles/old_h5fc_ext1_f.h5
+++ b/tools/test/h5format_convert/testfiles/old_h5fc_ext1_f.h5
Binary files differ
diff --git a/tools/test/h5format_convert/testfiles/old_h5fc_ext1_i.ddl b/tools/test/h5format_convert/testfiles/old_h5fc_ext1_i.ddl
index d1768c8..d9cc0b7 100644
--- a/tools/test/h5format_convert/testfiles/old_h5fc_ext1_i.ddl
+++ b/tools/test/h5format_convert/testfiles/old_h5fc_ext1_i.ddl
@@ -9,8 +9,10 @@ SUPER_BLOCK {
BTREE_RANK 16
BTREE_LEAF 4
ISTORE_K 64
- FILE_SPACE_STRATEGY H5F_FILE_SPACE_ALL
- FREE_SPACE_THRESHOLD 1
+ FILE_SPACE_STRATEGY H5F_FSPACE_STRATEGY_FSM_AGGR
+ FREE_SPACE_PERSIST FALSE
+ FREE_SPACE_SECTION_THRESHOLD 1
+ FILE_SPACE_PAGE_SIZE 4096
USER_BLOCK {
USERBLOCK_SIZE 0
}
diff --git a/tools/test/h5format_convert/testfiles/old_h5fc_ext1_i.h5 b/tools/test/h5format_convert/testfiles/old_h5fc_ext1_i.h5
index a2c9187..6a8e5eb 100644
--- a/tools/test/h5format_convert/testfiles/old_h5fc_ext1_i.h5
+++ b/tools/test/h5format_convert/testfiles/old_h5fc_ext1_i.h5
Binary files differ
diff --git a/tools/test/h5format_convert/testfiles/old_h5fc_ext1_s.ddl b/tools/test/h5format_convert/testfiles/old_h5fc_ext1_s.ddl
index dae9284..db00a99 100644
--- a/tools/test/h5format_convert/testfiles/old_h5fc_ext1_s.ddl
+++ b/tools/test/h5format_convert/testfiles/old_h5fc_ext1_s.ddl
@@ -9,8 +9,10 @@ SUPER_BLOCK {
BTREE_RANK 16
BTREE_LEAF 4
ISTORE_K 32
- FILE_SPACE_STRATEGY H5F_FILE_SPACE_ALL
- FREE_SPACE_THRESHOLD 1
+ FILE_SPACE_STRATEGY H5F_FSPACE_STRATEGY_FSM_AGGR
+ FREE_SPACE_PERSIST FALSE
+ FREE_SPACE_SECTION_THRESHOLD 1
+ FILE_SPACE_PAGE_SIZE 4096
USER_BLOCK {
USERBLOCK_SIZE 0
}
diff --git a/tools/test/h5format_convert/testfiles/old_h5fc_ext1_s.h5 b/tools/test/h5format_convert/testfiles/old_h5fc_ext1_s.h5
index fdf4f33..ebc6919 100644
--- a/tools/test/h5format_convert/testfiles/old_h5fc_ext1_s.h5
+++ b/tools/test/h5format_convert/testfiles/old_h5fc_ext1_s.h5
Binary files differ
diff --git a/tools/test/h5format_convert/testfiles/old_h5fc_ext2_if.ddl b/tools/test/h5format_convert/testfiles/old_h5fc_ext2_if.ddl
index 8ec4656..4be6d90 100644
--- a/tools/test/h5format_convert/testfiles/old_h5fc_ext2_if.ddl
+++ b/tools/test/h5format_convert/testfiles/old_h5fc_ext2_if.ddl
@@ -9,8 +9,10 @@ SUPER_BLOCK {
BTREE_RANK 16
BTREE_LEAF 4
ISTORE_K 64
- FILE_SPACE_STRATEGY H5F_FILE_SPACE_ALL
- FREE_SPACE_THRESHOLD 1
+ FILE_SPACE_STRATEGY H5F_FSPACE_STRATEGY_FSM_AGGR
+ FREE_SPACE_PERSIST FALSE
+ FREE_SPACE_SECTION_THRESHOLD 1
+ FILE_SPACE_PAGE_SIZE 4096
USER_BLOCK {
USERBLOCK_SIZE 0
}
diff --git a/tools/test/h5format_convert/testfiles/old_h5fc_ext2_if.h5 b/tools/test/h5format_convert/testfiles/old_h5fc_ext2_if.h5
index 6bf0a2f..a4178b3 100644
--- a/tools/test/h5format_convert/testfiles/old_h5fc_ext2_if.h5
+++ b/tools/test/h5format_convert/testfiles/old_h5fc_ext2_if.h5
Binary files differ
diff --git a/tools/test/h5format_convert/testfiles/old_h5fc_ext2_is.ddl b/tools/test/h5format_convert/testfiles/old_h5fc_ext2_is.ddl
index 8ec4656..4be6d90 100644
--- a/tools/test/h5format_convert/testfiles/old_h5fc_ext2_is.ddl
+++ b/tools/test/h5format_convert/testfiles/old_h5fc_ext2_is.ddl
@@ -9,8 +9,10 @@ SUPER_BLOCK {
BTREE_RANK 16
BTREE_LEAF 4
ISTORE_K 64
- FILE_SPACE_STRATEGY H5F_FILE_SPACE_ALL
- FREE_SPACE_THRESHOLD 1
+ FILE_SPACE_STRATEGY H5F_FSPACE_STRATEGY_FSM_AGGR
+ FREE_SPACE_PERSIST FALSE
+ FREE_SPACE_SECTION_THRESHOLD 1
+ FILE_SPACE_PAGE_SIZE 4096
USER_BLOCK {
USERBLOCK_SIZE 0
}
diff --git a/tools/test/h5format_convert/testfiles/old_h5fc_ext2_is.h5 b/tools/test/h5format_convert/testfiles/old_h5fc_ext2_is.h5
index c0c7ecc..b5cd60a 100644
--- a/tools/test/h5format_convert/testfiles/old_h5fc_ext2_is.h5
+++ b/tools/test/h5format_convert/testfiles/old_h5fc_ext2_is.h5
Binary files differ
diff --git a/tools/test/h5format_convert/testfiles/old_h5fc_ext2_sf.ddl b/tools/test/h5format_convert/testfiles/old_h5fc_ext2_sf.ddl
index dae9284..db00a99 100644
--- a/tools/test/h5format_convert/testfiles/old_h5fc_ext2_sf.ddl
+++ b/tools/test/h5format_convert/testfiles/old_h5fc_ext2_sf.ddl
@@ -9,8 +9,10 @@ SUPER_BLOCK {
BTREE_RANK 16
BTREE_LEAF 4
ISTORE_K 32
- FILE_SPACE_STRATEGY H5F_FILE_SPACE_ALL
- FREE_SPACE_THRESHOLD 1
+ FILE_SPACE_STRATEGY H5F_FSPACE_STRATEGY_FSM_AGGR
+ FREE_SPACE_PERSIST FALSE
+ FREE_SPACE_SECTION_THRESHOLD 1
+ FILE_SPACE_PAGE_SIZE 4096
USER_BLOCK {
USERBLOCK_SIZE 0
}
diff --git a/tools/test/h5format_convert/testfiles/old_h5fc_ext2_sf.h5 b/tools/test/h5format_convert/testfiles/old_h5fc_ext2_sf.h5
index 055cabf..8e63726 100644
--- a/tools/test/h5format_convert/testfiles/old_h5fc_ext2_sf.h5
+++ b/tools/test/h5format_convert/testfiles/old_h5fc_ext2_sf.h5
Binary files differ
diff --git a/tools/test/h5format_convert/testfiles/old_h5fc_ext3_isf.ddl b/tools/test/h5format_convert/testfiles/old_h5fc_ext3_isf.ddl
index 8ec4656..4be6d90 100644
--- a/tools/test/h5format_convert/testfiles/old_h5fc_ext3_isf.ddl
+++ b/tools/test/h5format_convert/testfiles/old_h5fc_ext3_isf.ddl
@@ -9,8 +9,10 @@ SUPER_BLOCK {
BTREE_RANK 16
BTREE_LEAF 4
ISTORE_K 64
- FILE_SPACE_STRATEGY H5F_FILE_SPACE_ALL
- FREE_SPACE_THRESHOLD 1
+ FILE_SPACE_STRATEGY H5F_FSPACE_STRATEGY_FSM_AGGR
+ FREE_SPACE_PERSIST FALSE
+ FREE_SPACE_SECTION_THRESHOLD 1
+ FILE_SPACE_PAGE_SIZE 4096
USER_BLOCK {
USERBLOCK_SIZE 0
}
diff --git a/tools/test/h5format_convert/testfiles/old_h5fc_ext3_isf.h5 b/tools/test/h5format_convert/testfiles/old_h5fc_ext3_isf.h5
index f4caaf4..d581e3c 100644
--- a/tools/test/h5format_convert/testfiles/old_h5fc_ext3_isf.h5
+++ b/tools/test/h5format_convert/testfiles/old_h5fc_ext3_isf.h5
Binary files differ
diff --git a/tools/test/h5format_convert/testfiles/old_h5fc_ext_none.h5 b/tools/test/h5format_convert/testfiles/old_h5fc_ext_none.h5
index d0bf344..0e02ca1 100644
--- a/tools/test/h5format_convert/testfiles/old_h5fc_ext_none.h5
+++ b/tools/test/h5format_convert/testfiles/old_h5fc_ext_none.h5
Binary files differ
diff --git a/tools/test/h5format_convert/testh5fc.sh.in b/tools/test/h5format_convert/testh5fc.sh.in
index 5384354..c3056e8 100644
--- a/tools/test/h5format_convert/testh5fc.sh.in
+++ b/tools/test/h5format_convert/testh5fc.sh.in
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Tests for the h5format_convert tool
#
@@ -180,6 +178,9 @@ CLEAN_TESTFILES_AND_TESTDIR()
$RM $TESTDIR
else
$RM $TESTDIR/$TMPFILE
+ $RM $TESTDIR/$TMPOUTFILE
+ $RM $TESTDIR/$TMPCHKFILE
+ $RM $TESTDIR/$TMPDMPFILE
fi
}
diff --git a/tools/test/h5import/CMakeLists.txt b/tools/test/h5import/CMakeLists.txt
index 22491b4..0d23e5d 100644
--- a/tools/test/h5import/CMakeLists.txt
+++ b/tools/test/h5import/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_TOOLS_TEST_H5IMPORT)
#-----------------------------------------------------------------------------
diff --git a/tools/test/h5import/CMakeTests.cmake b/tools/test/h5import/CMakeTests.cmake
index eb0b413..27c736a 100644
--- a/tools/test/h5import/CMakeTests.cmake
+++ b/tools/test/h5import/CMakeTests.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
##############################################################################
##############################################################################
@@ -64,15 +75,15 @@
file (MAKE_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles")
foreach (conf_file ${HDF5_REFERENCE_CONF_FILES})
HDFTEST_COPY_FILE("${HDF5_TOOLS_TEST_H5IMPORT_SOURCE_DIR}/testfiles/${conf_file}" "${PROJECT_BINARY_DIR}/testfiles/${conf_file}" "h5import_files")
- endforeach (conf_file ${HDF5_REFERENCE_CONF_FILES})
+ endforeach ()
foreach (txt_file ${HDF5_REFERENCE_TXT_FILES})
HDFTEST_COPY_FILE("${HDF5_TOOLS_TEST_H5IMPORT_SOURCE_DIR}/testfiles/${txt_file}" "${PROJECT_BINARY_DIR}/testfiles/${txt_file}" "h5import_files")
- endforeach (txt_file ${HDF5_REFERENCE_TXT_FILES})
+ endforeach ()
foreach (h5_file ${HDF5_REFERENCE_TEST_FILES})
HDFTEST_COPY_FILE("${HDF5_TOOLS_TEST_H5IMPORT_SOURCE_DIR}/testfiles/${h5_file}" "${PROJECT_BINARY_DIR}/testfiles/${h5_file}" "h5import_files")
- endforeach (h5_file ${HDF5_REFERENCE_TEST_FILES})
+ endforeach ()
add_custom_target(h5import_files ALL COMMENT "Copying files needed by h5import tests" DEPENDS ${h5import_files_list})
##############################################################################
@@ -80,14 +91,14 @@
### T H E T E S T S M A C R O S ###
##############################################################################
##############################################################################
- MACRO (ADD_H5_TEST testname importfile conffile testfile)
+ macro (ADD_H5_TEST testname importfile conffile testfile)
# If using memchecker skip macro based tests
if (HDF5_ENABLE_USING_MEMCHECKER)
add_test (NAME H5IMPORT-${testname} COMMAND $<TARGET_FILE:h5import> ${importfile} -c ${conffile} -o ${testfile})
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (H5IMPORT-${testname} PROPERTIES DEPENDS H5IMPORT-h5importtest)
endif ()
- else (HDF5_ENABLE_USING_MEMCHECKER)
+ else ()
add_test (
NAME H5IMPORT-${testname}-clear-objects
COMMAND ${CMAKE_COMMAND}
@@ -125,10 +136,10 @@
-P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
)
set_tests_properties (H5IMPORT-${testname}-H5DMP_CMP PROPERTIES DEPENDS H5IMPORT-${testname}-H5DMP)
- endif (HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO (ADD_H5_TEST testname importfile conffile testfile)
+ endif ()
+ endmacro ()
- MACRO (ADD_H5_DUMPTEST testname datasetname testfile)
+ macro (ADD_H5_DUMPTEST testname datasetname testfile)
# If using memchecker skip tests
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
add_test (
@@ -152,7 +163,7 @@
-D "TEST_SKIP_COMPARE=TRUE"
-P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
)
- else ("${ARGN}" STREQUAL "BINARY")
+ else ()
add_test (
NAME H5IMPORT-DUMP-${testname}-H5DMP
COMMAND "${CMAKE_COMMAND}"
@@ -164,7 +175,7 @@
-D "TEST_SKIP_COMPARE=TRUE"
-P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
)
- endif ("${ARGN}" STREQUAL "BINARY")
+ endif ()
set_tests_properties (H5IMPORT-DUMP-${testname}-H5DMP PROPERTIES DEPENDS "H5IMPORT-DUMP-${testname}-clear-objects")
add_test (
@@ -193,24 +204,24 @@
-P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
)
set_tests_properties (H5IMPORT-DUMP-${testname}-H5DFF PROPERTIES DEPENDS "H5IMPORT-DUMP-${testname}")
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO (ADD_H5_DUMPTEST testname datasetname testfile)
+ endif ()
+ endmacro ()
- MACRO (ADD_H5_SKIP_DUMPTEST testname datasetname testfile)
+ macro (ADD_H5_SKIP_DUMPTEST testname datasetname testfile)
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
add_test (
NAME H5IMPORT-DUMP-${testname}-SKIPPED
COMMAND ${CMAKE_COMMAND} -E echo "SKIP ${testname} ${datasetname} ${testfile} --- DEFLATE filter not available"
)
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO (ADD_H5_SKIP_DUMPTEST testname datasetname testfile)
+ endif ()
+ endmacro ()
# --------------------------------------------------------------------
# Determine if filter is available for h5diff
# --------------------------------------------------------------------
if (H5_HAVE_FILTER_DEFLATE)
set (USE_FILTER_DEFLATE "true")
- endif (H5_HAVE_FILTER_DEFLATE)
+ endif ()
##############################################################################
##############################################################################
@@ -377,7 +388,7 @@
dtxtstr.h5.dff.err
)
set (last_test "H5IMPORT-clear-objects")
- endif (HDF5_ENABLE_USING_MEMCHECKER)
+ endif ()
add_test (
NAME H5IMPORT-h5importtest-clear-objects
@@ -393,7 +404,7 @@
)
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (H5IMPORT-h5importtest-clear-objects PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
set (last_test "H5IMPORT-clear-objects")
add_test (NAME H5IMPORT-h5importtest COMMAND $<TARGET_FILE:h5importtest>)
@@ -424,17 +435,17 @@
ADD_H5_TEST (BINARY_F64 binfp64.bin testfiles/binfp64.conf binfp64.h5)
if (NOT USE_FILTER_DEFLATE)
ADD_H5_SKIP_DUMPTEST (BINARY_F64 "/fp/bin/64-bit" binfp64.h5 BINARY)
- else (NOT USE_FILTER_DEFLATE)
+ else ()
ADD_H5_DUMPTEST (BINARY_F64 "/fp/bin/64-bit" binfp64.h5 BINARY)
- endif (NOT USE_FILTER_DEFLATE)
+ endif ()
# ----- TESTING "BINARY I8 - rank 3 - Output I16LE + Chunked+Extended+Compressed "
ADD_H5_TEST (BINARY_I8 binin8.bin testfiles/binin8.conf binin8.h5)
if (NOT USE_FILTER_DEFLATE)
ADD_H5_SKIP_DUMPTEST (BINARY_I8 "/int/bin/8-bit" binin8.h5 BINARY)
- else (NOT USE_FILTER_DEFLATE)
+ else ()
ADD_H5_DUMPTEST (BINARY_I8 "/int/bin/8-bit" binin8.h5 BINARY)
- endif (NOT USE_FILTER_DEFLATE)
+ endif ()
# ----- TESTING "BINARY I16 - rank 3 - Output order LE + CHUNKED + extended "
ADD_H5_TEST (BINARY_I16 binin16.bin testfiles/binin16.conf binin16.h5)
diff --git a/tools/test/h5import/Makefile.am b/tools/test/h5import/Makefile.am
index f238f77..7c5371c 100644
--- a/tools/test/h5import/Makefile.am
+++ b/tools/test/h5import/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
diff --git a/tools/test/h5import/h5importtest.c b/tools/test/h5import/h5importtest.c
index 38fd75b..135b8e4 100644
--- a/tools/test/h5import/h5importtest.c
+++ b/tools/test/h5import/h5importtest.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <stdio.h>
diff --git a/tools/test/h5import/h5importtestutil.sh.in b/tools/test/h5import/h5importtestutil.sh.in
index 1a9a3fc..3bbe37b 100644
--- a/tools/test/h5import/h5importtestutil.sh.in
+++ b/tools/test/h5import/h5importtestutil.sh.in
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Tests for the h5import tool
diff --git a/tools/test/h5jam/CMakeLists.txt b/tools/test/h5jam/CMakeLists.txt
index d3980c4..562b4f3 100644
--- a/tools/test/h5jam/CMakeLists.txt
+++ b/tools/test/h5jam/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_TOOLS_TEST_H5JAM)
#-----------------------------------------------------------------------------
@@ -17,7 +17,7 @@ INCLUDE_DIRECTORIES (${HDF5_TOOLS_DIR}/lib)
set_target_properties (h5jamgentest PROPERTIES FOLDER generator/tools)
#add_test (NAME h5jamgentest COMMAND $<TARGET_FILE:h5jamgentest>)
- endif (HDF5_BUILD_GENERATORS)
+ endif ()
add_executable (getub ${HDF5_TOOLS_TEST_H5JAM_SOURCE_DIR}/getub.c)
TARGET_NAMING (getub STATIC)
diff --git a/tools/test/h5jam/CMakeTests.cmake b/tools/test/h5jam/CMakeTests.cmake
index 9ed2015..ae6d440 100644
--- a/tools/test/h5jam/CMakeTests.cmake
+++ b/tools/test/h5jam/CMakeTests.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
##############################################################################
##############################################################################
@@ -23,11 +34,11 @@
file (MAKE_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles")
foreach (h5_file ${HDF5_REFERENCE_TEST_FILES})
HDFTEST_COPY_FILE("${HDF5_TOOLS_TEST_H5JAM_SOURCE_DIR}/testfiles/${h5_file}" "${PROJECT_BINARY_DIR}/testfiles/${h5_file}" "h5jam_files")
- endforeach (h5_file ${HDF5_REFERENCE_TEST_FILES})
+ endforeach ()
foreach (txt_file ${HDF5_REFERENCE_TXT_FILES})
HDFTEST_COPY_FILE("${HDF5_TOOLS_TEST_H5JAM_SOURCE_DIR}/testfiles/${txt_file}" "${PROJECT_BINARY_DIR}/testfiles/${txt_file}" "h5jam_files")
- endforeach (txt_file ${HDF5_REFERENCE_TXT_FILES})
+ endforeach ()
add_custom_target(h5jam_files ALL COMMENT "Copying files needed by h5jam tests" DEPENDS ${h5jam_files_list})
##############################################################################
@@ -40,14 +51,14 @@
# TEST_H5JAM_OUTPUT
# For the purpose to verify only output & exitcode from h5jam
#
- MACRO (TEST_H5JAM_OUTPUT expectfile resultcode)
+ macro (TEST_H5JAM_OUTPUT expectfile resultcode)
# If using memchecker add tests without using scripts
if (HDF5_ENABLE_USING_MEMCHECKER)
add_test (NAME H5JAM-${expectfile} COMMAND $<TARGET_FILE:h5jam> ${ARGN})
if (NOT "${resultcode}" STREQUAL "0")
set_tests_properties (H5JAM-${expectfile} PROPERTIES WILL_FAIL "true")
- endif (NOT "${resultcode}" STREQUAL "0")
- else (HDF5_ENABLE_USING_MEMCHECKER)
+ endif ()
+ else ()
add_test (
NAME H5JAM-${expectfile}
COMMAND "${CMAKE_COMMAND}"
@@ -59,21 +70,21 @@
-D "TEST_REFERENCE=testfiles/${expectfile}.txt"
-P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
)
- endif (HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO (TEST_H5JAM_OUTPUT)
+ endif ()
+ endmacro ()
# ============================================================
# TEST_H5UNJAM_OUTPUT
# For the purpose to verify only output & exitcode from h5unjam
#
- MACRO (TEST_H5UNJAM_OUTPUT expectfile resultcode)
+ macro (TEST_H5UNJAM_OUTPUT expectfile resultcode)
# If using memchecker add tests without using scripts
if (HDF5_ENABLE_USING_MEMCHECKER)
add_test (NAME H5JAM-UNJAM-${expectfile} COMMAND $<TARGET_FILE:h5unjam> ${ARGN})
if (NOT "${resultcode}" STREQUAL "0")
set_tests_properties (H5JAM-UNJAM-${expectfile} PROPERTIES WILL_FAIL "true")
- endif (NOT "${resultcode}" STREQUAL "0")
- else (HDF5_ENABLE_USING_MEMCHECKER)
+ endif ()
+ else ()
add_test (
NAME H5JAM-UNJAM-${expectfile}
COMMAND "${CMAKE_COMMAND}"
@@ -85,10 +96,10 @@
-D "TEST_REFERENCE=testfiles/${expectfile}.txt"
-P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
)
- endif (HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO (TEST_H5UNJAM_OUTPUT)
+ endif ()
+ endmacro ()
- MACRO (CHECKFILE testname testdepends expected actual)
+ macro (CHECKFILE testname testdepends expected actual)
# If using memchecker add tests without using scripts
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
add_test (
@@ -117,10 +128,10 @@
-P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
)
set_tests_properties (H5JAM-${testname}-CHECKFILE-H5DMP_CMP PROPERTIES DEPENDS H5JAM-${testname}-CHECKFILE-H5DMP)
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO(CHECKFILE testname testdepends expected actual)
+ endif ()
+ endmacro()
- MACRO (UNJAMTEST testname setfile infile ufile chkfile outfile)
+ macro (UNJAMTEST testname setfile infile ufile chkfile outfile)
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
add_test (
NAME H5JAM-${testname}-UNJAM-SETUP-clear-objects
@@ -145,7 +156,7 @@
add_test (NAME H5JAM-${testname}-UNJAM COMMAND $<TARGET_FILE:h5unjam> -i ${infile} -u ${ufile} -o ${outfile})
set_tests_properties (H5JAM-${testname}-UNJAM PROPERTIES DEPENDS H5JAM-${testname}-UNJAM_D-clear-objects)
set (compare_test ${ufile})
- else (NOT "${ufile}" STREQUAL "NONE")
+ else ()
if (NOT "${ARGN}" STREQUAL "--delete")
add_test (
NAME H5JAM-${testname}-UNJAM
@@ -160,12 +171,12 @@
)
set_tests_properties (H5JAM-${testname}-UNJAM PROPERTIES DEPENDS H5JAM-${testname}-UNJAM-clear-objects)
set (compare_test "${outfile}.ufile.txt")
- else (NOT "${ARGN}" STREQUAL "--delete")
+ else ()
add_test (NAME H5JAM-${testname}-UNJAM COMMAND $<TARGET_FILE:h5unjam> -i ${infile} -o ${outfile})
set_tests_properties (H5JAM-${testname}-UNJAM PROPERTIES DEPENDS H5JAM-${testname}-UNJAM-clear-objects)
set (compare_test "")
- endif (NOT "${ARGN}" STREQUAL "--delete")
- endif (NOT "${ufile}" STREQUAL "NONE")
+ endif ()
+ endif ()
if (NOT "${compare_test}" STREQUAL "")
add_test (
NAME H5JAM-${testname}-UNJAM-CHECK_UB_1-clear-objects
@@ -190,7 +201,7 @@
-P "${HDF_RESOURCES_DIR}/userblockTest.cmake"
)
set_tests_properties (H5JAM-${testname}-UNJAM-CHECK_UB_1 PROPERTIES DEPENDS H5JAM-${testname}-UNJAM-CHECK_UB_1-clear-objects)
- endif (NOT "${compare_test}" STREQUAL "")
+ endif ()
add_test (
NAME H5JAM-${testname}-UNJAM-CHECK_NOUB
@@ -207,21 +218,21 @@
)
if (NOT "${compare_test}" STREQUAL "")
set_tests_properties (H5JAM-${testname}-UNJAM-CHECK_NOUB PROPERTIES DEPENDS H5JAM-${testname}-UNJAM-CHECK_UB_1)
- else (NOT "${compare_test}" STREQUAL "")
+ else ()
set_tests_properties (H5JAM-${testname}-UNJAM-CHECK_NOUB PROPERTIES DEPENDS H5JAM-${testname}-UNJAM)
- endif (NOT "${compare_test}" STREQUAL "")
+ endif ()
CHECKFILE (${testname} "H5JAM-${testname}-UNJAM-CHECK_NOUB" ${chkfile} ${outfile})
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO(UNJAMTEST testname infile ufile outfile)
+ endif ()
+ endmacro()
- MACRO (JAMTEST testname jamfile infile chkfile outfile)
+ macro (JAMTEST testname jamfile infile chkfile outfile)
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
add_test (
NAME H5JAM-${testname}-clear-objects
COMMAND ${CMAKE_COMMAND} -E remove ${outfile} ${infile}.cpy.h5
)
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
+ endif ()
add_test (NAME H5JAM-${testname} COMMAND $<TARGET_FILE:h5jam> -u testfiles/${jamfile} -i testfiles/${infile} -o ${outfile} ${ARGN})
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
set_tests_properties (H5JAM-${testname} PROPERTIES DEPENDS H5JAM-${testname}-clear-objects)
@@ -229,7 +240,7 @@
set (compare_orig testfiles/${infile})
if ("${ARGN}" STREQUAL "--clobber")
set (compare_orig "")
- endif ("${ARGN}" STREQUAL "--clobber")
+ endif ()
add_test (
NAME H5JAM-${testname}-CHECK_UB_1-clear-objects
@@ -255,10 +266,10 @@
)
set_tests_properties (H5JAM-${testname}-CHECK_UB_1 PROPERTIES DEPENDS H5JAM-${testname}-CHECK_UB_1-clear-objects)
CHECKFILE (${testname} "H5JAM-${testname}-CHECK_UB_1" ${chkfile} ${outfile})
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO (JAMTEST testname jamfile infile outfile)
+ endif ()
+ endmacro ()
- MACRO (JAMTEST_NONE testname jamfile infile setfile chkfile)
+ macro (JAMTEST_NONE testname jamfile infile setfile chkfile)
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
add_test (
NAME H5JAM-${testname}_NONE-clear-objects
@@ -284,7 +295,7 @@
set (compare_orig ${chkfile}.cpy.h5)
if ("${ARGN}" STREQUAL "--clobber")
set (compare_orig "")
- endif ("${ARGN}" STREQUAL "--clobber")
+ endif ()
add_test (
NAME H5JAM-${testname}_NONE-CHECK_UB_1-clear-objects
@@ -310,8 +321,8 @@
)
set_tests_properties (H5JAM-${testname}_NONE-CHECK_UB_1 PROPERTIES DEPENDS H5JAM-${testname}_NONE-CHECK_UB_1-clear-objects)
CHECKFILE (${testname} "H5JAM-${testname}_NONE-CHECK_UB_1" ${infile} ${chkfile})
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO (JAMTEST_NONE testname jamfile infile setfile chkfile)
+ endif ()
+ endmacro ()
##############################################################################
##############################################################################
diff --git a/tools/test/h5jam/Makefile.am b/tools/test/h5jam/Makefile.am
index 068f437..c07aeb2 100644
--- a/tools/test/h5jam/Makefile.am
+++ b/tools/test/h5jam/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
diff --git a/tools/test/h5jam/getub.c b/tools/test/h5jam/getub.c
index 4e02e6b..7cfde36 100644
--- a/tools/test/h5jam/getub.c
+++ b/tools/test/h5jam/getub.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "H5private.h"
diff --git a/tools/test/h5jam/h5jamgentest.c b/tools/test/h5jam/h5jamgentest.c
index a12b17a..8648f07 100644
--- a/tools/test/h5jam/h5jamgentest.c
+++ b/tools/test/h5jam/h5jamgentest.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/tools/test/h5jam/tellub.c b/tools/test/h5jam/tellub.c
index b4f87af..fad14b7 100644
--- a/tools/test/h5jam/tellub.c
+++ b/tools/test/h5jam/tellub.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <stdio.h>
diff --git a/tools/test/h5jam/testh5jam.sh.in b/tools/test/h5jam/testh5jam.sh.in
index fb6d9e2..3ae180b 100644
--- a/tools/test/h5jam/testh5jam.sh.in
+++ b/tools/test/h5jam/testh5jam.sh.in
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Tests for the h5jam/h5unjam tools
@@ -109,10 +107,10 @@ COPY_TESTFILES_TO_TESTDIR()
INODE_SDIR=`$LS -i -d $SDIR | $AWK -F' ' '{print $1}'`
INODE_DDIR=`$LS -i -d $TESTDIR | $AWK -F' ' '{print $1}'`
if [ "$INODE_SDIR" != "$INODE_DDIR" ]; then
- $CP -f $tstfile $TESTDIR
+ $CP -f $tstfile $TESTDIR
if [ $? -ne 0 ]; then
echo "Error: FAILED to copy $tstfile ."
-
+
# Comment out this to CREATE expected file
exit $EXIT_FAILURE
fi
@@ -156,19 +154,19 @@ SKIP() {
echo " -SKIP-"
}
-#
+#
# COMPARE_FILES a.h5 b.h5
-# Compare two files, skipping the first line. This is used to
+# Compare two files, skipping the first line. This is used to
# compare the output of the dumper, skipping the file name which
# is different.
# The result is stored in 'compval'.
#
cmpval=0;
COMPARE_FILES() {
- $AWK 'NR > 1' $1 > $1.cmp
- $AWK 'NR > 1' $2 > $2.cmp
- $CMP $1.cmp $2.cmp
- cmpval=$?
+ $AWK 'NR > 1' $1 > $1.cmp
+ $AWK 'NR > 1' $2 > $2.cmp
+ $CMP $1.cmp $2.cmp
+ cmpval=$?
rm -f $1.cmp $2.cmp
}
@@ -176,10 +174,10 @@ COMPARE_FILES() {
# Clean up named files.
CLEANUP() {
if test -z "$HDF5_NOCLEANUP"; then
- for i in $*
- do
- rm -f $i
- done
+ for i in $*
+ do
+ rm -f $i
+ done
fi
}
@@ -192,7 +190,7 @@ CLEANUP() {
# the "cat" command.
#
SETUP() {
- cat < $1 > $2
+ cat < $1 > $2
}
#
@@ -235,7 +233,7 @@ CHECKFILE() {
}
#
-# CHECK_UB file.h5 user_block_file origfile.h5
+# CHECK_UB file.h5 user_block_file origfile.h5
#
# Check the user block in 'file.h5' is the same as
# 'user_block' (allowing for padding).
@@ -245,73 +243,73 @@ CHECKFILE() {
# and the test file compared to:
# cat compare_ub user_block_file.
#
-# This test uses './getub' to extract the user block from
+# This test uses './getub' to extract the user block from
# 'file.h5', which is compared to the file described above.
#
# The result is set in variable 'result1'.
#
result1=0;
CHECK_UB_1() {
- hfile="$1"
- ufile="$2"
-
- # check for third argument (the original file)
- origfile="";
- if [ -n "$3" ];
- then
- origfile="$3"
- fi
-
- # find the length of the user block to check
- s1=`cat $ufile | wc -c | sed -e 's/ //g'`
- if [ "$s1" = "0" ];
- then
- echo "File "$ufile" is empty"
- result1=1;
- fi
-
- # Get the size of the original user block, if any.
- if [ -n "$origfile" ];
- then
- # 'tellub' calls H5Fget_user_block to get the size
- # of the user block
- s2=`$JAM_BIN/tellub $origfile`
- if [ "$s2" = "0" ];
- then
- size=$s1;
- cmpfile=$ufile
- else
- cmpfile="tt2"
- size=`expr $s2 + $s1`
- $JAM_BIN/getub -c $s2 $origfile > $cmpfile
- cat $ufile >> $cmpfile
- fi
- else
- # assume no user block
- s2="0"
- size=$s1;
- cmpfile=$ufile
- fi
-
- # Extract 'size' bytes from the front of 'hfile'
- # Compare to 'cmpfile', result is set in result1
- tfile="tt1"
- $JAM_BIN/getub -c $size $hfile > $tfile
- res=`cmp $cmpfile $tfile`
- if [ "$?" != "0" ];
- then
- echo $res
- result1=1;
- else
- result1=0;
- fi
-
- # clean up
- rm -f $tfile
- if [ "$s2" != "0" ] ;
- then
- rm -f $cmpfile
- fi
+ hfile="$1"
+ ufile="$2"
+
+ # check for third argument (the original file)
+ origfile="";
+ if [ -n "$3" ];
+ then
+ origfile="$3"
+ fi
+
+ # find the length of the user block to check
+ s1=`cat $ufile | wc -c | sed -e 's/ //g'`
+ if [ "$s1" = "0" ];
+ then
+ echo "File "$ufile" is empty"
+ result1=1;
+ fi
+
+ # Get the size of the original user block, if any.
+ if [ -n "$origfile" ];
+ then
+ # 'tellub' calls H5Fget_user_block to get the size
+ # of the user block
+ s2=`$JAM_BIN/tellub $origfile`
+ if [ "$s2" = "0" ];
+ then
+ size=$s1;
+ cmpfile=$ufile
+ else
+ cmpfile="tt2"
+ size=`expr $s2 + $s1`
+ $JAM_BIN/getub -c $s2 $origfile > $cmpfile
+ cat $ufile >> $cmpfile
+ fi
+ else
+ # assume no user block
+ s2="0"
+ size=$s1;
+ cmpfile=$ufile
+ fi
+
+ # Extract 'size' bytes from the front of 'hfile'
+ # Compare to 'cmpfile', result is set in result1
+ tfile="tt1"
+ $JAM_BIN/getub -c $size $hfile > $tfile
+ res=`cmp $cmpfile $tfile`
+ if [ "$?" != "0" ];
+ then
+ echo $res
+ result1=1;
+ else
+ result1=0;
+ fi
+
+ # clean up
+ rm -f $tfile
+ if [ "$s2" != "0" ] ;
+ then
+ rm -f $cmpfile
+ fi
}
@@ -323,25 +321,25 @@ CHECK_UB_1() {
result2=0;
CHECK_NOUB() {
- hfile="$1"
-
- # call 'ubsize' to get the size of the user block
- ubsize=`$JAM_BIN/tellub $hfile`
-
- if [ "$?" != "0" ];
- then
- # error
- result2=1;
- else
- if [ "$ubsize" = "0" ];
- then
- # pass
- result2=0;
- else
- # fail
- result2=1;
- fi
- fi
+ hfile="$1"
+
+ # call 'ubsize' to get the size of the user block
+ ubsize=`$JAM_BIN/tellub $hfile`
+
+ if [ "$?" != "0" ];
+ then
+ # error
+ result2=1;
+ else
+ if [ "$ubsize" = "0" ];
+ then
+ # pass
+ result2=0;
+ else
+ # fail
+ result2=1;
+ fi
+ fi
}
# JAMTEST user_block file.h5 [--clobber] [ofile.h5]
@@ -353,119 +351,119 @@ CHECK_NOUB() {
# 3. check the user block is correct in the output (Check_UB)
# If the user block is correct, print "PASSED", else "*FAILED*"
JAMTEST() {
- ufile="$1"
- ifile="$2"
- compare_test="" # the file to test
- compare_orig="" # the comparison to test against
- cleanup=""
-
- # sort out the arguments for the test and the check
- do_clobber="no"
- if [ "$3" = "--clobber" ];
- then
- # clobber overwrites any existing user block
- do_clobber="yes"
- clobber="--clobber"
- compare_orig=""
- if [ -z "$4" ];
- then
- # output goes to infile, compare ubfile to infile
- ofile=""
- compare_test="$ifile"
- else
- # output goes to $4, compare ofile to ubfile
- ofile="$4"
- compare_test="$ofile"
- fi
- else
- clobber=""
- # add user block to existing ub, if any
- if [ -z "$3" ];
- then
- # output goes to infile, compare ubfile to infile
- ofile=""
- compare_test="$ifile"
- cp $ifile xxofile.h5
- compare_orig="xxofile.h5"
- cleanup="$cleanup $compare_orig"
- else
- # output goes to $3, compare ofile to ubfile
- ofile="$3"
- compare_test="$ofile"
- compare_orig="$ifile"
- fi
- fi
-
- # call 'jam' with the appropriate arguments
- if [ -n "$ofile" ];
- then
- TESTING h5jam -u `basename $ufile` -i `basename $ifile` -o `basename $ofile` $clobber
- $JAM_BIN/$JAM -u $ufile -i $ifile -o $ofile $clobber
- else
- TESTING jam -u `basename $ufile` -i `basename $ifile` $clobber
- $JAM_BIN/$JAM -u $ufile -i $ifile $clobber
- fi
-
- #echo "CHECK_UB_1 $compare_test $ufile $compare_orig"
- CHECK_UB_1 $compare_test $ufile $compare_orig
-
- if [ "$result1" = "0" ] ;
- then
- echo " PASSED"
- else
- echo " *FAILED*"
- nerrors="`expr $nerrors + 1`"
- fi
- CLEANUP $cleanup
+ ufile="$1"
+ ifile="$2"
+ compare_test="" # the file to test
+ compare_orig="" # the comparison to test against
+ cleanup=""
+
+ # sort out the arguments for the test and the check
+ do_clobber="no"
+ if [ "$3" = "--clobber" ];
+ then
+ # clobber overwrites any existing user block
+ do_clobber="yes"
+ clobber="--clobber"
+ compare_orig=""
+ if [ -z "$4" ];
+ then
+ # output goes to infile, compare ubfile to infile
+ ofile=""
+ compare_test="$ifile"
+ else
+ # output goes to $4, compare ofile to ubfile
+ ofile="$4"
+ compare_test="$ofile"
+ fi
+ else
+ clobber=""
+ # add user block to existing ub, if any
+ if [ -z "$3" ];
+ then
+ # output goes to infile, compare ubfile to infile
+ ofile=""
+ compare_test="$ifile"
+ cp $ifile xxofile.h5
+ compare_orig="xxofile.h5"
+ cleanup="$cleanup $compare_orig"
+ else
+ # output goes to $3, compare ofile to ubfile
+ ofile="$3"
+ compare_test="$ofile"
+ compare_orig="$ifile"
+ fi
+ fi
+
+ # call 'jam' with the appropriate arguments
+ if [ -n "$ofile" ];
+ then
+ TESTING h5jam -u `basename $ufile` -i `basename $ifile` -o `basename $ofile` $clobber
+ $JAM_BIN/$JAM -u $ufile -i $ifile -o $ofile $clobber
+ else
+ TESTING jam -u `basename $ufile` -i `basename $ifile` $clobber
+ $JAM_BIN/$JAM -u $ufile -i $ifile $clobber
+ fi
+
+ #echo "CHECK_UB_1 $compare_test $ufile $compare_orig"
+ CHECK_UB_1 $compare_test $ufile $compare_orig
+
+ if [ "$result1" = "0" ] ;
+ then
+ echo " PASSED"
+ else
+ echo " *FAILED*"
+ nerrors="`expr $nerrors + 1`"
+ fi
+ CLEANUP $cleanup
}
-
+
# UNJAMTEST file.h5 [- | --delete] ofile
#
# Test the 'unjam' tool
#
###fix the working directory here and in jamtest
UNJAMTEST () {
- infile="$1"
- ofile="$3"
- if [ "$2" = "-" ];
- then
- uofile="uofile"
- TESTING h5unjam -i `basename $infile` -o `basename $ofile` "> "`basename $uofile`
- $JAM_BIN/$UNJAM -i $infile -o $ofile > $uofile
- else
- if [ "$2" = "--delete" ];
- then
- uofile="none"
- TESTING h5unjam -i `basename $infile` -o `basename $ofile` --delete
- $JAM_BIN/$UNJAM -i $infile -o $ofile --delete
-
- else
- uofile="$2"
- TESTING h5unjam -i `basename $infile` -u `basename $uofile` -o `basename $ofile`
- $JAM_BIN/$UNJAM -i $infile -u $uofile -o $ofile
- fi
- fi
-
- result1=0
- result2=0
- cleanup=""
- if [ "$uofile" != "none" ];
- then
- # sets result1
- CHECK_UB_1 $infile $uofile
- CLEANUP $uofile
- fi
-
- # sets result2
- CHECK_NOUB $ofile
-
- if [ "$result1" = "0" -a "$result2" = "0" ];
- then
- echo " PASSED"
- else
- echo " *FAILED*"
- nerrors="`expr $nerrors + 1`"
- fi
+ infile="$1"
+ ofile="$3"
+ if [ "$2" = "-" ];
+ then
+ uofile="uofile"
+ TESTING h5unjam -i `basename $infile` -o `basename $ofile` "> "`basename $uofile`
+ $JAM_BIN/$UNJAM -i $infile -o $ofile > $uofile
+ else
+ if [ "$2" = "--delete" ];
+ then
+ uofile="none"
+ TESTING h5unjam -i `basename $infile` -o `basename $ofile` --delete
+ $JAM_BIN/$UNJAM -i $infile -o $ofile --delete
+
+ else
+ uofile="$2"
+ TESTING h5unjam -i `basename $infile` -u `basename $uofile` -o `basename $ofile`
+ $JAM_BIN/$UNJAM -i $infile -u $uofile -o $ofile
+ fi
+ fi
+
+ result1=0
+ result2=0
+ cleanup=""
+ if [ "$uofile" != "none" ];
+ then
+ # sets result1
+ CHECK_UB_1 $infile $uofile
+ CLEANUP $uofile
+ fi
+
+ # sets result2
+ CHECK_NOUB $ofile
+
+ if [ "$result1" = "0" -a "$result2" = "0" ];
+ then
+ echo " PASSED"
+ else
+ echo " *FAILED*"
+ nerrors="`expr $nerrors + 1`"
+ fi
}
@@ -502,28 +500,30 @@ TOOLTEST_OUTPUT() {
STDOUT_FILTER $actual
cp $actual_err $actual_err_sav
STDERR_FILTER $actual_err
- # combine stderr to stdout for output compare
+ # combine stderr to stdout for output compare
cat $actual_err >> $actual
if [ ! -f $expect ]; then
- # Create the expect file if it doesn't yet exist.
+ # Create the expect file if it doesn't yet exist.
echo " CREATED"
cp $actual $expect
- rm -f $actual $actual_sav $actual_err $actual_err_sav
+ echo " Expected result (*.ls) missing"
+ nerrors="`expr $nerrors + 1`"
+ rm -f $actual $actual_sav $actual_err $actual_err_sav
elif $CMP $expect $actual; then
echo " PASSED"
- rm -f $actual $actual_sav $actual_err $actual_err_sav
+ rm -f $actual $actual_sav $actual_err $actual_err_sav
else
echo "*FAILED*"
- echo " Expected result differs from actual result"
- nerrors="`expr $nerrors + 1`"
+ echo " Expected result differs from actual result"
+ nerrors="`expr $nerrors + 1`"
test yes = "$verbose" && $DIFF $expect $actual |sed 's/^/ /'
fi
}
##############################################################################
##############################################################################
-### T H E T E S T S ###
+### T H E T E S T S ###
##############################################################################
##############################################################################
# prepare for test
@@ -552,19 +552,19 @@ CHECKFILE $TESTDIR/tall.h5 ta5.h5
CLEANUP ta5.h5
SETUP $TESTDIR/tall.h5 ta6.h5
-JAMTEST $TESTDIR/u10.txt ta6.h5
+JAMTEST $TESTDIR/u10.txt ta6.h5
CHECKFILE $TESTDIR/tall.h5 ta6.h5
CLEANUP ta6.h5
SETUP $TESTDIR/tall.h5 ta7.h5
-JAMTEST $TESTDIR/u511.txt ta7.h5
+JAMTEST $TESTDIR/u511.txt ta7.h5
CHECKFILE $TESTDIR/tall.h5 ta7.h5
CLEANUP ta7.h5
SETUP $TESTDIR/tall.h5 ta8.h5
-JAMTEST $TESTDIR/u512.txt ta8.h5
+JAMTEST $TESTDIR/u512.txt ta8.h5
CHECKFILE $TESTDIR/tall.h5 ta8.h5
CLEANUP ta8.h5
SETUP $TESTDIR/tall.h5 ta9.h5
-JAMTEST $TESTDIR/u513.txt ta9.h5
+JAMTEST $TESTDIR/u513.txt ta9.h5
CHECKFILE $TESTDIR/tall.h5 ta9.h5
CLEANUP ta9.h5
@@ -594,29 +594,29 @@ JAMTEST $TESTDIR/u513.txt $TESTDIR/twithub513.h5 tax9.h5
CHECKFILE $TESTDIR/tall.h5 tax9.h5
CLEANUP tax9.h5
-JAMTEST $TESTDIR/u10.txt $TESTDIR/twithub.h5 --clobber taz2.h5
+JAMTEST $TESTDIR/u10.txt $TESTDIR/twithub.h5 --clobber taz2.h5
CHECKFILE $TESTDIR/tall.h5 taz2.h5
CLEANUP taz2.h5
JAMTEST $TESTDIR/u511.txt $TESTDIR/twithub.h5 --clobber taz3.h5
CHECKFILE $TESTDIR/tall.h5 taz3.h5
CLEANUP taz3.h5
-JAMTEST $TESTDIR/u512.txt $TESTDIR/twithub.h5 --clobber taz4.h5
+JAMTEST $TESTDIR/u512.txt $TESTDIR/twithub.h5 --clobber taz4.h5
CHECKFILE $TESTDIR/tall.h5 taz4.h5
CLEANUP taz4.h5
-JAMTEST $TESTDIR/u513.txt $TESTDIR/twithub.h5 --clobber taz5.h5
+JAMTEST $TESTDIR/u513.txt $TESTDIR/twithub.h5 --clobber taz5.h5
CHECKFILE $TESTDIR/tall.h5 taz5.h5
CLEANUP taz5.h5
-JAMTEST $TESTDIR/u10.txt $TESTDIR/twithub513.h5 --clobber taz6.h5
+JAMTEST $TESTDIR/u10.txt $TESTDIR/twithub513.h5 --clobber taz6.h5
CHECKFILE $TESTDIR/tall.h5 taz6.h5
CLEANUP taz6.h5
JAMTEST $TESTDIR/u511.txt $TESTDIR/twithub513.h5 --clobber taz7.h5
CHECKFILE $TESTDIR/tall.h5 taz7.h5
CLEANUP taz7.h5
-JAMTEST $TESTDIR/u512.txt $TESTDIR/twithub513.h5 --clobber taz8.h5
+JAMTEST $TESTDIR/u512.txt $TESTDIR/twithub513.h5 --clobber taz8.h5
CHECKFILE $TESTDIR/tall.h5 taz8.h5
CLEANUP taz8.h5
-JAMTEST $TESTDIR/u513.txt $TESTDIR/twithub513.h5 --clobber taz9.h5
+JAMTEST $TESTDIR/u513.txt $TESTDIR/twithub513.h5 --clobber taz9.h5
CHECKFILE $TESTDIR/tall.h5 taz9.h5
CLEANUP taz9.h5
@@ -655,7 +655,7 @@ CHECKFILE $TESTDIR/tall.h5 tay9.h5
CLEANUP tay9.h5
#---------------------------------
-# Testing h5unjam
+# Testing h5unjam
#---------------------------------
# help page
TOOLTEST_OUTPUT UNJAM h5unjam-help.txt 0 -h
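The CHECK_UB_1 and CHECK_NOUB helpers in the script above lean on the getub and tellub test utilities to extract and measure a file's user block. As a point of reference only (this sketch is not part of the patch, and the file name is a placeholder), the user-block size those helpers reason about is the one recorded in the file-creation property list and can be queried with the public API like this:

/* Sketch only, not part of this patch: query the user-block size of an
 * HDF5 file; "somefile.h5" is a placeholder name. */
#include <stdio.h>
#include "hdf5.h"

int
main(void)
{
    hsize_t usize = 0;
    hid_t   file, fcpl;

    file = H5Fopen("somefile.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
    if (file < 0)
        return 1;

    /* The user-block size lives in the file-creation property list */
    fcpl = H5Fget_create_plist(file);
    H5Pget_userblock(fcpl, &usize);
    printf("user block: %llu bytes\n", (unsigned long long)usize);

    H5Pclose(fcpl);
    H5Fclose(file);
    return 0;
}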
diff --git a/tools/test/h5ls/CMakeLists.txt b/tools/test/h5ls/CMakeLists.txt
index 48894fb..c21ca92 100644
--- a/tools/test/h5ls/CMakeLists.txt
+++ b/tools/test/h5ls/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_TOOLS_TEST_H5LS)
#-----------------------------------------------------------------------------
@@ -6,6 +6,34 @@ PROJECT (HDF5_TOOLS_TEST_H5LS)
#-----------------------------------------------------------------------------
INCLUDE_DIRECTORIES (${HDF5_TOOLS_DIR}/lib)
+ #-----------------------------------------------------------------------------
+ # If plugin library tests can be tested
+ #-----------------------------------------------------------------------------
+ set (HDF5_TOOL_PLUGIN_LIB_CORENAME "dynlibls")
+ set (HDF5_TOOL_PLUGIN_LIB_NAME "${HDF5_EXTERNAL_LIB_PREFIX}${HDF5_TOOL_PLUGIN_LIB_CORENAME}")
+ set (HDF5_TOOL_PLUGIN_LIB_TARGET ${HDF5_TOOL_PLUGIN_LIB_CORENAME})
+ add_definitions (${HDF_EXTRA_C_FLAGS})
+ INCLUDE_DIRECTORIES (${HDF5_SRC_DIR})
+
+ add_library (${HDF5_TOOL_PLUGIN_LIB_TARGET} SHARED dynlib_ls.c)
+ TARGET_C_PROPERTIES (${HDF5_TOOL_PLUGIN_LIB_TARGET} SHARED " " " ")
+ target_link_libraries (${HDF5_TOOL_PLUGIN_LIB_TARGET} ${HDF5_TEST_LIB_TARGET})
+ H5_SET_LIB_OPTIONS (${HDF5_TOOL_PLUGIN_LIB_TARGET} ${HDF5_TOOL_PLUGIN_LIB_NAME} SHARED ${HDF5_PACKAGE_SOVERSION})
+
+ # make plugins dir
+ file (MAKE_DIRECTORY "${CMAKE_BINARY_DIR}/plugins")
+ #-----------------------------------------------------------------------------
+ # Copy plugin library to a plugins folder
+ #-----------------------------------------------------------------------------
+ add_custom_command (
+ TARGET ${HDF5_TOOL_PLUGIN_LIB_TARGET}
+ POST_BUILD
+ COMMAND ${CMAKE_COMMAND}
+ ARGS -E copy_if_different
+ "$<TARGET_FILE:${HDF5_TOOL_PLUGIN_LIB_TARGET}>"
+ "${CMAKE_BINARY_DIR}/plugins/$<TARGET_FILE_NAME:${HDF5_TOOL_PLUGIN_LIB_TARGET}>"
+ )
+
include (CMakeTests.cmake)
include (CMakeTestsVDS.cmake)
diff --git a/tools/test/h5ls/CMakeTests.cmake b/tools/test/h5ls/CMakeTests.cmake
index e7a5dd6..5f61eab 100644
--- a/tools/test/h5ls/CMakeTests.cmake
+++ b/tools/test/h5ls/CMakeTests.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
##############################################################################
##############################################################################
@@ -29,6 +40,7 @@
${HDF5_TOOLS_DIR}/testfiles/tslink.h5
${HDF5_TOOLS_DIR}/testfiles/tsoftlinks.h5
${HDF5_TOOLS_DIR}/testfiles/tstr.h5
+ ${HDF5_TOOLS_DIR}/testfiles/tudfilter.h5
${HDF5_TOOLS_DIR}/testfiles/tudlink.h5
${HDF5_TOOLS_DIR}/testfiles/tvldtypes1.h5
${HDF5_TOOLS_DIR}/testfiles/tdset_idx.h5
@@ -87,6 +99,7 @@
${HDF5_TOOLS_DIR}/testfiles/tsaf.ls
${HDF5_TOOLS_DIR}/testfiles/tslink-1.ls
${HDF5_TOOLS_DIR}/testfiles/tstr-1.ls
+ ${HDF5_TOOLS_DIR}/testfiles/tudfilter.ls
${HDF5_TOOLS_DIR}/testfiles/tudlink-1.ls
${HDF5_TOOLS_DIR}/testfiles/tvldtypes1.ls
${HDF5_TOOLS_DIR}/testfiles/tvldtypes2le.ls
@@ -100,7 +113,7 @@
foreach (listfiles ${LIST_HDF5_TEST_FILES} ${LIST_OTHER_TEST_FILES})
get_filename_component(fname "${listfiles}" NAME)
HDFTEST_COPY_FILE("${listfiles}" "${PROJECT_BINARY_DIR}/testfiles/${fname}" "h5ls_files")
- endforeach (listfiles ${LIST_HDF5_TEST_FILES} ${LIST_OTHER_TEST_FILES})
+ endforeach ()
add_custom_target(h5ls_files ALL COMMENT "Copying files needed by h5ls tests" DEPENDS ${h5ls_files_list})
##############################################################################
@@ -109,7 +122,7 @@
##############################################################################
##############################################################################
- MACRO (ADD_H5_TEST resultfile resultcode)
+ macro (ADD_H5_TEST resultfile resultcode)
# If using memchecker add tests without using scripts
if (HDF5_ENABLE_USING_MEMCHECKER)
add_test (NAME H5LS-${resultfile} COMMAND $<TARGET_FILE:h5ls> ${ARGN})
@@ -120,7 +133,7 @@
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (H5LS-${resultfile} PROPERTIES DEPENDS ${last_test})
endif ()
- else (HDF5_ENABLE_USING_MEMCHECKER)
+ else ()
add_test (
NAME H5LS-${resultfile}
COMMAND "${CMAKE_COMMAND}"
@@ -132,8 +145,35 @@
-D "TEST_REFERENCE=${resultfile}.ls"
-P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
)
- endif (HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO (ADD_H5_TEST file)
+ endif ()
+ endmacro ()
+
+ macro (ADD_H5_UD_TEST testname resultcode resultfile)
+ if (NOT HDF5_ENABLE_USING_MEMCHECKER)
+ # Remove any output file left over from previous test run
+ add_test (
+ NAME H5LS_UD-${testname}-clearall-objects
+ COMMAND ${CMAKE_COMMAND}
+ -E remove
+ testfiles/${resultfile}.out
+ testfiles/${resultfile}.out.err
+ )
+ add_test (
+ NAME H5LS_UD-${testname}
+ COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:h5ls>"
+ -D "TEST_ARGS=${ARGN}"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}/testfiles"
+ -D "TEST_OUTPUT=${resultfile}.out"
+ -D "TEST_EXPECT=${resultcode}"
+ -D "TEST_REFERENCE=${resultfile}.ls"
+ -D "TEST_ENV_VAR=HDF5_PLUGIN_PATH"
+ -D "TEST_ENV_VALUE=${CMAKE_BINARY_DIR}/plugins"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+ set_tests_properties (H5LS_UD-${testname} PROPERTIES DEPENDS H5LS_UD-${testname}-clearall-objects)
+ endif ()
+ endmacro ()
##############################################################################
##############################################################################
@@ -257,9 +297,22 @@
set_tests_properties (H5LS-clearall-objects PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles")
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (H5LS-clearall-objects PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
set (last_test "H5LS-clearall-objects")
- endif (HDF5_ENABLE_USING_MEMCHECKER)
+ endif ()
+
+# See which filters are usable (and skip tests for filters we
+# don't have). Do this by searching H5pubconf.h to see which
+# filters are defined.
+
+# detect whether the encoder is present.
+ if (H5_HAVE_FILTER_DEFLATE)
+ set (USE_FILTER_DEFLATE "true")
+ endif ()
+
+ if (H5_HAVE_FILTER_SZIP)
+ set (USE_FILTER_SZIP "true")
+ endif ()
# test the help syntax
ADD_H5_TEST (help-1 0 -w80 -h)
@@ -370,9 +423,9 @@
# ( HDFFV-7838, )
if (H5_WORDS_BIGENDIAN)
ADD_H5_TEST (tattrreg_be 0 -w80 -v -d tattrreg.h5)
- else (H5_WORDS_BIGENDIAN)
+ else ()
ADD_H5_TEST (tattrreg_le 0 -w80 -v -d tattrreg.h5)
- endif (H5_WORDS_BIGENDIAN)
+ endif ()
# test for non-existing file
ADD_H5_TEST (nosuchfile 1 nosuchfile.h5)
@@ -380,18 +433,25 @@
# test for variable length data types in verbose mode
if (H5_WORDS_BIGENDIAN)
ADD_H5_TEST (tvldtypes2be 0 -v tvldtypes1.h5)
- else (H5_WORDS_BIGENDIAN)
+ else ()
ADD_H5_TEST (tvldtypes2le 0 -v tvldtypes1.h5)
- endif (H5_WORDS_BIGENDIAN)
+ endif ()
# test for dataset region references data types in verbose mode
if (H5_WORDS_BIGENDIAN)
ADD_H5_TEST (tdataregbe 0 -v tdatareg.h5)
- else (H5_WORDS_BIGENDIAN)
+ else ()
ADD_H5_TEST (tdataregle 0 -v tdatareg.h5)
- endif (H5_WORDS_BIGENDIAN)
+ endif ()
# test for file with datasets that use Fixed Array chunk indices
-#echo "***skip testing tdset_idx.h5"
-ADD_H5_TEST (tdset_idx 0 -w80 -d tdset_idx.h5)
-
+ if (USE_FILTER_DEFLATE)
+ # data read internal filters
+ ADD_H5_TEST (tdset_idx 0 -w80 -d tdset_idx.h5)
+ endif ()
+
+
+##############################################################################
+### P L U G I N T E S T S
+##############################################################################
+ADD_H5_UD_TEST (h5ls_plugin_test 0 tudfilter -w80 -d tudfilter.h5)
diff --git a/tools/test/h5ls/CMakeTestsVDS.cmake b/tools/test/h5ls/CMakeTestsVDS.cmake
index 72e14a1..4a665a5 100644
--- a/tools/test/h5ls/CMakeTestsVDS.cmake
+++ b/tools/test/h5ls/CMakeTestsVDS.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
##############################################################################
##############################################################################
@@ -49,7 +60,7 @@
foreach (listfiles ${LIST_HDF5_TEST_FILES} ${LIST_OTHER_TEST_FILES})
get_filename_component(fname "${listfiles}" NAME)
HDFTEST_COPY_FILE("${HDF5_TOOLS_DIR}/testfiles/vds/${listfiles}" "${PROJECT_BINARY_DIR}/testfiles/vds/${fname}" "h5ls_vds_files")
- endforeach (listfiles ${LIST_HDF5_TEST_FILES} ${LIST_OTHER_TEST_FILES})
+ endforeach ()
add_custom_target(h5ls_vds_files ALL COMMENT "Copying files needed by h5ls_vds tests" DEPENDS ${h5ls_vds_files_list})
##############################################################################
@@ -58,7 +69,7 @@
##############################################################################
##############################################################################
- MACRO (ADD_H5_VDS_TEST resultfile resultcode)
+ macro (ADD_H5_VDS_TEST resultfile resultcode)
# If using memchecker add tests without using scripts
if (HDF5_ENABLE_USING_MEMCHECKER)
add_test (NAME H5LS-${resultfile} COMMAND $<TARGET_FILE:h5ls> ${ARGN})
@@ -69,7 +80,7 @@
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (H5LS-${resultfile} PROPERTIES DEPENDS ${last_test})
endif ()
- else (HDF5_ENABLE_USING_MEMCHECKER)
+ else ()
add_test (
NAME H5LS-${resultfile}
COMMAND "${CMAKE_COMMAND}"
@@ -81,8 +92,8 @@
-D "TEST_REFERENCE=${resultfile}.ls"
-P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
)
- endif (HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO (ADD_H5_VDS_TEST file)
+ endif ()
+ endmacro ()
##############################################################################
##############################################################################
@@ -124,9 +135,9 @@
set_tests_properties (H5LS_VDS-clearall-objects PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles/vds")
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (H5LS_VDS-clearall-objects PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
set (last_test "H5LS_VDS-clearall-objects")
- endif (HDF5_ENABLE_USING_MEMCHECKER)
+ endif ()
ADD_H5_VDS_TEST (tvds-1 0 -w80 -v -S 1_vds.h5)
ADD_H5_VDS_TEST (tvds-2 0 -w80 -v -S 2_vds.h5)
diff --git a/tools/test/h5ls/Makefile.am b/tools/test/h5ls/Makefile.am
index 6f3d3dd..8ace14d 100644
--- a/tools/test/h5ls/Makefile.am
+++ b/tools/test/h5ls/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
@@ -27,8 +25,29 @@ AM_CPPFLAGS+=-I$(top_srcdir)/src -I$(top_srcdir)/tools/lib
TEST_SCRIPT=testh5ls.sh testh5lsvds.sh
check_SCRIPTS=$(TEST_SCRIPT)
SCRIPT_DEPEND=../../src/h5ls/h5ls$(EXEEXT)
+if HAVE_SHARED_CONDITIONAL
+if USE_PLUGINS_CONDITIONAL
+ TEST_SCRIPT += h5ls_plugin.sh
+endif
+endif
# All programs depend on the hdf5 and h5tools libraries
LDADD=$(LIBH5TOOLS) $(LIBHDF5)
+if HAVE_SHARED_CONDITIONAL
+ # Build it as shared library if configure is enabled for shared library.
+ dyn_LTLIBRARIES=libdynlibls.la
+ libdynlibls_la_SOURCES=dynlib_ls.c
+ libdynlibls_la_LDFLAGS = -avoid-version -module -shared -export-dynamic
+
+libdynlibls.la: $(libdynlibls_la_OBJECTS) $(libdynlibls_la_DEPENDENCIES) $(EXTRA_libdynlibls_la_DEPENDENCIES)
+ $(AM_V_CCLD)$(libdynlibls_la_LINK) $(am_libdynlibls_la_rpath) $(libdynlibls_la_OBJECTS) $(libdynlibls_la_LIBADD)
+
+#install-exec-hook:
+# $(RM) $(DESTDIR)$(dyndir)/*dynlib*
+endif
+
+
+DISTCLEANFILES=h5ls_plugin.sh
+
include $(top_srcdir)/config/conclude.am
diff --git a/tools/test/h5ls/dynlib_ls.c b/tools/test/h5ls/dynlib_ls.c
new file mode 100644
index 0000000..571452e
--- /dev/null
+++ b/tools/test/h5ls/dynlib_ls.c
@@ -0,0 +1,89 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
+/*
+ * Purpose: Tests the plugin module (H5PL)
+ */
+
+#include <stdlib.h>
+#include <stdio.h>
+#include "H5PLextern.h"
+
+#define H5Z_FILTER_DYNLIBUD 300
+#define MULTIPLIER 3
+
+static size_t H5Z_filter_dynlibud(unsigned int flags, size_t cd_nelmts,
+ const unsigned int *cd_values, size_t nbytes, size_t *buf_size, void **buf);
+
+/* This message derives from H5Z */
+const H5Z_class2_t H5Z_DYNLIBUD[1] = {{
+ H5Z_CLASS_T_VERS, /* H5Z_class_t version */
+ H5Z_FILTER_DYNLIBUD, /* Filter id number */
+ 1, 1, /* Encoding and decoding enabled */
+ "dynlibud", /* Filter name for debugging */
+ NULL, /* The "can apply" callback */
+ NULL, /* The "set local" callback */
+ (H5Z_func_t)H5Z_filter_dynlibud, /* The actual filter function */
+}};
+
+H5PL_type_t H5PLget_plugin_type(void) {return H5PL_TYPE_FILTER;}
+const void *H5PLget_plugin_info(void) {return H5Z_DYNLIBUD;}
+
+/*-------------------------------------------------------------------------
+ * Function: H5Z_filter_dynlibud
+ *
+ * Purpose:     A dynlibud filter method that adds MULTIPLIER to each data
+ *              byte during write and subtracts it during read. It will be
+ *              built as a shared library and loaded at run time (via
+ *              HDF5_PLUGIN_PATH) by the h5ls plugin test.
+ *
+ * Return: Success: Data chunk size
+ *
+ * Failure: 0
+ *-------------------------------------------------------------------------
+ */
+static size_t
+H5Z_filter_dynlibud(unsigned int flags, size_t cd_nelmts,
+ const unsigned int *cd_values, size_t nbytes,
+ size_t *buf_size, void **buf)
+{
+ char *int_ptr = (char *)*buf; /* Pointer to the data values */
+ size_t buf_left = *buf_size; /* Amount of data buffer left to process */
+
+ /* Check for the correct number of parameters */
+ if(cd_nelmts > 0)
+ return(0);
+
+ /* Assignment to eliminate unused parameter warning. */
+ cd_values = cd_values;
+
+ if(flags & H5Z_FLAG_REVERSE) { /*read*/
+        /* Subtract MULTIPLIER from each original byte value */
+ while(buf_left > 0) {
+ char temp = *int_ptr;
+ *int_ptr = temp - MULTIPLIER;
+ int_ptr++;
+ buf_left -= sizeof(*int_ptr);
+ } /* end while */
+ } /* end if */
+ else { /*write*/
+        /* Add MULTIPLIER to each original byte value */
+ while(buf_left > 0) {
+ char temp = *int_ptr;
+ *int_ptr = temp + MULTIPLIER;
+ int_ptr++;
+ buf_left -= sizeof(*int_ptr);
+ } /* end while */
+ } /* end else */
+
+ return nbytes;
+} /* end H5Z_filter_dynlibud() */
+
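For context, the new dynlibud filter above only ships the encode/decode callback; the data file it is tested against (tudfilter.h5) is generated elsewhere in the tools test generators. A minimal sketch of how such a file could be written, assuming the plugin is reachable through HDF5_PLUGIN_PATH so the encoder can be loaded (file and dataset names are illustrative, error checking omitted):

/* Sketch only, not part of this patch. Writes a chunked dataset with the
 * user-defined filter (ID 300) applied; assumes the dynlibud plugin can be
 * found via HDF5_PLUGIN_PATH at write time. */
#include "hdf5.h"

int
main(void)
{
    hsize_t dims[1]   = {256};
    hsize_t chunks[1] = {64};
    int     data[256];
    hid_t   file, space, dcpl, dset;
    int     i;

    for (i = 0; i < 256; i++)
        data[i] = i;

    file  = H5Fcreate("tudfilter.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT);
    space = H5Screate_simple(1, dims, NULL);

    /* Filters can only be applied to chunked datasets */
    dcpl = H5Pcreate(H5P_DATASET_CREATE);
    H5Pset_chunk(dcpl, 1, chunks);
    H5Pset_filter(dcpl, 300 /* H5Z_FILTER_DYNLIBUD */, H5Z_FLAG_MANDATORY, 0, NULL);

    dset = H5Dcreate2(file, "dynlibud" /* placeholder name */, H5T_NATIVE_INT,
                      space, H5P_DEFAULT, dcpl, H5P_DEFAULT);
    H5Dwrite(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);

    H5Dclose(dset);
    H5Pclose(dcpl);
    H5Sclose(space);
    H5Fclose(file);
    return 0;
}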
diff --git a/tools/test/h5ls/h5ls_plugin.sh.in b/tools/test/h5ls/h5ls_plugin.sh.in
new file mode 100644
index 0000000..c89269d
--- /dev/null
+++ b/tools/test/h5ls/h5ls_plugin.sh.in
@@ -0,0 +1,253 @@
+#! /bin/sh
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
+srcdir=@srcdir@
+TOP_BUILDDIR=@top_builddir@
+
+# Determine backward compatibility options enabled
+DEPRECATED_SYMBOLS="@DEPRECATED_SYMBOLS@"
+
+EXIT_SUCCESS=0
+EXIT_FAILURE=1
+
+H5LS=../../src/h5ls/h5ls # The tool name
+H5LS_BIN=`pwd`/$H5LS # The path of the tool binary
+
+nerrors=0
+verbose=yes
+h5haveexitcode=yes # default is yes
+
+TEST_NAME=ud_plugin
+FROM_DIR=`pwd`/.libs
+PLUGIN_LIB="$FROM_DIR/libdynlibls.*"
+PLUGIN_LIBDIR=testdir3
+RM='rm -rf'
+
+GREP='grep'
+CMP='cmp -s'
+DIFF='diff -c'
+CP='cp'
+DIRNAME='dirname'
+LS='ls'
+AWK='awk'
+
+# source dirs
+SRC_TOOLS="$srcdir/../.."
+
+# testfiles source dirs for tools
+SRC_TOOLS_TESTFILES="$SRC_TOOLS/testfiles"
+SRC_H5LS_TESTFILES="$SRC_TOOLS_TESTFILES"
+
+TESTDIR=./testplug
+test -d $TESTDIR || mkdir $TESTDIR
+
+######################################################################
+# test files
+# --------------------------------------------------------------------
+# All the test files are copied from the source directory to the test directory.
+# NOTE: Keep this framework to add/remove test files.
+#      Any test files from other tools can be used in this framework.
+#      This list is also used to check that the files exist.
+#      Entries beginning with '#' (spaces are ignored) are treated as comments.
+# --------------------------------------------------------------------
+LIST_HDF5_TEST_FILES="
+$SRC_TOOLS_TESTFILES/tudfilter.h5
+$SRC_TOOLS_TESTFILES/tudfilter.ls
+"
+
+# RUNSERIAL is used. Check if it can return exit code from executable correctly.
+if [ -n "$RUNSERIAL_NOEXITCODE" ]; then
+ echo "***Warning*** Serial Exit Code is not passed back to shell correctly."
+ echo "***Warning*** Exit code checking is skipped."
+ h5haveexitcode=no
+fi
+
+# Main Body
+# Create the test directories if they do not exist yet.
+test -d $PLUGIN_LIBDIR || mkdir -p $PLUGIN_LIBDIR
+if [ $? != 0 ]; then
+ echo "Failed to create test directory($PLUGIN_LIBDIR)"
+ exit $EXIT_FAILURE
+fi
+
+# copy plugin library for test
+$CP $PLUGIN_LIB $PLUGIN_LIBDIR
+if [ $? != 0 ]; then
+ echo "Failed to copy plugin library ($PLUGIN_LIB) for test."
+ exit $EXIT_FAILURE
+fi
+
+# setup plugin path
+ENVCMD="env HDF5_PLUGIN_PATH=../${PLUGIN_LIBDIR}"
+
+#
+# copy test files and expected output files from source dirs to test dir
+#
+COPY_TESTFILES="$LIST_HDF5_TEST_FILES"
+
+COPY_TESTFILES_TO_TESTDIR()
+{
+    # Copy test files. Use -f to make sure we get a fresh copy.
+ for tstfile in $COPY_TESTFILES
+ do
+ # ignore '#' comment
+ echo $tstfile | tr -d ' ' | grep '^#' > /dev/null
+ RET=$?
+ if [ $RET -eq 1 ]; then
+ # skip cp if srcdir is same as destdir
+            # this occurs when the build/test is performed in the source
+            # dir, which would make the cp fail
+ SDIR=`$DIRNAME $tstfile`
+ INODE_SDIR=`$LS -i -d $SDIR | $AWK -F' ' '{print $1}'`
+ INODE_DDIR=`$LS -i -d $TESTDIR | $AWK -F' ' '{print $1}'`
+ if [ "$INODE_SDIR" != "$INODE_DDIR" ]; then
+ $CP -f $tstfile $TESTDIR
+ if [ $? -ne 0 ]; then
+ echo "Error: FAILED to copy $tstfile ."
+
+ # Comment out this to CREATE expected file
+ exit $EXIT_FAILURE
+ fi
+ fi
+ fi
+ done
+}
+
+CLEAN_TESTFILES_AND_TESTDIR()
+{
+ # skip rm if srcdir is same as destdir
+    # this occurs when the build/test is performed in the source dir,
+    # where rm would remove the source files
+ SDIR=$SRC_H5LS_TESTFILES
+ INODE_SDIR=`$LS -i -d $SDIR | $AWK -F' ' '{print $1}'`
+ INODE_DDIR=`$LS -i -d $TESTDIR | $AWK -F' ' '{print $1}'`
+ if [ "$INODE_SDIR" != "$INODE_DDIR" ]; then
+ $RM $TESTDIR
+ fi
+}
+
+# Print a $* message left justified in a field of 70 characters
+#
+MESSAGE() {
+ SPACES=" "
+ echo "$* $SPACES" | cut -c1-70 | tr -d '\012'
+}
+
+# Print a one-line message left justified in a field of 70 characters
+# beginning with the word "Testing".
+#
+TESTING() {
+ SPACES=" "
+ echo "Testing $* $SPACES" | cut -c1-70 | tr -d '\012'
+}
+
+# Print a one-line message left justified in a field of 70 characters
+# beginning with the word "Verifying".
+#
+VERIFY() {
+ MESSAGE "Verifying $*"
+}
+
+# Source in the output filter function definitions.
+. $srcdir/../../../bin/output_filter.sh
+
+# Run a test and print PASSED or *FAILED*. For now, a test is considered
+# to pass if h5ls completes with exit status 0. If a test fails, increment
+# the `nerrors' global variable and (if $verbose is set) display up to
+# $NLINES lines of the actual output from the tool test. The actual output
+# is not removed if $HDF5_NOCLEANUP has a non-zero value.
+# Arguments:
+# $1 -- actual output filename to use
+# $2 and on -- arguments for the h5ls tool
+TOOLTEST() {
+ expect="$TESTDIR/$1"
+ actual="$TESTDIR/`basename $1 .ls`.out"
+ actual_err="$TESTDIR/`basename $1 .ls`.err"
+ actual_sav=${actual}-sav
+ actual_err_sav=${actual_err}-sav
+ shift
+ retvalexpect=$1
+ shift
+
+ # Run test.
+ # Stderr is included in stdout so that the diff can detect
+ # any unexpected output from that stream too.
+ TESTING $H5LS $@
+ (
+ cd $TESTDIR
+ $ENVCMD $RUNSERIAL $H5LS_BIN "$@"
+ ) >$actual 2>$actual_err
+
+ exitcode=$?
+ # save actual and actual_err in case they are needed later.
+ cp $actual $actual_sav
+ STDOUT_FILTER $actual
+ cp $actual_err $actual_err_sav
+ STDERR_FILTER $actual_err
+ cat $actual_err >> $actual
+ if [ $h5haveexitcode = 'yes' -a $exitcode -ne $retvalexpect ]; then
+ echo "*FAILED*"
+ nerrors="`expr $nerrors + 1`"
+ if [ yes = "$verbose" ]; then
+ echo "test returned with exit code $exitcode"
+ echo "test output: (up to $NLINES lines)"
+ head -$NLINES $actual
+ echo "***end of test output***"
+ echo ""
+ fi
+ elif [ ! -f $expect ]; then
+ # Create the expect file if it doesn't yet exist.
+ echo " CREATED"
+ cp $actual $expect
+ echo " Expected result (*.ls) missing"
+ nerrors="`expr $nerrors + 1`"
+ elif $CMP $expect $actual; then
+ echo " PASSED"
+ else
+ echo "*FAILED*"
+ echo " Expected result differs from actual result"
+ nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF $expect $actual |sed 's/^/ /'
+ fi
+
+ # Clean up output file
+ if test -z "$HDF5_NOCLEANUP"; then
+ rm -f $actual $actual_err $actual_sav $actual_err_sav
+ fi
+}
+
+##############################################################################
+### T H E T E S T S
+##############################################################################
+# prepare for test
+COPY_TESTFILES_TO_TESTDIR
+
+# Run the test
+TOOLTEST tudfilter.ls 0 -w80 -d tudfilter.h5
+
+# print results
+if test $nerrors -ne 0 ; then
+ echo "$nerrors errors encountered"
+ exit_code=$EXIT_FAILURE
+else
+ echo "All Plugin API tests passed."
+ exit_code=$EXIT_SUCCESS
+fi
+
+# Clean up temporary files/directories
+CLEAN_TESTFILES_AND_TESTDIR
+
+# Clean up temporary files/directories and leave
+$RM $PLUGIN_LIBDIR
+
+exit $exit_code
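The script above exercises the same dynamic-loading path that any HDF5 application hits when it reads data compressed with a filter the library does not have built in: the read falls through to H5PL, which searches HDF5_PLUGIN_PATH for a matching plugin. A minimal read-side sketch (not part of the patch; file and dataset names are placeholders):

/* Sketch only, not part of this patch. Reading filtered data triggers the
 * on-demand plugin load; run with HDF5_PLUGIN_PATH pointing at the
 * directory that holds libdynlibls. Names below are placeholders. */
#include "hdf5.h"

int
main(void)
{
    int   data[256];
    hid_t file, dset;

    file = H5Fopen("tudfilter.h5", H5F_ACC_RDONLY, H5P_DEFAULT);
    dset = H5Dopen2(file, "dynlibud" /* placeholder name */, H5P_DEFAULT);

    /* Decoding the chunks loads the user-defined filter through H5PL */
    H5Dread(dset, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, data);

    H5Dclose(dset);
    H5Fclose(file);
    return 0;
}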
diff --git a/tools/test/h5ls/testh5ls.sh.in b/tools/test/h5ls/testh5ls.sh.in
index e2a72ba..a88ae88 100644
--- a/tools/test/h5ls/testh5ls.sh.in
+++ b/tools/test/h5ls/testh5ls.sh.in
@@ -6,17 +6,18 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Tests for the h5ls tool
srcdir=@srcdir@
+USE_FILTER_SZIP="@USE_FILTER_SZIP@"
+USE_FILTER_DEFLATE="@USE_FILTER_DEFLATE@"
+
TESTNAME=h5ls
EXIT_SUCCESS=0
EXIT_FAILURE=1
@@ -180,10 +181,10 @@ COPY_TESTFILES_TO_TESTDIR()
INODE_SDIR=`$LS -i -d $SDIR | $AWK -F' ' '{print $1}'`
INODE_DDIR=`$LS -i -d $TESTDIR | $AWK -F' ' '{print $1}'`
if [ "$INODE_SDIR" != "$INODE_DDIR" ]; then
- $CP -f $tstfile $TESTDIR
+ $CP -f $tstfile $TESTDIR
if [ $? -ne 0 ]; then
echo "Error: FAILED to copy $tstfile ."
-
+
# Comment out this to CREATE expected file
exit $EXIT_FAILURE
fi
@@ -238,10 +239,10 @@ TOOLTEST() {
# any unexpected output from that stream too.
TESTING $H5LS $@
(
- cd $TESTDIR
+ cd $TESTDIR
$RUNSERIAL $H5LS_BIN "$@"
- ) >$actual 2>$actual_err
-
+ ) >$actual 2>$actual_err
+
exitcode=$?
# save actual and actual_err in case they are needed later.
cp $actual $actual_sav
@@ -250,37 +251,39 @@ TOOLTEST() {
STDERR_FILTER $actual_err
cat $actual_err >> $actual
if [ $h5haveexitcode = 'yes' -a $exitcode -ne $retvalexpect ]; then
- echo "*FAILED*"
- nerrors="`expr $nerrors + 1`"
- if [ yes = "$verbose" ]; then
- echo "test returned with exit code $exitcode"
- echo "test output: (up to $NLINES lines)"
- head -$NLINES $actual
- echo "***end of test output***"
- echo ""
- fi
+ echo "*FAILED*"
+ nerrors="`expr $nerrors + 1`"
+ if [ yes = "$verbose" ]; then
+ echo "test returned with exit code $exitcode"
+ echo "test output: (up to $NLINES lines)"
+ head -$NLINES $actual
+ echo "***end of test output***"
+ echo ""
+ fi
elif [ ! -f $expect ]; then
- # Create the expect file if it doesn't yet exist.
+ # Create the expect file if it doesn't yet exist.
echo " CREATED"
cp $actual $expect
+ echo " Expected result (*.ls) missing"
+ nerrors="`expr $nerrors + 1`"
elif $CMP $expect $actual; then
echo " PASSED"
else
echo "*FAILED*"
- echo " Expected result differs from actual result"
- nerrors="`expr $nerrors + 1`"
- test yes = "$verbose" && $DIFF $expect $actual |sed 's/^/ /'
+ echo " Expected result differs from actual result"
+ nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF $expect $actual |sed 's/^/ /'
fi
# Clean up output file
if test -z "$HDF5_NOCLEANUP"; then
- rm -f $actual $actual_err $actual_sav $actual_err_sav
+ rm -f $actual $actual_err $actual_sav $actual_err_sav
fi
}
##############################################################################
##############################################################################
-### T H E T E S T S ###
+### T H E T E S T S ###
##############################################################################
##############################################################################
# prepare for test
@@ -339,7 +342,7 @@ TOOLTEST textlinksrc-3-old.ls 0 -w80 -Er textlinksrc.h5/ext_link1
TOOLTEST textlinksrc-6-old.ls 0 -w80 -E textlinksrc.h5
TOOLTEST textlinksrc-7-old.ls 0 -w80 -E textlinksrc.h5/ext_link1
-# tests for no-dangling-links
+# tests for no-dangling-links
# if this option is given on dangling link, h5ls should return exit code 1
# when used alone , expect to print out help and return exit code 1
TOOLTEST textlinksrc-nodangle-1.ls 1 -w80 --no-dangling-links textlinksrc.h5
@@ -374,7 +377,7 @@ TOOLTEST tnestcomp-4.ls 0 -w80 -r -d -l -S tnestedcomp.h5
# test for loop detection
TOOLTEST tloop-1.ls 0 -w80 -r -d tloop.h5
-# test for string
+# test for string
TOOLTEST tstr-1.ls 0 -w80 -r -d tstr.h5
# test test file created from lib SAF team
@@ -405,24 +408,28 @@ fi
# test for non-existing file
TOOLTEST nosuchfile.ls 1 nosuchfile.h5
-# test for variable length data types in verbose mode
+# test for variable length data types in verbose mode
if test $WORDS_BIGENDIAN != "yes"; then
TOOLTEST tvldtypes2le.ls 0 -v tvldtypes1.h5
else
TOOLTEST tvldtypes2be.ls 0 -v tvldtypes1.h5
-fi
+fi
-# test for dataset region references data types in verbose mode
+# test for dataset region references data types in verbose mode
if test $WORDS_BIGENDIAN != "yes"; then
TOOLTEST tdataregle.ls 0 -v tdatareg.h5
else
TOOLTEST tdataregbe.ls 0 -v tdatareg.h5
-fi
+fi
# test for file with datasets that use Fixed Array chunk indices
-echo "***skip testing tdset_idx.h5"
-TOOLTEST tdset_idx.ls 0 -w80 -d tdset_idx.h5
+if test $USE_FILTER_DEFLATE = "yes" ; then
+ # data read internal filters
+ TOOLTEST tdset_idx.ls 0 -w80 -d tdset_idx.h5
+else
+ echo "***skip testing tdset_idx.h5"
+fi
# Clean up temporary files/directories
CLEAN_TESTFILES_AND_TESTDIR
diff --git a/tools/test/h5ls/testh5lsvds.sh.in b/tools/test/h5ls/testh5lsvds.sh.in
index b5c6aac..eb44367 100644
--- a/tools/test/h5ls/testh5lsvds.sh.in
+++ b/tools/test/h5ls/testh5lsvds.sh.in
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Tests for the h5ls tool
@@ -189,7 +187,7 @@ TOOLTEST() {
# any unexpected output from that stream too.
TESTING $H5LS $@
(
- cd $TESTDIR
+ cd $TESTDIR
$RUNSERIAL $H5LS_BIN "$@"
) >$actual 2>$actual_err
@@ -201,37 +199,39 @@ TOOLTEST() {
STDERR_FILTER $actual_err
cat $actual_err >> $actual
if [ $h5haveexitcode = 'yes' -a $exitcode -ne $retvalexpect ]; then
- echo "*FAILED*"
- nerrors="`expr $nerrors + 1`"
- if [ yes = "$verbose" ]; then
- echo "test returned with exit code $exitcode"
- echo "test output: (up to $NLINES lines)"
- head -$NLINES $actual
- echo "***end of test output***"
- echo ""
- fi
+ echo "*FAILED*"
+ nerrors="`expr $nerrors + 1`"
+ if [ yes = "$verbose" ]; then
+ echo "test returned with exit code $exitcode"
+ echo "test output: (up to $NLINES lines)"
+ head -$NLINES $actual
+ echo "***end of test output***"
+ echo ""
+ fi
elif [ ! -f $expect ]; then
- # Create the expect file if it doesn't yet exist.
+ # Create the expect file if it doesn't yet exist.
echo " CREATED"
cp $actual $expect
+ echo " Expected result (*.ls) missing"
+ nerrors="`expr $nerrors + 1`"
elif $CMP $expect $actual; then
echo " PASSED"
else
echo "*FAILED*"
- echo " Expected result differs from actual result"
- nerrors="`expr $nerrors + 1`"
- test yes = "$verbose" && $DIFF $expect $actual |sed 's/^/ /'
+ echo " Expected result differs from actual result"
+ nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF $expect $actual |sed 's/^/ /'
fi
# Clean up output file
if test -z "$HDF5_NOCLEANUP"; then
- rm -f $actual $actual_err $actual_sav $actual_err_sav
+ rm -f $actual $actual_err $actual_sav $actual_err_sav
fi
}
##############################################################################
##############################################################################
-### T H E T E S T S ###
+### T H E T E S T S ###
##############################################################################
##############################################################################
# prepare for test
diff --git a/tools/test/h5repack/CMakeLists.txt b/tools/test/h5repack/CMakeLists.txt
index cecec53..9d67ec3 100644
--- a/tools/test/h5repack/CMakeLists.txt
+++ b/tools/test/h5repack/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_TOOLS_TEST_H5REPACK)
#-----------------------------------------------------------------------------
diff --git a/tools/test/h5repack/CMakeTests.cmake b/tools/test/h5repack/CMakeTests.cmake
index 576fb58..897209c 100644
--- a/tools/test/h5repack/CMakeTests.cmake
+++ b/tools/test/h5repack/CMakeTests.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
##############################################################################
##############################################################################
@@ -17,9 +28,9 @@
if (DIRECT_VFD)
set (VFD_LIST ${VFD_LIST} direct)
- endif (DIRECT_VFD)
+ endif ()
- MACRO (ADD_VFD_TEST vfdname resultcode)
+ macro (ADD_VFD_TEST vfdname resultcode)
add_test (
NAME H5REPACK-VFD-${vfdname}-h5repacktest
COMMAND "${CMAKE_COMMAND}"
@@ -33,10 +44,10 @@
)
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (H5REPACK-VFD-${vfdname}-h5repacktest PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
set (last_test "H5REPACK-VFD-${vfdname}-h5repacktest")
- ENDMACRO (ADD_VFD_TEST)
- endif (HDF5_TEST_VFD)
+ endmacro ()
+ endif ()
# --------------------------------------------------------------------
# Copy all the HDF5 files from the source directory into the test directory
@@ -79,6 +90,7 @@
${HDF5_TOOLS_DIR}/testfiles/tfamily00008.h5
${HDF5_TOOLS_DIR}/testfiles/tfamily00009.h5
${HDF5_TOOLS_DIR}/testfiles/tfamily00010.h5
+ ${HDF5_TOOLS_DIR}/testfiles/tordergr.h5
# tools/testfiles/vds
${HDF5_TOOLS_DIR}/testfiles/vds/1_a.h5
${HDF5_TOOLS_DIR}/testfiles/vds/1_b.h5
@@ -126,12 +138,13 @@
${HDF5_TOOLS_TEST_H5REPACK_SOURCE_DIR}/testfiles/4_vds.h5-vds_compa-v.ddl
${HDF5_TOOLS_TEST_H5REPACK_SOURCE_DIR}/testfiles/4_vds.h5-vds_conti-v.ddl
${HDF5_TOOLS_TEST_H5REPACK_SOURCE_DIR}/testfiles/h5repack_layout.h5-plugin_zero.tst
+ ${HDF5_TOOLS_TEST_H5REPACK_SOURCE_DIR}/testfiles/crtorder.tordergr.h5.ddl
)
foreach (h5_file ${LIST_HDF5_TEST_FILES} ${LIST_OTHER_TEST_FILES})
get_filename_component(fname "${h5_file}" NAME)
HDFTEST_COPY_FILE("${h5_file}" "${PROJECT_BINARY_DIR}/testfiles/${fname}" "h5repack_files")
- endforeach (h5_file ${LIST_HDF5_TEST_FILES} ${LIST_OTHER_TEST_FILES})
+ endforeach ()
add_custom_target(h5repack_files ALL COMMENT "Copying files needed by h5repack tests" DEPENDS ${h5repack_files_list})
##############################################################################
@@ -140,7 +153,7 @@
##############################################################################
##############################################################################
- MACRO (ADD_HELP_TEST testname resultcode)
+ macro (ADD_HELP_TEST testname resultcode)
# If using memchecker add tests without using scripts
if (HDF5_ENABLE_USING_MEMCHECKER)
add_test (NAME H5REPACK-${testname} COMMAND $<TARGET_FILE:h5repack> ${ARGN})
@@ -149,7 +162,7 @@
set_tests_properties (H5REPACK-${testname} PROPERTIES DEPENDS ${last_test})
endif ()
set (last_test "H5REPACK-${testname}")
- else (HDF5_ENABLE_USING_MEMCHECKER)
+ else ()
add_test (
NAME H5REPACK-h5repack-${testname}
COMMAND "${CMAKE_COMMAND}"
@@ -161,18 +174,18 @@
-D "TEST_REFERENCE=h5repack-${testname}.txt"
-P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
)
- endif (HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO (ADD_HELP_TEST)
+ endif ()
+ endmacro ()
- MACRO (ADD_H5_TEST_OLD testname testtype testfile)
+ macro (ADD_H5_TEST_OLD testname testtype testfile)
if ("${testtype}" STREQUAL "SKIP")
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
add_test (
NAME H5REPACK_OLD-${testname}-SKIPPED
COMMAND ${CMAKE_COMMAND} -E echo "SKIP ${ARGN} -i ${PROJECT_BINARY_DIR}/testfiles/${testfile} -o ${PROJECT_BINARY_DIR}/testfiles/out-${testname}.${testfile}"
)
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
- else ("${testtype}" STREQUAL "SKIP")
+ endif ()
+ else ()
add_test (
NAME H5REPACK_OLD-${testname}
COMMAND $<TARGET_FILE:h5repack> ${ARGN} -i ${PROJECT_BINARY_DIR}/testfiles/${testfile} -o ${PROJECT_BINARY_DIR}/testfiles/out-${testname}.${testfile}
@@ -185,18 +198,18 @@
COMMAND $<TARGET_FILE:h5diff> ${PROJECT_BINARY_DIR}/testfiles/${testfile} ${PROJECT_BINARY_DIR}/testfiles/out-${testname}.${testfile}
)
set_tests_properties (H5REPACK_OLD-${testname}_DFF PROPERTIES DEPENDS H5REPACK_OLD-${testname})
- endif ("${testtype}" STREQUAL "SKIP")
- ENDMACRO (ADD_H5_TEST_OLD)
+ endif ()
+ endmacro ()
- MACRO (ADD_H5_TEST testname testtype testfile)
+ macro (ADD_H5_TEST testname testtype testfile)
if ("${testtype}" STREQUAL "SKIP")
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
add_test (
NAME H5REPACK-${testname}-SKIPPED
COMMAND ${CMAKE_COMMAND} -E echo "SKIP ${ARGN} ${PROJECT_BINARY_DIR}/testfiles/${testfile} ${PROJECT_BINARY_DIR}/testfiles/out-${testname}.${testfile}"
)
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
- else ("${testtype}" STREQUAL "SKIP")
+ endif ()
+ else ()
add_test (
NAME H5REPACK-${testname}
COMMAND $<TARGET_FILE:h5repack> ${ARGN} ${PROJECT_BINARY_DIR}/testfiles/${testfile} ${PROJECT_BINARY_DIR}/testfiles/out-${testname}.${testfile}
@@ -209,24 +222,24 @@
COMMAND $<TARGET_FILE:h5diff> ${PROJECT_BINARY_DIR}/testfiles/${testfile} ${PROJECT_BINARY_DIR}/testfiles/out-${testname}.${testfile}
)
set_tests_properties (H5REPACK-${testname}_DFF PROPERTIES DEPENDS H5REPACK-${testname})
- endif ("${testtype}" STREQUAL "SKIP")
- ENDMACRO (ADD_H5_TEST)
+ endif ()
+ endmacro ()
- MACRO (ADD_H5_CMP_TEST testname testfilter testtype resultcode resultfile)
+ macro (ADD_H5_CMP_TEST testname testfilter testtype resultcode resultfile)
if ("${testtype}" STREQUAL "SKIP")
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
add_test (
NAME H5REPACK_CMP-${testname}-SKIPPED
COMMAND ${CMAKE_COMMAND} -E echo "SKIP ${ARGN} ${PROJECT_BINARY_DIR}/testfiles/${resultfile} ${PROJECT_BINARY_DIR}/testfiles/out-${testname}.${resultfile}"
)
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
- else ("${testtype}" STREQUAL "SKIP")
+ endif ()
+ else ()
# If using memchecker add tests without using scripts
if (HDF5_ENABLE_USING_MEMCHECKER)
add_test (
NAME H5REPACK_CMP-${testname}
COMMAND $<TARGET_FILE:h5repack> ${ARGN} ${PROJECT_BINARY_DIR}/testfiles/${resultfile} ${PROJECT_BINARY_DIR}/testfiles/out-${testname}.${resultfile})
- else (HDF5_ENABLE_USING_MEMCHECKER)
+ else ()
add_test (
NAME H5REPACK_CMP-${testname}
COMMAND "${CMAKE_COMMAND}"
@@ -239,22 +252,22 @@
-D "TEST_REFERENCE=${resultfile}-${testname}.tst"
-P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
)
- endif (HDF5_ENABLE_USING_MEMCHECKER)
+ endif ()
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (H5REPACK_CMP-${testname} PROPERTIES DEPENDS ${last_test})
endif ()
- endif ("${testtype}" STREQUAL "SKIP")
- ENDMACRO (ADD_H5_CMP_TEST)
+ endif ()
+ endmacro ()
- MACRO (ADD_H5_MASK_TEST testname testtype resultcode resultfile)
+ macro (ADD_H5_MASK_TEST testname testtype resultcode resultfile)
if ("${testtype}" STREQUAL "SKIP")
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
add_test (
NAME H5REPACK_MASK-${testname}-SKIPPED
COMMAND ${CMAKE_COMMAND} -E echo "SKIP ${ARGN} ${PROJECT_BINARY_DIR}/testfiles/${resultfile} ${PROJECT_BINARY_DIR}/testfiles/out-${testname}.${resultfile}"
)
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
- else ("${testtype}" STREQUAL "SKIP")
+ endif ()
+ else ()
# If using memchecker add tests without using scripts
if (HDF5_ENABLE_USING_MEMCHECKER)
add_test (
@@ -273,22 +286,22 @@
-D "TEST_REFERENCE=${resultfile}-${testname}.tst"
-P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
)
- endif (HDF5_ENABLE_USING_MEMCHECKER)
+ endif ()
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (H5REPACK_MASK-${testname} PROPERTIES DEPENDS ${last_test})
endif ()
- endif ("${testtype}" STREQUAL "SKIP")
- ENDMACRO (ADD_H5_MASK_TEST)
+ endif ()
+ endmacro ()
- MACRO (ADD_H5_DMP_TEST testname testtype resultcode resultfile)
+ macro (ADD_H5_DMP_TEST testname testtype resultcode resultfile)
if ("${testtype}" STREQUAL "SKIP")
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
add_test (
NAME H5REPACK_DMP-${testname}-SKIPPED
COMMAND ${CMAKE_COMMAND} -E echo "SKIP ${ARGN} ${PROJECT_BINARY_DIR}/testfiles/${resultfile} ${PROJECT_BINARY_DIR}/testfiles/out-${testname}.${resultfile}"
)
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
- else ("${testtype}" STREQUAL "SKIP")
+ endif ()
+ else ()
# If using memchecker add tests without using scripts
add_test (
NAME H5REPACK_DMP-${testname}
@@ -301,7 +314,7 @@
NAME H5REPACK_DMP-h5dump-${testname}
COMMAND "${CMAKE_COMMAND}"
-D "TEST_PROGRAM=$<TARGET_FILE:h5dump>"
- -D "TEST_ARGS:STRING=-pH;out-${testname}.${resultfile}"
+ -D "TEST_ARGS:STRING=-q;creation_order;-pH;out-${testname}.${resultfile}"
-D "TEST_FOLDER=${PROJECT_BINARY_DIR}/testfiles"
-D "TEST_OUTPUT=${resultfile}-${testname}.out"
-D "TEST_EXPECT=${resultcode}"
@@ -309,19 +322,19 @@
-P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
)
set_tests_properties (H5REPACK_DMP-h5dump-${testname} PROPERTIES DEPENDS "H5REPACK_DMP-${testname}")
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
- endif ("${testtype}" STREQUAL "SKIP")
- ENDMACRO (ADD_H5_DMP_TEST)
+ endif ()
+ endif ()
+ endmacro ()
- MACRO (ADD_H5_VERIFY_TEST testname testtype resultcode testfile testdset testfilter)
+ macro (ADD_H5_VERIFY_TEST testname testtype resultcode testfile testdset testfilter)
if ("${testtype}" STREQUAL "SKIP")
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
add_test (
NAME H5REPACK_VERIFY_LAYOUT-${testname}-SKIPPED
COMMAND ${CMAKE_COMMAND} -E echo "SKIP -d ${testdset} -pH ${PROJECT_BINARY_DIR}/testfiles/out-${testname}.${resultfile}"
)
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
- else ("${testtype}" STREQUAL "SKIP")
+ endif ()
+ else ()
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
add_test (
NAME H5REPACK_VERIFY_LAYOUT-${testname}
@@ -349,7 +362,7 @@
-P "${HDF_RESOURCES_EXT_DIR}/grepTest.cmake"
)
set_tests_properties (H5REPACK_VERIFY_LAYOUT-${testname}_DMP PROPERTIES DEPENDS H5REPACK_VERIFY_LAYOUT-${testname}_DFF)
- else ("${resultcode}" STREQUAL "0")
+ else ()
if ("${testfilter}" STREQUAL "CHUNKED")
set (nottestfilter "(CONTIGUOUS|COMPACT)")
endif ()
@@ -372,20 +385,20 @@
-P "${HDF_RESOURCES_EXT_DIR}/grepTest.cmake"
)
set_tests_properties (H5REPACK_VERIFY_LAYOUT-${testname}_DMP PROPERTIES DEPENDS H5REPACK_VERIFY_LAYOUT-${testname}_DFF)
- endif ("${resultcode}" STREQUAL "0")
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
- endif ("${testtype}" STREQUAL "SKIP")
- ENDMACRO (ADD_H5_VERIFY_TEST)
+ endif ()
+ endif ()
+ endif ()
+ endmacro ()
- MACRO (ADD_H5_VERIFY_VDS testname testtype resultcode testfile testdset testfilter)
+ macro (ADD_H5_VERIFY_VDS testname testtype resultcode testfile testdset testfilter)
if ("${testtype}" STREQUAL "SKIP")
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
add_test (
NAME H5REPACK_VERIFY_LAYOUT_VDS-${testname}-SKIPPED
COMMAND ${CMAKE_COMMAND} -E echo "SKIP -d ${testdset} -pH ${PROJECT_BINARY_DIR}/testfiles/out-${testname}.${resultfile}"
)
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
- else ("${testtype}" STREQUAL "SKIP")
+ endif ()
+ else ()
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
add_test (
NAME H5REPACK_VERIFY_LAYOUT_VDS-${testname}
@@ -408,11 +421,11 @@
)
set_tests_properties (H5REPACK_VERIFY_LAYOUT_VDS-${testname}_DMP PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles")
set_tests_properties (H5REPACK_VERIFY_LAYOUT_VDS-${testname}_DMP PROPERTIES DEPENDS H5REPACK_VERIFY_LAYOUT_VDS-${testname})
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
- endif ("${testtype}" STREQUAL "SKIP")
- ENDMACRO (ADD_H5_VERIFY_VDS)
+ endif ()
+ endif ()
+ endmacro ()
- MACRO (ADD_H5_TEST_META testname testfile)
+ macro (ADD_H5_TEST_META testname testfile)
add_test (
NAME H5REPACK_META-${testname}_N
COMMAND $<TARGET_FILE:h5repack> ${PROJECT_BINARY_DIR}/testfiles/${testfile} ${PROJECT_BINARY_DIR}/testfiles/out-${testname}_N.${testname}.h5
@@ -429,9 +442,9 @@
add_test (NAME H5REPACK_META-${testname} COMMAND ${CMAKE_COMMAND} -E compare_files ${PROJECT_BINARY_DIR}/testfiles/out-${testname}_N.${testname}.h5 ${PROJECT_BINARY_DIR}/testfiles/out-${testname}_M.${testname}.h5)
set_tests_properties (H5REPACK_META-${testname} PROPERTIES WILL_FAIL "true")
set_tests_properties (H5REPACK_META-${testname} PROPERTIES DEPENDS H5REPACK_META-${testname}_M)
- ENDMACRO (ADD_H5_TEST_META)
+ endmacro ()
- MACRO (ADD_H5_UD_TEST testname resultcode resultfile)
+ macro (ADD_H5_UD_TEST testname resultcode resultfile)
if (NOT HDF5_ENABLE_USING_MEMCHECKER)
# Remove any output file left over from previous test run
add_test (
@@ -469,8 +482,8 @@
-P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
)
set_tests_properties (H5REPACK_UD-h5dump-${testname} PROPERTIES DEPENDS "H5REPACK_UD-${testname}")
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO (ADD_H5_UD_TEST)
+ endif ()
+ endmacro ()
##############################################################################
##############################################################################
@@ -708,7 +721,7 @@
)
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (H5REPACK-clearall-objects PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
ADD_HELP_TEST(help 0 -h)
@@ -717,14 +730,14 @@
if (HDF5_ENABLE_SZIP_ENCODING)
set (passRegex "yes")
set_tests_properties (H5REPACK-testh5repack_detect_szip PROPERTIES PASS_REGULAR_EXPRESSION "yes")
- else (HDF5_ENABLE_SZIP_ENCODING)
+ else ()
set (passRegex "no")
set_tests_properties (H5REPACK-testh5repack_detect_szip PROPERTIES PASS_REGULAR_EXPRESSION "no")
- endif (HDF5_ENABLE_SZIP_ENCODING)
- else (HDF5_ENABLE_SZIP_SUPPORT)
+ endif ()
+ else ()
set (passRegex "no")
set_tests_properties (H5REPACK-testh5repack_detect_szip PROPERTIES PASS_REGULAR_EXPRESSION "no")
- endif (HDF5_ENABLE_SZIP_SUPPORT)
+ endif ()
set_tests_properties (H5REPACK-testh5repack_detect_szip PROPERTIES DEPENDS H5REPACK-clearall-objects)
add_test (NAME H5REPACK-h5repacktest COMMAND $<TARGET_FILE:h5repacktest>)
@@ -748,15 +761,15 @@
set (USE_FILTER_SZIP_ENCODER "no")
if (HDF5_ENABLE_SZIP_ENCODING)
set (USE_FILTER_SZIP_ENCODER ${testh5repack_detect_szip})
- endif (HDF5_ENABLE_SZIP_ENCODING)
+ endif ()
if (H5_HAVE_FILTER_DEFLATE)
set (USE_FILTER_DEFLATE "true")
- endif (H5_HAVE_FILTER_DEFLATE)
+ endif ()
if (H5_HAVE_FILTER_SZIP)
set (USE_FILTER_SZIP "true")
- endif (H5_HAVE_FILTER_SZIP)
+ endif ()
# copy files (these files have no filters)
ADD_H5_TEST (fill "TEST" ${FILE0})
@@ -768,10 +781,10 @@
# nested 8bit enum in both deflated and non-deflated datafiles
if (NOT USE_FILTER_DEFLATE)
- ADD_H5_TEST (nested_8bit_enum "TEST" h5repack_nested_8bit_enum.h5)
- else (NOT USE_FILTER_DEFLATE)
- ADD_H5_TEST (nested_8bit_enum "TEST" h5repack_nested_8bit_enum_deflated.h5)
- endif (NOT USE_FILTER_DEFLATE)
+ ADD_H5_TEST (nested_8bit_enum "TEST" h5repack_nested_8bit_enum.h5)
+ else ()
+ ADD_H5_TEST (nested_8bit_enum "TEST" h5repack_nested_8bit_enum_deflated.h5)
+ endif ()
# use $FILE4 to write some filters (this file has no filters)
@@ -780,7 +793,7 @@
set (TESTTYPE "TEST")
if (NOT USE_FILTER_DEFLATE)
set (TESTTYPE "SKIP")
- endif (NOT USE_FILTER_DEFLATE)
+ endif ()
ADD_H5_TEST (gzip_individual ${TESTTYPE} ${arg})
# gzip for all
@@ -788,7 +801,7 @@
set (TESTTYPE "TEST")
if (NOT USE_FILTER_DEFLATE)
set (TESTTYPE "SKIP")
- endif (NOT USE_FILTER_DEFLATE)
+ endif ()
ADD_H5_TEST (gzip_all ${TESTTYPE} ${arg})
# szip with individual object
@@ -796,7 +809,7 @@
set (TESTTYPE "TEST")
if (NOT USE_FILTER_SZIP_ENCODER OR NOT USE_FILTER_SZIP)
set (TESTTYPE "SKIP")
- endif (NOT USE_FILTER_SZIP_ENCODER OR NOT USE_FILTER_SZIP)
+ endif ()
ADD_H5_TEST (szip_individual ${TESTTYPE} ${arg})
# szip for all
@@ -804,7 +817,7 @@
set (TESTTYPE "TEST")
if (NOT USE_FILTER_SZIP_ENCODER OR NOT USE_FILTER_SZIP)
set (TESTTYPE "SKIP")
- endif (NOT USE_FILTER_SZIP_ENCODER OR NOT USE_FILTER_SZIP)
+ endif ()
ADD_H5_TEST (szip_all ${TESTTYPE} ${arg})
# shuffle with individual object
@@ -828,7 +841,7 @@
set (TESTTYPE "TEST")
if (NOT USE_FILTER_SZIP_ENCODER OR NOT USE_FILTER_SZIP OR NOT USE_FILTER_DEFLATE)
set (TESTTYPE "SKIP")
- endif (NOT USE_FILTER_SZIP_ENCODER OR NOT USE_FILTER_SZIP OR NOT USE_FILTER_DEFLATE)
+ endif ()
ADD_H5_TEST (all_filters ${TESTTYPE} ${arg})
# verbose gzip with individual object
@@ -836,7 +849,7 @@
set (TESTTYPE "TEST")
if (NOT USE_FILTER_DEFLATE)
set (TESTTYPE "SKIP")
- endif (NOT USE_FILTER_DEFLATE)
+ endif ()
ADD_H5_CMP_TEST (gzip_verbose_filters "O?...ing file[^\n]+\n" ${TESTTYPE} 0 ${arg})
###########################################################
@@ -848,7 +861,7 @@
set (TESTTYPE "TEST")
if (NOT USE_FILTER_SZIP_ENCODER OR NOT USE_FILTER_SZIP)
set (TESTTYPE "SKIP")
- endif (NOT USE_FILTER_SZIP_ENCODER OR NOT USE_FILTER_SZIP)
+ endif ()
ADD_H5_TEST (szip_copy ${TESTTYPE} ${arg})
# szip remove
@@ -856,7 +869,7 @@
set (TESTTYPE "TEST")
if (NOT USE_FILTER_SZIP_ENCODER OR NOT USE_FILTER_SZIP)
set (TESTTYPE "SKIP")
- endif (NOT USE_FILTER_SZIP_ENCODER OR NOT USE_FILTER_SZIP)
+ endif ()
ADD_H5_TEST (szip_remove ${TESTTYPE} ${arg})
# deflate copy
@@ -864,7 +877,7 @@
set (TESTTYPE "TEST")
if (NOT USE_FILTER_DEFLATE)
set (TESTTYPE "SKIP")
- endif (NOT USE_FILTER_DEFLATE)
+ endif ()
ADD_H5_TEST (deflate_copy ${TESTTYPE} ${arg})
# deflate remove
@@ -872,7 +885,7 @@
set (TESTTYPE "TEST")
if (NOT USE_FILTER_DEFLATE)
set (TESTTYPE "SKIP")
- endif (NOT USE_FILTER_DEFLATE)
+ endif ()
ADD_H5_TEST (deflate_remove ${TESTTYPE} ${arg})
# shuffle copy
@@ -920,7 +933,7 @@
set (TESTTYPE "TEST")
if (NOT USE_FILTER_DEFLATE OR NOT USE_FILTER_SZIP OR NOT USE_FILTER_SZIP_ENCODER)
set (TESTTYPE "SKIP")
- endif (NOT USE_FILTER_DEFLATE OR NOT USE_FILTER_SZIP OR NOT USE_FILTER_SZIP_ENCODER)
+ endif ()
ADD_H5_TEST (remove_all ${TESTTYPE} ${arg})
#filter conversions
@@ -928,14 +941,14 @@
set (TESTTYPE "TEST")
if (NOT USE_FILTER_SZIP_ENCODER OR NOT USE_FILTER_SZIP OR NOT USE_FILTER_DEFLATE)
set (TESTTYPE "SKIP")
- endif (NOT USE_FILTER_SZIP_ENCODER OR NOT USE_FILTER_SZIP OR NOT USE_FILTER_DEFLATE)
+ endif ()
ADD_H5_TEST (deflate_convert ${TESTTYPE} ${arg})
set (arg ${FILE7} -f dset_szip:GZIP=1)
set (TESTTYPE "TEST")
if (NOT USE_FILTER_SZIP OR NOT USE_FILTER_SZIP_ENCODER OR NOT USE_FILTER_DEFLATE)
set (TESTTYPE "SKIP")
- endif (NOT USE_FILTER_SZIP OR NOT USE_FILTER_SZIP_ENCODER OR NOT USE_FILTER_DEFLATE)
+ endif ()
ADD_H5_TEST (szip_convert ${TESTTYPE} ${arg})
#limit
@@ -943,7 +956,7 @@
set (TESTTYPE "TEST")
if (NOT USE_FILTER_DEFLATE)
set (TESTTYPE "SKIP")
- endif (NOT USE_FILTER_DEFLATE)
+ endif ()
ADD_H5_DMP_TEST (deflate_limit ${TESTTYPE} 0 ${arg})
#file
@@ -951,9 +964,14 @@
set (TESTTYPE "TEST")
if (NOT USE_FILTER_DEFLATE)
set (TESTTYPE "SKIP")
- endif (NOT USE_FILTER_DEFLATE)
+ endif ()
ADD_H5_TEST (deflate_file ${TESTTYPE} ${arg})
+#crtorder
+ set (arg tordergr.h5 -L)
+ set (TESTTYPE "TEST")
+ ADD_H5_DMP_TEST (crtorder ${TESTTYPE} 0 ${arg})
+
#########################################################
# layout options (these files have no filters)
#########################################################
@@ -1039,7 +1057,7 @@
set (TESTTYPE "TEST")
if (NOT USE_FILTER_DEFLATE)
set (TESTTYPE "SKIP")
- endif (NOT USE_FILTER_DEFLATE)
+ endif ()
ADD_H5_VERIFY_TEST (layout_long_switches ${TESTTYPE} 1 ${FILE4} null CHUNKED ${arg})
# latest file format with short switches. use FILE4=h5repack_layout.h5 (no filters)
@@ -1047,7 +1065,7 @@
set (TESTTYPE "TEST")
if (NOT USE_FILTER_DEFLATE)
set (TESTTYPE "SKIP")
- endif (NOT USE_FILTER_DEFLATE)
+ endif ()
ADD_H5_VERIFY_TEST (layout_short_switches ${TESTTYPE} 1 ${FILE4} null CHUNKED ${arg})
# several global filters
@@ -1055,7 +1073,7 @@
set (TESTTYPE "TEST")
if (NOT USE_FILTER_DEFLATE)
set (TESTTYPE "SKIP")
- endif (NOT USE_FILTER_DEFLATE)
+ endif ()
ADD_H5_TEST (global_filters ${TESTTYPE} ${arg})
# syntax of -i infile -o outfile
@@ -1064,7 +1082,7 @@
set (TESTTYPE "LEGACY")
if (NOT USE_FILTER_DEFLATE)
set (TESTTYPE "SKIP")
- endif (NOT USE_FILTER_DEFLATE)
+ endif ()
ADD_H5_TEST_OLD (old_style_layout_short_switches ${TESTTYPE} ${arg})
# add a userblock to file
@@ -1080,7 +1098,10 @@
ADD_H5_TEST (upgrade_layout "TEST" ${FILE14})
# test for datum size > H5TOOLS_MALLOCSIZE
- ADD_H5_TEST (gt_mallocsize "TEST" ${FILE1} -f GZIP=1)
+ if (NOT USE_FILTER_DEFLATE)
+ set (TESTTYPE "SKIP")
+ endif ()
+ ADD_H5_TEST (gt_mallocsize ${TESTTYPE} ${FILE1} -f GZIP=1)
# Check repacking file with committed datatypes in odd configurations
ADD_H5_TEST (committed_dt "TEST" ${FILE15})
@@ -1114,49 +1135,49 @@
set (TESTTYPE "TEST")
if (NOT USE_FILTER_DEFLATE)
set (TESTTYPE "SKIP")
- endif (NOT USE_FILTER_DEFLATE)
+ endif ()
ADD_H5_VERIFY_VDS (vds_dset_chunk20x10x5 ${TESTTYPE} 0 ${FILEV1} vds_dset CHUNKED -l vds_dset:CHUNK=20x10x5)
set (TESTTYPE "TEST")
if (NOT USE_FILTER_DEFLATE)
set (TESTTYPE "SKIP")
- endif (NOT USE_FILTER_DEFLATE)
+ endif ()
ADD_H5_VERIFY_VDS (vds_chunk2x5x8 ${TESTTYPE} 0 ${FILEV3_1} vds_dset CHUNKED -l vds_dset:CHUNK=2x5x8)
set (TESTTYPE "TEST")
if (NOT USE_FILTER_DEFLATE)
set (TESTTYPE "SKIP")
- endif (NOT USE_FILTER_DEFLATE)
+ endif ()
ADD_H5_VERIFY_VDS (vds_chunk3x6x9 ${TESTTYPE} 0 ${FILEV2} vds_dset CHUNKED -l vds_dset:CHUNK=3x6x9)
set (TESTTYPE "TEST")
if (NOT USE_FILTER_DEFLATE)
set (TESTTYPE "SKIP")
- endif (NOT USE_FILTER_DEFLATE)
+ endif ()
ADD_H5_VERIFY_VDS (vds_compa ${TESTTYPE} 0 ${FILEV4} vds_dset COMPACT -l vds_dset:COMPA)
set (TESTTYPE "TEST")
if (NOT USE_FILTER_DEFLATE)
set (TESTTYPE "SKIP")
- endif (NOT USE_FILTER_DEFLATE)
+ endif ()
ADD_H5_VERIFY_VDS (vds_conti ${TESTTYPE} 0 ${FILEV4} vds_dset CONTIGUOUS -l vds_dset:CONTI)
##############################################################################
### P L U G I N T E S T S
##############################################################################
- ADD_H5_UD_TEST (plugin_version_test 0 h5repack_layout.h5 -v -f UD=260,4,9,1,9,235)
+ ADD_H5_UD_TEST (plugin_version_test 0 h5repack_layout.h5 -v -f UD=260,4,9,${H5_VERS_MAJOR},${H5_VERS_MINOR},${H5_VERS_RELEASE})
ADD_H5_UD_TEST (plugin_test 0 h5repack_layout.h5 -v -f UD=257,1,9)
ADD_H5_UD_TEST (plugin_none 0 h5repack_layout.UD.h5 -v -f NONE)
# check for no parameters
set (TESTRETVAL 255)
if (WIN32)
set (TESTRETVAL -1)
- endif()
+ endif ()
ADD_H5_CMP_TEST (plugin_zero "" "TEST" ${TESTRETVAL} h5repack_layout.h5 -v -f UD=250,0)
if (HDF5_TEST_VFD)
# Run test with different Virtual File Driver
foreach (vfd ${VFD_LIST})
ADD_VFD_TEST (${vfd} 0)
- endforeach (vfd ${VFD_LIST})
- endif (HDF5_TEST_VFD)
+ endforeach ()
+ endif ()
diff --git a/tools/test/h5repack/Makefile.am b/tools/test/h5repack/Makefile.am
index 6c045c4..da2a3c3 100644
--- a/tools/test/h5repack/Makefile.am
+++ b/tools/test/h5repack/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
@@ -37,28 +35,32 @@ endif
check_SCRIPTS=$(TEST_SCRIPT)
-# Depend on the hdf5 library, the tools library, the test library
-LDADD=$(LIBH5TOOLS) $(LIBH5TEST) $(LIBHDF5)
+# Depend on the h5repack library, the hdf5 library, the tools library, the test library
+LDADD=../../src/h5repack/libh5repack.la $(LIBH5TOOLS) $(LIBH5TEST) $(LIBHDF5)
testh5repack_detect_szip_SOURCES=testh5repack_detect_szip.c
-# Source files
-COMMON_SOURCES=../../src/h5repack/h5repack.c ../../src/h5repack/h5repack_copy.c ../../src/h5repack/h5repack_filters.c \
- ../../src/h5repack/h5repack_opttable.c ../../src/h5repack/h5repack_parse.c ../../src/h5repack/h5repack_refs.c \
- ../../src/h5repack/h5repack_verify.c
-h5repacktst_SOURCES=$(COMMON_SOURCES) h5repacktst.c
+h5repacktst_SOURCES=h5repacktst.c
if HAVE_SHARED_CONDITIONAL
# Build it as shared library if configure is enabled for shared library.
- lib_LTLIBRARIES=libdynlibadd.la libdynlibvers.la
+ dyn_LTLIBRARIES=libdynlibadd.la libdynlibvers.la
libdynlibadd_la_SOURCES=dynlib_rpk.c
libdynlibvers_la_SOURCES=dynlib_vrpk.c
+ libdynlibadd_la_LDFLAGS = -avoid-version -module -shared -export-dynamic
+ libdynlibvers_la_LDFLAGS = -avoid-version -module -shared -export-dynamic
-install-exec-hook:
- $(RM) $(DESTDIR)$(libdir)/*dynlib*
+libdynlibadd.la: $(libdynlibadd_la_OBJECTS) $(libdynlibadd_la_DEPENDENCIES) $(EXTRA_libdynlibadd_la_DEPENDENCIES)
+ $(AM_V_CCLD)$(libdynlibadd_la_LINK) $(am_libdynlibadd_la_rpath) $(libdynlibadd_la_OBJECTS) $(libdynlibadd_la_LIBADD)
+libdynlibvers.la: $(libdynlibvers_la_OBJECTS) $(libdynlibvers_la_DEPENDENCIES) $(EXTRA_libdynlibvers_la_DEPENDENCIES)
+ $(AM_V_CCLD)$(libdynlibvers_la_LINK) $(am_libdynlibvers_la_rpath) $(libdynlibvers_la_OBJECTS) $(libdynlibvers_la_LIBADD)
+
+#install-exec-hook:
+# $(RM) $(DESTDIR)$(dyndir)/*dynlib*
endif
+
# Temporary files. *.h5 are generated by h5repack. They should
# copied to the testfiles/ directory if update is required.
CHECK_CLEANFILES+=*.h5 *.bin testfiles/h5diff_attr1.h5 testfiles/tfamily*.h5
diff --git a/tools/test/h5repack/dynlib_rpk.c b/tools/test/h5repack/dynlib_rpk.c
index 3469e58..29c3eae 100644
--- a/tools/test/h5repack/dynlib_rpk.c
+++ b/tools/test/h5repack/dynlib_rpk.c
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic document set and is *
- * linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have access *
- * to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
* Programmer: Raymond Lu
diff --git a/tools/test/h5repack/dynlib_vrpk.c b/tools/test/h5repack/dynlib_vrpk.c
index 8da0270..06d90ff 100644
--- a/tools/test/h5repack/dynlib_vrpk.c
+++ b/tools/test/h5repack/dynlib_vrpk.c
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic document set and is *
- * linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have access *
- * to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
* Purpose: Tests the plugin module (H5PL)
diff --git a/tools/test/h5repack/h5repack.sh.in b/tools/test/h5repack/h5repack.sh.in
index 4d3cae8..bc6b527 100644
--- a/tools/test/h5repack/h5repack.sh.in
+++ b/tools/test/h5repack/h5repack.sh.in
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Tests for the h5repack tool
#
@@ -29,14 +27,14 @@ TESTNAME=h5repack
EXIT_SUCCESS=0
EXIT_FAILURE=1
-H5REPACK=../../src/h5repack/h5repack # The tool name
-H5REPACK_BIN=`pwd`/$H5REPACK # The path of the tool binary
+H5REPACK=../../src/h5repack/h5repack # The tool name
+H5REPACK_BIN=`pwd`/$H5REPACK # The path of the tool binary
-H5DIFF=../../src/h5diff/h5diff # The h5diff tool name
-H5DIFF_BIN=`pwd`/$H5DIFF # The path of the h5diff tool binary
+H5DIFF=../../src/h5diff/h5diff # The h5diff tool name
+H5DIFF_BIN=`pwd`/$H5DIFF # The path of the h5diff tool binary
-H5DUMP=../../src/h5dump/h5dump # The h5dump tool name
-H5DUMP_BIN=`pwd`/$H5DUMP # The path of the h5dump tool binary
+H5DUMP=../../src/h5dump/h5dump # The h5dump tool name
+H5DUMP_BIN=`pwd`/$H5DUMP # The path of the h5dump tool binary
RM='rm -rf'
CMP='cmp'
@@ -115,6 +113,7 @@ $SRC_TOOLS_TESTFILES/tfamily00007.h5
$SRC_TOOLS_TESTFILES/tfamily00008.h5
$SRC_TOOLS_TESTFILES/tfamily00009.h5
$SRC_TOOLS_TESTFILES/tfamily00010.h5
+$SRC_TOOLS_TESTFILES/tordergr.h5
$SRC_TOOLS_TESTFILES/vds/1_a.h5
$SRC_TOOLS_TESTFILES/vds/1_b.h5
$SRC_TOOLS_TESTFILES/vds/1_c.h5
@@ -145,6 +144,7 @@ $SRC_H5REPACK_TESTFILES/h5repack-help.txt
$SRC_H5REPACK_TESTFILES/h5repack_ext.bin
$SRC_H5REPACK_TESTFILES/ublock.bin
$SRC_H5REPACK_TESTFILES/h5repack.info
+$SRC_H5REPACK_TESTFILES/crtorder.tordergr.h5.ddl
$SRC_H5REPACK_TESTFILES/deflate_limit.h5repack_layout.h5.ddl
$SRC_H5REPACK_TESTFILES/h5repack_layout.h5.ddl
$SRC_H5REPACK_TESTFILES/h5repack_filters.h5-gzip_verbose_filters.tst
@@ -179,7 +179,7 @@ COPY_TESTFILES_TO_TESTDIR()
INODE_SDIR=`$LS -i -d $SDIR | $AWK -F' ' '{print $1}'`
INODE_DDIR=`$LS -i -d $TESTDIR | $AWK -F' ' '{print $1}'`
if [ "$INODE_SDIR" != "$INODE_DDIR" ]; then
- $CP -f $tstfile $TESTDIR
+ $CP -f $tstfile $TESTDIR
if [ $? -ne 0 ]; then
echo "Error: FAILED to copy $tstfile ."
@@ -375,6 +375,8 @@ VERIFY_LAYOUT_VDS()
# Create the expect file if it doesn't yet exist.
echo " CREATED"
cp $actual $expect
+ echo " Expected result (*.ddl) missing"
+ nerrors="`expr $nerrors + 1`"
elif $CMP $expect $actual > /dev/null 2>&1 ; then
echo " PASSED"
else
@@ -386,7 +388,7 @@ VERIFY_LAYOUT_VDS()
# Clean up output file
if test -z "$HDF5_NOCLEANUP"; then
- rm -f $actual $actual_err
+ rm -f $actual $actual_err
fi
# clean up tmp files
@@ -541,11 +543,11 @@ TOOLTEST1()
)
RET=$?
if [ $RET != 0 ] ; then
- echo "*FAILED*"
- nerrors="`expr $nerrors + 1`"
+ echo "*FAILED*"
+ nerrors="`expr $nerrors + 1`"
else
- echo " PASSED"
- DIFFTEST $infile $outfile
+ echo " PASSED"
+ DIFFTEST $infile $outfile
fi
rm -f $outfile
}
@@ -572,11 +574,11 @@ TOOLTESTV()
) >$actual 2>$actual_err
RET=$?
if [ $RET != 0 ] ; then
- echo "*FAILED*"
- nerrors="`expr $nerrors + 1`"
+ echo "*FAILED*"
+ nerrors="`expr $nerrors + 1`"
else
- echo " PASSED"
- DIFFTEST $infile $outfile
+ echo " PASSED"
+ DIFFTEST $infile $outfile
fi
# display output compare
@@ -637,6 +639,8 @@ TOOLTESTM() {
# Create the expect file if it doesn't yet exist.
echo " CREATED"
cp $actual $expect
+ echo " Expected result (*.tst) missing"
+ nerrors="`expr $nerrors + 1`"
elif $CMP $expect $actual; then
echo " PASSED"
else
@@ -648,7 +652,7 @@ TOOLTESTM() {
# Clean up output file
if test -z "$HDF5_NOCLEANUP"; then
- rm -f $actual $actual_err $actual_sav $actual_err_sav
+ rm -f $actual $actual_err $actual_sav $actual_err_sav
fi
}
@@ -682,7 +686,7 @@ TOOLTEST_DUMP()
VERIFY h5dump output $@
(
cd $TESTDIR
- $RUNSERIAL $H5DUMP_BIN -pH $outfile
+ $RUNSERIAL $H5DUMP_BIN -q creation_order -pH $outfile
) >$actual 2>$actual_err
cat $actual_err >> $actual
@@ -762,11 +766,11 @@ TOOLTEST_META()
# verify sizes.
MESSAGE "Verify the sizes of both output files ($size1 vs $size2)"
if [ $size1 -lt $size2 ]; then
- # pass
- echo " PASSED"
+ # pass
+ echo " PASSED"
else
- #fail
- echo "*FAILED*"
+ #fail
+ echo "*FAILED*"
nerrors="`expr $nerrors + 1`"
fi
@@ -793,6 +797,8 @@ TOOLTEST_HELP() {
# Create the expect data file if it doesn't yet exist.
echo " CREATED"
cp $actual $expect-CREATED
+ echo " Expected result (*.txt) missing"
+ nerrors="`expr $nerrors + 1`"
elif cmp -s $expect $actual; then
echo " PASSED"
else
@@ -840,7 +846,7 @@ USE_FILTER_SZIP_ENCODER=`$RUNSERIAL $H5DETECTSZIP_BIN`
fi
##############################################################################
-### T H E T E S T S
+### T H E T E S T S
##############################################################################
# prepare for test
COPY_TESTFILES_TO_TESTDIR
@@ -1047,6 +1053,10 @@ else
TOOLTEST deflate_file $arg
fi
+#crtorder
+arg="tordergr.h5 -L"
+TOOLTEST_DUMP crtorder $arg
+
#########################################################
# layout options (these files have no filters)
#########################################################
@@ -1223,27 +1233,27 @@ TOOLTEST_META meta_long h5repack_layout.h5 --metadata_block_size=8192
# layout conversions
###############################################################
if test $USE_FILTER_DEFLATE != "yes" ; then
- SKIP vds_dset_chunk_20x10x5
+ SKIP vds_dset_chunk_20x10x5
else
- VERIFY_LAYOUT_VDS vds_dset_chunk_20x10x5 1_vds.h5 vds_dset CHUNKED --layout vds_dset:CHUNK=20x10x5
+ VERIFY_LAYOUT_VDS vds_dset_chunk20x10x5 1_vds.h5 vds_dset CHUNKED --layout vds_dset:CHUNK=20x10x5
fi
if test $USE_FILTER_DEFLATE != "yes" ; then
- SKIP vds_chunk2x5x8
+ SKIP vds_chunk2x5x8
else
- VERIFY_LAYOUT_VDS vds_chunk2x5x8 3_1_vds.h5 vds_dset CHUNKED -l vds_dset:CHUNK=2x5x8
+ VERIFY_LAYOUT_VDS vds_chunk2x5x8 3_1_vds.h5 vds_dset CHUNKED -l vds_dset:CHUNK=2x5x8
fi
if test $USE_FILTER_DEFLATE != "yes" ; then
- SKIP vds_chunk3x6x9
+ SKIP vds_chunk3x6x9
else
- VERIFY_LAYOUT_VDS vds_chunk3x6x9 2_vds.h5 vds_dset CHUNKED -l vds_dset:CHUNK=3x6x9
+ VERIFY_LAYOUT_VDS vds_chunk3x6x9 2_vds.h5 vds_dset CHUNKED -l vds_dset:CHUNK=3x6x9
fi
if test $USE_FILTER_DEFLATE != "yes" ; then
- SKIP vds_compa 4_vds.h5
+ SKIP vds_compa 4_vds.h5
else
- VERIFY_LAYOUT_VDS vds_compa 4_vds.h5 vds_dset COMPACT -l vds_dset:COMPA
+ VERIFY_LAYOUT_VDS vds_compa 4_vds.h5 vds_dset COMPACT -l vds_dset:COMPA
fi
if test $USE_FILTER_DEFLATE != "yes" ; then
diff --git a/tools/test/h5repack/h5repack_plugin.sh.in b/tools/test/h5repack/h5repack_plugin.sh.in
index c7a9a6b..c9fe53c 100644
--- a/tools/test/h5repack/h5repack_plugin.sh.in
+++ b/tools/test/h5repack/h5repack_plugin.sh.in
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic document set and is
-# linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have access
-# to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
srcdir=@srcdir@
TOP_BUILDDIR=@top_builddir@
@@ -246,9 +244,10 @@ TOOLTEST_DUMP()
##############################################################################
# prepare for test
COPY_TESTFILES_TO_TESTDIR
+version_str=`echo @H5_VERSION@ | awk -F"-" '{print $1}' | sed 's/\./,/g'`
# Run the test
-arg="h5repack_layout.h5 -v -f UD=260,4,9,1,9,235"
+arg="h5repack_layout.h5 -v -f UD=260,4,9,$version_str"
TOOLTEST_DUMP plugin_version_test $arg
arg="h5repack_layout.h5 -v -f UD=257,1,9"
diff --git a/tools/test/h5repack/h5repacktst.c b/tools/test/h5repack/h5repacktst.c
index 82b45fc..3b82383 100644
--- a/tools/test/h5repack/h5repacktst.c
+++ b/tools/test/h5repack/h5repacktst.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
-* the files COPYING and Copyright.html. COPYING can be found at the root *
-* of the source code distribution tree; Copyright.html can be found at the *
-* root level of an installed copy of the electronic HDF5 document set and *
-* is linked from the top-level documents page. It can also be found at *
-* http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
-* access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "h5repack.h"
@@ -79,6 +77,20 @@
#define FNAME18 "h5repack_layout2.h5"
+/* Files for testing file space paging */
+#define FSPACE_OUT "h5repack_fspace_OUT.h5" /* The output file */
+#define NELMTS(X) (sizeof(X)/sizeof(X[0])) /* # of elements */
+const char *H5REPACK_FSPACE_FNAMES[] = {
+ "h5repack_latest.h5", /* 0 */
+ "h5repack_default.h5", /* 1 */
+ "h5repack_page_persist.h5", /* 2 */
+ "h5repack_fsm_aggr_persist.h5", /* 3 */
+ "h5repack_page_threshold.h5", /* 4 */
+ "h5repack_fsm_aggr_threshold.h5", /* 5 */
+ "h5repack_aggr.h5", /* 6 */
+ "h5repack_none.h5" /* 7 */
+};
+
#define FNAME_UB "ublock.bin"
/* obj and region references */
@@ -97,7 +109,6 @@ const char *H5REPACK_FILENAMES[] = {
/* Name of tool */
#define PROGRAMNAME "h5repacktst"
-
#define DIM1 40
#define DIM2 20
#define CDIM1 DIM1/2
@@ -170,8 +181,10 @@ int main (void)
{
pack_opt_t pack_options;
diff_opt_t diff_options;
- hsize_t fs_size = 0; /* free space section threshold */
- H5F_file_space_type_t fs_type = H5F_FILE_SPACE_DEFAULT; /* file space handling strategy */
+
+ unsigned j; /* Local index variable for testing file space */
+ const char *fname; /* File name for testing file space */
+
h5_stat_t file_stat;
h5_stat_size_t fsize1, fsize2; /* file sizes */
#if defined (H5_HAVE_FILTER_SZIP)
@@ -192,7 +205,7 @@ int main (void)
puts("Testing h5repack:");
/* make the test files */
- TESTING(" generating datasets");
+ TESTING(" generating files for testing");
if (make_testfiles() < 0)
GOERROR;
PASSED();
@@ -204,8 +217,161 @@ int main (void)
* 2) use the h5diff function to compare the input and output file
*-------------------------------------------------------------------------
*/
+ /*-------------------------------------------------------------------------
+ * Testing file space info setting
+ *-------------------------------------------------------------------------
+ */
+ TESTING(" files with file space info setting--no options (-S, -P, -T, -G) are set");
+ j = 0; /* #0 */
+ HDassert(j < NELMTS(H5REPACK_FSPACE_FNAMES));
+ fname = H5REPACK_FSPACE_FNAMES[j];
+ if(h5repack_init(&pack_options, 0, FALSE) < 0)
+ GOERROR;
+
+ if(h5repack(fname, FSPACE_OUT, &pack_options) < 0)
+ GOERROR;
+ if(h5diff(fname, FSPACE_OUT, NULL, NULL, &diff_options) > 0)
+ GOERROR;
+ if(h5repack_verify(fname, FSPACE_OUT, &pack_options)<=0)
+ GOERROR;
+ if(h5repack_end(&pack_options) < 0)
+ GOERROR;
+ PASSED();
+ TESTING(" files with file space info setting--all options -S, -P, -T, -G are set");
+ ++j; /* #1 */
+ HDassert(j < NELMTS(H5REPACK_FSPACE_FNAMES));
+ fname = H5REPACK_FSPACE_FNAMES[j];
+ if(h5repack_init(&pack_options, 0, FALSE) < 0)
+ GOERROR;
+ pack_options.fs_strategy = H5F_FSPACE_STRATEGY_NONE;
+ pack_options.fs_persist = -1; /* "FALSE" is set via -P 0 */
+ pack_options.fs_threshold = 1;
+ pack_options.fs_pagesize = 8192;
+ if(h5repack(fname, FSPACE_OUT, &pack_options) < 0)
+ GOERROR;
+ if(h5diff(fname, FSPACE_OUT, NULL, NULL, &diff_options) > 0)
+ GOERROR;
+ if(h5repack_verify(fname, FSPACE_OUT, &pack_options)<=0)
+ GOERROR;
+ if(h5repack_end(&pack_options) < 0)
+ GOERROR;
+ PASSED();
+
+
+ TESTING(" files with file space info setting--options -S and -T are set");
+ ++j; /* #2 */
+ HDassert(j < NELMTS(H5REPACK_FSPACE_FNAMES));
+ fname = H5REPACK_FSPACE_FNAMES[j];
+ if(h5repack_init(&pack_options, 0, FALSE) < 0)
+ GOERROR;
+ pack_options.fs_strategy = -1; /* "FSM_AGGR" specified via -S FSM_AGGR */
+ pack_options.fs_threshold = -1; /* "0" specified via -T 0 */
+ if(h5repack(fname, FSPACE_OUT, &pack_options) < 0)
+ GOERROR;
+ if(h5diff(fname, FSPACE_OUT, NULL, NULL, &diff_options) > 0)
+ GOERROR;
+ if(h5repack_verify(fname, FSPACE_OUT, &pack_options)<=0)
+ GOERROR;
+ if(h5repack_end(&pack_options) < 0)
+ GOERROR;
+ PASSED();
+
+
+ TESTING(" files with file space info setting-- options -S and -P are set & -L");
+ ++j; /* #3 */
+ HDassert(j < NELMTS(H5REPACK_FSPACE_FNAMES));
+ fname = H5REPACK_FSPACE_FNAMES[j];
+ if(h5repack_init(&pack_options, 0, TRUE) < 0)
+ GOERROR;
+ pack_options.fs_strategy = H5F_FSPACE_STRATEGY_PAGE; /* "PAGE" specified via -S */
+ pack_options.fs_persist = TRUE;
+ if(h5repack(fname, FSPACE_OUT, &pack_options) < 0)
+ GOERROR;
+ if(h5diff(fname, FSPACE_OUT, NULL, NULL, &diff_options) > 0)
+ GOERROR;
+ if(h5repack_verify(fname, FSPACE_OUT, &pack_options)<=0)
+ GOERROR;
+ if(h5repack_end(&pack_options) < 0)
+ GOERROR;
+ PASSED();
+
+ TESTING(" files with file space info setting-- options -P and -T are set & -L");
+ ++j; /* #4 */
+ HDassert(j < NELMTS(H5REPACK_FSPACE_FNAMES));
+ fname = H5REPACK_FSPACE_FNAMES[j];
+ if(h5repack_init(&pack_options, 0, TRUE) < 0)
+ GOERROR;
+ pack_options.fs_persist = -1; /* "FALSE" is set via -P 0 */
+ pack_options.fs_threshold = 2;
+ if(h5repack(fname, FSPACE_OUT, &pack_options) < 0)
+ GOERROR;
+ if(h5diff(fname, FSPACE_OUT, NULL, NULL, &diff_options) > 0)
+ GOERROR;
+ if(h5repack_verify(fname, FSPACE_OUT, &pack_options)<=0)
+ GOERROR;
+ if(h5repack_end(&pack_options) < 0)
+ GOERROR;
+ PASSED();
+
+ TESTING(" files with file space info setting-- options -S and -G are set & -L");
+ ++j; /* #5 */
+ HDassert(j < NELMTS(H5REPACK_FSPACE_FNAMES));
+ fname = H5REPACK_FSPACE_FNAMES[j];
+ if(h5repack_init(&pack_options, 0, TRUE) < 0)
+ GOERROR;
+ pack_options.fs_strategy = H5F_FSPACE_STRATEGY_PAGE;
+ pack_options.fs_pagesize = 8192;
+ if(h5repack(fname, FSPACE_OUT, &pack_options) < 0)
+ GOERROR;
+ if(h5diff(fname, FSPACE_OUT, NULL, NULL, &diff_options) > 0)
+ GOERROR;
+ if(h5repack_verify(fname, FSPACE_OUT, &pack_options)<=0)
+ GOERROR;
+ if(h5repack_end(&pack_options) < 0)
+ GOERROR;
+ PASSED();
+
+ TESTING(" files with file space info setting-- options -S, -P, -T, -G are set");
+ ++j; /* #6 */
+ HDassert(j < NELMTS(H5REPACK_FSPACE_FNAMES));
+ fname = H5REPACK_FSPACE_FNAMES[j];
+ if(h5repack_init(&pack_options, 0, FALSE) < 0)
+ GOERROR;
+ pack_options.fs_strategy = H5F_FSPACE_STRATEGY_NONE;
+ pack_options.fs_persist = -1; /* "FALSE" is set via -P 0 */
+ pack_options.fs_threshold = 1;
+ pack_options.fs_pagesize = 8192;
+ if(h5repack(fname, FSPACE_OUT, &pack_options) < 0)
+ GOERROR;
+ if(h5diff(fname, FSPACE_OUT, NULL, NULL, &diff_options) > 0)
+ GOERROR;
+ if(h5repack_verify(fname, FSPACE_OUT, &pack_options)<=0)
+ GOERROR;
+ if(h5repack_end(&pack_options) < 0)
+ GOERROR;
+ PASSED();
+
+ TESTING(" files with file space info setting-- options -S, -T, -G are set & -L");
+ ++j; /* #7 */
+ HDassert(j < NELMTS(H5REPACK_FSPACE_FNAMES));
+ fname = H5REPACK_FSPACE_FNAMES[j];
+ if(h5repack_init(&pack_options, 0, TRUE) < 0)
+ GOERROR;
+ pack_options.fs_strategy = H5F_FSPACE_STRATEGY_AGGR;
+ pack_options.fs_threshold = 1;
+ pack_options.fs_pagesize = 4096;
+ if(h5repack(fname, FSPACE_OUT, &pack_options) < 0)
+ GOERROR;
+ if(h5diff(fname, FSPACE_OUT, NULL, NULL, &diff_options) > 0)
+ GOERROR;
+ if(h5repack_verify(fname, FSPACE_OUT, &pack_options)<=0)
+ GOERROR;
+ if(h5repack_end(&pack_options) < 0)
+ GOERROR;
+ PASSED();
+
/*-------------------------------------------------------------------------
* file with fill values
*-------------------------------------------------------------------------
@@ -213,8 +379,7 @@ int main (void)
TESTING(" copy of datasets (fill values)");
- /* fs_type = 0; fs_size = 0 */
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack(FNAME0,FNAME0OUT,&pack_options) < 0)
GOERROR;
@@ -234,7 +399,7 @@ int main (void)
*-------------------------------------------------------------------------
*/
TESTING(" copy of datasets (all datatypes)");
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack(FNAME1,FNAME1OUT,&pack_options) < 0)
GOERROR;
@@ -254,7 +419,7 @@ int main (void)
*-------------------------------------------------------------------------
*/
TESTING(" copy of datasets (attributes)");
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack(FNAME2,FNAME2OUT,&pack_options) < 0)
GOERROR;
@@ -273,7 +438,7 @@ int main (void)
*-------------------------------------------------------------------------
*/
TESTING(" copy of datasets (hardlinks)");
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack(FNAME3,FNAME3OUT,&pack_options) < 0)
GOERROR;
@@ -293,7 +458,7 @@ int main (void)
*-------------------------------------------------------------------------
*/
TESTING(" copy of allocation early file");
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack(FNAME5,FNAME5OUT,&pack_options) < 0)
GOERROR;
@@ -323,7 +488,7 @@ int main (void)
*-------------------------------------------------------------------------
*/
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack_addfilter("dset1:GZIP=9",&pack_options) < 0)
GOERROR;
@@ -350,7 +515,7 @@ int main (void)
*-------------------------------------------------------------------------
*/
- if (h5repack_init (&pack_options, 0, TRUE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, TRUE) < 0)
GOERROR;
if (h5repack_addfilter("dset1:GZIP=9",&pack_options) < 0)
GOERROR;
@@ -378,7 +543,7 @@ int main (void)
#ifdef H5_HAVE_FILTER_DEFLATE
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack_addfilter("GZIP=1",&pack_options) < 0)
GOERROR;
@@ -416,7 +581,7 @@ int main (void)
*/
if (szip_can_encode) {
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack_addfilter("dset2:SZIP=8,EC",&pack_options) < 0)
GOERROR;
@@ -448,7 +613,7 @@ int main (void)
#if defined (H5_HAVE_FILTER_SZIP)
if (szip_can_encode) {
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack_addfilter("SZIP=8,NN",&pack_options) < 0)
GOERROR;
@@ -477,7 +642,7 @@ int main (void)
*-------------------------------------------------------------------------
*/
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack_addfilter("dset1:SHUF",&pack_options) < 0)
GOERROR;
@@ -501,8 +666,7 @@ int main (void)
TESTING(" addding shuffle filter to all");
- /* fs_type = H5F_FILE_SPACE_ALL_PERSIST; fs_size = 1 */
- if (h5repack_init (&pack_options, 0, FALSE, H5_INC_ENUM(H5F_file_space_type_t, fs_type), ++fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack_addfilter("SHUF",&pack_options) < 0)
GOERROR;
@@ -527,7 +691,7 @@ int main (void)
*-------------------------------------------------------------------------
*/
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack_addfilter("dset1:FLET",&pack_options) < 0)
GOERROR;
@@ -552,7 +716,7 @@ int main (void)
TESTING(" adding checksum filter to all");
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack_addfilter("FLET",&pack_options) < 0)
GOERROR;
@@ -577,7 +741,7 @@ int main (void)
*-------------------------------------------------------------------------
*/
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack_addlayout("dset1:CHUNK 20x10",&pack_options) < 0)
GOERROR;
@@ -617,7 +781,7 @@ int main (void)
*-------------------------------------------------------------------------
*/
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack_addlayout("dset1:CHUNK=20x10",&pack_options) < 0)
GOERROR;
@@ -639,7 +803,7 @@ int main (void)
*-------------------------------------------------------------------------
*/
- if (h5repack_init (&pack_options, 0, TRUE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, TRUE) < 0)
GOERROR;
if (h5repack_addlayout("dset1:CHUNK=20x10",&pack_options) < 0)
GOERROR;
@@ -659,7 +823,7 @@ int main (void)
*/
TESTING(" adding layout chunked to all");
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack_addlayout("CHUNK=20x10",&pack_options) < 0)
GOERROR;
@@ -680,7 +844,7 @@ int main (void)
* test an individual object option
*-------------------------------------------------------------------------
*/
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack_addlayout("dset1:CONTI",&pack_options) < 0)
GOERROR;
@@ -701,7 +865,7 @@ int main (void)
* test all objects option
*-------------------------------------------------------------------------
*/
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack_addlayout("CONTI",&pack_options) < 0)
GOERROR;
@@ -718,7 +882,7 @@ int main (void)
* do the same test for a file with filters (chunked)
*-------------------------------------------------------------------------
*/
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack_addlayout("CONTI",&pack_options) < 0)
GOERROR;
@@ -740,7 +904,7 @@ int main (void)
*-------------------------------------------------------------------------
*/
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack_addlayout("dset1:COMPA",&pack_options) < 0)
GOERROR;
@@ -761,8 +925,7 @@ int main (void)
*-------------------------------------------------------------------------
*/
- /* fs_type = H5F_FILE_SPACE_ALL; fs_size = 2 */
- if (h5repack_init (&pack_options, 0, FALSE, H5_INC_ENUM(H5F_file_space_type_t, fs_type), ++fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack_addlayout("COMPA",&pack_options) < 0)
GOERROR;
@@ -784,7 +947,7 @@ int main (void)
* layout compact to contiguous conversion
*-------------------------------------------------------------------------
*/
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack_addlayout("dset_compact:CONTI",&pack_options) < 0)
GOERROR;
@@ -804,7 +967,7 @@ int main (void)
* layout compact to chunk conversion
*-------------------------------------------------------------------------
*/
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack_addlayout("dset_compact:CHUNK=2x5",&pack_options) < 0)
GOERROR;
@@ -824,7 +987,7 @@ int main (void)
* layout compact to compact conversion
*-------------------------------------------------------------------------
*/
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack_addlayout("dset_compact:COMPA",&pack_options) < 0)
GOERROR;
@@ -843,7 +1006,7 @@ int main (void)
* layout contiguous to compact conversion
*-------------------------------------------------------------------------
*/
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack_addlayout("dset_contiguous:COMPA",&pack_options) < 0)
GOERROR;
@@ -862,7 +1025,7 @@ int main (void)
* layout contiguous to chunk conversion
*-------------------------------------------------------------------------
*/
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack_addlayout("dset_contiguous:CHUNK=3x6",&pack_options) < 0)
GOERROR;
@@ -882,7 +1045,7 @@ int main (void)
* layout contiguous to contiguous conversion
*-------------------------------------------------------------------------
*/
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack_addlayout("dset_contiguous:CONTI",&pack_options) < 0)
GOERROR;
@@ -901,7 +1064,7 @@ int main (void)
* layout chunked to compact conversion
*-------------------------------------------------------------------------
*/
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack_addlayout("dset_chunk:COMPA",&pack_options) < 0)
GOERROR;
@@ -921,7 +1084,7 @@ int main (void)
* layout chunked to contiguous conversion
*-------------------------------------------------------------------------
*/
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack_addlayout("dset_chunk:CONTI",&pack_options) < 0)
GOERROR;
@@ -940,7 +1103,7 @@ int main (void)
* layout chunked to chunked conversion
*-------------------------------------------------------------------------
*/
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack_addlayout("dset_chunk:CHUNK=18x13",&pack_options) < 0)
GOERROR;
@@ -970,8 +1133,7 @@ int main (void)
#if defined (H5_HAVE_FILTER_SZIP)
if (szip_can_encode) {
- /* fs_type = H5F_FILE_SPACE_AGGR_VFD; fs_size = 3 */
- if (h5repack_init (&pack_options, 0, FALSE, H5_INC_ENUM(H5F_file_space_type_t, fs_type), ++fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack(FNAME7,FNAME7OUT,&pack_options) < 0)
GOERROR;
@@ -996,7 +1158,7 @@ int main (void)
#if defined (H5_HAVE_FILTER_SZIP)
if (szip_can_encode) {
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack_addfilter("dset_szip:NONE",&pack_options) < 0)
GOERROR;
@@ -1021,7 +1183,7 @@ int main (void)
TESTING(" copy of deflate filter");
#ifdef H5_HAVE_FILTER_DEFLATE
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack(FNAME8,FNAME8OUT,&pack_options) < 0)
GOERROR;
@@ -1041,7 +1203,7 @@ int main (void)
TESTING(" removing deflate filter");
#ifdef H5_HAVE_FILTER_DEFLATE
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, ++fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack_addfilter("dset_deflate:NONE",&pack_options) < 0)
GOERROR;
@@ -1063,7 +1225,7 @@ int main (void)
TESTING(" copy of shuffle filter");
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack(FNAME9,FNAME9OUT,&pack_options) < 0)
GOERROR;
@@ -1079,7 +1241,7 @@ int main (void)
TESTING(" removing shuffle filter");
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack_addfilter("dset_shuffle:NONE",&pack_options) < 0)
GOERROR;
@@ -1097,7 +1259,7 @@ int main (void)
TESTING(" copy of fletcher filter");
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack(FNAME10,FNAME10OUT,&pack_options) < 0)
GOERROR;
@@ -1113,7 +1275,7 @@ int main (void)
TESTING(" removing fletcher filter");
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack_addfilter("dset_fletcher32:NONE",&pack_options) < 0)
GOERROR;
@@ -1131,7 +1293,7 @@ int main (void)
TESTING(" copy of nbit filter");
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack(FNAME12,FNAME12OUT,&pack_options) < 0)
GOERROR;
@@ -1147,7 +1309,7 @@ int main (void)
TESTING(" removing nbit filter");
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack_addfilter("dset_nbit:NONE",&pack_options) < 0)
GOERROR;
@@ -1165,7 +1327,7 @@ int main (void)
TESTING(" adding nbit filter");
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack_addfilter("dset_int31:NBIT",&pack_options) < 0)
GOERROR;
@@ -1183,7 +1345,7 @@ int main (void)
TESTING(" copy of scaleoffset filter");
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack(FNAME13,FNAME13OUT,&pack_options) < 0)
GOERROR;
@@ -1199,7 +1361,7 @@ int main (void)
TESTING(" removing scaleoffset filter");
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack_addfilter("dset_scaleoffset:NONE",&pack_options) < 0)
GOERROR;
@@ -1217,7 +1379,7 @@ int main (void)
TESTING(" adding scaleoffset filter");
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack_addfilter("dset_none:SOFF=31,IN",&pack_options) < 0)
GOERROR;
@@ -1249,8 +1411,7 @@ int main (void)
#if defined (H5_HAVE_FILTER_SZIP) && defined (H5_HAVE_FILTER_DEFLATE)
if (szip_can_encode) {
- /* fs_type = H5F_FILE_SPACE_VFD; fs_size = 4 */
- if (h5repack_init (&pack_options, 0, FALSE, H5_INC_ENUM(H5F_file_space_type_t, fs_type), ++fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack_addfilter("dset_deflate:SZIP=8,NN",&pack_options) < 0)
GOERROR;
@@ -1276,7 +1437,7 @@ int main (void)
#if defined (H5_HAVE_FILTER_SZIP) && defined (H5_HAVE_FILTER_DEFLATE)
if (szip_can_encode) {
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack_addfilter("dset_szip:GZIP=1",&pack_options) < 0)
GOERROR;
@@ -1307,7 +1468,7 @@ int main (void)
#if defined (H5_HAVE_FILTER_SZIP) && defined (H5_HAVE_FILTER_DEFLATE)
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack_addfilter("NONE",&pack_options) < 0)
GOERROR;
@@ -1331,7 +1492,7 @@ int main (void)
*/
TESTING(" big file");
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack(FNAME14,FNAME14OUT,&pack_options) < 0)
GOERROR;
@@ -1348,7 +1509,7 @@ int main (void)
*-------------------------------------------------------------------------
*/
TESTING(" external datasets");
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack(FNAME15,FNAME15OUT,&pack_options) < 0)
GOERROR;
@@ -1365,7 +1526,7 @@ int main (void)
*-------------------------------------------------------------------------
*/
TESTING(" file with userblock");
- if(h5repack_init(&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if(h5repack_init(&pack_options, 0, FALSE) < 0)
GOERROR;
if(h5repack(FNAME16, FNAME16OUT, &pack_options) < 0)
GOERROR;
@@ -1384,7 +1545,7 @@ int main (void)
*-------------------------------------------------------------------------
*/
TESTING(" latest file format options");
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
pack_options.latest=1;
pack_options.grp_compact=10;
@@ -1414,7 +1575,7 @@ int main (void)
#if defined (H5_HAVE_FILTER_DEFLATE)
- if (h5repack_init (&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if (h5repack_init (&pack_options, 0, FALSE) < 0)
GOERROR;
if (h5repack_addfilter("GZIP=1",&pack_options) < 0)
GOERROR;
@@ -1443,7 +1604,7 @@ int main (void)
#ifdef H5_HAVE_FILTER_DEFLATE
- if(h5repack_init(&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if(h5repack_init(&pack_options, 0, FALSE) < 0)
GOERROR;
/* add the options for a user block size and user block filename */
@@ -1476,7 +1637,7 @@ int main (void)
#ifdef H5_HAVE_FILTER_DEFLATE
- if(h5repack_init(&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if(h5repack_init(&pack_options, 0, FALSE) < 0)
GOERROR;
/* add the options for aligment */
@@ -1530,7 +1691,7 @@ int main (void)
*/
TESTING(" file with committed datatypes");
- if(h5repack_init(&pack_options, 0, FALSE, fs_type, fs_size) < 0)
+ if(h5repack_init(&pack_options, 0, FALSE) < 0)
GOERROR;
if(h5repack(FNAME17, FNAME17OUT, &pack_options) < 0)
@@ -1557,7 +1718,7 @@ int main (void)
/* First run without metadata option. No need to verify the correctness */
/* since this has been verified by earlier tests. Just record the file */
/* size of the output file. */
- if(h5repack_init(&pack_options, 0, FALSE, H5F_FILE_SPACE_DEFAULT, (hsize_t)0) < 0)
+ if(h5repack_init(&pack_options, 0, FALSE) < 0)
GOERROR;
if(h5repack(FNAME4, FNAME4OUT, &pack_options) < 0)
GOERROR;
@@ -1568,7 +1729,7 @@ int main (void)
GOERROR;
/* run it again with metadata option */
- if(h5repack_init(&pack_options, 0, FALSE, H5F_FILE_SPACE_DEFAULT, (hsize_t)0) < 0)
+ if(h5repack_init(&pack_options, 0, FALSE) < 0)
GOERROR;
pack_options.meta_block_size = 8192;
if(h5repack(FNAME4, FNAME4OUT, &pack_options) < 0)
@@ -1626,6 +1787,9 @@ static
int make_testfiles(void)
{
hid_t fid;
+ hid_t fcpl; /* File creation property list */
+ hid_t fapl; /* File access property list */
+ unsigned j; /* Local index variable */
/*-------------------------------------------------------------------------
* create a file for general copy test
@@ -1866,6 +2030,173 @@ int make_testfiles(void)
if(H5Fclose(fid) < 0)
return -1;
+ /*-------------------------------------------------------------------------
+ * create 8 files with combinations ???
+ *------------------------------------------------------------------------- */
+
+ /* Create file access property list */
+ if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ return -1;
+
+ /* Set to use latest library format */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ return -1;
+
+ /*
+ * #0 -- h5repack_latest.h5
+ * default: strategy=FSM_AGGR, persist=FALSE, threshold=1
+ * default: inpage=4096
+ */
+ j = 0;
+ if((fid = H5Fcreate(H5REPACK_FSPACE_FNAMES[j], H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ return -1;
+ if(H5Fclose(fid) < 0)
+ return -1;
+
+ /*
+ * #1 -- h5repack_default.h5
+ * default: strategy=FSM_AGGR, persist=FALSE, threshold=1
+ * default: inpage=4096
+ */
+ HDassert(j < NELMTS(H5REPACK_FSPACE_FNAMES));
+ if((fid = H5Fcreate(H5REPACK_FSPACE_FNAMES[++j], H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0)
+ return -1;
+ if(H5Fclose(fid) < 0)
+ return -1;
+
+ /*
+ * #2 -- h5repack_page_persist.h5
+ * Setting:
+ * strategy=PAGE, persist=TRUE, threshold=1
+ * inpage=512
+ * latest format
+ */
+ /* Create file creation property list */
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ return -1;
+ if(H5Pset_file_space_page_size(fcpl, (hsize_t)512) < 0)
+ return -1;
+ if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, TRUE, (hsize_t)1) < 0)
+ return -1;
+ HDassert(j < NELMTS(H5REPACK_FSPACE_FNAMES));
+ if((fid = H5Fcreate(H5REPACK_FSPACE_FNAMES[++j], H5F_ACC_TRUNC, fcpl, fapl)) < 0)
+ return -1;
+ if(H5Fclose(fid) < 0)
+ return -1;
+ if(H5Pclose(fcpl) < 0)
+ return -1;
+
+
+ /*
+ * #3 -- h5repack_fsm_aggr_persist.h5
+ * Setting:
+ * strategy=FSM_AGGR, persist=TRUE, threshold=1
+ * default: inpage=4096
+ */
+ /* Create file creation property list */
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ return -1;
+ if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, TRUE, (hsize_t)1) < 0)
+ return -1;
+ HDassert(j < NELMTS(H5REPACK_FSPACE_FNAMES));
+ if((fid = H5Fcreate(H5REPACK_FSPACE_FNAMES[++j], H5F_ACC_TRUNC, fcpl, H5P_DEFAULT)) < 0)
+ return -1;
+ if(H5Fclose(fid) < 0)
+ return -1;
+ if(H5Pclose(fcpl) < 0)
+ return -1;
+
+
+ /*
+ * #4 -- h5repack_page_threshold.h5
+ * Setting:
+ * strategy=PAGE, persist=FALSE, threshold=3
+ * inpage=8192
+ * latest format
+ */
+
+ /* Create file creation property list */
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ return -1;
+ if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, FALSE, (hsize_t)3) < 0)
+ return -1;
+ if(H5Pset_file_space_page_size(fcpl, (hsize_t)8192) < 0)
+ return -1;
+ HDassert(j < NELMTS(H5REPACK_FSPACE_FNAMES));
+ if((fid = H5Fcreate(H5REPACK_FSPACE_FNAMES[++j], H5F_ACC_TRUNC, fcpl, fapl)) < 0)
+ return -1;
+ if(H5Fclose(fid) < 0)
+ return -1;
+ if(H5Pclose(fcpl) < 0)
+ return -1;
+
+ /*
+ * #5 -- h5repack_fsm_aggr_threshold.h5
+ * Setting:
+ * strategy=FSM_AGGR, persist=FALSE, threshold=3
+ * inpage=4096
+ */
+
+ /* Create file creation property list */
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ return -1;
+ if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, FALSE, (hsize_t)3) < 0)
+ return -1;
+ if(H5Pset_file_space_page_size(fcpl, (hsize_t)FS_PAGESIZE_DEF) < 0)
+ return -1;
+ HDassert(j < NELMTS(H5REPACK_FSPACE_FNAMES));
+ if((fid = H5Fcreate(H5REPACK_FSPACE_FNAMES[++j], H5F_ACC_TRUNC, fcpl, H5P_DEFAULT)) < 0)
+ return -1;
+ if(H5Fclose(fid) < 0)
+ return -1;
+ if(H5Pclose(fcpl) < 0)
+ return -1;
+
+ /*
+ * #6 -- h5repack_aggr.h5
+ * Setting:
+ * strategy=AGGR, persist=FALSE, threshold=1
+ * latest format
+ */
+
+ /* Create file creation property list */
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ return -1;
+ if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_AGGR, FALSE, (hsize_t)1) < 0)
+ return -1;
+ HDassert(j < NELMTS(H5REPACK_FSPACE_FNAMES));
+ if((fid = H5Fcreate(H5REPACK_FSPACE_FNAMES[++j], H5F_ACC_TRUNC, fcpl, fapl)) < 0)
+ return -1;
+ if(H5Fclose(fid) < 0)
+ return -1;
+ if(H5Pclose(fcpl) < 0)
+ return -1;
+
+ /*
+ * #7 -- h5repack_none.h5
+ * Setting:
+ * strategy=NONE, persist=FALSE, threshold=1
+ * inpage=8192
+ */
+
+ /* Create file creation property list */
+ if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
+ return -1;
+ if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_NONE, FALSE, (hsize_t)1) < 0)
+ return -1;
+ if(H5Pset_file_space_page_size(fcpl, (hsize_t)8192) < 0)
+ return -1;
+ HDassert(j < NELMTS(H5REPACK_FSPACE_FNAMES));
+ if((fid = H5Fcreate(H5REPACK_FSPACE_FNAMES[++j], H5F_ACC_TRUNC, fcpl, H5P_DEFAULT)) < 0)
+ return -1;
+ if(H5Fclose(fid) < 0)
+ return -1;
+ if(H5Pclose(fcpl) < 0)
+ return -1;
+
+ if(H5Pclose(fapl) < 0)
+ return -1;
+
return 0;
out:
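The new file-space test files in make_testfiles() are all built from the same pattern: a file creation property list configured with H5Pset_file_space_strategy() and, where a page size matters, H5Pset_file_space_page_size(), plus an optional latest-format file access property list. A minimal, self-contained sketch of that pattern follows; the output name is a placeholder, not one of the H5REPACK_FSPACE_FNAMES entries.

/* Minimal sketch of the property-list setup used for the new file-space
 * test files. "fspace_example.h5" is a placeholder name. */
#include "hdf5.h"

int create_paged_file(void)
{
    hid_t fcpl = -1, fapl = -1, fid = -1;

    if ((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
        goto error;
    /* Latest file format, as used for the paged test files */
    if (H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
        goto error;

    if ((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
        goto error;
    /* strategy=PAGE, persist free-space, threshold=1 byte */
    if (H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 1, (hsize_t)1) < 0)
        goto error;
    /* 512-byte file space pages */
    if (H5Pset_file_space_page_size(fcpl, (hsize_t)512) < 0)
        goto error;

    if ((fid = H5Fcreate("fspace_example.h5", H5F_ACC_TRUNC, fcpl, fapl)) < 0)
        goto error;

    if (H5Fclose(fid) < 0)
        goto error;
    fid = -1;
    if (H5Pclose(fcpl) < 0)
        goto error;
    fcpl = -1;
    if (H5Pclose(fapl) < 0)
        goto error;
    return 0;

error:
    if (fid >= 0)
        H5Fclose(fid);
    if (fcpl >= 0)
        H5Pclose(fcpl);
    if (fapl >= 0)
        H5Pclose(fapl);
    return -1;
}

h5repacktst then feeds each file created this way through h5repack with the matching pack_options.fs_* settings and verifies the result against the original, which is what the new TESTING blocks in main() exercise.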
diff --git a/tools/test/h5repack/testfiles/4_vds.h5-vds_conti-v.ddl b/tools/test/h5repack/testfiles/4_vds.h5-vds_conti-v.ddl
index c499b35..54bf26c 100644
--- a/tools/test/h5repack/testfiles/4_vds.h5-vds_conti-v.ddl
+++ b/tools/test/h5repack/testfiles/4_vds.h5-vds_conti-v.ddl
@@ -5,7 +5,7 @@ DATASET "vds_dset" {
STORAGE_LAYOUT {
CONTIGUOUS
SIZE 576
- OFFSET 2144
+ OFFSET 2048
}
FILTERS {
NONE
diff --git a/tools/test/h5repack/testfiles/crtorder.tordergr.h5.ddl b/tools/test/h5repack/testfiles/crtorder.tordergr.h5.ddl
new file mode 100644
index 0000000..870d154
--- /dev/null
+++ b/tools/test/h5repack/testfiles/crtorder.tordergr.h5.ddl
@@ -0,0 +1,36 @@
+HDF5 "out-crtorder.tordergr.h5" {
+GROUP "/" {
+ GROUP "2" {
+ GROUP "a" {
+ GROUP "a1" {
+ }
+ GROUP "a2" {
+ GROUP "a21" {
+ }
+ GROUP "a22" {
+ }
+ }
+ }
+ GROUP "b" {
+ }
+ GROUP "c" {
+ }
+ }
+ GROUP "1" {
+ GROUP "c" {
+ }
+ GROUP "b" {
+ }
+ GROUP "a" {
+ GROUP "a1" {
+ }
+ GROUP "a2" {
+ GROUP "a22" {
+ }
+ GROUP "a21" {
+ }
+ }
+ }
+ }
+}
+}
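The new expected output above comes from dumping the repacked tordergr.h5 by creation order (the test script now runs h5dump with -q creation_order). Creation-order listings are only meaningful for groups that track link creation order; the sketch below shows the group-creation-property setup that enables such tracking. It is purely illustrative and is not the generator of tordergr.h5.

/* Illustrative only: enabling link creation-order tracking so that tools
 * such as "h5dump -q creation_order" can list group members in creation
 * order rather than by name. Not the actual generator of tordergr.h5. */
#include "hdf5.h"

int make_tracked_group_file(void)
{
    hid_t fid, gcpl, gid, sub;

    if ((fid = H5Fcreate("crtorder_example.h5", H5F_ACC_TRUNC, H5P_DEFAULT, H5P_DEFAULT)) < 0)
        return -1;

    /* Track and index links by creation order */
    if ((gcpl = H5Pcreate(H5P_GROUP_CREATE)) < 0)
        return -1;
    if (H5Pset_link_creation_order(gcpl, H5P_CRT_ORDER_TRACKED | H5P_CRT_ORDER_INDEXED) < 0)
        return -1;

    if ((gid = H5Gcreate2(fid, "g", H5P_DEFAULT, gcpl, H5P_DEFAULT)) < 0)
        return -1;

    /* Create members in non-alphabetical order: "c", then "b", then "a" */
    if ((sub = H5Gcreate2(gid, "c", H5P_DEFAULT, gcpl, H5P_DEFAULT)) < 0 || H5Gclose(sub) < 0)
        return -1;
    if ((sub = H5Gcreate2(gid, "b", H5P_DEFAULT, gcpl, H5P_DEFAULT)) < 0 || H5Gclose(sub) < 0)
        return -1;
    if ((sub = H5Gcreate2(gid, "a", H5P_DEFAULT, gcpl, H5P_DEFAULT)) < 0 || H5Gclose(sub) < 0)
        return -1;

    if (H5Gclose(gid) < 0 || H5Pclose(gcpl) < 0 || H5Fclose(fid) < 0)
        return -1;
    return 0;
}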
diff --git a/tools/test/h5repack/testfiles/h5repack-help.txt b/tools/test/h5repack/testfiles/h5repack-help.txt
index 049e27d..65edd89 100644
--- a/tools/test/h5repack/testfiles/h5repack-help.txt
+++ b/tools/test/h5repack/testfiles/h5repack-help.txt
@@ -17,10 +17,14 @@ usage: h5repack [OPTIONS] file1 file2
-M A, --metadata_block_size=A Metadata block size for H5Pset_meta_block_size
-t T, --threshold=T Threshold value for H5Pset_alignment
-a A, --alignment=A Alignment value for H5Pset_alignment
+ -q Q, --sort_by=Q Sort groups and attributes by index Q
+ -z Z, --sort_order=Z Sort groups and attributes by order Z
-f FILT, --filter=FILT Filter type
-l LAYT, --layout=LAYT Layout type
- -S FS_STRGY, --fs_strategy=FS_STRGY File space management strategy
- -T FS_THRD, --fs_threshold=FS_THRD Free-space section threshold
+ -S FS_STRATEGY, --fs_strategy=FS_STRATEGY File space management strategy for H5Pset_file_space_strategy
+ -P FS_PERSIST, --fs_persist=FS_PERSIST Persisting or not persisting free-space for H5Pset_file_space_strategy
+ -T FS_THRESHOLD, --fs_threshold=FS_THRESHOLD Free-space section threshold for H5Pset_file_space_strategy
+ -G FS_PAGESIZE, --fs_pagesize=FS_PAGESIZE File space page size for H5Pset_file_space_page_size
M - is an integer greater than 1, size of dataset in bytes (default is 0)
E - is a filename.
@@ -28,6 +32,8 @@ usage: h5repack [OPTIONS] file1 file2
U - is a filename.
T - is an integer
A - is an integer greater than zero
+ Q - is the sort index type for the input file. It can be "name" or "creation_order" (default)
+ Z - is the sort order type for the input file. It can be "descending" or "ascending" (default)
B - is the user block size, any value that is 512 or greater and is
a power of 2 (1024 default)
F - is the shared object header message type, any of <dspace|dtype|fill|
@@ -36,18 +42,27 @@ usage: h5repack [OPTIONS] file1 file2
--enable-error-stack Prints messages from the HDF5 error stack as they
occur.
- FS_STRGY is the file space management strategy to use for the output file.
- It is a string as listed below:
- ALL_PERSIST - Use persistent free-space managers, aggregators and virtual file driver
- for file space allocation
- ALL - Use non-persistent free-space managers, aggregators and virtual file driver
- for file space allocation
- AGGR_VFD - Use aggregators and virtual file driver for file space allocation
- VFD - Use virtual file driver for file space allocation
-
- FS_THRD is the free-space section threshold to use for the output file.
- It is the minimum size (in bytes) of free-space sections to be tracked
- by the the library's free-space managers.
+ FS_STRATEGY is a string indicating the file space strategy used:
+ FSM_AGGR:
+ The mechanisms used in managing file space are free-space managers, aggregators and virtual file driver.
+ PAGE:
+ The mechanisms used in managing file space are free-space managers with embedded paged aggregation and virtual file driver.
+ AGGR:
+ The mechanisms used in managing file space are aggregators and virtual file driver.
+ NONE:
+ The mechanisms used in managing file space are virtual file driver.
+ The default strategy when not set is FSM_AGGR without persisting free-space.
+
+   FS_PERSIST is 1 to persist free-space or 0 to not persist free-space.
+       The default when not set is to not persist free-space.
+ The value is ignored for AGGR and NONE strategies.
+
+ FS_THRESHOLD is the minimum size (in bytes) of free-space sections to be tracked by the library.
+ The default when not set is 1.
+ The value is ignored for AGGR and NONE strategies.
+
+ FS_PAGESIZE is the size (in bytes) >=512 that is used by the library when the file space strategy PAGE is used.
+ The default when not set is 4096.
FILT - is a string with the format:
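
Taken together, a hypothetical invocation such as "h5repack -S PAGE -P 1 -T 1 -G 8192 in.h5 out.h5" (the file names are placeholders) would be expected to repack in.h5 into an out.h5 whose creation property list uses the PAGE strategy with persistent free-space, a 1-byte free-space section threshold, and an 8192-byte file space page size.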
diff --git a/tools/test/h5repack/testfiles/h5repack_layout.h5-plugin_version_test.ddl b/tools/test/h5repack/testfiles/h5repack_layout.h5-plugin_version_test.ddl
index 3d09e14..a951638 100644
--- a/tools/test/h5repack/testfiles/h5repack_layout.h5-plugin_version_test.ddl
+++ b/tools/test/h5repack/testfiles/h5repack_layout.h5-plugin_version_test.ddl
@@ -11,7 +11,7 @@ GROUP "/" {
USER_DEFINED_FILTER {
FILTER_ID 260
COMMENT dynlib4
- PARAMS { 9 1 9 235 }
+ PARAMS { 9 1 11 0 }
}
}
FILLVALUE {
@@ -33,7 +33,7 @@ GROUP "/" {
USER_DEFINED_FILTER {
FILTER_ID 260
COMMENT dynlib4
- PARAMS { 9 1 9 235 }
+ PARAMS { 9 1 11 0 }
}
}
FILLVALUE {
@@ -55,7 +55,7 @@ GROUP "/" {
USER_DEFINED_FILTER {
FILTER_ID 260
COMMENT dynlib4
- PARAMS { 9 1 9 235 }
+ PARAMS { 9 1 11 0 }
}
}
FILLVALUE {
@@ -77,7 +77,7 @@ GROUP "/" {
USER_DEFINED_FILTER {
FILTER_ID 260
COMMENT dynlib4
- PARAMS { 9 1 9 235 }
+ PARAMS { 9 1 11 0 }
}
}
FILLVALUE {
@@ -99,7 +99,7 @@ GROUP "/" {
USER_DEFINED_FILTER {
FILTER_ID 260
COMMENT dynlib4
- PARAMS { 9 1 9 235 }
+ PARAMS { 9 1 11 0 }
}
}
FILLVALUE {
@@ -121,7 +121,7 @@ GROUP "/" {
USER_DEFINED_FILTER {
FILTER_ID 260
COMMENT dynlib4
- PARAMS { 9 1 9 235 }
+ PARAMS { 9 1 11 0 }
}
}
FILLVALUE {
@@ -143,7 +143,7 @@ GROUP "/" {
USER_DEFINED_FILTER {
FILTER_ID 260
COMMENT dynlib4
- PARAMS { 9 1 9 235 }
+ PARAMS { 9 1 11 0 }
}
}
FILLVALUE {
diff --git a/tools/test/h5repack/testh5repack_detect_szip.c b/tools/test/h5repack/testh5repack_detect_szip.c
index e91b2f7..e08d5ab 100644
--- a/tools/test/h5repack/testh5repack_detect_szip.c
+++ b/tools/test/h5repack/testh5repack_detect_szip.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include <stdio.h>
diff --git a/tools/test/h5stat/CMakeLists.txt b/tools/test/h5stat/CMakeLists.txt
index 98cfed7..10ac5e0 100644
--- a/tools/test/h5stat/CMakeLists.txt
+++ b/tools/test/h5stat/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_TOOLS_TEST_H5STAT)
#-----------------------------------------------------------------------------
@@ -17,6 +17,6 @@ INCLUDE_DIRECTORIES (${HDF5_TOOLS_DIR}/lib)
set_target_properties (h5stat_gentest PROPERTIES FOLDER generator/tools)
#add_test (NAME h5stat_gentest COMMAND $<TARGET_FILE:h5stat_gentest>)
- endif (HDF5_BUILD_GENERATORS)
+ endif ()
include (CMakeTests.cmake)
diff --git a/tools/test/h5stat/CMakeTests.cmake b/tools/test/h5stat/CMakeTests.cmake
index 39faca0..cd52886 100644
--- a/tools/test/h5stat/CMakeTests.cmake
+++ b/tools/test/h5stat/CMakeTests.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
##############################################################################
##############################################################################
@@ -25,7 +36,6 @@
h5stat_newgrat.ddl
h5stat_newgrat-UG.ddl
h5stat_newgrat-UA.ddl
- h5stat_idx.ddl
h5stat_err1_links.ddl
h5stat_links1.ddl
h5stat_links2.ddl
@@ -46,17 +56,16 @@
h5stat_filters.h5
h5stat_tsohm.h5
h5stat_newgrat.h5
- h5stat_idx.h5
h5stat_threshold.h5
)
foreach (ddl_file ${HDF5_REFERENCE_FILES})
HDFTEST_COPY_FILE("${HDF5_TOOLS_TEST_H5STAT_SOURCE_DIR}/testfiles/${ddl_file}" "${PROJECT_BINARY_DIR}/${ddl_file}" "h5stat_files")
- endforeach (ddl_file ${HDF5_REFERENCE_FILES})
+ endforeach ()
foreach (h5_file ${HDF5_REFERENCE_TEST_FILES})
HDFTEST_COPY_FILE("${HDF5_TOOLS_TEST_H5STAT_SOURCE_DIR}/testfiles/${h5_file}" "${PROJECT_BINARY_DIR}/${h5_file}" "h5stat_files")
- endforeach (h5_file ${HDF5_REFERENCE_TEST_FILES})
+ endforeach ()
add_custom_target(h5stat_files ALL COMMENT "Copying files needed by h5stat tests" DEPENDS ${h5stat_files_list})
##############################################################################
@@ -65,16 +74,16 @@
##############################################################################
##############################################################################
- MACRO (ADD_H5_TEST resultfile resultcode)
+ macro (ADD_H5_TEST resultfile resultcode)
# If using memchecker add tests without using scripts
if (HDF5_ENABLE_USING_MEMCHECKER)
add_test (NAME H5STAT-${resultfile} COMMAND $<TARGET_FILE:h5stat> ${ARGN})
if (NOT ${resultcode} STREQUAL "0")
set_tests_properties (H5STAT-${resultfile} PROPERTIES WILL_FAIL "true")
- endif (NOT ${resultcode} STREQUAL "0")
+ endif ()
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (H5STAT-${resultfile} PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
else (HDF5_ENABLE_USING_MEMCHECKER)
add_test (
NAME H5STAT-${resultfile}
@@ -87,8 +96,8 @@
-D "TEST_REFERENCE=${resultfile}.ddl"
-P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
)
- endif (HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO (ADD_H5_TEST file)
+ endif ()
+ endmacro ()
##############################################################################
##############################################################################
@@ -167,9 +176,9 @@
)
if (NOT "${last_test}" STREQUAL "")
set_tests_properties (H5STAT-clearall-objects PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
+ endif ()
set (last_test "H5STAT-clearall-objects")
- endif (HDF5_ENABLE_USING_MEMCHECKER)
+ endif ()
# Test for help flag
ADD_H5_TEST (h5stat_help1 0 -h)
@@ -196,8 +205,6 @@
ADD_H5_TEST (h5stat_newgrat 0 h5stat_newgrat.h5)
ADD_H5_TEST (h5stat_newgrat-UG 0 -G h5stat_newgrat.h5)
ADD_H5_TEST (h5stat_newgrat-UA 0 -A h5stat_newgrat.h5)
-# h5stat_idx.h5 is generated by h5stat_gentest.c
- ADD_H5_TEST (h5stat_idx 0 h5stat_idx.h5)
#
# Tests for -l (--links) option on h5stat_threshold.h5:
# -l 0 (incorrect threshold value)
diff --git a/tools/test/h5stat/Makefile.am b/tools/test/h5stat/Makefile.am
index 630e896..dd251f8 100644
--- a/tools/test/h5stat/Makefile.am
+++ b/tools/test/h5stat/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
diff --git a/tools/test/h5stat/h5stat_gentest.c b/tools/test/h5stat/h5stat_gentest.c
index b1ab168..0f696d0 100644
--- a/tools/test/h5stat/h5stat_gentest.c
+++ b/tools/test/h5stat/h5stat_gentest.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -77,7 +75,7 @@ gen_newgrat_file(const char *fname)
goto error;
/* Set file space handling strategy */
- if(H5Pset_file_space(fcpl, H5F_FILE_SPACE_ALL_PERSIST, (hsize_t)0) < 0)
+ if(H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_FSM_AGGR, 1, (hsize_t)1) < 0)
goto error;
/* Create file */
diff --git a/tools/test/h5stat/testfiles/h5stat_filters.ddl b/tools/test/h5stat/testfiles/h5stat_filters.ddl
index 1a4fd72..9f9e146 100644
--- a/tools/test/h5stat/testfiles/h5stat_filters.ddl
+++ b/tools/test/h5stat/testfiles/h5stat_filters.ddl
@@ -82,12 +82,14 @@ Small # of attributes (objects with 1 to 10 attributes):
Attribute bins:
Total # of objects with attributes: 0
Max. # of attributes to objects: 0
+Free-space persist: FALSE
Free-space section threshold: 1 bytes
Small size free-space sections (< 10 bytes):
Total # of small size sections: 0
Free-space section bins:
Total # of sections: 0
-File space management strategy: H5F_FILE_SPACE_ALL
+File space management strategy: H5F_FSPACE_STRATEGY_FSM_AGGR
+File space page size: 4096 bytes
Summary of file space information:
File metadata: 37312 bytes
Raw data: 8659 bytes
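
The new "Free-space persist" and "File space page size" lines in the h5stat reference output report the HDF5 1.10 file creation properties. As an illustration of the public API only (not h5stat's actual implementation), such values can be queried from an open file like this:

    /* Sketch only: query the file space settings that h5stat reports. */
    #include "hdf5.h"
    #include <stdio.h>

    static int
    print_fspace_info(const char *fname)
    {
        hid_t                 fid, fcpl;
        H5F_fspace_strategy_t strategy;
        hbool_t               persist;
        hsize_t               threshold, fsp_size;

        if((fid = H5Fopen(fname, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0)
            return -1;
        if((fcpl = H5Fget_create_plist(fid)) < 0)
            return -1;
        if(H5Pget_file_space_strategy(fcpl, &strategy, &persist, &threshold) < 0)
            return -1;
        if(H5Pget_file_space_page_size(fcpl, &fsp_size) < 0)
            return -1;
        printf("strategy=%d persist=%s threshold=%llu page size=%llu\n",
               (int)strategy, persist ? "TRUE" : "FALSE",
               (unsigned long long)threshold, (unsigned long long)fsp_size);
        if(H5Pclose(fcpl) < 0 || H5Fclose(fid) < 0)
            return -1;
        return 0;
    }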
diff --git a/tools/test/h5stat/testfiles/h5stat_idx.ddl b/tools/test/h5stat/testfiles/h5stat_idx.ddl
index b26f1a4..1b6ae0c 100644
--- a/tools/test/h5stat/testfiles/h5stat_idx.ddl
+++ b/tools/test/h5stat/testfiles/h5stat_idx.ddl
@@ -79,15 +79,17 @@ Small # of attributes (objects with 1 to 10 attributes):
Attribute bins:
Total # of objects with attributes: 0
Max. # of attributes to objects: 0
+Free-space persist: FALSE
Free-space section threshold: 1 bytes
Small size free-space sections (< 10 bytes):
Total # of small size sections: 0
Free-space section bins:
Total # of sections: 0
-File space management strategy: H5F_FILE_SPACE_ALL
+File space management strategy: H5F_FSPACE_STRATEGY_FSM_AGGR
+File space page size: 4096 bytes
Summary of file space information:
File metadata: 965 bytes
Raw data: 110 bytes
Amount/Percent of tracked free space: 0 bytes/0.0%
- Unaccounted space: 1131 bytes
-Total space: 2206 bytes
+ Unaccounted space: 1083 bytes
+Total space: 2158 bytes
diff --git a/tools/test/h5stat/testfiles/h5stat_idx.h5 b/tools/test/h5stat/testfiles/h5stat_idx.h5
index 303d1f8..83ebcdb 100644
--- a/tools/test/h5stat/testfiles/h5stat_idx.h5
+++ b/tools/test/h5stat/testfiles/h5stat_idx.h5
Binary files differ
diff --git a/tools/test/h5stat/testfiles/h5stat_links2.ddl b/tools/test/h5stat/testfiles/h5stat_links2.ddl
index 4622884..9fc82cd 100644
--- a/tools/test/h5stat/testfiles/h5stat_links2.ddl
+++ b/tools/test/h5stat/testfiles/h5stat_links2.ddl
@@ -91,12 +91,14 @@ Attribute bins:
# of objects with 10 - 99 attributes: 3
Total # of objects with attributes: 5
Max. # of attributes to objects: 25
+Free-space persist: FALSE
Free-space section threshold: 1 bytes
Small size free-space sections (< 10 bytes):
Total # of small size sections: 0
Free-space section bins:
Total # of sections: 0
-File space management strategy: H5F_FILE_SPACE_ALL
+File space management strategy: H5F_FSPACE_STRATEGY_FSM_AGGR
+File space page size: 4096 bytes
Summary of file space information:
File metadata: 16128 bytes
Raw data: 0 bytes
diff --git a/tools/test/h5stat/testfiles/h5stat_newgrat.ddl b/tools/test/h5stat/testfiles/h5stat_newgrat.ddl
index e305f58..130fe2f 100644
--- a/tools/test/h5stat/testfiles/h5stat_newgrat.ddl
+++ b/tools/test/h5stat/testfiles/h5stat_newgrat.ddl
@@ -9,7 +9,7 @@ File information
Max. # of objects in group: 35001
File space information for file metadata (in bytes):
Superblock: 48
- Superblock extension: 119
+ Superblock extension: 186
User block: 0
Object headers: (total/unused)
Groups: 5145147/3220092
@@ -78,6 +78,7 @@ Attribute bins:
# of objects with 100 - 999 attributes: 1
Total # of objects with attributes: 1
Max. # of attributes to objects: 100
+Free-space persist: TRUE
Free-space section threshold: 1 bytes
Small size free-space sections (< 10 bytes):
# of sections of size 1: 1
@@ -86,10 +87,11 @@ Free-space section bins:
# of sections of size 1 - 9: 1
# of sections of size 10 - 99: 4
Total # of sections: 5
-File space management strategy: H5F_FILE_SPACE_ALL_PERSIST
+File space management strategy: H5F_FSPACE_STRATEGY_FSM_AGGR
+File space page size: 4096 bytes
Summary of file space information:
- File metadata: 6362036 bytes
+ File metadata: 6362103 bytes
Raw data: 0 bytes
Amount/Percent of tracked free space: 132 bytes/0.0%
Unaccounted space: 0 bytes
-Total space: 6362168 bytes
+Total space: 6362235 bytes
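
The larger superblock extension and file metadata totals above come from regenerating h5stat_newgrat.h5 with persistent free-space. The amount of tracked free space that h5stat summarizes can also be read directly through the public API; a small illustrative sketch (not h5stat's code):

    /* Sketch only: report the tracked free space of an existing HDF5 file. */
    #include "hdf5.h"
    #include <stdio.h>

    static int
    print_free_space(const char *fname)
    {
        hid_t    fid;
        hssize_t nfree;

        if((fid = H5Fopen(fname, H5F_ACC_RDONLY, H5P_DEFAULT)) < 0)
            return -1;
        if((nfree = H5Fget_freespace(fid)) < 0)
            return -1;
        printf("%s: %lld bytes of tracked free space\n", fname, (long long)nfree);
        return (H5Fclose(fid) < 0) ? -1 : 0;
    }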
diff --git a/tools/test/h5stat/testfiles/h5stat_newgrat.h5 b/tools/test/h5stat/testfiles/h5stat_newgrat.h5
index c919b71..0d68e79 100644
--- a/tools/test/h5stat/testfiles/h5stat_newgrat.h5
+++ b/tools/test/h5stat/testfiles/h5stat_newgrat.h5
Binary files differ
diff --git a/tools/test/h5stat/testfiles/h5stat_numattrs1.ddl b/tools/test/h5stat/testfiles/h5stat_numattrs1.ddl
index fb5568d..af53776 100644
--- a/tools/test/h5stat/testfiles/h5stat_numattrs1.ddl
+++ b/tools/test/h5stat/testfiles/h5stat_numattrs1.ddl
@@ -8,7 +8,8 @@ Attribute bins:
# of objects with 10 - 99 attributes: 3
Total # of objects with attributes: 5
Max. # of attributes to objects: 25
-File space management strategy: H5F_FILE_SPACE_ALL
+File space management strategy: H5F_FSPACE_STRATEGY_FSM_AGGR
+File space page size: 4096 bytes
Summary of file space information:
File metadata: 16128 bytes
Raw data: 0 bytes
diff --git a/tools/test/h5stat/testfiles/h5stat_numattrs2.ddl b/tools/test/h5stat/testfiles/h5stat_numattrs2.ddl
index ccb23c1..638781b 100644
--- a/tools/test/h5stat/testfiles/h5stat_numattrs2.ddl
+++ b/tools/test/h5stat/testfiles/h5stat_numattrs2.ddl
@@ -91,12 +91,14 @@ Attribute bins:
# of objects with 10 - 99 attributes: 3
Total # of objects with attributes: 5
Max. # of attributes to objects: 25
+Free-space persist: FALSE
Free-space section threshold: 1 bytes
Small size free-space sections (< 10 bytes):
Total # of small size sections: 0
Free-space section bins:
Total # of sections: 0
-File space management strategy: H5F_FILE_SPACE_ALL
+File space management strategy: H5F_FSPACE_STRATEGY_FSM_AGGR
+File space page size: 4096 bytes
Summary of file space information:
File metadata: 16128 bytes
Raw data: 0 bytes
diff --git a/tools/test/h5stat/testfiles/h5stat_tsohm.ddl b/tools/test/h5stat/testfiles/h5stat_tsohm.ddl
index 4cf33fc..9369950 100644
--- a/tools/test/h5stat/testfiles/h5stat_tsohm.ddl
+++ b/tools/test/h5stat/testfiles/h5stat_tsohm.ddl
@@ -76,12 +76,14 @@ Small # of attributes (objects with 1 to 10 attributes):
Attribute bins:
Total # of objects with attributes: 0
Max. # of attributes to objects: 0
+Free-space persist: FALSE
Free-space section threshold: 1 bytes
Small size free-space sections (< 10 bytes):
Total # of small size sections: 0
Free-space section bins:
Total # of sections: 0
-File space management strategy: H5F_FILE_SPACE_ALL
+File space management strategy: H5F_FSPACE_STRATEGY_FSM_AGGR
+File space page size: 4096 bytes
Summary of file space information:
File metadata: 3850 bytes
Raw data: 0 bytes
diff --git a/tools/test/h5stat/testh5stat.sh.in b/tools/test/h5stat/testh5stat.sh.in
index e94f9bd..ca7ca4c 100644
--- a/tools/test/h5stat/testh5stat.sh.in
+++ b/tools/test/h5stat/testh5stat.sh.in
@@ -6,18 +6,16 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Tests for the h5stat tool
#
# Modifications:
-# Vailin Choi; July 2013
-# Add tests for -l, -m, -a options
+# Vailin Choi; July 2013
+# Add tests for -l, -m, -a options
#
srcdir=@srcdir@
@@ -134,10 +132,10 @@ COPY_TESTFILES_TO_TESTDIR()
INODE_SDIR=`$LS -i -d $SDIR | $AWK -F' ' '{print $1}'`
INODE_DDIR=`$LS -i -d $TESTDIR | $AWK -F' ' '{print $1}'`
if [ "$INODE_SDIR" != "$INODE_DDIR" ]; then
- $CP -f $tstfile $TESTDIR
+ $CP -f $tstfile $TESTDIR
if [ $? -ne 0 ]; then
echo "Error: FAILED to copy $tstfile ."
-
+
# Comment out this to CREATE expected file
exit $EXIT_FAILURE
fi
@@ -201,9 +199,11 @@ TOOLTEST() {
cat $actual_err >> $actual
if [ ! -f $expect ]; then
- # Create the expect file if it doesn't yet exist.
- echo " CREATED"
- cp $actual $expect
+ # Create the expect file if it doesn't yet exist.
+ echo " CREATED"
+ cp $actual $expect
+ echo " Expected result (*.ddl) missing"
+ nerrors="`expr $nerrors + 1`"
elif $CMP $expect $actual; then
echo " PASSED"
else
@@ -222,15 +222,15 @@ TOOLTEST() {
# Print a "SKIP" message
SKIP() {
- TESTING $STAT $@
- echo " -SKIP-"
+ TESTING $STAT $@
+ echo " -SKIP-"
}
-
+
##############################################################################
##############################################################################
-### T H E T E S T S ###
+### T H E T E S T S ###
##############################################################################
##############################################################################
# prepare for test
@@ -254,7 +254,7 @@ TOOLTEST h5stat_filters-dT.ddl -dT h5stat_filters.h5
TOOLTEST h5stat_filters-UD.ddl -D h5stat_filters.h5
TOOLTEST h5stat_filters-UT.ddl -T h5stat_filters.h5
#
-# h5stat_tsohm.h5 is a copy of ../../../test/tsohm.h5 generated by tsohm.c
+# h5stat_tsohm.h5 is a copy of ../../../test/tsohm.h5 generated by tsohm.c
# as of release 1.8.7-snap0 (on a 64-bit machine)
TOOLTEST h5stat_tsohm.ddl h5stat_tsohm.h5
# h5stat_newgrat.h5 is generated by h5stat_gentest.c
@@ -264,36 +264,36 @@ TOOLTEST h5stat_newgrat-UA.ddl -A h5stat_newgrat.h5
# h5stat_idx.h5 is generated by h5stat_gentest.c
TOOLTEST h5stat_idx.ddl h5stat_idx.h5
#
-# Tests for -l (--links) option on h5stat_threshold.h5:
-# -l 0 (incorrect threshold value)
-# -g -l 8
-# --links=8
-# --links=20 -g
+# Tests for -l (--links) option on h5stat_threshold.h5:
+# -l 0 (incorrect threshold value)
+# -g -l 8
+# --links=8
+# --links=20 -g
TOOLTEST h5stat_err1_links.ddl -l 0 h5stat_threshold.h5
TOOLTEST h5stat_links1.ddl -g -l 8 h5stat_threshold.h5
TOOLTEST h5stat_links2.ddl --links=8 h5stat_threshold.h5
TOOLTEST h5stat_links3.ddl --links=20 -g h5stat_threshold.h5
#
-# Tests for -l (--links) option on h5stat_newgrat.h5:
-# -g
-# -g -l 40000
+# Tests for -l (--links) option on h5stat_newgrat.h5:
+# -g
+# -g -l 40000
TOOLTEST h5stat_links4.ddl -g h5stat_newgrat.h5
TOOLTEST h5stat_links5.ddl -g -l 40000 h5stat_newgrat.h5
#
# Tests for -m (--dims) option on h5stat_threshold.h5
-# -d --dims=-1 (incorrect threshold value)
-# -gd -m 5
-# -d --di=15
+# -d --dims=-1 (incorrect threshold value)
+# -gd -m 5
+# -d --di=15
TOOLTEST h5stat_err1_dims.ddl -d --dims=-1 h5stat_threshold.h5
TOOLTEST h5stat_dims1.ddl -gd -m 5 h5stat_threshold.h5
TOOLTEST h5stat_dims2.ddl -d --di=15 h5stat_threshold.h5
#
# Tests for -a option on h5stat_threshold.h5
-# -a -2 (incorrect threshold value)
-# --numattrs (without threshold value)
-# -AS -a 10
-# -a 1
-# -A --numattrs=25
+# -a -2 (incorrect threshold value)
+# --numattrs (without threshold value)
+# -AS -a 10
+# -a 1
+# -A --numattrs=25
TOOLTEST h5stat_err1_numattrs.ddl -a -2 h5stat_threshold.h5
TOOLTEST h5stat_err2_numattrs.ddl --numattrs h5stat_threshold.h5
TOOLTEST h5stat_numattrs1.ddl -AS -a 10 h5stat_threshold.h5
@@ -301,7 +301,7 @@ TOOLTEST h5stat_numattrs2.ddl -a 1 h5stat_threshold.h5
TOOLTEST h5stat_numattrs3.ddl -A --numattrs=25 h5stat_threshold.h5
#
# Tests for -a option on h5stat_newgrat.h5
-# -A -a 100
+# -A -a 100
TOOLTEST h5stat_numattrs4.ddl -A -a 100 h5stat_newgrat.h5
#
diff --git a/tools/test/misc/CMakeLists.txt b/tools/test/misc/CMakeLists.txt
index a53fa73..5e3c0a2 100644
--- a/tools/test/misc/CMakeLists.txt
+++ b/tools/test/misc/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_TOOLS_TEST_MISC)
#-----------------------------------------------------------------------------
@@ -17,9 +17,16 @@ INCLUDE_DIRECTORIES (${HDF5_TOOLS_DIR}/lib)
set_target_properties (h5repart_gentest PROPERTIES FOLDER generator/tools)
#add_test (NAME h5repart_gentest COMMAND $<TARGET_FILE:h5repart_gentest>)
+ add_executable (h5clear_gentest ${HDF5_TOOLS_TEST_MISC_SOURCE_DIR}/h5clear_gentest.c)
+ TARGET_NAMING (h5clear_gentest STATIC)
+ TARGET_C_PROPERTIES (h5clear_gentest STATIC " " " ")
+ target_link_libraries (h5clear_gentest ${HDF5_LIB_TARGET} ${HDF5_TOOLS_LIB_TARGET})
+ set_target_properties (h5clear_gentest PROPERTIES FOLDER tools)
+ #add_test (NAME H5CLEAR-h5clear_gentest COMMAND $<TARGET_FILE:h5clear_gentest>)
+
add_subdirectory (${HDF5_TOOLS_TEST_MISC_SOURCE_DIR}/vds)
- endif (HDF5_BUILD_GENERATORS)
+ endif ()
add_executable (h5repart_test ${HDF5_TOOLS_TEST_MISC_SOURCE_DIR}/repart_test.c)
TARGET_NAMING (h5repart_test STATIC)
@@ -27,16 +34,12 @@ INCLUDE_DIRECTORIES (${HDF5_TOOLS_DIR}/lib)
target_link_libraries (h5repart_test ${HDF5_LIB_TARGET} ${HDF5_TOOLS_LIB_TARGET})
set_target_properties (h5repart_test PROPERTIES FOLDER tools)
- add_executable (h5clear_gentest ${HDF5_TOOLS_TEST_MISC_SOURCE_DIR}/h5clear_gentest.c)
- TARGET_NAMING (h5clear_gentest STATIC)
- TARGET_C_PROPERTIES (h5clear_gentest STATIC " " " ")
- target_link_libraries (h5clear_gentest ${HDF5_LIB_TARGET} ${HDF5_TOOLS_LIB_TARGET})
- set_target_properties (h5clear_gentest PROPERTIES FOLDER tools)
-
add_executable (clear_open_chk ${HDF5_TOOLS_TEST_MISC_SOURCE_DIR}/clear_open_chk.c)
TARGET_NAMING (clear_open_chk STATIC)
TARGET_C_PROPERTIES (clear_open_chk STATIC " " " ")
target_link_libraries (clear_open_chk ${HDF5_LIB_TARGET} ${HDF5_TOOLS_LIB_TARGET})
set_target_properties (clear_open_chk PROPERTIES FOLDER tools)
- include (CMakeTests.cmake)
+ include (CMakeTestsRepart.cmake)
+ include (CMakeTestsClear.cmake)
+ include (CMakeTestsMkgrp.cmake)
diff --git a/tools/test/misc/CMakeTests.cmake b/tools/test/misc/CMakeTests.cmake
deleted file mode 100644
index 9835e1e..0000000
--- a/tools/test/misc/CMakeTests.cmake
+++ /dev/null
@@ -1,340 +0,0 @@
-
-##############################################################################
-##############################################################################
-### T E S T I N G ###
-##############################################################################
-##############################################################################
-
- # --------------------------------------------------------------------
- # Copy all the HDF5 files from the source directory into the test directory
- # --------------------------------------------------------------------
- set (HDF5_REFERENCE_TEST_FILES
- family_file00000.h5
- family_file00001.h5
- family_file00002.h5
- family_file00003.h5
- family_file00004.h5
- family_file00005.h5
- family_file00006.h5
- family_file00007.h5
- family_file00008.h5
- family_file00009.h5
- family_file00010.h5
- family_file00011.h5
- family_file00012.h5
- family_file00013.h5
- family_file00014.h5
- family_file00015.h5
- family_file00016.h5
- family_file00017.h5
- )
-
- foreach (h5_file ${HDF5_REFERENCE_TEST_FILES})
- HDFTEST_COPY_FILE("${HDF5_TOOLS_DIR}/testfiles/${h5_file}" "${PROJECT_BINARY_DIR}/${h5_file}" "h5repart_files")
- endforeach (h5_file ${HDF5_REFERENCE_TEST_FILES})
- add_custom_target(h5repart_files ALL COMMENT "Copying files needed by h5repart tests" DEPENDS ${h5repart_files_list})
-
- set (HDF5_MKGRP_TEST_FILES
- #h5mkgrp_help.txt
- #h5mkgrp_version
- h5mkgrp_single.ls
- h5mkgrp_single_v.ls
- h5mkgrp_single_p.ls
- h5mkgrp_single_l.ls
- h5mkgrp_several.ls
- h5mkgrp_several_v.ls
- h5mkgrp_several_p.ls
- h5mkgrp_several_l.ls
- h5mkgrp_nested_p.ls
- h5mkgrp_nested_lp.ls
- h5mkgrp_nested_mult_p.ls
- h5mkgrp_nested_mult_lp.ls
- )
-
- # make test dir
- file (MAKE_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles")
-
- foreach (h5_mkgrp_file ${HDF5_MKGRP_TEST_FILES})
- HDFTEST_COPY_FILE("${HDF5_TOOLS_DIR}/testfiles/${h5_mkgrp_file}" "${PROJECT_BINARY_DIR}/testfiles/${h5_mkgrp_file}" "h5mkgrp_files")
- endforeach (h5_mkgrp_file ${HDF5_MKGRP_TEST_FILES})
-
- HDFTEST_COPY_FILE("${HDF5_TOOLS_TEST_MISC_SOURCE_DIR}/testfiles/h5mkgrp_help.txt" "${PROJECT_BINARY_DIR}/testfiles/h5mkgrp_help.txt" "h5mkgrp_files")
- add_custom_target(h5mkgrp_files ALL COMMENT "Copying files needed by h5mkgrp tests" DEPENDS ${h5mkgrp_files_list})
-
- configure_file (${HDF5_TOOLS_TEST_MISC_SOURCE_DIR}/testfiles/h5mkgrp_version.txt.in ${PROJECT_BINARY_DIR}/testfiles/h5mkgrp_version.txt @ONLY)
-
-##############################################################################
-##############################################################################
-### T H E T E S T S M A C R O S ###
-##############################################################################
-##############################################################################
-
- MACRO (ADD_H5_TEST resultfile resultcode resultoption)
- if (NOT HDF5_ENABLE_USING_MEMCHECKER)
- add_test (
- NAME H5MKGRP-${resultfile}-clear-objects
- COMMAND ${CMAKE_COMMAND}
- -E remove
- ${resultfile}.h5
- )
- set_tests_properties (H5MKGRP-${resultfile}-clear-objects PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles")
- endif (NOT HDF5_ENABLE_USING_MEMCHECKER)
-
- add_test (
- NAME H5MKGRP-${resultfile}
- COMMAND $<TARGET_FILE:h5mkgrp> ${resultoption} ${resultfile}.h5 ${ARGN}
- )
- set_tests_properties (H5MKGRP-${resultfile} PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles")
- if (HDF5_ENABLE_USING_MEMCHECKER)
- if (NOT "${last_test}" STREQUAL "")
- set_tests_properties (H5MKGRP-${resultfile} PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
- else (HDF5_ENABLE_USING_MEMCHECKER)
- set_tests_properties (H5MKGRP-${resultfile} PROPERTIES DEPENDS H5MKGRP-${resultfile}-clear-objects)
- add_test (
- NAME H5MKGRP-${resultfile}-h5ls
- COMMAND "${CMAKE_COMMAND}"
- -D "TEST_PROGRAM=$<TARGET_FILE:h5ls>"
- -D "TEST_ARGS:STRING=-v;-r;${resultfile}.h5"
- -D "TEST_FOLDER=${PROJECT_BINARY_DIR}/testfiles"
- -D "TEST_OUTPUT=${resultfile}.out"
- -D "TEST_EXPECT=${resultcode}"
- -D "TEST_MASK_MOD=true"
- -D "TEST_REFERENCE=${resultfile}.ls"
- -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
- )
- set_tests_properties (H5MKGRP-${resultfile}-h5ls PROPERTIES DEPENDS H5MKGRP-${resultfile})
- endif (HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO (ADD_H5_TEST resultfile resultcode resultoption)
-
- MACRO (ADD_H5_CMP resultfile resultcode)
- if (HDF5_ENABLE_USING_MEMCHECKER)
- add_test (NAME H5MKGRP_CMP-${resultfile} COMMAND $<TARGET_FILE:h5mkgrp> ${ARGN})
- else (HDF5_ENABLE_USING_MEMCHECKER)
- add_test (
- NAME H5MKGRP_CMP-${resultfile}-clear-objects
- COMMAND ${CMAKE_COMMAND}
- -E remove
- ${resultfile}.h5
- )
- set_tests_properties (H5MKGRP_CMP-${resultfile}-clear-objects PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles")
- add_test (
- NAME H5MKGRP_CMP-${resultfile}
- COMMAND "${CMAKE_COMMAND}"
- -D "TEST_PROGRAM=$<TARGET_FILE:h5mkgrp>"
- -D "TEST_ARGS:STRING=${ARGN}"
- -D "TEST_FOLDER=${PROJECT_BINARY_DIR}/testfiles"
- -D "TEST_OUTPUT=${resultfile}.out"
- -D "TEST_EXPECT=${resultcode}"
- -D "TEST_REFERENCE=${resultfile}.txt"
- -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
- )
- set_tests_properties (H5MKGRP_CMP-${resultfile} PROPERTIES DEPENDS H5MKGRP_CMP-${resultfile}-clear-objects)
- endif (HDF5_ENABLE_USING_MEMCHECKER)
- ENDMACRO (ADD_H5_CMP resultfile resultcode)
-
-##############################################################################
-##############################################################################
-### T H E T E S T S ###
-##############################################################################
-##############################################################################
-
- ###################### H5REPART #########################
- # Remove any output file left over from previous test run
- add_test (
- NAME H5REPART-clearall-objects
- COMMAND ${CMAKE_COMMAND}
- -E remove
- fst_family00000.h5
- scd_family00000.h5
- scd_family00001.h5
- scd_family00002.h5
- scd_family00003.h5
- family_to_sec2.h5
- )
- if (NOT "${last_test}" STREQUAL "")
- set_tests_properties (H5REPART-clearall-objects PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
- set (last_test "H5REPART-clearall-objects")
-
- # repartition family member size to 20,000 bytes.
- add_test (NAME H5REPART-h5repart_20K COMMAND $<TARGET_FILE:h5repart> -m 20000 family_file%05d.h5 fst_family%05d.h5)
- set_tests_properties (H5REPART-h5repart_20K PROPERTIES DEPENDS H5REPART-clearall-objects)
-
- # repartition family member size to 5 KB.
- add_test (NAME H5REPART-h5repart_5K COMMAND $<TARGET_FILE:h5repart> -m 5k family_file%05d.h5 scd_family%05d.h5)
- set_tests_properties (H5REPART-h5repart_5K PROPERTIES DEPENDS H5REPART-clearall-objects)
-
- # convert family file to sec2 file of 20,000 bytes
- add_test (NAME H5REPART-h5repart_sec2 COMMAND $<TARGET_FILE:h5repart> -m 20000 -family_to_sec2 family_file%05d.h5 family_to_sec2.h5)
- set_tests_properties (H5REPART-h5repart_sec2 PROPERTIES DEPENDS H5REPART-clearall-objects)
-
- # test the output files repartitioned above.
- add_test (NAME H5REPART-h5repart_test COMMAND $<TARGET_FILE:h5repart_test>)
- set_tests_properties (H5REPART-h5repart_test PROPERTIES DEPENDS "H5REPART-clearall-objects;H5REPART-h5repart_20K;H5REPART-h5repart_5K;H5REPART-h5repart_sec2")
-
- set (H5_DEP_EXECUTABLES ${H5_DEP_EXECUTABLES}
- h5repart_test
- )
-
- ###################### H5CLEAR #########################
- # Remove any output file left over from previous test run
- add_test (
- NAME H5CLEAR-clearall-objects
- COMMAND ${CMAKE_COMMAND}
- -E remove
- h5clear_log_v3.h5
- h5clear_sec2_v0.h5
- h5clear_sec2_v2.h5
- h5clear_sec2_v3.h5
- latest_h5clear_log_v3.h5
- latest_h5clear_sec2_v3.h5
- )
- if (NOT "${last_test}" STREQUAL "")
- set_tests_properties (H5CLEAR-clearall-objects PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
- set (last_test "H5CLEAR-clearall-objects")
-
- # create the output files to be used.
- add_test (NAME H5CLEAR-h5clear_gentest COMMAND $<TARGET_FILE:h5clear_gentest>)
- set_tests_properties (H5CLEAR-h5clear_gentest PROPERTIES DEPENDS "H5CLEAR-clearall-objects")
-
- # Initial file open fails
- add_test (NAME H5CLEAR-clear_open_chk-sec2_v3_F COMMAND $<TARGET_FILE:clear_open_chk> h5clear_sec2_v3.h5)
- set_tests_properties (H5CLEAR-clear_open_chk-sec2_v3_F PROPERTIES WILL_FAIL "true")
- set_tests_properties (H5CLEAR-clear_open_chk-sec2_v3_F PROPERTIES DEPENDS H5CLEAR-h5clear_gentest)
- # After "h5clear" the file, the subsequent file open succeeds
- add_test (NAME H5CLEAR-h5clear-sec2_v3 COMMAND $<TARGET_FILE:h5clear> h5clear_sec2_v3.h5)
- set_tests_properties (H5CLEAR-h5clear-sec2_v3 PROPERTIES DEPENDS H5CLEAR-clear_open_chk-sec2_v3_F)
- add_test (NAME H5CLEAR-clear_open_chk-sec2_v3 COMMAND $<TARGET_FILE:clear_open_chk> h5clear_sec2_v3.h5)
- set_tests_properties (H5CLEAR-clear_open_chk-sec2_v3 PROPERTIES DEPENDS H5CLEAR-h5clear-sec2_v3)
-
- # Initial file open fails
- add_test (NAME H5CLEAR-clear_open_chk-log_v3_F COMMAND $<TARGET_FILE:clear_open_chk> h5clear_log_v3.h5)
- set_tests_properties (H5CLEAR-clear_open_chk-log_v3_F PROPERTIES WILL_FAIL "true")
- set_tests_properties (H5CLEAR-clear_open_chk-log_v3_F PROPERTIES DEPENDS H5CLEAR-h5clear_gentest)
- # After "h5clear" the file, the subsequent file open succeeds
- add_test (NAME H5CLEAR-h5clear-log_v3 COMMAND $<TARGET_FILE:h5clear> h5clear_log_v3.h5)
- set_tests_properties (H5CLEAR-h5clear-log_v3 PROPERTIES DEPENDS H5CLEAR-clear_open_chk-log_v3_F)
- add_test (NAME H5CLEAR-clear_open_chk-log_v3 COMMAND $<TARGET_FILE:clear_open_chk> h5clear_log_v3.h5)
- set_tests_properties (H5CLEAR-clear_open_chk-log_v3 PROPERTIES DEPENDS H5CLEAR-h5clear-log_v3)
-
- # Initial file open fails
- add_test (NAME H5CLEAR-clear_open_chk-latest_sec2_v3_F COMMAND $<TARGET_FILE:clear_open_chk> latest_h5clear_sec2_v3.h5)
- set_tests_properties (H5CLEAR-clear_open_chk-latest_sec2_v3_F PROPERTIES WILL_FAIL "true")
- set_tests_properties (H5CLEAR-clear_open_chk-latest_sec2_v3_F PROPERTIES DEPENDS H5CLEAR-h5clear_gentest)
- # After "h5clear" the file, the subsequent file open succeeds
- add_test (NAME H5CLEAR-h5clear-latest_sec2_v3 COMMAND $<TARGET_FILE:h5clear> latest_h5clear_sec2_v3.h5)
- set_tests_properties (H5CLEAR-h5clear-latest_sec2_v3 PROPERTIES DEPENDS H5CLEAR-clear_open_chk-latest_sec2_v3_F)
- add_test (NAME H5CLEAR-clear_open_chk-latest_sec2_v3 COMMAND $<TARGET_FILE:clear_open_chk> latest_h5clear_sec2_v3.h5)
- set_tests_properties (H5CLEAR-clear_open_chk-latest_sec2_v3 PROPERTIES DEPENDS H5CLEAR-h5clear-latest_sec2_v3)
-
- # Initial file open fails
- add_test (NAME H5CLEAR-clear_open_chk-latest_log_v3_F COMMAND $<TARGET_FILE:clear_open_chk> latest_h5clear_log_v3.h5)
- set_tests_properties (H5CLEAR-clear_open_chk-latest_log_v3_F PROPERTIES WILL_FAIL "true")
- set_tests_properties (H5CLEAR-clear_open_chk-latest_log_v3_F PROPERTIES DEPENDS H5CLEAR-h5clear_gentest)
- # After "h5clear" the file, the subsequent file open succeeds
- add_test (NAME H5CLEAR-h5clear-latest_log_v3 COMMAND $<TARGET_FILE:h5clear> latest_h5clear_log_v3.h5)
- set_tests_properties (H5CLEAR-h5clear-latest_log_v3 PROPERTIES DEPENDS H5CLEAR-clear_open_chk-latest_log_v3_F)
- add_test (NAME H5CLEAR-clear_open_chk-latest_log_v3 COMMAND $<TARGET_FILE:clear_open_chk> latest_h5clear_log_v3.h5)
- set_tests_properties (H5CLEAR-clear_open_chk-latest_log_v3 PROPERTIES DEPENDS H5CLEAR-h5clear-latest_log_v3)
-
- #
- # File open succeeds because the library does not check status_flags for file with < v3 superblock
- add_test (NAME H5CLEAR-clear_open_chk-sec2_v0_P COMMAND $<TARGET_FILE:clear_open_chk> h5clear_sec2_v0.h5)
- set_tests_properties (H5CLEAR-clear_open_chk-sec2_v0_P PROPERTIES DEPENDS H5CLEAR-h5clear_gentest)
- # After "h5clear" the file, the subsequent file open succeeds
- add_test (NAME H5CLEAR-h5clear-sec2_v0 COMMAND $<TARGET_FILE:h5clear> h5clear_sec2_v0.h5)
- set_tests_properties (H5CLEAR-h5clear-sec2_v0 PROPERTIES DEPENDS H5CLEAR-clear_open_chk-sec2_v0_P)
- add_test (NAME H5CLEAR-clear_open_chk-sec2_v0 COMMAND $<TARGET_FILE:clear_open_chk> h5clear_sec2_v0.h5)
- set_tests_properties (H5CLEAR-clear_open_chk-sec2_v0 PROPERTIES DEPENDS H5CLEAR-h5clear-sec2_v0)
-
- #
- # File open succeeds because the library does not check status_flags for file with < v3 superblock
- add_test (NAME H5CLEAR-clear_open_chk-sec2_v2_P COMMAND $<TARGET_FILE:clear_open_chk> h5clear_sec2_v2.h5)
- set_tests_properties (H5CLEAR-clear_open_chk-sec2_v2_P PROPERTIES DEPENDS H5CLEAR-h5clear_gentest)
- # After "h5clear" the file, the subsequent file open succeeds
- add_test (NAME H5CLEAR-h5clear-sec2_v2 COMMAND $<TARGET_FILE:h5clear> h5clear_sec2_v2.h5)
- set_tests_properties (H5CLEAR-h5clear-sec2_v2 PROPERTIES DEPENDS H5CLEAR-clear_open_chk-sec2_v2_P)
- add_test (NAME H5CLEAR-clear_open_chk-sec2_v2 COMMAND $<TARGET_FILE:clear_open_chk> h5clear_sec2_v2.h5)
- set_tests_properties (H5CLEAR-clear_open_chk-sec2_v2 PROPERTIES DEPENDS H5CLEAR-h5clear-sec2_v2)
-
- set (H5_DEP_EXECUTABLES ${H5_DEP_EXECUTABLES}
- h5clear_gentest
- )
-
- ###################### H5MKGRP #########################
- if (HDF5_ENABLE_USING_MEMCHECKER)
- add_test (
- NAME H5MKGRP-clearall-objects
- COMMAND ${CMAKE_COMMAND}
- -E remove
- h5mkgrp_help.out
- h5mkgrp_help.out.err
- h5mkgrp_version.out
- h5mkgrp_version.out.err
- h5mkgrp_single.h5
- h5mkgrp_single.out
- h5mkgrp_single.out.err
- h5mkgrp_single_v.h5
- h5mkgrp_single_v.out
- h5mkgrp_single_v.out.err
- h5mkgrp_single_p.h5
- h5mkgrp_single_p.out
- h5mkgrp_single_p.out.err
- h5mkgrp_single_l.h5
- h5mkgrp_single_l.out
- h5mkgrp_single_l.out.err
- h5mkgrp_several.h5
- h5mkgrp_several.out
- h5mkgrp_several.out.err
- h5mkgrp_several_v.h5
- h5mkgrp_several_v.out
- h5mkgrp_several_v.out.err
- h5mkgrp_several_p.h5
- h5mkgrp_several_p.out
- h5mkgrp_several_p.out.err
- h5mkgrp_several_l.h5
- h5mkgrp_several_l.out
- h5mkgrp_several_l.out.err
- h5mkgrp_nested_p.h5
- h5mkgrp_nested_p.out
- h5mkgrp_nested_p.out.err
- h5mkgrp_nested_lp.h5
- h5mkgrp_nested_lp.out
- h5mkgrp_nested_lp.out.err
- h5mkgrp_nested_mult_p.h5
- h5mkgrp_nested_mult_p.out
- h5mkgrp_nested_mult_p.out.err
- h5mkgrp_nested_mult_lp.h5
- h5mkgrp_nested_mult_lp.out
- h5mkgrp_nested_mult_lp.out.err
- )
- set_tests_properties (H5MKGRP-clearall-objects PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles")
- if (NOT "${last_test}" STREQUAL "")
- set_tests_properties (H5MKGRP-clearall-objects PROPERTIES DEPENDS ${last_test})
- endif (NOT "${last_test}" STREQUAL "")
- set (last_test "H5MKGRP-clearall-objects")
- endif (HDF5_ENABLE_USING_MEMCHECKER)
-
- # Check that help & version is displayed properly
- ADD_H5_CMP (h5mkgrp_help 0 "-h")
- ADD_H5_CMP (h5mkgrp_version 0 "-V")
-
- # Create single group at root level
- ADD_H5_TEST (h5mkgrp_single 0 "" single)
- ADD_H5_TEST (h5mkgrp_single_v 0 "-v" single)
- ADD_H5_TEST (h5mkgrp_single_p 0 "-p" single)
- ADD_H5_TEST (h5mkgrp_single_l 0 "-l" latest)
-
- # Create several groups at root level
- ADD_H5_TEST (h5mkgrp_several 0 "" one two)
- ADD_H5_TEST (h5mkgrp_several_v 0 "-v" one two)
- ADD_H5_TEST (h5mkgrp_several_p 0 "-p" one two)
- ADD_H5_TEST (h5mkgrp_several_l 0 "-l" one two)
-
- # Create various nested groups
- ADD_H5_TEST (h5mkgrp_nested_p 0 "-p" /one/two)
- ADD_H5_TEST (h5mkgrp_nested_lp 0 "-lp" /one/two)
- ADD_H5_TEST (h5mkgrp_nested_mult_p 0 "-p" /one/two /three/four)
- ADD_H5_TEST (h5mkgrp_nested_mult_lp 0 "-lp" /one/two /three/four)
diff --git a/tools/test/misc/CMakeTestsClear.cmake b/tools/test/misc/CMakeTestsClear.cmake
new file mode 100644
index 0000000..7eba4a1
--- /dev/null
+++ b/tools/test/misc/CMakeTestsClear.cmake
@@ -0,0 +1,264 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
+
+##############################################################################
+##############################################################################
+### T E S T I N G ###
+##############################################################################
+##############################################################################
+
+ # --------------------------------------------------------------------
+ # Copy all the HDF5 files from the source directory into the test directory
+ # --------------------------------------------------------------------
+ set (HDF5_TEST_FILES
+ h5clear_log_v3.h5
+ h5clear_mdc_image.h5
+ mod_h5clear_mdc_image.h5
+ latest_h5clear_log_v3.h5
+ latest_h5clear_sec2_v3.h5
+ )
+ set (HDF5_SEC2_TEST_FILES
+ h5clear_sec2_v0.h5
+ h5clear_sec2_v2.h5
+ h5clear_sec2_v3.h5
+ )
+ set (HDF5_REFERENCE_TEST_FILES
+ h5clear_usage.ddl
+ h5clear_open_fail.ddl
+ h5clear_missing_file.ddl
+ h5clear_no_mdc_image.ddl
+ )
+
+ foreach (h5_file ${HDF5_TEST_FILES} ${HDF5_SEC2_TEST_FILES} ${HDF5_REFERENCE_TEST_FILES})
+ HDFTEST_COPY_FILE("${PROJECT_SOURCE_DIR}/testfiles/${h5_file}" "${PROJECT_BINARY_DIR}/testfiles/${h5_file}" "h5clear_files")
+ endforeach ()
+ # make second copy of h5clear_sec2.h5
+ foreach (h5_file ${HDF5_SEC2_TEST_FILES})
+ HDFTEST_COPY_FILE("${PROJECT_SOURCE_DIR}/testfiles/${h5_file}" "${PROJECT_BINARY_DIR}/testfiles/orig_${h5_file}" "h5clear_files")
+ endforeach ()
+ # make second copy of mod_h5clear_mdc_image.h5
+ HDFTEST_COPY_FILE("${PROJECT_SOURCE_DIR}/testfiles/mod_h5clear_mdc_image.h5" "${PROJECT_BINARY_DIR}/testfiles/mod_h5clear_mdc_image2.h5" "h5clear_files")
+ add_custom_target(h5clear_files ALL COMMENT "Copying files needed by h5clear tests" DEPENDS ${h5clear_files_list})
+
+##############################################################################
+##############################################################################
+### T H E T E S T S M A C R O S ###
+##############################################################################
+##############################################################################
+
+ macro (ADD_H5_CMP testname resultfile resultcode)
+ if (NOT HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (
+ NAME H5CLEAR_CMP-${testname}-clear-objects
+ COMMAND ${CMAKE_COMMAND}
+ -E remove
+ ${testname}.out
+ ${testname}.out.err
+ )
+ add_test (
+ NAME H5CLEAR_CMP-${testname}
+ COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:h5clear>"
+ -D "TEST_ARGS:STRING=${ARGN}"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}/testfiles"
+ -D "TEST_OUTPUT=${testname}.out"
+ -D "TEST_EXPECT=${resultcode}"
+ -D "TEST_REFERENCE=${resultfile}.ddl"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+ set_tests_properties (H5CLEAR_CMP-${testname} PROPERTIES DEPENDS H5CLEAR_CMP-${testname}-clear-objects)
+ endif ()
+ endmacro ()
+
+ macro (ADD_H5_RETTEST testname resultcode)
+ if (NOT HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (
+ NAME H5CLEAR_RET-${testname}
+ COMMAND $<TARGET_FILE:h5clear> ${ARGN}
+ )
+ set_tests_properties (H5CLEAR_RET-${testname} PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles")
+ set_tests_properties (H5CLEAR_RET-${testname} PROPERTIES WILL_FAIL "${resultcode}")
+ if (NOT "${last_test}" STREQUAL "")
+ set_tests_properties (H5CLEAR_RET-${testname} PROPERTIES DEPENDS ${last_test})
+ endif ()
+ set (last_test "H5CLEAR_RET-${testname}")
+ endif ()
+ endmacro ()
+
+ macro (ADD_H5_TEST testname resultcode)
+ if (NOT HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (
+ NAME H5CLEAR-clear_open_chk-copy_${testname}.h5
+ COMMAND ${CMAKE_COMMAND}
+ -E copy_if_different
+ "${PROJECT_SOURCE_DIR}/testfiles/${testname}.h5" "${PROJECT_BINARY_DIR}/testfiles/${testname}.h5"
+ )
+ if (NOT "${last_test}" STREQUAL "")
+ set_tests_properties (H5CLEAR-clear_open_chk-copy_${testname}.h5 PROPERTIES DEPENDS ${last_test})
+ endif ()
+ set (last_test "H5CLEAR-clear_open_chk-copy_${testname}.h5")
+ # Initial file open fails OR
+ # File open succeeds because the library does not check status_flags for file with < v3 superblock
+ add_test (NAME H5CLEAR-clear_open_chk-${testname}_${resultcode} COMMAND $<TARGET_FILE:clear_open_chk> ${testname}.h5)
+ set_tests_properties (H5CLEAR-clear_open_chk-${testname}_${resultcode} PROPERTIES WILL_FAIL "${resultcode}")
+ set_tests_properties (H5CLEAR-clear_open_chk-${testname}_${resultcode} PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles")
+ if (NOT "${last_test}" STREQUAL "")
+ set_tests_properties (H5CLEAR-clear_open_chk-${testname}_${resultcode} PROPERTIES DEPENDS ${last_test})
+ endif ()
+ # After "h5clear" the file, the subsequent file open succeeds
+ add_test (NAME H5CLEAR-h5clear-${testname} COMMAND $<TARGET_FILE:h5clear> -s ${testname}.h5)
+ set_tests_properties (H5CLEAR-h5clear-${testname} PROPERTIES DEPENDS H5CLEAR-clear_open_chk-${testname}_${resultcode})
+ set_tests_properties (H5CLEAR-h5clear-${testname} PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles")
+ add_test (NAME H5CLEAR-clear_open_chk-${testname} COMMAND $<TARGET_FILE:clear_open_chk> ${testname}.h5)
+ set_tests_properties (H5CLEAR-clear_open_chk-${testname} PROPERTIES DEPENDS H5CLEAR-h5clear-${testname})
+ set_tests_properties (H5CLEAR-clear_open_chk-${testname} PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles")
+ set (last_test "H5CLEAR-clear_open_chk-${testname}")
+ endif ()
+ endmacro ()
+
+##############################################################################
+##############################################################################
+### T H E T E S T S ###
+##############################################################################
+##############################################################################
+#
+#
+#
+# The following are tests to verify the status_flags field is cleared properly:
+if (HDF5_ENABLE_USING_MEMCHECKER)
+ # Remove any output file left over from previous test run
+ add_test (
+ NAME H5CLEAR-clearall-objects
+ COMMAND ${CMAKE_COMMAND}
+ -E remove
+ h5clear_log_v3.h5
+ h5clear_mdc_image.h5
+ h5clear_sec2_v0.h5
+ h5clear_sec2_v2.h5
+ h5clear_sec2_v3.h5
+ orig_h5clear_sec2_v0.h5
+ orig_h5clear_sec2_v2.h5
+ orig_h5clear_sec2_v3.h5
+ latest_h5clear_log_v3.h5
+ latest_h5clear_sec2_v3.h5
+ mod_h5clear_mdc_image.h5
+ mod_h5clear_mdc_image2.h5
+ )
+ if (NOT "${last_test}" STREQUAL "")
+ set_tests_properties (H5CLEAR-clearall-objects PROPERTIES DEPENDS ${last_test})
+ endif ()
+ set (last_test "H5CLEAR-clearall-objects")
+
+ foreach (h5_file ${HDF5_TEST_FILES} ${HDF5_SEC2_TEST_FILES})
+ add_test (
+ NAME H5CLEAR-copy_${h5_file}
+ COMMAND ${CMAKE_COMMAND}
+ -E copy_if_different
+ "${PROJECT_SOURCE_DIR}/testfiles/${h5_file}" "${PROJECT_BINARY_DIR}/testfiles/${h5_file}"
+ )
+ if (NOT "${last_test}" STREQUAL "")
+ set_tests_properties (H5CLEAR-copy_${h5_file} PROPERTIES DEPENDS ${last_test})
+ endif ()
+ set (last_test "H5CLEAR-copy_${h5_file}")
+ endforeach ()
+ # make second copy of h5clear_sec2.h5
+ foreach (h5_file ${HDF5_SEC2_TEST_FILES})
+ add_test (
+ NAME H5CLEAR-copy_orig_${h5_file}
+ COMMAND ${CMAKE_COMMAND}
+ -E copy_if_different
+ "${PROJECT_SOURCE_DIR}/testfiles/${h5_file}" "${PROJECT_BINARY_DIR}/testfiles/orig_${h5_file}"
+ )
+ if (NOT "${last_test}" STREQUAL "")
+ set_tests_properties (H5CLEAR-copy_orig_${h5_file} PROPERTIES DEPENDS ${last_test})
+ endif ()
+ set (last_test "H5CLEAR-copy_orig_${h5_file}")
+ endforeach ()
+ # make second copy of mod_h5clear_mdc_image.h5
+ add_test (
+ NAME H5CLEAR-copy_mod_h5clear_mdc_image2.h5
+ COMMAND ${CMAKE_COMMAND}
+ -E copy_if_different
+ "${PROJECT_SOURCE_DIR}/testfiles/mod_h5clear_mdc_image.h5" "${PROJECT_BINARY_DIR}/testfiles/mod_h5clear_mdc_image2.h5"
+ )
+ if (NOT "${last_test}" STREQUAL "")
+ set_tests_properties (H5CLEAR-copy_mod_h5clear_mdc_image2.h5 PROPERTIES DEPENDS ${last_test})
+ endif ()
+ set (last_test "H5CLEAR-copy_mod_h5clear_mdc_image2.h5")
+endif()
+
+#
+#
+#
+# The following are tests to verify the expected output from h5clear
+# "h5clear -h"
+# "h5clear" (no options, no file)
+# "h5clear junk.h5" (no options, nonexisting file)
+# "h5clear orig_h5clear_sec2_v3.h5" (no options, existing file)
+# "h5clear -m" (valid 1 option, no file)
+# "h5clear -s junk.h5" (valid 1 option, nonexisting file)
+# "h5clear -m -s" (valid 2 options, no file)
+# "h5clear -m -s junk.h5" (valid 2 options, nonexisting file)
+# "h5clear -m orig_h5clear_sec2_v2.h5" (valid 1 option, existing file, no cache image)
+# "h5clear -s -m orig_h5clear_sec2_v0.h5" (valid 2 options, existing file, no cache image)
+ ADD_H5_CMP (h5clear_usage_h h5clear_usage 0 "-h")
+ ADD_H5_CMP (h5clear_usage h5clear_usage 1 "")
+ ADD_H5_CMP (h5clear_usage_junk h5clear_usage 1 "" junk.h5)
+ ADD_H5_CMP (h5clear_usage_none h5clear_usage 1 "" orig_h5clear_sec2_v3.h5)
+ ADD_H5_CMP (h5clear_missing_file_m h5clear_missing_file 1 "-m")
+ ADD_H5_CMP (h5clear_open_fail_s h5clear_open_fail 1 "-s" junk.h5)
+ ADD_H5_CMP (h5clear_missing_file_ms h5clear_missing_file 1 "-m" "-s")
+ ADD_H5_CMP (h5clear_open_fail_ms h5clear_open_fail 1 "-m" "-s" junk.h5)
+ ADD_H5_CMP (h5clear_no_mdc_image_m h5clear_no_mdc_image 0 "-m" orig_h5clear_sec2_v2.h5)
+ ADD_H5_CMP (h5clear_no_mdc_image_ms h5clear_no_mdc_image 0 "-s" "-m" orig_h5clear_sec2_v0.h5)
+#
+#
+#
+# The following are tests to verify the expected exit code from h5clear:
+# "h5clear -m h5clear_mdc_image.h5" (valid option, existing file, succeed exit code)
+# "h5clear --vers" (valid option, version #, succeed exit code)
+# "h5clear -k" (invalid 1 option, no file, fail exit code)
+# "h5clear -k junk.h5" (invalid 1 option, nonexisting file, fail exit code)
+# "h5clear -l h5clear_sec2_v2.h5" (invalid 1 option, existing file, fail exit code)
+#        "h5clear -m -k"                       (valid/invalid 2 options, no file, fail exit code)
+#        "h5clear -l -m"                       (invalid/valid 2 options, no file, fail exit code)
+# "h5clear -m -l junk.h5" (valid/invalid 2 options, nonexisting file, fail exit code)
+# "h5clear -l -m junk.h5" (invalid/valid 2 options, nonexisting file, fail exit code)
+# "h5clear -m -l h5clear_sec2_v0.h5" (valid/invalid 2 options, existing file, fail exit code)
+# "h5clear -l -m h5clear_sec2_v0.h5" (invalid/valid 2 options, existing file, fail exit code)
+ ADD_H5_RETTEST (h5clear_mdc_image "false" "-m" h5clear_mdc_image.h5)
+ ADD_H5_RETTEST (h5clear_vers "false" "--vers")
+ ADD_H5_RETTEST (h5clear_k "true" "-k")
+ ADD_H5_RETTEST (h5clear_k_junk "true" "-k" junk.h5)
+ ADD_H5_RETTEST (h5clear_l_sec2 "true" "-l" h5clear_sec2_v2.h5)
+ ADD_H5_RETTEST (h5clear_mk "true" "-m" "-k")
+ ADD_H5_RETTEST (h5clear_lm "true" "-l" "-m")
+ ADD_H5_RETTEST (h5clear_ml_junk "true" "-m" "-l" junk.h5)
+ ADD_H5_RETTEST (h5clear_lm_junk "true" "-l" "-m" junk.h5)
+ ADD_H5_RETTEST (h5clear_ml_sec2 "true" "-m" "-l" h5clear_sec2_v0.h5)
+ ADD_H5_RETTEST (h5clear_lm_sec2 "true" "-l" "-m" h5clear_sec2_v0.h5)
+#
+#
+#
+# h5clear_mdc_image.h5 already has cache image removed earlier, verify the expected warning from h5clear:
+ ADD_H5_CMP (h5clear_mdc_image_m h5clear_no_mdc_image 0 "-m" mod_h5clear_mdc_image.h5)
+ ADD_H5_CMP (h5clear_mdc_image_sm h5clear_no_mdc_image 0 "-s" "-m" mod_h5clear_mdc_image2.h5)
+#
+#
+#
+# The following are tests to verify the status_flags field is cleared properly:
+ ADD_H5_TEST (h5clear_sec2_v3 "true")
+ ADD_H5_TEST (h5clear_log_v3 "true")
+ ADD_H5_TEST (latest_h5clear_sec2_v3 "true")
+ ADD_H5_TEST (latest_h5clear_log_v3 "true")
+ ADD_H5_TEST (h5clear_sec2_v0 "false")
+ ADD_H5_TEST (h5clear_sec2_v2 "false")
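
These status_flags tests follow a fixed pattern: an initial open of the file is expected to fail (for v3 superblocks), "h5clear -s" is run, and a second open is expected to succeed. A minimal sketch of the open-check step, written here for illustration and not taken from clear_open_chk.c:

    /* Sketch only: attempt a default open of the file and signal the result
     * through the exit code, which is how the WILL_FAIL tests above consume it. */
    #include "hdf5.h"
    #include <stdio.h>

    int
    main(int argc, char *argv[])
    {
        hid_t fid;

        if(argc != 2) {
            fprintf(stderr, "usage: open_chk <filename>\n");
            return 1;
        }
        if((fid = H5Fopen(argv[1], H5F_ACC_RDWR, H5P_DEFAULT)) < 0)
            return 1;      /* open failed, e.g. status_flags still set */
        H5Fclose(fid);
        return 0;          /* open succeeded */
    }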
diff --git a/tools/test/misc/CMakeTestsMkgrp.cmake b/tools/test/misc/CMakeTestsMkgrp.cmake
new file mode 100644
index 0000000..7011b19
--- /dev/null
+++ b/tools/test/misc/CMakeTestsMkgrp.cmake
@@ -0,0 +1,199 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
+
+##############################################################################
+##############################################################################
+### T E S T I N G ###
+##############################################################################
+##############################################################################
+
+ # --------------------------------------------------------------------
+ # Copy all the HDF5 files from the source directory into the test directory
+ # --------------------------------------------------------------------
+ set (HDF5_MKGRP_TEST_FILES
+ #h5mkgrp_help.txt
+ #h5mkgrp_version
+ h5mkgrp_single.ls
+ h5mkgrp_single_v.ls
+ h5mkgrp_single_p.ls
+ h5mkgrp_single_l.ls
+ h5mkgrp_several.ls
+ h5mkgrp_several_v.ls
+ h5mkgrp_several_p.ls
+ h5mkgrp_several_l.ls
+ h5mkgrp_nested_p.ls
+ h5mkgrp_nested_lp.ls
+ h5mkgrp_nested_mult_p.ls
+ h5mkgrp_nested_mult_lp.ls
+ )
+
+ # make test dir
+ file (MAKE_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles")
+
+ foreach (h5_mkgrp_file ${HDF5_MKGRP_TEST_FILES})
+ HDFTEST_COPY_FILE("${HDF5_TOOLS_DIR}/testfiles/${h5_mkgrp_file}" "${PROJECT_BINARY_DIR}/testfiles/${h5_mkgrp_file}" "h5mkgrp_files")
+ endforeach ()
+
+ HDFTEST_COPY_FILE("${HDF5_TOOLS_TEST_MISC_SOURCE_DIR}/testfiles/h5mkgrp_help.txt" "${PROJECT_BINARY_DIR}/testfiles/h5mkgrp_help.txt" "h5mkgrp_files")
+ add_custom_target(h5mkgrp_files ALL COMMENT "Copying files needed by h5mkgrp tests" DEPENDS ${h5mkgrp_files_list})
+
+ configure_file (${HDF5_TOOLS_TEST_MISC_SOURCE_DIR}/testfiles/h5mkgrp_version.txt.in ${PROJECT_BINARY_DIR}/testfiles/h5mkgrp_version.txt @ONLY)
+
+##############################################################################
+##############################################################################
+### T H E T E S T S M A C R O S ###
+##############################################################################
+##############################################################################
+
+ macro (ADD_H5_TEST resultfile resultcode resultoption)
+ if (NOT HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (
+ NAME H5MKGRP-${resultfile}-clear-objects
+ COMMAND ${CMAKE_COMMAND}
+ -E remove
+ ${resultfile}.h5
+ )
+ set_tests_properties (H5MKGRP-${resultfile}-clear-objects PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles")
+ endif ()
+
+ add_test (
+ NAME H5MKGRP-${resultfile}
+ COMMAND $<TARGET_FILE:h5mkgrp> ${resultoption} ${resultfile}.h5 ${ARGN}
+ )
+ set_tests_properties (H5MKGRP-${resultfile} PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles")
+ if (HDF5_ENABLE_USING_MEMCHECKER)
+ if (NOT "${last_test}" STREQUAL "")
+ set_tests_properties (H5MKGRP-${resultfile} PROPERTIES DEPENDS ${last_test})
+ endif ()
+ else (HDF5_ENABLE_USING_MEMCHECKER)
+ set_tests_properties (H5MKGRP-${resultfile} PROPERTIES DEPENDS H5MKGRP-${resultfile}-clear-objects)
+ add_test (
+ NAME H5MKGRP-${resultfile}-h5ls
+ COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:h5ls>"
+ -D "TEST_ARGS:STRING=-v;-r;${resultfile}.h5"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}/testfiles"
+ -D "TEST_OUTPUT=${resultfile}.out"
+ -D "TEST_EXPECT=${resultcode}"
+ -D "TEST_MASK_MOD=true"
+ -D "TEST_REFERENCE=${resultfile}.ls"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+ set_tests_properties (H5MKGRP-${resultfile}-h5ls PROPERTIES DEPENDS H5MKGRP-${resultfile})
+ endif ()
+ endmacro ()
+
+ macro (ADD_H5_CMP resultfile resultcode)
+ if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME H5MKGRP_CMP-${resultfile} COMMAND $<TARGET_FILE:h5mkgrp> ${ARGN})
+ else ()
+ add_test (
+ NAME H5MKGRP_CMP-${resultfile}-clear-objects
+ COMMAND ${CMAKE_COMMAND}
+ -E remove
+ ${resultfile}.h5
+ )
+ set_tests_properties (H5MKGRP_CMP-${resultfile}-clear-objects PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles")
+ add_test (
+ NAME H5MKGRP_CMP-${resultfile}
+ COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:h5mkgrp>"
+ -D "TEST_ARGS:STRING=${ARGN}"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}/testfiles"
+ -D "TEST_OUTPUT=${resultfile}.out"
+ -D "TEST_EXPECT=${resultcode}"
+ -D "TEST_REFERENCE=${resultfile}.txt"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+ set_tests_properties (H5MKGRP_CMP-${resultfile} PROPERTIES DEPENDS H5MKGRP_CMP-${resultfile}-clear-objects)
+ endif ()
+ endmacro ()
+
+##############################################################################
+##############################################################################
+### T H E T E S T S ###
+##############################################################################
+##############################################################################
+ if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (
+ NAME H5MKGRP-clearall-objects
+ COMMAND ${CMAKE_COMMAND}
+ -E remove
+ h5mkgrp_help.out
+ h5mkgrp_help.out.err
+ h5mkgrp_version.out
+ h5mkgrp_version.out.err
+ h5mkgrp_single.h5
+ h5mkgrp_single.out
+ h5mkgrp_single.out.err
+ h5mkgrp_single_v.h5
+ h5mkgrp_single_v.out
+ h5mkgrp_single_v.out.err
+ h5mkgrp_single_p.h5
+ h5mkgrp_single_p.out
+ h5mkgrp_single_p.out.err
+ h5mkgrp_single_l.h5
+ h5mkgrp_single_l.out
+ h5mkgrp_single_l.out.err
+ h5mkgrp_several.h5
+ h5mkgrp_several.out
+ h5mkgrp_several.out.err
+ h5mkgrp_several_v.h5
+ h5mkgrp_several_v.out
+ h5mkgrp_several_v.out.err
+ h5mkgrp_several_p.h5
+ h5mkgrp_several_p.out
+ h5mkgrp_several_p.out.err
+ h5mkgrp_several_l.h5
+ h5mkgrp_several_l.out
+ h5mkgrp_several_l.out.err
+ h5mkgrp_nested_p.h5
+ h5mkgrp_nested_p.out
+ h5mkgrp_nested_p.out.err
+ h5mkgrp_nested_lp.h5
+ h5mkgrp_nested_lp.out
+ h5mkgrp_nested_lp.out.err
+ h5mkgrp_nested_mult_p.h5
+ h5mkgrp_nested_mult_p.out
+ h5mkgrp_nested_mult_p.out.err
+ h5mkgrp_nested_mult_lp.h5
+ h5mkgrp_nested_mult_lp.out
+ h5mkgrp_nested_mult_lp.out.err
+ )
+ set_tests_properties (H5MKGRP-clearall-objects PROPERTIES WORKING_DIRECTORY "${PROJECT_BINARY_DIR}/testfiles")
+ if (NOT "${last_test}" STREQUAL "")
+ set_tests_properties (H5MKGRP-clearall-objects PROPERTIES DEPENDS ${last_test})
+ endif ()
+ set (last_test "H5MKGRP-clearall-objects")
+ endif ()
+
+ # Check that help & version are displayed properly
+ ADD_H5_CMP (h5mkgrp_help 0 "-h")
+ ADD_H5_CMP (h5mkgrp_version 0 "-V")
+
+ # Create single group at root level
+ ADD_H5_TEST (h5mkgrp_single 0 "" single)
+ ADD_H5_TEST (h5mkgrp_single_v 0 "-v" single)
+ ADD_H5_TEST (h5mkgrp_single_p 0 "-p" single)
+ ADD_H5_TEST (h5mkgrp_single_l 0 "-l" latest)
+
+ # Create several groups at root level
+ ADD_H5_TEST (h5mkgrp_several 0 "" one two)
+ ADD_H5_TEST (h5mkgrp_several_v 0 "-v" one two)
+ ADD_H5_TEST (h5mkgrp_several_p 0 "-p" one two)
+ ADD_H5_TEST (h5mkgrp_several_l 0 "-l" one two)
+
+ # Create various nested groups
+ ADD_H5_TEST (h5mkgrp_nested_p 0 "-p" /one/two)
+ ADD_H5_TEST (h5mkgrp_nested_lp 0 "-lp" /one/two)
+ ADD_H5_TEST (h5mkgrp_nested_mult_p 0 "-p" /one/two /three/four)
+ ADD_H5_TEST (h5mkgrp_nested_mult_lp 0 "-lp" /one/two /three/four)
diff --git a/tools/test/misc/CMakeTestsRepart.cmake b/tools/test/misc/CMakeTestsRepart.cmake
new file mode 100644
index 0000000..37da903
--- /dev/null
+++ b/tools/test/misc/CMakeTestsRepart.cmake
@@ -0,0 +1,96 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
+
+##############################################################################
+##############################################################################
+### T E S T I N G ###
+##############################################################################
+##############################################################################
+
+ # --------------------------------------------------------------------
+ # Copy all the HDF5 files from the source directory into the test directory
+ # --------------------------------------------------------------------
+ set (HDF5_REFERENCE_TEST_FILES
+ family_file00000.h5
+ family_file00001.h5
+ family_file00002.h5
+ family_file00003.h5
+ family_file00004.h5
+ family_file00005.h5
+ family_file00006.h5
+ family_file00007.h5
+ family_file00008.h5
+ family_file00009.h5
+ family_file00010.h5
+ family_file00011.h5
+ family_file00012.h5
+ family_file00013.h5
+ family_file00014.h5
+ family_file00015.h5
+ family_file00016.h5
+ family_file00017.h5
+ )
+
+ foreach (h5_file ${HDF5_REFERENCE_TEST_FILES})
+ HDFTEST_COPY_FILE("${HDF5_TOOLS_DIR}/testfiles/${h5_file}" "${PROJECT_BINARY_DIR}/${h5_file}" "h5repart_files")
+ endforeach ()
+ add_custom_target(h5repart_files ALL COMMENT "Copying files needed by h5repart tests" DEPENDS ${h5repart_files_list})
+
+##############################################################################
+##############################################################################
+### T H E T E S T S M A C R O S ###
+##############################################################################
+##############################################################################
+
+
+##############################################################################
+##############################################################################
+### T H E T E S T S ###
+##############################################################################
+##############################################################################
+
+ # Remove any output file left over from previous test run
+ add_test (
+ NAME H5REPART-clearall-objects
+ COMMAND ${CMAKE_COMMAND}
+ -E remove
+ fst_family00000.h5
+ scd_family00000.h5
+ scd_family00001.h5
+ scd_family00002.h5
+ scd_family00003.h5
+ family_to_sec2.h5
+ )
+ if (NOT "${last_test}" STREQUAL "")
+ set_tests_properties (H5REPART-clearall-objects PROPERTIES DEPENDS ${last_test})
+ endif ()
+ set (last_test "H5REPART-clearall-objects")
+
+ # repartition family member size to 20,000 bytes.
+ add_test (NAME H5REPART-h5repart_20K COMMAND $<TARGET_FILE:h5repart> -m 20000 family_file%05d.h5 fst_family%05d.h5)
+ set_tests_properties (H5REPART-h5repart_20K PROPERTIES DEPENDS H5REPART-clearall-objects)
+
+ # repartition family member size to 5 KB.
+ add_test (NAME H5REPART-h5repart_5K COMMAND $<TARGET_FILE:h5repart> -m 5k family_file%05d.h5 scd_family%05d.h5)
+ set_tests_properties (H5REPART-h5repart_5K PROPERTIES DEPENDS H5REPART-clearall-objects)
+
+ # convert family file to sec2 file of 20,000 bytes
+ add_test (NAME H5REPART-h5repart_sec2 COMMAND $<TARGET_FILE:h5repart> -m 20000 -family_to_sec2 family_file%05d.h5 family_to_sec2.h5)
+ set_tests_properties (H5REPART-h5repart_sec2 PROPERTIES DEPENDS H5REPART-clearall-objects)
+
+ # test the output files repartitioned above.
+ add_test (NAME H5REPART-h5repart_test COMMAND $<TARGET_FILE:h5repart_test>)
+ set_tests_properties (H5REPART-h5repart_test PROPERTIES DEPENDS "H5REPART-clearall-objects;H5REPART-h5repart_20K;H5REPART-h5repart_5K;H5REPART-h5repart_sec2")
+
+ set (H5_DEP_EXECUTABLES ${H5_DEP_EXECUTABLES}
+ h5repart_test
+ )
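
The repartition tests above rewrite the family_file%05d.h5 members into new family files and into a single sec2 file. As a point of reference only (it is not part of the test suite), the sketch below shows one way such a repartitioned family file could be opened afterwards through the family virtual file driver; it assumes the member size passed to H5Pset_fapl_family() matches the 5 KB size given to "h5repart -m 5k" above.

    /* Minimal sketch: open a family file produced by "h5repart -m 5k"
     * through the family VFD.  The member size handed to
     * H5Pset_fapl_family() is assumed to match what h5repart wrote.
     */
    #include "hdf5.h"

    int
    main(void)
    {
        hid_t fapl = -1, fid = -1;

        if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
            return 1;
        /* 5 KB members, default access properties for each member file */
        if(H5Pset_fapl_family(fapl, (hsize_t)(5 * 1024), H5P_DEFAULT) < 0)
            return 1;
        if((fid = H5Fopen("scd_family%05d.h5", H5F_ACC_RDONLY, fapl)) < 0)
            return 1;

        H5Fclose(fid);
        H5Pclose(fapl);
        return 0;
    }
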
diff --git a/tools/test/misc/Makefile.am b/tools/test/misc/Makefile.am
index 1025a8a..f2d2489 100644
--- a/tools/test/misc/Makefile.am
+++ b/tools/test/misc/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
@@ -36,7 +34,7 @@ SCRIPT_DEPEND=../../src/misc/h5repart$(EXEEXT) ../../src/misc/h5mkgrp$(EXEEXT) .
# Temporary files. *.h5 are generated by h5repart_gentest. They should be
# copied to the testfiles/ directory if an update is required. fst_family*.h5
# and scd_family*.h5 were created by setting the HDF5_NOCLEANUP variable.
-CHECK_CLEANFILES+=*.h5 ../testfiles/fst_family*.h5 ../testfiles/scd_family*.h5
+CHECK_CLEANFILES+=*.h5 ../testfiles/fst_family*.h5 ../testfiles/scd_family*.h5 append.log
# These were generated by configure. Remove them only when distclean.
DISTCLEANFILES=testh5repart.sh testh5clear.sh
diff --git a/tools/test/misc/clear_open_chk.c b/tools/test/misc/clear_open_chk.c
index f3e6ba3..3a61385 100644
--- a/tools/test/misc/clear_open_chk.c
+++ b/tools/test/misc/clear_open_chk.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "hdf5.h"
#include "H5private.h"
@@ -27,28 +25,28 @@ usage(void)
} /* usage() */
/*-------------------------------------------------------------------------
- * Function: main
+ * Function: main
*
- * Purpose: To open the file which has zero or nonzero status_flags in
- * the superblock.
+ * Purpose: To open the file which has zero or nonzero status_flags in
+ * the superblock.
*
- * Return: 0 on success
- * 1 on failure
+ * Return: 0 on success
+ * 1 on failure
*
- * Programmer: Vailin Choi; July 2013
+ * Programmer: Vailin Choi; July 2013
*
*-------------------------------------------------------------------------
*/
int
main(int argc, char *argv[])
{
- char *fname; /* The HDF5 file name */
- hid_t fid; /* File ID */
+ char *fname; /* The HDF5 file name */
+ hid_t fid; /* File ID */
/* Check the # of arguments */
if(argc != 2) {
- usage();
- return(EXIT_FAILURE);
+ usage();
+ HDexit(EXIT_FAILURE);
}
/* Get the file name */
@@ -56,17 +54,18 @@ main(int argc, char *argv[])
/* Try opening the file */
if((fid = h5tools_fopen(fname, H5F_ACC_RDONLY, H5P_DEFAULT, NULL, NULL, (size_t)0)) < 0) {
- HDfprintf(stderr, "clear_open_chk: unable to open the file\n");
- return EXIT_FAILURE;
+ HDfprintf(stderr, "clear_open_chk: unable to open the file\n");
+ HDfree(fname);
+ HDexit(EXIT_FAILURE);
}
+ HDfree(fname);
/* Close the file */
if(H5Fclose(fid) < 0) {
- HDfprintf(stderr, "clear_open_chk: cannot close the file\n");
- return EXIT_FAILURE;
+ HDfprintf(stderr, "clear_open_chk: cannot close the file\n");
+ HDexit(EXIT_FAILURE);
}
/* Return success */
- return EXIT_SUCCESS;
-
+ HDexit(EXIT_SUCCESS);
} /* main() */
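
The clear_open_chk program above reports, through its exit code, whether an HDF5 file can be opened; the test script runs it before and after h5clear to confirm that clearing status_flags makes the file openable again. The sketch below captures that pattern with the public H5Fopen() call only; the actual program goes through the tools library's h5tools_fopen(), so treat this as an approximation for illustration.

    /* Sketch of the open-check pattern: exit 0 if the file opens, 1 if not.
     * The real clear_open_chk uses h5tools_fopen() from the tools library;
     * plain H5Fopen() is used here to keep the sketch self-contained.
     */
    #include <stdio.h>
    #include <stdlib.h>
    #include "hdf5.h"

    int
    main(int argc, char *argv[])
    {
        hid_t fid;                      /* File ID */

        if(argc != 2) {
            fprintf(stderr, "usage: open_check <filename>\n");
            exit(EXIT_FAILURE);
        }

        /* In the h5clear tests this open is expected to fail while
         * status_flags are set (v3 superblock) and to succeed afterwards. */
        if((fid = H5Fopen(argv[1], H5F_ACC_RDONLY, H5P_DEFAULT)) < 0)
            exit(EXIT_FAILURE);

        if(H5Fclose(fid) < 0)
            exit(EXIT_FAILURE);

        exit(EXIT_SUCCESS);
    }
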
diff --git a/tools/test/misc/h5clear_gentest.c b/tools/test/misc/h5clear_gentest.c
index 0f20c35..326109c 100644
--- a/tools/test/misc/h5clear_gentest.c
+++ b/tools/test/misc/h5clear_gentest.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#include "hdf5.h"
#include "H5private.h"
@@ -25,6 +23,103 @@ const char *FILENAME[] = {
#define KB 1024U
+#define CACHE_IMAGE_FILE "h5clear_mdc_image.h5"
+#define DSET "DSET"
+
+/*-------------------------------------------------------------------------
+ * Function: gen_cache_image_file
+ *
+ * Purpose: To create a file with cache image feature enabled.
+ *
+ * Return: Success: 0
+ * Failure: 1
+ *
+ * Programmer: Vailin Choi; March 2017
+ *
+ *-------------------------------------------------------------------------
+ */
+static int
+gen_cache_image_file(const char *fname)
+{
+ hid_t fid = -1; /* File ID */
+ hid_t did = -1, sid = -1; /* Dataset ID, dataspace ID */
+ hid_t fapl = -1; /* File access property list */
+ hid_t dcpl = -1; /* Dataset creation property list */
+ hsize_t dims[2]; /* Dimension sizes */
+ hsize_t chunks[2]; /* Chunked dimension sizes */
+ int buf[50][100]; /* Buffer for data to write */
+ int i, j; /* Local index variables */
+ H5AC_cache_image_config_t cache_image_config = /* Cache image input configuration */
+ { H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION,
+ TRUE, FALSE,
+ H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE};
+
+ /* Create a copy of file access property list */
+ if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
+ goto error;
+
+ /* Enable latest format in fapl */
+ if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
+ goto error;
+
+ /* Enable metadata cache image in fapl */
+ if(H5Pset_mdc_image_config(fapl, &cache_image_config) < 0)
+ goto error;
+
+ /* Create the file */
+ if((fid = H5Fcreate(fname, H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
+ goto error;
+
+ /* Create dataspace */
+ dims[0] = 50;
+ dims[1] = 100;
+ if((sid = H5Screate_simple(2, dims, NULL)) < 0)
+ goto error;
+
+ /* Initialize buffer for writing to dataset */
+ for(i = 0; i < 50; i++)
+ for(j = 0; j < 100; j++)
+ buf[i][j] = i * j;
+
+ /* Set up to create a chunked dataset */
+ if((dcpl = H5Pcreate(H5P_DATASET_CREATE)) < 0)
+ goto error;
+ chunks[0] = 5;
+ chunks[1] = 10;
+ if(H5Pset_chunk(dcpl, 2, chunks) < 0)
+ goto error;
+ if((did = H5Dcreate2(fid, DSET, H5T_NATIVE_INT, sid, H5P_DEFAULT, dcpl, H5P_DEFAULT)) < 0)
+ goto error;
+
+ /* Write to the dataset */
+ if(H5Dwrite(did, H5T_NATIVE_INT, H5S_ALL, H5S_ALL, H5P_DEFAULT, buf) < 0)
+ goto error;
+
+ /* Closing */
+ if(H5Dclose(did) < 0)
+ goto error;
+ if(H5Pclose(dcpl) < 0)
+ goto error;
+ if(H5Pclose(fapl) < 0)
+ goto error;
+ if(H5Sclose(sid) < 0)
+ goto error;
+ if(H5Fclose(fid) < 0)
+ goto error;
+ return 0;
+
+error:
+ H5E_BEGIN_TRY {
+ H5Pclose(dcpl);
+ H5Sclose(sid);
+ H5Dclose(did);
+ H5Fclose(fid);
+ H5Pclose(fapl);
+ H5Pclose(dcpl);
+ } H5E_END_TRY;
+ return 1;
+}
+
/*-------------------------------------------------------------------------
* Function: main
*
@@ -45,7 +140,7 @@ const char *FILENAME[] = {
* status_flags properly so users can open the files afterwards.
*
* Return: Success: 0
- * Failure: 1
+ * Failure: 1
*
* Programmer: Vailin Choi; July 2013
*
@@ -60,67 +155,75 @@ main(void)
char fname[512]; /* File name */
unsigned new_format; /* To use latest library format or not */
+ /* Generate a file with cache image feature enabled */
+ if(gen_cache_image_file(CACHE_IMAGE_FILE) < 0)
+ goto error;
+
+ /*
+ * Generate files with invalid status_flags
+ */
+
/* Create a copy of the file access property list */
if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
- goto error;
+ goto error;
/* Copy the file access property list */
if((new_fapl = H5Pcopy(fapl)) < 0)
- goto error;
+ goto error;
/* Set to latest library format */
if(H5Pset_libver_bounds(new_fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0)
- goto error;
+ goto error;
/* Files created within this for loop will have v3 superblock and nonzero status_flags */
for(new_format = FALSE; new_format <= TRUE; new_format++) {
- hid_t fapl2, my_fapl; /* File access property lists */
-
- /* Set to use the appropriate file access property list */
- if(new_format)
- fapl2 = new_fapl;
- else
- fapl2 = fapl;
- /*
- * Create a sec2 file
- */
- if((my_fapl = H5Pcopy(fapl2)) < 0)
- goto error;
- /* Create the file */
- sprintf(fname, "%s%s", new_format? "latest_":"", FILENAME[0]);
- if((fid = H5Fcreate(fname, H5F_ACC_TRUNC | (new_format ? 0 : H5F_ACC_SWMR_WRITE), H5P_DEFAULT, my_fapl)) < 0)
- goto error;
-
- /* Flush the file */
- if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
- goto error;
+ hid_t fapl2, my_fapl; /* File access property lists */
+
+ /* Set to use the appropriate file access property list */
+ if(new_format)
+ fapl2 = new_fapl;
+ else
+ fapl2 = fapl;
+ /*
+ * Create a sec2 file
+ */
+ if((my_fapl = H5Pcopy(fapl2)) < 0)
+ goto error;
+ /* Create the file */
+ sprintf(fname, "%s%s", new_format? "latest_":"", FILENAME[0]);
+ if((fid = H5Fcreate(fname, H5F_ACC_TRUNC | (new_format ? 0 : H5F_ACC_SWMR_WRITE), H5P_DEFAULT, my_fapl)) < 0)
+ goto error;
+
+ /* Flush the file */
+ if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
+ goto error;
- /* Close the property list */
- if(H5Pclose(my_fapl) < 0)
- goto error;
-
- /*
- * Create a log file
- */
- /* Create a copy of file access property list */
- if((my_fapl = H5Pcopy(fapl2)) < 0)
- goto error;
-
- /* Setup the fapl for the log driver */
- if(H5Pset_fapl_log(my_fapl, "append.log", (unsigned long long)H5FD_LOG_ALL, (size_t)(4 * KB)) < 0)
- goto error;
-
- /* Create the file */
- sprintf(fname, "%s%s", new_format? "latest_":"", FILENAME[1]);
- if((fid = H5Fcreate(fname, H5F_ACC_TRUNC | (new_format ? 0 : H5F_ACC_SWMR_WRITE), H5P_DEFAULT, my_fapl)) < 0)
- goto error;
-
- /* Flush the file */
- if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
- goto error;
-
- /* Close the property list */
- if(H5Pclose(my_fapl) < 0)
- goto error;
+ /* Close the property list */
+ if(H5Pclose(my_fapl) < 0)
+ goto error;
+
+ /*
+ * Create a log file
+ */
+ /* Create a copy of file access property list */
+ if((my_fapl = H5Pcopy(fapl2)) < 0)
+ goto error;
+
+ /* Setup the fapl for the log driver */
+ if(H5Pset_fapl_log(my_fapl, "append.log", (unsigned long long)H5FD_LOG_ALL, (size_t)(4 * KB)) < 0)
+ goto error;
+
+ /* Create the file */
+ sprintf(fname, "%s%s", new_format? "latest_":"", FILENAME[1]);
+ if((fid = H5Fcreate(fname, H5F_ACC_TRUNC | (new_format ? 0 : H5F_ACC_SWMR_WRITE), H5P_DEFAULT, my_fapl)) < 0)
+ goto error;
+
+ /* Flush the file */
+ if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
+ goto error;
+
+ /* Close the property list */
+ if(H5Pclose(my_fapl) < 0)
+ goto error;
} /* end for */
@@ -128,38 +231,38 @@ main(void)
* Create a sec2 file with v0 superblock but nonzero status_flags
*/
if((fid = H5Fcreate(FILENAME[2], H5F_ACC_TRUNC, H5P_DEFAULT, fapl)) < 0)
- goto error;
+ goto error;
/* Flush the file */
if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
- goto error;
+ goto error;
/*
* Create a sec2 file with v2 superblock but nonzero status_flags
*/
if((fcpl = H5Pcreate(H5P_FILE_CREATE)) < 0)
- goto error;
+ goto error;
if(H5Pset_shared_mesg_nindexes(fcpl, 1) < 0)
goto error;
if(H5Pset_shared_mesg_index(fcpl, 0, H5O_SHMESG_DTYPE_FLAG, 50) < 0)
goto error;
if((fid = H5Fcreate(FILENAME[3], H5F_ACC_TRUNC, fcpl, fapl)) < 0)
- goto error;
+ goto error;
/* Flush the file */
if(H5Fflush(fid, H5F_SCOPE_GLOBAL) < 0)
- goto error;
+ goto error;
/* Close the property lists */
if(H5Pclose(fapl) < 0)
- goto error;
+ goto error;
if(H5Pclose(new_fapl) < 0)
- goto error;
+ goto error;
if(H5Pclose(fcpl) < 0)
- goto error;
+ goto error;
fflush(stdout);
fflush(stderr);
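
gen_cache_image_file() above enables the metadata cache image feature through the file access property list before creating the file. The condensed sketch below isolates just that property-list setup; it keeps the same initializer order used in the generator and substitutes plain 1/0 for the TRUE/FALSE macros that the generator pulls in from H5private.h.

    /* Condensed sketch of the cache-image setup from gen_cache_image_file():
     * build a file access property list that requests the latest file format
     * and a metadata cache image.  The caller would pass the returned fapl to
     * H5Fcreate() and close it afterwards.
     */
    #include "hdf5.h"

    static hid_t
    make_cache_image_fapl(void)
    {
        hid_t fapl = -1;
        H5AC_cache_image_config_t config = { H5AC__CURR_CACHE_IMAGE_CONFIG_VERSION,
                                             1,   /* generate a cache image   */
                                             0,   /* don't save resize status */
                                             H5AC__CACHE_IMAGE__ENTRY_AGEOUT__NONE };

        if((fapl = H5Pcreate(H5P_FILE_ACCESS)) < 0)
            return -1;
        if(H5Pset_libver_bounds(fapl, H5F_LIBVER_LATEST, H5F_LIBVER_LATEST) < 0 ||
           H5Pset_mdc_image_config(fapl, &config) < 0) {
            H5Pclose(fapl);
            return -1;
        }
        return fapl;
    }
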
diff --git a/tools/test/misc/h5perf_gentest.c b/tools/test/misc/h5perf_gentest.c
index 50c18c1..3784278 100644
--- a/tools/test/misc/h5perf_gentest.c
+++ b/tools/test/misc/h5perf_gentest.c
@@ -1,3 +1,14 @@
+/* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * *
+ * Copyright by The HDF Group. *
+ * All rights reserved. *
+ * *
+ * This file is part of HDF5. The full HDF5 copyright notice, including *
+ * terms governing use, modification, and redistribution, is contained in *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
+ * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*****************************************************************************
This test generates attributes, groups, and datasets of many types. It
creates a large number of attributes, groups, and datasets by specifying
diff --git a/tools/test/misc/h5repart_gentest.c b/tools/test/misc/h5repart_gentest.c
index 8a34694..5c1ff87 100644
--- a/tools/test/misc/h5repart_gentest.c
+++ b/tools/test/misc/h5repart_gentest.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/tools/test/misc/repart_test.c b/tools/test/misc/repart_test.c
index e6da779..372f46a 100644
--- a/tools/test/misc/repart_test.c
+++ b/tools/test/misc/repart_test.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/tools/test/misc/talign.c b/tools/test/misc/talign.c
index be373e7..9a72557 100644
--- a/tools/test/misc/talign.c
+++ b/tools/test/misc/talign.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -19,7 +17,7 @@
*/
#include <string.h>
#include <stdlib.h>
-/*#include <unistd.h> *//* Required for unlink() */
+/*#include <unistd.h> *//* Required for unlink() */
#include "hdf5.h"
#include "H5private.h"
@@ -88,8 +86,7 @@ int main(void)
H5Tinsert(cmp, "Not Ok", sizeof(fok) + sizeof(string5), array_dt);
H5Tclose(array_dt);
- fix = h5tools_get_native_type(cmp);
-
+ fix = H5Tget_native_type(cmp, H5T_DIR_DEFAULT);
cmp1 = H5Tcreate(H5T_COMPOUND, sizeof(fok));
cdim[0] = sizeof(fok) / sizeof(float);
@@ -108,7 +105,7 @@ int main(void)
H5Tclose(array_dt);
plist = H5Pcreate(H5P_DATASET_XFER);
- if((error = H5Pset_preserve(plist, 1)) < 0)
+ if((error = H5Pset_preserve(plist, 1)) < 0)
goto out;
/*
@@ -210,35 +207,3 @@ out:
return result;
}
-/*-------------------------------------------------------------------------
- * Function: h5tools_get_native_type
- *
- * Purpose: Wrapper around H5Tget_native_type() to work around
- * Problems with bitfields.
- *
- * Return: Success: datatype ID
- *
- * Failure: FAIL
- *
- * Programmer: Quincey Koziol
- * Tuesday, October 5, 2004
- *
- * Modifications:
- *
- *-------------------------------------------------------------------------
- */
-hid_t
-h5tools_get_native_type(hid_t type)
-{
- hid_t p_type;
- H5T_class_t type_class;
-
- type_class = H5Tget_class(type);
- if(type_class==H5T_BITFIELD)
- p_type=H5Tcopy(type);
- else
- p_type = H5Tget_native_type(type,H5T_DIR_DEFAULT);
-
- return(p_type);
-}
-
diff --git a/tools/test/h5diff/testfiles/h5diff_dset_idx2.h5 b/tools/test/misc/testfiles/h5clear_log_v3.h5
index db7584d..aed33a4 100644
--- a/tools/test/h5diff/testfiles/h5diff_dset_idx2.h5
+++ b/tools/test/misc/testfiles/h5clear_log_v3.h5
Binary files differ
diff --git a/tools/test/misc/testfiles/h5clear_mdc_image.h5 b/tools/test/misc/testfiles/h5clear_mdc_image.h5
new file mode 100644
index 0000000..6ed8b70
--- /dev/null
+++ b/tools/test/misc/testfiles/h5clear_mdc_image.h5
Binary files differ
diff --git a/tools/test/misc/testfiles/h5clear_missing_file.ddl b/tools/test/misc/testfiles/h5clear_missing_file.ddl
new file mode 100644
index 0000000..1e5150c
--- /dev/null
+++ b/tools/test/misc/testfiles/h5clear_missing_file.ddl
@@ -0,0 +1,15 @@
+usage: h5clear [OPTIONS] file_name
+ OPTIONS
+ -h, --help Print a usage message and exit
+ -V, --version Print version number and exit
+ -s, --status Clear the status_flags field in the file's superblock
+ -m, --image Remove the metadata cache image from the file
+
+Examples of use:
+
+h5clear -s file_name
+ Clear the status_flags field in the superblock of the HDF5 file <file_name>.
+
+h5clear -m file_name
+ Remove the metadata cache image from the HDF5 file <file_name>.
+h5clear error: missing file name
diff --git a/tools/test/misc/testfiles/h5clear_no_mdc_image.ddl b/tools/test/misc/testfiles/h5clear_no_mdc_image.ddl
new file mode 100644
index 0000000..f5acd71
--- /dev/null
+++ b/tools/test/misc/testfiles/h5clear_no_mdc_image.ddl
@@ -0,0 +1 @@
+h5clear warning: No cache image in the file
diff --git a/tools/test/misc/testfiles/h5clear_open_fail.ddl b/tools/test/misc/testfiles/h5clear_open_fail.ddl
new file mode 100644
index 0000000..895ecd4
--- /dev/null
+++ b/tools/test/misc/testfiles/h5clear_open_fail.ddl
@@ -0,0 +1 @@
+h5clear error: h5tools_fopen
diff --git a/tools/test/misc/testfiles/h5clear_sec2_v0.h5 b/tools/test/misc/testfiles/h5clear_sec2_v0.h5
new file mode 100644
index 0000000..571fd46
--- /dev/null
+++ b/tools/test/misc/testfiles/h5clear_sec2_v0.h5
Binary files differ
diff --git a/tools/test/misc/testfiles/h5clear_sec2_v2.h5 b/tools/test/misc/testfiles/h5clear_sec2_v2.h5
new file mode 100644
index 0000000..92833e6
--- /dev/null
+++ b/tools/test/misc/testfiles/h5clear_sec2_v2.h5
Binary files differ
diff --git a/tools/test/misc/testfiles/h5clear_sec2_v3.h5 b/tools/test/misc/testfiles/h5clear_sec2_v3.h5
new file mode 100644
index 0000000..9f1e6ce
--- /dev/null
+++ b/tools/test/misc/testfiles/h5clear_sec2_v3.h5
Binary files differ
diff --git a/tools/test/misc/testfiles/h5clear_usage.ddl b/tools/test/misc/testfiles/h5clear_usage.ddl
new file mode 100644
index 0000000..a399ae7
--- /dev/null
+++ b/tools/test/misc/testfiles/h5clear_usage.ddl
@@ -0,0 +1,14 @@
+usage: h5clear [OPTIONS] file_name
+ OPTIONS
+ -h, --help Print a usage message and exit
+ -V, --version Print version number and exit
+ -s, --status Clear the status_flags field in the file's superblock
+ -m, --image Remove the metadata cache image from the file
+
+Examples of use:
+
+h5clear -s file_name
+ Clear the status_flags field in the superblock of the HDF5 file <file_name>.
+
+h5clear -m file_name
+ Remove the metadata cache image from the HDF5 file <file_name>.
diff --git a/tools/test/misc/testfiles/latest_h5clear_log_v3.h5 b/tools/test/misc/testfiles/latest_h5clear_log_v3.h5
new file mode 100644
index 0000000..da5c426
--- /dev/null
+++ b/tools/test/misc/testfiles/latest_h5clear_log_v3.h5
Binary files differ
diff --git a/tools/test/misc/testfiles/latest_h5clear_sec2_v3.h5 b/tools/test/misc/testfiles/latest_h5clear_sec2_v3.h5
new file mode 100644
index 0000000..e511248
--- /dev/null
+++ b/tools/test/misc/testfiles/latest_h5clear_sec2_v3.h5
Binary files differ
diff --git a/tools/test/misc/testfiles/mod_h5clear_mdc_image.h5 b/tools/test/misc/testfiles/mod_h5clear_mdc_image.h5
new file mode 100644
index 0000000..310134b
--- /dev/null
+++ b/tools/test/misc/testfiles/mod_h5clear_mdc_image.h5
Binary files differ
diff --git a/tools/test/misc/testh5clear.sh.in b/tools/test/misc/testh5clear.sh.in
index a6836d4..2966b2c 100644
--- a/tools/test/misc/testh5clear.sh.in
+++ b/tools/test/misc/testh5clear.sh.in
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Tests for the h5clear tool
#
@@ -20,11 +18,19 @@ TESTNAME=h5clear
EXIT_SUCCESS=0
EXIT_FAILURE=1
-H5CLEAR=../../src/misc/h5clear
-H5CLEAR_BIN=`pwd`/$H5CLEAR # The path of the tool binary
+H5CLEAR=../../src/misc/h5clear # The tool name
+H5CLEAR_BIN=`pwd`/$H5CLEAR # The path of the tool binary
-OPENCHK=clear_open_chk # Try opening the test file
-OPENCHK_BIN=`pwd`/$OPENCHK # The path to the binary
+OPENCHK=clear_open_chk # Program to check whether the file can be opened
+OPENCHK_BIN=`pwd`/$OPENCHK # The path to the binary
+
+RM='rm -rf'
+CMP='cmp -s'
+DIFF='diff -c'
+CP='cp'
+DIRNAME='dirname'
+LS='ls'
+AWK='awk'
SUCCEED=0
FAIL=1
@@ -32,6 +38,87 @@ FAIL=1
nerrors=0
verbose=yes
+# source dirs
+SRC_TOOLS="$srcdir/../.."
+
+SRC_TOOLS_TESTFILES="$SRC_TOOLS/testfiles"
+# testfiles source dirs for tools
+SRC_H5CLEAR_TESTFILES="$SRC_TOOLS/test/misc/testfiles"
+
+TESTDIR=./testh5clear
+test -d $TESTDIR || mkdir -p $TESTDIR
+
+######################################################################
+# test files
+# --------------------------------------------------------------------
+# All the test files are copied from the source directory to the test directory.
+# NOTE: Keep this framework to add/remove test files.
+# Any test files from other tools can be used in this framework.
+# This list is also used to check that the files exist.
+# Lines starting with '#' (no leading space) are treated as comments.
+# --------------------------------------------------------------------
+
+#
+# copy test files and expected output files from source dirs to test dir
+#
+COPY_TESTFILES="
+$SRC_H5CLEAR_TESTFILES/h5clear_usage.ddl
+$SRC_H5CLEAR_TESTFILES/h5clear_open_fail.ddl
+$SRC_H5CLEAR_TESTFILES/h5clear_missing_file.ddl
+$SRC_H5CLEAR_TESTFILES/h5clear_no_mdc_image.ddl
+$SRC_H5CLEAR_TESTFILES/h5clear_sec2_v0.h5
+$SRC_H5CLEAR_TESTFILES/h5clear_sec2_v2.h5
+$SRC_H5CLEAR_TESTFILES/h5clear_sec2_v3.h5
+$SRC_H5CLEAR_TESTFILES/h5clear_log_v3.h5
+$SRC_H5CLEAR_TESTFILES/latest_h5clear_log_v3.h5
+$SRC_H5CLEAR_TESTFILES/latest_h5clear_sec2_v3.h5
+$SRC_H5CLEAR_TESTFILES/h5clear_mdc_image.h5
+$SRC_H5CLEAR_TESTFILES/mod_h5clear_mdc_image.h5
+"
+
+COPY_TESTFILES_TO_TESTDIR()
+{
+ # Copy test files. Use -f to make sure we get a new copy.
+ for tstfile in $COPY_TESTFILES
+ do
+ # ignore '#' comment
+ echo $tstfile | tr -d ' ' | grep '^#' > /dev/null
+ RET=$?
+ if [ $RET -eq 1 ]; then
+ # Skip cp if srcdir is the same as destdir; this occurs when the
+ # build/test is performed in the source dir and would make cp fail.
+ SDIR=`$DIRNAME $tstfile`
+ INODE_SDIR=`$LS -i -d $SDIR | $AWK -F' ' '{print $1}'`
+ INODE_DDIR=`$LS -i -d $TESTDIR | $AWK -F' ' '{print $1}'`
+ if [ "$INODE_SDIR" != "$INODE_DDIR" ]; then
+ $CP -f $tstfile $TESTDIR
+ if [ $? -ne 0 ]; then
+ echo "Error: FAILED to copy $tstfile ."
+
+ # Comment this out to CREATE the expected file
+ exit $EXIT_FAILURE
+ fi
+ fi
+ fi
+ done
+}
+
+CLEAN_TESTFILES_AND_TESTDIR()
+{
+ # Skip rm if srcdir is the same as destdir; this occurs when the
+ # build/test is performed in the source dir, where rm would remove the
+ # original files.
+ SDIR=$SRC_H5CLEAR_TESTFILES
+ INODE_SDIR=`$LS -i -d $SDIR | $AWK -F' ' '{print $1}'`
+ INODE_DDIR=`$LS -i -d $TESTDIR | $AWK -F' ' '{print $1}'`
+ if [ "$INODE_SDIR" != "$INODE_DDIR" ]; then
+ $RM $TESTDIR
+ fi
+}
+
+#
+#
# Print a one-line message left justified in a field of 70 characters
# beginning with the word "Testing".
#
@@ -40,68 +127,207 @@ TESTING() {
echo "Testing $* $SPACES" | cut -c1-70 | tr -d '\012'
}
+# $1 is the expected output
+# $2 is the output from testing
+COMPARE_OUT() {
+ if $CMP $1 $2; then
+ echo "PASSED"
+ else
+ echo "*FAILED*"
+ echo " Expected result (*.ddl) differs from actual result (*.out)"
+ nerrors="`expr $nerrors + 1`"
+ test yes = "$verbose" && $DIFF $expect $actual |sed 's/^/ /'
+ fi
+}
+#
+#
+#
+# Run h5clear and verify the expected output
+# $1: the test file name
+# $2: option 1 passed to the tool
+# $3: option 2 passed to the tool
+# $4: the expected output
+TOOLTEST_OUT() {
+ fname=$1
+ option1=$2
+ option2=$3
+ expected=$4
+ # Prepare expected and actual output
+ expect="$TESTDIR/$expected"
+ actual="$TESTDIR/`basename $expected .ddl`.out"
+ actual_err="$TESTDIR/`basename $expected .ddl`.err"
+ actual_sav=${actual}-sav
+ actual_err_sav=${actual_err}-sav
+
+ # Run test.
+ TESTING $H5CLEAR $option1 $option2 $fname
+ (
+ cd $TESTDIR
+ $RUNSERIAL $H5CLEAR_BIN $option1 $option2 $fname
+ ) >$actual 2>$actual_err
+ cp $actual $actual_sav
+ cp $actual_err $actual_err_sav
+ cat $actual_err >> $actual
+
+ # Compare output
+ COMPARE_OUT $expect $actual
+
+ # Clean up output file
+ if test -z "$HDF5_NOCLEANUP"; then
+ $RM $actual $actual_err $actual_sav $actual_err_sav
+ fi
+}
-# (1)Use "clear_open_chk" to check if the file open succeeds or fails
# $1 is the filename to open
-# $2 is the expected return from "clear_open_chk"
-# (2) Use "h5clear" to clear the status_flags in the test file
-# (3) Open the test file via "clear_open_chk"
-TOOLTEST() {
+# $2 is the expected return from the open/check program
+OPEN_CHK() {
fname=$1
expected=$2
- # (1)
- $OPENCHK_BIN $fname 2>/dev/null
+ (
+ cd $TESTDIR
+ $OPENCHK_BIN $fname 2>/dev/null
+ )
actual=$?
if test $actual -ne $expected; then
- echo "Unexpected return from $OPENCHK"
- nerrors=`expr $nerrors + 1`
+ echo "Unexpected return from $OPENCHK"
+ nerrors=`expr $nerrors + 1`
fi
- # (2)
- TESTING $H5CLEAR $1
+}
+
+# Run h5clear and verify the expected exit code
+# $1 is the filename to open
+# $2 is option 1
+# $3 is option 2
+# $4 is the expected exit code from the tool
+TOOLTEST() {
fname=$1
- # Use "h5clear" to clear the status_flags in the test file
- $RUNSERIAL $H5CLEAR_BIN $fname
- if test $? -ne $SUCCEED; then
- echo ".....$H5CLEAR: should succeed"
- nerrors=`expr $nerrors + 1`
- else
- # (3) Open the test file via "clear_open_chk"
- $OPENCHK_BIN $fname
- if test $? -ne $SUCCEED; then
- echo "......$OPENCHK: should succeed"
+ option1=$2
+ option2=$3
+ ret_expected=$4
+ #
+ TESTING $H5CLEAR $option1 $option2 $fname
+ # h5clear $option1 $option2 $fname
+ (
+ cd $TESTDIR
+ $RUNSERIAL $H5CLEAR_BIN $option1 $option2 $fname 2>/dev/null 1>/dev/null
+ )
+ if test $? -ne $ret_expected; then
+ echo "*FAILED*"
nerrors=`expr $nerrors + 1`
else
echo "PASSED"
fi
- fi
}
##############################################################################
##############################################################################
-### T H E T E S T S ###
+### T H E T E S T S ###
##############################################################################
##############################################################################
+# prepare for testing
+COPY_TESTFILES_TO_TESTDIR
+# For CMake
+$CP -f $TESTDIR/h5clear_sec2_v0.h5 $TESTDIR/orig_h5clear_sec2_v0.h5
+$CP -f $TESTDIR/h5clear_sec2_v2.h5 $TESTDIR/orig_h5clear_sec2_v2.h5
+$CP -f $TESTDIR/h5clear_sec2_v3.h5 $TESTDIR/orig_h5clear_sec2_v3.h5
+#
+#
+#
+# The following are tests to verify the expected output from h5clear
+# "h5clear -h"
+# "h5clear" (no options, no file)
+# "h5clear junk.h5" (no options, nonexisting file)
+# "h5clear orig_h5clear_sec2_v3.h5" (no options, existing file)
+# "h5clear -m" (valid 1 option, no file)
+# "h5clear -s junk.h5" (valid 1 option, nonexisting file)
+# "h5clear -m -s" (valid 2 options, no file)
+# "h5clear -m -s junk.h5" (valid 2 options, nonexisting file)
+# "h5clear -m orig_h5clear_sec2_v2.h5" (valid 1 option, existing file, no cache image)
+# "h5clear -s -m orig_h5clear_sec2_v0.h5" (valid 2 options, existing file, no cache image)
+TOOLTEST_OUT "" -h "" h5clear_usage.ddl
+TOOLTEST_OUT "" "" "" h5clear_usage.ddl
+TOOLTEST_OUT junk.h5 "" "" h5clear_usage.ddl
+TOOLTEST_OUT orig_h5clear_sec2_v3.h5 "" "" h5clear_usage.ddl
+TOOLTEST_OUT "" -m "" h5clear_missing_file.ddl
+TOOLTEST_OUT junk.h5 -s "" h5clear_open_fail.ddl
+TOOLTEST_OUT "" -m -s h5clear_missing_file.ddl
+TOOLTEST_OUT junk.h5 -m -s h5clear_open_fail.ddl
+TOOLTEST_OUT orig_h5clear_sec2_v2.h5 -m "" h5clear_no_mdc_image.ddl
+TOOLTEST_OUT orig_h5clear_sec2_v0.h5 -s -m h5clear_no_mdc_image.ddl
#
-# Initial file open fails
-# After "h5clear" the file, the subsequent file open succeeds
-TOOLTEST h5clear_sec2_v3.h5 $FAIL
#
-TOOLTEST h5clear_log_v3.h5 $FAIL
#
-TOOLTEST latest_h5clear_sec2_v3.h5 $FAIL
+# The following are tests to verify the expected exit code from h5clear:
+# "h5clear -m h5clear_mdc_image.h5" (valid option, existing file, succeed exit code)
+# "h5clear --vers" (valid option, version #, succeed exit code)
+# "h5clear -k" (invalid 1 option, no file, fail exit code)
+# "h5clear -k junk.h5" (invalid 1 option, nonexisting file, fail exit code)
+# "h5clear -l h5clear_sec2_v2.h5" (invalid 1 option, existing file, fail exit code)
+# "h5clear -m -k" (valid/invalid 2 options, nofile, fail exit code)
+# "h5clear -l -m" (invalid/valid 2 options, nofile, fail exit code)
+# "h5clear -m -l junk.h5" (valid/invalid 2 options, nonexisting file, fail exit code)
+# "h5clear -l -m junk.h5" (invalid/valid 2 options, nonexisting file, fail exit code)
+# "h5clear -m -l h5clear_sec2_v0.h5" (valid/invalid 2 options, existing file, fail exit code)
+# "h5clear -l -m h5clear_sec2_v0.h5" (invalid/valid 2 options, existing file, fail exit code)
+TOOLTEST h5clear_mdc_image.h5 -m "" $SUCCEED
+TOOLTEST "" --vers "" $SUCCEED
+TOOLTEST "" -k "" $FAIL
+TOOLTEST junk.h5 -k "" $FAIL
+TOOLTEST h5clear_sec2_v2.h5 -l "" $FAIL
+TOOLTEST "" -m -k $FAIL
+TOOLTEST "" -l -m $FAIL
+TOOLTEST junk.h5 -m -l $FAIL
+TOOLTEST junk.h5 -l -m $FAIL
+TOOLTEST h5clear_sec2_v0.h5 -m -l $FAIL
+TOOLTEST h5clear_sec2_v0.h5 -l -m $FAIL
#
-TOOLTEST latest_h5clear_log_v3.h5 $FAIL
#
#
-# File open succeeds because the library does not check status_flags for file with < v3 superblock
-TOOLTEST h5clear_sec2_v0.h5 $SUCCEED
-TOOLTEST h5clear_sec2_v2.h5 $SUCCEED
+# h5clear_mdc_image.h5 already had its cache image removed earlier; verify the expected warning from h5clear:
+TOOLTEST_OUT mod_h5clear_mdc_image.h5 -m "" h5clear_no_mdc_image.ddl
+TOOLTEST_OUT mod_h5clear_mdc_image.h5 -s -m h5clear_no_mdc_image.ddl
+#
+#
+#
+# The following are tests to verify the status_flags field is cleared properly:
+OPEN_CHK h5clear_sec2_v3.h5 $FAIL
+TOOLTEST h5clear_sec2_v3.h5 -s "" $SUCCEED
+OPEN_CHK h5clear_sec2_v3.h5 $SUCCEED
+#
+OPEN_CHK h5clear_log_v3.h5 $FAIL
+TOOLTEST h5clear_log_v3.h5 -s "" $SUCCEED
+OPEN_CHK h5clear_log_v3.h5 $SUCCEED
+#
+OPEN_CHK latest_h5clear_sec2_v3.h5 $FAIL
+TOOLTEST latest_h5clear_sec2_v3.h5 -s "" $SUCCEED
+OPEN_CHK latest_h5clear_sec2_v3.h5 $SUCCEED
+#
+OPEN_CHK latest_h5clear_log_v3.h5 $FAIL
+TOOLTEST latest_h5clear_log_v3.h5 -s "" $SUCCEED
+OPEN_CHK latest_h5clear_log_v3.h5 $SUCCEED
+#
+# File open succeeds for the following 2 test files because the
+# library does not check status_flags for files with < v3 superblock:
+OPEN_CHK h5clear_sec2_v0.h5 $SUCCEED
+TOOLTEST h5clear_sec2_v0.h5 -s "" $SUCCEED
+OPEN_CHK h5clear_sec2_v0.h5 $SUCCEED
+#
+OPEN_CHK h5clear_sec2_v2.h5 $SUCCEED
+TOOLTEST h5clear_sec2_v2.h5 -s "" $SUCCEED
+OPEN_CHK h5clear_sec2_v2.h5 $SUCCEED
+#
+#
#
# Clean up test files
if test -z "$HDF5_NOCLEANUP"; then
rm -f h5clear_*.h5 latest_h5clear*.h5
fi
+#
+#
+#
+# Clean up temporary files/directories
+CLEAN_TESTFILES_AND_TESTDIR
if test $nerrors -eq 0 ; then
echo "All $TESTNAME tests passed."
diff --git a/tools/test/misc/testh5mkgrp.sh.in b/tools/test/misc/testh5mkgrp.sh.in
index 9252934..3ad1f71 100644
--- a/tools/test/misc/testh5mkgrp.sh.in
+++ b/tools/test/misc/testh5mkgrp.sh.in
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Tests for the h5mkgrp tool
#
@@ -100,7 +98,7 @@ COPY_TESTFILES_TO_TESTDIR()
$CP -f $tstfile $TESTDIR
if [ $? -ne 0 ]; then
echo "Error: FAILED to copy $tstfile ."
-
+
# Comment out this to CREATE expected file
exit $EXIT_FAILURE
fi
@@ -124,7 +122,7 @@ CLEAN_TESTFILES_AND_TESTDIR()
# Print a one-line message left justified in a field of 70 characters
# beginning with the word "Testing".
-TESTING()
+TESTING()
{
SPACES=" "
echo "Testing $* $SPACES" |cut -c1-70 |tr -d '\012'
@@ -136,7 +134,7 @@ TESTING()
# Print a one-line message left justified in a field of 70 characters
# beginning with the word "Verifying".
#
-VERIFY_H5LS()
+VERIFY_H5LS()
{
SPACES=" "
echo "Verifying h5ls file structure $* $SPACES" | cut -c1-70 | tr -d '\012'
@@ -148,7 +146,7 @@ VERIFY_H5LS()
# Assumed arguments:
# $* arguments for h5mkgrp.
-TOOLTEST()
+TOOLTEST()
{
TESTING $H5MKGRP $@
(
@@ -173,7 +171,7 @@ TOOLTEST()
# Call the h5ls tool to verify the correct output data in the destination file
#
-H5LSTEST()
+H5LSTEST()
{
expect="$TESTDIR/`basename $1 .h5`.ls"
actual="$TESTDIR/`basename $1 .h5`.out"
@@ -193,9 +191,11 @@ H5LSTEST()
STDERR_FILTER $actual
if [ ! -f $expect ]; then
- # Create the expect file if it doesn't yet exist.
- echo " CREATED"
- cp $actual $expect
+ # Create the expect file if it doesn't yet exist.
+ echo " CREATED"
+ cp $actual $expect
+ echo " Expected result (*.ls) missing"
+ nerrors="`expr $nerrors + 1`"
elif $CMP $expect $actual; then
echo " PASSED"
else
@@ -217,7 +217,7 @@ H5LSTEST()
# $1 is test file name
# $2 is h5mkgrp options
# $* are groups to create
-RUNTEST()
+RUNTEST()
{
FILEOUT=$1
shift
@@ -246,7 +246,7 @@ RUNTEST()
# $1 is test expected output file
# $2 is h5mkgrp options
# $* are groups to create
-CMPTEST()
+CMPTEST()
{
FILEOUT=$1
expect="$TESTDIR/`basename $1 .h5`.txt"
@@ -262,11 +262,13 @@ CMPTEST()
$RUNSERIAL $H5MKGRP_BIN $@
) >$actual 2>$actual_err
cat $actual_err >> $actual
-
+
if [ ! -f $expect ]; then
# Create the expect file if it doesn't yet exist.
- echo " CREATED"
- cp $actual $expect
+ echo " CREATED"
+ cp $actual $expect
+ echo " Expected result (*.txt) missing"
+ nerrors="`expr $nerrors + 1`"
elif $CMP $expect $actual; then
echo " PASSED"
else
@@ -304,7 +306,7 @@ RUNTEST h5mkgrp_several_v.h5 "-v" one two
RUNTEST h5mkgrp_several_p.h5 "-p" one two
RUNTEST h5mkgrp_several_l.h5 "-l" one two
-# Create various nested groups
+# Create various nested groups
RUNTEST h5mkgrp_nested_p.h5 "-p" /one/two
RUNTEST h5mkgrp_nested_lp.h5 "-lp" /one/two
RUNTEST h5mkgrp_nested_mult_p.h5 "-p" /one/two /three/four
diff --git a/tools/test/misc/testh5repart.sh.in b/tools/test/misc/testh5repart.sh.in
index 2106a1d..594d37e 100644
--- a/tools/test/misc/testh5repart.sh.in
+++ b/tools/test/misc/testh5repart.sh.in
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Tests for the h5repart tool
@@ -43,7 +41,7 @@ SRC_TOOLS="$srcdir/../.."
SRC_TOOLS_TESTFILES="$SRC_TOOLS/testfiles"
-TESTDIR=./testfiles
+TESTDIR=./testrepart
test -d $TESTDIR || mkdir -p $TESTDIR
#
diff --git a/tools/test/misc/vds/CMakeLists.txt b/tools/test/misc/vds/CMakeLists.txt
index 23dc85c..ec672e5 100644
--- a/tools/test/misc/vds/CMakeLists.txt
+++ b/tools/test/misc/vds/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_TOOLS_TEST_MISC_VDS)
#-----------------------------------------------------------------------------
@@ -12,7 +12,7 @@ MACRO (ADD_H5_GENERATOR genfile)
TARGET_C_PROPERTIES (${genfile} STATIC " " " ")
target_link_libraries (${genfile} ${HDF5_LIB_TARGET} ${HDF5_TOOLS_LIB_TARGET})
set_target_properties (${genfile} PROPERTIES FOLDER generator/tools)
-ENDMACRO (ADD_H5_GENERATOR genfile)
+ENDMACRO ()
# generator executables
set (H5_GENERATORS
@@ -25,4 +25,4 @@ set (H5_GENERATORS
foreach (gen ${H5_GENERATORS})
ADD_H5_GENERATOR (${gen})
-endforeach (gen ${H5_GENERATORS})
+endforeach ()
diff --git a/tools/test/misc/vds/Makefile.am b/tools/test/misc/vds/Makefile.am
index 89aed64..73c335f 100644
--- a/tools/test/misc/vds/Makefile.am
+++ b/tools/test/misc/vds/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
diff --git a/tools/test/misc/vds/UC_1.h b/tools/test/misc/vds/UC_1.h
index 2150cfa..9d1f758 100644
--- a/tools/test/misc/vds/UC_1.h
+++ b/tools/test/misc/vds/UC_1.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef UC_1_H
diff --git a/tools/test/misc/vds/UC_1_one_dim_gen.c b/tools/test/misc/vds/UC_1_one_dim_gen.c
index f47b982..b997280 100644
--- a/tools/test/misc/vds/UC_1_one_dim_gen.c
+++ b/tools/test/misc/vds/UC_1_one_dim_gen.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/tools/test/misc/vds/UC_2.h b/tools/test/misc/vds/UC_2.h
index fe3f350..a3ee0f7 100644
--- a/tools/test/misc/vds/UC_2.h
+++ b/tools/test/misc/vds/UC_2.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef UC_2_H
diff --git a/tools/test/misc/vds/UC_2_two_dims_gen.c b/tools/test/misc/vds/UC_2_two_dims_gen.c
index d08cc5f..bd70fda 100644
--- a/tools/test/misc/vds/UC_2_two_dims_gen.c
+++ b/tools/test/misc/vds/UC_2_two_dims_gen.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/tools/test/misc/vds/UC_3.h b/tools/test/misc/vds/UC_3.h
index 0654a48..0c26dfb 100644
--- a/tools/test/misc/vds/UC_3.h
+++ b/tools/test/misc/vds/UC_3.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef UC_3_H
diff --git a/tools/test/misc/vds/UC_3_gaps_gen.c b/tools/test/misc/vds/UC_3_gaps_gen.c
index 7cb208b..e6e7226 100644
--- a/tools/test/misc/vds/UC_3_gaps_gen.c
+++ b/tools/test/misc/vds/UC_3_gaps_gen.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/tools/test/misc/vds/UC_4.h b/tools/test/misc/vds/UC_4.h
index bfcafed..5dba4fb 100644
--- a/tools/test/misc/vds/UC_4.h
+++ b/tools/test/misc/vds/UC_4.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef UC_4_H
diff --git a/tools/test/misc/vds/UC_4_printf_gen.c b/tools/test/misc/vds/UC_4_printf_gen.c
index d067d47..b328046 100644
--- a/tools/test/misc/vds/UC_4_printf_gen.c
+++ b/tools/test/misc/vds/UC_4_printf_gen.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/tools/test/misc/vds/UC_5.h b/tools/test/misc/vds/UC_5.h
index 96b2af3..3f8fe41 100644
--- a/tools/test/misc/vds/UC_5.h
+++ b/tools/test/misc/vds/UC_5.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef UC_5_H
diff --git a/tools/test/misc/vds/UC_5_stride_gen.c b/tools/test/misc/vds/UC_5_stride_gen.c
index 38d24a6..a105bbd 100644
--- a/tools/test/misc/vds/UC_5_stride_gen.c
+++ b/tools/test/misc/vds/UC_5_stride_gen.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/tools/test/misc/vds/UC_common.h b/tools/test/misc/vds/UC_common.h
index 0e61016..962a091 100644
--- a/tools/test/misc/vds/UC_common.h
+++ b/tools/test/misc/vds/UC_common.h
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef USE_CASE_COMMON_H
diff --git a/tools/test/perform/CMakeLists.txt b/tools/test/perform/CMakeLists.txt
index bea2d0c..20250c6 100644
--- a/tools/test/perform/CMakeLists.txt
+++ b/tools/test/perform/CMakeLists.txt
@@ -1,4 +1,4 @@
-cmake_minimum_required (VERSION 3.1.0)
+cmake_minimum_required (VERSION 3.2.2)
PROJECT (HDF5_TOOLS_TEST_PERFORM )
#-----------------------------------------------------------------------------
@@ -40,7 +40,7 @@ if (HDF5_BUILD_PERFORM_STANDALONE)
set_property (TARGET h5perf_serial_alone
APPEND PROPERTY COMPILE_DEFINITIONS STANDALONE
)
-endif (HDF5_BUILD_PERFORM_STANDALONE)
+endif ()
#-- Adding test for chunk
set (chunk_SOURCES
@@ -118,7 +118,7 @@ if (H5_HAVE_PARALLEL AND BUILD_TESTING)
set_property (TARGET h5perf_alone
APPEND PROPERTY COMPILE_DEFINITIONS STANDALONE
)
- endif (HDF5_BUILD_PERFORM_STANDALONE)
-endif (H5_HAVE_PARALLEL AND BUILD_TESTING)
+ endif ()
+endif ()
include (CMakeTests.cmake)
diff --git a/tools/test/perform/CMakeTests.cmake b/tools/test/perform/CMakeTests.cmake
index 0ea40a3..2933563 100644
--- a/tools/test/perform/CMakeTests.cmake
+++ b/tools/test/perform/CMakeTests.cmake
@@ -1,3 +1,14 @@
+#
+# Copyright by The HDF Group.
+# All rights reserved.
+#
+# This file is part of HDF5. The full HDF5 copyright notice, including
+# terms governing use, modification, and redistribution, is contained in
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
+#
##############################################################################
##############################################################################
@@ -27,28 +38,119 @@ add_test (
x-gnuplot
)
-add_test (NAME PERFORM_h5perf_serial COMMAND $<TARGET_FILE:h5perf_serial>)
+if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME PERFORM_h5perf_serial COMMAND $<TARGET_FILE:h5perf_serial>)
+else ()
+ add_test (NAME PERFORM_h5perf_serial COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:h5perf_serial>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=h5perf_serial.txt"
+ #-D "TEST_REFERENCE=h5perf_serial.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+endif ()
set_tests_properties (PERFORM_h5perf_serial PROPERTIES TIMEOUT 1800)
if (HDF5_BUILD_PERFORM_STANDALONE)
add_test (NAME PERFORM_h5perf_serial_alone COMMAND $<TARGET_FILE:h5perf_serial_alone>)
-endif (HDF5_BUILD_PERFORM_STANDALONE)
+endif ()
-add_test (NAME PERFORM_chunk COMMAND $<TARGET_FILE:chunk>)
+if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME PERFORM_chunk COMMAND $<TARGET_FILE:chunk>)
+else ()
+ add_test (NAME PERFORM_chunk COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:chunk>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=chunk.txt"
+ #-D "TEST_REFERENCE=chunk.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+endif ()
-add_test (NAME PERFORM_iopipe COMMAND $<TARGET_FILE:iopipe>)
+if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME PERFORM_iopipe COMMAND $<TARGET_FILE:iopipe>)
+else ()
+ add_test (NAME PERFORM_iopipe COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:iopipe>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=iopipe.txt"
+ #-D "TEST_REFERENCE=iopipe.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+endif ()
-add_test (NAME PERFORM_overhead COMMAND $<TARGET_FILE:overhead>)
+if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME PERFORM_overhead COMMAND $<TARGET_FILE:overhead>)
+else ()
+ add_test (NAME PERFORM_overhead COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:overhead>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=overhead.txt"
+ #-D "TEST_REFERENCE=overhead.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+endif ()
-add_test (NAME PERFORM_perf_meta COMMAND $<TARGET_FILE:perf_meta>)
+if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME PERFORM_perf_meta COMMAND $<TARGET_FILE:perf_meta>)
+else ()
+ add_test (NAME PERFORM_perf_meta COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:perf_meta>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=perf_meta.txt"
+ #-D "TEST_REFERENCE=perf_meta.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+endif ()
-add_test (NAME PERFORM_zip_perf_help COMMAND $<TARGET_FILE:zip_perf> "-h")
-add_test (NAME PERFORM_zip_perf COMMAND $<TARGET_FILE:zip_perf> tfilters.h5)
+if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME PERFORM_zip_perf_help COMMAND $<TARGET_FILE:zip_perf> "-h")
+else ()
+ add_test (NAME PERFORM_zip_perf_help COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:zip_perf>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=zip_perf-h.txt"
+ #-D "TEST_REFERENCE=zip_perf-h.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+endif ()
+if (HDF5_ENABLE_USING_MEMCHECKER)
+ add_test (NAME PERFORM_zip_perf COMMAND $<TARGET_FILE:zip_perf> tfilters.h5)
+else ()
+ add_test (NAME PERFORM_zip_perf COMMAND "${CMAKE_COMMAND}"
+ -D "TEST_PROGRAM=$<TARGET_FILE:zip_perf>"
+ -D "TEST_ARGS:STRING="
+ -D "TEST_EXPECT=0"
+ -D "TEST_SKIP_COMPARE=TRUE"
+ -D "TEST_OUTPUT=zip_perf.txt"
+ #-D "TEST_REFERENCE=zip_perf.out"
+ -D "TEST_FOLDER=${PROJECT_BINARY_DIR}"
+ -P "${HDF_RESOURCES_EXT_DIR}/runTest.cmake"
+ )
+endif ()
if (H5_HAVE_PARALLEL)
add_test (NAME PERFORM_h5perf COMMAND ${MPIEXEC} ${MPIEXEC_PREFLAGS} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} ${MPIEXEC_POSTFLAGS} $<TARGET_FILE:h5perf>)
if (HDF5_BUILD_PERFORM_STANDALONE)
add_test (NAME PERFORM_h5perf_alone COMMAND ${MPIEXEC} ${MPIEXEC_PREFLAGS} ${MPIEXEC_NUMPROC_FLAG} ${MPIEXEC_MAX_NUMPROCS} ${MPIEXEC_POSTFLAGS} $<TARGET_FILE:h5perf_alone>)
- endif (HDF5_BUILD_PERFORM_STANDALONE)
-endif (H5_HAVE_PARALLEL)
+ endif ()
+endif ()
diff --git a/tools/test/perform/COPYING b/tools/test/perform/COPYING
index 6903daf..6497ace 100644
--- a/tools/test/perform/COPYING
+++ b/tools/test/perform/COPYING
@@ -5,12 +5,9 @@
The files and subdirectories in this directory are part of HDF5.
The full HDF5 copyright notice, including terms governing use,
- modification, and redistribution, is contained in the files COPYING
- and Copyright.html. COPYING can be found at the root of the source
- code distribution tree; Copyright.html can be found at the root
- level of an installed copy of the electronic HDF5 document set and
- is linked from the top-level documents page. It can also be found
- at http://www.hdfgroup.org/HDF5/doc/Copyright.html. If you do not
- have access to either file, you may request a copy from
+ modification, and redistribution, is contained in the COPYING file
+ which can be found at the root of the source code distribution tree
+ or in https://support.hdfgroup.org/ftp/HDF5/releases. If you do
+ not have access to either file, you may request a copy from
help@hdfgroup.org.
diff --git a/tools/test/perform/Makefile.am b/tools/test/perform/Makefile.am
index 1af0e7f..5a89a66 100644
--- a/tools/test/perform/Makefile.am
+++ b/tools/test/perform/Makefile.am
@@ -5,12 +5,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
##
## Makefile.am
## Run automake to generate a Makefile.in from this file.
diff --git a/tools/test/perform/build_h5perf_alone.sh b/tools/test/perform/build_h5perf_alone.sh
index b65e863..30d272f 100644
--- a/tools/test/perform/build_h5perf_alone.sh
+++ b/tools/test/perform/build_h5perf_alone.sh
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Name: build_h5perf_alone.sh
#
diff --git a/tools/test/perform/build_h5perf_serial_alone.sh b/tools/test/perform/build_h5perf_serial_alone.sh
index 2566609..099e7f9 100644
--- a/tools/test/perform/build_h5perf_serial_alone.sh
+++ b/tools/test/perform/build_h5perf_serial_alone.sh
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
# Name: build_h5perf_serial_alone.sh
#
diff --git a/tools/test/perform/chunk.c b/tools/test/perform/chunk.c
index b1419ee..804f88e 100644
--- a/tools/test/perform/chunk.c
+++ b/tools/test/perform/chunk.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/tools/test/perform/gen_report.pl b/tools/test/perform/gen_report.pl
index 285f5d7..34b3a83 100644
--- a/tools/test/perform/gen_report.pl
+++ b/tools/test/perform/gen_report.pl
@@ -6,12 +6,10 @@
#
# This file is part of HDF5. The full HDF5 copyright notice, including
# terms governing use, modification, and redistribution, is contained in
-# the files COPYING and Copyright.html. COPYING can be found at the root
-# of the source code distribution tree; Copyright.html can be found at the
-# root level of an installed copy of the electronic HDF5 document set and
-# is linked from the top-level documents page. It can also be found at
-# http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have
-# access to either file, you may request a copy from help@hdfgroup.org.
+# the COPYING file, which can be found at the root of the source code
+# distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases.
+# If you do not have access to either file, you may request a copy from
+# help@hdfgroup.org.
#
#
diff --git a/tools/test/perform/iopipe.c b/tools/test/perform/iopipe.c
index fd62d37..eac099b 100644
--- a/tools/test/perform/iopipe.c
+++ b/tools/test/perform/iopipe.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/tools/test/perform/overhead.c b/tools/test/perform/overhead.c
index 98093c7..81f9de6 100644
--- a/tools/test/perform/overhead.c
+++ b/tools/test/perform/overhead.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/tools/test/perform/perf.c b/tools/test/perform/perf.c
index 7b9590c..b421328 100644
--- a/tools/test/perform/perf.c
+++ b/tools/test/perform/perf.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/tools/test/perform/perf_meta.c b/tools/test/perform/perf_meta.c
index 2c3a19c..c24e598 100644
--- a/tools/test/perform/perf_meta.c
+++ b/tools/test/perform/perf_meta.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/tools/test/perform/pio_engine.c b/tools/test/perform/pio_engine.c
index ab11efd..1c0d621 100644
--- a/tools/test/perform/pio_engine.c
+++ b/tools/test/perform/pio_engine.c
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/tools/test/perform/pio_perf.c b/tools/test/perform/pio_perf.c
index c1bfadb..597629e 100644
--- a/tools/test/perform/pio_perf.c
+++ b/tools/test/perform/pio_perf.c
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
diff --git a/tools/test/perform/pio_perf.h b/tools/test/perform/pio_perf.h
index 89cf3a8..b595c90 100644
--- a/tools/test/perform/pio_perf.h
+++ b/tools/test/perform/pio_perf.h
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef PIO_PERF_H__
diff --git a/tools/test/perform/pio_standalone.c b/tools/test/perform/pio_standalone.c
index 475c678..bd5fb6f 100644
--- a/tools/test/perform/pio_standalone.c
+++ b/tools/test/perform/pio_standalone.c
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
diff --git a/tools/test/perform/pio_standalone.h b/tools/test/perform/pio_standalone.h
index f088c93..a40cff8 100644
--- a/tools/test/perform/pio_standalone.h
+++ b/tools/test/perform/pio_standalone.h
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef PIO_STANDALONE_H__
@@ -191,15 +189,6 @@ H5_DLL int HDfprintf (FILE *stream, const char *fmt, ...);
#define HDoff_t off_t
#endif
-#ifndef H5_HAVE_WIN32_API
-/* These definitions differ in Windows and are defined in
- * H5win32defs for that platform.
- */
-typedef struct stat h5_stat_t;
-typedef off_t h5_stat_size_t;
-#define HDoff_t off_t
-#endif /* H5_HAVE_WIN32_API */
-
#define HDftell(F) ftell(F)
#define HDftruncate(F,L) ftruncate(F,L)
#define HDfwrite(M,Z,N,F) fwrite(M,Z,N,F)
diff --git a/tools/test/perform/sio_engine.c b/tools/test/perform/sio_engine.c
index abf1fa6..4fead3f 100644
--- a/tools/test/perform/sio_engine.c
+++ b/tools/test/perform/sio_engine.c
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -1064,6 +1062,7 @@ done:
do_fopen(parameters *param, char *fname, file_descr *fd /*out*/, int flags)
{
int ret_code = SUCCESS;
+ hid_t fcpl;
switch (param->io_type) {
case POSIXIO:
@@ -1088,9 +1087,17 @@ do_fopen(parameters *param, char *fname, file_descr *fd /*out*/, int flags)
GOTOERROR(FAIL);
}
+ fcpl = H5Pcreate(H5P_FILE_CREATE);
+ if(param->page_size) {
+ H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 0, (hsize_t)1);
+ H5Pset_file_space_page_size(fcpl, param->page_size);
+ if(param->page_buffer_size)
+ H5Pset_page_buffer_size(fapl, param->page_buffer_size, 0, 0);
+ }
+
/* create the parallel file */
if (flags & (SIO_CREATE | SIO_WRITE)) {
- fd->h5fd = H5Fcreate(fname, H5F_ACC_TRUNC, H5P_DEFAULT, fapl);
+ fd->h5fd = H5Fcreate(fname, H5F_ACC_TRUNC, fcpl, fapl);
} else {
fd->h5fd = H5Fopen(fname, H5F_ACC_RDONLY, fapl);
}
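
The sio_engine.c hunk above enables HDF5 1.10 paged file-space management: the paged aggregation strategy and its page size are file-creation properties (set on the FCPL), while the page buffer cache is a file-access property (set on the FAPL). Below is a minimal standalone sketch of the same setup, not part of the patch; the file name and sizes are illustrative only and error checking is omitted.

    /* Sketch: create a file with paged aggregation plus a page buffer,
     * mirroring the FCPL/FAPL setup added in do_fopen() above.
     * Assumes HDF5 1.10+; "paged.h5" and the sizes are illustrative. */
    #include "hdf5.h"

    int main(void)
    {
        hsize_t page_size = 4096;        /* file-space page size (bytes)    */
        size_t  buf_size  = 64 * 4096;   /* page buffer size; >= page_size  */
        hid_t   fcpl, fapl, fid;

        fcpl = H5Pcreate(H5P_FILE_CREATE);
        fapl = H5Pcreate(H5P_FILE_ACCESS);

        /* Paged aggregation strategy and page size live on the FCPL. */
        H5Pset_file_space_strategy(fcpl, H5F_FSPACE_STRATEGY_PAGE, 0, (hsize_t)1);
        H5Pset_file_space_page_size(fcpl, page_size);

        /* The page buffer cache lives on the FAPL. */
        H5Pset_page_buffer_size(fapl, buf_size, 0, 0);

        fid = H5Fcreate("paged.h5", H5F_ACC_TRUNC, fcpl, fapl);

        H5Pclose(fcpl);
        H5Pclose(fapl);
        if (fid >= 0)
            H5Fclose(fid);
        return 0;
    }

The chosen strategy and page size then appear in h5dump's SUPER_BLOCK output, as reflected in the updated .ddl expected files later in this diff.
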
diff --git a/tools/test/perform/sio_perf.c b/tools/test/perform/sio_perf.c
index aceccb0..e77b99e 100644
--- a/tools/test/perform/sio_perf.c
+++ b/tools/test/perform/sio_perf.c
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/*
@@ -100,11 +98,7 @@ static const char *progname = "h5perf_serial";
* It seems that only the options that accept additional information
* such as dataset size (-e) require the colon next to it.
*/
-#if 1
-static const char *s_opts = "a:A:B:c:Cd:D:e:F:ghi:Imno:p:P:r:stT:v:wx:X:";
-#else
-static const char *s_opts = "a:A:bB:c:Cd:D:e:F:ghi:Imno:p:P:r:stT:wx:X:";
-#endif /* 1 */
+static const char *s_opts = "a:A:B:b:c:Cd:D:e:F:G:ghi:Imno:p:P:r:stT:v:wx:X:";
static struct long_options l_opts[] = {
{ "align", require_arg, 'a' },
{ "alig", require_arg, 'a' },
@@ -296,7 +290,8 @@ struct options {
int h5_extendable; /* Perform the write tests only */
int verify; /* Verify data correctness */
vfdtype vfd; /* File driver */
-
+ size_t page_buffer_size;
+ size_t page_size;
};
typedef struct _minmax {
@@ -405,6 +400,8 @@ run_test_loop(struct options *opts)
parms.h5_write_only = opts->h5_write_only;
parms.verify = opts->verify;
parms.vfd = opts->vfd;
+ parms.page_buffer_size = opts->page_buffer_size;
+ parms.page_size = opts->page_size;
/* load multidimensional options */
parms.num_bytes = 1;
@@ -869,6 +866,16 @@ report_parameters(struct options *opts)
recover_size_and_print((long long)opts->buf_size[i], " ");
HDfprintf(output, "\n");
+ if(opts->page_size) {
+ HDfprintf(output, "Page Aggregation Enabled. Page size = %ld\n", opts->page_size);
+ if(opts->page_buffer_size)
+ HDfprintf(output, "Page Buffering Enabled. Page Buffer size = %ld\n", opts->page_buffer_size);
+ else
+ HDfprintf(output, "Page Buffering Disabled\n");
+ }
+ else
+ HDfprintf(output, "Page Aggregation Disabled\n");
+
HDfprintf(output, "Dimension access order=");
for (i=0; i<rank; i++)
recover_size_and_print((long long)opts->order[i], " ");
@@ -945,6 +952,9 @@ parse_command_line(int argc, char *argv[])
cl_opts = (struct options *)HDmalloc(sizeof(struct options));
+ cl_opts->page_buffer_size = 0;
+ cl_opts->page_size = 0;
+
cl_opts->output_file = NULL;
cl_opts->io_types = 0; /* will set default after parsing options */
cl_opts->num_iters = 1;
@@ -979,6 +989,12 @@ parse_command_line(int argc, char *argv[])
case 'a':
cl_opts->h5_alignment = parse_size_directive(opt_arg);
break;
+ case 'G':
+ cl_opts->page_size = parse_size_directive(opt_arg);
+ break;
+ case 'b':
+ cl_opts->page_buffer_size = parse_size_directive(opt_arg);
+ break;
case 'A':
{
const char *end = opt_arg;
diff --git a/tools/test/perform/sio_perf.h b/tools/test/perform/sio_perf.h
index 7a7d708..366b791 100644
--- a/tools/test/perform/sio_perf.h
+++ b/tools/test/perform/sio_perf.h
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef SIO_PERF_H__
@@ -67,6 +65,8 @@ typedef struct parameters_ {
int h5_extendable; /* Make HDF5 dataset chunked */
int h5_write_only; /* Perform the write tests only */
int verify; /* Verify data correctness */
+ size_t page_size;
+ size_t page_buffer_size;
} parameters;
typedef struct results_ {
diff --git a/tools/test/perform/sio_standalone.c b/tools/test/perform/sio_standalone.c
index d92ed30..dfdbc55 100644
--- a/tools/test/perform/sio_standalone.c
+++ b/tools/test/perform/sio_standalone.c
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
diff --git a/tools/test/perform/sio_standalone.h b/tools/test/perform/sio_standalone.h
index 83dffa2..e73a6b5 100644
--- a/tools/test/perform/sio_standalone.h
+++ b/tools/test/perform/sio_standalone.h
@@ -4,12 +4,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
#ifndef SIO_STANDALONE_H__
@@ -213,15 +211,6 @@ H5_DLL int HDfprintf (FILE *stream, const char *fmt, ...);
#define HDoff_t off_t
#endif
-#ifndef H5_HAVE_WIN32_API
-/* These definitions differ in Windows and are defined in
- * H5win32defs for that platform.
- */
-typedef struct stat h5_stat_t;
-typedef off_t h5_stat_size_t;
-#define HDoff_t off_t
-#endif /* H5_HAVE_WIN32_API */
-
#define HDftell(F) ftell(F)
#define HDftruncate(F,L) ftruncate(F,L)
#define HDfwrite(M,Z,N,F) fwrite(M,Z,N,F)
diff --git a/tools/test/perform/zip_perf.c b/tools/test/perform/zip_perf.c
index d9b1fa2..0636d3b 100644
--- a/tools/test/perform/zip_perf.c
+++ b/tools/test/perform/zip_perf.c
@@ -5,12 +5,10 @@
* *
* This file is part of HDF5. The full HDF5 copyright notice, including *
* terms governing use, modification, and redistribution, is contained in *
- * the files COPYING and Copyright.html. COPYING can be found at the root *
- * of the source code distribution tree; Copyright.html can be found at the *
- * root level of an installed copy of the electronic HDF5 document set and *
- * is linked from the top-level documents page. It can also be found at *
- * http://hdfgroup.org/HDF5/doc/Copyright.html. If you do not have *
- * access to either file, you may request a copy from help@hdfgroup.org. *
+ * the COPYING file, which can be found at the root of the source code *
+ * distribution tree, or in https://support.hdfgroup.org/ftp/HDF5/releases. *
+ * If you do not have access to either file, you may request a copy from *
+ * help@hdfgroup.org. *
* * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * */
/* ===========================================================================
diff --git a/tools/testfiles/file_space.ddl b/tools/testfiles/file_space.ddl
index bed5349..6b8aa47 100644
--- a/tools/testfiles/file_space.ddl
+++ b/tools/testfiles/file_space.ddl
@@ -9,8 +9,10 @@ SUPER_BLOCK {
BTREE_RANK 16
BTREE_LEAF 4
ISTORE_K 32
- FILE_SPACE_STRATEGY H5F_FILE_SPACE_AGGR_VFD
- FREE_SPACE_THRESHOLD 10
+ FILE_SPACE_STRATEGY H5F_FSPACE_STRATEGY_NONE
+ FREE_SPACE_PERSIST FALSE
+ FREE_SPACE_SECTION_THRESHOLD 1
+ FILE_SPACE_PAGE_SIZE 8192
USER_BLOCK {
USERBLOCK_SIZE 0
}
diff --git a/tools/testfiles/file_space.h5 b/tools/testfiles/file_space.h5
index 425d0c2..d9e2890 100644
--- a/tools/testfiles/file_space.h5
+++ b/tools/testfiles/file_space.h5
Binary files differ
diff --git a/tools/testfiles/tbitfields_be.h5.xml b/tools/testfiles/tbitfields_be.h5.xml
new file mode 100644
index 0000000..26bbd91
--- /dev/null
+++ b/tools/testfiles/tbitfields_be.h5.xml
@@ -0,0 +1,103 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<hdf5:HDF5-File xmlns:hdf5="http://hdfgroup.org/HDF5/XML/schema/HDF5-File.xsd" xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xsi:schemaLocation="http://hdfgroup.org/HDF5/XML/schema/HDF5-File http://www.hdfgroup.org/HDF5/XML/schema/HDF5-File.xsd">
+<hdf5:RootGroup OBJ-XID="xid_696" H5Path="/">
+ <hdf5:Group Name="typetests" OBJ-XID="xid_1344" H5Path="/typetests" Parents="xid_696" H5ParentPaths="/" >
+ <hdf5:Dataset Name="bitfield_1" OBJ-XID="xid_1720" H5Path= "/typetests/bitfield_1" Parents="xid_1344" H5ParentPaths="/typetests">
+ <hdf5:StorageLayout>
+ <hdf5:ContiguousLayout/>
+ </hdf5:StorageLayout>
+ <hdf5:FillValueInfo FillTime="FillIfSet" AllocationTime="Late">
+ <hdf5:FillValue>
+ <hdf5:NoFill/>
+ </hdf5:FillValue>
+ </hdf5:FillValueInfo>
+ <hdf5:Dataspace>
+ <hdf5:SimpleDataspace Ndims="1">
+ <hdf5:Dimension DimSize="32" MaxDimSize="32"/>
+ </hdf5:SimpleDataspace>
+ </hdf5:Dataspace>
+ <hdf5:DataType>
+ <hdf5:AtomicType>
+ <hdf5:BitfieldType ByteOrder="LE" Size="1"/>
+ </hdf5:AtomicType>
+ </hdf5:DataType>
+ <hdf5:Data>
+ <hdf5:DataFromFile>
+ 0xff
+ 0xfe
+ 0xfd
+ 0xfc
+ 0xfb
+ 0xfa
+ 0xf9
+ 0xf8
+ 0xf7
+ 0xf6
+ 0xf5
+ 0xf4
+ 0xf3
+ 0xf2
+ 0xf1
+ 0xf0
+ 0xef
+ 0xee
+ 0xed
+ 0xec
+ 0xeb
+ 0xea
+ 0xe9
+ 0xe8
+ 0xe7
+ 0xe6
+ 0xe5
+ 0xe4
+ 0xe3
+ 0xe2
+ 0xe1
+ 0xe0
+ </hdf5:DataFromFile>
+ </hdf5:Data>
+ </hdf5:Dataset>
+ <hdf5:Dataset Name="bitfield_2" OBJ-XID="xid_2352" H5Path= "/typetests/bitfield_2" Parents="xid_1344" H5ParentPaths="/typetests">
+ <hdf5:StorageLayout>
+ <hdf5:ContiguousLayout/>
+ </hdf5:StorageLayout>
+ <hdf5:FillValueInfo FillTime="FillIfSet" AllocationTime="Late">
+ <hdf5:FillValue>
+ <hdf5:NoFill/>
+ </hdf5:FillValue>
+ </hdf5:FillValueInfo>
+ <hdf5:Dataspace>
+ <hdf5:SimpleDataspace Ndims="1">
+ <hdf5:Dimension DimSize="16" MaxDimSize="16"/>
+ </hdf5:SimpleDataspace>
+ </hdf5:Dataspace>
+ <hdf5:DataType>
+ <hdf5:AtomicType>
+ <hdf5:BitfieldType ByteOrder="LE" Size="2"/>
+ </hdf5:AtomicType>
+ </hdf5:DataType>
+ <hdf5:Data>
+ <hdf5:DataFromFile>
+ fe:ff
+ fc:fd
+ fa:fb
+ f8:f9
+ f6:f7
+ f4:f5
+ f2:f3
+ f0:f1
+ ee:ef
+ ec:ed
+ ea:eb
+ e8:e9
+ e6:e7
+ e4:e5
+ e2:e3
+ e0:e1
+ </hdf5:DataFromFile>
+ </hdf5:Data>
+ </hdf5:Dataset>
+ </hdf5:Group>
+</hdf5:RootGroup>
+</hdf5:HDF5-File>
diff --git a/tools/testfiles/tbitfields.h5.xml b/tools/testfiles/tbitfields_le.h5.xml
index c8ba026..c8ba026 100644
--- a/tools/testfiles/tbitfields.h5.xml
+++ b/tools/testfiles/tbitfields_le.h5.xml
diff --git a/tools/testfiles/tbitnopaque.h5 b/tools/testfiles/tbitnopaque.h5
index 2b1f23d..089a86d 100644
--- a/tools/testfiles/tbitnopaque.h5
+++ b/tools/testfiles/tbitnopaque.h5
Binary files differ
diff --git a/tools/testfiles/tbitnopaque_be.ddl b/tools/testfiles/tbitnopaque_be.ddl
new file mode 100644
index 0000000..b0de7c4
--- /dev/null
+++ b/tools/testfiles/tbitnopaque_be.ddl
@@ -0,0 +1,293 @@
+HDF5 "tbitnopaque.h5" {
+GROUP "/" {
+ GROUP "bittypetests" {
+ DATASET "bitfield_1" {
+ DATATYPE H5T_STD_B8LE
+ DATASPACE SIMPLE { ( 32 ) / ( 32 ) }
+ DATA {
+ (0): 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, 0xf7, 0xf6,
+ (10): 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, 0xef, 0xee, 0xed, 0xec,
+ (20): 0xeb, 0xea, 0xe9, 0xe8, 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2,
+ (30): 0xe1, 0xe0
+ }
+ }
+ DATASET "bitfield_2" {
+ DATATYPE H5T_STD_B16LE
+ DATASPACE SIMPLE { ( 32 ) / ( 32 ) }
+ DATA {
+ (0): ff:ff, ff:ef, ff:df, ff:cf, ff:bf, ff:af, ff:9f, ff:8f, ff:7f,
+ (9): ff:6f, ff:5f, ff:4f, ff:3f, ff:2f, ff:1f, ff:0f, fe:ff, fe:ef,
+ (18): fe:df, fe:cf, fe:bf, fe:af, fe:9f, fe:8f, fe:7f, fe:6f, fe:5f,
+ (27): fe:4f, fe:3f, fe:2f, fe:1f, fe:0f
+ }
+ }
+ DATASET "bitfield_3" {
+ DATATYPE H5T_STD_B32LE
+ DATASPACE SIMPLE { ( 32 ) / ( 32 ) }
+ DATA {
+ (0): ff:ff:ff:ff, ff:ff:ff:df, ff:ff:ff:bf, ff:ff:ff:9f,
+ (4): ff:ff:ff:7f, ff:ff:ff:5f, ff:ff:ff:3f, ff:ff:ff:1f,
+ (8): ff:ff:fe:ff, ff:ff:fe:df, ff:ff:fe:bf, ff:ff:fe:9f,
+ (12): ff:ff:fe:7f, ff:ff:fe:5f, ff:ff:fe:3f, ff:ff:fe:1f,
+ (16): ff:ff:fd:ff, ff:ff:fd:df, ff:ff:fd:bf, ff:ff:fd:9f,
+ (20): ff:ff:fd:7f, ff:ff:fd:5f, ff:ff:fd:3f, ff:ff:fd:1f,
+ (24): ff:ff:fc:ff, ff:ff:fc:df, ff:ff:fc:bf, ff:ff:fc:9f,
+ (28): ff:ff:fc:7f, ff:ff:fc:5f, ff:ff:fc:3f, ff:ff:fc:1f
+ }
+ }
+ DATASET "bitfield_4" {
+ DATATYPE H5T_STD_B64LE
+ DATASPACE SIMPLE { ( 32 ) / ( 32 ) }
+ DATA {
+ (0): ff:ff:ff:ff:ff:ff:ff:ff, ff:ff:ff:ff:ff:ff:ff:bf,
+ (2): ff:ff:ff:ff:ff:ff:ff:7f, ff:ff:ff:ff:ff:ff:ff:3f,
+ (4): ff:ff:ff:ff:ff:ff:fe:ff, ff:ff:ff:ff:ff:ff:fe:bf,
+ (6): ff:ff:ff:ff:ff:ff:fe:7f, ff:ff:ff:ff:ff:ff:fe:3f,
+ (8): ff:ff:ff:ff:ff:ff:fd:ff, ff:ff:ff:ff:ff:ff:fd:bf,
+ (10): ff:ff:ff:ff:ff:ff:fd:7f, ff:ff:ff:ff:ff:ff:fd:3f,
+ (12): ff:ff:ff:ff:ff:ff:fc:ff, ff:ff:ff:ff:ff:ff:fc:bf,
+ (14): ff:ff:ff:ff:ff:ff:fc:7f, ff:ff:ff:ff:ff:ff:fc:3f,
+ (16): ff:ff:ff:ff:ff:ff:fb:ff, ff:ff:ff:ff:ff:ff:fb:bf,
+ (18): ff:ff:ff:ff:ff:ff:fb:7f, ff:ff:ff:ff:ff:ff:fb:3f,
+ (20): ff:ff:ff:ff:ff:ff:fa:ff, ff:ff:ff:ff:ff:ff:fa:bf,
+ (22): ff:ff:ff:ff:ff:ff:fa:7f, ff:ff:ff:ff:ff:ff:fa:3f,
+ (24): ff:ff:ff:ff:ff:ff:f9:ff, ff:ff:ff:ff:ff:ff:f9:bf,
+ (26): ff:ff:ff:ff:ff:ff:f9:7f, ff:ff:ff:ff:ff:ff:f9:3f,
+ (28): ff:ff:ff:ff:ff:ff:f8:ff, ff:ff:ff:ff:ff:ff:f8:bf,
+ (30): ff:ff:ff:ff:ff:ff:f8:7f, ff:ff:ff:ff:ff:ff:f8:3f
+ }
+ }
+ }
+ GROUP "cmpdtypetests" {
+ DATASET "compound_1" {
+ DATATYPE H5T_COMPOUND {
+ H5T_STD_B8LE "a";
+ H5T_STD_B16LE "b";
+ H5T_STD_B32LE "c";
+ H5T_STD_B64LE "d";
+ }
+ DATASPACE SIMPLE { ( 32 ) / ( 32 ) }
+ DATA {
+ (0): {
+ 0xff,
+ ff:ff,
+ ff:ff:ff:ff,
+ ff:ff:ff:ff:ff:ff:ff:ff
+ },
+ (1): {
+ 0xfe,
+ ff:ef,
+ ff:ff:ff:df,
+ ff:ff:ff:ff:ff:ff:ff:bf
+ },
+ (2): {
+ 0xfd,
+ ff:df,
+ ff:ff:ff:bf,
+ ff:ff:ff:ff:ff:ff:ff:7f
+ },
+ (3): {
+ 0xfc,
+ ff:cf,
+ ff:ff:ff:9f,
+ ff:ff:ff:ff:ff:ff:ff:3f
+ },
+ (4): {
+ 0xfb,
+ ff:bf,
+ ff:ff:ff:7f,
+ ff:ff:ff:ff:ff:ff:fe:ff
+ },
+ (5): {
+ 0xfa,
+ ff:af,
+ ff:ff:ff:5f,
+ ff:ff:ff:ff:ff:ff:fe:bf
+ },
+ (6): {
+ 0xf9,
+ ff:9f,
+ ff:ff:ff:3f,
+ ff:ff:ff:ff:ff:ff:fe:7f
+ },
+ (7): {
+ 0xf8,
+ ff:8f,
+ ff:ff:ff:1f,
+ ff:ff:ff:ff:ff:ff:fe:3f
+ },
+ (8): {
+ 0xf7,
+ ff:7f,
+ ff:ff:fe:ff,
+ ff:ff:ff:ff:ff:ff:fd:ff
+ },
+ (9): {
+ 0xf6,
+ ff:6f,
+ ff:ff:fe:df,
+ ff:ff:ff:ff:ff:ff:fd:bf
+ },
+ (10): {
+ 0xf5,
+ ff:5f,
+ ff:ff:fe:bf,
+ ff:ff:ff:ff:ff:ff:fd:7f
+ },
+ (11): {
+ 0xf4,
+ ff:4f,
+ ff:ff:fe:9f,
+ ff:ff:ff:ff:ff:ff:fd:3f
+ },
+ (12): {
+ 0xf3,
+ ff:3f,
+ ff:ff:fe:7f,
+ ff:ff:ff:ff:ff:ff:fc:ff
+ },
+ (13): {
+ 0xf2,
+ ff:2f,
+ ff:ff:fe:5f,
+ ff:ff:ff:ff:ff:ff:fc:bf
+ },
+ (14): {
+ 0xf1,
+ ff:1f,
+ ff:ff:fe:3f,
+ ff:ff:ff:ff:ff:ff:fc:7f
+ },
+ (15): {
+ 0xf0,
+ ff:0f,
+ ff:ff:fe:1f,
+ ff:ff:ff:ff:ff:ff:fc:3f
+ },
+ (16): {
+ 0xef,
+ fe:ff,
+ ff:ff:fd:ff,
+ ff:ff:ff:ff:ff:ff:fb:ff
+ },
+ (17): {
+ 0xee,
+ fe:ef,
+ ff:ff:fd:df,
+ ff:ff:ff:ff:ff:ff:fb:bf
+ },
+ (18): {
+ 0xed,
+ fe:df,
+ ff:ff:fd:bf,
+ ff:ff:ff:ff:ff:ff:fb:7f
+ },
+ (19): {
+ 0xec,
+ fe:cf,
+ ff:ff:fd:9f,
+ ff:ff:ff:ff:ff:ff:fb:3f
+ },
+ (20): {
+ 0xeb,
+ fe:bf,
+ ff:ff:fd:7f,
+ ff:ff:ff:ff:ff:ff:fa:ff
+ },
+ (21): {
+ 0xea,
+ fe:af,
+ ff:ff:fd:5f,
+ ff:ff:ff:ff:ff:ff:fa:bf
+ },
+ (22): {
+ 0xe9,
+ fe:9f,
+ ff:ff:fd:3f,
+ ff:ff:ff:ff:ff:ff:fa:7f
+ },
+ (23): {
+ 0xe8,
+ fe:8f,
+ ff:ff:fd:1f,
+ ff:ff:ff:ff:ff:ff:fa:3f
+ },
+ (24): {
+ 0xe7,
+ fe:7f,
+ ff:ff:fc:ff,
+ ff:ff:ff:ff:ff:ff:f9:ff
+ },
+ (25): {
+ 0xe6,
+ fe:6f,
+ ff:ff:fc:df,
+ ff:ff:ff:ff:ff:ff:f9:bf
+ },
+ (26): {
+ 0xe5,
+ fe:5f,
+ ff:ff:fc:bf,
+ ff:ff:ff:ff:ff:ff:f9:7f
+ },
+ (27): {
+ 0xe4,
+ fe:4f,
+ ff:ff:fc:9f,
+ ff:ff:ff:ff:ff:ff:f9:3f
+ },
+ (28): {
+ 0xe3,
+ fe:3f,
+ ff:ff:fc:7f,
+ ff:ff:ff:ff:ff:ff:f8:ff
+ },
+ (29): {
+ 0xe2,
+ fe:2f,
+ ff:ff:fc:5f,
+ ff:ff:ff:ff:ff:ff:f8:bf
+ },
+ (30): {
+ 0xe1,
+ fe:1f,
+ ff:ff:fc:3f,
+ ff:ff:ff:ff:ff:ff:f8:7f
+ },
+ (31): {
+ 0xe0,
+ fe:0f,
+ ff:ff:fc:1f,
+ ff:ff:ff:ff:ff:ff:f8:3f
+ }
+ }
+ }
+ }
+ GROUP "opaquetypetests" {
+ DATASET "opaque_1" {
+ DATATYPE H5T_OPAQUE {
+ OPAQUE_TAG "1-byte opaque type";
+ }
+ DATASPACE SIMPLE { ( 32 ) / ( 32 ) }
+ DATA {
+ (0): 0xff, 0xfe, 0xfd, 0xfc, 0xfb, 0xfa, 0xf9, 0xf8, 0xf7, 0xf6,
+ (10): 0xf5, 0xf4, 0xf3, 0xf2, 0xf1, 0xf0, 0xef, 0xee, 0xed, 0xec,
+ (20): 0xeb, 0xea, 0xe9, 0xe8, 0xe7, 0xe6, 0xe5, 0xe4, 0xe3, 0xe2,
+ (30): 0xe1, 0xe0
+ }
+ }
+ DATASET "opaque_2" {
+ DATATYPE H5T_OPAQUE {
+ OPAQUE_TAG "2-byte opaque type";
+ }
+ DATASPACE SIMPLE { ( 32 ) / ( 32 ) }
+ DATA {
+ (0): ff:ff, ef:ff, df:ff, cf:ff, bf:ff, af:ff, 9f:ff, 8f:ff, 7f:ff,
+ (9): 6f:ff, 5f:ff, 4f:ff, 3f:ff, 2f:ff, 1f:ff, 0f:ff, ff:fe, ef:fe,
+ (18): df:fe, cf:fe, bf:fe, af:fe, 9f:fe, 8f:fe, 7f:fe, 6f:fe, 5f:fe,
+ (27): 4f:fe, 3f:fe, 2f:fe, 1f:fe, 0f:fe
+ }
+ }
+ }
+}
+}
diff --git a/tools/testfiles/tbitnopaque.ddl b/tools/testfiles/tbitnopaque_le.ddl
index 9000463..0c59c0b 100644
--- a/tools/testfiles/tbitnopaque.ddl
+++ b/tools/testfiles/tbitnopaque_le.ddl
@@ -15,24 +15,24 @@ GROUP "/" {
DATATYPE H5T_STD_B16LE
DATASPACE SIMPLE { ( 32 ) / ( 32 ) }
DATA {
- (0): ff:ff, 00:00, ef:ff, 00:00, df:ff, 00:00, cf:ff, 00:00, bf:ff,
- (9): 00:00, af:ff, 00:00, 9f:ff, 00:00, 8f:ff, 00:00, 7f:ff, 00:00,
- (18): 6f:ff, 00:00, 5f:ff, 00:00, 4f:ff, 00:00, 3f:ff, 00:00, 2f:ff,
- (27): 00:00, 1f:ff, 00:00, 0f:ff, 00:00
+ (0): ff:ff, ef:ff, df:ff, cf:ff, bf:ff, af:ff, 9f:ff, 8f:ff, 7f:ff,
+ (9): 6f:ff, 5f:ff, 4f:ff, 3f:ff, 2f:ff, 1f:ff, 0f:ff, ff:fe, ef:fe,
+ (18): df:fe, cf:fe, bf:fe, af:fe, 9f:fe, 8f:fe, 7f:fe, 6f:fe, 5f:fe,
+ (27): 4f:fe, 3f:fe, 2f:fe, 1f:fe, 0f:fe
}
}
DATASET "bitfield_3" {
DATATYPE H5T_STD_B32LE
DATASPACE SIMPLE { ( 32 ) / ( 32 ) }
DATA {
- (0): ff:ff:ff:ff, 00:00:00:00, df:ff:ff:ff, 00:00:00:00,
- (4): bf:ff:ff:ff, 00:00:00:00, 9f:ff:ff:ff, 00:00:00:00,
- (8): 7f:ff:ff:ff, 00:00:00:00, 5f:ff:ff:ff, 00:00:00:00,
- (12): 3f:ff:ff:ff, 00:00:00:00, 1f:ff:ff:ff, 00:00:00:00,
- (16): ff:fe:ff:ff, 00:00:00:00, df:fe:ff:ff, 00:00:00:00,
- (20): bf:fe:ff:ff, 00:00:00:00, 9f:fe:ff:ff, 00:00:00:00,
- (24): 7f:fe:ff:ff, 00:00:00:00, 5f:fe:ff:ff, 00:00:00:00,
- (28): 3f:fe:ff:ff, 00:00:00:00, 1f:fe:ff:ff, 00:00:00:00
+ (0): ff:ff:ff:ff, df:ff:ff:ff, bf:ff:ff:ff, 9f:ff:ff:ff,
+ (4): 7f:ff:ff:ff, 5f:ff:ff:ff, 3f:ff:ff:ff, 1f:ff:ff:ff,
+ (8): ff:fe:ff:ff, df:fe:ff:ff, bf:fe:ff:ff, 9f:fe:ff:ff,
+ (12): 7f:fe:ff:ff, 5f:fe:ff:ff, 3f:fe:ff:ff, 1f:fe:ff:ff,
+ (16): ff:fd:ff:ff, df:fd:ff:ff, bf:fd:ff:ff, 9f:fd:ff:ff,
+ (20): 7f:fd:ff:ff, 5f:fd:ff:ff, 3f:fd:ff:ff, 1f:fd:ff:ff,
+ (24): ff:fc:ff:ff, df:fc:ff:ff, bf:fc:ff:ff, 9f:fc:ff:ff,
+ (28): 7f:fc:ff:ff, 5f:fc:ff:ff, 3f:fc:ff:ff, 1f:fc:ff:ff
}
}
DATASET "bitfield_4" {
@@ -282,10 +282,10 @@ GROUP "/" {
}
DATASPACE SIMPLE { ( 32 ) / ( 32 ) }
DATA {
- (0): ff:ff, 00:00, ef:ff, 00:00, df:ff, 00:00, cf:ff, 00:00, bf:ff,
- (9): 00:00, af:ff, 00:00, 9f:ff, 00:00, 8f:ff, 00:00, 7f:ff, 00:00,
- (18): 6f:ff, 00:00, 5f:ff, 00:00, 4f:ff, 00:00, 3f:ff, 00:00, 2f:ff,
- (27): 00:00, 1f:ff, 00:00, 0f:ff, 00:00
+ (0): ff:ff, ef:ff, df:ff, cf:ff, bf:ff, af:ff, 9f:ff, 8f:ff, 7f:ff,
+ (9): 6f:ff, 5f:ff, 4f:ff, 3f:ff, 2f:ff, 1f:ff, 0f:ff, ff:fe, ef:fe,
+ (18): df:fe, cf:fe, bf:fe, af:fe, 9f:fe, 8f:fe, 7f:fe, 6f:fe, 5f:fe,
+ (27): 4f:fe, 3f:fe, 2f:fe, 1f:fe, 0f:fe
}
}
}
diff --git a/tools/testfiles/tboot1.ddl b/tools/testfiles/tboot1.ddl
index 4758948..73ae4e5 100644
--- a/tools/testfiles/tboot1.ddl
+++ b/tools/testfiles/tboot1.ddl
@@ -9,8 +9,10 @@ SUPER_BLOCK {
BTREE_RANK 16
BTREE_LEAF 4
ISTORE_K 32
- FILE_SPACE_STRATEGY H5F_FILE_SPACE_ALL
- FREE_SPACE_THRESHOLD 1
+ FILE_SPACE_STRATEGY H5F_FSPACE_STRATEGY_FSM_AGGR
+ FREE_SPACE_PERSIST FALSE
+ FREE_SPACE_SECTION_THRESHOLD 1
+ FILE_SPACE_PAGE_SIZE 4096
USER_BLOCK {
USERBLOCK_SIZE 0
}
diff --git a/tools/testfiles/tboot2.ddl b/tools/testfiles/tboot2.ddl
index d83b92b..504a634 100644
--- a/tools/testfiles/tboot2.ddl
+++ b/tools/testfiles/tboot2.ddl
@@ -9,8 +9,10 @@ SUPER_BLOCK {
BTREE_RANK 16
BTREE_LEAF 4
ISTORE_K 32
- FILE_SPACE_STRATEGY H5F_FILE_SPACE_ALL
- FREE_SPACE_THRESHOLD 1
+ FILE_SPACE_STRATEGY H5F_FSPACE_STRATEGY_FSM_AGGR
+ FREE_SPACE_PERSIST FALSE
+ FREE_SPACE_SECTION_THRESHOLD 1
+ FILE_SPACE_PAGE_SIZE 4096
USER_BLOCK {
USERBLOCK_SIZE 0
}
diff --git a/tools/testfiles/tboot2A.ddl b/tools/testfiles/tboot2A.ddl
index d83b92b..504a634 100644
--- a/tools/testfiles/tboot2A.ddl
+++ b/tools/testfiles/tboot2A.ddl
@@ -9,8 +9,10 @@ SUPER_BLOCK {
BTREE_RANK 16
BTREE_LEAF 4
ISTORE_K 32
- FILE_SPACE_STRATEGY H5F_FILE_SPACE_ALL
- FREE_SPACE_THRESHOLD 1
+ FILE_SPACE_STRATEGY H5F_FSPACE_STRATEGY_FSM_AGGR
+ FREE_SPACE_PERSIST FALSE
+ FREE_SPACE_SECTION_THRESHOLD 1
+ FILE_SPACE_PAGE_SIZE 4096
USER_BLOCK {
USERBLOCK_SIZE 0
}
diff --git a/tools/testfiles/tboot2B.ddl b/tools/testfiles/tboot2B.ddl
index d83b92b..504a634 100644
--- a/tools/testfiles/tboot2B.ddl
+++ b/tools/testfiles/tboot2B.ddl
@@ -9,8 +9,10 @@ SUPER_BLOCK {
BTREE_RANK 16
BTREE_LEAF 4
ISTORE_K 32
- FILE_SPACE_STRATEGY H5F_FILE_SPACE_ALL
- FREE_SPACE_THRESHOLD 1
+ FILE_SPACE_STRATEGY H5F_FSPACE_STRATEGY_FSM_AGGR
+ FREE_SPACE_PERSIST FALSE
+ FREE_SPACE_SECTION_THRESHOLD 1
+ FILE_SPACE_PAGE_SIZE 4096
USER_BLOCK {
USERBLOCK_SIZE 0
}
diff --git a/tools/testfiles/tdset_idx.ddl b/tools/testfiles/tdset_idx.ddl
deleted file mode 100644
index 65d9f44..0000000
--- a/tools/testfiles/tdset_idx.ddl
+++ /dev/null
@@ -1,61 +0,0 @@
-#############################
-Expected output for 'h5dump -p -H tdset_idx.h5'
-#############################
-HDF5 "tdset_idx.h5" {
-GROUP "/" {
- DATASET "dset_btree" {
- DATATYPE H5T_STD_I32LE
- DATASPACE SIMPLE { ( 20, 10 ) / ( 200, 100 ) }
- STORAGE_LAYOUT {
- CHUNKED ( 5, 5 )
- SIZE 800
- }
- FILTERS {
- NONE
- }
- FILLVALUE {
- FILL_TIME H5D_FILL_TIME_IFSET
- VALUE 0
- }
- ALLOCATION_TIME {
- H5D_ALLOC_TIME_INCR
- }
- }
- DATASET "dset_filter" {
- DATATYPE H5T_STD_I32LE
- DATASPACE SIMPLE { ( 20, 10 ) / ( 20, 10 ) }
- STORAGE_LAYOUT {
- CHUNKED ( 5, 5 )
- SIZE 200 (4.000:1 COMPRESSION)
- }
- FILTERS {
- COMPRESSION DEFLATE { LEVEL 9 }
- }
- FILLVALUE {
- FILL_TIME H5D_FILL_TIME_IFSET
- VALUE 0
- }
- ALLOCATION_TIME {
- H5D_ALLOC_TIME_INCR
- }
- }
- DATASET "dset_fixed" {
- DATATYPE H5T_STD_I32LE
- DATASPACE SIMPLE { ( 20, 10 ) / ( 20, 10 ) }
- STORAGE_LAYOUT {
- CHUNKED ( 5, 5 )
- SIZE 800
- }
- FILTERS {
- NONE
- }
- FILLVALUE {
- FILL_TIME H5D_FILL_TIME_IFSET
- VALUE 0
- }
- ALLOCATION_TIME {
- H5D_ALLOC_TIME_INCR
- }
- }
-}
-}
diff --git a/tools/testfiles/tudfilter.ddl b/tools/testfiles/tudfilter.ddl
new file mode 100644
index 0000000..b2aa7e6
--- /dev/null
+++ b/tools/testfiles/tudfilter.ddl
@@ -0,0 +1,30 @@
+HDF5 "tudfilter.h5" {
+GROUP "/" {
+ DATASET "dynlibud" {
+ DATATYPE H5T_STD_I32LE
+ DATASPACE SIMPLE { ( 20, 10 ) / ( 20, 10 ) }
+ DATA {
+ (0,0): 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,
+ (1,0): 10, 11, 12, 13, 14, 15, 16, 17, 18, 19,
+ (2,0): 20, 21, 22, 23, 24, 25, 26, 27, 28, 29,
+ (3,0): 30, 31, 32, 33, 34, 35, 36, 37, 38, 39,
+ (4,0): 40, 41, 42, 43, 44, 45, 46, 47, 48, 49,
+ (5,0): 50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
+ (6,0): 60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
+ (7,0): 70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
+ (8,0): 80, 81, 82, 83, 84, 85, 86, 87, 88, 89,
+ (9,0): 90, 91, 92, 93, 94, 95, 96, 97, 98, 99,
+ (10,0): 100, 101, 102, 103, 104, 105, 106, 107, 108, 109,
+ (11,0): 110, 111, 112, 113, 114, 115, 116, 117, 118, 119,
+ (12,0): 120, 121, 122, 123, 124, 125, 126, 127, 128, 129,
+ (13,0): 130, 131, 132, 133, 134, 135, 136, 137, 138, 139,
+ (14,0): 140, 141, 142, 143, 144, 145, 146, 147, 148, 149,
+ (15,0): 150, 151, 152, 153, 154, 155, 156, 157, 158, 159,
+ (16,0): 160, 161, 162, 163, 164, 165, 166, 167, 168, 169,
+ (17,0): 170, 171, 172, 173, 174, 175, 176, 177, 178, 179,
+ (18,0): 180, 181, 182, 183, 184, 185, 186, 187, 188, 189,
+ (19,0): 190, 191, 192, 193, 194, 195, 196, 197, 198, 199
+ }
+ }
+}
+}
diff --git a/tools/testfiles/tudfilter.h5 b/tools/testfiles/tudfilter.h5
new file mode 100644
index 0000000..081b000
--- /dev/null
+++ b/tools/testfiles/tudfilter.h5
Binary files differ
diff --git a/tools/testfiles/tudfilter.ls b/tools/testfiles/tudfilter.ls
new file mode 100644
index 0000000..1882888
--- /dev/null
+++ b/tools/testfiles/tudfilter.ls
@@ -0,0 +1,16 @@
+dynlibud Dataset {20, 10}
+ Data:
+ (0,0) 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
+ (1,9) 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34,
+ (3,5) 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50,
+ (5,1) 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66,
+ (6,7) 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82,
+ (8,3) 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98,
+ (9,9) 99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111,
+ (11,2) 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124,
+ (12,5) 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137,
+ (13,8) 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150,
+ (15,1) 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163,
+ (16,4) 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176,
+ (17,7) 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189,
+ (19,0) 190, 191, 192, 193, 194, 195, 196, 197, 198, 199